init cobra
This commit is contained in:
parent
9a1421102b
commit
bdcdbd817a
12
LICENSE
12
LICENSE
@ -1,6 +1,6 @@
|
||||
MIT License
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2019 Thorsten Klein <iwilltry42@gmail.com>
|
||||
Copyright © 2019 Thorsten Klein <iwilltry42@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
@ -9,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
2
Makefile
2
Makefile
@ -40,7 +40,7 @@ export GO111MODULE=on
|
||||
# DIRS defines a single level directly, we only look at *.go in this directory.
|
||||
# REC_DIRS defines a source code tree. All go files are analyzed recursively.
|
||||
DIRS := .
|
||||
REC_DIRS := cli
|
||||
REC_DIRS := cmd
|
||||
|
||||
# Rules for finding all go source files using 'DIRS' and 'REC_DIRS'
|
||||
GO_SRC := $(foreach dir,$(DIRS),$(wildcard $(dir)/*.go))
|
||||
|
320
cli/cluster.go
320
cli/cluster.go
@ -1,320 +0,0 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/client"
|
||||
homedir "github.com/mitchellh/go-homedir"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultContainerNamePrefix = "k3d"
|
||||
)
|
||||
|
||||
type cluster struct {
|
||||
name string
|
||||
image string
|
||||
status string
|
||||
serverPorts []string
|
||||
server types.Container
|
||||
workers []types.Container
|
||||
}
|
||||
|
||||
// GetContainerName generates the container names
|
||||
func GetContainerName(role, clusterName string, postfix int) string {
|
||||
if postfix >= 0 {
|
||||
return fmt.Sprintf("%s-%s-%s-%d", defaultContainerNamePrefix, clusterName, role, postfix)
|
||||
}
|
||||
return fmt.Sprintf("%s-%s-%s", defaultContainerNamePrefix, clusterName, role)
|
||||
}
|
||||
|
||||
// GetAllContainerNames returns a list of all containernames that will be created
|
||||
func GetAllContainerNames(clusterName string, serverCount, workerCount int) []string {
|
||||
names := []string{}
|
||||
for postfix := 0; postfix < serverCount; postfix++ {
|
||||
names = append(names, GetContainerName("server", clusterName, postfix))
|
||||
}
|
||||
for postfix := 0; postfix < workerCount; postfix++ {
|
||||
names = append(names, GetContainerName("worker", clusterName, postfix))
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// createDirIfNotExists checks for the existence of a directory and creates it along with all required parents if not.
|
||||
// It returns an error if the directory (or parents) couldn't be created and nil if it worked fine or if the path already exists.
|
||||
func createDirIfNotExists(path string) error {
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
return os.MkdirAll(path, os.ModePerm)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// createClusterDir creates a directory with the cluster name under $HOME/.config/k3d/<cluster_name>.
|
||||
// The cluster directory will be used e.g. to store the kubeconfig file.
|
||||
func createClusterDir(name string) {
|
||||
clusterPath, _ := getClusterDir(name)
|
||||
if err := createDirIfNotExists(clusterPath); err != nil {
|
||||
log.Fatalf("ERROR: couldn't create cluster directory [%s] -> %+v", clusterPath, err)
|
||||
}
|
||||
// create subdir for sharing container images
|
||||
if err := createDirIfNotExists(clusterPath + "/images"); err != nil {
|
||||
log.Fatalf("ERROR: couldn't create cluster sub-directory [%s] -> %+v", clusterPath+"/images", err)
|
||||
}
|
||||
}
|
||||
|
||||
// deleteClusterDir contrary to createClusterDir, this deletes the cluster directory under $HOME/.config/k3d/<cluster_name>
|
||||
func deleteClusterDir(name string) {
|
||||
clusterPath, _ := getClusterDir(name)
|
||||
if err := os.RemoveAll(clusterPath); err != nil {
|
||||
log.Printf("WARNING: couldn't delete cluster directory [%s]. You might want to delete it manually.", clusterPath)
|
||||
}
|
||||
}
|
||||
|
||||
// getClusterDir returns the path to the cluster directory which is $HOME/.config/k3d/<cluster_name>
|
||||
func getClusterDir(name string) (string, error) {
|
||||
homeDir, err := homedir.Dir()
|
||||
if err != nil {
|
||||
log.Printf("ERROR: Couldn't get user's home directory")
|
||||
return "", err
|
||||
}
|
||||
return path.Join(homeDir, ".config", "k3d", name), nil
|
||||
}
|
||||
|
||||
func getClusterKubeConfigPath(cluster string) (string, error) {
|
||||
clusterDir, err := getClusterDir(cluster)
|
||||
return path.Join(clusterDir, "kubeconfig.yaml"), err
|
||||
}
|
||||
|
||||
func createKubeConfigFile(cluster string) error {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("label", "app=k3d")
|
||||
filters.Add("label", fmt.Sprintf("cluster=%s", cluster))
|
||||
filters.Add("label", "component=server")
|
||||
server, err := docker.ContainerList(ctx, types.ContainerListOptions{
|
||||
Filters: filters,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to get server container for cluster %s\n%+v", cluster, err)
|
||||
}
|
||||
|
||||
if len(server) == 0 {
|
||||
return fmt.Errorf("No server container for cluster %s", cluster)
|
||||
}
|
||||
|
||||
// get kubeconfig file from container and read contents
|
||||
reader, _, err := docker.CopyFromContainer(ctx, server[0].ID, "/output/kubeconfig.yaml")
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't copy kubeconfig.yaml from server container %s\n%+v", server[0].ID, err)
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
readBytes, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't read kubeconfig from container\n%+v", err)
|
||||
}
|
||||
|
||||
// create destination kubeconfig file
|
||||
destPath, err := getClusterKubeConfigPath(cluster)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
kubeconfigfile, err := os.Create(destPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't create kubeconfig file %s\n%+v", destPath, err)
|
||||
}
|
||||
defer kubeconfigfile.Close()
|
||||
|
||||
// write to file, skipping the first 512 bytes which contain file metadata
|
||||
// and trimming any NULL characters
|
||||
trimBytes := bytes.Trim(readBytes[512:], "\x00")
|
||||
|
||||
// Fix up kubeconfig.yaml file.
|
||||
//
|
||||
// K3s generates the default kubeconfig.yaml with host name as 'localhost'.
|
||||
// Change the host name to the name user specified via the --api-port argument.
|
||||
//
|
||||
// When user did not specify the host name and when we are running against a remote docker,
|
||||
// set the host name to remote docker machine's IP address.
|
||||
//
|
||||
// Otherwise, the hostname remains as 'localhost'
|
||||
apiHost := server[0].Labels["apihost"]
|
||||
|
||||
if apiHost != "" {
|
||||
s := string(trimBytes)
|
||||
s = strings.Replace(s, "localhost", apiHost, 1)
|
||||
trimBytes = []byte(s)
|
||||
}
|
||||
_, err = kubeconfigfile.Write(trimBytes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't write to kubeconfig.yaml\n%+v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getKubeConfig(cluster string) (string, error) {
|
||||
kubeConfigPath, err := getClusterKubeConfigPath(cluster)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if clusters, err := getClusters(false, cluster); err != nil || len(clusters) != 1 {
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return "", fmt.Errorf("Cluster %s does not exist", cluster)
|
||||
}
|
||||
|
||||
// If kubeconfi.yaml has not been created, generate it now
|
||||
if _, err := os.Stat(kubeConfigPath); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
if err = createKubeConfigFile(cluster); err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
return kubeConfigPath, nil
|
||||
}
|
||||
|
||||
// printClusters prints the names of existing clusters
|
||||
func printClusters() {
|
||||
clusters, err := getClusters(true, "")
|
||||
if err != nil {
|
||||
log.Fatalf("ERROR: Couldn't list clusters\n%+v", err)
|
||||
}
|
||||
if len(clusters) == 0 {
|
||||
log.Printf("No clusters found!")
|
||||
return
|
||||
}
|
||||
|
||||
table := tablewriter.NewWriter(os.Stdout)
|
||||
table.SetAlignment(tablewriter.ALIGN_CENTER)
|
||||
table.SetHeader([]string{"NAME", "IMAGE", "STATUS", "WORKERS"})
|
||||
|
||||
for _, cluster := range clusters {
|
||||
workersRunning := 0
|
||||
for _, worker := range cluster.workers {
|
||||
if worker.State == "running" {
|
||||
workersRunning++
|
||||
}
|
||||
}
|
||||
workerData := fmt.Sprintf("%d/%d", workersRunning, len(cluster.workers))
|
||||
clusterData := []string{cluster.name, cluster.image, cluster.status, workerData}
|
||||
table.Append(clusterData)
|
||||
}
|
||||
|
||||
table.Render()
|
||||
}
|
||||
|
||||
// Classify cluster state: Running, Stopped or Abnormal
|
||||
func getClusterStatus(server types.Container, workers []types.Container) string {
|
||||
// The cluster is in the abnromal state when server state and the worker
|
||||
// states don't agree.
|
||||
for _, w := range workers {
|
||||
if w.State != server.State {
|
||||
return "unhealthy"
|
||||
}
|
||||
}
|
||||
|
||||
switch server.State {
|
||||
case "exited": // All containers in this state are most likely
|
||||
// as the result of running the "k3d stop" command.
|
||||
return "stopped"
|
||||
}
|
||||
|
||||
return server.State
|
||||
}
|
||||
|
||||
// getClusters uses the docker API to get existing clusters and compares that with the list of cluster directories
|
||||
// When 'all' is true, 'cluster' contains all clusters found from the docker daemon
|
||||
// When 'all' is false, 'cluster' contains up to one cluster whose name matches 'name'. 'cluster' can
|
||||
// be empty if no matching cluster is found.
|
||||
func getClusters(all bool, name string) (map[string]cluster, error) {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
// Prepare docker label filters
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("label", "app=k3d")
|
||||
filters.Add("label", "component=server")
|
||||
|
||||
// get all servers created by k3d
|
||||
k3dServers, err := docker.ContainerList(ctx, types.ContainerListOptions{
|
||||
All: true,
|
||||
Filters: filters,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("WARNING: couldn't list server containers\n%+v", err)
|
||||
}
|
||||
|
||||
clusters := make(map[string]cluster)
|
||||
|
||||
// don't filter for servers but for workers now
|
||||
filters.Del("label", "component=server")
|
||||
filters.Add("label", "component=worker")
|
||||
|
||||
// for all servers created by k3d, get workers and cluster information
|
||||
for _, server := range k3dServers {
|
||||
clusterName := server.Labels["cluster"]
|
||||
|
||||
// Skip the cluster if we don't want all of them, and
|
||||
// the cluster name does not match.
|
||||
if all || name == clusterName {
|
||||
|
||||
// Add the cluster
|
||||
filters.Add("label", fmt.Sprintf("cluster=%s", clusterName))
|
||||
|
||||
// get workers
|
||||
workers, err := docker.ContainerList(ctx, types.ContainerListOptions{
|
||||
All: true,
|
||||
Filters: filters,
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("WARNING: couldn't get worker containers for cluster %s\n%+v", clusterName, err)
|
||||
}
|
||||
|
||||
// save cluster information
|
||||
serverPorts := []string{}
|
||||
for _, port := range server.Ports {
|
||||
serverPorts = append(serverPorts, strconv.Itoa(int(port.PublicPort)))
|
||||
}
|
||||
clusters[clusterName] = cluster{
|
||||
name: clusterName,
|
||||
image: server.Image,
|
||||
status: getClusterStatus(server, workers),
|
||||
serverPorts: serverPorts,
|
||||
server: server,
|
||||
workers: workers,
|
||||
}
|
||||
// clear label filters before searching for next cluster
|
||||
filters.Del("label", fmt.Sprintf("cluster=%s", clusterName))
|
||||
}
|
||||
}
|
||||
|
||||
return clusters, nil
|
||||
}
|
394
cli/commands.go
394
cli/commands.go
@ -1,394 +0,0 @@
|
||||
package run
|
||||
|
||||
/*
|
||||
* This file contains the "backend" functionality for the CLI commands (and flags)
|
||||
*/
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultRegistry = "docker.io"
|
||||
defaultServerCount = 1
|
||||
)
|
||||
|
||||
// CheckTools checks if the docker API server is responding
|
||||
func CheckTools(c *cli.Context) error {
|
||||
log.Print("Checking docker...")
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ping, err := docker.Ping(ctx)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: checking docker failed\n%+v", err)
|
||||
}
|
||||
log.Printf("SUCCESS: Checking docker succeeded (API: v%s)\n", ping.APIVersion)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateCluster creates a new single-node cluster container and initializes the cluster directory
|
||||
func CreateCluster(c *cli.Context) error {
|
||||
|
||||
if err := CheckClusterName(c.String("name")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cluster, err := getClusters(false, c.String("name")); err != nil {
|
||||
return err
|
||||
} else if len(cluster) != 0 {
|
||||
// A cluster exists with the same name. Return with an error.
|
||||
return fmt.Errorf("ERROR: Cluster %s already exists", c.String("name"))
|
||||
}
|
||||
|
||||
// On Error delete the cluster. If there createCluster() encounter any error,
|
||||
// call this function to remove all resources allocated for the cluster so far
|
||||
// so that they don't linger around.
|
||||
deleteCluster := func() {
|
||||
if err := DeleteCluster(c); err != nil {
|
||||
log.Printf("Error: Failed to delete cluster %s", c.String("name"))
|
||||
}
|
||||
}
|
||||
|
||||
// define image
|
||||
image := c.String("image")
|
||||
if c.IsSet("version") {
|
||||
// TODO: --version to be deprecated
|
||||
log.Println("[WARNING] The `--version` flag will be deprecated soon, please use `--image rancher/k3s:<version>` instead")
|
||||
if c.IsSet("image") {
|
||||
// version specified, custom image = error (to push deprecation of version flag)
|
||||
log.Fatalln("[ERROR] Please use `--image <image>:<version>` instead of --image and --version")
|
||||
} else {
|
||||
// version specified, default image = ok (until deprecation of version flag)
|
||||
image = fmt.Sprintf("%s:%s", strings.Split(image, ":")[0], c.String("version"))
|
||||
}
|
||||
}
|
||||
if len(strings.Split(image, "/")) <= 2 {
|
||||
// fallback to default registry
|
||||
image = fmt.Sprintf("%s/%s", defaultRegistry, image)
|
||||
}
|
||||
|
||||
// create cluster network
|
||||
networkID, err := createClusterNetwork(c.String("name"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Printf("Created cluster network with ID %s", networkID)
|
||||
|
||||
// environment variables
|
||||
env := []string{"K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml"}
|
||||
env = append(env, c.StringSlice("env")...)
|
||||
env = append(env, fmt.Sprintf("K3S_CLUSTER_SECRET=%s", GenerateRandomString(20)))
|
||||
|
||||
// k3s server arguments
|
||||
// TODO: --port will soon be --api-port since we want to re-use --port for arbitrary port mappings
|
||||
if c.IsSet("port") {
|
||||
log.Println("INFO: As of v2.0.0 --port will be used for arbitrary port mapping. Please use --api-port/-a instead for configuring the Api Port")
|
||||
}
|
||||
apiPort, err := parseAPIPort(c.String("api-port"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
k3AgentArgs := []string{}
|
||||
k3sServerArgs := []string{"--https-listen-port", apiPort.Port}
|
||||
|
||||
// When the 'host' is not provided by --api-port, try to fill it using Docker Machine's IP address.
|
||||
if apiPort.Host == "" {
|
||||
apiPort.Host, err = getDockerMachineIp()
|
||||
// IP address is the same as the host
|
||||
apiPort.HostIP = apiPort.Host
|
||||
// In case of error, Log a warning message, and continue on. Since it more likely caused by a miss configured
|
||||
// DOCKER_MACHINE_NAME environment variable.
|
||||
if err != nil {
|
||||
log.Printf("WARNING: Failed to get docker machine IP address, ignoring the DOCKER_MACHINE_NAME environment variable setting.\n")
|
||||
}
|
||||
}
|
||||
|
||||
if apiPort.Host != "" {
|
||||
// Add TLS SAN for non default host name
|
||||
log.Printf("Add TLS SAN for %s", apiPort.Host)
|
||||
k3sServerArgs = append(k3sServerArgs, "--tls-san", apiPort.Host)
|
||||
}
|
||||
|
||||
if c.IsSet("server-arg") || c.IsSet("x") {
|
||||
k3sServerArgs = append(k3sServerArgs, c.StringSlice("server-arg")...)
|
||||
}
|
||||
|
||||
if c.IsSet("agent-arg") {
|
||||
k3AgentArgs = append(k3AgentArgs, c.StringSlice("agent-arg")...)
|
||||
}
|
||||
|
||||
// new port map
|
||||
portmap, err := mapNodesToPortSpecs(c.StringSlice("publish"), GetAllContainerNames(c.String("name"), defaultServerCount, c.Int("workers")))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// create a docker volume for sharing image tarballs with the cluster
|
||||
imageVolume, err := createImageVolume(c.String("name"))
|
||||
log.Println("Created docker volume ", imageVolume.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
volumes := c.StringSlice("volume")
|
||||
volumes = append(volumes, fmt.Sprintf("%s:/images", imageVolume.Name))
|
||||
|
||||
clusterSpec := &ClusterSpec{
|
||||
AgentArgs: k3AgentArgs,
|
||||
APIPort: *apiPort,
|
||||
AutoRestart: c.Bool("auto-restart"),
|
||||
ClusterName: c.String("name"),
|
||||
Env: env,
|
||||
Image: image,
|
||||
NodeToPortSpecMap: portmap,
|
||||
PortAutoOffset: c.Int("port-auto-offset"),
|
||||
ServerArgs: k3sServerArgs,
|
||||
Verbose: c.GlobalBool("verbose"),
|
||||
Volumes: volumes,
|
||||
}
|
||||
|
||||
// create the server
|
||||
log.Printf("Creating cluster [%s]", c.String("name"))
|
||||
|
||||
// create the directory where we will put the kubeconfig file by default (when running `k3d get-config`)
|
||||
createClusterDir(c.String("name"))
|
||||
|
||||
dockerID, err := createServer(clusterSpec)
|
||||
if err != nil {
|
||||
deleteCluster()
|
||||
return err
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for k3s to be up and running if wanted.
|
||||
// We're simply scanning the container logs for a line that tells us that everything's up and running
|
||||
// TODO: also wait for worker nodes
|
||||
start := time.Now()
|
||||
timeout := time.Duration(c.Int("wait")) * time.Second
|
||||
for c.IsSet("wait") {
|
||||
// not running after timeout exceeded? Rollback and delete everything.
|
||||
if timeout != 0 && time.Now().After(start.Add(timeout)) {
|
||||
deleteCluster()
|
||||
return errors.New("Cluster creation exceeded specified timeout")
|
||||
}
|
||||
|
||||
// scan container logs for a line that tells us that the required services are up and running
|
||||
out, err := docker.ContainerLogs(ctx, dockerID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
|
||||
if err != nil {
|
||||
out.Close()
|
||||
return fmt.Errorf("ERROR: couldn't get docker logs for %s\n%+v", c.String("name"), err)
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
nRead, _ := buf.ReadFrom(out)
|
||||
out.Close()
|
||||
output := buf.String()
|
||||
if nRead > 0 && strings.Contains(string(output), "Running kubelet") {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
// spin up the worker nodes
|
||||
// TODO: do this concurrently in different goroutines
|
||||
if c.Int("workers") > 0 {
|
||||
log.Printf("Booting %s workers for cluster %s", strconv.Itoa(c.Int("workers")), c.String("name"))
|
||||
for i := 0; i < c.Int("workers"); i++ {
|
||||
workerID, err := createWorker(clusterSpec, i)
|
||||
if err != nil {
|
||||
deleteCluster()
|
||||
return err
|
||||
}
|
||||
log.Printf("Created worker with ID %s\n", workerID)
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("SUCCESS: created cluster [%s]", c.String("name"))
|
||||
log.Printf(`You can now use the cluster with:
|
||||
|
||||
export KUBECONFIG="$(%s get-kubeconfig --name='%s')"
|
||||
kubectl cluster-info`, os.Args[0], c.String("name"))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteCluster removes the containers belonging to a cluster and its local directory
|
||||
func DeleteCluster(c *cli.Context) error {
|
||||
clusters, err := getClusters(c.Bool("all"), c.String("name"))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// remove clusters one by one instead of appending all names to the docker command
|
||||
// this allows for more granular error handling and logging
|
||||
for _, cluster := range clusters {
|
||||
log.Printf("Removing cluster [%s]", cluster.name)
|
||||
if len(cluster.workers) > 0 {
|
||||
// TODO: this could be done in goroutines
|
||||
log.Printf("...Removing %d workers\n", len(cluster.workers))
|
||||
for _, worker := range cluster.workers {
|
||||
if err := removeContainer(worker.ID); err != nil {
|
||||
log.Println(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
deleteClusterDir(cluster.name)
|
||||
log.Println("...Removing server")
|
||||
if err := removeContainer(cluster.server.ID); err != nil {
|
||||
return fmt.Errorf("ERROR: Couldn't remove server for cluster %s\n%+v", cluster.name, err)
|
||||
}
|
||||
|
||||
if err := deleteClusterNetwork(cluster.name); err != nil {
|
||||
log.Printf("WARNING: couldn't delete cluster network for cluster %s\n%+v", cluster.name, err)
|
||||
}
|
||||
|
||||
log.Println("...Removing docker image volume")
|
||||
if err := deleteImageVolume(cluster.name); err != nil {
|
||||
log.Printf("WARNING: couldn't delete image docker volume for cluster %s\n%+v", cluster.name, err)
|
||||
}
|
||||
|
||||
log.Printf("SUCCESS: removed cluster [%s]", cluster.name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// StopCluster stops a running cluster container (restartable)
|
||||
func StopCluster(c *cli.Context) error {
|
||||
clusters, err := getClusters(c.Bool("all"), c.String("name"))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
// remove clusters one by one instead of appending all names to the docker command
|
||||
// this allows for more granular error handling and logging
|
||||
for _, cluster := range clusters {
|
||||
log.Printf("Stopping cluster [%s]", cluster.name)
|
||||
if len(cluster.workers) > 0 {
|
||||
log.Printf("...Stopping %d workers\n", len(cluster.workers))
|
||||
for _, worker := range cluster.workers {
|
||||
if err := docker.ContainerStop(ctx, worker.ID, nil); err != nil {
|
||||
log.Println(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Println("...Stopping server")
|
||||
if err := docker.ContainerStop(ctx, cluster.server.ID, nil); err != nil {
|
||||
return fmt.Errorf("ERROR: Couldn't stop server for cluster %s\n%+v", cluster.name, err)
|
||||
}
|
||||
|
||||
log.Printf("SUCCESS: Stopped cluster [%s]", cluster.name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartCluster starts a stopped cluster container
|
||||
func StartCluster(c *cli.Context) error {
|
||||
clusters, err := getClusters(c.Bool("all"), c.String("name"))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
// remove clusters one by one instead of appending all names to the docker command
|
||||
// this allows for more granular error handling and logging
|
||||
for _, cluster := range clusters {
|
||||
log.Printf("Starting cluster [%s]", cluster.name)
|
||||
|
||||
log.Println("...Starting server")
|
||||
if err := docker.ContainerStart(ctx, cluster.server.ID, types.ContainerStartOptions{}); err != nil {
|
||||
return fmt.Errorf("ERROR: Couldn't start server for cluster %s\n%+v", cluster.name, err)
|
||||
}
|
||||
|
||||
if len(cluster.workers) > 0 {
|
||||
log.Printf("...Starting %d workers\n", len(cluster.workers))
|
||||
for _, worker := range cluster.workers {
|
||||
if err := docker.ContainerStart(ctx, worker.ID, types.ContainerStartOptions{}); err != nil {
|
||||
log.Println(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("SUCCESS: Started cluster [%s]", cluster.name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListClusters prints a list of created clusters
|
||||
func ListClusters(c *cli.Context) error {
|
||||
if c.IsSet("all") {
|
||||
log.Println("INFO: --all is on by default, thus no longer required. This option will be removed in v2.0.0")
|
||||
|
||||
}
|
||||
printClusters()
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetKubeConfig grabs the kubeconfig from the running cluster and prints the path to stdout
|
||||
func GetKubeConfig(c *cli.Context) error {
|
||||
cluster := c.String("name")
|
||||
kubeConfigPath, err := getKubeConfig(cluster)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// output kubeconfig file path to stdout
|
||||
fmt.Println(kubeConfigPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shell starts a new subshell with the KUBECONFIG pointing to the selected cluster
|
||||
func Shell(c *cli.Context) error {
|
||||
return subShell(c.String("name"), c.String("shell"), c.String("command"))
|
||||
}
|
||||
|
||||
// ImportImage saves an image locally and imports it into the k3d containers
|
||||
func ImportImage(c *cli.Context) error {
|
||||
images := make([]string, 0)
|
||||
if strings.Contains(c.Args().First(), ",") {
|
||||
images = append(images, strings.Split(c.Args().First(), ",")...)
|
||||
} else {
|
||||
images = append(images, c.Args()...)
|
||||
}
|
||||
return importImage(c.String("name"), images, c.Bool("no-remove"))
|
||||
}
|
237
cli/container.go
237
cli/container.go
@ -1,237 +0,0 @@
|
||||
package run
|
||||
|
||||
/*
|
||||
* The functions in this file take care of spinning up the
|
||||
* k3s server and worker containers as well as deleting them.
|
||||
*/
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
type ClusterSpec struct {
|
||||
AgentArgs []string
|
||||
APIPort apiPort
|
||||
AutoRestart bool
|
||||
ClusterName string
|
||||
Env []string
|
||||
Image string
|
||||
NodeToPortSpecMap map[string][]string
|
||||
PortAutoOffset int
|
||||
ServerArgs []string
|
||||
Verbose bool
|
||||
Volumes []string
|
||||
}
|
||||
|
||||
func startContainer(verbose bool, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (string, error) {
|
||||
ctx := context.Background()
|
||||
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
resp, err := docker.ContainerCreate(ctx, config, hostConfig, networkingConfig, containerName)
|
||||
if client.IsErrNotFound(err) {
|
||||
log.Printf("Pulling image %s...\n", config.Image)
|
||||
reader, err := docker.ImagePull(ctx, config.Image, types.ImagePullOptions{})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("ERROR: couldn't pull image %s\n%+v", config.Image, err)
|
||||
}
|
||||
defer reader.Close()
|
||||
if verbose {
|
||||
_, err := io.Copy(os.Stdout, reader)
|
||||
if err != nil {
|
||||
log.Printf("WARNING: couldn't get docker output\n%+v", err)
|
||||
}
|
||||
} else {
|
||||
_, err := io.Copy(ioutil.Discard, reader)
|
||||
if err != nil {
|
||||
log.Printf("WARNING: couldn't get docker output\n%+v", err)
|
||||
}
|
||||
}
|
||||
resp, err = docker.ContainerCreate(ctx, config, hostConfig, networkingConfig, containerName)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("ERROR: couldn't create container after pull %s\n%+v", containerName, err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return "", fmt.Errorf("ERROR: couldn't create container %s\n%+v", containerName, err)
|
||||
}
|
||||
|
||||
if err := docker.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return resp.ID, nil
|
||||
}
|
||||
|
||||
func createServer(spec *ClusterSpec) (string, error) {
|
||||
log.Printf("Creating server using %s...\n", spec.Image)
|
||||
|
||||
containerLabels := make(map[string]string)
|
||||
containerLabels["app"] = "k3d"
|
||||
containerLabels["component"] = "server"
|
||||
containerLabels["created"] = time.Now().Format("2006-01-02 15:04:05")
|
||||
containerLabels["cluster"] = spec.ClusterName
|
||||
|
||||
containerName := GetContainerName("server", spec.ClusterName, -1)
|
||||
|
||||
// ports to be assigned to the server belong to roles
|
||||
// all, server or <server-container-name>
|
||||
serverPorts, err := MergePortSpecs(spec.NodeToPortSpecMap, "server", containerName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
hostIP := "0.0.0.0"
|
||||
containerLabels["apihost"] = "localhost"
|
||||
if spec.APIPort.Host != "" {
|
||||
hostIP = spec.APIPort.HostIP
|
||||
containerLabels["apihost"] = spec.APIPort.Host
|
||||
}
|
||||
|
||||
apiPortSpec := fmt.Sprintf("%s:%s:%s/tcp", hostIP, spec.APIPort.Port, spec.APIPort.Port)
|
||||
|
||||
serverPorts = append(serverPorts, apiPortSpec)
|
||||
|
||||
serverPublishedPorts, err := CreatePublishedPorts(serverPorts)
|
||||
if err != nil {
|
||||
log.Fatalf("Error: failed to parse port specs %+v \n%+v", serverPorts, err)
|
||||
}
|
||||
|
||||
hostConfig := &container.HostConfig{
|
||||
PortBindings: serverPublishedPorts.PortBindings,
|
||||
Privileged: true,
|
||||
}
|
||||
|
||||
if spec.AutoRestart {
|
||||
hostConfig.RestartPolicy.Name = "unless-stopped"
|
||||
}
|
||||
|
||||
if len(spec.Volumes) > 0 && spec.Volumes[0] != "" {
|
||||
hostConfig.Binds = spec.Volumes
|
||||
}
|
||||
|
||||
networkingConfig := &network.NetworkingConfig{
|
||||
EndpointsConfig: map[string]*network.EndpointSettings{
|
||||
k3dNetworkName(spec.ClusterName): {
|
||||
Aliases: []string{containerName},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
config := &container.Config{
|
||||
Hostname: containerName,
|
||||
Image: spec.Image,
|
||||
Cmd: append([]string{"server"}, spec.ServerArgs...),
|
||||
ExposedPorts: serverPublishedPorts.ExposedPorts,
|
||||
Env: spec.Env,
|
||||
Labels: containerLabels,
|
||||
}
|
||||
id, err := startContainer(spec.Verbose, config, hostConfig, networkingConfig, containerName)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("ERROR: couldn't create container %s\n%+v", containerName, err)
|
||||
}
|
||||
|
||||
return id, nil
|
||||
}
|
||||
|
||||
// createWorker creates/starts a k3s agent node that connects to the server
|
||||
func createWorker(spec *ClusterSpec, postfix int) (string, error) {
|
||||
containerLabels := make(map[string]string)
|
||||
containerLabels["app"] = "k3d"
|
||||
containerLabels["component"] = "worker"
|
||||
containerLabels["created"] = time.Now().Format("2006-01-02 15:04:05")
|
||||
containerLabels["cluster"] = spec.ClusterName
|
||||
|
||||
containerName := GetContainerName("worker", spec.ClusterName, postfix)
|
||||
|
||||
env := append(spec.Env, fmt.Sprintf("K3S_URL=https://k3d-%s-server:%s", spec.ClusterName, spec.APIPort.Port))
|
||||
|
||||
// ports to be assigned to the server belong to roles
|
||||
// all, server or <server-container-name>
|
||||
workerPorts, err := MergePortSpecs(spec.NodeToPortSpecMap, "worker", containerName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
workerPublishedPorts, err := CreatePublishedPorts(workerPorts)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if spec.PortAutoOffset > 0 {
|
||||
// TODO: add some checks before to print a meaningful log message saying that we cannot map multiple container ports
|
||||
// to the same host port without a offset
|
||||
workerPublishedPorts = workerPublishedPorts.Offset(postfix + spec.PortAutoOffset)
|
||||
}
|
||||
|
||||
hostConfig := &container.HostConfig{
|
||||
Tmpfs: map[string]string{
|
||||
"/run": "",
|
||||
"/var/run": "",
|
||||
},
|
||||
PortBindings: workerPublishedPorts.PortBindings,
|
||||
Privileged: true,
|
||||
}
|
||||
|
||||
if spec.AutoRestart {
|
||||
hostConfig.RestartPolicy.Name = "unless-stopped"
|
||||
}
|
||||
|
||||
if len(spec.Volumes) > 0 && spec.Volumes[0] != "" {
|
||||
hostConfig.Binds = spec.Volumes
|
||||
}
|
||||
|
||||
networkingConfig := &network.NetworkingConfig{
|
||||
EndpointsConfig: map[string]*network.EndpointSettings{
|
||||
k3dNetworkName(spec.ClusterName): {
|
||||
Aliases: []string{containerName},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
config := &container.Config{
|
||||
Hostname: containerName,
|
||||
Image: spec.Image,
|
||||
Env: env,
|
||||
Cmd: append([]string{"agent"}, spec.AgentArgs...),
|
||||
Labels: containerLabels,
|
||||
ExposedPorts: workerPublishedPorts.ExposedPorts,
|
||||
}
|
||||
|
||||
id, err := startContainer(spec.Verbose, config, hostConfig, networkingConfig, containerName)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("ERROR: couldn't start container %s\n%+v", containerName, err)
|
||||
}
|
||||
|
||||
return id, nil
|
||||
}
|
||||
|
||||
// removeContainer tries to rm a container, selected by Docker ID, and does a rm -f if it fails (e.g. if container is still running)
|
||||
func removeContainer(ID string) error {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
options := types.ContainerRemoveOptions{
|
||||
RemoveVolumes: true,
|
||||
Force: true,
|
||||
}
|
||||
|
||||
if err := docker.ContainerRemove(ctx, ID, options); err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't delete container [%s] -> %+v", ID, err)
|
||||
}
|
||||
return nil
|
||||
}
|
@ -1,35 +0,0 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func getDockerMachineIp() (string, error) {
|
||||
machine := os.ExpandEnv("$DOCKER_MACHINE_NAME")
|
||||
|
||||
if machine == "" {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
dockerMachinePath, err := exec.LookPath("docker-machine")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
out, err := exec.Command(dockerMachinePath, "ip", machine).Output()
|
||||
if err != nil {
|
||||
log.Printf("Error executing 'docker-machine ip'")
|
||||
|
||||
if exitError, ok := err.(*exec.ExitError); ok {
|
||||
log.Printf("%s", string(exitError.Stderr))
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
ipStr := strings.TrimSuffix(string(out), "\n")
|
||||
ipStr = strings.TrimSuffix(ipStr, "\r")
|
||||
|
||||
return ipStr, nil
|
||||
}
|
212
cli/image.go
212
cli/image.go
@ -1,212 +0,0 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
const (
|
||||
imageBasePathRemote = "/images"
|
||||
k3dToolsImage = "docker.io/iwilltry42/k3d-tools:v0.0.1"
|
||||
)
|
||||
|
||||
func importImage(clusterName string, images []string, noRemove bool) error {
|
||||
// get a docker client
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
// get cluster directory to temporarily save the image tarball there
|
||||
imageVolume, err := getImageVolume(clusterName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't get image volume for cluster [%s]\n%+v", clusterName, err)
|
||||
}
|
||||
|
||||
//*** first, save the images using the local docker daemon
|
||||
log.Printf("INFO: Saving images %s from local docker daemon...", images)
|
||||
toolsContainerName := fmt.Sprintf("k3d-%s-tools", clusterName)
|
||||
tarFileName := fmt.Sprintf("%s/k3d-%s-images-%s.tar", imageBasePathRemote, clusterName, time.Now().Format("20060102150405"))
|
||||
|
||||
// create a tools container to get the tarball into the named volume
|
||||
containerConfig := container.Config{
|
||||
Hostname: toolsContainerName,
|
||||
Image: k3dToolsImage,
|
||||
Labels: map[string]string{
|
||||
"app": "k3d",
|
||||
"cluster": clusterName,
|
||||
"component": "tools",
|
||||
},
|
||||
Cmd: append([]string{"save-image", "-d", tarFileName}, images...),
|
||||
AttachStdout: true,
|
||||
AttachStderr: true,
|
||||
}
|
||||
hostConfig := container.HostConfig{
|
||||
Binds: []string{
|
||||
"/var/run/docker.sock:/var/run/docker.sock",
|
||||
fmt.Sprintf("%s:%s:rw", imageVolume.Name, imageBasePathRemote),
|
||||
},
|
||||
}
|
||||
|
||||
toolsContainerID, err := startContainer(false, &containerConfig, &hostConfig, &network.NetworkingConfig{}, toolsContainerName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err = docker.ContainerRemove(ctx, toolsContainerID, types.ContainerRemoveOptions{
|
||||
Force: true,
|
||||
}); err != nil {
|
||||
log.Println(fmt.Errorf("WARN: couldn't remove tools container\n%+v", err))
|
||||
}
|
||||
}()
|
||||
|
||||
// loop to wait for tools container to exit (failed or successfully saved images)
|
||||
for {
|
||||
cont, err := docker.ContainerInspect(ctx, toolsContainerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't get helper container's exit code\n%+v", err)
|
||||
}
|
||||
if !cont.State.Running { // container finished...
|
||||
if cont.State.ExitCode == 0 { // ...successfully
|
||||
log.Println("INFO: saved images to shared docker volume")
|
||||
break
|
||||
} else if cont.State.ExitCode != 0 { // ...failed
|
||||
errTxt := "ERROR: helper container failed to save images"
|
||||
logReader, err := docker.ContainerLogs(ctx, toolsContainerID, types.ContainerLogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s\n> couldn't get logs from helper container\n%+v", errTxt, err)
|
||||
}
|
||||
logs, err := ioutil.ReadAll(logReader) // let's show somw logs indicating what happened
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s\n> couldn't get logs from helper container\n%+v", errTxt, err)
|
||||
}
|
||||
return fmt.Errorf("%s -> Logs from [%s]:\n>>>>>>\n%s\n<<<<<<", errTxt, toolsContainerName, string(logs))
|
||||
}
|
||||
}
|
||||
time.Sleep(time.Second / 2) // wait for half a second so we don't spam the docker API too much
|
||||
}
|
||||
|
||||
// Get the container IDs for all containers in the cluster
|
||||
clusters, err := getClusters(false, clusterName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't get cluster by name [%s]\n%+v", clusterName, err)
|
||||
}
|
||||
containerList := []types.Container{clusters[clusterName].server}
|
||||
containerList = append(containerList, clusters[clusterName].workers...)
|
||||
|
||||
// *** second, import the images using ctr in the k3d nodes
|
||||
|
||||
// create exec configuration
|
||||
cmd := []string{"ctr", "image", "import", tarFileName}
|
||||
execConfig := types.ExecConfig{
|
||||
AttachStderr: true,
|
||||
AttachStdout: true,
|
||||
Cmd: cmd,
|
||||
Tty: true,
|
||||
Detach: true,
|
||||
}
|
||||
|
||||
execAttachConfig := types.ExecConfig{
|
||||
Tty: true,
|
||||
}
|
||||
|
||||
execStartConfig := types.ExecStartCheck{
|
||||
Tty: true,
|
||||
}
|
||||
|
||||
// import in each node separately
|
||||
// TODO: import concurrently using goroutines or find a way to share the image cache
|
||||
for _, container := range containerList {
|
||||
|
||||
containerName := container.Names[0][1:] // trimming the leading "/" from name
|
||||
log.Printf("INFO: Importing images %s in container [%s]", images, containerName)
|
||||
|
||||
// create exec configuration
|
||||
execResponse, err := docker.ContainerExecCreate(ctx, container.ID, execConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: Failed to create exec command for container [%s]\n%+v", containerName, err)
|
||||
}
|
||||
|
||||
// attach to exec process in container
|
||||
containerConnection, err := docker.ContainerExecAttach(ctx, execResponse.ID, types.ExecStartCheck{
|
||||
Detach: execAttachConfig.Detach,
|
||||
Tty: execAttachConfig.Tty,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't attach to container [%s]\n%+v", containerName, err)
|
||||
}
|
||||
defer containerConnection.Close()
|
||||
|
||||
// start exec
|
||||
err = docker.ContainerExecStart(ctx, execResponse.ID, execStartConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't execute command in container [%s]\n%+v", containerName, err)
|
||||
}
|
||||
|
||||
// get output from container
|
||||
content, err := ioutil.ReadAll(containerConnection.Reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't read output from container [%s]\n%+v", containerName, err)
|
||||
}
|
||||
|
||||
// example output "unpacking image........ ...done"
|
||||
if !strings.Contains(string(content), "done") {
|
||||
return fmt.Errorf("ERROR: seems like something went wrong using `ctr image import` in container [%s]. Full output below:\n%s", containerName, string(content))
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("INFO: Successfully imported images %s in all nodes of cluster [%s]", images, clusterName)
|
||||
|
||||
// remove tarball from inside the server container
|
||||
if !noRemove {
|
||||
log.Println("INFO: Cleaning up tarball")
|
||||
|
||||
execID, err := docker.ContainerExecCreate(ctx, clusters[clusterName].server.ID, types.ExecConfig{
|
||||
Cmd: []string{"rm", "-f", tarFileName},
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("WARN: failed to delete tarball: couldn't create remove in container [%s]\n%+v", clusters[clusterName].server.ID, err)
|
||||
}
|
||||
err = docker.ContainerExecStart(ctx, execID.ID, types.ExecStartCheck{
|
||||
Detach: true,
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("WARN: couldn't start tarball deletion action\n%+v", err)
|
||||
}
|
||||
|
||||
for {
|
||||
execInspect, err := docker.ContainerExecInspect(ctx, execID.ID)
|
||||
if err != nil {
|
||||
log.Printf("WARN: couldn't verify deletion of tarball\n%+v", err)
|
||||
}
|
||||
|
||||
if !execInspect.Running {
|
||||
if execInspect.ExitCode == 0 {
|
||||
log.Println("INFO: deleted tarball")
|
||||
break
|
||||
} else {
|
||||
log.Println("WARN: failed to delete tarball")
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Println("INFO: ...Done")
|
||||
|
||||
return nil
|
||||
}
|
@ -1,83 +0,0 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
func k3dNetworkName(clusterName string) string {
|
||||
return fmt.Sprintf("k3d-%s", clusterName)
|
||||
}
|
||||
|
||||
// createClusterNetwork creates a docker network for a cluster that will be used
|
||||
// to let the server and worker containers communicate with each other easily.
|
||||
func createClusterNetwork(clusterName string) (string, error) {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
args := filters.NewArgs()
|
||||
args.Add("label", "app=k3d")
|
||||
args.Add("label", "cluster="+clusterName)
|
||||
nl, err := docker.NetworkList(ctx, types.NetworkListOptions{Filters: args})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Failed to list networks\n%+v", err)
|
||||
}
|
||||
|
||||
if len(nl) > 1 {
|
||||
log.Printf("WARNING: Found %d networks for %s when we only expect 1\n", len(nl), clusterName)
|
||||
}
|
||||
|
||||
if len(nl) > 0 {
|
||||
return nl[0].ID, nil
|
||||
}
|
||||
|
||||
// create the network with a set of labels and the cluster name as network name
|
||||
resp, err := docker.NetworkCreate(ctx, k3dNetworkName(clusterName), types.NetworkCreate{
|
||||
Labels: map[string]string{
|
||||
"app": "k3d",
|
||||
"cluster": clusterName,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("ERROR: couldn't create network\n%+v", err)
|
||||
}
|
||||
|
||||
return resp.ID, nil
|
||||
}
|
||||
|
||||
// deleteClusterNetwork deletes a docker network based on the name of a cluster it belongs to
|
||||
func deleteClusterNetwork(clusterName string) error {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("label", "app=k3d")
|
||||
filters.Add("label", fmt.Sprintf("cluster=%s", clusterName))
|
||||
|
||||
networks, err := docker.NetworkList(ctx, types.NetworkListOptions{
|
||||
Filters: filters,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't find network for cluster %s\n%+v", clusterName, err)
|
||||
}
|
||||
|
||||
// there should be only one network that matches the name... but who knows?
|
||||
for _, network := range networks {
|
||||
if err := docker.NetworkRemove(ctx, network.ID); err != nil {
|
||||
log.Printf("WARNING: couldn't remove network for cluster %s\n%+v", clusterName, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
203
cli/port.go
203
cli/port.go
@ -1,203 +0,0 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
// PublishedPorts is a struct used for exposing container ports on the host system
|
||||
type PublishedPorts struct {
|
||||
ExposedPorts map[nat.Port]struct{}
|
||||
PortBindings map[nat.Port][]nat.PortBinding
|
||||
}
|
||||
|
||||
// defaultNodes describes the type of nodes on which a port should be exposed by default
|
||||
const defaultNodes = "server"
|
||||
|
||||
// mapping a node role to groups that should be applied to it
|
||||
var nodeRuleGroupsMap = map[string][]string{
|
||||
"worker": {"all", "workers"},
|
||||
"server": {"all", "server", "master"},
|
||||
}
|
||||
|
||||
// mapNodesToPortSpecs maps nodes to portSpecs
|
||||
func mapNodesToPortSpecs(specs []string, createdNodes []string) (map[string][]string, error) {
|
||||
|
||||
if err := validatePortSpecs(specs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// check node-specifier possibilitites
|
||||
possibleNodeSpecifiers := []string{"all", "workers", "server", "master"}
|
||||
possibleNodeSpecifiers = append(possibleNodeSpecifiers, createdNodes...)
|
||||
|
||||
nodeToPortSpecMap := make(map[string][]string)
|
||||
|
||||
for _, spec := range specs {
|
||||
nodes, portSpec := extractNodes(spec)
|
||||
|
||||
if len(nodes) == 0 {
|
||||
nodes = append(nodes, defaultNodes)
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
// check if node-specifier is valid (either a role or a name) and append to list if matches
|
||||
nodeFound := false
|
||||
for _, name := range possibleNodeSpecifiers {
|
||||
if node == name {
|
||||
nodeFound = true
|
||||
nodeToPortSpecMap[node] = append(nodeToPortSpecMap[node], portSpec)
|
||||
break
|
||||
}
|
||||
}
|
||||
if !nodeFound {
|
||||
log.Printf("WARNING: Unknown node-specifier [%s] in port mapping entry [%s]", node, spec)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nodeToPortSpecMap, nil
|
||||
}
|
||||
|
||||
// CreatePublishedPorts is the factory function for PublishedPorts
|
||||
func CreatePublishedPorts(specs []string) (*PublishedPorts, error) {
|
||||
if len(specs) == 0 {
|
||||
var newExposedPorts = make(map[nat.Port]struct{}, 1)
|
||||
var newPortBindings = make(map[nat.Port][]nat.PortBinding, 1)
|
||||
return &PublishedPorts{ExposedPorts: newExposedPorts, PortBindings: newPortBindings}, nil
|
||||
}
|
||||
|
||||
newExposedPorts, newPortBindings, err := nat.ParsePortSpecs(specs)
|
||||
return &PublishedPorts{ExposedPorts: newExposedPorts, PortBindings: newPortBindings}, err
|
||||
}
|
||||
|
||||
// validatePortSpecs matches the provided port specs against a set of rules to enable early exit if something is wrong
|
||||
func validatePortSpecs(specs []string) error {
|
||||
for _, spec := range specs {
|
||||
atSplit := strings.Split(spec, "@")
|
||||
_, err := nat.ParsePortSpec(atSplit[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: Invalid port specification [%s] in port mapping [%s]\n%+v", atSplit[0], spec, err)
|
||||
}
|
||||
if len(atSplit) > 0 {
|
||||
for i := 1; i < len(atSplit); i++ {
|
||||
if err := ValidateHostname(atSplit[i]); err != nil {
|
||||
return fmt.Errorf("ERROR: Invalid node-specifier [%s] in port mapping [%s]\n%+v", atSplit[i], spec, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractNodes separates the node specification from the actual port specs
|
||||
func extractNodes(spec string) ([]string, string) {
|
||||
// extract nodes
|
||||
nodes := []string{}
|
||||
atSplit := strings.Split(spec, "@")
|
||||
portSpec := atSplit[0]
|
||||
if len(atSplit) > 1 {
|
||||
nodes = atSplit[1:]
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
nodes = append(nodes, defaultNodes)
|
||||
}
|
||||
return nodes, portSpec
|
||||
}
|
||||
|
||||
// Offset creates a new PublishedPort structure, with all host ports are changed by a fixed 'offset'
|
||||
func (p PublishedPorts) Offset(offset int) *PublishedPorts {
|
||||
var newExposedPorts = make(map[nat.Port]struct{}, len(p.ExposedPorts))
|
||||
var newPortBindings = make(map[nat.Port][]nat.PortBinding, len(p.PortBindings))
|
||||
|
||||
for k, v := range p.ExposedPorts {
|
||||
newExposedPorts[k] = v
|
||||
}
|
||||
|
||||
for k, v := range p.PortBindings {
|
||||
bindings := make([]nat.PortBinding, len(v))
|
||||
for i, b := range v {
|
||||
port, _ := nat.ParsePort(b.HostPort)
|
||||
bindings[i].HostIP = b.HostIP
|
||||
bindings[i].HostPort = fmt.Sprintf("%d", port*offset)
|
||||
}
|
||||
newPortBindings[k] = bindings
|
||||
}
|
||||
|
||||
return &PublishedPorts{ExposedPorts: newExposedPorts, PortBindings: newPortBindings}
|
||||
}
|
||||
|
||||
// AddPort creates a new PublishedPort struct with one more port, based on 'portSpec'
|
||||
func (p *PublishedPorts) AddPort(portSpec string) (*PublishedPorts, error) {
|
||||
portMappings, err := nat.ParsePortSpec(portSpec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var newExposedPorts = make(map[nat.Port]struct{}, len(p.ExposedPorts)+1)
|
||||
var newPortBindings = make(map[nat.Port][]nat.PortBinding, len(p.PortBindings)+1)
|
||||
|
||||
// Populate the new maps
|
||||
for k, v := range p.ExposedPorts {
|
||||
newExposedPorts[k] = v
|
||||
}
|
||||
|
||||
for k, v := range p.PortBindings {
|
||||
newPortBindings[k] = v
|
||||
}
|
||||
|
||||
// Add new ports
|
||||
for _, portMapping := range portMappings {
|
||||
port := portMapping.Port
|
||||
if _, exists := newExposedPorts[port]; !exists {
|
||||
newExposedPorts[port] = struct{}{}
|
||||
}
|
||||
|
||||
bslice, exists := newPortBindings[port]
|
||||
if !exists {
|
||||
bslice = []nat.PortBinding{}
|
||||
}
|
||||
newPortBindings[port] = append(bslice, portMapping.Binding)
|
||||
}
|
||||
|
||||
return &PublishedPorts{ExposedPorts: newExposedPorts, PortBindings: newPortBindings}, nil
|
||||
}
|
||||
|
||||
// MergePortSpecs merges published ports for a given node
|
||||
func MergePortSpecs(nodeToPortSpecMap map[string][]string, role, name string) ([]string, error) {
|
||||
|
||||
portSpecs := []string{}
|
||||
|
||||
// add portSpecs according to node role
|
||||
for _, group := range nodeRuleGroupsMap[role] {
|
||||
for _, v := range nodeToPortSpecMap[group] {
|
||||
exists := false
|
||||
for _, i := range portSpecs {
|
||||
if v == i {
|
||||
exists = true
|
||||
}
|
||||
}
|
||||
if !exists {
|
||||
portSpecs = append(portSpecs, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// add portSpecs according to node name
|
||||
for _, v := range nodeToPortSpecMap[name] {
|
||||
exists := false
|
||||
for _, i := range portSpecs {
|
||||
if v == i {
|
||||
exists = true
|
||||
}
|
||||
}
|
||||
if !exists {
|
||||
portSpecs = append(portSpecs, v)
|
||||
}
|
||||
}
|
||||
|
||||
return portSpecs, nil
|
||||
}
|
100
cli/shell.go
100
cli/shell.go
@ -1,100 +0,0 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
)
|
||||
|
||||
type shell struct {
|
||||
Name string
|
||||
Options []string
|
||||
Prompt string
|
||||
Env map[string]string
|
||||
}
|
||||
|
||||
var shells = map[string]shell{
|
||||
"bash": {
|
||||
Name: "bash",
|
||||
Options: []string{
|
||||
"--noprofile", // don't load .profile/.bash_profile
|
||||
"--norc", // don't load .bashrc
|
||||
},
|
||||
Prompt: "PS1",
|
||||
},
|
||||
"zsh": {
|
||||
Name: "zsh",
|
||||
Options: []string{
|
||||
"--no-rcs", // don't load .zshrc
|
||||
},
|
||||
Prompt: "PROMPT",
|
||||
},
|
||||
}
|
||||
|
||||
// subShell
|
||||
func subShell(cluster, shell, command string) error {
|
||||
|
||||
// check if the selected shell is supported
|
||||
if shell == "auto" {
|
||||
shell = path.Base(os.Getenv("SHELL"))
|
||||
}
|
||||
|
||||
supported := false
|
||||
for supportedShell := range shells {
|
||||
if supportedShell == shell {
|
||||
supported = true
|
||||
}
|
||||
}
|
||||
if !supported {
|
||||
return fmt.Errorf("ERROR: selected shell [%s] is not supported", shell)
|
||||
}
|
||||
|
||||
// get kubeconfig for selected cluster
|
||||
kubeConfigPath, err := getKubeConfig(cluster)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check if we're already in a subshell
|
||||
subShell := os.ExpandEnv("$__K3D_CLUSTER__")
|
||||
if len(subShell) > 0 {
|
||||
return fmt.Errorf("Error: Already in subshell of cluster %s", subShell)
|
||||
}
|
||||
|
||||
// get path of shell executable
|
||||
shellPath, err := exec.LookPath(shell)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// set shell specific options (command line flags)
|
||||
shellOptions := shells[shell].Options
|
||||
|
||||
cmd := exec.Command(shellPath, shellOptions...)
|
||||
|
||||
if len(command) > 0 {
|
||||
cmd.Args = append(cmd.Args, "-c", command)
|
||||
|
||||
}
|
||||
|
||||
// Set up stdio
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
// Set up Prompt
setPrompt := fmt.Sprintf("%s=[%s] %s", shells[shell].Prompt, cluster, os.Getenv(shells[shell].Prompt))
|
||||
|
||||
// Set up KUBECONFIG
|
||||
setKube := fmt.Sprintf("KUBECONFIG=%s", kubeConfigPath)
|
||||
|
||||
// Declare subshell
|
||||
subShell = fmt.Sprintf("__K3D_CLUSTER__=%s", cluster)
|
||||
|
||||
newEnv := append(os.Environ(), setPrompt, setKube, subShell)
|
||||
|
||||
cmd.Env = newEnv
|
||||
|
||||
return cmd.Run()
|
||||
}
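A hypothetical caller of subShell, e.g. from a CLI action handler:

// drop the user into an interactive bash subshell for the default cluster
if err := subShell("k3s-default", "bash", ""); err != nil {
	fmt.Println(err)
}

// or run a single command in that context and return
if err := subShell("k3s-default", "auto", "kubectl get nodes"); err != nil {
	fmt.Println(err)
}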
|
cli/types.go (20 lines, deleted)
@@ -1,20 +0,0 @@
package run
|
||||
|
||||
// Node describes a k3d node (= docker container)
|
||||
type Node struct {
|
||||
Name string
|
||||
Role string
|
||||
Image string
|
||||
Volumes []string
|
||||
Env []string
|
||||
Args []string
|
||||
Ports []string
|
||||
Restart bool
|
||||
}
|
||||
|
||||
// Cluster describes a k3d cluster (nodes, combined in a network)
|
||||
type Cluster struct {
|
||||
Name string
|
||||
Network string
|
||||
Nodes []Node
|
||||
}
|
cli/util.go (122 lines, deleted)
@@ -1,122 +0,0 @@
package run
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type apiPort struct {
|
||||
Host string
|
||||
HostIP string
|
||||
Port string
|
||||
}
|
||||
|
||||
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
const (
|
||||
letterIdxBits = 6 // 6 bits to represent a letter index
|
||||
letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
|
||||
letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
|
||||
)
|
||||
|
||||
var src = rand.NewSource(time.Now().UnixNano())
|
||||
|
||||
// GenerateRandomString thanks to https://stackoverflow.com/a/31832326/6450189
|
||||
// GenerateRandomString is used to generate a random string that is used as a cluster secret
|
||||
func GenerateRandomString(n int) string {
|
||||
|
||||
sb := strings.Builder{}
|
||||
sb.Grow(n)
|
||||
// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
|
||||
for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
|
||||
if remain == 0 {
|
||||
cache, remain = src.Int63(), letterIdxMax
|
||||
}
|
||||
if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
|
||||
sb.WriteByte(letterBytes[idx])
|
||||
i--
|
||||
}
|
||||
cache >>= letterIdxBits
|
||||
remain--
|
||||
}
|
||||
|
||||
return sb.String()
|
||||
}
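For illustration, generating a cluster secret with this helper:

clusterSecret := GenerateRandomString(64) // 64 random ASCII letters
fmt.Println(len(clusterSecret))           // 64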
|
||||
|
||||
/*** Cluster Name Validation ***/
|
||||
const clusterNameMaxSize int = 35
|
||||
|
||||
// CheckClusterName ensures that a cluster name is also a valid host name according to RFC 1123.
|
||||
// We further restrict the length of the cluster name to maximum 'clusterNameMaxSize'
|
||||
// so that we can construct the host names based on the cluster name, and still stay
|
||||
// within the 64 characters limit.
|
||||
func CheckClusterName(name string) error {
|
||||
if err := ValidateHostname(name); err != nil {
|
||||
return fmt.Errorf("[ERROR] Invalid cluster name\n%+v", ValidateHostname(name))
|
||||
}
|
||||
if len(name) > clusterNameMaxSize {
|
||||
return fmt.Errorf("[ERROR] Cluster name is too long (%d > %d)", len(name), clusterNameMaxSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateHostname ensures that a cluster name is also a valid host name according to RFC 1123.
|
||||
func ValidateHostname(name string) error {
|
||||
|
||||
if len(name) == 0 {
|
||||
return fmt.Errorf("[ERROR] no name provided")
|
||||
}
|
||||
|
||||
if name[0] == '-' || name[len(name)-1] == '-' {
|
||||
return fmt.Errorf("[ERROR] Hostname [%s] must not start or end with - (dash)", name)
|
||||
}
|
||||
|
||||
for _, c := range name {
|
||||
switch {
|
||||
case '0' <= c && c <= '9':
|
||||
case 'a' <= c && c <= 'z':
|
||||
case 'A' <= c && c <= 'Z':
|
||||
case c == '-':
|
||||
break
|
||||
default:
|
||||
return fmt.Errorf("[ERROR] Hostname [%s] contains characters other than 'Aa-Zz', '0-9' or '-'", name)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
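Hypothetical names that pass and fail these checks:

fmt.Println(CheckClusterName("k3s-default"))           // nil: valid hostname, below 35 characters
fmt.Println(CheckClusterName("-dash-prefixed"))        // error: must not start with '-'
fmt.Println(CheckClusterName("my_cluster"))            // error: '_' is not an allowed character
fmt.Println(CheckClusterName(strings.Repeat("a", 40))) // error: longer than clusterNameMaxSize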
|
||||
|
||||
func parseAPIPort(portSpec string) (*apiPort, error) {
|
||||
var port *apiPort
|
||||
split := strings.Split(portSpec, ":")
|
||||
if len(split) > 2 {
|
||||
return nil, fmt.Errorf("api-port format error")
|
||||
}
|
||||
|
||||
if len(split) == 1 {
|
||||
port = &apiPort{Port: split[0]}
|
||||
} else {
|
||||
// Make sure 'host' can be resolved to an IP address
|
||||
addrs, err := net.LookupHost(split[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
port = &apiPort{Host: split[0], HostIP: addrs[0], Port: split[1]}
|
||||
}
|
||||
|
||||
// Verify 'port' is an integer and within port ranges
|
||||
p, err := strconv.Atoi(port.Port)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if p < 0 || p > 65535 {
|
||||
return nil, fmt.Errorf("ERROR: --api-port port value out of range")
|
||||
}
|
||||
|
||||
return port, nil
|
||||
}
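Hypothetical inputs and results for parseAPIPort:

p1, _ := parseAPIPort("6443")
fmt.Println(p1.Port)                     // "6443"; Host and HostIP stay empty
p2, _ := parseAPIPort("localhost:6443")
fmt.Println(p2.Host, p2.HostIP, p2.Port) // "localhost", the resolved IP, "6443"
_, err := parseAPIPort("1.2.3.4:6443:extra")
fmt.Println(err)                         // "api-port format error"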
|
@@ -1,92 +0,0 @@
package run
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
// createImageVolume will create a new docker volume used for storing image tarballs that can be loaded into the clusters
|
||||
func createImageVolume(clusterName string) (types.Volume, error) {
|
||||
|
||||
var vol types.Volume
|
||||
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return vol, fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
volName := fmt.Sprintf("k3d-%s-images", clusterName)
|
||||
|
||||
volumeCreateOptions := volume.VolumeCreateBody{
|
||||
Name: volName,
|
||||
Labels: map[string]string{
|
||||
"app": "k3d",
|
||||
"cluster": clusterName,
|
||||
},
|
||||
Driver: "local", //TODO: allow setting driver + opts
|
||||
DriverOpts: map[string]string{},
|
||||
}
|
||||
vol, err = docker.VolumeCreate(ctx, volumeCreateOptions)
|
||||
if err != nil {
|
||||
return vol, fmt.Errorf("ERROR: failed to create image volume [%s] for cluster [%s]\n%+v", volName, clusterName, err)
|
||||
}
|
||||
|
||||
return vol, nil
|
||||
}
|
||||
|
||||
// deleteImageVolume will delete the volume we created for sharing images with this cluster
|
||||
func deleteImageVolume(clusterName string) error {
|
||||
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
volName := fmt.Sprintf("k3d-%s-images", clusterName)
|
||||
|
||||
if err = docker.VolumeRemove(ctx, volName, true); err != nil {
|
||||
return fmt.Errorf("ERROR: couldn't remove volume [%s] for cluster [%s]\n%+v", volName, clusterName, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getImageVolume returns the docker volume object representing the imagevolume for the cluster
|
||||
func getImageVolume(clusterName string) (types.Volume, error) {
|
||||
var vol types.Volume
|
||||
volName := fmt.Sprintf("k3d-%s-images", clusterName)
|
||||
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return vol, fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("label", "app=k3d")
|
||||
filters.Add("label", fmt.Sprintf("cluster=%s", clusterName))
|
||||
volumeList, err := docker.VolumeList(ctx, filters)
|
||||
if err != nil {
|
||||
return vol, fmt.Errorf("ERROR: couldn't get volumes for cluster [%s]\n%+v ", clusterName, err)
|
||||
}
|
||||
volFound := false
|
||||
for _, volume := range volumeList.Volumes {
|
||||
if volume.Name == volName {
|
||||
vol = *volume
|
||||
volFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !volFound {
|
||||
return vol, fmt.Errorf("ERROR: didn't find volume [%s] in list of volumes returned for cluster [%s]", volName, clusterName)
|
||||
}
|
||||
|
||||
return vol, nil
|
||||
}
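A hypothetical follow-up lookup of the volume created above, written as if inside a function returning error:

vol, err := getImageVolume("k3s-default")
if err != nil {
	return err
}
// the volume name follows the k3d-<cluster>-images convention, e.g. k3d-k3s-default-images
fmt.Printf("volume %s is mounted at %s\n", vol.Name, vol.Mountpoint)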
|
cmd/create.go (new file, 57 lines)
@@ -0,0 +1,57 @@
/*
|
||||
Copyright © 2019 Thorsten Klein <iwilltry42@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// createCmd represents the create command
|
||||
var createCmd = &cobra.Command{
|
||||
Use: "create",
|
||||
Short: "A brief description of your command",
|
||||
Long: `A longer description that spans multiple lines and likely contains examples
|
||||
and usage of using your command. For example:
|
||||
|
||||
Cobra is a CLI library for Go that empowers applications.
|
||||
This application is a tool to generate the needed files
|
||||
to quickly create a Cobra application.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
fmt.Println("create called")
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(createCmd)
|
||||
|
||||
// Here you will define your flags and configuration settings.
|
||||
|
||||
// Cobra supports Persistent Flags which will work for this command
|
||||
// and all subcommands, e.g.:
|
||||
// createCmd.PersistentFlags().String("foo", "", "A help for foo")
|
||||
|
||||
// Cobra supports local flags which will only run when this command
|
||||
// is called directly, e.g.:
|
||||
// createCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
||||
}
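cmd/create.go, cmd/delete.go and cmd/get.go are unmodified cobra-generated stubs. A hypothetical next step (not part of this commit) would be to replace the placeholder descriptions and wire up cluster flags, roughly like this:

// createCmd with a real description and cluster flags instead of the generated placeholders
var createCmd = &cobra.Command{
	Use:   "create",
	Short: "Create a single- or multi-node k3s cluster in docker containers",
	Run: func(cmd *cobra.Command, args []string) {
		name, _ := cmd.Flags().GetString("name")
		fmt.Printf("creating cluster %s\n", name)
	},
}

func init() {
	rootCmd.AddCommand(createCmd)
	createCmd.Flags().StringP("name", "n", "k3s-default", "Name of the cluster")
	createCmd.Flags().IntP("workers", "w", 0, "Number of worker nodes to spawn")
}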
|
cmd/delete.go (new file, 57 lines)
@@ -0,0 +1,57 @@
/*
|
||||
Copyright © 2019 Thorsten Klein <iwilltry42@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// deleteCmd represents the delete command
|
||||
var deleteCmd = &cobra.Command{
|
||||
Use: "delete",
|
||||
Short: "A brief description of your command",
|
||||
Long: `A longer description that spans multiple lines and likely contains examples
|
||||
and usage of using your command. For example:
|
||||
|
||||
Cobra is a CLI library for Go that empowers applications.
|
||||
This application is a tool to generate the needed files
|
||||
to quickly create a Cobra application.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
fmt.Println("delete called")
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(deleteCmd)
|
||||
|
||||
// Here you will define your flags and configuration settings.
|
||||
|
||||
// Cobra supports Persistent Flags which will work for this command
|
||||
// and all subcommands, e.g.:
|
||||
// deleteCmd.PersistentFlags().String("foo", "", "A help for foo")
|
||||
|
||||
// Cobra supports local flags which will only run when this command
|
||||
// is called directly, e.g.:
|
||||
// deleteCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
||||
}
|
cmd/get.go (new file, 57 lines)
@@ -0,0 +1,57 @@
/*
|
||||
Copyright © 2019 Thorsten Klein <iwilltry42@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// getCmd represents the get command
|
||||
var getCmd = &cobra.Command{
|
||||
Use: "get",
|
||||
Short: "A brief description of your command",
|
||||
Long: `A longer description that spans multiple lines and likely contains examples
|
||||
and usage of using your command. For example:
|
||||
|
||||
Cobra is a CLI library for Go that empowers applications.
|
||||
This application is a tool to generate the needed files
|
||||
to quickly create a Cobra application.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
fmt.Println("get called")
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(getCmd)
|
||||
|
||||
// Here you will define your flags and configuration settings.
|
||||
|
||||
// Cobra supports Persistent Flags which will work for this command
|
||||
// and all subcommands, e.g.:
|
||||
// getCmd.PersistentFlags().String("foo", "", "A help for foo")
|
||||
|
||||
// Cobra supports local flags which will only run when this command
|
||||
// is called directly, e.g.:
|
||||
// getCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
||||
}
|
cmd/root.go (new file, 92 lines)
@@ -0,0 +1,92 @@
/*
|
||||
Copyright © 2019 Thorsten Klein <iwilltry42@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"os"
|
||||
|
||||
homedir "github.com/mitchellh/go-homedir"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var cfgFile string
|
||||
|
||||
// rootCmd represents the base command when called without any subcommands
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "k3d",
|
||||
Short: "Run k3s in Docker!",
|
||||
Long: `k3d is a wrapper CLI that helps you to easily create k3s clusters inside docker.
|
||||
Nodes of a k3d cluster are docker containers running a k3s image.
|
||||
All Nodes of a k3d cluster are part of the same docker network.`,
|
||||
}
|
||||
|
||||
// Execute adds all child commands to the root command and sets flags appropriately.
|
||||
// This is called by main.main(). It only needs to happen once to the rootCmd.
|
||||
func Execute() {
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
log.Errorln(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
cobra.OnInitialize(initConfig)
|
||||
|
||||
// Here you will define your flags and configuration settings.
|
||||
// Cobra supports persistent flags, which, if defined here,
|
||||
// will be global for your application.
|
||||
|
||||
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.k3d.yaml)")
|
||||
|
||||
// Cobra also supports local flags, which will only run
|
||||
// when this action is called directly.
|
||||
rootCmd.Flags().BoolP("verbose", "v", false, "Enable verbose output (debug level logs)")
|
||||
}
|
||||
|
||||
// initConfig reads in config file and ENV variables if set.
|
||||
func initConfig() {
|
||||
if cfgFile != "" {
|
||||
// Use config file from the flag.
|
||||
viper.SetConfigFile(cfgFile)
|
||||
} else {
|
||||
// Find home directory.
|
||||
home, err := homedir.Dir()
|
||||
if err != nil {
|
||||
log.Errorln(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Search config in home directory with name ".k3d" (without extension).
|
||||
viper.AddConfigPath(home)
|
||||
viper.SetConfigName(".k3d")
|
||||
}
|
||||
|
||||
viper.AutomaticEnv() // read in environment variables that match
|
||||
|
||||
// If a config file is found, read it in.
|
||||
if err := viper.ReadInConfig(); err == nil {
|
||||
log.Debugln("Using config file:", viper.ConfigFileUsed())
|
||||
}
|
||||
}
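A hypothetical extension of this setup (not part of this commit) would bind the flags defined in init() to viper, so they can also be set through the config file or K3D_* environment variables:

viper.SetEnvPrefix("K3D") // AutomaticEnv() will then pick up e.g. K3D_VERBOSE
if err := viper.BindPFlag("verbose", rootCmd.Flags().Lookup("verbose")); err != nil {
	log.Errorln(err)
}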
|
go.mod (33 lines changed)
@@ -3,27 +3,16 @@ module github.com/rancher/k3d
go 1.12
|
||||
|
||||
require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
|
||||
github.com/Microsoft/go-winio v0.4.12 // indirect
|
||||
github.com/containerd/containerd v1.2.7 // indirect
|
||||
github.com/docker/distribution v2.7.1+incompatible // indirect
|
||||
github.com/docker/docker v0.7.3-0.20190723064612-a9dc697fd2a5
|
||||
github.com/docker/go-connections v0.4.0
|
||||
github.com/docker/go-units v0.3.3 // indirect
|
||||
github.com/gogo/protobuf v1.2.1 // indirect
|
||||
github.com/gorilla/mux v1.7.3 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.4 // indirect
|
||||
github.com/magiconair/properties v1.8.1 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.1
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.1 // indirect
|
||||
github.com/pkg/errors v0.8.1 // indirect
|
||||
github.com/sirupsen/logrus v1.4.2 // indirect
|
||||
github.com/stretchr/testify v1.3.0 // indirect
|
||||
github.com/urfave/cli v1.20.0
|
||||
golang.org/x/net v0.0.0-20190403144856-b630fd6fe46b // indirect
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
|
||||
google.golang.org/grpc v1.22.0 // indirect
|
||||
gotest.tools v2.2.0+incompatible // indirect
|
||||
github.com/pelletier/go-toml v1.4.0 // indirect
|
||||
github.com/sirupsen/logrus v1.2.0
|
||||
github.com/spf13/afero v1.2.2 // indirect
|
||||
github.com/spf13/cobra v0.0.5
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/viper v1.4.0
|
||||
github.com/stretchr/testify v1.4.0 // indirect
|
||||
golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 // indirect
|
||||
golang.org/x/text v0.3.2 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
|
||||
)
|
||||
|
go.sum (178 lines changed)
@@ -1,86 +1,172 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc=
|
||||
github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/containerd/containerd v1.2.7 h1:8lqLbl7u1j3MmiL9cJ/O275crSq7bfwUayvvatEupQk=
|
||||
github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v0.7.3-0.20190723064612-a9dc697fd2a5 h1:7gpSLDzIX1hABRGvQdm+lBSHxpsHunr5m77wK/VxvNY=
|
||||
github.com/docker/docker v0.7.3-0.20190723064612-a9dc697fd2a5/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
|
||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE=
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88=
|
||||
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
|
||||
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190403144856-b630fd6fe46b h1:/zjbcJPEGAyu6Is/VBOALsgdi4z9+kz/Vtdm6S+beD0=
|
||||
golang.org/x/net v0.0.0-20190403144856-b630fd6fe46b/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 h1:tdsQdquKbTNMsSZLqnLELJGzCANp9oXhu6zFBW6ODx4=
|
||||
golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw=
|
||||
google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
main.go (272 lines changed)
@@ -1,254 +1,28 @@
/*
|
||||
Copyright © 2019 Thorsten Klein <iwilltry42@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
import "github.com/rancher/k3d/cmd"
|
||||
|
||||
run "github.com/rancher/k3d/cli"
|
||||
"github.com/rancher/k3d/version"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// defaultK3sImage specifies the default image being used for server and workers
|
||||
const defaultK3sImage = "docker.io/rancher/k3s"
|
||||
const defaultK3sClusterName string = "k3s-default"
|
||||
|
||||
// main represents the CLI application
|
||||
func main() {
|
||||
|
||||
// App Details
|
||||
app := cli.NewApp()
|
||||
app.Name = "k3d"
|
||||
app.Usage = "Run k3s in Docker!"
|
||||
app.Version = version.GetVersion()
|
||||
app.Authors = []cli.Author{
|
||||
{
|
||||
Name: "Thorsten Klein",
|
||||
Email: "iwilltry42@gmail.com",
|
||||
},
|
||||
{
|
||||
Name: "Rishabh Gupta",
|
||||
Email: "r.g.gupta@outlook.com",
|
||||
},
|
||||
{
|
||||
Name: "Darren Shepherd",
|
||||
},
|
||||
}
|
||||
|
||||
// commands that you can execute
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
// check-tools verifies that docker is up and running
|
||||
Name: "check-tools",
|
||||
Aliases: []string{"ct"},
|
||||
Usage: "Check if docker is running",
|
||||
Action: run.CheckTools,
|
||||
},
|
||||
{
|
||||
// shell starts a shell in the context of a running cluster
|
||||
Name: "shell",
|
||||
Usage: "Start a subshell for a cluster",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "name, n",
|
||||
Value: defaultK3sClusterName,
|
||||
Usage: "Set a name for the cluster",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "command, c",
|
||||
Usage: "Run a shell command in the context of the cluster",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "shell, s",
|
||||
Value: "auto",
|
||||
Usage: "which shell to use. One of [auto, bash, zsh]",
|
||||
},
|
||||
},
|
||||
Action: run.Shell,
|
||||
},
|
||||
{
|
||||
// create creates a new k3s cluster in docker containers
|
||||
Name: "create",
|
||||
Aliases: []string{"c"},
|
||||
Usage: "Create a single- or multi-node k3s cluster in docker containers",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "name, n",
|
||||
Value: defaultK3sClusterName,
|
||||
Usage: "Set a name for the cluster",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "volume, v",
|
||||
Usage: "Mount one or more volumes into every node of the cluster (Docker notation: `source:destination`)",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "publish, add-port",
|
||||
Usage: "Publish k3s node ports to the host (Format: `[ip:][host-port:]container-port[/protocol]@node-specifier`, use multiple options to expose more ports)",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "port-auto-offset",
|
||||
Value: 0,
|
||||
Usage: "Automatically add an offset (* worker number) to the chosen host port when using `--publish` to map the same container-port from multiple k3d workers to the host",
|
||||
},
|
||||
cli.StringFlag{
|
||||
// TODO: to be deprecated
|
||||
Name: "version",
|
||||
Usage: "Choose the k3s image version",
|
||||
},
|
||||
cli.StringFlag{
|
||||
// TODO: only --api-port, -a soon since we want to use --port, -p for the --publish/--add-port functionality
|
||||
Name: "api-port, a, port, p",
|
||||
Value: "6443",
|
||||
Usage: "Specify the Kubernetes cluster API server port (Format: `[host:]port` (Note: --port/-p will be used for arbitrary port mapping as of v2.0.0, use --api-port/-a instead for setting the api port)",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "wait, t",
|
||||
Value: 0, // timeout
|
||||
Usage: "Wait for the cluster to come up before returning until timoout (in seconds). Use --wait 0 to wait forever",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "image, i",
|
||||
Usage: "Specify a k3s image (Format: <repo>/<image>:<tag>)",
|
||||
Value: fmt.Sprintf("%s:%s", defaultK3sImage, version.GetK3sVersion()),
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "server-arg, x",
|
||||
Usage: "Pass an additional argument to k3s server (new flag per argument)",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "agent-arg",
|
||||
Usage: "Pass an additional argument to k3s agent (new flag per argument)",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "env, e",
|
||||
Usage: "Pass an additional environment variable (new flag per variable)",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "workers, w",
|
||||
Value: 0,
|
||||
Usage: "Specify how many worker nodes you want to spawn",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "auto-restart",
|
||||
Usage: "Set docker's --restart=unless-stopped flag on the containers",
|
||||
},
|
||||
},
|
||||
Action: run.CreateCluster,
|
||||
},
|
||||
{
|
||||
// delete deletes an existing k3s cluster (remove container and cluster directory)
|
||||
Name: "delete",
|
||||
Aliases: []string{"d", "del"},
|
||||
Usage: "Delete cluster",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "name, n",
|
||||
Value: defaultK3sClusterName,
|
||||
Usage: "name of the cluster",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "all, a",
|
||||
Usage: "Delete all existing clusters (this ignores the --name/-n flag)",
|
||||
},
|
||||
},
|
||||
Action: run.DeleteCluster,
|
||||
},
|
||||
{
|
||||
// stop stops a running cluster (its container) so it's restartable
|
||||
Name: "stop",
|
||||
Usage: "Stop cluster",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "name, n",
|
||||
Value: defaultK3sClusterName,
|
||||
Usage: "Name of the cluster",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "all, a",
|
||||
Usage: "Stop all running clusters (this ignores the --name/-n flag)",
|
||||
},
|
||||
},
|
||||
Action: run.StopCluster,
|
||||
},
|
||||
{
|
||||
// start restarts a stopped cluster container
|
||||
Name: "start",
|
||||
Usage: "Start a stopped cluster",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "name, n",
|
||||
Value: defaultK3sClusterName,
|
||||
Usage: "Name of the cluster",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "all, a",
|
||||
Usage: "Start all stopped clusters (this ignores the --name/-n flag)",
|
||||
},
|
||||
},
|
||||
Action: run.StartCluster,
|
||||
},
|
||||
{
|
||||
// list prints a list of created clusters
|
||||
Name: "list",
|
||||
Aliases: []string{"ls", "l"},
|
||||
Usage: "List all clusters",
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "all, a",
|
||||
Usage: "Also show non-running clusters",
|
||||
},
|
||||
},
|
||||
Action: run.ListClusters,
|
||||
},
|
||||
{
|
||||
// get-kubeconfig grabs the kubeconfig from the cluster and prints the path to it
|
||||
Name: "get-kubeconfig",
|
||||
Usage: "Get kubeconfig location for cluster",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "name, n",
|
||||
Value: defaultK3sClusterName,
|
||||
Usage: "Name of the cluster",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "all, a",
|
||||
Usage: "Get kubeconfig for all clusters (this ignores the --name/-n flag)",
|
||||
},
|
||||
},
|
||||
Action: run.GetKubeConfig,
|
||||
},
|
||||
{
|
||||
// import-images imports a list of container images from the local docker daemon into the cluster
|
||||
Name: "import-images",
|
||||
Aliases: []string{"i"},
|
||||
Usage: "Import a comma- or space-separated list of container images from your local docker daemon into the cluster",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "name, n, cluster, c",
|
||||
Value: defaultK3sClusterName,
|
||||
Usage: "Name of the cluster",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "no-remove, no-rm, keep, k",
|
||||
Usage: "Disable automatic removal of the tarball",
|
||||
},
|
||||
},
|
||||
Action: run.ImportImage,
|
||||
},
|
||||
}
|
||||
|
||||
// Global flags
|
||||
app.Flags = []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "verbose",
|
||||
Usage: "Enable verbose output",
|
||||
},
|
||||
}
|
||||
|
||||
// run the whole thing
|
||||
err := app.Run(os.Args)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
cmd.Execute()
|
||||
}
|
||||
|
vendor/github.com/Microsoft/go-winio/.gitignore (generated, vendored; 1 line, deleted)
@@ -1 +0,0 @@
*.exe
|
vendor/github.com/Microsoft/go-winio/README.md (generated, vendored; 22 lines, deleted)
@@ -1,22 +0,0 @@
# go-winio
|
||||
|
||||
This repository contains utilities for efficiently performing Win32 IO operations in
|
||||
Go. Currently, this is focused on accessing named pipes and other file handles, and
|
||||
for using named pipes as a net transport.
|
||||
|
||||
This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
|
||||
to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
|
||||
newer operating systems. This is similar to the implementation of network sockets in Go's net
|
||||
package.
|
||||
|
||||
Please see the LICENSE file for licensing information.
|
||||
|
||||
This project has adopted the [Microsoft Open Source Code of
|
||||
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
|
||||
see the [Code of Conduct
|
||||
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
|
||||
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
|
||||
questions or comments.
|
||||
|
||||
Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
|
||||
for another named pipe implementation.
|
vendor/github.com/Microsoft/go-winio/backup.go (generated, vendored; 280 lines, deleted)
@@ -1,280 +0,0 @@
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
)
|
||||
|
||||
//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
|
||||
//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
|
||||
|
||||
const (
|
||||
BackupData = uint32(iota + 1)
|
||||
BackupEaData
|
||||
BackupSecurity
|
||||
BackupAlternateData
|
||||
BackupLink
|
||||
BackupPropertyData
|
||||
BackupObjectId
|
||||
BackupReparseData
|
||||
BackupSparseBlock
|
||||
BackupTxfsData
|
||||
)
|
||||
|
||||
const (
|
||||
StreamSparseAttributes = uint32(8)
|
||||
)
|
||||
|
||||
const (
|
||||
WRITE_DAC = 0x40000
|
||||
WRITE_OWNER = 0x80000
|
||||
ACCESS_SYSTEM_SECURITY = 0x1000000
|
||||
)
|
||||
|
||||
// BackupHeader represents a backup stream of a file.
|
||||
type BackupHeader struct {
|
||||
Id uint32 // The backup stream ID
|
||||
Attributes uint32 // Stream attributes
|
||||
Size int64 // The size of the stream in bytes
|
||||
Name string // The name of the stream (for BackupAlternateData only).
|
||||
Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
|
||||
}
|
||||
|
||||
type win32StreamId struct {
|
||||
StreamId uint32
|
||||
Attributes uint32
|
||||
Size uint64
|
||||
NameSize uint32
|
||||
}
|
||||
|
||||
// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
|
||||
// of BackupHeader values.
|
||||
type BackupStreamReader struct {
|
||||
r io.Reader
|
||||
bytesLeft int64
|
||||
}
|
||||
|
||||
// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
|
||||
func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
|
||||
return &BackupStreamReader{r, 0}
|
||||
}
|
||||
|
||||
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
|
||||
// it was not completely read.
|
||||
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
|
||||
if r.bytesLeft > 0 {
|
||||
if s, ok := r.r.(io.Seeker); ok {
|
||||
// Make sure Seek on io.SeekCurrent sometimes succeeds
|
||||
// before trying the actual seek.
|
||||
if _, err := s.Seek(0, io.SeekCurrent); err == nil {
|
||||
if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.bytesLeft = 0
|
||||
}
|
||||
}
|
||||
if _, err := io.Copy(ioutil.Discard, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var wsi win32StreamId
|
||||
if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr := &BackupHeader{
|
||||
Id: wsi.StreamId,
|
||||
Attributes: wsi.Attributes,
|
||||
Size: int64(wsi.Size),
|
||||
}
|
||||
if wsi.NameSize != 0 {
|
||||
name := make([]uint16, int(wsi.NameSize/2))
|
||||
if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr.Name = syscall.UTF16ToString(name)
|
||||
}
|
||||
if wsi.StreamId == BackupSparseBlock {
|
||||
if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr.Size -= 8
|
||||
}
|
||||
r.bytesLeft = hdr.Size
|
||||
return hdr, nil
|
||||
}
|
||||
|
||||
// Read reads from the current backup stream.
|
||||
func (r *BackupStreamReader) Read(b []byte) (int, error) {
|
||||
if r.bytesLeft == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
if int64(len(b)) > r.bytesLeft {
|
||||
b = b[:r.bytesLeft]
|
||||
}
|
||||
n, err := r.r.Read(b)
|
||||
r.bytesLeft -= int64(n)
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
} else if r.bytesLeft == 0 && err == nil {
|
||||
err = io.EOF
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
|
||||
type BackupStreamWriter struct {
|
||||
w io.Writer
|
||||
bytesLeft int64
|
||||
}
|
||||
|
||||
// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
|
||||
func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
|
||||
return &BackupStreamWriter{w, 0}
|
||||
}
|
||||
|
||||
// WriteHeader writes the next backup stream header and prepares for calls to Write().
|
||||
func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
|
||||
if w.bytesLeft != 0 {
|
||||
return fmt.Errorf("missing %d bytes", w.bytesLeft)
|
||||
}
|
||||
name := utf16.Encode([]rune(hdr.Name))
|
||||
wsi := win32StreamId{
|
||||
StreamId: hdr.Id,
|
||||
Attributes: hdr.Attributes,
|
||||
Size: uint64(hdr.Size),
|
||||
NameSize: uint32(len(name) * 2),
|
||||
}
|
||||
if hdr.Id == BackupSparseBlock {
|
||||
// Include space for the int64 block offset
|
||||
wsi.Size += 8
|
||||
}
|
||||
if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(name) != 0 {
|
||||
if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if hdr.Id == BackupSparseBlock {
|
||||
if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.bytesLeft = hdr.Size
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes to the current backup stream.
|
||||
func (w *BackupStreamWriter) Write(b []byte) (int, error) {
|
||||
if w.bytesLeft < int64(len(b)) {
|
||||
return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
|
||||
}
|
||||
n, err := w.w.Write(b)
|
||||
w.bytesLeft -= int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
|
||||
type BackupFileReader struct {
|
||||
f *os.File
|
||||
includeSecurity bool
|
||||
ctx uintptr
|
||||
}
|
||||
|
||||
// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
|
||||
// Read will attempt to read the security descriptor of the file.
|
||||
func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
|
||||
r := &BackupFileReader{f, includeSecurity, 0}
|
||||
return r
|
||||
}
|
||||
|
||||
// Read reads a backup stream from the file by calling the Win32 API BackupRead().
|
||||
func (r *BackupFileReader) Read(b []byte) (int, error) {
|
||||
var bytesRead uint32
|
||||
err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{"BackupRead", r.f.Name(), err}
|
||||
}
|
||||
runtime.KeepAlive(r.f)
|
||||
if bytesRead == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
return int(bytesRead), nil
|
||||
}
|
||||
|
||||
// Close frees Win32 resources associated with the BackupFileReader. It does not close
|
||||
// the underlying file.
|
||||
func (r *BackupFileReader) Close() error {
|
||||
if r.ctx != 0 {
|
||||
backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
|
||||
runtime.KeepAlive(r.f)
|
||||
r.ctx = 0
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
|
||||
type BackupFileWriter struct {
|
||||
f *os.File
|
||||
includeSecurity bool
|
||||
ctx uintptr
|
||||
}
|
||||
|
||||
// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
|
||||
// Write() will attempt to restore the security descriptor from the stream.
|
||||
func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
|
||||
w := &BackupFileWriter{f, includeSecurity, 0}
|
||||
return w
|
||||
}
|
||||
|
||||
// Write restores a portion of the file using the provided backup stream.
|
||||
func (w *BackupFileWriter) Write(b []byte) (int, error) {
|
||||
var bytesWritten uint32
|
||||
err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
|
||||
}
|
||||
runtime.KeepAlive(w.f)
|
||||
if int(bytesWritten) != len(b) {
|
||||
return int(bytesWritten), errors.New("not all bytes could be written")
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
// Close frees Win32 resources associated with the BackupFileWriter. It does not
|
||||
// close the underlying file.
|
||||
func (w *BackupFileWriter) Close() error {
|
||||
if w.ctx != 0 {
|
||||
backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
|
||||
runtime.KeepAlive(w.f)
|
||||
w.ctx = 0
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
|
||||
// or restore privileges have been acquired.
|
||||
//
|
||||
// If the file opened was a directory, it cannot be used with Readdir().
|
||||
func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
|
||||
winPath, err := syscall.UTF16FromString(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
|
||||
if err != nil {
|
||||
err = &os.PathError{Op: "open", Path: path, Err: err}
|
||||
return nil, err
|
||||
}
|
||||
return os.NewFile(uintptr(h), path), nil
|
||||
}
|
137
vendor/github.com/Microsoft/go-winio/ea.go
generated
vendored
@ -1,137 +0,0 @@
|
||||
package winio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
)
|
||||
|
||||
type fileFullEaInformation struct {
|
||||
NextEntryOffset uint32
|
||||
Flags uint8
|
||||
NameLength uint8
|
||||
ValueLength uint16
|
||||
}
|
||||
|
||||
var (
|
||||
fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
|
||||
|
||||
errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
|
||||
errEaNameTooLarge = errors.New("extended attribute name too large")
|
||||
errEaValueTooLarge = errors.New("extended attribute value too large")
|
||||
)
|
||||
|
||||
// ExtendedAttribute represents a single Windows EA.
|
||||
type ExtendedAttribute struct {
|
||||
Name string
|
||||
Value []byte
|
||||
Flags uint8
|
||||
}
|
||||
|
||||
func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
|
||||
var info fileFullEaInformation
|
||||
err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
|
||||
if err != nil {
|
||||
err = errInvalidEaBuffer
|
||||
return
|
||||
}
|
||||
|
||||
nameOffset := fileFullEaInformationSize
|
||||
nameLen := int(info.NameLength)
|
||||
valueOffset := nameOffset + int(info.NameLength) + 1
|
||||
valueLen := int(info.ValueLength)
|
||||
nextOffset := int(info.NextEntryOffset)
|
||||
if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
|
||||
err = errInvalidEaBuffer
|
||||
return
|
||||
}
|
||||
|
||||
ea.Name = string(b[nameOffset : nameOffset+nameLen])
|
||||
ea.Value = b[valueOffset : valueOffset+valueLen]
|
||||
ea.Flags = info.Flags
|
||||
if info.NextEntryOffset != 0 {
|
||||
nb = b[info.NextEntryOffset:]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
|
||||
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
|
||||
func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
|
||||
for len(b) != 0 {
|
||||
ea, nb, err := parseEa(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
eas = append(eas, ea)
|
||||
b = nb
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
|
||||
if int(uint8(len(ea.Name))) != len(ea.Name) {
|
||||
return errEaNameTooLarge
|
||||
}
|
||||
if int(uint16(len(ea.Value))) != len(ea.Value) {
|
||||
return errEaValueTooLarge
|
||||
}
|
||||
entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
|
||||
withPadding := (entrySize + 3) &^ 3
|
||||
nextOffset := uint32(0)
|
||||
if !last {
|
||||
nextOffset = withPadding
|
||||
}
|
||||
info := fileFullEaInformation{
|
||||
NextEntryOffset: nextOffset,
|
||||
Flags: ea.Flags,
|
||||
NameLength: uint8(len(ea.Name)),
|
||||
ValueLength: uint16(len(ea.Value)),
|
||||
}
|
||||
|
||||
err := binary.Write(buf, binary.LittleEndian, &info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = buf.Write([]byte(ea.Name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = buf.WriteByte(0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = buf.Write(ea.Value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
|
||||
// buffer for use with BackupWrite, ZwSetEaFile, etc.
|
||||
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
for i := range eas {
|
||||
last := false
|
||||
if i == len(eas)-1 {
|
||||
last = true
|
||||
}
|
||||
|
||||
err := writeEa(&buf, &eas[i], last)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
307
vendor/github.com/Microsoft/go-winio/file.go
generated
vendored
@ -1,307 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
|
||||
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
|
||||
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
|
||||
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
|
||||
|
||||
type atomicBool int32
|
||||
|
||||
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
|
||||
func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
|
||||
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
|
||||
func (b *atomicBool) swap(new bool) bool {
|
||||
var newInt int32
|
||||
if new {
|
||||
newInt = 1
|
||||
}
|
||||
return atomic.SwapInt32((*int32)(b), newInt) == 1
|
||||
}
|
||||
|
||||
const (
|
||||
cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
|
||||
cFILE_SKIP_SET_EVENT_ON_HANDLE = 2
|
||||
)
|
||||
|
||||
var (
|
||||
ErrFileClosed = errors.New("file has already been closed")
|
||||
ErrTimeout = &timeoutError{}
|
||||
)
|
||||
|
||||
type timeoutError struct{}
|
||||
|
||||
func (e *timeoutError) Error() string { return "i/o timeout" }
|
||||
func (e *timeoutError) Timeout() bool { return true }
|
||||
func (e *timeoutError) Temporary() bool { return true }
|
||||
|
||||
type timeoutChan chan struct{}
|
||||
|
||||
var ioInitOnce sync.Once
|
||||
var ioCompletionPort syscall.Handle
|
||||
|
||||
// ioResult contains the result of an asynchronous IO operation
|
||||
type ioResult struct {
|
||||
bytes uint32
|
||||
err error
|
||||
}
|
||||
|
||||
// ioOperation represents an outstanding asynchronous Win32 IO
|
||||
type ioOperation struct {
|
||||
o syscall.Overlapped
|
||||
ch chan ioResult
|
||||
}
|
||||
|
||||
func initIo() {
|
||||
h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
ioCompletionPort = h
|
||||
go ioCompletionProcessor(h)
|
||||
}
|
||||
|
||||
// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
|
||||
// It takes ownership of this handle and will close it if it is garbage collected.
|
||||
type win32File struct {
|
||||
handle syscall.Handle
|
||||
wg sync.WaitGroup
|
||||
wgLock sync.RWMutex
|
||||
closing atomicBool
|
||||
readDeadline deadlineHandler
|
||||
writeDeadline deadlineHandler
|
||||
}
|
||||
|
||||
type deadlineHandler struct {
|
||||
setLock sync.Mutex
|
||||
channel timeoutChan
|
||||
channelLock sync.RWMutex
|
||||
timer *time.Timer
|
||||
timedout atomicBool
|
||||
}
|
||||
|
||||
// makeWin32File makes a new win32File from an existing file handle
|
||||
func makeWin32File(h syscall.Handle) (*win32File, error) {
|
||||
f := &win32File{handle: h}
|
||||
ioInitOnce.Do(initIo)
|
||||
_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.readDeadline.channel = make(timeoutChan)
|
||||
f.writeDeadline.channel = make(timeoutChan)
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
|
||||
return makeWin32File(h)
|
||||
}
|
||||
|
||||
// closeHandle closes the resources associated with a Win32 handle
|
||||
func (f *win32File) closeHandle() {
|
||||
f.wgLock.Lock()
|
||||
// Atomically set that we are closing, releasing the resources only once.
|
||||
if !f.closing.swap(true) {
|
||||
f.wgLock.Unlock()
|
||||
// cancel all IO and wait for it to complete
|
||||
cancelIoEx(f.handle, nil)
|
||||
f.wg.Wait()
|
||||
// at this point, no new IO can start
|
||||
syscall.Close(f.handle)
|
||||
f.handle = 0
|
||||
} else {
|
||||
f.wgLock.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes a win32File.
|
||||
func (f *win32File) Close() error {
|
||||
f.closeHandle()
|
||||
return nil
|
||||
}
|
||||
|
||||
// prepareIo prepares for a new IO operation.
|
||||
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
|
||||
func (f *win32File) prepareIo() (*ioOperation, error) {
|
||||
f.wgLock.RLock()
|
||||
if f.closing.isSet() {
|
||||
f.wgLock.RUnlock()
|
||||
return nil, ErrFileClosed
|
||||
}
|
||||
f.wg.Add(1)
|
||||
f.wgLock.RUnlock()
|
||||
c := &ioOperation{}
|
||||
c.ch = make(chan ioResult)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// ioCompletionProcessor processes completed async IOs forever
|
||||
func ioCompletionProcessor(h syscall.Handle) {
|
||||
for {
|
||||
var bytes uint32
|
||||
var key uintptr
|
||||
var op *ioOperation
|
||||
err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE)
|
||||
if op == nil {
|
||||
panic(err)
|
||||
}
|
||||
op.ch <- ioResult{bytes, err}
|
||||
}
|
||||
}
|
||||
|
||||
// asyncIo processes the return value from ReadFile or WriteFile, blocking until
|
||||
// the operation has actually completed.
|
||||
func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
|
||||
if err != syscall.ERROR_IO_PENDING {
|
||||
return int(bytes), err
|
||||
}
|
||||
|
||||
if f.closing.isSet() {
|
||||
cancelIoEx(f.handle, &c.o)
|
||||
}
|
||||
|
||||
var timeout timeoutChan
|
||||
if d != nil {
|
||||
d.channelLock.Lock()
|
||||
timeout = d.channel
|
||||
d.channelLock.Unlock()
|
||||
}
|
||||
|
||||
var r ioResult
|
||||
select {
|
||||
case r = <-c.ch:
|
||||
err = r.err
|
||||
if err == syscall.ERROR_OPERATION_ABORTED {
|
||||
if f.closing.isSet() {
|
||||
err = ErrFileClosed
|
||||
}
|
||||
}
|
||||
case <-timeout:
|
||||
cancelIoEx(f.handle, &c.o)
|
||||
r = <-c.ch
|
||||
err = r.err
|
||||
if err == syscall.ERROR_OPERATION_ABORTED {
|
||||
err = ErrTimeout
|
||||
}
|
||||
}
|
||||
|
||||
// runtime.KeepAlive is needed, as c is passed via native
|
||||
// code to ioCompletionProcessor, c must remain alive
|
||||
// until the channel read is complete.
|
||||
runtime.KeepAlive(c)
|
||||
return int(r.bytes), err
|
||||
}
|
||||
|
||||
// Read reads from a file handle.
|
||||
func (f *win32File) Read(b []byte) (int, error) {
|
||||
c, err := f.prepareIo()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer f.wg.Done()
|
||||
|
||||
if f.readDeadline.timedout.isSet() {
|
||||
return 0, ErrTimeout
|
||||
}
|
||||
|
||||
var bytes uint32
|
||||
err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
|
||||
n, err := f.asyncIo(c, &f.readDeadline, bytes, err)
|
||||
runtime.KeepAlive(b)
|
||||
|
||||
// Handle EOF conditions.
|
||||
if err == nil && n == 0 && len(b) != 0 {
|
||||
return 0, io.EOF
|
||||
} else if err == syscall.ERROR_BROKEN_PIPE {
|
||||
return 0, io.EOF
|
||||
} else {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
|
||||
// Write writes to a file handle.
|
||||
func (f *win32File) Write(b []byte) (int, error) {
|
||||
c, err := f.prepareIo()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer f.wg.Done()
|
||||
|
||||
if f.writeDeadline.timedout.isSet() {
|
||||
return 0, ErrTimeout
|
||||
}
|
||||
|
||||
var bytes uint32
|
||||
err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
|
||||
n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
|
||||
runtime.KeepAlive(b)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (f *win32File) SetReadDeadline(deadline time.Time) error {
|
||||
return f.readDeadline.set(deadline)
|
||||
}
|
||||
|
||||
func (f *win32File) SetWriteDeadline(deadline time.Time) error {
|
||||
return f.writeDeadline.set(deadline)
|
||||
}
|
||||
|
||||
func (f *win32File) Flush() error {
|
||||
return syscall.FlushFileBuffers(f.handle)
|
||||
}
|
||||
|
||||
func (d *deadlineHandler) set(deadline time.Time) error {
|
||||
d.setLock.Lock()
|
||||
defer d.setLock.Unlock()
|
||||
|
||||
if d.timer != nil {
|
||||
if !d.timer.Stop() {
|
||||
<-d.channel
|
||||
}
|
||||
d.timer = nil
|
||||
}
|
||||
d.timedout.setFalse()
|
||||
|
||||
select {
|
||||
case <-d.channel:
|
||||
d.channelLock.Lock()
|
||||
d.channel = make(chan struct{})
|
||||
d.channelLock.Unlock()
|
||||
default:
|
||||
}
|
||||
|
||||
if deadline.IsZero() {
|
||||
return nil
|
||||
}
|
||||
|
||||
timeoutIO := func() {
|
||||
d.timedout.setTrue()
|
||||
close(d.channel)
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
duration := deadline.Sub(now)
|
||||
if deadline.After(now) {
|
||||
// Deadline is in the future, set a timer to wait
|
||||
d.timer = time.AfterFunc(duration, timeoutIO)
|
||||
} else {
|
||||
// Deadline is in the past. Cancel all pending IO now.
|
||||
timeoutIO()
|
||||
}
|
||||
return nil
|
||||
}
|
61
vendor/github.com/Microsoft/go-winio/fileinfo.go
generated
vendored
@ -1,61 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
|
||||
//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
|
||||
|
||||
const (
|
||||
fileBasicInfo = 0
|
||||
fileIDInfo = 0x12
|
||||
)
|
||||
|
||||
// FileBasicInfo contains file access time and file attributes information.
|
||||
type FileBasicInfo struct {
|
||||
CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
|
||||
FileAttributes uint32
|
||||
pad uint32 // padding
|
||||
}
|
||||
|
||||
// GetFileBasicInfo retrieves times and attributes for a file.
|
||||
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
|
||||
bi := &FileBasicInfo{}
|
||||
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
return bi, nil
|
||||
}
|
||||
|
||||
// SetFileBasicInfo sets times and attributes for a file.
|
||||
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
|
||||
if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
||||
return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
return nil
|
||||
}
|
||||
|
||||
// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
|
||||
// unique on a system.
|
||||
type FileIDInfo struct {
|
||||
VolumeSerialNumber uint64
|
||||
FileID [16]byte
|
||||
}
|
||||
|
||||
// GetFileID retrieves the unique (volume, file ID) pair for a file.
|
||||
func GetFileID(f *os.File) (*FileIDInfo, error) {
|
||||
fileID := &FileIDInfo{}
|
||||
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
|
||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
return fileID, nil
|
||||
}
|
421
vendor/github.com/Microsoft/go-winio/pipe.go
generated
vendored
@ -1,421 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
|
||||
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
|
||||
//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
|
||||
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
|
||||
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
|
||||
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
|
||||
|
||||
const (
|
||||
cERROR_PIPE_BUSY = syscall.Errno(231)
|
||||
cERROR_NO_DATA = syscall.Errno(232)
|
||||
cERROR_PIPE_CONNECTED = syscall.Errno(535)
|
||||
cERROR_SEM_TIMEOUT = syscall.Errno(121)
|
||||
|
||||
cPIPE_ACCESS_DUPLEX = 0x3
|
||||
cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000
|
||||
cSECURITY_SQOS_PRESENT = 0x100000
|
||||
cSECURITY_ANONYMOUS = 0
|
||||
|
||||
cPIPE_REJECT_REMOTE_CLIENTS = 0x8
|
||||
|
||||
cPIPE_UNLIMITED_INSTANCES = 255
|
||||
|
||||
cNMPWAIT_USE_DEFAULT_WAIT = 0
|
||||
cNMPWAIT_NOWAIT = 1
|
||||
|
||||
cPIPE_TYPE_MESSAGE = 4
|
||||
|
||||
cPIPE_READMODE_MESSAGE = 2
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
|
||||
// This error should match net.errClosing since docker takes a dependency on its text.
|
||||
ErrPipeListenerClosed = errors.New("use of closed network connection")
|
||||
|
||||
errPipeWriteClosed = errors.New("pipe has been closed for write")
|
||||
)
|
||||
|
||||
type win32Pipe struct {
|
||||
*win32File
|
||||
path string
|
||||
}
|
||||
|
||||
type win32MessageBytePipe struct {
|
||||
win32Pipe
|
||||
writeClosed bool
|
||||
readEOF bool
|
||||
}
|
||||
|
||||
type pipeAddress string
|
||||
|
||||
func (f *win32Pipe) LocalAddr() net.Addr {
|
||||
return pipeAddress(f.path)
|
||||
}
|
||||
|
||||
func (f *win32Pipe) RemoteAddr() net.Addr {
|
||||
return pipeAddress(f.path)
|
||||
}
|
||||
|
||||
func (f *win32Pipe) SetDeadline(t time.Time) error {
|
||||
f.SetReadDeadline(t)
|
||||
f.SetWriteDeadline(t)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CloseWrite closes the write side of a message pipe in byte mode.
|
||||
func (f *win32MessageBytePipe) CloseWrite() error {
|
||||
if f.writeClosed {
|
||||
return errPipeWriteClosed
|
||||
}
|
||||
err := f.win32File.Flush()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = f.win32File.Write(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.writeClosed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since
|
||||
// they are used to implement CloseWrite().
|
||||
func (f *win32MessageBytePipe) Write(b []byte) (int, error) {
|
||||
if f.writeClosed {
|
||||
return 0, errPipeWriteClosed
|
||||
}
|
||||
if len(b) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
return f.win32File.Write(b)
|
||||
}
|
||||
|
||||
// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message
|
||||
// mode pipe will return io.EOF, as will all subsequent reads.
|
||||
func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
|
||||
if f.readEOF {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n, err := f.win32File.Read(b)
|
||||
if err == io.EOF {
|
||||
// If this was the result of a zero-byte read, then
|
||||
// it is possible that the read was due to a zero-size
|
||||
// message. Since we are simulating CloseWrite with a
|
||||
// zero-byte message, ensure that all future Read() calls
|
||||
// also return EOF.
|
||||
f.readEOF = true
|
||||
} else if err == syscall.ERROR_MORE_DATA {
|
||||
// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
|
||||
// and the message still has more bytes. Treat this as a success, since
|
||||
// this package presents all named pipes as byte streams.
|
||||
err = nil
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (s pipeAddress) Network() string {
|
||||
return "pipe"
|
||||
}
|
||||
|
||||
func (s pipeAddress) String() string {
|
||||
return string(s)
|
||||
}
|
||||
|
||||
// DialPipe connects to a named pipe by path, timing out if the connection
|
||||
// takes longer than the specified duration. If timeout is nil, then we use
|
||||
// a default timeout of 5 seconds. (We do not use WaitNamedPipe.)
|
||||
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
|
||||
var absTimeout time.Time
|
||||
if timeout != nil {
|
||||
absTimeout = time.Now().Add(*timeout)
|
||||
} else {
|
||||
absTimeout = time.Now().Add(time.Second * 2)
|
||||
}
|
||||
var err error
|
||||
var h syscall.Handle
|
||||
for {
|
||||
h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
|
||||
if err != cERROR_PIPE_BUSY {
|
||||
break
|
||||
}
|
||||
if time.Now().After(absTimeout) {
|
||||
return nil, ErrTimeout
|
||||
}
|
||||
|
||||
// Wait 10 msec and try again. This is a rather simplistic
|
||||
// view, as we simply retry every 10 milliseconds.
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, &os.PathError{Op: "open", Path: path, Err: err}
|
||||
}
|
||||
|
||||
var flags uint32
|
||||
err = getNamedPipeInfo(h, &flags, nil, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f, err := makeWin32File(h)
|
||||
if err != nil {
|
||||
syscall.Close(h)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If the pipe is in message mode, return a message byte pipe, which
|
||||
// supports CloseWrite().
|
||||
if flags&cPIPE_TYPE_MESSAGE != 0 {
|
||||
return &win32MessageBytePipe{
|
||||
win32Pipe: win32Pipe{win32File: f, path: path},
|
||||
}, nil
|
||||
}
|
||||
return &win32Pipe{win32File: f, path: path}, nil
|
||||
}
|
||||
|
||||
type acceptResponse struct {
|
||||
f *win32File
|
||||
err error
|
||||
}
|
||||
|
||||
type win32PipeListener struct {
|
||||
firstHandle syscall.Handle
|
||||
path string
|
||||
securityDescriptor []byte
|
||||
config PipeConfig
|
||||
acceptCh chan (chan acceptResponse)
|
||||
closeCh chan int
|
||||
doneCh chan int
|
||||
}
|
||||
|
||||
func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
|
||||
var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED
|
||||
if first {
|
||||
flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE
|
||||
}
|
||||
|
||||
var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS
|
||||
if c.MessageMode {
|
||||
mode |= cPIPE_TYPE_MESSAGE
|
||||
}
|
||||
|
||||
sa := &syscall.SecurityAttributes{}
|
||||
sa.Length = uint32(unsafe.Sizeof(*sa))
|
||||
if securityDescriptor != nil {
|
||||
len := uint32(len(securityDescriptor))
|
||||
sa.SecurityDescriptor = localAlloc(0, len)
|
||||
defer localFree(sa.SecurityDescriptor)
|
||||
copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor)
|
||||
}
|
||||
h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{Op: "open", Path: path, Err: err}
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
|
||||
h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f, err := makeWin32File(h)
|
||||
if err != nil {
|
||||
syscall.Close(h)
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
|
||||
p, err := l.makeServerPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Wait for the client to connect.
|
||||
ch := make(chan error)
|
||||
go func(p *win32File) {
|
||||
ch <- connectPipe(p)
|
||||
}(p)
|
||||
|
||||
select {
|
||||
case err = <-ch:
|
||||
if err != nil {
|
||||
p.Close()
|
||||
p = nil
|
||||
}
|
||||
case <-l.closeCh:
|
||||
// Abort the connect request by closing the handle.
|
||||
p.Close()
|
||||
p = nil
|
||||
err = <-ch
|
||||
if err == nil || err == ErrFileClosed {
|
||||
err = ErrPipeListenerClosed
|
||||
}
|
||||
}
|
||||
return p, err
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) listenerRoutine() {
|
||||
closed := false
|
||||
for !closed {
|
||||
select {
|
||||
case <-l.closeCh:
|
||||
closed = true
|
||||
case responseCh := <-l.acceptCh:
|
||||
var (
|
||||
p *win32File
|
||||
err error
|
||||
)
|
||||
for {
|
||||
p, err = l.makeConnectedServerPipe()
|
||||
// If the connection was immediately closed by the client, try
|
||||
// again.
|
||||
if err != cERROR_NO_DATA {
|
||||
break
|
||||
}
|
||||
}
|
||||
responseCh <- acceptResponse{p, err}
|
||||
closed = err == ErrPipeListenerClosed
|
||||
}
|
||||
}
|
||||
syscall.Close(l.firstHandle)
|
||||
l.firstHandle = 0
|
||||
// Notify Close() and Accept() callers that the handle has been closed.
|
||||
close(l.doneCh)
|
||||
}
|
||||
|
||||
// PipeConfig contain configuration for the pipe listener.
|
||||
type PipeConfig struct {
|
||||
// SecurityDescriptor contains a Windows security descriptor in SDDL format.
|
||||
SecurityDescriptor string
|
||||
|
||||
// MessageMode determines whether the pipe is in byte or message mode. In either
|
||||
// case the pipe is read in byte mode by default. The only practical difference in
|
||||
// this implementation is that CloseWrite() is only supported for message mode pipes;
|
||||
// CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
|
||||
// transferred to the reader (and returned as io.EOF in this implementation)
|
||||
// when the pipe is in message mode.
|
||||
MessageMode bool
|
||||
|
||||
// InputBufferSize specifies the size of the input buffer, in bytes.
|
||||
InputBufferSize int32
|
||||
|
||||
// OutputBufferSize specifies the size of the output buffer, in bytes.
|
||||
OutputBufferSize int32
|
||||
}
|
||||
|
||||
// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
|
||||
// The pipe must not already exist.
|
||||
func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
|
||||
var (
|
||||
sd []byte
|
||||
err error
|
||||
)
|
||||
if c == nil {
|
||||
c = &PipeConfig{}
|
||||
}
|
||||
if c.SecurityDescriptor != "" {
|
||||
sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
h, err := makeServerPipeHandle(path, sd, c, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Create a client handle and connect it. This results in the pipe
|
||||
// instance always existing, so that clients see ERROR_PIPE_BUSY
|
||||
// rather than ERROR_FILE_NOT_FOUND. This ties the first instance
|
||||
// up so that no other instances can be used. This would have been
|
||||
// cleaner if the Win32 API matched CreateFile with ConnectNamedPipe
|
||||
// instead of CreateNamedPipe. (Apparently created named pipes are
|
||||
// considered to be in listening state regardless of whether any
|
||||
// active calls to ConnectNamedPipe are outstanding.)
|
||||
h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
|
||||
if err != nil {
|
||||
syscall.Close(h)
|
||||
return nil, err
|
||||
}
|
||||
// Close the client handle. The server side of the instance will
|
||||
// still be busy, leading to ERROR_PIPE_BUSY instead of
|
||||
// ERROR_NOT_FOUND, as long as we don't close the server handle,
|
||||
// or disconnect the client with DisconnectNamedPipe.
|
||||
syscall.Close(h2)
|
||||
l := &win32PipeListener{
|
||||
firstHandle: h,
|
||||
path: path,
|
||||
securityDescriptor: sd,
|
||||
config: *c,
|
||||
acceptCh: make(chan (chan acceptResponse)),
|
||||
closeCh: make(chan int),
|
||||
doneCh: make(chan int),
|
||||
}
|
||||
go l.listenerRoutine()
|
||||
return l, nil
|
||||
}
|
||||
|
||||
func connectPipe(p *win32File) error {
|
||||
c, err := p.prepareIo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer p.wg.Done()
|
||||
|
||||
err = connectNamedPipe(p.handle, &c.o)
|
||||
_, err = p.asyncIo(c, nil, 0, err)
|
||||
if err != nil && err != cERROR_PIPE_CONNECTED {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) Accept() (net.Conn, error) {
|
||||
ch := make(chan acceptResponse)
|
||||
select {
|
||||
case l.acceptCh <- ch:
|
||||
response := <-ch
|
||||
err := response.err
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if l.config.MessageMode {
|
||||
return &win32MessageBytePipe{
|
||||
win32Pipe: win32Pipe{win32File: response.f, path: l.path},
|
||||
}, nil
|
||||
}
|
||||
return &win32Pipe{win32File: response.f, path: l.path}, nil
|
||||
case <-l.doneCh:
|
||||
return nil, ErrPipeListenerClosed
|
||||
}
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) Close() error {
|
||||
select {
|
||||
case l.closeCh <- 1:
|
||||
<-l.doneCh
|
||||
case <-l.doneCh:
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) Addr() net.Addr {
|
||||
return pipeAddress(l.path)
|
||||
}
|
202
vendor/github.com/Microsoft/go-winio/privilege.go
generated
vendored
@ -1,202 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
|
||||
//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
|
||||
//sys revertToSelf() (err error) = advapi32.RevertToSelf
|
||||
//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
|
||||
//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread
|
||||
//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
|
||||
//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
|
||||
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
|
||||
|
||||
const (
|
||||
SE_PRIVILEGE_ENABLED = 2
|
||||
|
||||
ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
|
||||
|
||||
SeBackupPrivilege = "SeBackupPrivilege"
|
||||
SeRestorePrivilege = "SeRestorePrivilege"
|
||||
)
|
||||
|
||||
const (
|
||||
securityAnonymous = iota
|
||||
securityIdentification
|
||||
securityImpersonation
|
||||
securityDelegation
|
||||
)
|
||||
|
||||
var (
|
||||
privNames = make(map[string]uint64)
|
||||
privNameMutex sync.Mutex
|
||||
)
|
||||
|
||||
// PrivilegeError represents an error enabling privileges.
|
||||
type PrivilegeError struct {
|
||||
privileges []uint64
|
||||
}
|
||||
|
||||
func (e *PrivilegeError) Error() string {
|
||||
s := ""
|
||||
if len(e.privileges) > 1 {
|
||||
s = "Could not enable privileges "
|
||||
} else {
|
||||
s = "Could not enable privilege "
|
||||
}
|
||||
for i, p := range e.privileges {
|
||||
if i != 0 {
|
||||
s += ", "
|
||||
}
|
||||
s += `"`
|
||||
s += getPrivilegeName(p)
|
||||
s += `"`
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// RunWithPrivilege enables a single privilege for a function call.
|
||||
func RunWithPrivilege(name string, fn func() error) error {
|
||||
return RunWithPrivileges([]string{name}, fn)
|
||||
}
|
||||
|
||||
// RunWithPrivileges enables privileges for a function call.
|
||||
func RunWithPrivileges(names []string, fn func() error) error {
|
||||
privileges, err := mapPrivileges(names)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
token, err := newThreadToken()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer releaseThreadToken(token)
|
||||
err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fn()
|
||||
}
|
||||
|
||||
func mapPrivileges(names []string) ([]uint64, error) {
|
||||
var privileges []uint64
|
||||
privNameMutex.Lock()
|
||||
defer privNameMutex.Unlock()
|
||||
for _, name := range names {
|
||||
p, ok := privNames[name]
|
||||
if !ok {
|
||||
err := lookupPrivilegeValue("", name, &p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
privNames[name] = p
|
||||
}
|
||||
privileges = append(privileges, p)
|
||||
}
|
||||
return privileges, nil
|
||||
}
|
||||
|
||||
// EnableProcessPrivileges enables privileges globally for the process.
|
||||
func EnableProcessPrivileges(names []string) error {
|
||||
return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
|
||||
}
|
||||
|
||||
// DisableProcessPrivileges disables privileges globally for the process.
|
||||
func DisableProcessPrivileges(names []string) error {
|
||||
return enableDisableProcessPrivilege(names, 0)
|
||||
}
|
||||
|
||||
func enableDisableProcessPrivilege(names []string, action uint32) error {
|
||||
privileges, err := mapPrivileges(names)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p, _ := windows.GetCurrentProcess()
|
||||
var token windows.Token
|
||||
err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer token.Close()
|
||||
return adjustPrivileges(token, privileges, action)
|
||||
}
|
||||
|
||||
func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
|
||||
var b bytes.Buffer
|
||||
binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
|
||||
for _, p := range privileges {
|
||||
binary.Write(&b, binary.LittleEndian, p)
|
||||
binary.Write(&b, binary.LittleEndian, action)
|
||||
}
|
||||
prevState := make([]byte, b.Len())
|
||||
reqSize := uint32(0)
|
||||
success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
|
||||
if !success {
|
||||
return err
|
||||
}
|
||||
if err == ERROR_NOT_ALL_ASSIGNED {
|
||||
return &PrivilegeError{privileges}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getPrivilegeName(luid uint64) string {
|
||||
var nameBuffer [256]uint16
|
||||
bufSize := uint32(len(nameBuffer))
|
||||
err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("<unknown privilege %d>", luid)
|
||||
}
|
||||
|
||||
var displayNameBuffer [256]uint16
|
||||
displayBufSize := uint32(len(displayNameBuffer))
|
||||
var langID uint32
|
||||
err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
|
||||
}
|
||||
|
||||
return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
|
||||
}
|
||||
|
||||
func newThreadToken() (windows.Token, error) {
|
||||
err := impersonateSelf(securityImpersonation)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var token windows.Token
|
||||
err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
|
||||
if err != nil {
|
||||
rerr := revertToSelf()
|
||||
if rerr != nil {
|
||||
panic(rerr)
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
return token, nil
|
||||
}
|
||||
|
||||
func releaseThreadToken(h windows.Token) {
|
||||
err := revertToSelf()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
h.Close()
|
||||
}
|
128
vendor/github.com/Microsoft/go-winio/reparse.go
generated
vendored
@ -1,128 +0,0 @@
|
||||
package winio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf16"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
reparseTagMountPoint = 0xA0000003
|
||||
reparseTagSymlink = 0xA000000C
|
||||
)
|
||||
|
||||
type reparseDataBuffer struct {
|
||||
ReparseTag uint32
|
||||
ReparseDataLength uint16
|
||||
Reserved uint16
|
||||
SubstituteNameOffset uint16
|
||||
SubstituteNameLength uint16
|
||||
PrintNameOffset uint16
|
||||
PrintNameLength uint16
|
||||
}
|
||||
|
||||
// ReparsePoint describes a Win32 symlink or mount point.
|
||||
type ReparsePoint struct {
|
||||
Target string
|
||||
IsMountPoint bool
|
||||
}
|
||||
|
||||
// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
|
||||
// mount point reparse point.
|
||||
type UnsupportedReparsePointError struct {
|
||||
Tag uint32
|
||||
}
|
||||
|
||||
func (e *UnsupportedReparsePointError) Error() string {
|
||||
return fmt.Sprintf("unsupported reparse point %x", e.Tag)
|
||||
}
|
||||
|
||||
// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
|
||||
// or a mount point.
|
||||
func DecodeReparsePoint(b []byte) (*ReparsePoint, error) {
|
||||
tag := binary.LittleEndian.Uint32(b[0:4])
|
||||
return DecodeReparsePointData(tag, b[8:])
|
||||
}
|
||||
|
||||
func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) {
|
||||
isMountPoint := false
|
||||
switch tag {
|
||||
case reparseTagMountPoint:
|
||||
isMountPoint = true
|
||||
case reparseTagSymlink:
|
||||
default:
|
||||
return nil, &UnsupportedReparsePointError{tag}
|
||||
}
|
||||
nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6])
|
||||
if !isMountPoint {
|
||||
nameOffset += 4
|
||||
}
|
||||
nameLength := binary.LittleEndian.Uint16(b[6:8])
|
||||
name := make([]uint16, nameLength/2)
|
||||
err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil
|
||||
}
|
||||
|
||||
func isDriveLetter(c byte) bool {
|
||||
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
|
||||
}
|
||||
|
||||
// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or
|
||||
// mount point.
|
||||
func EncodeReparsePoint(rp *ReparsePoint) []byte {
|
||||
// Generate an NT path and determine if this is a relative path.
|
||||
var ntTarget string
|
||||
relative := false
|
||||
if strings.HasPrefix(rp.Target, `\\?\`) {
|
||||
ntTarget = `\??\` + rp.Target[4:]
|
||||
} else if strings.HasPrefix(rp.Target, `\\`) {
|
||||
ntTarget = `\??\UNC\` + rp.Target[2:]
|
||||
} else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
|
||||
ntTarget = `\??\` + rp.Target
|
||||
} else {
|
||||
ntTarget = rp.Target
|
||||
relative = true
|
||||
}
|
||||
|
||||
// The paths must be NUL-terminated even though they are counted strings.
|
||||
target16 := utf16.Encode([]rune(rp.Target + "\x00"))
|
||||
ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00"))
|
||||
|
||||
size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8
|
||||
size += len(ntTarget16)*2 + len(target16)*2
|
||||
|
||||
tag := uint32(reparseTagMountPoint)
|
||||
if !rp.IsMountPoint {
|
||||
tag = reparseTagSymlink
|
||||
size += 4 // Add room for symlink flags
|
||||
}
|
||||
|
||||
data := reparseDataBuffer{
|
||||
ReparseTag: tag,
|
||||
ReparseDataLength: uint16(size),
|
||||
SubstituteNameOffset: 0,
|
||||
SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2),
|
||||
PrintNameOffset: uint16(len(ntTarget16) * 2),
|
||||
PrintNameLength: uint16((len(target16) - 1) * 2),
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
binary.Write(&b, binary.LittleEndian, &data)
|
||||
if !rp.IsMountPoint {
|
||||
flags := uint32(0)
|
||||
if relative {
|
||||
flags |= 1
|
||||
}
|
||||
binary.Write(&b, binary.LittleEndian, flags)
|
||||
}
|
||||
|
||||
binary.Write(&b, binary.LittleEndian, ntTarget16)
|
||||
binary.Write(&b, binary.LittleEndian, target16)
|
||||
return b.Bytes()
|
||||
}
|
98
vendor/github.com/Microsoft/go-winio/sd.go
generated
vendored
@ -1,98 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
|
||||
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
|
||||
//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
|
||||
//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
|
||||
//sys localFree(mem uintptr) = LocalFree
|
||||
//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength
|
||||
|
||||
const (
|
||||
cERROR_NONE_MAPPED = syscall.Errno(1332)
|
||||
)
|
||||
|
||||
type AccountLookupError struct {
|
||||
Name string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *AccountLookupError) Error() string {
|
||||
if e.Name == "" {
|
||||
return "lookup account: empty account name specified"
|
||||
}
|
||||
var s string
|
||||
switch e.Err {
|
||||
case cERROR_NONE_MAPPED:
|
||||
s = "not found"
|
||||
default:
|
||||
s = e.Err.Error()
|
||||
}
|
||||
return "lookup account " + e.Name + ": " + s
|
||||
}
|
||||
|
||||
type SddlConversionError struct {
|
||||
Sddl string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *SddlConversionError) Error() string {
|
||||
return "convert " + e.Sddl + ": " + e.Err.Error()
|
||||
}
|
||||
|
||||
// LookupSidByName looks up the SID of an account by name
|
||||
func LookupSidByName(name string) (sid string, err error) {
|
||||
if name == "" {
|
||||
return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
|
||||
}
|
||||
|
||||
var sidSize, sidNameUse, refDomainSize uint32
|
||||
err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
|
||||
if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
|
||||
return "", &AccountLookupError{name, err}
|
||||
}
|
||||
sidBuffer := make([]byte, sidSize)
|
||||
refDomainBuffer := make([]uint16, refDomainSize)
|
||||
err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
|
||||
if err != nil {
|
||||
return "", &AccountLookupError{name, err}
|
||||
}
|
||||
var strBuffer *uint16
|
||||
err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
|
||||
if err != nil {
|
||||
return "", &AccountLookupError{name, err}
|
||||
}
|
||||
sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
|
||||
localFree(uintptr(unsafe.Pointer(strBuffer)))
|
||||
return sid, nil
|
||||
}
|
||||
|
||||
func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
|
||||
var sdBuffer uintptr
|
||||
err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
|
||||
if err != nil {
|
||||
return nil, &SddlConversionError{sddl, err}
|
||||
}
|
||||
defer localFree(sdBuffer)
|
||||
sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
|
||||
copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
|
||||
return sd, nil
|
||||
}
|
||||
|
||||
func SecurityDescriptorToSddl(sd []byte) (string, error) {
|
||||
var sddl *uint16
|
||||
// The returned string length seems to include an arbitrary number of terminating NULs.
|
||||
// Don't use it.
|
||||
err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer localFree(uintptr(unsafe.Pointer(sddl)))
|
||||
return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
|
||||
}
|
3
vendor/github.com/Microsoft/go-winio/syscall.go
generated
vendored
@ -1,3 +0,0 @@
package winio

//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go
520
vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
generated
vendored
@ -1,520 +0,0 @@
|
||||
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var _ unsafe.Pointer
|
||||
|
||||
// Do the interface allocations only once for common
|
||||
// Errno values.
|
||||
const (
|
||||
errnoERROR_IO_PENDING = 997
|
||||
)
|
||||
|
||||
var (
|
||||
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
|
||||
)
|
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
// allocations at runtime.
|
||||
func errnoErr(e syscall.Errno) error {
|
||||
switch e {
|
||||
case 0:
|
||||
return nil
|
||||
case errnoERROR_IO_PENDING:
|
||||
return errERROR_IO_PENDING
|
||||
}
|
||||
// TODO: add more here, after collecting data on the common
|
||||
// error values see on Windows. (perhaps when running
|
||||
// all.bat?)
|
||||
return e
|
||||
}
|
||||
|
||||
var (
|
||||
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
|
||||
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
|
||||
|
||||
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
|
||||
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
|
||||
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
|
||||
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
|
||||
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
|
||||
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
|
||||
procCreateFileW = modkernel32.NewProc("CreateFileW")
|
||||
procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW")
|
||||
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
|
||||
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
|
||||
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
|
||||
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
|
||||
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
|
||||
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
|
||||
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
|
||||
procLocalFree = modkernel32.NewProc("LocalFree")
|
||||
procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
|
||||
procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
|
||||
procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
|
||||
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
|
||||
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
|
||||
procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
|
||||
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
|
||||
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
|
||||
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
|
||||
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
|
||||
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
|
||||
procBackupRead = modkernel32.NewProc("BackupRead")
|
||||
procBackupWrite = modkernel32.NewProc("BackupWrite")
|
||||
)
|
||||
|
||||
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
|
||||
newport = syscall.Handle(r0)
|
||||
if newport == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
|
||||
}
|
||||
|
||||
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
|
||||
handle = syscall.Handle(r0)
|
||||
if handle == syscall.InvalidHandle {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
|
||||
}
|
||||
|
||||
func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
|
||||
handle = syscall.Handle(r0)
|
||||
if handle == syscall.InvalidHandle {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func waitNamedPipe(name string, timeout uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _waitNamedPipe(_p0, timeout)
|
||||
}
|
||||
|
||||
func _waitNamedPipe(name *uint16, timeout uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
|
||||
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
|
||||
ptr = uintptr(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(accountName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
|
||||
}
|
||||
|
||||
func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func convertSidToStringSid(sid *byte, str **uint16) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(str)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size)
|
||||
}
|
||||
|
||||
func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func localFree(mem uintptr) {
|
||||
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
|
||||
return
|
||||
}
|
||||
|
||||
func getSecurityDescriptorLength(sd uintptr) (len uint32) {
|
||||
r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
|
||||
len = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
|
||||
var _p0 uint32
|
||||
if releaseAll {
|
||||
_p0 = 1
|
||||
} else {
|
||||
_p0 = 0
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
|
||||
success = r0 != 0
|
||||
if true {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func impersonateSelf(level uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func revertToSelf() (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
|
||||
var _p0 uint32
|
||||
if openAsSelf {
|
||||
_p0 = 1
|
||||
} else {
|
||||
_p0 = 0
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getCurrentThread() (h syscall.Handle) {
|
||||
r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
|
||||
h = syscall.Handle(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(systemName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *uint16
|
||||
_p1, err = syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _lookupPrivilegeValue(_p0, _p1, luid)
|
||||
}
|
||||
|
||||
func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(systemName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _lookupPrivilegeName(_p0, luid, buffer, size)
|
||||
}
|
||||
|
||||
func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(systemName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
|
||||
}
|
||||
|
||||
func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
|
||||
var _p0 *byte
|
||||
if len(b) > 0 {
|
||||
_p0 = &b[0]
|
||||
}
|
||||
var _p1 uint32
|
||||
if abort {
|
||||
_p1 = 1
|
||||
} else {
|
||||
_p1 = 0
|
||||
}
|
||||
var _p2 uint32
|
||||
if processSecurity {
|
||||
_p2 = 1
|
||||
} else {
|
||||
_p2 = 0
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
|
||||
var _p0 *byte
|
||||
if len(b) > 0 {
|
||||
_p0 = &b[0]
|
||||
}
|
||||
var _p1 uint32
|
||||
if abort {
|
||||
_p1 = 1
|
||||
} else {
|
||||
_p1 = 0
|
||||
}
|
||||
var _p2 uint32
|
||||
if processSecurity {
|
||||
_p2 = 1
|
||||
} else {
|
||||
_p2 = 0
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
191
vendor/github.com/containerd/containerd/LICENSE
generated
vendored
191
vendor/github.com/containerd/containerd/LICENSE
generated
vendored
@ -1,191 +0,0 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright The containerd Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
16
vendor/github.com/containerd/containerd/NOTICE
generated
vendored
16
vendor/github.com/containerd/containerd/NOTICE
generated
vendored
@ -1,16 +0,0 @@
|
||||
Docker
|
||||
Copyright 2012-2015 Docker, Inc.
|
||||
|
||||
This product includes software developed at Docker, Inc. (https://www.docker.com).
|
||||
|
||||
The following is courtesy of our legal counsel:
|
||||
|
||||
|
||||
Use and transfer of Docker may be subject to certain restrictions by the
|
||||
United States and other governments.
|
||||
It is your responsibility to ensure that your use and/or transfer does not
|
||||
violate applicable laws.
|
||||
|
||||
For more information, please see https://www.bis.doc.gov
|
||||
|
||||
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
|
78
vendor/github.com/containerd/containerd/errdefs/errors.go
generated
vendored
78
vendor/github.com/containerd/containerd/errdefs/errors.go
generated
vendored
@ -1,78 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package errdefs defines the common errors used throughout containerd
|
||||
// packages.
|
||||
//
|
||||
// Use with errors.Wrap and errors.Wrapf to add context to an error.
|
||||
//
|
||||
// To detect an error class, use the IsXXX functions to tell whether an error
|
||||
// is of a certain type.
|
||||
//
|
||||
// The functions ToGRPC and FromGRPC can be used to map server-side and
|
||||
// client-side errors to the correct types.
|
||||
package errdefs
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
// Definitions of common error types used throughout containerd. All containerd
|
||||
// errors returned by most packages will map into one of these error classes.
|
||||
// Packages should return errors of these types when they want to instruct a
|
||||
// client to take a particular action.
|
||||
//
|
||||
// For the most part, we just try to provide local grpc errors. Most conditions
|
||||
// map very well to those defined by grpc.
|
||||
var (
|
||||
ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping.
|
||||
ErrInvalidArgument = errors.New("invalid argument")
|
||||
ErrNotFound = errors.New("not found")
|
||||
ErrAlreadyExists = errors.New("already exists")
|
||||
ErrFailedPrecondition = errors.New("failed precondition")
|
||||
ErrUnavailable = errors.New("unavailable")
|
||||
ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented
|
||||
)
|
||||
|
||||
// IsInvalidArgument returns true if the error is due to an invalid argument
|
||||
func IsInvalidArgument(err error) bool {
|
||||
return errors.Cause(err) == ErrInvalidArgument
|
||||
}
|
||||
|
||||
// IsNotFound returns true if the error is due to a missing object
|
||||
func IsNotFound(err error) bool {
|
||||
return errors.Cause(err) == ErrNotFound
|
||||
}
|
||||
|
||||
// IsAlreadyExists returns true if the error is due to an already existing
|
||||
// metadata item
|
||||
func IsAlreadyExists(err error) bool {
|
||||
return errors.Cause(err) == ErrAlreadyExists
|
||||
}
|
||||
|
||||
// IsFailedPrecondition returns true if an operation could not proceed due to
// the lack of a particular condition
|
||||
func IsFailedPrecondition(err error) bool {
|
||||
return errors.Cause(err) == ErrFailedPrecondition
|
||||
}
|
||||
|
||||
// IsUnavailable returns true if the error is due to a resource being unavailable
|
||||
func IsUnavailable(err error) bool {
|
||||
return errors.Cause(err) == ErrUnavailable
|
||||
}
|
||||
|
||||
// IsNotImplemented returns true if the error is due to not being implemented
|
||||
func IsNotImplemented(err error) bool {
|
||||
return errors.Cause(err) == ErrNotImplemented
|
||||
}
|
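The package comment above describes the intended pattern: wrap one of these sentinel errors with context, then detect the class with the IsXXX helpers. A minimal sketch under that assumption; findImage and the image name are hypothetical:

package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

// findImage is a hypothetical lookup that wraps the sentinel error with context.
func findImage(name string) error {
	return errors.Wrapf(errdefs.ErrNotFound, "image %q", name)
}

func main() {
	err := findImage("k3s:latest")
	// IsNotFound unwraps via errors.Cause, so the wrapped sentinel is still detected.
	fmt.Println(errdefs.IsNotFound(err)) // true
	fmt.Println(err)                     // image "k3s:latest": not found
}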
138
vendor/github.com/containerd/containerd/errdefs/grpc.go
generated
vendored
138
vendor/github.com/containerd/containerd/errdefs/grpc.go
generated
vendored
@ -1,138 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package errdefs
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// ToGRPC will attempt to map the backend containerd error into a grpc error,
|
||||
// using the original error message as a description.
|
||||
//
|
||||
// Further information may be extracted from certain errors depending on their
|
||||
// type.
|
||||
//
|
||||
// If the error is unmapped, the original error will be returned to be handled
|
||||
// by the regular grpc error handling stack.
|
||||
func ToGRPC(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if isGRPCError(err) {
|
||||
// error has already been mapped to grpc
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
case IsInvalidArgument(err):
|
||||
return status.Errorf(codes.InvalidArgument, err.Error())
|
||||
case IsNotFound(err):
|
||||
return status.Errorf(codes.NotFound, err.Error())
|
||||
case IsAlreadyExists(err):
|
||||
return status.Errorf(codes.AlreadyExists, err.Error())
|
||||
case IsFailedPrecondition(err):
|
||||
return status.Errorf(codes.FailedPrecondition, err.Error())
|
||||
case IsUnavailable(err):
|
||||
return status.Errorf(codes.Unavailable, err.Error())
|
||||
case IsNotImplemented(err):
|
||||
return status.Errorf(codes.Unimplemented, err.Error())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// ToGRPCf maps the error to grpc error codes, assembling the formatting string
|
||||
// and combining it with the target error string.
|
||||
//
|
||||
// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
|
||||
func ToGRPCf(err error, format string, args ...interface{}) error {
|
||||
return ToGRPC(errors.Wrapf(err, format, args...))
|
||||
}
|
||||
|
||||
// FromGRPC returns the underlying error from a grpc service based on the grpc error code
|
||||
func FromGRPC(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var cls error // divide these into error classes, becomes the cause
|
||||
|
||||
switch code(err) {
|
||||
case codes.InvalidArgument:
|
||||
cls = ErrInvalidArgument
|
||||
case codes.AlreadyExists:
|
||||
cls = ErrAlreadyExists
|
||||
case codes.NotFound:
|
||||
cls = ErrNotFound
|
||||
case codes.Unavailable:
|
||||
cls = ErrUnavailable
|
||||
case codes.FailedPrecondition:
|
||||
cls = ErrFailedPrecondition
|
||||
case codes.Unimplemented:
|
||||
cls = ErrNotImplemented
|
||||
default:
|
||||
cls = ErrUnknown
|
||||
}
|
||||
|
||||
msg := rebaseMessage(cls, err)
|
||||
if msg != "" {
|
||||
err = errors.Wrapf(cls, msg)
|
||||
} else {
|
||||
err = errors.WithStack(cls)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// rebaseMessage removes the repeats for an error at the end of an error
|
||||
// string. This will happen when taking an error over grpc then remapping it.
|
||||
//
|
||||
// Effectively, we just remove the string of cls from the end of err if it
|
||||
// appears there.
|
||||
func rebaseMessage(cls error, err error) string {
|
||||
desc := errDesc(err)
|
||||
clss := cls.Error()
|
||||
if desc == clss {
|
||||
return ""
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(desc, ": "+clss)
|
||||
}
|
||||
|
||||
func isGRPCError(err error) bool {
|
||||
_, ok := status.FromError(err)
|
||||
return ok
|
||||
}
|
||||
|
||||
func code(err error) codes.Code {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Code()
|
||||
}
|
||||
return codes.Unknown
|
||||
}
|
||||
|
||||
func errDesc(err error) string {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Message()
|
||||
}
|
||||
return err.Error()
|
||||
}
|
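Taken together with errors.go above, these helpers describe a round trip between error classes and grpc status codes. A short sketch of that round trip; the container name is purely illustrative:

package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

func main() {
	// Server side: map a containerd error class onto a grpc status error.
	grpcErr := errdefs.ToGRPCf(errdefs.ErrNotFound, "container %s", "k3d-k3s-default-server")

	// Client side: map the grpc status back onto the sentinel class.
	cause := errdefs.FromGRPC(grpcErr)
	fmt.Println(errdefs.IsNotFound(cause)) // true
}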
247
vendor/github.com/docker/distribution/digestset/set.go
generated
vendored
247
vendor/github.com/docker/distribution/digestset/set.go
generated
vendored
@ -1,247 +0,0 @@
|
||||
package digestset
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrDigestNotFound is used when a matching digest
|
||||
// could not be found in a set.
|
||||
ErrDigestNotFound = errors.New("digest not found")
|
||||
|
||||
// ErrDigestAmbiguous is used when multiple digests
|
||||
// are found in a set. None of the matching digests
|
||||
// should be considered valid matches.
|
||||
ErrDigestAmbiguous = errors.New("ambiguous digest string")
|
||||
)
|
||||
|
||||
// Set is used to hold a unique set of digests which
|
||||
// may be easily referenced by a string
// representation of the digest as well as a short representation.
|
||||
// The uniqueness of the short representation is based on other
|
||||
// digests in the set. If digests are omitted from this set,
|
||||
// collisions in a larger set may not be detected, therefore it
|
||||
// is important to always do short representation lookups on
|
||||
// the complete set of digests. To mitigate collisions, an
|
||||
// appropriately long short code should be used.
|
||||
type Set struct {
|
||||
mutex sync.RWMutex
|
||||
entries digestEntries
|
||||
}
|
||||
|
||||
// NewSet creates an empty set of digests
|
||||
// which may have digests added.
|
||||
func NewSet() *Set {
|
||||
return &Set{
|
||||
entries: digestEntries{},
|
||||
}
|
||||
}
|
||||
|
||||
// checkShortMatch checks whether two digests match as either whole
|
||||
// values or short values. This function does not test equality,
|
||||
// rather whether the second value could match against the first
|
||||
// value.
|
||||
func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
|
||||
if len(hex) == len(shortHex) {
|
||||
if hex != shortHex {
|
||||
return false
|
||||
}
|
||||
if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||
return false
|
||||
}
|
||||
} else if !strings.HasPrefix(hex, shortHex) {
|
||||
return false
|
||||
} else if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Lookup looks for a digest matching the given string representation.
|
||||
// If no digests could be found ErrDigestNotFound will be returned
|
||||
// with an empty digest value. If multiple matches are found
|
||||
// ErrDigestAmbiguous will be returned with an empty digest value.
|
||||
func (dst *Set) Lookup(d string) (digest.Digest, error) {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
if len(dst.entries) == 0 {
|
||||
return "", ErrDigestNotFound
|
||||
}
|
||||
var (
|
||||
searchFunc func(int) bool
|
||||
alg digest.Algorithm
|
||||
hex string
|
||||
)
|
||||
dgst, err := digest.Parse(d)
|
||||
if err == digest.ErrDigestInvalidFormat {
|
||||
hex = d
|
||||
searchFunc = func(i int) bool {
|
||||
return dst.entries[i].val >= d
|
||||
}
|
||||
} else {
|
||||
hex = dgst.Hex()
|
||||
alg = dgst.Algorithm()
|
||||
searchFunc = func(i int) bool {
|
||||
if dst.entries[i].val == hex {
|
||||
return dst.entries[i].alg >= alg
|
||||
}
|
||||
return dst.entries[i].val >= hex
|
||||
}
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
|
||||
return "", ErrDigestNotFound
|
||||
}
|
||||
if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
|
||||
return dst.entries[idx].digest, nil
|
||||
}
|
||||
if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
|
||||
return "", ErrDigestAmbiguous
|
||||
}
|
||||
|
||||
return dst.entries[idx].digest, nil
|
||||
}
|
||||
|
||||
// Add adds the given digest to the set. An error will be returned
|
||||
// if the given digest is invalid. If the digest already exists in the
|
||||
// set, this operation will be a no-op.
|
||||
func (dst *Set) Add(d digest.Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
dst.mutex.Lock()
|
||||
defer dst.mutex.Unlock()
|
||||
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||
searchFunc := func(i int) bool {
|
||||
if dst.entries[i].val == entry.val {
|
||||
return dst.entries[i].alg >= entry.alg
|
||||
}
|
||||
return dst.entries[i].val >= entry.val
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
if idx == len(dst.entries) {
|
||||
dst.entries = append(dst.entries, entry)
|
||||
return nil
|
||||
} else if dst.entries[idx].digest == d {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := append(dst.entries, nil)
|
||||
copy(entries[idx+1:], entries[idx:len(entries)-1])
|
||||
entries[idx] = entry
|
||||
dst.entries = entries
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove removes the given digest from the set. An error will be
|
||||
// returned if the given digest is invalid. If the digest does
|
||||
// not exist in the set, this operation will be a no-op.
|
||||
func (dst *Set) Remove(d digest.Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
dst.mutex.Lock()
|
||||
defer dst.mutex.Unlock()
|
||||
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||
searchFunc := func(i int) bool {
|
||||
if dst.entries[i].val == entry.val {
|
||||
return dst.entries[i].alg >= entry.alg
|
||||
}
|
||||
return dst.entries[i].val >= entry.val
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
// Not found if idx is after or value at idx is not digest
|
||||
if idx == len(dst.entries) || dst.entries[idx].digest != d {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := dst.entries
|
||||
copy(entries[idx:], entries[idx+1:])
|
||||
entries = entries[:len(entries)-1]
|
||||
dst.entries = entries
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// All returns all the digests in the set
|
||||
func (dst *Set) All() []digest.Digest {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
retValues := make([]digest.Digest, len(dst.entries))
|
||||
for i := range dst.entries {
|
||||
retValues[i] = dst.entries[i].digest
|
||||
}
|
||||
|
||||
return retValues
|
||||
}
|
||||
|
||||
// ShortCodeTable returns a map of Digest to unique short codes. The
|
||||
// length represents the minimum value, the maximum length may be the
|
||||
// entire value of digest if uniqueness cannot be achieved without the
|
||||
// full value. This function will attempt to make short codes as short
|
||||
// as possible to be unique.
|
||||
func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
m := make(map[digest.Digest]string, len(dst.entries))
|
||||
l := length
|
||||
resetIdx := 0
|
||||
for i := 0; i < len(dst.entries); i++ {
|
||||
var short string
|
||||
extended := true
|
||||
for extended {
|
||||
extended = false
|
||||
if len(dst.entries[i].val) <= l {
|
||||
short = dst.entries[i].digest.String()
|
||||
} else {
|
||||
short = dst.entries[i].val[:l]
|
||||
for j := i + 1; j < len(dst.entries); j++ {
|
||||
if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
|
||||
if j > resetIdx {
|
||||
resetIdx = j
|
||||
}
|
||||
extended = true
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
if extended {
|
||||
l++
|
||||
}
|
||||
}
|
||||
}
|
||||
m[dst.entries[i].digest] = short
|
||||
if i >= resetIdx {
|
||||
l = length
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
type digestEntry struct {
|
||||
alg digest.Algorithm
|
||||
val string
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
type digestEntries []*digestEntry
|
||||
|
||||
func (d digestEntries) Len() int {
|
||||
return len(d)
|
||||
}
|
||||
|
||||
func (d digestEntries) Less(i, j int) bool {
|
||||
if d[i].val != d[j].val {
|
||||
return d[i].val < d[j].val
|
||||
}
|
||||
return d[i].alg < d[j].alg
|
||||
}
|
||||
|
||||
func (d digestEntries) Swap(i, j int) {
|
||||
d[i], d[j] = d[j], d[i]
|
||||
}
|
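A short usage sketch of the Set API documented above, assuming a single known digest; the sha256 value is just an example:

package main

import (
	"fmt"

	"github.com/docker/distribution/digestset"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	set := digestset.NewSet()
	// Example digest; any valid sha256 digest works here.
	d := digest.Digest("sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c")
	if err := set.Add(d); err != nil {
		panic(err)
	}

	// Lookup resolves a short prefix against the complete set, as the comment
	// above recommends, and reports ErrDigestAmbiguous on a colliding prefix.
	found, err := set.Lookup("b5bb9d")
	fmt.Println(found == d, err) // true <nil>
}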
42
vendor/github.com/docker/distribution/reference/helpers.go
generated
vendored
42
vendor/github.com/docker/distribution/reference/helpers.go
generated
vendored
@ -1,42 +0,0 @@
|
||||
package reference
|
||||
|
||||
import "path"
|
||||
|
||||
// IsNameOnly returns true if reference only contains a repo name.
|
||||
func IsNameOnly(ref Named) bool {
|
||||
if _, ok := ref.(NamedTagged); ok {
|
||||
return false
|
||||
}
|
||||
if _, ok := ref.(Canonical); ok {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// FamiliarName returns the familiar name string
|
||||
// for the given named, familiarizing if needed.
|
||||
func FamiliarName(ref Named) string {
|
||||
if nn, ok := ref.(normalizedNamed); ok {
|
||||
return nn.Familiar().Name()
|
||||
}
|
||||
return ref.Name()
|
||||
}
|
||||
|
||||
// FamiliarString returns the familiar string representation
|
||||
// for the given reference, familiarizing if needed.
|
||||
func FamiliarString(ref Reference) string {
|
||||
if nn, ok := ref.(normalizedNamed); ok {
|
||||
return nn.Familiar().String()
|
||||
}
|
||||
return ref.String()
|
||||
}
|
||||
|
||||
// FamiliarMatch reports whether ref matches the specified pattern.
|
||||
// See https://godoc.org/path#Match for supported patterns.
|
||||
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
|
||||
matched, err := path.Match(pattern, FamiliarString(ref))
|
||||
if namedRef, isNamed := ref.(Named); isNamed && !matched {
|
||||
matched, _ = path.Match(pattern, FamiliarName(namedRef))
|
||||
}
|
||||
return matched, err
|
||||
}
|
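A brief sketch of FamiliarMatch in use, assuming a parsed reference; the image name is hypothetical, and ParseNormalizedNamed comes from normalize.go below:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("docker.io/rancher/k3s:v0.1.0")
	if err != nil {
		panic(err)
	}
	// The pattern is matched against the familiar string first and, failing
	// that, against the familiar name, as described in the comment above.
	ok, err := reference.FamiliarMatch("rancher/*", ref)
	fmt.Println(ok, err) // true <nil>
}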
170
vendor/github.com/docker/distribution/reference/normalize.go
generated
vendored
170
vendor/github.com/docker/distribution/reference/normalize.go
generated
vendored
@ -1,170 +0,0 @@
|
||||
package reference
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/digestset"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var (
|
||||
legacyDefaultDomain = "index.docker.io"
|
||||
defaultDomain = "docker.io"
|
||||
officialRepoName = "library"
|
||||
defaultTag = "latest"
|
||||
)
|
||||
|
||||
// normalizedNamed represents a name which has been
|
||||
// normalized and has a familiar form. A familiar name
|
||||
// is what is used in Docker UI. An example normalized
|
||||
// name is "docker.io/library/ubuntu" and corresponding
|
||||
// familiar name of "ubuntu".
|
||||
type normalizedNamed interface {
|
||||
Named
|
||||
Familiar() Named
|
||||
}
|
||||
|
||||
// ParseNormalizedNamed parses a string into a named reference
|
||||
// transforming a familiar name from Docker UI to a fully
|
||||
// qualified reference. If the value may be an identifier
|
||||
// use ParseAnyReference.
|
||||
func ParseNormalizedNamed(s string) (Named, error) {
|
||||
if ok := anchoredIdentifierRegexp.MatchString(s); ok {
|
||||
return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
|
||||
}
|
||||
domain, remainder := splitDockerDomain(s)
|
||||
var remoteName string
|
||||
if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
|
||||
remoteName = remainder[:tagSep]
|
||||
} else {
|
||||
remoteName = remainder
|
||||
}
|
||||
if strings.ToLower(remoteName) != remoteName {
|
||||
return nil, errors.New("invalid reference format: repository name must be lowercase")
|
||||
}
|
||||
|
||||
ref, err := Parse(domain + "/" + remainder)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
named, isNamed := ref.(Named)
|
||||
if !isNamed {
|
||||
return nil, fmt.Errorf("reference %s has no name", ref.String())
|
||||
}
|
||||
return named, nil
|
||||
}
|
||||
|
||||
// splitDockerDomain splits a repository name to domain and remotename string.
|
||||
// If no valid domain is found, the default domain is used. Repository name
|
||||
// needs to be already validated before.
|
||||
func splitDockerDomain(name string) (domain, remainder string) {
|
||||
i := strings.IndexRune(name, '/')
|
||||
if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
|
||||
domain, remainder = defaultDomain, name
|
||||
} else {
|
||||
domain, remainder = name[:i], name[i+1:]
|
||||
}
|
||||
if domain == legacyDefaultDomain {
|
||||
domain = defaultDomain
|
||||
}
|
||||
if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
|
||||
remainder = officialRepoName + "/" + remainder
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// familiarizeName returns a shortened version of the name familiar
|
||||
// to the Docker UI. Familiar names have the default domain
|
||||
// "docker.io" and "library/" repository prefix removed.
|
||||
// For example, "docker.io/library/redis" will have the familiar
|
||||
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
|
||||
// Returns a familiarized named only reference.
|
||||
func familiarizeName(named namedRepository) repository {
|
||||
repo := repository{
|
||||
domain: named.Domain(),
|
||||
path: named.Path(),
|
||||
}
|
||||
|
||||
if repo.domain == defaultDomain {
|
||||
repo.domain = ""
|
||||
// Handle official repositories which have the pattern "library/<official repo name>"
|
||||
if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
|
||||
repo.path = split[1]
|
||||
}
|
||||
}
|
||||
return repo
|
||||
}
|
||||
|
||||
func (r reference) Familiar() Named {
|
||||
return reference{
|
||||
namedRepository: familiarizeName(r.namedRepository),
|
||||
tag: r.tag,
|
||||
digest: r.digest,
|
||||
}
|
||||
}
|
||||
|
||||
func (r repository) Familiar() Named {
|
||||
return familiarizeName(r)
|
||||
}
|
||||
|
||||
func (t taggedReference) Familiar() Named {
|
||||
return taggedReference{
|
||||
namedRepository: familiarizeName(t.namedRepository),
|
||||
tag: t.tag,
|
||||
}
|
||||
}
|
||||
|
||||
func (c canonicalReference) Familiar() Named {
|
||||
return canonicalReference{
|
||||
namedRepository: familiarizeName(c.namedRepository),
|
||||
digest: c.digest,
|
||||
}
|
||||
}
|
||||
|
||||
// TagNameOnly adds the default tag "latest" to a reference if it only has
|
||||
// a repo name.
|
||||
func TagNameOnly(ref Named) Named {
|
||||
if IsNameOnly(ref) {
|
||||
namedTagged, err := WithTag(ref, defaultTag)
|
||||
if err != nil {
|
||||
// Default tag must be valid, to create a NamedTagged
|
||||
// type with non-validated input the WithTag function
|
||||
// should be used instead
|
||||
panic(err)
|
||||
}
|
||||
return namedTagged
|
||||
}
|
||||
return ref
|
||||
}
|
||||
|
||||
// ParseAnyReference parses a reference string as a possible identifier,
|
||||
// full digest, or familiar name.
|
||||
func ParseAnyReference(ref string) (Reference, error) {
|
||||
if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
|
||||
return digestReference("sha256:" + ref), nil
|
||||
}
|
||||
if dgst, err := digest.Parse(ref); err == nil {
|
||||
return digestReference(dgst), nil
|
||||
}
|
||||
|
||||
return ParseNormalizedNamed(ref)
|
||||
}
|
||||
|
||||
// ParseAnyReferenceWithSet parses a reference string as a possible short
|
||||
// identifier to be matched in a digest set, a full digest, or familiar name.
|
||||
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
|
||||
if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
|
||||
dgst, err := ds.Lookup(ref)
|
||||
if err == nil {
|
||||
return digestReference(dgst), nil
|
||||
}
|
||||
} else {
|
||||
if dgst, err := digest.Parse(ref); err == nil {
|
||||
return digestReference(dgst), nil
|
||||
}
|
||||
}
|
||||
|
||||
return ParseNormalizedNamed(ref)
|
||||
}
|
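A small sketch of the familiar/normalized round trip described in the comments above; the image name is arbitrary:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// "ubuntu" is the familiar form; parsing normalizes it to the fully
	// qualified "docker.io/library/ubuntu" described above.
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	named = reference.TagNameOnly(named) // adds the default ":latest" tag

	fmt.Println(named.String())                  // docker.io/library/ubuntu:latest
	fmt.Println(reference.FamiliarString(named)) // ubuntu:latest
}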
433
vendor/github.com/docker/distribution/reference/reference.go
generated
vendored
433
vendor/github.com/docker/distribution/reference/reference.go
generated
vendored
@ -1,433 +0,0 @@
|
||||
// Package reference provides a general type to represent any way of referencing images within the registry.
|
||||
// Its main purpose is to abstract tags and digests (content-addressable hash).
|
||||
//
|
||||
// Grammar
|
||||
//
|
||||
// reference := name [ ":" tag ] [ "@" digest ]
|
||||
// name := [domain '/'] path-component ['/' path-component]*
|
||||
// domain := domain-component ['.' domain-component]* [':' port-number]
|
||||
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
|
||||
// port-number := /[0-9]+/
|
||||
// path-component := alpha-numeric [separator alpha-numeric]*
|
||||
// alpha-numeric := /[a-z0-9]+/
|
||||
// separator := /[_.]|__|[-]*/
|
||||
//
|
||||
// tag := /[\w][\w.-]{0,127}/
|
||||
//
|
||||
// digest := digest-algorithm ":" digest-hex
|
||||
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
|
||||
// digest-algorithm-separator := /[+.-_]/
|
||||
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
|
||||
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
|
||||
//
|
||||
// identifier := /[a-f0-9]{64}/
|
||||
// short-identifier := /[a-f0-9]{6,64}/
|
||||
package reference
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
const (
|
||||
// NameTotalLengthMax is the maximum total number of characters in a repository name.
|
||||
NameTotalLengthMax = 255
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
|
||||
ErrReferenceInvalidFormat = errors.New("invalid reference format")
|
||||
|
||||
// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
|
||||
ErrTagInvalidFormat = errors.New("invalid tag format")
|
||||
|
||||
// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
|
||||
ErrDigestInvalidFormat = errors.New("invalid digest format")
|
||||
|
||||
// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
|
||||
ErrNameContainsUppercase = errors.New("repository name must be lowercase")
|
||||
|
||||
// ErrNameEmpty is returned for empty, invalid repository names.
|
||||
ErrNameEmpty = errors.New("repository name must have at least one component")
|
||||
|
||||
// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
|
||||
ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
|
||||
|
||||
// ErrNameNotCanonical is returned when a name is not canonical.
|
||||
ErrNameNotCanonical = errors.New("repository name must be canonical")
|
||||
)
|
||||
|
||||
// Reference is an opaque object reference identifier that may include
|
||||
// modifiers such as a hostname, name, tag, and digest.
|
||||
type Reference interface {
|
||||
// String returns the full reference
|
||||
String() string
|
||||
}
|
||||
|
||||
// Field provides a wrapper type for resolving correct reference types when
|
||||
// working with encoding.
|
||||
type Field struct {
|
||||
reference Reference
|
||||
}
|
||||
|
||||
// AsField wraps a reference in a Field for encoding.
|
||||
func AsField(reference Reference) Field {
|
||||
return Field{reference}
|
||||
}
|
||||
|
||||
// Reference unwraps the reference type from the field to
|
||||
// return the Reference object. This object should be
|
||||
// of the appropriate type to further check for different
|
||||
// reference types.
|
||||
func (f Field) Reference() Reference {
|
||||
return f.reference
|
||||
}
|
||||
|
||||
// MarshalText serializes the field to byte text which
|
||||
// is the string of the reference.
|
||||
func (f Field) MarshalText() (p []byte, err error) {
|
||||
return []byte(f.reference.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText parses text bytes by invoking the
|
||||
// reference parser to ensure the appropriately
|
||||
// typed reference object is wrapped by field.
|
||||
func (f *Field) UnmarshalText(p []byte) error {
|
||||
r, err := Parse(string(p))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.reference = r
|
||||
return nil
|
||||
}
|
||||
|
||||
// Named is an object with a full name
|
||||
type Named interface {
|
||||
Reference
|
||||
Name() string
|
||||
}
|
||||
|
||||
// Tagged is an object which has a tag
|
||||
type Tagged interface {
|
||||
Reference
|
||||
Tag() string
|
||||
}
|
||||
|
||||
// NamedTagged is an object including a name and tag.
|
||||
type NamedTagged interface {
|
||||
Named
|
||||
Tag() string
|
||||
}
|
||||
|
||||
// Digested is an object which has a digest
|
||||
// in which it can be referenced by
|
||||
type Digested interface {
|
||||
Reference
|
||||
Digest() digest.Digest
|
||||
}
|
||||
|
||||
// Canonical reference is an object with a fully unique
|
||||
// name including a name with domain and digest
|
||||
type Canonical interface {
|
||||
Named
|
||||
Digest() digest.Digest
|
||||
}
|
||||
|
||||
// namedRepository is a reference to a repository with a name.
|
||||
// A namedRepository has both domain and path components.
|
||||
type namedRepository interface {
|
||||
Named
|
||||
Domain() string
|
||||
Path() string
|
||||
}
|
||||
|
||||
// Domain returns the domain part of the Named reference
|
||||
func Domain(named Named) string {
|
||||
if r, ok := named.(namedRepository); ok {
|
||||
return r.Domain()
|
||||
}
|
||||
domain, _ := splitDomain(named.Name())
|
||||
return domain
|
||||
}
|
||||
|
||||
// Path returns the name without the domain part of the Named reference
|
||||
func Path(named Named) (name string) {
|
||||
if r, ok := named.(namedRepository); ok {
|
||||
return r.Path()
|
||||
}
|
||||
_, path := splitDomain(named.Name())
|
||||
return path
|
||||
}
|
||||
|
||||
func splitDomain(name string) (string, string) {
|
||||
match := anchoredNameRegexp.FindStringSubmatch(name)
|
||||
if len(match) != 3 {
|
||||
return "", name
|
||||
}
|
||||
return match[1], match[2]
|
||||
}
|
||||
|
||||
// SplitHostname splits a named reference into a
|
||||
// hostname and name string. If no valid hostname is
|
||||
// found, the hostname is empty and the full value
|
||||
// is returned as name
|
||||
// DEPRECATED: Use Domain or Path
|
||||
func SplitHostname(named Named) (string, string) {
|
||||
if r, ok := named.(namedRepository); ok {
|
||||
return r.Domain(), r.Path()
|
||||
}
|
||||
return splitDomain(named.Name())
|
||||
}
|
||||
|
||||
// Parse parses s and returns a syntactically valid Reference.
|
||||
// If an error was encountered it is returned, along with a nil Reference.
|
||||
// NOTE: Parse will not handle short digests.
|
||||
func Parse(s string) (Reference, error) {
|
||||
matches := ReferenceRegexp.FindStringSubmatch(s)
|
||||
if matches == nil {
|
||||
if s == "" {
|
||||
return nil, ErrNameEmpty
|
||||
}
|
||||
if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
|
||||
return nil, ErrNameContainsUppercase
|
||||
}
|
||||
return nil, ErrReferenceInvalidFormat
|
||||
}
|
||||
|
||||
if len(matches[1]) > NameTotalLengthMax {
|
||||
return nil, ErrNameTooLong
|
||||
}
|
||||
|
||||
var repo repository
|
||||
|
||||
nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
|
||||
if nameMatch != nil && len(nameMatch) == 3 {
|
||||
repo.domain = nameMatch[1]
|
||||
repo.path = nameMatch[2]
|
||||
} else {
|
||||
repo.domain = ""
|
||||
repo.path = matches[1]
|
||||
}
|
||||
|
||||
ref := reference{
|
||||
namedRepository: repo,
|
||||
tag: matches[2],
|
||||
}
|
||||
if matches[3] != "" {
|
||||
var err error
|
||||
ref.digest, err = digest.Parse(matches[3])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
r := getBestReferenceType(ref)
|
||||
if r == nil {
|
||||
return nil, ErrNameEmpty
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// ParseNamed parses s and returns a syntactically valid reference implementing
|
||||
// the Named interface. The reference must have a name and be in the canonical
|
||||
// form, otherwise an error is returned.
|
||||
// If an error was encountered it is returned, along with a nil Reference.
|
||||
// NOTE: ParseNamed will not handle short digests.
|
||||
func ParseNamed(s string) (Named, error) {
|
||||
named, err := ParseNormalizedNamed(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if named.String() != s {
|
||||
return nil, ErrNameNotCanonical
|
||||
}
|
||||
return named, nil
|
||||
}
|
||||
|
||||
// WithName returns a named object representing the given string. If the input
|
||||
// is invalid ErrReferenceInvalidFormat will be returned.
|
||||
func WithName(name string) (Named, error) {
|
||||
if len(name) > NameTotalLengthMax {
|
||||
return nil, ErrNameTooLong
|
||||
}
|
||||
|
||||
match := anchoredNameRegexp.FindStringSubmatch(name)
|
||||
if match == nil || len(match) != 3 {
|
||||
return nil, ErrReferenceInvalidFormat
|
||||
}
|
||||
return repository{
|
||||
domain: match[1],
|
||||
path: match[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WithTag combines the name from "name" and the tag from "tag" to form a
|
||||
// reference incorporating both the name and the tag.
|
||||
func WithTag(name Named, tag string) (NamedTagged, error) {
|
||||
if !anchoredTagRegexp.MatchString(tag) {
|
||||
return nil, ErrTagInvalidFormat
|
||||
}
|
||||
var repo repository
|
||||
if r, ok := name.(namedRepository); ok {
|
||||
repo.domain = r.Domain()
|
||||
repo.path = r.Path()
|
||||
} else {
|
||||
repo.path = name.Name()
|
||||
}
|
||||
if canonical, ok := name.(Canonical); ok {
|
||||
return reference{
|
||||
namedRepository: repo,
|
||||
tag: tag,
|
||||
digest: canonical.Digest(),
|
||||
}, nil
|
||||
}
|
||||
return taggedReference{
|
||||
namedRepository: repo,
|
||||
tag: tag,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WithDigest combines the name from "name" and the digest from "digest" to form
|
||||
// a reference incorporating both the name and the digest.
|
||||
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
|
||||
if !anchoredDigestRegexp.MatchString(digest.String()) {
|
||||
return nil, ErrDigestInvalidFormat
|
||||
}
|
||||
var repo repository
|
||||
if r, ok := name.(namedRepository); ok {
|
||||
repo.domain = r.Domain()
|
||||
repo.path = r.Path()
|
||||
} else {
|
||||
repo.path = name.Name()
|
||||
}
|
||||
if tagged, ok := name.(Tagged); ok {
|
||||
return reference{
|
||||
namedRepository: repo,
|
||||
tag: tagged.Tag(),
|
||||
digest: digest,
|
||||
}, nil
|
||||
}
|
||||
return canonicalReference{
|
||||
namedRepository: repo,
|
||||
digest: digest,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// TrimNamed removes any tag or digest from the named reference.
|
||||
func TrimNamed(ref Named) Named {
|
||||
domain, path := SplitHostname(ref)
|
||||
return repository{
|
||||
domain: domain,
|
||||
path: path,
|
||||
}
|
||||
}
|
||||
|
||||
func getBestReferenceType(ref reference) Reference {
|
||||
if ref.Name() == "" {
|
||||
// Allow digest only references
|
||||
if ref.digest != "" {
|
||||
return digestReference(ref.digest)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if ref.tag == "" {
|
||||
if ref.digest != "" {
|
||||
return canonicalReference{
|
||||
namedRepository: ref.namedRepository,
|
||||
digest: ref.digest,
|
||||
}
|
||||
}
|
||||
return ref.namedRepository
|
||||
}
|
||||
if ref.digest == "" {
|
||||
return taggedReference{
|
||||
namedRepository: ref.namedRepository,
|
||||
tag: ref.tag,
|
||||
}
|
||||
}
|
||||
|
||||
return ref
|
||||
}
|
||||
|
||||
type reference struct {
|
||||
namedRepository
|
||||
tag string
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
func (r reference) String() string {
|
||||
return r.Name() + ":" + r.tag + "@" + r.digest.String()
|
||||
}
|
||||
|
||||
func (r reference) Tag() string {
|
||||
return r.tag
|
||||
}
|
||||
|
||||
func (r reference) Digest() digest.Digest {
|
||||
return r.digest
|
||||
}
|
||||
|
||||
type repository struct {
|
||||
domain string
|
||||
path string
|
||||
}
|
||||
|
||||
func (r repository) String() string {
|
||||
return r.Name()
|
||||
}
|
||||
|
||||
func (r repository) Name() string {
|
||||
if r.domain == "" {
|
||||
return r.path
|
||||
}
|
||||
return r.domain + "/" + r.path
|
||||
}
|
||||
|
||||
func (r repository) Domain() string {
|
||||
return r.domain
|
||||
}
|
||||
|
||||
func (r repository) Path() string {
|
||||
return r.path
|
||||
}
|
||||
|
||||
type digestReference digest.Digest
|
||||
|
||||
func (d digestReference) String() string {
|
||||
return digest.Digest(d).String()
|
||||
}
|
||||
|
||||
func (d digestReference) Digest() digest.Digest {
|
||||
return digest.Digest(d)
|
||||
}
|
||||
|
||||
type taggedReference struct {
|
||||
namedRepository
|
||||
tag string
|
||||
}
|
||||
|
||||
func (t taggedReference) String() string {
|
||||
return t.Name() + ":" + t.tag
|
||||
}
|
||||
|
||||
func (t taggedReference) Tag() string {
|
||||
return t.tag
|
||||
}
|
||||
|
||||
type canonicalReference struct {
|
||||
namedRepository
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
func (c canonicalReference) String() string {
|
||||
return c.Name() + "@" + c.digest.String()
|
||||
}
|
||||
|
||||
func (c canonicalReference) Digest() digest.Digest {
|
||||
return c.digest
|
||||
}
|
143
vendor/github.com/docker/distribution/reference/regexp.go
generated
vendored
@@ -1,143 +0,0 @@
|
||||
package reference
|
||||
|
||||
import "regexp"
|
||||
|
||||
var (
|
||||
// alphaNumericRegexp defines the alpha numeric atom, typically a
|
||||
// component of names. This only allows lower case characters and digits.
|
||||
alphaNumericRegexp = match(`[a-z0-9]+`)
|
||||
|
||||
// separatorRegexp defines the separators allowed to be embedded in name
|
||||
// components. This allow one period, one or two underscore and multiple
|
||||
// dashes.
|
||||
separatorRegexp = match(`(?:[._]|__|[-]*)`)
|
||||
|
||||
// nameComponentRegexp restricts registry path component names to start
|
||||
// with at least one letter or number, with following parts able to be
|
||||
// separated by one period, one or two underscore and multiple dashes.
|
||||
nameComponentRegexp = expression(
|
||||
alphaNumericRegexp,
|
||||
optional(repeated(separatorRegexp, alphaNumericRegexp)))
|
||||
|
||||
// domainComponentRegexp restricts the registry domain component of a
|
||||
// repository name to start with a component as defined by DomainRegexp
|
||||
// and followed by an optional port.
|
||||
domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
|
||||
|
||||
// DomainRegexp defines the structure of potential domain components
|
||||
// that may be part of image names. This is purposely a subset of what is
|
||||
// allowed by DNS to ensure backwards compatibility with Docker image
|
||||
// names.
|
||||
DomainRegexp = expression(
|
||||
domainComponentRegexp,
|
||||
optional(repeated(literal(`.`), domainComponentRegexp)),
|
||||
optional(literal(`:`), match(`[0-9]+`)))
|
||||
|
||||
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
|
||||
TagRegexp = match(`[\w][\w.-]{0,127}`)
|
||||
|
||||
// anchoredTagRegexp matches valid tag names, anchored at the start and
|
||||
// end of the matched string.
|
||||
anchoredTagRegexp = anchored(TagRegexp)
|
||||
|
||||
// DigestRegexp matches valid digests.
|
||||
DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
|
||||
|
||||
// anchoredDigestRegexp matches valid digests, anchored at the start and
|
||||
// end of the matched string.
|
||||
anchoredDigestRegexp = anchored(DigestRegexp)
|
||||
|
||||
// NameRegexp is the format for the name component of references. The
|
||||
// regexp has capturing groups for the domain and name part omitting
|
||||
// the separating forward slash from either.
|
||||
NameRegexp = expression(
|
||||
optional(DomainRegexp, literal(`/`)),
|
||||
nameComponentRegexp,
|
||||
optional(repeated(literal(`/`), nameComponentRegexp)))
|
||||
|
||||
// anchoredNameRegexp is used to parse a name value, capturing the
|
||||
// domain and trailing components.
|
||||
anchoredNameRegexp = anchored(
|
||||
optional(capture(DomainRegexp), literal(`/`)),
|
||||
capture(nameComponentRegexp,
|
||||
optional(repeated(literal(`/`), nameComponentRegexp))))
|
||||
|
||||
// ReferenceRegexp is the full supported format of a reference. The regexp
|
||||
// is anchored and has capturing groups for name, tag, and digest
|
||||
// components.
|
||||
ReferenceRegexp = anchored(capture(NameRegexp),
|
||||
optional(literal(":"), capture(TagRegexp)),
|
||||
optional(literal("@"), capture(DigestRegexp)))
|
||||
|
||||
// IdentifierRegexp is the format for string identifier used as a
|
||||
// content addressable identifier using sha256. These identifiers
|
||||
// are like digests without the algorithm, since sha256 is used.
|
||||
IdentifierRegexp = match(`([a-f0-9]{64})`)
|
||||
|
||||
// ShortIdentifierRegexp is the format used to represent a prefix
|
||||
// of an identifier. A prefix may be used to match a sha256 identifier
|
||||
// within a list of trusted identifiers.
|
||||
ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
|
||||
|
||||
// anchoredIdentifierRegexp is used to check or match an
|
||||
// identifier value, anchored at start and end of string.
|
||||
anchoredIdentifierRegexp = anchored(IdentifierRegexp)
|
||||
|
||||
// anchoredShortIdentifierRegexp is used to check if a value
|
||||
// is a possible identifier prefix, anchored at start and end
|
||||
// of string.
|
||||
anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
|
||||
)
|
||||
|
||||
// match compiles the string to a regular expression.
|
||||
var match = regexp.MustCompile
|
||||
|
||||
// literal compiles s into a literal regular expression, escaping any regexp
|
||||
// reserved characters.
|
||||
func literal(s string) *regexp.Regexp {
|
||||
re := match(regexp.QuoteMeta(s))
|
||||
|
||||
if _, complete := re.LiteralPrefix(); !complete {
|
||||
panic("must be a literal")
|
||||
}
|
||||
|
||||
return re
|
||||
}
|
||||
|
||||
// expression defines a full expression, where each regular expression must
|
||||
// follow the previous.
|
||||
func expression(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
var s string
|
||||
for _, re := range res {
|
||||
s += re.String()
|
||||
}
|
||||
|
||||
return match(s)
|
||||
}
|
||||
|
||||
// optional wraps the expression in a non-capturing group and makes the
|
||||
// production optional.
|
||||
func optional(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(group(expression(res...)).String() + `?`)
|
||||
}
|
||||
|
||||
// repeated wraps the regexp in a non-capturing group to get one or more
|
||||
// matches.
|
||||
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(group(expression(res...)).String() + `+`)
|
||||
}
|
||||
|
||||
// group wraps the regexp in a non-capturing group.
|
||||
func group(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`(?:` + expression(res...).String() + `)`)
|
||||
}
|
||||
|
||||
// capture wraps the expression in a capturing group.
|
||||
func capture(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`(` + expression(res...).String() + `)`)
|
||||
}
|
||||
|
||||
// anchored anchors the regular expression by adding start and end delimiters.
|
||||
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`^` + expression(res...).String() + `$`)
|
||||
}
|
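The helpers above (match, literal, expression, optional, repeated, group, capture, anchored) build the exported patterns by concatenating the textual form of already-compiled sub-expressions and recompiling the result. A minimal sketch of that same composition technique using only the standard library regexp package; the name and tag atoms below are simplified, illustrative stand-ins for the real ones:

package main

import (
	"fmt"
	"regexp"
)

var match = regexp.MustCompile

// expression concatenates already-compiled sub-expressions into one pattern.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
	s := ""
	for _, re := range res {
		s += re.String()
	}
	return match(s)
}

// optional wraps the sub-expressions in a non-capturing, optional group.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`(?:` + expression(res...).String() + `)?`)
}

// anchored pins the whole expression to the start and end of the input.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`^` + expression(res...).String() + `$`)
}

func main() {
	// A toy "name[:tag]" matcher assembled the same way as ReferenceRegexp.
	name := match(`[a-z0-9]+(?:[/._-][a-z0-9]+)*`)
	tag := match(`[\w][\w.-]{0,127}`)
	ref := anchored(name, optional(match(`:`), tag))

	fmt.Println(ref.MatchString("library/nginx:1.17")) // true
	fmt.Println(ref.MatchString("Library/Nginx"))      // false: uppercase is rejected
}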
267
vendor/github.com/docker/distribution/registry/api/errcode/errors.go
generated
vendored
@@ -1,267 +0,0 @@
|
||||
package errcode
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ErrorCoder is the base interface for ErrorCode and Error allowing
|
||||
// users of each to just call ErrorCode to get the real ID of each
|
||||
type ErrorCoder interface {
|
||||
ErrorCode() ErrorCode
|
||||
}
|
||||
|
||||
// ErrorCode represents the error type. The errors are serialized via strings
|
||||
// and the integer format may change and should *never* be exported.
|
||||
type ErrorCode int
|
||||
|
||||
var _ error = ErrorCode(0)
|
||||
|
||||
// ErrorCode just returns itself
|
||||
func (ec ErrorCode) ErrorCode() ErrorCode {
|
||||
return ec
|
||||
}
|
||||
|
||||
// Error returns the ID/Value
|
||||
func (ec ErrorCode) Error() string {
|
||||
// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
|
||||
return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
|
||||
}
|
||||
|
||||
// Descriptor returns the descriptor for the error code.
|
||||
func (ec ErrorCode) Descriptor() ErrorDescriptor {
|
||||
d, ok := errorCodeToDescriptors[ec]
|
||||
|
||||
if !ok {
|
||||
return ErrorCodeUnknown.Descriptor()
|
||||
}
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
// String returns the canonical identifier for this error code.
|
||||
func (ec ErrorCode) String() string {
|
||||
return ec.Descriptor().Value
|
||||
}
|
||||
|
||||
// Message returned the human-readable error message for this error code.
|
||||
func (ec ErrorCode) Message() string {
|
||||
return ec.Descriptor().Message
|
||||
}
|
||||
|
||||
// MarshalText encodes the receiver into UTF-8-encoded text and returns the
|
||||
// result.
|
||||
func (ec ErrorCode) MarshalText() (text []byte, err error) {
|
||||
return []byte(ec.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText decodes the form generated by MarshalText.
|
||||
func (ec *ErrorCode) UnmarshalText(text []byte) error {
|
||||
desc, ok := idToDescriptors[string(text)]
|
||||
|
||||
if !ok {
|
||||
desc = ErrorCodeUnknown.Descriptor()
|
||||
}
|
||||
|
||||
*ec = desc.Code
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithMessage creates a new Error struct based on the passed-in info and
|
||||
// overrides the Message property.
|
||||
func (ec ErrorCode) WithMessage(message string) Error {
|
||||
return Error{
|
||||
Code: ec,
|
||||
Message: message,
|
||||
}
|
||||
}
|
||||
|
||||
// WithDetail creates a new Error struct based on the passed-in info and
|
||||
// set the Detail property appropriately
|
||||
func (ec ErrorCode) WithDetail(detail interface{}) Error {
|
||||
return Error{
|
||||
Code: ec,
|
||||
Message: ec.Message(),
|
||||
}.WithDetail(detail)
|
||||
}
|
||||
|
||||
// WithArgs creates a new Error struct and sets the Args slice
|
||||
func (ec ErrorCode) WithArgs(args ...interface{}) Error {
|
||||
return Error{
|
||||
Code: ec,
|
||||
Message: ec.Message(),
|
||||
}.WithArgs(args...)
|
||||
}
|
||||
|
||||
// Error provides a wrapper around ErrorCode with extra Details provided.
|
||||
type Error struct {
|
||||
Code ErrorCode `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Detail interface{} `json:"detail,omitempty"`
|
||||
|
||||
// TODO(duglin): See if we need an "args" property so we can do the
|
||||
// variable substitution right before showing the message to the user
|
||||
}
|
||||
|
||||
var _ error = Error{}
|
||||
|
||||
// ErrorCode returns the ID/Value of this Error
|
||||
func (e Error) ErrorCode() ErrorCode {
|
||||
return e.Code
|
||||
}
|
||||
|
||||
// Error returns a human readable representation of the error.
|
||||
func (e Error) Error() string {
|
||||
return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
|
||||
}
|
||||
|
||||
// WithDetail will return a new Error, based on the current one, but with
|
||||
// some Detail info added
|
||||
func (e Error) WithDetail(detail interface{}) Error {
|
||||
return Error{
|
||||
Code: e.Code,
|
||||
Message: e.Message,
|
||||
Detail: detail,
|
||||
}
|
||||
}
|
||||
|
||||
// WithArgs uses the passed-in list of interface{} as the substitution
|
||||
// variables in the Error's Message string, but returns a new Error
|
||||
func (e Error) WithArgs(args ...interface{}) Error {
|
||||
return Error{
|
||||
Code: e.Code,
|
||||
Message: fmt.Sprintf(e.Code.Message(), args...),
|
||||
Detail: e.Detail,
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorDescriptor provides relevant information about a given error code.
|
||||
type ErrorDescriptor struct {
|
||||
// Code is the error code that this descriptor describes.
|
||||
Code ErrorCode
|
||||
|
||||
// Value provides a unique, string key, often capitalized with
|
||||
// underscores, to identify the error code. This value is used as the
|
||||
// keyed value when serializing api errors.
|
||||
Value string
|
||||
|
||||
// Message is a short, human readable description of the error condition
|
||||
// included in API responses.
|
||||
Message string
|
||||
|
||||
// Description provides a complete account of the errors purpose, suitable
|
||||
// for use in documentation.
|
||||
Description string
|
||||
|
||||
// HTTPStatusCode provides the http status code that is associated with
|
||||
// this error condition.
|
||||
HTTPStatusCode int
|
||||
}
|
||||
|
||||
// ParseErrorCode returns the value by the string error code.
|
||||
// `ErrorCodeUnknown` will be returned if the error is not known.
|
||||
func ParseErrorCode(value string) ErrorCode {
|
||||
ed, ok := idToDescriptors[value]
|
||||
if ok {
|
||||
return ed.Code
|
||||
}
|
||||
|
||||
return ErrorCodeUnknown
|
||||
}
|
||||
|
||||
// Errors provides the envelope for multiple errors and a few sugar methods
|
||||
// for use within the application.
|
||||
type Errors []error
|
||||
|
||||
var _ error = Errors{}
|
||||
|
||||
func (errs Errors) Error() string {
|
||||
switch len(errs) {
|
||||
case 0:
|
||||
return "<nil>"
|
||||
case 1:
|
||||
return errs[0].Error()
|
||||
default:
|
||||
msg := "errors:\n"
|
||||
for _, err := range errs {
|
||||
msg += err.Error() + "\n"
|
||||
}
|
||||
return msg
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the current number of errors.
|
||||
func (errs Errors) Len() int {
|
||||
return len(errs)
|
||||
}
|
||||
|
||||
// MarshalJSON converts slice of error, ErrorCode or Error into a
|
||||
// slice of Error - then serializes
|
||||
func (errs Errors) MarshalJSON() ([]byte, error) {
|
||||
var tmpErrs struct {
|
||||
Errors []Error `json:"errors,omitempty"`
|
||||
}
|
||||
|
||||
for _, daErr := range errs {
|
||||
var err Error
|
||||
|
||||
switch daErr.(type) {
|
||||
case ErrorCode:
|
||||
err = daErr.(ErrorCode).WithDetail(nil)
|
||||
case Error:
|
||||
err = daErr.(Error)
|
||||
default:
|
||||
err = ErrorCodeUnknown.WithDetail(daErr)
|
||||
|
||||
}
|
||||
|
||||
// If the Error struct was setup and they forgot to set the
|
||||
// Message field (meaning its "") then grab it from the ErrCode
|
||||
msg := err.Message
|
||||
if msg == "" {
|
||||
msg = err.Code.Message()
|
||||
}
|
||||
|
||||
tmpErrs.Errors = append(tmpErrs.Errors, Error{
|
||||
Code: err.Code,
|
||||
Message: msg,
|
||||
Detail: err.Detail,
|
||||
})
|
||||
}
|
||||
|
||||
return json.Marshal(tmpErrs)
|
||||
}
|
||||
|
||||
// UnmarshalJSON deserializes []Error and then converts it into slice of
|
||||
// Error or ErrorCode
|
||||
func (errs *Errors) UnmarshalJSON(data []byte) error {
|
||||
var tmpErrs struct {
|
||||
Errors []Error
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(data, &tmpErrs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var newErrs Errors
|
||||
for _, daErr := range tmpErrs.Errors {
|
||||
// If Message is empty or exactly matches the Code's message string
|
||||
// then just use the Code, no need for a full Error struct
|
||||
if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
|
||||
// Error's w/o details get converted to ErrorCode
|
||||
newErrs = append(newErrs, daErr.Code)
|
||||
} else {
|
||||
// Error's w/ details are untouched
|
||||
newErrs = append(newErrs, Error{
|
||||
Code: daErr.Code,
|
||||
Message: daErr.Message,
|
||||
Detail: daErr.Detail,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
*errs = newErrs
|
||||
return nil
|
||||
}
|
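Errors above is the wire envelope that registry API handlers exchange. A minimal sketch of producing that envelope, assuming the errcode package is still importable and using two of the codes pre-registered further down in register.go (the detail string is illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	// Mix a bare ErrorCode with a detailed Error; MarshalJSON normalizes both.
	errs := errcode.Errors{
		errcode.ErrorCodeUnauthorized,
		errcode.ErrorCodeUnknown.WithDetail("something broke"),
	}

	payload, err := json.Marshal(errs)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
	// Output (wrapped here for readability):
	// {"errors":[{"code":"UNAUTHORIZED","message":"authentication required"},
	//            {"code":"UNKNOWN","message":"unknown error","detail":"something broke"}]}
}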
40
vendor/github.com/docker/distribution/registry/api/errcode/handler.go
generated
vendored
@@ -1,40 +0,0 @@
|
||||
package errcode
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
|
||||
// and sets the content-type header to 'application/json'. It will handle
|
||||
// ErrorCoder and Errors, and if necessary will create an envelope.
|
||||
func ServeJSON(w http.ResponseWriter, err error) error {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
var sc int
|
||||
|
||||
switch errs := err.(type) {
|
||||
case Errors:
|
||||
if len(errs) < 1 {
|
||||
break
|
||||
}
|
||||
|
||||
if err, ok := errs[0].(ErrorCoder); ok {
|
||||
sc = err.ErrorCode().Descriptor().HTTPStatusCode
|
||||
}
|
||||
case ErrorCoder:
|
||||
sc = errs.ErrorCode().Descriptor().HTTPStatusCode
|
||||
err = Errors{err} // create an envelope.
|
||||
default:
|
||||
// We just have an unhandled error type, so just place in an envelope
|
||||
// and move along.
|
||||
err = Errors{err}
|
||||
}
|
||||
|
||||
if sc == 0 {
|
||||
sc = http.StatusInternalServerError
|
||||
}
|
||||
|
||||
w.WriteHeader(sc)
|
||||
|
||||
return json.NewEncoder(w).Encode(err)
|
||||
}
|
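ServeJSON above is the piece that turns any of these error values into an HTTP response. A minimal sketch of calling it from a handler; the route and listen address are illustrative:

package main

import (
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	http.HandleFunc("/v2/secret", func(w http.ResponseWriter, r *http.Request) {
		// The status code (403 here) comes from the error's descriptor;
		// the body is the {"errors":[...]} envelope.
		_ = errcode.ServeJSON(w, errcode.ErrorCodeDenied.WithDetail(r.URL.Path))
	})
	_ = http.ListenAndServe(":8080", nil)
}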
138
vendor/github.com/docker/distribution/registry/api/errcode/register.go
generated
vendored
@@ -1,138 +0,0 @@
|
||||
package errcode
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{}
|
||||
idToDescriptors = map[string]ErrorDescriptor{}
|
||||
groupToDescriptors = map[string][]ErrorDescriptor{}
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrorCodeUnknown is a generic error that can be used as a last
|
||||
// resort if there is no situation-specific error message that can be used
|
||||
ErrorCodeUnknown = Register("errcode", ErrorDescriptor{
|
||||
Value: "UNKNOWN",
|
||||
Message: "unknown error",
|
||||
Description: `Generic error returned when the error does not have an
|
||||
API classification.`,
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeUnsupported is returned when an operation is not supported.
|
||||
ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{
|
||||
Value: "UNSUPPORTED",
|
||||
Message: "The operation is unsupported.",
|
||||
Description: `The operation was unsupported due to a missing
|
||||
implementation or invalid set of parameters.`,
|
||||
HTTPStatusCode: http.StatusMethodNotAllowed,
|
||||
})
|
||||
|
||||
// ErrorCodeUnauthorized is returned if a request requires
|
||||
// authentication.
|
||||
ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{
|
||||
Value: "UNAUTHORIZED",
|
||||
Message: "authentication required",
|
||||
Description: `The access controller was unable to authenticate
|
||||
the client. Often this will be accompanied by a
|
||||
Www-Authenticate HTTP response header indicating how to
|
||||
authenticate.`,
|
||||
HTTPStatusCode: http.StatusUnauthorized,
|
||||
})
|
||||
|
||||
// ErrorCodeDenied is returned if a client does not have sufficient
|
||||
// permission to perform an action.
|
||||
ErrorCodeDenied = Register("errcode", ErrorDescriptor{
|
||||
Value: "DENIED",
|
||||
Message: "requested access to the resource is denied",
|
||||
Description: `The access controller denied access for the
|
||||
operation on a resource.`,
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
})
|
||||
|
||||
// ErrorCodeUnavailable provides a common error to report unavailability
|
||||
// of a service or endpoint.
|
||||
ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{
|
||||
Value: "UNAVAILABLE",
|
||||
Message: "service unavailable",
|
||||
Description: "Returned when a service is not available",
|
||||
HTTPStatusCode: http.StatusServiceUnavailable,
|
||||
})
|
||||
|
||||
// ErrorCodeTooManyRequests is returned if a client attempts too many
|
||||
// times to contact a service endpoint.
|
||||
ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{
|
||||
Value: "TOOMANYREQUESTS",
|
||||
Message: "too many requests",
|
||||
Description: `Returned when a client attempts to contact a
|
||||
service too many times`,
|
||||
HTTPStatusCode: http.StatusTooManyRequests,
|
||||
})
|
||||
)
|
||||
|
||||
var nextCode = 1000
|
||||
var registerLock sync.Mutex
|
||||
|
||||
// Register will make the passed-in error known to the environment and
|
||||
// return a new ErrorCode
|
||||
func Register(group string, descriptor ErrorDescriptor) ErrorCode {
|
||||
registerLock.Lock()
|
||||
defer registerLock.Unlock()
|
||||
|
||||
descriptor.Code = ErrorCode(nextCode)
|
||||
|
||||
if _, ok := idToDescriptors[descriptor.Value]; ok {
|
||||
panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value))
|
||||
}
|
||||
if _, ok := errorCodeToDescriptors[descriptor.Code]; ok {
|
||||
panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code))
|
||||
}
|
||||
|
||||
groupToDescriptors[group] = append(groupToDescriptors[group], descriptor)
|
||||
errorCodeToDescriptors[descriptor.Code] = descriptor
|
||||
idToDescriptors[descriptor.Value] = descriptor
|
||||
|
||||
nextCode++
|
||||
return descriptor.Code
|
||||
}
|
||||
|
||||
type byValue []ErrorDescriptor
|
||||
|
||||
func (a byValue) Len() int { return len(a) }
|
||||
func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
||||
|
||||
// GetGroupNames returns the list of Error group names that are registered
|
||||
func GetGroupNames() []string {
|
||||
keys := []string{}
|
||||
|
||||
for k := range groupToDescriptors {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
// GetErrorCodeGroup returns the named group of error descriptors
|
||||
func GetErrorCodeGroup(name string) []ErrorDescriptor {
|
||||
desc := groupToDescriptors[name]
|
||||
sort.Sort(byValue(desc))
|
||||
return desc
|
||||
}
|
||||
|
||||
// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are
|
||||
// registered, irrespective of what group they're in
|
||||
func GetErrorAllDescriptors() []ErrorDescriptor {
|
||||
result := []ErrorDescriptor{}
|
||||
|
||||
for _, group := range GetGroupNames() {
|
||||
result = append(result, GetErrorCodeGroup(group)...)
|
||||
}
|
||||
sort.Sort(byValue(result))
|
||||
return result
|
||||
}
|
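Register above is how a consumer adds its own codes alongside the pre-registered ones earlier in the file. A hypothetical, illustrative registration (the group name, value, and message are made up, not part of the package):

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

// Register panics on duplicate values, so codes are typically package-level vars
// set up once at init time.
var ErrorCodeClusterNotFound = errcode.Register("k3d", errcode.ErrorDescriptor{
	Value:          "CLUSTER_NOT_FOUND",
	Message:        "cluster %s not found",
	Description:    "Returned when the requested cluster does not exist.",
	HTTPStatusCode: http.StatusNotFound,
})

func main() {
	err := ErrorCodeClusterNotFound.WithArgs("dev")
	fmt.Println(err.Error()) // cluster not found: cluster dev not found
}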
2082
vendor/github.com/docker/docker/AUTHORS
generated
vendored
File diff suppressed because it is too large
191
vendor/github.com/docker/docker/LICENSE
generated
vendored
@@ -1,191 +0,0 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2013-2018 Docker, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
19
vendor/github.com/docker/docker/NOTICE
generated
vendored
@@ -1,19 +0,0 @@
|
||||
Docker
|
||||
Copyright 2012-2017 Docker, Inc.
|
||||
|
||||
This product includes software developed at Docker, Inc. (https://www.docker.com).
|
||||
|
||||
This product contains software (https://github.com/kr/pty) developed
|
||||
by Keith Rarick, licensed under the MIT License.
|
||||
|
||||
The following is courtesy of our legal counsel:
|
||||
|
||||
|
||||
Use and transfer of Docker may be subject to certain restrictions by the
|
||||
United States and other governments.
|
||||
It is your responsibility to ensure that your use and/or transfer does not
|
||||
violate applicable laws.
|
||||
|
||||
For more information, please see https://www.bis.doc.gov
|
||||
|
||||
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
|
42
vendor/github.com/docker/docker/api/README.md
generated
vendored
@@ -1,42 +0,0 @@
|
||||
# Working on the Engine API
|
||||
|
||||
The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.
|
||||
|
||||
It consists of various components in this repository:
|
||||
|
||||
- `api/swagger.yaml` A Swagger definition of the API.
|
||||
- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
|
||||
- `cli/` The command-line client.
|
||||
- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
|
||||
- `daemon/` The daemon, which serves the API.
|
||||
|
||||
## Swagger definition
|
||||
|
||||
The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
|
||||
|
||||
1. Automatically generate documentation.
|
||||
2. Automatically generate the Go server and client. (A work-in-progress.)
|
||||
3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
|
||||
|
||||
## Updating the API documentation
|
||||
|
||||
The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation.
|
||||
|
||||
The file is split into two main sections:
|
||||
|
||||
- `definitions`, which defines re-usable objects used in requests and responses
|
||||
- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)
|
||||
|
||||
To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
|
||||
|
||||
There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919).
|
||||
|
||||
`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing.
|
||||
|
||||
## Viewing the API documentation
|
||||
|
||||
When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
|
||||
|
||||
Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
|
||||
|
||||
The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).
|
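The `client/` package mentioned in the README above is the part that third-party Go programs build on. A minimal sketch of talking to the daemon through it; this assumes a reachable Docker daemon and the usual DOCKER_HOST-style environment variables:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	// FromEnv honours DOCKER_HOST etc.; NegotiateAPIVersion picks a version
	// both sides support (see DefaultVersion/MinVersion in api/common*.go below).
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{All: true})
	if err != nil {
		panic(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID[:12], c.Image, c.Status)
	}
}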
11
vendor/github.com/docker/docker/api/common.go
generated
vendored
@@ -1,11 +0,0 @@
|
||||
package api // import "github.com/docker/docker/api"
|
||||
|
||||
// Common constants for daemon and client.
|
||||
const (
|
||||
// DefaultVersion of Current REST API
|
||||
DefaultVersion = "1.41"
|
||||
|
||||
// NoBaseImageSpecifier is the symbol used by the FROM
|
||||
// command to specify that no base image is to be used.
|
||||
NoBaseImageSpecifier = "scratch"
|
||||
)
|
6
vendor/github.com/docker/docker/api/common_unix.go
generated
vendored
@@ -1,6 +0,0 @@
|
||||
// +build !windows
|
||||
|
||||
package api // import "github.com/docker/docker/api"
|
||||
|
||||
// MinVersion represents Minimum REST API version supported
|
||||
const MinVersion = "1.12"
|
8
vendor/github.com/docker/docker/api/common_windows.go
generated
vendored
@@ -1,8 +0,0 @@
|
||||
package api // import "github.com/docker/docker/api"
|
||||
|
||||
// MinVersion represents Minimum REST API version supported
|
||||
// Technically the first daemon API version released on Windows is v1.25 in
|
||||
// engine version 1.13. However, some clients are explicitly using downlevel
|
||||
// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
|
||||
// Hence also allowing 1.24 on Windows.
|
||||
const MinVersion string = "1.24"
|
12
vendor/github.com/docker/docker/api/swagger-gen.yaml
generated
vendored
@@ -1,12 +0,0 @@
|
||||
|
||||
layout:
|
||||
models:
|
||||
- name: definition
|
||||
source: asset:model
|
||||
target: "{{ joinFilePath .Target .ModelPackage }}"
|
||||
file_name: "{{ (snakize (pascalize .Name)) }}.go"
|
||||
operations:
|
||||
- name: handler
|
||||
source: asset:serverOperation
|
||||
target: "{{ joinFilePath .Target .APIPackage .Package }}"
|
||||
file_name: "{{ (snakize (pascalize .Name)) }}.go"
|
10453
vendor/github.com/docker/docker/api/swagger.yaml
generated
vendored
File diff suppressed because it is too large
22
vendor/github.com/docker/docker/api/types/auth.go
generated
vendored
@@ -1,22 +0,0 @@
|
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
// AuthConfig contains authorization information for connecting to a Registry
|
||||
type AuthConfig struct {
|
||||
Username string `json:"username,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
Auth string `json:"auth,omitempty"`
|
||||
|
||||
// Email is an optional value associated with the username.
|
||||
// This field is deprecated and will be removed in a later
|
||||
// version of docker.
|
||||
Email string `json:"email,omitempty"`
|
||||
|
||||
ServerAddress string `json:"serveraddress,omitempty"`
|
||||
|
||||
// IdentityToken is used to authenticate the user and get
|
||||
// an access token for the registry.
|
||||
IdentityToken string `json:"identitytoken,omitempty"`
|
||||
|
||||
// RegistryToken is a bearer token to be sent to a registry
|
||||
RegistryToken string `json:"registrytoken,omitempty"`
|
||||
}
|
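AuthConfig above is not sent to the daemon as plain JSON: for the RegistryAuth fields and the X-Registry-Auth header it is JSON-encoded and then base64url-encoded, which is what the Docker CLI does as well. A minimal sketch; the credentials and registry address are placeholders:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	auth := types.AuthConfig{
		Username:      "someuser",     // illustrative credentials
		Password:      "somepassword",
		ServerAddress: "registry.example.com",
	}

	buf, err := json.Marshal(auth)
	if err != nil {
		panic(err)
	}

	// This is the value expected in e.g. ImagePullOptions.RegistryAuth
	// or the X-Registry-Auth request header.
	registryAuth := base64.URLEncoding.EncodeToString(buf)
	fmt.Println(registryAuth)
}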
23
vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
generated
vendored
@@ -1,23 +0,0 @@
|
||||
package blkiodev // import "github.com/docker/docker/api/types/blkiodev"
|
||||
|
||||
import "fmt"
|
||||
|
||||
// WeightDevice is a structure that holds device:weight pair
|
||||
type WeightDevice struct {
|
||||
Path string
|
||||
Weight uint16
|
||||
}
|
||||
|
||||
func (w *WeightDevice) String() string {
|
||||
return fmt.Sprintf("%s:%d", w.Path, w.Weight)
|
||||
}
|
||||
|
||||
// ThrottleDevice is a structure that holds device:rate_per_second pair
|
||||
type ThrottleDevice struct {
|
||||
Path string
|
||||
Rate uint64
|
||||
}
|
||||
|
||||
func (t *ThrottleDevice) String() string {
|
||||
return fmt.Sprintf("%s:%d", t.Path, t.Rate)
|
||||
}
|
415
vendor/github.com/docker/docker/api/types/client.go
generated
vendored
@@ -1,415 +0,0 @@
|
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"net"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
units "github.com/docker/go-units"
|
||||
)
|
||||
|
||||
// CheckpointCreateOptions holds parameters to create a checkpoint from a container
|
||||
type CheckpointCreateOptions struct {
|
||||
CheckpointID string
|
||||
CheckpointDir string
|
||||
Exit bool
|
||||
}
|
||||
|
||||
// CheckpointListOptions holds parameters to list checkpoints for a container
|
||||
type CheckpointListOptions struct {
|
||||
CheckpointDir string
|
||||
}
|
||||
|
||||
// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
|
||||
type CheckpointDeleteOptions struct {
|
||||
CheckpointID string
|
||||
CheckpointDir string
|
||||
}
|
||||
|
||||
// ContainerAttachOptions holds parameters to attach to a container.
|
||||
type ContainerAttachOptions struct {
|
||||
Stream bool
|
||||
Stdin bool
|
||||
Stdout bool
|
||||
Stderr bool
|
||||
DetachKeys string
|
||||
Logs bool
|
||||
}
|
||||
|
||||
// ContainerCommitOptions holds parameters to commit changes into a container.
|
||||
type ContainerCommitOptions struct {
|
||||
Reference string
|
||||
Comment string
|
||||
Author string
|
||||
Changes []string
|
||||
Pause bool
|
||||
Config *container.Config
|
||||
}
|
||||
|
||||
// ContainerExecInspect holds information returned by exec inspect.
|
||||
type ContainerExecInspect struct {
|
||||
ExecID string
|
||||
ContainerID string
|
||||
Running bool
|
||||
ExitCode int
|
||||
Pid int
|
||||
}
|
||||
|
||||
// ContainerListOptions holds parameters to list containers with.
|
||||
type ContainerListOptions struct {
|
||||
Quiet bool
|
||||
Size bool
|
||||
All bool
|
||||
Latest bool
|
||||
Since string
|
||||
Before string
|
||||
Limit int
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// ContainerLogsOptions holds parameters to filter logs with.
|
||||
type ContainerLogsOptions struct {
|
||||
ShowStdout bool
|
||||
ShowStderr bool
|
||||
Since string
|
||||
Until string
|
||||
Timestamps bool
|
||||
Follow bool
|
||||
Tail string
|
||||
Details bool
|
||||
}
|
||||
|
||||
// ContainerRemoveOptions holds parameters to remove containers.
|
||||
type ContainerRemoveOptions struct {
|
||||
RemoveVolumes bool
|
||||
RemoveLinks bool
|
||||
Force bool
|
||||
}
|
||||
|
||||
// ContainerStartOptions holds parameters to start containers.
|
||||
type ContainerStartOptions struct {
|
||||
CheckpointID string
|
||||
CheckpointDir string
|
||||
}
|
||||
|
||||
// CopyToContainerOptions holds information
|
||||
// about files to copy into a container
|
||||
type CopyToContainerOptions struct {
|
||||
AllowOverwriteDirWithFile bool
|
||||
CopyUIDGID bool
|
||||
}
|
||||
|
||||
// EventsOptions holds parameters to filter events with.
|
||||
type EventsOptions struct {
|
||||
Since string
|
||||
Until string
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// NetworkListOptions holds parameters to filter the list of networks with.
|
||||
type NetworkListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// HijackedResponse holds connection information for a hijacked request.
|
||||
type HijackedResponse struct {
|
||||
Conn net.Conn
|
||||
Reader *bufio.Reader
|
||||
}
|
||||
|
||||
// Close closes the hijacked connection and reader.
|
||||
func (h *HijackedResponse) Close() {
|
||||
h.Conn.Close()
|
||||
}
|
||||
|
||||
// CloseWriter is an interface that implements structs
|
||||
// that close input streams to prevent from writing.
|
||||
type CloseWriter interface {
|
||||
CloseWrite() error
|
||||
}
|
||||
|
||||
// CloseWrite closes a readWriter for writing.
|
||||
func (h *HijackedResponse) CloseWrite() error {
|
||||
if conn, ok := h.Conn.(CloseWriter); ok {
|
||||
return conn.CloseWrite()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ImageBuildOptions holds the information
|
||||
// necessary to build images.
|
||||
type ImageBuildOptions struct {
|
||||
Tags []string
|
||||
SuppressOutput bool
|
||||
RemoteContext string
|
||||
NoCache bool
|
||||
Remove bool
|
||||
ForceRemove bool
|
||||
PullParent bool
|
||||
Isolation container.Isolation
|
||||
CPUSetCPUs string
|
||||
CPUSetMems string
|
||||
CPUShares int64
|
||||
CPUQuota int64
|
||||
CPUPeriod int64
|
||||
Memory int64
|
||||
MemorySwap int64
|
||||
CgroupParent string
|
||||
NetworkMode string
|
||||
ShmSize int64
|
||||
Dockerfile string
|
||||
Ulimits []*units.Ulimit
|
||||
// BuildArgs needs to be a *string instead of just a string so that
|
||||
// we can tell the difference between "" (empty string) and no value
|
||||
// at all (nil). See the parsing of buildArgs in
|
||||
// api/server/router/build/build_routes.go for even more info.
|
||||
BuildArgs map[string]*string
|
||||
AuthConfigs map[string]AuthConfig
|
||||
Context io.Reader
|
||||
Labels map[string]string
|
||||
// squash the resulting image's layers to the parent
|
||||
// preserves the original image and creates a new one from the parent with all
|
||||
// the changes applied to a single layer
|
||||
Squash bool
|
||||
// CacheFrom specifies images that are used for matching cache. Images
|
||||
// specified here do not need to have a valid parent chain to match cache.
|
||||
CacheFrom []string
|
||||
SecurityOpt []string
|
||||
ExtraHosts []string // List of extra hosts
|
||||
Target string
|
||||
SessionID string
|
||||
Platform string
|
||||
// Version specifies the version of the underlying builder to use
|
||||
Version BuilderVersion
|
||||
// BuildID is an optional identifier that can be passed together with the
|
||||
// build request. The same identifier can be used to gracefully cancel the
|
||||
// build with the cancel request.
|
||||
BuildID string
|
||||
// Outputs defines configurations for exporting build results. Only supported
|
||||
// in BuildKit mode
|
||||
Outputs []ImageBuildOutput
|
||||
}
|
||||
|
||||
// ImageBuildOutput defines configuration for exporting a build result
|
||||
type ImageBuildOutput struct {
|
||||
Type string
|
||||
Attrs map[string]string
|
||||
}
|
||||
|
||||
// BuilderVersion sets the version of underlying builder to use
|
||||
type BuilderVersion string
|
||||
|
||||
const (
|
||||
// BuilderV1 is the first generation builder in docker daemon
|
||||
BuilderV1 BuilderVersion = "1"
|
||||
// BuilderBuildKit is builder based on moby/buildkit project
|
||||
BuilderBuildKit = "2"
|
||||
)
|
||||
|
||||
// ImageBuildResponse holds information
|
||||
// returned by a server after building
|
||||
// an image.
|
||||
type ImageBuildResponse struct {
|
||||
Body io.ReadCloser
|
||||
OSType string
|
||||
}
|
||||
|
||||
// ImageCreateOptions holds information to create images.
|
||||
type ImageCreateOptions struct {
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
|
||||
Platform string // Platform is the target platform of the image if it needs to be pulled from the registry.
|
||||
}
|
||||
|
||||
// ImageImportSource holds source information for ImageImport
|
||||
type ImageImportSource struct {
|
||||
Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
|
||||
SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
|
||||
}
|
||||
|
||||
// ImageImportOptions holds information to import images from the client host.
|
||||
type ImageImportOptions struct {
|
||||
Tag string // Tag is the name to tag this image with. This attribute is deprecated.
|
||||
Message string // Message is the message to tag the image with
|
||||
Changes []string // Changes are the raw changes to apply to this image
|
||||
Platform string // Platform is the target platform of the image
|
||||
}
|
||||
|
||||
// ImageListOptions holds parameters to filter the list of images with.
|
||||
type ImageListOptions struct {
|
||||
All bool
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// ImageLoadResponse returns information to the client about a load process.
|
||||
type ImageLoadResponse struct {
|
||||
// Body must be closed to avoid a resource leak
|
||||
Body io.ReadCloser
|
||||
JSON bool
|
||||
}
|
||||
|
||||
// ImagePullOptions holds information to pull images.
|
||||
type ImagePullOptions struct {
|
||||
All bool
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
PrivilegeFunc RequestPrivilegeFunc
|
||||
Platform string
|
||||
}
|
||||
|
||||
// RequestPrivilegeFunc is a function interface that
|
||||
// clients can supply to retry operations after
|
||||
// getting an authorization error.
|
||||
// This function returns the registry authentication
|
||||
// header value in base 64 format, or an error
|
||||
// if the privilege request fails.
|
||||
type RequestPrivilegeFunc func() (string, error)
|
||||
|
||||
// ImagePushOptions holds information to push images.
|
||||
type ImagePushOptions ImagePullOptions
|
||||
|
||||
// ImageRemoveOptions holds parameters to remove images.
|
||||
type ImageRemoveOptions struct {
|
||||
Force bool
|
||||
PruneChildren bool
|
||||
}
|
||||
|
||||
// ImageSearchOptions holds parameters to search images with.
|
||||
type ImageSearchOptions struct {
|
||||
RegistryAuth string
|
||||
PrivilegeFunc RequestPrivilegeFunc
|
||||
Filters filters.Args
|
||||
Limit int
|
||||
}
|
||||
|
||||
// ResizeOptions holds parameters to resize a tty.
|
||||
// It can be used to resize container ttys and
|
||||
// exec process ttys too.
|
||||
type ResizeOptions struct {
|
||||
Height uint
|
||||
Width uint
|
||||
}
|
||||
|
||||
// NodeListOptions holds parameters to list nodes with.
|
||||
type NodeListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// NodeRemoveOptions holds parameters to remove nodes with.
|
||||
type NodeRemoveOptions struct {
|
||||
Force bool
|
||||
}
|
||||
|
||||
// ServiceCreateOptions contains the options to use when creating a service.
|
||||
type ServiceCreateOptions struct {
|
||||
// EncodedRegistryAuth is the encoded registry authorization credentials to
|
||||
// use when updating the service.
|
||||
//
|
||||
// This field follows the format of the X-Registry-Auth header.
|
||||
EncodedRegistryAuth string
|
||||
|
||||
// QueryRegistry indicates whether the service update requires
|
||||
// contacting a registry. A registry may be contacted to retrieve
|
||||
// the image digest and manifest, which in turn can be used to update
|
||||
// platform or other information about the service.
|
||||
QueryRegistry bool
|
||||
}
|
||||
|
||||
// ServiceCreateResponse contains the information returned to a client
|
||||
// on the creation of a new service.
|
||||
type ServiceCreateResponse struct {
|
||||
// ID is the ID of the created service.
|
||||
ID string
|
||||
// Warnings is a set of non-fatal warning messages to pass on to the user.
|
||||
Warnings []string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Values for RegistryAuthFrom in ServiceUpdateOptions
|
||||
const (
|
||||
RegistryAuthFromSpec = "spec"
|
||||
RegistryAuthFromPreviousSpec = "previous-spec"
|
||||
)
|
||||
|
||||
// ServiceUpdateOptions contains the options to be used for updating services.
|
||||
type ServiceUpdateOptions struct {
|
||||
// EncodedRegistryAuth is the encoded registry authorization credentials to
|
||||
// use when updating the service.
|
||||
//
|
||||
// This field follows the format of the X-Registry-Auth header.
|
||||
EncodedRegistryAuth string
|
||||
|
||||
// TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
|
||||
// into this field. While it does open API users up to racy writes, most
|
||||
// users may not need that level of consistency in practice.
|
||||
|
||||
// RegistryAuthFrom specifies where to find the registry authorization
|
||||
// credentials if they are not given in EncodedRegistryAuth. Valid
|
||||
// values are "spec" and "previous-spec".
|
||||
RegistryAuthFrom string
|
||||
|
||||
// Rollback indicates whether a server-side rollback should be
|
||||
// performed. When this is set, the provided spec will be ignored.
|
||||
// The valid values are "previous" and "none". An empty value is the
|
||||
// same as "none".
|
||||
Rollback string
|
||||
|
||||
// QueryRegistry indicates whether the service update requires
|
||||
// contacting a registry. A registry may be contacted to retrieve
|
||||
// the image digest and manifest, which in turn can be used to update
|
||||
// platform or other information about the service.
|
||||
QueryRegistry bool
|
||||
}
|
||||
|
||||
// ServiceListOptions holds parameters to list services with.
|
||||
type ServiceListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// ServiceInspectOptions holds parameters related to the "service inspect"
|
||||
// operation.
|
||||
type ServiceInspectOptions struct {
|
||||
InsertDefaults bool
|
||||
}
|
||||
|
||||
// TaskListOptions holds parameters to list tasks with.
|
||||
type TaskListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// PluginRemoveOptions holds parameters to remove plugins.
|
||||
type PluginRemoveOptions struct {
|
||||
Force bool
|
||||
}
|
||||
|
||||
// PluginEnableOptions holds parameters to enable plugins.
|
||||
type PluginEnableOptions struct {
|
||||
Timeout int
|
||||
}
|
||||
|
||||
// PluginDisableOptions holds parameters to disable plugins.
|
||||
type PluginDisableOptions struct {
|
||||
Force bool
|
||||
}
|
||||
|
||||
// PluginInstallOptions holds parameters to install a plugin.
|
||||
type PluginInstallOptions struct {
|
||||
Disabled bool
|
||||
AcceptAllPermissions bool
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
RemoteRef string // RemoteRef is the plugin name on the registry
|
||||
PrivilegeFunc RequestPrivilegeFunc
|
||||
AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
|
||||
Args []string
|
||||
}
|
||||
|
||||
// SwarmUnlockKeyResponse contains the response for Engine API:
|
||||
// GET /swarm/unlockkey
|
||||
type SwarmUnlockKeyResponse struct {
|
||||
// UnlockKey is the unlock key in ASCII-armored format.
|
||||
UnlockKey string
|
||||
}
|
||||
|
||||
// PluginCreateOptions hold all options to plugin create.
|
||||
type PluginCreateOptions struct {
|
||||
RepoName string
|
||||
}
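The option structs removed above are plain parameter carriers for the Docker API client. A minimal, hypothetical sketch of how they are typically passed to a client call (the function name, client variable, and image reference are illustrative and not taken from this repository):

// Illustrative sketch, not part of this commit.
package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func pullImage(ctx context.Context, docker *client.Client, ref string) error {
	// ImagePullOptions as defined above; the zero value pulls a single image
	// without registry credentials.
	reader, err := docker.ImagePull(ctx, ref, types.ImagePullOptions{})
	if err != nil {
		return err
	}
	defer reader.Close()
	// Pull progress is streamed as JSON messages; copy it to stdout here.
	_, err = io.Copy(os.Stdout, reader)
	return err
}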
64
vendor/github.com/docker/docker/api/types/configs.go
generated
vendored
@ -1,64 +0,0 @@
package types // import "github.com/docker/docker/api/types"

import (
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/network"
)

// configs holds structs used for internal communication between the
// frontend (such as an http server) and the backend (such as the
// docker daemon).

// ContainerCreateConfig is the parameter set to ContainerCreate()
type ContainerCreateConfig struct {
	Name             string
	Config           *container.Config
	HostConfig       *container.HostConfig
	NetworkingConfig *network.NetworkingConfig
	AdjustCPUShares  bool
}

// ContainerRmConfig holds arguments for the container remove
// operation. This struct is used to tell the backend what operations
// to perform.
type ContainerRmConfig struct {
	ForceRemove, RemoveVolume, RemoveLink bool
}

// ExecConfig is a small subset of the Config struct that holds the configuration
// for the exec feature of docker.
type ExecConfig struct {
	User         string   // User that will run the command
	Privileged   bool     // Is the container in privileged mode
	Tty          bool     // Attach standard streams to a tty.
	AttachStdin  bool     // Attach the standard input, makes possible user interaction
	AttachStderr bool     // Attach the standard error
	AttachStdout bool     // Attach the standard output
	Detach       bool     // Execute in detach mode
	DetachKeys   string   // Escape keys for detach
	Env          []string // Environment variables
	WorkingDir   string   // Working directory
	Cmd          []string // Execution commands and args
}

// PluginRmConfig holds arguments for plugin remove.
type PluginRmConfig struct {
	ForceRemove bool
}

// PluginEnableConfig holds arguments for plugin enable
type PluginEnableConfig struct {
	Timeout int
}

// PluginDisableConfig holds arguments for plugin disable.
type PluginDisableConfig struct {
	ForceDisable bool
}

// NetworkListConfig stores the options available for listing networks
type NetworkListConfig struct {
	// TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here
	Detailed bool
	Verbose  bool
}
69
vendor/github.com/docker/docker/api/types/container/config.go
generated
vendored
@ -1,69 +0,0 @@
|
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
// MinimumDuration puts a minimum on user configured duration.
|
||||
// This is to prevent API error on time unit. For example, API may
|
||||
// set 3 as healthcheck interval with intention of 3 seconds, but
|
||||
// Docker interprets it as 3 nanoseconds.
|
||||
const MinimumDuration = 1 * time.Millisecond
|
||||
|
||||
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
|
||||
type HealthConfig struct {
|
||||
// Test is the test to perform to check that the container is healthy.
|
||||
// An empty slice means to inherit the default.
|
||||
// The options are:
|
||||
// {} : inherit healthcheck
|
||||
// {"NONE"} : disable healthcheck
|
||||
// {"CMD", args...} : exec arguments directly
|
||||
// {"CMD-SHELL", command} : run command with system's default shell
|
||||
Test []string `json:",omitempty"`
|
||||
|
||||
// Zero means to inherit. Durations are expressed as integer nanoseconds.
|
||||
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
|
||||
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
|
||||
StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down.
|
||||
|
||||
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
|
||||
// Zero means inherit.
|
||||
Retries int `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Config contains the configuration data about a container.
|
||||
// It should hold only portable information about the container.
|
||||
// Here, "portable" means "independent from the host we are running on".
|
||||
// Non-portable information *should* appear in HostConfig.
|
||||
// All fields added to this struct must be marked `omitempty` to keep getting
|
||||
// predictable hashes from the old `v1Compatibility` configuration.
|
||||
type Config struct {
|
||||
Hostname string // Hostname
|
||||
Domainname string // Domainname
|
||||
User string // User that will run the command(s) inside the container, also support user:group
|
||||
AttachStdin bool // Attach the standard input, makes possible user interaction
|
||||
AttachStdout bool // Attach the standard output
|
||||
AttachStderr bool // Attach the standard error
|
||||
ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports
|
||||
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
|
||||
OpenStdin bool // Open stdin
|
||||
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
|
||||
Env []string // List of environment variable to set in the container
|
||||
Cmd strslice.StrSlice // Command to run when starting the container
|
||||
Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
|
||||
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific).
|
||||
Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
|
||||
Volumes map[string]struct{} // List of volumes (mounts) used for the container
|
||||
WorkingDir string // Current directory (PWD) in which the command will be launched
|
||||
Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
|
||||
NetworkDisabled bool `json:",omitempty"` // Is network disabled
|
||||
MacAddress string `json:",omitempty"` // Mac Address of the container
|
||||
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
|
||||
Labels map[string]string // List of labels set to this container
|
||||
StopSignal string `json:",omitempty"` // Signal to stop a container
|
||||
StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
|
||||
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
|
||||
}
|
21
vendor/github.com/docker/docker/api/types/container/container_changes.go
generated
vendored
@ -1,21 +0,0 @@
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

// ContainerChangeResponseItem change item in response to ContainerChanges operation
// swagger:model ContainerChangeResponseItem
type ContainerChangeResponseItem struct {

	// Kind of change
	// Required: true
	Kind uint8 `json:"Kind"`

	// Path to file that has changed
	// Required: true
	Path string `json:"Path"`
}
21
vendor/github.com/docker/docker/api/types/container/container_create.go
generated
vendored
@ -1,21 +0,0 @@
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

// ContainerCreateCreatedBody OK response to ContainerCreate operation
// swagger:model ContainerCreateCreatedBody
type ContainerCreateCreatedBody struct {

	// The ID of the created container
	// Required: true
	ID string `json:"Id"`

	// Warnings encountered when creating the container
	// Required: true
	Warnings []string `json:"Warnings"`
}
21
vendor/github.com/docker/docker/api/types/container/container_top.go
generated
vendored
@ -1,21 +0,0 @@
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

// ContainerTopOKBody OK response to ContainerTop operation
// swagger:model ContainerTopOKBody
type ContainerTopOKBody struct {

	// Each process running in the container, where each process is an array of values corresponding to the titles
	// Required: true
	Processes [][]string `json:"Processes"`

	// The ps column titles
	// Required: true
	Titles []string `json:"Titles"`
}
17
vendor/github.com/docker/docker/api/types/container/container_update.go
generated
vendored
@ -1,17 +0,0 @@
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

// ContainerUpdateOKBody OK response to ContainerUpdate operation
// swagger:model ContainerUpdateOKBody
type ContainerUpdateOKBody struct {

	// warnings
	// Required: true
	Warnings []string `json:"Warnings"`
}
29
vendor/github.com/docker/docker/api/types/container/container_wait.go
generated
vendored
@ -1,29 +0,0 @@
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

// ContainerWaitOKBodyError container waiting error, if any
// swagger:model ContainerWaitOKBodyError
type ContainerWaitOKBodyError struct {

	// Details of an error
	Message string `json:"Message,omitempty"`
}

// ContainerWaitOKBody OK response to ContainerWait operation
// swagger:model ContainerWaitOKBody
type ContainerWaitOKBody struct {

	// error
	// Required: true
	Error *ContainerWaitOKBodyError `json:"Error"`

	// Exit code of the container
	// Required: true
	StatusCode int64 `json:"StatusCode"`
}
448
vendor/github.com/docker/docker/api/types/container/host_config.go
generated
vendored
@ -1,448 +0,0 @@
|
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types/blkiodev"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/docker/go-units"
|
||||
)
|
||||
|
||||
// CgroupnsMode represents the cgroup namespace mode of the container
|
||||
type CgroupnsMode string
|
||||
|
||||
// IsPrivate indicates whether the container uses its own private cgroup namespace
|
||||
func (c CgroupnsMode) IsPrivate() bool {
|
||||
return c == "private"
|
||||
}
|
||||
|
||||
// IsHost indicates whether the container shares the host's cgroup namespace
|
||||
func (c CgroupnsMode) IsHost() bool {
|
||||
return c == "host"
|
||||
}
|
||||
|
||||
// IsEmpty indicates whether the container cgroup namespace mode is unset
|
||||
func (c CgroupnsMode) IsEmpty() bool {
|
||||
return c == ""
|
||||
}
|
||||
|
||||
// Valid indicates whether the cgroup namespace mode is valid
|
||||
func (c CgroupnsMode) Valid() bool {
|
||||
return c.IsEmpty() || c.IsPrivate() || c.IsHost()
|
||||
}
|
||||
|
||||
// Isolation represents the isolation technology of a container. The supported
|
||||
// values are platform specific
|
||||
type Isolation string
|
||||
|
||||
// IsDefault indicates the default isolation technology of a container. On Linux this
|
||||
// is the native driver. On Windows, this is a Windows Server Container.
|
||||
func (i Isolation) IsDefault() bool {
|
||||
return strings.ToLower(string(i)) == "default" || string(i) == ""
|
||||
}
|
||||
|
||||
// IsHyperV indicates the use of a Hyper-V partition for isolation
|
||||
func (i Isolation) IsHyperV() bool {
|
||||
return strings.ToLower(string(i)) == "hyperv"
|
||||
}
|
||||
|
||||
// IsProcess indicates the use of process isolation
|
||||
func (i Isolation) IsProcess() bool {
|
||||
return strings.ToLower(string(i)) == "process"
|
||||
}
|
||||
|
||||
const (
|
||||
// IsolationEmpty is unspecified (same behavior as default)
|
||||
IsolationEmpty = Isolation("")
|
||||
// IsolationDefault is the default isolation mode on current daemon
|
||||
IsolationDefault = Isolation("default")
|
||||
// IsolationProcess is process isolation mode
|
||||
IsolationProcess = Isolation("process")
|
||||
// IsolationHyperV is HyperV isolation mode
|
||||
IsolationHyperV = Isolation("hyperv")
|
||||
)
|
||||
|
||||
// IpcMode represents the container ipc stack.
|
||||
type IpcMode string
|
||||
|
||||
// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared.
|
||||
func (n IpcMode) IsPrivate() bool {
|
||||
return n == "private"
|
||||
}
|
||||
|
||||
// IsHost indicates whether the container shares the host's ipc namespace.
|
||||
func (n IpcMode) IsHost() bool {
|
||||
return n == "host"
|
||||
}
|
||||
|
||||
// IsShareable indicates whether the container's ipc namespace can be shared with another container.
|
||||
func (n IpcMode) IsShareable() bool {
|
||||
return n == "shareable"
|
||||
}
|
||||
|
||||
// IsContainer indicates whether the container uses another container's ipc namespace.
|
||||
func (n IpcMode) IsContainer() bool {
|
||||
parts := strings.SplitN(string(n), ":", 2)
|
||||
return len(parts) > 1 && parts[0] == "container"
|
||||
}
|
||||
|
||||
// IsNone indicates whether container IpcMode is set to "none".
|
||||
func (n IpcMode) IsNone() bool {
|
||||
return n == "none"
|
||||
}
|
||||
|
||||
// IsEmpty indicates whether container IpcMode is empty
|
||||
func (n IpcMode) IsEmpty() bool {
|
||||
return n == ""
|
||||
}
|
||||
|
||||
// Valid indicates whether the ipc mode is valid.
|
||||
func (n IpcMode) Valid() bool {
|
||||
return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer()
|
||||
}
|
||||
|
||||
// Container returns the name of the container whose ipc stack is going to be used.
|
||||
func (n IpcMode) Container() string {
|
||||
parts := strings.SplitN(string(n), ":", 2)
|
||||
if len(parts) > 1 && parts[0] == "container" {
|
||||
return parts[1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// NetworkMode represents the container network stack.
|
||||
type NetworkMode string
|
||||
|
||||
// IsNone indicates whether container isn't using a network stack.
|
||||
func (n NetworkMode) IsNone() bool {
|
||||
return n == "none"
|
||||
}
|
||||
|
||||
// IsDefault indicates whether container uses the default network stack.
|
||||
func (n NetworkMode) IsDefault() bool {
|
||||
return n == "default"
|
||||
}
|
||||
|
||||
// IsPrivate indicates whether container uses its private network stack.
|
||||
func (n NetworkMode) IsPrivate() bool {
|
||||
return !(n.IsHost() || n.IsContainer())
|
||||
}
|
||||
|
||||
// IsContainer indicates whether container uses a container network stack.
|
||||
func (n NetworkMode) IsContainer() bool {
|
||||
parts := strings.SplitN(string(n), ":", 2)
|
||||
return len(parts) > 1 && parts[0] == "container"
|
||||
}
|
||||
|
||||
// ConnectedContainer is the id of the container whose network this container is connected to.
|
||||
func (n NetworkMode) ConnectedContainer() string {
|
||||
parts := strings.SplitN(string(n), ":", 2)
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// UserDefined indicates user-created network
|
||||
func (n NetworkMode) UserDefined() string {
|
||||
if n.IsUserDefined() {
|
||||
return string(n)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// UsernsMode represents userns mode in the container.
|
||||
type UsernsMode string
|
||||
|
||||
// IsHost indicates whether the container uses the host's userns.
|
||||
func (n UsernsMode) IsHost() bool {
|
||||
return n == "host"
|
||||
}
|
||||
|
||||
// IsPrivate indicates whether the container uses a private userns.
|
||||
func (n UsernsMode) IsPrivate() bool {
|
||||
return !(n.IsHost())
|
||||
}
|
||||
|
||||
// Valid indicates whether the userns is valid.
|
||||
func (n UsernsMode) Valid() bool {
|
||||
parts := strings.Split(string(n), ":")
|
||||
switch mode := parts[0]; mode {
|
||||
case "", "host":
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// CgroupSpec represents the cgroup to use for the container.
|
||||
type CgroupSpec string
|
||||
|
||||
// IsContainer indicates whether the container is using another container cgroup
|
||||
func (c CgroupSpec) IsContainer() bool {
|
||||
parts := strings.SplitN(string(c), ":", 2)
|
||||
return len(parts) > 1 && parts[0] == "container"
|
||||
}
|
||||
|
||||
// Valid indicates whether the cgroup spec is valid.
|
||||
func (c CgroupSpec) Valid() bool {
|
||||
return c.IsContainer() || c == ""
|
||||
}
|
||||
|
||||
// Container returns the name of the container whose cgroup will be used.
|
||||
func (c CgroupSpec) Container() string {
|
||||
parts := strings.SplitN(string(c), ":", 2)
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// UTSMode represents the UTS namespace of the container.
|
||||
type UTSMode string
|
||||
|
||||
// IsPrivate indicates whether the container uses its private UTS namespace.
|
||||
func (n UTSMode) IsPrivate() bool {
|
||||
return !(n.IsHost())
|
||||
}
|
||||
|
||||
// IsHost indicates whether the container uses the host's UTS namespace.
|
||||
func (n UTSMode) IsHost() bool {
|
||||
return n == "host"
|
||||
}
|
||||
|
||||
// Valid indicates whether the UTS namespace is valid.
|
||||
func (n UTSMode) Valid() bool {
|
||||
parts := strings.Split(string(n), ":")
|
||||
switch mode := parts[0]; mode {
|
||||
case "", "host":
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// PidMode represents the pid namespace of the container.
|
||||
type PidMode string
|
||||
|
||||
// IsPrivate indicates whether the container uses its own new pid namespace.
|
||||
func (n PidMode) IsPrivate() bool {
|
||||
return !(n.IsHost() || n.IsContainer())
|
||||
}
|
||||
|
||||
// IsHost indicates whether the container uses the host's pid namespace.
|
||||
func (n PidMode) IsHost() bool {
|
||||
return n == "host"
|
||||
}
|
||||
|
||||
// IsContainer indicates whether the container uses a container's pid namespace.
|
||||
func (n PidMode) IsContainer() bool {
|
||||
parts := strings.SplitN(string(n), ":", 2)
|
||||
return len(parts) > 1 && parts[0] == "container"
|
||||
}
|
||||
|
||||
// Valid indicates whether the pid namespace is valid.
|
||||
func (n PidMode) Valid() bool {
|
||||
parts := strings.Split(string(n), ":")
|
||||
switch mode := parts[0]; mode {
|
||||
case "", "host":
|
||||
case "container":
|
||||
if len(parts) != 2 || parts[1] == "" {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Container returns the name of the container whose pid namespace is going to be used.
|
||||
func (n PidMode) Container() string {
|
||||
parts := strings.SplitN(string(n), ":", 2)
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// DeviceRequest represents a request for devices from a device driver.
|
||||
// Used by GPU device drivers.
|
||||
type DeviceRequest struct {
|
||||
Driver string // Name of device driver
|
||||
Count int // Number of devices to request (-1 = All)
|
||||
DeviceIDs []string // List of device IDs as recognizable by the device driver
|
||||
Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu")
|
||||
Options map[string]string // Options to pass onto the device driver
|
||||
}
|
||||
|
||||
// DeviceMapping represents the device mapping between the host and the container.
|
||||
type DeviceMapping struct {
|
||||
PathOnHost string
|
||||
PathInContainer string
|
||||
CgroupPermissions string
|
||||
}
|
||||
|
||||
// RestartPolicy represents the restart policies of the container.
|
||||
type RestartPolicy struct {
|
||||
Name string
|
||||
MaximumRetryCount int
|
||||
}
|
||||
|
||||
// IsNone indicates whether the container has the "no" restart policy.
|
||||
// This means the container will not automatically restart when exiting.
|
||||
func (rp *RestartPolicy) IsNone() bool {
|
||||
return rp.Name == "no" || rp.Name == ""
|
||||
}
|
||||
|
||||
// IsAlways indicates whether the container has the "always" restart policy.
|
||||
// This means the container will automatically restart regardless of the exit status.
|
||||
func (rp *RestartPolicy) IsAlways() bool {
|
||||
return rp.Name == "always"
|
||||
}
|
||||
|
||||
// IsOnFailure indicates whether the container has the "on-failure" restart policy.
|
||||
// This means the container will automatically restart if it exits with a non-zero exit status.
|
||||
func (rp *RestartPolicy) IsOnFailure() bool {
|
||||
return rp.Name == "on-failure"
|
||||
}
|
||||
|
||||
// IsUnlessStopped indicates whether the container has the
|
||||
// "unless-stopped" restart policy. This means the container will
|
||||
// automatically restart unless user has put it to stopped state.
|
||||
func (rp *RestartPolicy) IsUnlessStopped() bool {
|
||||
return rp.Name == "unless-stopped"
|
||||
}
|
||||
|
||||
// IsSame compares two RestartPolicy to see if they are the same
|
||||
func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
|
||||
return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
|
||||
}
|
||||
|
||||
// LogMode is a type to define the available modes for logging
|
||||
// These modes affect how logs are handled when log messages start piling up.
|
||||
type LogMode string
|
||||
|
||||
// Available logging modes
|
||||
const (
|
||||
LogModeUnset = ""
|
||||
LogModeBlocking LogMode = "blocking"
|
||||
LogModeNonBlock LogMode = "non-blocking"
|
||||
)
|
||||
|
||||
// LogConfig represents the logging configuration of the container.
|
||||
type LogConfig struct {
|
||||
Type string
|
||||
Config map[string]string
|
||||
}
|
||||
|
||||
// Resources contains container's resources (cgroups config, ulimits...)
|
||||
type Resources struct {
|
||||
// Applicable to all platforms
|
||||
CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
|
||||
Memory int64 // Memory limit (in bytes)
|
||||
NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10<sup>-9</sup> CPUs.
|
||||
|
||||
// Applicable to UNIX platforms
|
||||
CgroupParent string // Parent cgroup.
|
||||
BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
|
||||
BlkioWeightDevice []*blkiodev.WeightDevice
|
||||
BlkioDeviceReadBps []*blkiodev.ThrottleDevice
|
||||
BlkioDeviceWriteBps []*blkiodev.ThrottleDevice
|
||||
BlkioDeviceReadIOps []*blkiodev.ThrottleDevice
|
||||
BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
|
||||
CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
|
||||
CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
|
||||
CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period
|
||||
CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime
|
||||
CpusetCpus string // CpusetCpus 0-2, 0,1
|
||||
CpusetMems string // CpusetMems 0-2, 0,1
|
||||
Devices []DeviceMapping // List of devices to map inside the container
|
||||
DeviceCgroupRules []string // List of rule to be added to the device cgroup
|
||||
DeviceRequests []DeviceRequest // List of device requests for device drivers
|
||||
KernelMemory int64 // Kernel memory limit (in bytes)
|
||||
KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes)
|
||||
MemoryReservation int64 // Memory soft limit (in bytes)
|
||||
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
|
||||
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
|
||||
OomKillDisable *bool // Whether to disable OOM Killer or not
|
||||
PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
|
||||
Ulimits []*units.Ulimit // List of ulimits to be set in the container
|
||||
|
||||
// Applicable to Windows
|
||||
CPUCount int64 `json:"CpuCount"` // CPU count
|
||||
CPUPercent int64 `json:"CpuPercent"` // CPU percent
|
||||
IOMaximumIOps uint64 // Maximum IOps for the container system drive
|
||||
IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
|
||||
}
|
||||
|
||||
// UpdateConfig holds the mutable attributes of a Container.
|
||||
// Those attributes can be updated at runtime.
|
||||
type UpdateConfig struct {
|
||||
// Contains container's resources (cgroups, ulimits)
|
||||
Resources
|
||||
RestartPolicy RestartPolicy
|
||||
}
|
||||
|
||||
// HostConfig the non-portable Config structure of a container.
|
||||
// Here, "non-portable" means "dependent of the host we are running on".
|
||||
// Portable information *should* appear in Config.
|
||||
type HostConfig struct {
|
||||
// Applicable to all platforms
|
||||
Binds []string // List of volume bindings for this container
|
||||
ContainerIDFile string // File (path) where the containerId is written
|
||||
LogConfig LogConfig // Configuration of the logs for this container
|
||||
NetworkMode NetworkMode // Network mode to use for the container
|
||||
PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
|
||||
RestartPolicy RestartPolicy // Restart policy to be used for the container
|
||||
AutoRemove bool // Automatically remove container when it exits
|
||||
VolumeDriver string // Name of the volume driver used to mount volumes
|
||||
VolumesFrom []string // List of volumes to take from other container
|
||||
|
||||
// Applicable to UNIX platforms
|
||||
CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
|
||||
CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
|
||||
Capabilities []string `json:"Capabilities"` // List of kernel capabilities to be available for container (this overrides the default set)
|
||||
CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container
|
||||
DNS []string `json:"Dns"` // List of DNS server to lookup
|
||||
DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
|
||||
DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
|
||||
ExtraHosts []string // List of extra hosts
|
||||
GroupAdd []string // List of additional groups that the container process will run as
|
||||
IpcMode IpcMode // IPC namespace to use for the container
|
||||
Cgroup CgroupSpec // Cgroup to use for the container
|
||||
Links []string // List of links (in the name:alias form)
|
||||
OomScoreAdj int // Container preference for OOM-killing
|
||||
PidMode PidMode // PID namespace to use for the container
|
||||
Privileged bool // Is the container in privileged mode
|
||||
PublishAllPorts bool // Should docker publish all exposed port for the container
|
||||
ReadonlyRootfs bool // Is the container root filesystem in read-only
|
||||
SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
|
||||
StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container.
|
||||
Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
|
||||
UTSMode UTSMode // UTS namespace to use for the container
|
||||
UsernsMode UsernsMode // The user namespace to use for the container
|
||||
ShmSize int64 // Total shm memory usage
|
||||
Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
|
||||
Runtime string `json:",omitempty"` // Runtime to use with this container
|
||||
|
||||
// Applicable to Windows
|
||||
ConsoleSize [2]uint // Initial console size (height,width)
|
||||
Isolation Isolation // Isolation technology of the container (e.g. default, hyperv)
|
||||
|
||||
// Contains container's resources (cgroups, ulimits)
|
||||
Resources
|
||||
|
||||
// Mounts specs used by the container
|
||||
Mounts []mount.Mount `json:",omitempty"`
|
||||
|
||||
// MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths)
|
||||
MaskedPaths []string
|
||||
|
||||
// ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths)
|
||||
ReadonlyPaths []string
|
||||
|
||||
// Run a custom init inside the container, if null, use the daemon's configured settings
|
||||
Init *bool `json:",omitempty"`
|
||||
}
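HostConfig, as defined above, carries the non-portable, host-dependent part of a container's configuration. A minimal, hypothetical sketch of building one (the function name, port values, and restart policy are placeholders, not taken from this repository):

// Illustrative sketch, not part of this commit.
package main

import (
	"github.com/docker/docker/api/types/container"
	"github.com/docker/go-connections/nat"
)

func exampleHostConfig() *container.HostConfig {
	return &container.HostConfig{
		// Restart the container unless it was explicitly stopped.
		RestartPolicy: container.RestartPolicy{Name: "unless-stopped"},
		// Map container port 6443/tcp to a fixed host port (placeholder values).
		PortBindings: nat.PortMap{
			nat.Port("6443/tcp"): []nat.PortBinding{{HostIP: "0.0.0.0", HostPort: "6443"}},
		},
		Privileged: true,
	}
}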
41
vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
generated
vendored
@ -1,41 +0,0 @@
// +build !windows

package container // import "github.com/docker/docker/api/types/container"

// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {
	return i.IsDefault()
}

// NetworkName returns the name of the network stack.
func (n NetworkMode) NetworkName() string {
	if n.IsBridge() {
		return "bridge"
	} else if n.IsHost() {
		return "host"
	} else if n.IsContainer() {
		return "container"
	} else if n.IsNone() {
		return "none"
	} else if n.IsDefault() {
		return "default"
	} else if n.IsUserDefined() {
		return n.UserDefined()
	}
	return ""
}

// IsBridge indicates whether container uses the bridge network stack
func (n NetworkMode) IsBridge() bool {
	return n == "bridge"
}

// IsHost indicates whether container uses the host network stack.
func (n NetworkMode) IsHost() bool {
	return n == "host"
}

// IsUserDefined indicates user-created network
func (n NetworkMode) IsUserDefined() bool {
	return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
}
40
vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
generated
vendored
@ -1,40 +0,0 @@
package container // import "github.com/docker/docker/api/types/container"

// IsBridge indicates whether container uses the bridge network stack
// in windows it is given the name NAT
func (n NetworkMode) IsBridge() bool {
	return n == "nat"
}

// IsHost indicates whether container uses the host network stack.
// returns false as this is not supported by windows
func (n NetworkMode) IsHost() bool {
	return false
}

// IsUserDefined indicates user-created network
func (n NetworkMode) IsUserDefined() bool {
	return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
}

// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {
	return i.IsDefault() || i.IsHyperV() || i.IsProcess()
}

// NetworkName returns the name of the network stack.
func (n NetworkMode) NetworkName() string {
	if n.IsDefault() {
		return "default"
	} else if n.IsBridge() {
		return "nat"
	} else if n.IsNone() {
		return "none"
	} else if n.IsContainer() {
		return "container"
	} else if n.IsUserDefined() {
		return n.UserDefined()
	}

	return ""
}
22
vendor/github.com/docker/docker/api/types/container/waitcondition.go
generated
vendored
@ -1,22 +0,0 @@
package container // import "github.com/docker/docker/api/types/container"

// WaitCondition is a type used to specify a container state for which
// to wait.
type WaitCondition string

// Possible WaitCondition Values.
//
// WaitConditionNotRunning (default) is used to wait for any of the non-running
// states: "created", "exited", "dead", "removing", or "removed".
//
// WaitConditionNextExit is used to wait for the next time the state changes
// to a non-running state. If the state is currently "created" or "exited",
// this would cause Wait() to block until either the container runs and exits
// or is removed.
//
// WaitConditionRemoved is used to wait for the container to be removed.
const (
	WaitConditionNotRunning WaitCondition = "not-running"
	WaitConditionNextExit   WaitCondition = "next-exit"
	WaitConditionRemoved    WaitCondition = "removed"
)
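The WaitCondition constants above are paired with the ContainerWaitOKBody type from earlier in this diff. A minimal, hypothetical sketch of waiting for a container to stop via the Docker client (the function name and client variable are illustrative, not taken from this repository):

// Illustrative sketch, not part of this commit.
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func waitForExit(ctx context.Context, docker *client.Client, id string) error {
	// Block until the container reaches a non-running state.
	statusCh, errCh := docker.ContainerWait(ctx, id, container.WaitConditionNotRunning)
	select {
	case err := <-errCh:
		if err != nil {
			return err
		}
	case status := <-statusCh:
		// status is the ContainerWaitOKBody defined earlier in this diff.
		fmt.Println("exit code:", status.StatusCode)
	}
	return nil
}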
13
vendor/github.com/docker/docker/api/types/error_response.go
generated
vendored
@ -1,13 +0,0 @@
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// ErrorResponse Represents an error.
// swagger:model ErrorResponse
type ErrorResponse struct {

	// The error message.
	// Required: true
	Message string `json:"message"`
}
6
vendor/github.com/docker/docker/api/types/error_response_ext.go
generated
vendored
@ -1,6 +0,0 @@
package types

// Error returns the error message
func (e ErrorResponse) Error() string {
	return e.Message
}
52
vendor/github.com/docker/docker/api/types/events/events.go
generated
vendored
@ -1,52 +0,0 @@
|
||||
package events // import "github.com/docker/docker/api/types/events"
|
||||
|
||||
const (
|
||||
// ContainerEventType is the event type that containers generate
|
||||
ContainerEventType = "container"
|
||||
// DaemonEventType is the event type that daemon generate
|
||||
DaemonEventType = "daemon"
|
||||
// ImageEventType is the event type that images generate
|
||||
ImageEventType = "image"
|
||||
// NetworkEventType is the event type that networks generate
|
||||
NetworkEventType = "network"
|
||||
// PluginEventType is the event type that plugins generate
|
||||
PluginEventType = "plugin"
|
||||
// VolumeEventType is the event type that volumes generate
|
||||
VolumeEventType = "volume"
|
||||
// ServiceEventType is the event type that services generate
|
||||
ServiceEventType = "service"
|
||||
// NodeEventType is the event type that nodes generate
|
||||
NodeEventType = "node"
|
||||
// SecretEventType is the event type that secrets generate
|
||||
SecretEventType = "secret"
|
||||
// ConfigEventType is the event type that configs generate
|
||||
ConfigEventType = "config"
|
||||
)
|
||||
|
||||
// Actor describes something that generates events,
|
||||
// like a container, or a network, or a volume.
|
||||
// It has a defined name and a set of attributes.
|
||||
// The container attributes are its labels, other actors
|
||||
// can generate these attributes from other properties.
|
||||
type Actor struct {
|
||||
ID string
|
||||
Attributes map[string]string
|
||||
}
|
||||
|
||||
// Message represents the information an event contains
|
||||
type Message struct {
|
||||
// Deprecated information from JSONMessage.
|
||||
// With data only in container events.
|
||||
Status string `json:"status,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
From string `json:"from,omitempty"`
|
||||
|
||||
Type string
|
||||
Action string
|
||||
Actor Actor
|
||||
// Engine events are local scope. Cluster events are swarm scope.
|
||||
Scope string `json:"scope,omitempty"`
|
||||
|
||||
Time int64 `json:"time,omitempty"`
|
||||
TimeNano int64 `json:"timeNano,omitempty"`
|
||||
}
|
315
vendor/github.com/docker/docker/api/types/filters/parse.go
generated
vendored
@ -1,315 +0,0 @@
|
||||
/*Package filters provides tools for encoding a mapping of keys to a set of
|
||||
multiple values.
|
||||
*/
|
||||
package filters // import "github.com/docker/docker/api/types/filters"
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
)
|
||||
|
||||
// Args stores a mapping of keys to a set of multiple values.
|
||||
type Args struct {
|
||||
fields map[string]map[string]bool
|
||||
}
|
||||
|
||||
// KeyValuePair are used to initialize a new Args
|
||||
type KeyValuePair struct {
|
||||
Key string
|
||||
Value string
|
||||
}
|
||||
|
||||
// Arg creates a new KeyValuePair for initializing Args
|
||||
func Arg(key, value string) KeyValuePair {
|
||||
return KeyValuePair{Key: key, Value: value}
|
||||
}
|
||||
|
||||
// NewArgs returns a new Args populated with the initial args
|
||||
func NewArgs(initialArgs ...KeyValuePair) Args {
|
||||
args := Args{fields: map[string]map[string]bool{}}
|
||||
for _, arg := range initialArgs {
|
||||
args.Add(arg.Key, arg.Value)
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte representation of the Args
|
||||
func (args Args) MarshalJSON() ([]byte, error) {
|
||||
if len(args.fields) == 0 {
|
||||
return []byte{}, nil
|
||||
}
|
||||
return json.Marshal(args.fields)
|
||||
}
|
||||
|
||||
// ToJSON returns the Args as a JSON encoded string
|
||||
func ToJSON(a Args) (string, error) {
|
||||
if a.Len() == 0 {
|
||||
return "", nil
|
||||
}
|
||||
buf, err := json.Marshal(a)
|
||||
return string(buf), err
|
||||
}
|
||||
|
||||
// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22
|
||||
// then the encoded format will use an older legacy format where the values are a
|
||||
// list of strings, instead of a set.
|
||||
//
|
||||
// Deprecated: Use ToJSON
|
||||
func ToParamWithVersion(version string, a Args) (string, error) {
|
||||
if a.Len() == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
if version != "" && versions.LessThan(version, "1.22") {
|
||||
buf, err := json.Marshal(convertArgsToSlice(a.fields))
|
||||
return string(buf), err
|
||||
}
|
||||
|
||||
return ToJSON(a)
|
||||
}
|
||||
|
||||
// FromJSON decodes a JSON encoded string into Args
|
||||
func FromJSON(p string) (Args, error) {
|
||||
args := NewArgs()
|
||||
|
||||
if p == "" {
|
||||
return args, nil
|
||||
}
|
||||
|
||||
raw := []byte(p)
|
||||
err := json.Unmarshal(raw, &args)
|
||||
if err == nil {
|
||||
return args, nil
|
||||
}
|
||||
|
||||
// Fallback to parsing arguments in the legacy slice format
|
||||
deprecated := map[string][]string{}
|
||||
if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil {
|
||||
return args, err
|
||||
}
|
||||
|
||||
args.fields = deprecatedArgs(deprecated)
|
||||
return args, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON populates the Args from JSON encoded bytes
|
||||
func (args Args) UnmarshalJSON(raw []byte) error {
|
||||
if len(raw) == 0 {
|
||||
return nil
|
||||
}
|
||||
return json.Unmarshal(raw, &args.fields)
|
||||
}
|
||||
|
||||
// Get returns the list of values associated with the key
|
||||
func (args Args) Get(key string) []string {
|
||||
values := args.fields[key]
|
||||
if values == nil {
|
||||
return make([]string, 0)
|
||||
}
|
||||
slice := make([]string, 0, len(values))
|
||||
for key := range values {
|
||||
slice = append(slice, key)
|
||||
}
|
||||
return slice
|
||||
}
|
||||
|
||||
// Add a new value to the set of values
|
||||
func (args Args) Add(key, value string) {
|
||||
if _, ok := args.fields[key]; ok {
|
||||
args.fields[key][value] = true
|
||||
} else {
|
||||
args.fields[key] = map[string]bool{value: true}
|
||||
}
|
||||
}
|
||||
|
||||
// Del removes a value from the set
|
||||
func (args Args) Del(key, value string) {
|
||||
if _, ok := args.fields[key]; ok {
|
||||
delete(args.fields[key], value)
|
||||
if len(args.fields[key]) == 0 {
|
||||
delete(args.fields, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the number of keys in the mapping
|
||||
func (args Args) Len() int {
|
||||
return len(args.fields)
|
||||
}
|
||||
|
||||
// MatchKVList returns true if all the pairs in sources exist as key=value
|
||||
// pairs in the mapping at key, or if there are no values at key.
|
||||
func (args Args) MatchKVList(key string, sources map[string]string) bool {
|
||||
fieldValues := args.fields[key]
|
||||
|
||||
//do not filter if there is no filter set or cannot determine filter
|
||||
if len(fieldValues) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
if len(sources) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
for value := range fieldValues {
|
||||
testKV := strings.SplitN(value, "=", 2)
|
||||
|
||||
v, ok := sources[testKV[0]]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if len(testKV) == 2 && testKV[1] != v {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Match returns true if any of the values at key match the source string
|
||||
func (args Args) Match(field, source string) bool {
|
||||
if args.ExactMatch(field, source) {
|
||||
return true
|
||||
}
|
||||
|
||||
fieldValues := args.fields[field]
|
||||
for name2match := range fieldValues {
|
||||
		match, err := regexp.MatchString(name2match, source)
		if err != nil {
			continue
		}
		if match {
			return true
		}
	}
	return false
}

// ExactMatch returns true if the source matches exactly one of the values.
func (args Args) ExactMatch(key, source string) bool {
	fieldValues, ok := args.fields[key]
	//do not filter if there is no filter set or cannot determine filter
	if !ok || len(fieldValues) == 0 {
		return true
	}

	// try to match full name value to avoid O(N) regular expression matching
	return fieldValues[source]
}

// UniqueExactMatch returns true if there is only one value and the source
// matches exactly the value.
func (args Args) UniqueExactMatch(key, source string) bool {
	fieldValues := args.fields[key]
	//do not filter if there is no filter set or cannot determine filter
	if len(fieldValues) == 0 {
		return true
	}
	if len(args.fields[key]) != 1 {
		return false
	}

	// try to match full name value to avoid O(N) regular expression matching
	return fieldValues[source]
}

// FuzzyMatch returns true if the source matches exactly one value, or the
// source has one of the values as a prefix.
func (args Args) FuzzyMatch(key, source string) bool {
	if args.ExactMatch(key, source) {
		return true
	}

	fieldValues := args.fields[key]
	for prefix := range fieldValues {
		if strings.HasPrefix(source, prefix) {
			return true
		}
	}
	return false
}

// Contains returns true if the key exists in the mapping
func (args Args) Contains(field string) bool {
	_, ok := args.fields[field]
	return ok
}

type invalidFilter string

func (e invalidFilter) Error() string {
	return "Invalid filter '" + string(e) + "'"
}

func (invalidFilter) InvalidParameter() {}

// Validate compared the set of accepted keys against the keys in the mapping.
// An error is returned if any mapping keys are not in the accepted set.
func (args Args) Validate(accepted map[string]bool) error {
	for name := range args.fields {
		if !accepted[name] {
			return invalidFilter(name)
		}
	}
	return nil
}

// WalkValues iterates over the list of values for a key in the mapping and calls
// op() for each value. If op returns an error the iteration stops and the
// error is returned.
func (args Args) WalkValues(field string, op func(value string) error) error {
	if _, ok := args.fields[field]; !ok {
		return nil
	}
	for v := range args.fields[field] {
		if err := op(v); err != nil {
			return err
		}
	}
	return nil
}

// Clone returns a copy of args.
func (args Args) Clone() (newArgs Args) {
	newArgs.fields = make(map[string]map[string]bool, len(args.fields))
	for k, m := range args.fields {
		var mm map[string]bool
		if m != nil {
			mm = make(map[string]bool, len(m))
			for kk, v := range m {
				mm[kk] = v
			}
		}
		newArgs.fields[k] = mm
	}
	return newArgs
}

func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
	m := map[string]map[string]bool{}
	for k, v := range d {
		values := map[string]bool{}
		for _, vv := range v {
			values[vv] = true
		}
		m[k] = values
	}
	return m
}

func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
	m := map[string][]string{}
	for k, v := range f {
		values := []string{}
		for kk := range v {
			if v[kk] {
				values = append(values, kk)
			}
		}
		m[k] = values
	}
	return m
}
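For context on the filter helpers removed above, here is a minimal usage sketch. It assumes the upstream github.com/docker/docker/api/types/filters package (the source of this vendored file) is still resolvable, and that its NewArgs/Add constructors behave as in the upstream module; only Contains, ExactMatch, FuzzyMatch and Validate appear in the diff itself.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Build a filter set: one label filter and one name filter.
	args := filters.NewArgs()
	args.Add("label", "app=k3d")
	args.Add("name", "k3d-mycluster-server")

	// Only "label" and "name" are accepted keys here; anything else errors.
	accepted := map[string]bool{"label": true, "name": true}
	if err := args.Validate(accepted); err != nil {
		fmt.Println("invalid filter:", err)
		return
	}

	fmt.Println(args.Contains("name"))                             // true
	fmt.Println(args.ExactMatch("name", "k3d-mycluster-server"))   // true
	fmt.Println(args.FuzzyMatch("name", "k3d-mycluster-server-0")) // true: prefix match
}
```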
17
vendor/github.com/docker/docker/api/types/graph_driver_data.go
generated
vendored
@ -1,17 +0,0 @@
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// GraphDriverData Information about a container's graph driver.
// swagger:model GraphDriverData
type GraphDriverData struct {

	// data
	// Required: true
	Data map[string]string `json:"Data"`

	// name
	// Required: true
	Name string `json:"Name"`
}
13
vendor/github.com/docker/docker/api/types/id_response.go
generated
vendored
@ -1,13 +0,0 @@
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// IDResponse Response to an API call that returns just an Id
// swagger:model IdResponse
type IDResponse struct {

	// The id of the newly created object.
	// Required: true
	ID string `json:"Id"`
}
37
vendor/github.com/docker/docker/api/types/image/image_history.go
generated
vendored
@ -1,37 +0,0 @@
package image // import "github.com/docker/docker/api/types/image"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

// HistoryResponseItem individual image layer information in response to ImageHistory operation
// swagger:model HistoryResponseItem
type HistoryResponseItem struct {

	// comment
	// Required: true
	Comment string `json:"Comment"`

	// created
	// Required: true
	Created int64 `json:"Created"`

	// created by
	// Required: true
	CreatedBy string `json:"CreatedBy"`

	// Id
	// Required: true
	ID string `json:"Id"`

	// size
	// Required: true
	Size int64 `json:"Size"`

	// tags
	// Required: true
	Tags []string `json:"Tags"`
}
15
vendor/github.com/docker/docker/api/types/image_delete_response_item.go
generated
vendored
@ -1,15 +0,0 @@
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// ImageDeleteResponseItem image delete response item
// swagger:model ImageDeleteResponseItem
type ImageDeleteResponseItem struct {

	// The image ID of an image that was deleted
	Deleted string `json:"Deleted,omitempty"`

	// The image ID of an image that was untagged
	Untagged string `json:"Untagged,omitempty"`
}
49
vendor/github.com/docker/docker/api/types/image_summary.go
generated
vendored
@ -1,49 +0,0 @@
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// ImageSummary image summary
// swagger:model ImageSummary
type ImageSummary struct {

	// containers
	// Required: true
	Containers int64 `json:"Containers"`

	// created
	// Required: true
	Created int64 `json:"Created"`

	// Id
	// Required: true
	ID string `json:"Id"`

	// labels
	// Required: true
	Labels map[string]string `json:"Labels"`

	// parent Id
	// Required: true
	ParentID string `json:"ParentId"`

	// repo digests
	// Required: true
	RepoDigests []string `json:"RepoDigests"`

	// repo tags
	// Required: true
	RepoTags []string `json:"RepoTags"`

	// shared size
	// Required: true
	SharedSize int64 `json:"SharedSize"`

	// size
	// Required: true
	Size int64 `json:"Size"`

	// virtual size
	// Required: true
	VirtualSize int64 `json:"VirtualSize"`
}
131
vendor/github.com/docker/docker/api/types/mount/mount.go
generated
vendored
@ -1,131 +0,0 @@
|
||||
package mount // import "github.com/docker/docker/api/types/mount"
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// Type represents the type of a mount.
|
||||
type Type string
|
||||
|
||||
// Type constants
|
||||
const (
|
||||
// TypeBind is the type for mounting host dir
|
||||
TypeBind Type = "bind"
|
||||
// TypeVolume is the type for remote storage volumes
|
||||
TypeVolume Type = "volume"
|
||||
// TypeTmpfs is the type for mounting tmpfs
|
||||
TypeTmpfs Type = "tmpfs"
|
||||
// TypeNamedPipe is the type for mounting Windows named pipes
|
||||
TypeNamedPipe Type = "npipe"
|
||||
)
|
||||
|
||||
// Mount represents a mount (volume).
|
||||
type Mount struct {
|
||||
Type Type `json:",omitempty"`
|
||||
// Source specifies the name of the mount. Depending on mount type, this
|
||||
// may be a volume name or a host path, or even ignored.
|
||||
// Source is not supported for tmpfs (must be an empty value)
|
||||
Source string `json:",omitempty"`
|
||||
Target string `json:",omitempty"`
|
||||
ReadOnly bool `json:",omitempty"`
|
||||
Consistency Consistency `json:",omitempty"`
|
||||
|
||||
BindOptions *BindOptions `json:",omitempty"`
|
||||
VolumeOptions *VolumeOptions `json:",omitempty"`
|
||||
TmpfsOptions *TmpfsOptions `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Propagation represents the propagation of a mount.
|
||||
type Propagation string
|
||||
|
||||
const (
|
||||
// PropagationRPrivate RPRIVATE
|
||||
PropagationRPrivate Propagation = "rprivate"
|
||||
// PropagationPrivate PRIVATE
|
||||
PropagationPrivate Propagation = "private"
|
||||
// PropagationRShared RSHARED
|
||||
PropagationRShared Propagation = "rshared"
|
||||
// PropagationShared SHARED
|
||||
PropagationShared Propagation = "shared"
|
||||
// PropagationRSlave RSLAVE
|
||||
PropagationRSlave Propagation = "rslave"
|
||||
// PropagationSlave SLAVE
|
||||
PropagationSlave Propagation = "slave"
|
||||
)
|
||||
|
||||
// Propagations is the list of all valid mount propagations
|
||||
var Propagations = []Propagation{
|
||||
PropagationRPrivate,
|
||||
PropagationPrivate,
|
||||
PropagationRShared,
|
||||
PropagationShared,
|
||||
PropagationRSlave,
|
||||
PropagationSlave,
|
||||
}
|
||||
|
||||
// Consistency represents the consistency requirements of a mount.
|
||||
type Consistency string
|
||||
|
||||
const (
|
||||
// ConsistencyFull guarantees bind mount-like consistency
|
||||
ConsistencyFull Consistency = "consistent"
|
||||
// ConsistencyCached mounts can cache read data and FS structure
|
||||
ConsistencyCached Consistency = "cached"
|
||||
// ConsistencyDelegated mounts can cache read and written data and structure
|
||||
ConsistencyDelegated Consistency = "delegated"
|
||||
// ConsistencyDefault provides "consistent" behavior unless overridden
|
||||
ConsistencyDefault Consistency = "default"
|
||||
)
|
||||
|
||||
// BindOptions defines options specific to mounts of type "bind".
|
||||
type BindOptions struct {
|
||||
Propagation Propagation `json:",omitempty"`
|
||||
NonRecursive bool `json:",omitempty"`
|
||||
}
|
||||
|
||||
// VolumeOptions represents the options for a mount of type volume.
|
||||
type VolumeOptions struct {
|
||||
NoCopy bool `json:",omitempty"`
|
||||
Labels map[string]string `json:",omitempty"`
|
||||
DriverConfig *Driver `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Driver represents a volume driver.
|
||||
type Driver struct {
|
||||
Name string `json:",omitempty"`
|
||||
Options map[string]string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// TmpfsOptions defines options specific to mounts of type "tmpfs".
|
||||
type TmpfsOptions struct {
|
||||
// Size sets the size of the tmpfs, in bytes.
|
||||
//
|
||||
// This will be converted to an operating system specific value
|
||||
// depending on the host. For example, on linux, it will be converted to
|
||||
// use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
|
||||
// docker, uses a straight byte value.
|
||||
//
|
||||
// Percentages are not supported.
|
||||
SizeBytes int64 `json:",omitempty"`
|
||||
// Mode of the tmpfs upon creation
|
||||
Mode os.FileMode `json:",omitempty"`
|
||||
|
||||
// TODO(stevvooe): There are several more tmpfs flags, specified in the
|
||||
// daemon, that are accepted. Only the most basic are added for now.
|
||||
//
|
||||
// From docker/docker/pkg/mount/flags.go:
|
||||
//
|
||||
// var validFlags = map[string]bool{
|
||||
// "": true,
|
||||
// "size": true, X
|
||||
// "mode": true, X
|
||||
// "uid": true,
|
||||
// "gid": true,
|
||||
// "nr_inodes": true,
|
||||
// "nr_blocks": true,
|
||||
// "mpol": true,
|
||||
// }
|
||||
//
|
||||
// Some of these may be straightforward to add, but others, such as
|
||||
// uid/gid have implications in a clustered system.
|
||||
}
|
127
vendor/github.com/docker/docker/api/types/network/network.go
generated
vendored
@ -1,127 +0,0 @@
|
||||
package network // import "github.com/docker/docker/api/types/network"
|
||||
import (
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/errdefs"
|
||||
)
|
||||
|
||||
// Address represents an IP address
|
||||
type Address struct {
|
||||
Addr string
|
||||
PrefixLen int
|
||||
}
|
||||
|
||||
// IPAM represents IP Address Management
|
||||
type IPAM struct {
|
||||
Driver string
|
||||
Options map[string]string //Per network IPAM driver options
|
||||
Config []IPAMConfig
|
||||
}
|
||||
|
||||
// IPAMConfig represents IPAM configurations
|
||||
type IPAMConfig struct {
|
||||
Subnet string `json:",omitempty"`
|
||||
IPRange string `json:",omitempty"`
|
||||
Gateway string `json:",omitempty"`
|
||||
AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
|
||||
}
|
||||
|
||||
// EndpointIPAMConfig represents IPAM configurations for the endpoint
|
||||
type EndpointIPAMConfig struct {
|
||||
IPv4Address string `json:",omitempty"`
|
||||
IPv6Address string `json:",omitempty"`
|
||||
LinkLocalIPs []string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Copy makes a copy of the endpoint ipam config
|
||||
func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig {
|
||||
cfgCopy := *cfg
|
||||
cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs))
|
||||
cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...)
|
||||
return &cfgCopy
|
||||
}
|
||||
|
||||
// PeerInfo represents one peer of an overlay network
|
||||
type PeerInfo struct {
|
||||
Name string
|
||||
IP string
|
||||
}
|
||||
|
||||
// EndpointSettings stores the network endpoint details
|
||||
type EndpointSettings struct {
|
||||
// Configurations
|
||||
IPAMConfig *EndpointIPAMConfig
|
||||
Links []string
|
||||
Aliases []string
|
||||
// Operational data
|
||||
NetworkID string
|
||||
EndpointID string
|
||||
Gateway string
|
||||
IPAddress string
|
||||
IPPrefixLen int
|
||||
IPv6Gateway string
|
||||
GlobalIPv6Address string
|
||||
GlobalIPv6PrefixLen int
|
||||
MacAddress string
|
||||
DriverOpts map[string]string
|
||||
}
|
||||
|
||||
// Task carries the information about one backend task
|
||||
type Task struct {
|
||||
Name string
|
||||
EndpointID string
|
||||
EndpointIP string
|
||||
Info map[string]string
|
||||
}
|
||||
|
||||
// ServiceInfo represents service parameters with the list of service's tasks
|
||||
type ServiceInfo struct {
|
||||
VIP string
|
||||
Ports []string
|
||||
LocalLBIndex int
|
||||
Tasks []Task
|
||||
}
|
||||
|
||||
// Copy makes a deep copy of `EndpointSettings`
|
||||
func (es *EndpointSettings) Copy() *EndpointSettings {
|
||||
epCopy := *es
|
||||
if es.IPAMConfig != nil {
|
||||
epCopy.IPAMConfig = es.IPAMConfig.Copy()
|
||||
}
|
||||
|
||||
if es.Links != nil {
|
||||
links := make([]string, 0, len(es.Links))
|
||||
epCopy.Links = append(links, es.Links...)
|
||||
}
|
||||
|
||||
if es.Aliases != nil {
|
||||
aliases := make([]string, 0, len(es.Aliases))
|
||||
epCopy.Aliases = append(aliases, es.Aliases...)
|
||||
}
|
||||
return &epCopy
|
||||
}
|
||||
|
||||
// NetworkingConfig represents the container's networking configuration for each of its interfaces
|
||||
// Carries the networking configs specified in the `docker run` and `docker network connect` commands
|
||||
type NetworkingConfig struct {
|
||||
EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
|
||||
}
|
||||
|
||||
// ConfigReference specifies the source which provides a network's configuration
|
||||
type ConfigReference struct {
|
||||
Network string
|
||||
}
|
||||
|
||||
var acceptedFilters = map[string]bool{
|
||||
"dangling": true,
|
||||
"driver": true,
|
||||
"id": true,
|
||||
"label": true,
|
||||
"name": true,
|
||||
"scope": true,
|
||||
"type": true,
|
||||
}
|
||||
|
||||
// ValidateFilters validates the list of filter args with the available filters.
|
||||
func ValidateFilters(filter filters.Args) error {
|
||||
return errdefs.InvalidParameter(filter.Validate(acceptedFilters))
|
||||
}
|
203
vendor/github.com/docker/docker/api/types/plugin.go
generated
vendored
@ -1,203 +0,0 @@
|
||||
package types
|
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// Plugin A plugin for the Engine API
|
||||
// swagger:model Plugin
|
||||
type Plugin struct {
|
||||
|
||||
// config
|
||||
// Required: true
|
||||
Config PluginConfig `json:"Config"`
|
||||
|
||||
// True if the plugin is running. False if the plugin is not running, only installed.
|
||||
// Required: true
|
||||
Enabled bool `json:"Enabled"`
|
||||
|
||||
// Id
|
||||
ID string `json:"Id,omitempty"`
|
||||
|
||||
// name
|
||||
// Required: true
|
||||
Name string `json:"Name"`
|
||||
|
||||
// plugin remote reference used to push/pull the plugin
|
||||
PluginReference string `json:"PluginReference,omitempty"`
|
||||
|
||||
// settings
|
||||
// Required: true
|
||||
Settings PluginSettings `json:"Settings"`
|
||||
}
|
||||
|
||||
// PluginConfig The config of a plugin.
|
||||
// swagger:model PluginConfig
|
||||
type PluginConfig struct {
|
||||
|
||||
// args
|
||||
// Required: true
|
||||
Args PluginConfigArgs `json:"Args"`
|
||||
|
||||
// description
|
||||
// Required: true
|
||||
Description string `json:"Description"`
|
||||
|
||||
// Docker Version used to create the plugin
|
||||
DockerVersion string `json:"DockerVersion,omitempty"`
|
||||
|
||||
// documentation
|
||||
// Required: true
|
||||
Documentation string `json:"Documentation"`
|
||||
|
||||
// entrypoint
|
||||
// Required: true
|
||||
Entrypoint []string `json:"Entrypoint"`
|
||||
|
||||
// env
|
||||
// Required: true
|
||||
Env []PluginEnv `json:"Env"`
|
||||
|
||||
// interface
|
||||
// Required: true
|
||||
Interface PluginConfigInterface `json:"Interface"`
|
||||
|
||||
// ipc host
|
||||
// Required: true
|
||||
IpcHost bool `json:"IpcHost"`
|
||||
|
||||
// linux
|
||||
// Required: true
|
||||
Linux PluginConfigLinux `json:"Linux"`
|
||||
|
||||
// mounts
|
||||
// Required: true
|
||||
Mounts []PluginMount `json:"Mounts"`
|
||||
|
||||
// network
|
||||
// Required: true
|
||||
Network PluginConfigNetwork `json:"Network"`
|
||||
|
||||
// pid host
|
||||
// Required: true
|
||||
PidHost bool `json:"PidHost"`
|
||||
|
||||
// propagated mount
|
||||
// Required: true
|
||||
PropagatedMount string `json:"PropagatedMount"`
|
||||
|
||||
// user
|
||||
User PluginConfigUser `json:"User,omitempty"`
|
||||
|
||||
// work dir
|
||||
// Required: true
|
||||
WorkDir string `json:"WorkDir"`
|
||||
|
||||
// rootfs
|
||||
Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"`
|
||||
}
|
||||
|
||||
// PluginConfigArgs plugin config args
|
||||
// swagger:model PluginConfigArgs
|
||||
type PluginConfigArgs struct {
|
||||
|
||||
// description
|
||||
// Required: true
|
||||
Description string `json:"Description"`
|
||||
|
||||
// name
|
||||
// Required: true
|
||||
Name string `json:"Name"`
|
||||
|
||||
// settable
|
||||
// Required: true
|
||||
Settable []string `json:"Settable"`
|
||||
|
||||
// value
|
||||
// Required: true
|
||||
Value []string `json:"Value"`
|
||||
}
|
||||
|
||||
// PluginConfigInterface The interface between Docker and the plugin
|
||||
// swagger:model PluginConfigInterface
|
||||
type PluginConfigInterface struct {
|
||||
|
||||
// Protocol to use for clients connecting to the plugin.
|
||||
ProtocolScheme string `json:"ProtocolScheme,omitempty"`
|
||||
|
||||
// socket
|
||||
// Required: true
|
||||
Socket string `json:"Socket"`
|
||||
|
||||
// types
|
||||
// Required: true
|
||||
Types []PluginInterfaceType `json:"Types"`
|
||||
}
|
||||
|
||||
// PluginConfigLinux plugin config linux
|
||||
// swagger:model PluginConfigLinux
|
||||
type PluginConfigLinux struct {
|
||||
|
||||
// allow all devices
|
||||
// Required: true
|
||||
AllowAllDevices bool `json:"AllowAllDevices"`
|
||||
|
||||
// capabilities
|
||||
// Required: true
|
||||
Capabilities []string `json:"Capabilities"`
|
||||
|
||||
// devices
|
||||
// Required: true
|
||||
Devices []PluginDevice `json:"Devices"`
|
||||
}
|
||||
|
||||
// PluginConfigNetwork plugin config network
|
||||
// swagger:model PluginConfigNetwork
|
||||
type PluginConfigNetwork struct {
|
||||
|
||||
// type
|
||||
// Required: true
|
||||
Type string `json:"Type"`
|
||||
}
|
||||
|
||||
// PluginConfigRootfs plugin config rootfs
|
||||
// swagger:model PluginConfigRootfs
|
||||
type PluginConfigRootfs struct {
|
||||
|
||||
// diff ids
|
||||
DiffIds []string `json:"diff_ids"`
|
||||
|
||||
// type
|
||||
Type string `json:"type,omitempty"`
|
||||
}
|
||||
|
||||
// PluginConfigUser plugin config user
|
||||
// swagger:model PluginConfigUser
|
||||
type PluginConfigUser struct {
|
||||
|
||||
// g ID
|
||||
GID uint32 `json:"GID,omitempty"`
|
||||
|
||||
// UID
|
||||
UID uint32 `json:"UID,omitempty"`
|
||||
}
|
||||
|
||||
// PluginSettings Settings that can be modified by users.
|
||||
// swagger:model PluginSettings
|
||||
type PluginSettings struct {
|
||||
|
||||
// args
|
||||
// Required: true
|
||||
Args []string `json:"Args"`
|
||||
|
||||
// devices
|
||||
// Required: true
|
||||
Devices []PluginDevice `json:"Devices"`
|
||||
|
||||
// env
|
||||
// Required: true
|
||||
Env []string `json:"Env"`
|
||||
|
||||
// mounts
|
||||
// Required: true
|
||||
Mounts []PluginMount `json:"Mounts"`
|
||||
}
|
25
vendor/github.com/docker/docker/api/types/plugin_device.go
generated
vendored
@ -1,25 +0,0 @@
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// PluginDevice plugin device
// swagger:model PluginDevice
type PluginDevice struct {

	// description
	// Required: true
	Description string `json:"Description"`

	// name
	// Required: true
	Name string `json:"Name"`

	// path
	// Required: true
	Path *string `json:"Path"`

	// settable
	// Required: true
	Settable []string `json:"Settable"`
}
25
vendor/github.com/docker/docker/api/types/plugin_env.go
generated
vendored
@ -1,25 +0,0 @@
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// PluginEnv plugin env
// swagger:model PluginEnv
type PluginEnv struct {

	// description
	// Required: true
	Description string `json:"Description"`

	// name
	// Required: true
	Name string `json:"Name"`

	// settable
	// Required: true
	Settable []string `json:"Settable"`

	// value
	// Required: true
	Value *string `json:"Value"`
}
21
vendor/github.com/docker/docker/api/types/plugin_interface_type.go
generated
vendored
@ -1,21 +0,0 @@
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// PluginInterfaceType plugin interface type
// swagger:model PluginInterfaceType
type PluginInterfaceType struct {

	// capability
	// Required: true
	Capability string `json:"Capability"`

	// prefix
	// Required: true
	Prefix string `json:"Prefix"`

	// version
	// Required: true
	Version string `json:"Version"`
}
37
vendor/github.com/docker/docker/api/types/plugin_mount.go
generated
vendored
@ -1,37 +0,0 @@
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// PluginMount plugin mount
// swagger:model PluginMount
type PluginMount struct {

	// description
	// Required: true
	Description string `json:"Description"`

	// destination
	// Required: true
	Destination string `json:"Destination"`

	// name
	// Required: true
	Name string `json:"Name"`

	// options
	// Required: true
	Options []string `json:"Options"`

	// settable
	// Required: true
	Settable []string `json:"Settable"`

	// source
	// Required: true
	Source *string `json:"Source"`

	// type
	// Required: true
	Type string `json:"Type"`
}
71
vendor/github.com/docker/docker/api/types/plugin_responses.go
generated
vendored
@ -1,71 +0,0 @@
package types // import "github.com/docker/docker/api/types"

import (
	"encoding/json"
	"fmt"
	"sort"
)

// PluginsListResponse contains the response for the Engine API
type PluginsListResponse []*Plugin

// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error {
	versionIndex := len(p)
	prefixIndex := 0
	if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' {
		return fmt.Errorf("%q is not a plugin interface type", p)
	}
	p = p[1 : len(p)-1]
loop:
	for i, b := range p {
		switch b {
		case '.':
			prefixIndex = i
		case '/':
			versionIndex = i
			break loop
		}
	}
	t.Prefix = string(p[:prefixIndex])
	t.Capability = string(p[prefixIndex+1 : versionIndex])
	if versionIndex < len(p) {
		t.Version = string(p[versionIndex+1:])
	}
	return nil
}

// MarshalJSON implements json.Marshaler for PluginInterfaceType
func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) {
	return json.Marshal(t.String())
}

// String implements fmt.Stringer for PluginInterfaceType
func (t PluginInterfaceType) String() string {
	return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version)
}

// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
type PluginPrivilege struct {
	Name        string
	Description string
	Value       []string
}

// PluginPrivileges is a list of PluginPrivilege
type PluginPrivileges []PluginPrivilege

func (s PluginPrivileges) Len() int {
	return len(s)
}

func (s PluginPrivileges) Less(i, j int) bool {
	return s[i].Name < s[j].Name
}

func (s PluginPrivileges) Swap(i, j int) {
	sort.Strings(s[i].Value)
	sort.Strings(s[j].Value)
	s[i], s[j] = s[j], s[i]
}
23
vendor/github.com/docker/docker/api/types/port.go
generated
vendored
@ -1,23 +0,0 @@
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// Port An open port on a container
// swagger:model Port
type Port struct {

	// Host IP address that the container's port is mapped to
	IP string `json:"IP,omitempty"`

	// Port on the container
	// Required: true
	PrivatePort uint16 `json:"PrivatePort"`

	// Port exposed on the host
	PublicPort uint16 `json:"PublicPort,omitempty"`

	// type
	// Required: true
	Type string `json:"Type"`
}
21
vendor/github.com/docker/docker/api/types/registry/authenticate.go
generated
vendored
@ -1,21 +0,0 @@
package registry // import "github.com/docker/docker/api/types/registry"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

// AuthenticateOKBody authenticate o k body
// swagger:model AuthenticateOKBody
type AuthenticateOKBody struct {

	// An opaque token used to authenticate a user after a successful login
	// Required: true
	IdentityToken string `json:"IdentityToken"`

	// The status of the authentication
	// Required: true
	Status string `json:"Status"`
}
119
vendor/github.com/docker/docker/api/types/registry/registry.go
generated
vendored
@ -1,119 +0,0 @@
|
||||
package registry // import "github.com/docker/docker/api/types/registry"
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net"
|
||||
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// ServiceConfig stores daemon registry services configuration.
|
||||
type ServiceConfig struct {
|
||||
AllowNondistributableArtifactsCIDRs []*NetIPNet
|
||||
AllowNondistributableArtifactsHostnames []string
|
||||
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
|
||||
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
|
||||
Mirrors []string
|
||||
}
|
||||
|
||||
// NetIPNet is the net.IPNet type, which can be marshalled and
|
||||
// unmarshalled to JSON
|
||||
type NetIPNet net.IPNet
|
||||
|
||||
// String returns the CIDR notation of ipnet
|
||||
func (ipnet *NetIPNet) String() string {
|
||||
return (*net.IPNet)(ipnet).String()
|
||||
}
|
||||
|
||||
// MarshalJSON returns the JSON representation of the IPNet
|
||||
func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal((*net.IPNet)(ipnet).String())
|
||||
}
|
||||
|
||||
// UnmarshalJSON sets the IPNet from a byte array of JSON
|
||||
func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
|
||||
var ipnetStr string
|
||||
if err = json.Unmarshal(b, &ipnetStr); err == nil {
|
||||
var cidr *net.IPNet
|
||||
if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
|
||||
*ipnet = NetIPNet(*cidr)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// IndexInfo contains information about a registry
|
||||
//
|
||||
// RepositoryInfo Examples:
|
||||
// {
|
||||
// "Index" : {
|
||||
// "Name" : "docker.io",
|
||||
// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
|
||||
// "Secure" : true,
|
||||
// "Official" : true,
|
||||
// },
|
||||
// "RemoteName" : "library/debian",
|
||||
// "LocalName" : "debian",
|
||||
// "CanonicalName" : "docker.io/debian"
|
||||
// "Official" : true,
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "Index" : {
|
||||
// "Name" : "127.0.0.1:5000",
|
||||
// "Mirrors" : [],
|
||||
// "Secure" : false,
|
||||
// "Official" : false,
|
||||
// },
|
||||
// "RemoteName" : "user/repo",
|
||||
// "LocalName" : "127.0.0.1:5000/user/repo",
|
||||
// "CanonicalName" : "127.0.0.1:5000/user/repo",
|
||||
// "Official" : false,
|
||||
// }
|
||||
type IndexInfo struct {
|
||||
// Name is the name of the registry, such as "docker.io"
|
||||
Name string
|
||||
// Mirrors is a list of mirrors, expressed as URIs
|
||||
Mirrors []string
|
||||
// Secure is set to false if the registry is part of the list of
|
||||
// insecure registries. Insecure registries accept HTTP and/or accept
|
||||
// HTTPS with certificates from unknown CAs.
|
||||
Secure bool
|
||||
// Official indicates whether this is an official registry
|
||||
Official bool
|
||||
}
|
||||
|
||||
// SearchResult describes a search result returned from a registry
|
||||
type SearchResult struct {
|
||||
// StarCount indicates the number of stars this repository has
|
||||
StarCount int `json:"star_count"`
|
||||
// IsOfficial is true if the result is from an official repository.
|
||||
IsOfficial bool `json:"is_official"`
|
||||
// Name is the name of the repository
|
||||
Name string `json:"name"`
|
||||
// IsAutomated indicates whether the result is automated
|
||||
IsAutomated bool `json:"is_automated"`
|
||||
// Description is a textual description of the repository
|
||||
Description string `json:"description"`
|
||||
}
|
||||
|
||||
// SearchResults lists a collection search results returned from a registry
|
||||
type SearchResults struct {
|
||||
// Query contains the query string that generated the search results
|
||||
Query string `json:"query"`
|
||||
// NumResults indicates the number of results the query returned
|
||||
NumResults int `json:"num_results"`
|
||||
// Results is a slice containing the actual results for the search
|
||||
Results []SearchResult `json:"results"`
|
||||
}
|
||||
|
||||
// DistributionInspect describes the result obtained from contacting the
|
||||
// registry to retrieve image metadata
|
||||
type DistributionInspect struct {
|
||||
// Descriptor contains information about the manifest, including
|
||||
// the content addressable digest
|
||||
Descriptor v1.Descriptor
|
||||
// Platforms contains the list of platforms supported by the image,
|
||||
// obtained by parsing the manifest
|
||||
Platforms []v1.Platform
|
||||
}
|
94
vendor/github.com/docker/docker/api/types/seccomp.go
generated
vendored
@ -1,94 +0,0 @@
|
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
// Seccomp represents the config for a seccomp profile for syscall restriction.
|
||||
type Seccomp struct {
|
||||
DefaultAction Action `json:"defaultAction"`
|
||||
// Architectures is kept to maintain backward compatibility with the old
|
||||
// seccomp profile.
|
||||
Architectures []Arch `json:"architectures,omitempty"`
|
||||
ArchMap []Architecture `json:"archMap,omitempty"`
|
||||
Syscalls []*Syscall `json:"syscalls"`
|
||||
}
|
||||
|
||||
// Architecture is used to represent a specific architecture
|
||||
// and its sub-architectures
|
||||
type Architecture struct {
|
||||
Arch Arch `json:"architecture"`
|
||||
SubArches []Arch `json:"subArchitectures"`
|
||||
}
|
||||
|
||||
// Arch used for architectures
|
||||
type Arch string
|
||||
|
||||
// Additional architectures permitted to be used for system calls
|
||||
// By default only the native architecture of the kernel is permitted
|
||||
const (
|
||||
ArchX86 Arch = "SCMP_ARCH_X86"
|
||||
ArchX86_64 Arch = "SCMP_ARCH_X86_64"
|
||||
ArchX32 Arch = "SCMP_ARCH_X32"
|
||||
ArchARM Arch = "SCMP_ARCH_ARM"
|
||||
ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
|
||||
ArchMIPS Arch = "SCMP_ARCH_MIPS"
|
||||
ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
|
||||
ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
|
||||
ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
|
||||
ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
|
||||
ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
|
||||
ArchPPC Arch = "SCMP_ARCH_PPC"
|
||||
ArchPPC64 Arch = "SCMP_ARCH_PPC64"
|
||||
ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
|
||||
ArchS390 Arch = "SCMP_ARCH_S390"
|
||||
ArchS390X Arch = "SCMP_ARCH_S390X"
|
||||
)
|
||||
|
||||
// Action taken upon Seccomp rule match
|
||||
type Action string
|
||||
|
||||
// Define actions for Seccomp rules
|
||||
const (
|
||||
ActKill Action = "SCMP_ACT_KILL"
|
||||
ActTrap Action = "SCMP_ACT_TRAP"
|
||||
ActErrno Action = "SCMP_ACT_ERRNO"
|
||||
ActTrace Action = "SCMP_ACT_TRACE"
|
||||
ActAllow Action = "SCMP_ACT_ALLOW"
|
||||
)
|
||||
|
||||
// Operator used to match syscall arguments in Seccomp
|
||||
type Operator string
|
||||
|
||||
// Define operators for syscall arguments in Seccomp
|
||||
const (
|
||||
OpNotEqual Operator = "SCMP_CMP_NE"
|
||||
OpLessThan Operator = "SCMP_CMP_LT"
|
||||
OpLessEqual Operator = "SCMP_CMP_LE"
|
||||
OpEqualTo Operator = "SCMP_CMP_EQ"
|
||||
OpGreaterEqual Operator = "SCMP_CMP_GE"
|
||||
OpGreaterThan Operator = "SCMP_CMP_GT"
|
||||
OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
|
||||
)
|
||||
|
||||
// Arg used for matching specific syscall arguments in Seccomp
|
||||
type Arg struct {
|
||||
Index uint `json:"index"`
|
||||
Value uint64 `json:"value"`
|
||||
ValueTwo uint64 `json:"valueTwo"`
|
||||
Op Operator `json:"op"`
|
||||
}
|
||||
|
||||
// Filter is used to conditionally apply Seccomp rules
|
||||
type Filter struct {
|
||||
Caps []string `json:"caps,omitempty"`
|
||||
Arches []string `json:"arches,omitempty"`
|
||||
MinKernel string `json:"minKernel,omitempty"`
|
||||
}
|
||||
|
||||
// Syscall is used to match a group of syscalls in Seccomp
|
||||
type Syscall struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Names []string `json:"names,omitempty"`
|
||||
Action Action `json:"action"`
|
||||
Args []*Arg `json:"args"`
|
||||
Comment string `json:"comment"`
|
||||
Includes Filter `json:"includes"`
|
||||
Excludes Filter `json:"excludes"`
|
||||
}
|
12
vendor/github.com/docker/docker/api/types/service_update_response.go
generated
vendored
@ -1,12 +0,0 @@
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// ServiceUpdateResponse service update response
// swagger:model ServiceUpdateResponse
type ServiceUpdateResponse struct {

	// Optional warning messages
	Warnings []string `json:"Warnings"`
}
181
vendor/github.com/docker/docker/api/types/stats.go
generated
vendored
@ -1,181 +0,0 @@
|
||||
// Package types is used for API stability in the types and response to the
|
||||
// consumers of the API stats endpoint.
|
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
import "time"
|
||||
|
||||
// ThrottlingData stores CPU throttling stats of one running container.
|
||||
// Not used on Windows.
|
||||
type ThrottlingData struct {
|
||||
// Number of periods with throttling active
|
||||
Periods uint64 `json:"periods"`
|
||||
// Number of periods when the container hits its throttling limit.
|
||||
ThrottledPeriods uint64 `json:"throttled_periods"`
|
||||
// Aggregate time the container was throttled for in nanoseconds.
|
||||
ThrottledTime uint64 `json:"throttled_time"`
|
||||
}
|
||||
|
||||
// CPUUsage stores All CPU stats aggregated since container inception.
|
||||
type CPUUsage struct {
|
||||
// Total CPU time consumed.
|
||||
// Units: nanoseconds (Linux)
|
||||
// Units: 100's of nanoseconds (Windows)
|
||||
TotalUsage uint64 `json:"total_usage"`
|
||||
|
||||
// Total CPU time consumed per core (Linux). Not used on Windows.
|
||||
// Units: nanoseconds.
|
||||
PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
|
||||
|
||||
// Time spent by tasks of the cgroup in kernel mode (Linux).
|
||||
// Time spent by all container processes in kernel mode (Windows).
|
||||
// Units: nanoseconds (Linux).
|
||||
// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
|
||||
UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
|
||||
|
||||
// Time spent by tasks of the cgroup in user mode (Linux).
|
||||
// Time spent by all container processes in user mode (Windows).
|
||||
// Units: nanoseconds (Linux).
|
||||
// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers
|
||||
UsageInUsermode uint64 `json:"usage_in_usermode"`
|
||||
}
|
||||
|
||||
// CPUStats aggregates and wraps all CPU related info of container
|
||||
type CPUStats struct {
|
||||
// CPU Usage. Linux and Windows.
|
||||
CPUUsage CPUUsage `json:"cpu_usage"`
|
||||
|
||||
// System Usage. Linux only.
|
||||
SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
|
||||
|
||||
// Online CPUs. Linux only.
|
||||
OnlineCPUs uint32 `json:"online_cpus,omitempty"`
|
||||
|
||||
// Throttling Data. Linux only.
|
||||
ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
|
||||
}
|
||||
|
||||
// MemoryStats aggregates all memory stats since container inception on Linux.
|
||||
// Windows returns stats for commit and private working set only.
|
||||
type MemoryStats struct {
|
||||
// Linux Memory Stats
|
||||
|
||||
// current res_counter usage for memory
|
||||
Usage uint64 `json:"usage,omitempty"`
|
||||
// maximum usage ever recorded.
|
||||
MaxUsage uint64 `json:"max_usage,omitempty"`
|
||||
// TODO(vishh): Export these as stronger types.
|
||||
// all the stats exported via memory.stat.
|
||||
Stats map[string]uint64 `json:"stats,omitempty"`
|
||||
// number of times memory usage hits limits.
|
||||
Failcnt uint64 `json:"failcnt,omitempty"`
|
||||
Limit uint64 `json:"limit,omitempty"`
|
||||
|
||||
// Windows Memory Stats
|
||||
// See https://technet.microsoft.com/en-us/magazine/ff382715.aspx
|
||||
|
||||
// committed bytes
|
||||
Commit uint64 `json:"commitbytes,omitempty"`
|
||||
// peak committed bytes
|
||||
CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
|
||||
// private working set
|
||||
PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
|
||||
}
|
||||
|
||||
// BlkioStatEntry is one small entity to store a piece of Blkio stats
|
||||
// Not used on Windows.
|
||||
type BlkioStatEntry struct {
|
||||
Major uint64 `json:"major"`
|
||||
Minor uint64 `json:"minor"`
|
||||
Op string `json:"op"`
|
||||
Value uint64 `json:"value"`
|
||||
}
|
||||
|
||||
// BlkioStats stores All IO service stats for data read and write.
|
||||
// This is a Linux specific structure as the differences between expressing
|
||||
// block I/O on Windows and Linux are sufficiently significant to make
|
||||
// little sense attempting to morph into a combined structure.
|
||||
type BlkioStats struct {
|
||||
// number of bytes transferred to and from the block device
|
||||
IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
|
||||
IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
|
||||
IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
|
||||
IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
|
||||
IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
|
||||
IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
|
||||
IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
|
||||
SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
|
||||
}
|
||||
|
||||
// StorageStats is the disk I/O stats for read/write on Windows.
|
||||
type StorageStats struct {
|
||||
ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"`
|
||||
ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"`
|
||||
WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"`
|
||||
WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"`
|
||||
}
|
||||
|
||||
// NetworkStats aggregates the network stats of one container
|
||||
type NetworkStats struct {
|
||||
// Bytes received. Windows and Linux.
|
||||
RxBytes uint64 `json:"rx_bytes"`
|
||||
// Packets received. Windows and Linux.
|
||||
RxPackets uint64 `json:"rx_packets"`
|
||||
// Received errors. Not used on Windows. Note that we don't `omitempty` this
|
||||
// field as it is expected in the >=v1.21 API stats structure.
|
||||
RxErrors uint64 `json:"rx_errors"`
|
||||
// Incoming packets dropped. Windows and Linux.
|
||||
RxDropped uint64 `json:"rx_dropped"`
|
||||
// Bytes sent. Windows and Linux.
|
||||
TxBytes uint64 `json:"tx_bytes"`
|
||||
// Packets sent. Windows and Linux.
|
||||
TxPackets uint64 `json:"tx_packets"`
|
||||
// Sent errors. Not used on Windows. Note that we don't `omitempty` this
|
||||
// field as it is expected in the >=v1.21 API stats structure.
|
||||
TxErrors uint64 `json:"tx_errors"`
|
||||
// Outgoing packets dropped. Windows and Linux.
|
||||
TxDropped uint64 `json:"tx_dropped"`
|
||||
// Endpoint ID. Not used on Linux.
|
||||
EndpointID string `json:"endpoint_id,omitempty"`
|
||||
// Instance ID. Not used on Linux.
|
||||
InstanceID string `json:"instance_id,omitempty"`
|
||||
}
|
||||
|
||||
// PidsStats contains the stats of a container's pids
|
||||
type PidsStats struct {
|
||||
// Current is the number of pids in the cgroup
|
||||
Current uint64 `json:"current,omitempty"`
|
||||
// Limit is the hard limit on the number of pids in the cgroup.
|
||||
// A "Limit" of 0 means that there is no limit.
|
||||
Limit uint64 `json:"limit,omitempty"`
|
||||
}
|
||||
|
||||
// Stats is Ultimate struct aggregating all types of stats of one container
|
||||
type Stats struct {
|
||||
// Common stats
|
||||
Read time.Time `json:"read"`
|
||||
PreRead time.Time `json:"preread"`
|
||||
|
||||
// Linux specific stats, not populated on Windows.
|
||||
PidsStats PidsStats `json:"pids_stats,omitempty"`
|
||||
BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
|
||||
|
||||
// Windows specific stats, not populated on Linux.
|
||||
NumProcs uint32 `json:"num_procs"`
|
||||
StorageStats StorageStats `json:"storage_stats,omitempty"`
|
||||
|
||||
// Shared stats
|
||||
CPUStats CPUStats `json:"cpu_stats,omitempty"`
|
||||
PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
|
||||
MemoryStats MemoryStats `json:"memory_stats,omitempty"`
|
||||
}
|
||||
|
||||
// StatsJSON is newly used Networks
|
||||
type StatsJSON struct {
|
||||
Stats
|
||||
|
||||
Name string `json:"name,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
|
||||
// Networks request version >=1.21
|
||||
Networks map[string]NetworkStats `json:"networks,omitempty"`
|
||||
}
|
30
vendor/github.com/docker/docker/api/types/strslice/strslice.go
generated
vendored
@ -1,30 +0,0 @@
package strslice // import "github.com/docker/docker/api/types/strslice"

import "encoding/json"

// StrSlice represents a string or an array of strings.
// We need to override the json decoder to accept both options.
type StrSlice []string

// UnmarshalJSON decodes the byte slice whether it's a string or an array of
// strings. This method is needed to implement json.Unmarshaler.
func (e *StrSlice) UnmarshalJSON(b []byte) error {
	if len(b) == 0 {
		// With no input, we preserve the existing value by returning nil and
		// leaving the target alone. This allows defining default values for
		// the type.
		return nil
	}

	p := make([]string, 0, 1)
	if err := json.Unmarshal(b, &p); err != nil {
		var s string
		if err := json.Unmarshal(b, &s); err != nil {
			return err
		}
		p = append(p, s)
	}

	*e = p
	return nil
}
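As a reference for the decoder removed above, here is a minimal sketch of the two inputs StrSlice accepts (a bare JSON string and a JSON array). It assumes the upstream github.com/docker/docker/api/types/strslice package is still resolvable on the module path; the command strings are illustrative only.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/strslice"
)

func main() {
	var cmd strslice.StrSlice

	// A plain JSON string decodes into a one-element slice.
	if err := json.Unmarshal([]byte(`"nginx -g 'daemon off;'"`), &cmd); err != nil {
		panic(err)
	}
	fmt.Println(cmd) // [nginx -g 'daemon off;']

	// A JSON array decodes element by element.
	if err := json.Unmarshal([]byte(`["nginx", "-g", "daemon off;"]`), &cmd); err != nil {
		panic(err)
	}
	fmt.Println(cmd) // [nginx -g daemon off;]
}
```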
40
vendor/github.com/docker/docker/api/types/swarm/common.go
generated
vendored
@ -1,40 +0,0 @@
package swarm // import "github.com/docker/docker/api/types/swarm"

import "time"

// Version represents the internal object version.
type Version struct {
	Index uint64 `json:",omitempty"`
}

// Meta is a base object inherited by most of the other once.
type Meta struct {
	Version   Version   `json:",omitempty"`
	CreatedAt time.Time `json:",omitempty"`
	UpdatedAt time.Time `json:",omitempty"`
}

// Annotations represents how to describe an object.
type Annotations struct {
	Name   string            `json:",omitempty"`
	Labels map[string]string `json:"Labels"`
}

// Driver represents a driver (network, logging, secrets backend).
type Driver struct {
	Name    string            `json:",omitempty"`
	Options map[string]string `json:",omitempty"`
}

// TLSInfo represents the TLS information about what CA certificate is trusted,
// and who the issuer for a TLS certificate is
type TLSInfo struct {
	// TrustRoot is the trusted CA root certificate in PEM format
	TrustRoot string `json:",omitempty"`

	// CertIssuer is the raw subject bytes of the issuer
	CertIssuerSubject []byte `json:",omitempty"`

	// CertIssuerPublicKey is the raw public key bytes of the issuer
	CertIssuerPublicKey []byte `json:",omitempty"`
}
40
vendor/github.com/docker/docker/api/types/swarm/config.go
generated
vendored
@ -1,40 +0,0 @@
package swarm // import "github.com/docker/docker/api/types/swarm"

import "os"

// Config represents a config.
type Config struct {
	ID string
	Meta
	Spec ConfigSpec
}

// ConfigSpec represents a config specification from a config in swarm
type ConfigSpec struct {
	Annotations
	Data []byte `json:",omitempty"`

	// Templating controls whether and how to evaluate the config payload as
	// a template. If it is not set, no templating is used.
	Templating *Driver `json:",omitempty"`
}

// ConfigReferenceFileTarget is a file target in a config reference
type ConfigReferenceFileTarget struct {
	Name string
	UID  string
	GID  string
	Mode os.FileMode
}

// ConfigReferenceRuntimeTarget is a target for a config specifying that it
// isn't mounted into the container but instead has some other purpose.
type ConfigReferenceRuntimeTarget struct{}

// ConfigReference is a reference to a config in swarm
type ConfigReference struct {
	File       *ConfigReferenceFileTarget    `json:",omitempty"`
	Runtime    *ConfigReferenceRuntimeTarget `json:",omitempty"`
	ConfigID   string
	ConfigName string
}
77
vendor/github.com/docker/docker/api/types/swarm/container.go
generated
vendored
@ -1,77 +0,0 @@
|
||||
package swarm // import "github.com/docker/docker/api/types/swarm"
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
)
|
||||
|
||||
// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
|
||||
// Detailed documentation is available in:
|
||||
// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
|
||||
// `nameserver`, `search`, `options` have been supported.
|
||||
// TODO: `domain` is not supported yet.
|
||||
type DNSConfig struct {
|
||||
// Nameservers specifies the IP addresses of the name servers
|
||||
Nameservers []string `json:",omitempty"`
|
||||
// Search specifies the search list for host-name lookup
|
||||
Search []string `json:",omitempty"`
|
||||
// Options allows certain internal resolver variables to be modified
|
||||
Options []string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// SELinuxContext contains the SELinux labels of the container.
|
||||
type SELinuxContext struct {
|
||||
Disable bool
|
||||
|
||||
User string
|
||||
Role string
|
||||
Type string
|
||||
Level string
|
||||
}
|
||||
|
||||
// CredentialSpec for managed service account (Windows only)
|
||||
type CredentialSpec struct {
|
||||
Config string
|
||||
File string
|
||||
Registry string
|
||||
}
|
||||
|
||||
// Privileges defines the security options for the container.
|
||||
type Privileges struct {
|
||||
CredentialSpec *CredentialSpec
|
||||
SELinuxContext *SELinuxContext
|
||||
}
|
||||
|
||||
// ContainerSpec represents the spec of a container.
|
||||
type ContainerSpec struct {
|
||||
Image string `json:",omitempty"`
|
||||
Labels map[string]string `json:",omitempty"`
|
||||
Command []string `json:",omitempty"`
|
||||
Args []string `json:",omitempty"`
|
||||
Hostname string `json:",omitempty"`
|
||||
Env []string `json:",omitempty"`
|
||||
Dir string `json:",omitempty"`
|
||||
User string `json:",omitempty"`
|
||||
Groups []string `json:",omitempty"`
|
||||
Privileges *Privileges `json:",omitempty"`
|
||||
Init *bool `json:",omitempty"`
|
||||
StopSignal string `json:",omitempty"`
|
||||
TTY bool `json:",omitempty"`
|
||||
OpenStdin bool `json:",omitempty"`
|
||||
ReadOnly bool `json:",omitempty"`
|
||||
Mounts []mount.Mount `json:",omitempty"`
|
||||
StopGracePeriod *time.Duration `json:",omitempty"`
|
||||
Healthcheck *container.HealthConfig `json:",omitempty"`
|
||||
// The format of extra hosts on swarmkit is specified in:
|
||||
// http://man7.org/linux/man-pages/man5/hosts.5.html
|
||||
// IP_address canonical_hostname [aliases...]
|
||||
Hosts []string `json:",omitempty"`
|
||||
DNSConfig *DNSConfig `json:",omitempty"`
|
||||
Secrets []*SecretReference `json:",omitempty"`
|
||||
Configs []*ConfigReference `json:",omitempty"`
|
||||
Isolation container.Isolation `json:",omitempty"`
|
||||
Sysctls map[string]string `json:",omitempty"`
|
||||
Capabilities []string `json:",omitempty"`
|
||||
}
|
121
vendor/github.com/docker/docker/api/types/swarm/network.go
generated
vendored
@ -1,121 +0,0 @@
|
||||
package swarm // import "github.com/docker/docker/api/types/swarm"
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types/network"
|
||||
)
|
||||
|
||||
// Endpoint represents an endpoint.
|
||||
type Endpoint struct {
|
||||
Spec EndpointSpec `json:",omitempty"`
|
||||
Ports []PortConfig `json:",omitempty"`
|
||||
VirtualIPs []EndpointVirtualIP `json:",omitempty"`
|
||||
}
|
||||
|
||||
// EndpointSpec represents the spec of an endpoint.
|
||||
type EndpointSpec struct {
|
||||
Mode ResolutionMode `json:",omitempty"`
|
||||
Ports []PortConfig `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ResolutionMode represents a resolution mode.
|
||||
type ResolutionMode string
|
||||
|
||||
const (
|
||||
// ResolutionModeVIP VIP
|
||||
ResolutionModeVIP ResolutionMode = "vip"
|
||||
// ResolutionModeDNSRR DNSRR
|
||||
ResolutionModeDNSRR ResolutionMode = "dnsrr"
|
||||
)
|
||||
|
||||
// PortConfig represents the config of a port.
|
||||
type PortConfig struct {
|
||||
Name string `json:",omitempty"`
|
||||
Protocol PortConfigProtocol `json:",omitempty"`
|
||||
// TargetPort is the port inside the container
|
||||
TargetPort uint32 `json:",omitempty"`
|
||||
// PublishedPort is the port on the swarm hosts
|
||||
PublishedPort uint32 `json:",omitempty"`
|
||||
// PublishMode is the mode in which port is published
|
||||
PublishMode PortConfigPublishMode `json:",omitempty"`
|
||||
}
|
||||
|
||||
// PortConfigPublishMode represents the mode in which the port is to
|
||||
// be published.
|
||||
type PortConfigPublishMode string
|
||||
|
||||
const (
|
||||
// PortConfigPublishModeIngress is used for ports published
|
||||
// for ingress load balancing using routing mesh.
|
||||
PortConfigPublishModeIngress PortConfigPublishMode = "ingress"
|
||||
// PortConfigPublishModeHost is used for ports published
|
||||
// for direct host level access on the host where the task is running.
|
||||
PortConfigPublishModeHost PortConfigPublishMode = "host"
|
||||
)
|
||||
|
||||
// PortConfigProtocol represents the protocol of a port.
|
||||
type PortConfigProtocol string
|
||||
|
||||
const (
|
||||
// TODO(stevvooe): These should be used generally, not just for PortConfig.
|
||||
|
||||
// PortConfigProtocolTCP TCP
|
||||
PortConfigProtocolTCP PortConfigProtocol = "tcp"
|
||||
// PortConfigProtocolUDP UDP
|
||||
PortConfigProtocolUDP PortConfigProtocol = "udp"
|
||||
// PortConfigProtocolSCTP SCTP
|
||||
PortConfigProtocolSCTP PortConfigProtocol = "sctp"
|
||||
)
|
||||
|
||||
// EndpointVirtualIP represents the virtual ip of a port.
|
||||
type EndpointVirtualIP struct {
|
||||
NetworkID string `json:",omitempty"`
|
||||
Addr string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Network represents a network.
|
||||
type Network struct {
|
||||
ID string
|
||||
Meta
|
||||
Spec NetworkSpec `json:",omitempty"`
|
||||
DriverState Driver `json:",omitempty"`
|
||||
IPAMOptions *IPAMOptions `json:",omitempty"`
|
||||
}
|
||||
|
||||
// NetworkSpec represents the spec of a network.
|
||||
type NetworkSpec struct {
|
||||
Annotations
|
||||
DriverConfiguration *Driver `json:",omitempty"`
|
||||
IPv6Enabled bool `json:",omitempty"`
|
||||
Internal bool `json:",omitempty"`
|
||||
Attachable bool `json:",omitempty"`
|
||||
Ingress bool `json:",omitempty"`
|
||||
IPAMOptions *IPAMOptions `json:",omitempty"`
|
||||
ConfigFrom *network.ConfigReference `json:",omitempty"`
|
||||
Scope string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// NetworkAttachmentConfig represents the configuration of a network attachment.
|
||||
type NetworkAttachmentConfig struct {
|
||||
Target string `json:",omitempty"`
|
||||
Aliases []string `json:",omitempty"`
|
||||
DriverOpts map[string]string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// NetworkAttachment represents a network attachment.
|
||||
type NetworkAttachment struct {
|
||||
Network Network `json:",omitempty"`
|
||||
Addresses []string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// IPAMOptions represents ipam options.
|
||||
type IPAMOptions struct {
|
||||
Driver Driver `json:",omitempty"`
|
||||
Configs []IPAMConfig `json:",omitempty"`
|
||||
}
|
||||
|
||||
// IPAMConfig represents ipam configuration.
|
||||
type IPAMConfig struct {
|
||||
Subnet string `json:",omitempty"`
|
||||
Range string `json:",omitempty"`
|
||||
Gateway string `json:",omitempty"`
|
||||
}
|
115
vendor/github.com/docker/docker/api/types/swarm/node.go
generated
vendored
115
vendor/github.com/docker/docker/api/types/swarm/node.go
generated
vendored
@ -1,115 +0,0 @@
|
||||
package swarm // import "github.com/docker/docker/api/types/swarm"
|
||||
|
||||
// Node represents a node.
|
||||
type Node struct {
|
||||
ID string
|
||||
Meta
|
||||
// Spec defines the desired state of the node as specified by the user.
|
||||
// The system will honor this and will *never* modify it.
|
||||
Spec NodeSpec `json:",omitempty"`
|
||||
// Description encapsulates the properties of the Node as reported by the
|
||||
// agent.
|
||||
Description NodeDescription `json:",omitempty"`
|
||||
// Status provides the current status of the node, as seen by the manager.
|
||||
Status NodeStatus `json:",omitempty"`
|
||||
// ManagerStatus provides the current status of the node's manager
|
||||
// component, if the node is a manager.
|
||||
ManagerStatus *ManagerStatus `json:",omitempty"`
|
||||
}
|
||||
|
||||
// NodeSpec represents the spec of a node.
|
||||
type NodeSpec struct {
|
||||
Annotations
|
||||
Role NodeRole `json:",omitempty"`
|
||||
Availability NodeAvailability `json:",omitempty"`
|
||||
}
|
||||
|
||||
// NodeRole represents the role of a node.
|
||||
type NodeRole string
|
||||
|
||||
const (
|
||||
// NodeRoleWorker WORKER
|
||||
NodeRoleWorker NodeRole = "worker"
|
||||
// NodeRoleManager MANAGER
|
||||
NodeRoleManager NodeRole = "manager"
|
||||
)
|
||||
|
||||
// NodeAvailability represents the availability of a node.
|
||||
type NodeAvailability string
|
||||
|
||||
const (
|
||||
// NodeAvailabilityActive ACTIVE
|
||||
NodeAvailabilityActive NodeAvailability = "active"
|
||||
// NodeAvailabilityPause PAUSE
|
||||
NodeAvailabilityPause NodeAvailability = "pause"
|
||||
// NodeAvailabilityDrain DRAIN
|
||||
NodeAvailabilityDrain NodeAvailability = "drain"
|
||||
)
|
||||
|
||||
// NodeDescription represents the description of a node.
|
||||
type NodeDescription struct {
|
||||
Hostname string `json:",omitempty"`
|
||||
Platform Platform `json:",omitempty"`
|
||||
Resources Resources `json:",omitempty"`
|
||||
Engine EngineDescription `json:",omitempty"`
|
||||
TLSInfo TLSInfo `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Platform represents the platform (Arch/OS).
|
||||
type Platform struct {
|
||||
Architecture string `json:",omitempty"`
|
||||
OS string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// EngineDescription represents the description of an engine.
|
||||
type EngineDescription struct {
|
||||
EngineVersion string `json:",omitempty"`
|
||||
Labels map[string]string `json:",omitempty"`
|
||||
Plugins []PluginDescription `json:",omitempty"`
|
||||
}
|
||||
|
||||
// PluginDescription represents the description of an engine plugin.
|
||||
type PluginDescription struct {
|
||||
Type string `json:",omitempty"`
|
||||
Name string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// NodeStatus represents the status of a node.
|
||||
type NodeStatus struct {
|
||||
State NodeState `json:",omitempty"`
|
||||
Message string `json:",omitempty"`
|
||||
Addr string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Reachability represents the reachability of a node.
|
||||
type Reachability string
|
||||
|
||||
const (
|
||||
// ReachabilityUnknown UNKNOWN
|
||||
ReachabilityUnknown Reachability = "unknown"
|
||||
// ReachabilityUnreachable UNREACHABLE
|
||||
ReachabilityUnreachable Reachability = "unreachable"
|
||||
// ReachabilityReachable REACHABLE
|
||||
ReachabilityReachable Reachability = "reachable"
|
||||
)
|
||||
|
||||
// ManagerStatus represents the status of a manager.
|
||||
type ManagerStatus struct {
|
||||
Leader bool `json:",omitempty"`
|
||||
Reachability Reachability `json:",omitempty"`
|
||||
Addr string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// NodeState represents the state of a node.
|
||||
type NodeState string
|
||||
|
||||
const (
|
||||
// NodeStateUnknown UNKNOWN
|
||||
NodeStateUnknown NodeState = "unknown"
|
||||
// NodeStateDown DOWN
|
||||
NodeStateDown NodeState = "down"
|
||||
// NodeStateReady READY
|
||||
NodeStateReady NodeState = "ready"
|
||||
// NodeStateDisconnected DISCONNECTED
|
||||
NodeStateDisconnected NodeState = "disconnected"
|
||||
)
|
27
vendor/github.com/docker/docker/api/types/swarm/runtime.go
generated
vendored
27
vendor/github.com/docker/docker/api/types/swarm/runtime.go
generated
vendored
@ -1,27 +0,0 @@
|
||||
package swarm // import "github.com/docker/docker/api/types/swarm"
|
||||
|
||||
// RuntimeType is the type of runtime used for the TaskSpec
|
||||
type RuntimeType string
|
||||
|
||||
// RuntimeURL is the proto type url
|
||||
type RuntimeURL string
|
||||
|
||||
const (
|
||||
// RuntimeContainer is the container based runtime
|
||||
RuntimeContainer RuntimeType = "container"
|
||||
// RuntimePlugin is the plugin based runtime
|
||||
RuntimePlugin RuntimeType = "plugin"
|
||||
// RuntimeNetworkAttachment is the network attachment runtime
|
||||
RuntimeNetworkAttachment RuntimeType = "attachment"
|
||||
|
||||
// RuntimeURLContainer is the proto url for the container type
|
||||
RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer"
|
||||
// RuntimeURLPlugin is the proto url for the plugin type
|
||||
RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin"
|
||||
)
|
||||
|
||||
// NetworkAttachmentSpec represents the runtime spec type for network
|
||||
// attachment tasks
|
||||
type NetworkAttachmentSpec struct {
|
||||
ContainerID string
|
||||
}
|
3
vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go
generated
vendored
3
vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go
generated
vendored
@ -1,3 +0,0 @@
|
||||
//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto
|
||||
|
||||
package runtime // import "github.com/docker/docker/api/types/swarm/runtime"
|
754
vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go
generated
vendored
754
vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go
generated
vendored
@ -1,754 +0,0 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: plugin.proto
|
||||
|
||||
/*
|
||||
Package runtime is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
plugin.proto
|
||||
|
||||
It has these top-level messages:
|
||||
PluginSpec
|
||||
PluginPrivilege
|
||||
*/
|
||||
package runtime
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import io "io"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// PluginSpec defines the base payload which clients can specify for creating
|
||||
// a service with the plugin runtime.
|
||||
type PluginSpec struct {
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"`
|
||||
Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"`
|
||||
Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"`
|
||||
Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"`
|
||||
}
|
||||
|
||||
func (m *PluginSpec) Reset() { *m = PluginSpec{} }
|
||||
func (m *PluginSpec) String() string { return proto.CompactTextString(m) }
|
||||
func (*PluginSpec) ProtoMessage() {}
|
||||
func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} }
|
||||
|
||||
func (m *PluginSpec) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *PluginSpec) GetRemote() string {
|
||||
if m != nil {
|
||||
return m.Remote
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *PluginSpec) GetPrivileges() []*PluginPrivilege {
|
||||
if m != nil {
|
||||
return m.Privileges
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PluginSpec) GetDisabled() bool {
|
||||
if m != nil {
|
||||
return m.Disabled
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *PluginSpec) GetEnv() []string {
|
||||
if m != nil {
|
||||
return m.Env
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PluginPrivilege describes a permission the user has to accept
|
||||
// upon installing a plugin.
|
||||
type PluginPrivilege struct {
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
|
||||
Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} }
|
||||
func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) }
|
||||
func (*PluginPrivilege) ProtoMessage() {}
|
||||
func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} }
|
||||
|
||||
func (m *PluginPrivilege) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *PluginPrivilege) GetDescription() string {
|
||||
if m != nil {
|
||||
return m.Description
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *PluginPrivilege) GetValue() []string {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*PluginSpec)(nil), "PluginSpec")
|
||||
proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege")
|
||||
}
|
||||
func (m *PluginSpec) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Name) > 0 {
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
|
||||
i += copy(dAtA[i:], m.Name)
|
||||
}
|
||||
if len(m.Remote) > 0 {
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote)))
|
||||
i += copy(dAtA[i:], m.Remote)
|
||||
}
|
||||
if len(m.Privileges) > 0 {
|
||||
for _, msg := range m.Privileges {
|
||||
dAtA[i] = 0x1a
|
||||
i++
|
||||
i = encodeVarintPlugin(dAtA, i, uint64(msg.Size()))
|
||||
n, err := msg.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n
|
||||
}
|
||||
}
|
||||
if m.Disabled {
|
||||
dAtA[i] = 0x20
|
||||
i++
|
||||
if m.Disabled {
|
||||
dAtA[i] = 1
|
||||
} else {
|
||||
dAtA[i] = 0
|
||||
}
|
||||
i++
|
||||
}
|
||||
if len(m.Env) > 0 {
|
||||
for _, s := range m.Env {
|
||||
dAtA[i] = 0x2a
|
||||
i++
|
||||
l = len(s)
|
||||
for l >= 1<<7 {
|
||||
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||
l >>= 7
|
||||
i++
|
||||
}
|
||||
dAtA[i] = uint8(l)
|
||||
i++
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Name) > 0 {
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
|
||||
i += copy(dAtA[i:], m.Name)
|
||||
}
|
||||
if len(m.Description) > 0 {
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description)))
|
||||
i += copy(dAtA[i:], m.Description)
|
||||
}
|
||||
if len(m.Value) > 0 {
|
||||
for _, s := range m.Value {
|
||||
dAtA[i] = 0x1a
|
||||
i++
|
||||
l = len(s)
|
||||
for l >= 1<<7 {
|
||||
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||
l >>= 7
|
||||
i++
|
||||
}
|
||||
dAtA[i] = uint8(l)
|
||||
i++
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
|
||||
func (m *PluginSpec) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Name)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovPlugin(uint64(l))
|
||||
}
|
||||
l = len(m.Remote)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovPlugin(uint64(l))
|
||||
}
|
||||
if len(m.Privileges) > 0 {
|
||||
for _, e := range m.Privileges {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovPlugin(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.Disabled {
|
||||
n += 2
|
||||
}
|
||||
if len(m.Env) > 0 {
|
||||
for _, s := range m.Env {
|
||||
l = len(s)
|
||||
n += 1 + l + sovPlugin(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *PluginPrivilege) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Name)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovPlugin(uint64(l))
|
||||
}
|
||||
l = len(m.Description)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovPlugin(uint64(l))
|
||||
}
|
||||
if len(m.Value) > 0 {
|
||||
for _, s := range m.Value {
|
||||
l = len(s)
|
||||
n += 1 + l + sovPlugin(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovPlugin(x uint64) (n int) {
|
||||
for {
|
||||
n++
|
||||
x >>= 7
|
||||
if x == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
func sozPlugin(x uint64) (n int) {
|
||||
return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *PluginSpec) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Name = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Remote = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Privileges = append(m.Privileges, &PluginPrivilege{})
|
||||
if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 4:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType)
|
||||
}
|
||||
var v int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
m.Disabled = bool(v != 0)
|
||||
case 5:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Env = append(m.Env, string(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipPlugin(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *PluginPrivilege) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Name = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Description = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Value = append(m.Value, string(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipPlugin(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipPlugin(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
return iNdEx, nil
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthPlugin
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
var innerWire uint64
|
||||
var start int = iNdEx
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
innerWire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
innerWireType := int(innerWire & 0x7)
|
||||
if innerWireType == 4 {
|
||||
break
|
||||
}
|
||||
next, err := skipPlugin(dAtA[start:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
return iNdEx, nil
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
return iNdEx, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) }
|
||||
|
||||
var fileDescriptorPlugin = []byte{
|
||||
// 256 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30,
|
||||
0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a,
|
||||
0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17,
|
||||
0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64,
|
||||
0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e,
|
||||
0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64,
|
||||
0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4,
|
||||
0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec,
|
||||
0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9,
|
||||
0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 0xcc, 0xb3, 0x28, 0x8f, 0xd9,
|
||||
0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6,
|
||||
0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb,
|
||||
0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8,
|
||||
0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb,
|
||||
0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38,
|
||||
0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00,
|
||||
}
|
21
vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto
generated
vendored
21
vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto
generated
vendored
@ -1,21 +0,0 @@
|
||||
syntax = "proto3";
|
||||
|
||||
option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime";
|
||||
|
||||
// PluginSpec defines the base payload which clients can specify for creating
|
||||
// a service with the plugin runtime.
|
||||
message PluginSpec {
|
||||
string name = 1;
|
||||
string remote = 2;
|
||||
repeated PluginPrivilege privileges = 3;
|
||||
bool disabled = 4;
|
||||
repeated string env = 5;
|
||||
}
|
||||
|
||||
// PluginPrivilege describes a permission the user has to accept
|
||||
// upon installing a plugin.
|
||||
message PluginPrivilege {
|
||||
string name = 1;
|
||||
string description = 2;
|
||||
repeated string value = 3;
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user