Add support for group-based node volume binds + refactoring
This commit is contained in:
parent
1e0aac19f6
commit
9b888c8216
@ -148,30 +148,9 @@ func CreateCluster(c *cli.Context) error {
|
||||
}
|
||||
volumes := c.StringSlice("volume")
|
||||
|
||||
volumesSpec := Volumes{
|
||||
DefaultVolumes: []string{},
|
||||
NodeSpecificVolumes: make(map[string][]string),
|
||||
}
|
||||
for _, volume := range volumes {
|
||||
if strings.Contains(volume, "@") {
|
||||
split := strings.Split(volume, "@")
|
||||
if len(split) != 2 {
|
||||
return fmt.Errorf("invalid node volume spec: %s", volume)
|
||||
}
|
||||
|
||||
nodeVolumes := split[0]
|
||||
node := split[1]
|
||||
if len(node) == 0 {
|
||||
return fmt.Errorf("invalid node volume spec: %s", volume)
|
||||
}
|
||||
|
||||
if _, ok := volumesSpec.NodeSpecificVolumes[node]; !ok {
|
||||
volumesSpec.NodeSpecificVolumes[node] = []string{}
|
||||
}
|
||||
volumesSpec.NodeSpecificVolumes[node] = append(volumesSpec.NodeSpecificVolumes[node], nodeVolumes)
|
||||
} else {
|
||||
volumesSpec.DefaultVolumes = append(volumesSpec.DefaultVolumes, volume)
|
||||
}
|
||||
volumesSpec, err := NewVolumes(volumes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
volumesSpec.DefaultVolumes = append(volumesSpec.DefaultVolumes, fmt.Sprintf("%s:/images", imageVolume.Name))
|
||||
|
||||
@ -30,7 +30,7 @@ type ClusterSpec struct {
|
||||
NodeToPortSpecMap map[string][]string
|
||||
PortAutoOffset int
|
||||
ServerArgs []string
|
||||
Volumes Volumes
|
||||
Volumes *Volumes
|
||||
}
|
||||
|
||||
func startContainer(config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (string, error) {
|
||||
@ -118,7 +118,7 @@ func createServer(spec *ClusterSpec) (string, error) {
|
||||
hostConfig.RestartPolicy.Name = "unless-stopped"
|
||||
}
|
||||
|
||||
addVolumesToHostConfig(spec.Volumes, containerName, hostConfig)
|
||||
spec.Volumes.addVolumesToHostConfig(containerName, "server", hostConfig)
|
||||
|
||||
networkingConfig := &network.NetworkingConfig{
|
||||
EndpointsConfig: map[string]*network.EndpointSettings{
|
||||
@ -185,7 +185,7 @@ func createWorker(spec *ClusterSpec, postfix int) (string, error) {
|
||||
hostConfig.RestartPolicy.Name = "unless-stopped"
|
||||
}
|
||||
|
||||
addVolumesToHostConfig(spec.Volumes, containerName, hostConfig)
|
||||
spec.Volumes.addVolumesToHostConfig(containerName, "worker", hostConfig)
|
||||
|
||||
networkingConfig := &network.NetworkingConfig{
|
||||
EndpointsConfig: map[string]*network.EndpointSettings{
|
||||
@ -230,17 +230,3 @@ func removeContainer(ID string) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// addVolumesToHostConfig adds all default volumes and node specific volumes to a HostConfig
|
||||
func addVolumesToHostConfig(volumesSpec Volumes, containerName string, hostConfig *container.HostConfig) {
|
||||
if len(volumesSpec.DefaultVolumes) > 0 {
|
||||
volumes := volumesSpec.DefaultVolumes
|
||||
|
||||
// add node specific volumes if present
|
||||
if v, ok := volumesSpec.NodeSpecificVolumes[containerName]; ok {
|
||||
volumes = append(volumes, v...)
|
||||
}
|
||||
|
||||
hostConfig.Binds = volumes
|
||||
}
|
||||
}
|
||||
|
||||
@ -14,15 +14,6 @@ type PublishedPorts struct {
|
||||
PortBindings map[nat.Port][]nat.PortBinding
|
||||
}
|
||||
|
||||
// defaultNodes describes the type of nodes on which a port should be exposed by default
const defaultNodes = "server"

// nodeRuleGroupsMap maps a node role to the selector groups that apply to it.
var nodeRuleGroupsMap = map[string][]string{
	"server": []string{"all", "server", "master"},
	"worker": []string{"all", "workers"},
}
|
||||
|
||||
// mapNodesToPortSpecs maps nodes to portSpecs
|
||||
func mapNodesToPortSpecs(specs []string, createdNodes []string) (map[string][]string, error) {
|
||||
|
||||
|
||||
14
cli/util.go
14
cli/util.go
@ -15,11 +15,6 @@ type apiPort struct {
|
||||
Port string
|
||||
}
|
||||
|
||||
// Volumes groups the volume bind specs parsed from the CLI: plain specs that
// apply to every node, plus specs targeted at a single node via "volume@node".
type Volumes struct {
	// DefaultVolumes are bind specs applied to every node of the cluster.
	DefaultVolumes []string
	// NodeSpecificVolumes maps a node (container) name to its extra bind specs.
	NodeSpecificVolumes map[string][]string
}
|
||||
|
||||
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
const (
|
||||
letterIdxBits = 6 // 6 bits to represent a letter index
|
||||
@ -27,6 +22,15 @@ const (
|
||||
letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
|
||||
)
|
||||
|
||||
// defaultNodes is the node group that specs apply to when no selector is
// given. (The comment previously said "master" while the value is "server",
// which was misleading; in this project the server node IS the master.)
const defaultNodes = "server"

// nodeRuleGroupsMap maps a node role to the selector groups that apply to it.
var nodeRuleGroupsMap = map[string][]string{
	"worker": {"all", "workers"},
	"server": {"all", "server", "master"},
}
|
||||
|
||||
var src = rand.NewSource(time.Now().UnixNano())
|
||||
|
||||
// GenerateRandomString thanks to https://stackoverflow.com/a/31832326/6450189
|
||||
|
||||
@ -3,13 +3,21 @@ package run
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
// Volumes holds the volume bind specs parsed from the CLI: specs that apply
// to every node, specs targeted at a single named node ("volume@nodename"),
// and specs targeted at a node group ("volume@group", e.g. "all", "workers").
type Volumes struct {
	// DefaultVolumes are bind specs applied to every node of the cluster.
	DefaultVolumes []string
	// NodeSpecificVolumes maps a node (container) name to its extra bind specs.
	NodeSpecificVolumes map[string][]string
	// GroupSpecificVolumes maps a node group name to its extra bind specs.
	GroupSpecificVolumes map[string][]string
}
|
||||
|
||||
// createImageVolume will create a new docker volume used for storing image tarballs that can be loaded into the clusters
|
||||
func createImageVolume(clusterName string) (types.Volume, error) {
|
||||
|
||||
@ -90,3 +98,68 @@ func getImageVolume(clusterName string) (types.Volume, error) {
|
||||
|
||||
return vol, nil
|
||||
}
|
||||
|
||||
func NewVolumes(volumes []string) (*Volumes, error) {
|
||||
volumesSpec := &Volumes{
|
||||
DefaultVolumes: []string{},
|
||||
NodeSpecificVolumes: make(map[string][]string),
|
||||
GroupSpecificVolumes: make(map[string][]string),
|
||||
}
|
||||
for _, volume := range volumes {
|
||||
if strings.Contains(volume, "@") {
|
||||
split := strings.Split(volume, "@")
|
||||
if len(split) != 2 {
|
||||
return nil, fmt.Errorf("invalid node volume spec: %s", volume)
|
||||
}
|
||||
|
||||
nodeVolumes := split[0]
|
||||
node := strings.ToLower(split[1])
|
||||
if len(node) == 0 {
|
||||
return nil, fmt.Errorf("invalid node volume spec: %s", volume)
|
||||
}
|
||||
|
||||
// check if node selector is a node group
|
||||
if _, ok := nodeRuleGroupsMap[node]; ok {
|
||||
volumesSpec.addGroupSpecificVolume(node, nodeVolumes)
|
||||
}
|
||||
|
||||
// otherwise this is a volume for a specific node
|
||||
volumesSpec.addNodeSpecificVolume(node, nodeVolumes)
|
||||
} else {
|
||||
volumesSpec.DefaultVolumes = append(volumesSpec.DefaultVolumes, volume)
|
||||
}
|
||||
}
|
||||
|
||||
return volumesSpec, nil
|
||||
}
|
||||
|
||||
// addVolumesToHostConfig adds all default volumes and node / group specific volumes to a HostConfig
|
||||
func (v Volumes) addVolumesToHostConfig(containerName string, groupName string, hostConfig *container.HostConfig) {
|
||||
volumes := v.DefaultVolumes
|
||||
|
||||
if v, ok := v.NodeSpecificVolumes[containerName]; ok {
|
||||
volumes = append(volumes, v...)
|
||||
}
|
||||
|
||||
if v, ok := v.GroupSpecificVolumes[groupName]; ok {
|
||||
volumes = append(volumes, v...)
|
||||
}
|
||||
|
||||
if len(volumes) > 0 {
|
||||
hostConfig.Binds = volumes
|
||||
}
|
||||
}
|
||||
|
||||
func (v *Volumes) addNodeSpecificVolume(node, volume string) {
|
||||
if _, ok := v.NodeSpecificVolumes[node]; !ok {
|
||||
v.NodeSpecificVolumes[node] = []string{}
|
||||
}
|
||||
v.NodeSpecificVolumes[node] = append(v.NodeSpecificVolumes[node], volume)
|
||||
}
|
||||
|
||||
func (v *Volumes) addGroupSpecificVolume(group, volume string) {
|
||||
if _, ok := v.GroupSpecificVolumes[group]; !ok {
|
||||
v.GroupSpecificVolumes[group] = []string{}
|
||||
}
|
||||
v.GroupSpecificVolumes[group] = append(v.GroupSpecificVolumes[group], volume)
|
||||
}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user