init clusteredit
This commit is contained in:
parent
2162504ee1
commit
831e58fdce
120
cmd/cluster/clusterEdit.go
Normal file
120
cmd/cluster/clusterEdit.go
Normal file
@ -0,0 +1,120 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"github.com/rancher/k3d/v4/cmd/util"
|
||||
cliutil "github.com/rancher/k3d/v4/cmd/util"
|
||||
"github.com/rancher/k3d/v4/pkg/client"
|
||||
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
|
||||
"github.com/rancher/k3d/v4/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v4/pkg/types"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// NewCmdNodeEdit returns a new cobra command
|
||||
func NewCmdNodeEdit() *cobra.Command {
|
||||
|
||||
// create new cobra command
|
||||
cmd := &cobra.Command{
|
||||
Use: "edit NAME",
|
||||
Short: "[EXPERIMENTAL] Edit cluster(s).",
|
||||
Long: `[EXPERIMENTAL] Edit cluster(s).`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
Aliases: []string{"update"},
|
||||
ValidArgsFunction: util.ValidArgsAvailableNodes,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
existingCluster, changeset := parseEditClusterCmd(cmd, args)
|
||||
|
||||
log.Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingCluster, changeset)
|
||||
|
||||
log.Infof("Successfully updated %s", existingCluster.Name)
|
||||
|
||||
},
|
||||
}
|
||||
|
||||
// add subcommands
|
||||
|
||||
// add flags
|
||||
cmd.Flags().StringArray("port-add", nil, "[EXPERIMENTAL] Map ports from the node containers (via the serverlb) to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d node edit k3d-mycluster-serverlb --port-add 8080:80`")
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
||||
|
||||
// parseEditClusterCmd parses the command input into variables required to delete nodes
|
||||
func parseEditClusterCmd(cmd *cobra.Command, args []string) (*k3d.Cluster, *conf.SimpleConfig) {
|
||||
|
||||
existingCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: args[0]})
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
if existingCluster == nil {
|
||||
log.Infof("Cluster %s not found", args[0])
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
changeset := conf.SimpleConfig{}
|
||||
|
||||
/*
|
||||
* --port-add
|
||||
*/
|
||||
portFlags, err := cmd.Flags().GetStringArray("port-add")
|
||||
if err != nil {
|
||||
log.Errorln(err)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// init portmap
|
||||
changeset.Ports = []conf.PortWithNodeFilters{}
|
||||
|
||||
portFilterMap := make(map[string][]string, 1)
|
||||
for _, portFlag := range portFlags {
|
||||
|
||||
// split node filter from the specified volume
|
||||
portmap, filters, err := cliutil.SplitFiltersFromFlag(portFlag)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
// create new entry or append filter to existing entry
|
||||
if _, exists := portFilterMap[portmap]; exists {
|
||||
log.Fatalln("Same Portmapping can not be used for multiple nodes")
|
||||
} else {
|
||||
portFilterMap[portmap] = filters
|
||||
}
|
||||
}
|
||||
|
||||
for port, nodeFilters := range portFilterMap {
|
||||
changeset.Ports = append(changeset.Ports, conf.PortWithNodeFilters{
|
||||
Port: port,
|
||||
NodeFilters: nodeFilters,
|
||||
})
|
||||
}
|
||||
|
||||
log.Tracef("PortFilterMap: %+v", portFilterMap)
|
||||
|
||||
return existingCluster, &changeset
|
||||
}
|
||||
@ -29,6 +29,7 @@ import (
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
gort "runtime"
|
||||
@ -36,6 +37,7 @@ import (
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/imdario/mergo"
|
||||
"github.com/rancher/k3d/v4/pkg/actions"
|
||||
conftypes "github.com/rancher/k3d/v4/pkg/config/types"
|
||||
config "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
|
||||
k3drt "github.com/rancher/k3d/v4/pkg/runtimes"
|
||||
"github.com/rancher/k3d/v4/pkg/runtimes/docker"
|
||||
@ -779,6 +781,26 @@ func ClusterGet(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster
|
||||
if !overwroteExisting {
|
||||
cluster.Nodes = append(cluster.Nodes, node)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Loadbalancer
|
||||
if cluster.ServerLoadBalancer == nil {
|
||||
for _, node := range cluster.Nodes {
|
||||
if node.Role == k3d.LoadBalancerRole {
|
||||
cluster.ServerLoadBalancer = &k3d.Loadbalancer{
|
||||
Node: node,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if cluster.ServerLoadBalancer.Node != nil {
|
||||
lbcfg, err := GetLoadbalancerConfig(ctx, runtime, cluster)
|
||||
if err != nil {
|
||||
return cluster, fmt.Errorf("error getting loadbalancer config for cluster %s: %w", cluster.Name, err)
|
||||
}
|
||||
cluster.ServerLoadBalancer.Config = &lbcfg
|
||||
}
|
||||
}
|
||||
|
||||
if err := populateClusterFieldsFromLabels(cluster); err != nil {
|
||||
@ -1009,3 +1031,50 @@ func prepCreateLocalRegistryHostingConfigMap(ctx context.Context, runtime k3drt.
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ClusterEditChangesetSimple modifies an existing cluster with a given SimpleConfig changeset
|
||||
func ClusterEditChangesetSimple(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, changeset *config.SimpleConfig) error {
|
||||
nodeCount := len(cluster.Nodes)
|
||||
nodeList := cluster.Nodes
|
||||
// === Ports ===
|
||||
|
||||
existingLB := cluster.ServerLoadBalancer
|
||||
lbChangeset :=
|
||||
|
||||
if len(changeset.Ports) > 0 {
|
||||
for _, portWithNodeFilters := range changeset.Ports {
|
||||
log.Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters)
|
||||
if len(portWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 {
|
||||
log.Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, conftypes.DefaultTargetsNodefiltersPortMappings)
|
||||
portWithNodeFilters.NodeFilters = conftypes.DefaultTargetsNodefiltersPortMappings
|
||||
}
|
||||
|
||||
for _, f := range portWithNodeFilters.NodeFilters {
|
||||
if strings.HasPrefix(f, "loadbalancer") {
|
||||
log.Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, conftypes.DefaultTargetsNodefiltersPortMappings)
|
||||
portWithNodeFilters.NodeFilters = conftypes.DefaultTargetsNodefiltersPortMappings
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
filteredNodes, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for suffix, nodes := range filteredNodes {
|
||||
switch suffix {
|
||||
case "proxy", util.NodeFilterSuffixNone:
|
||||
break
|
||||
case util.NodeFilterMapKeyAll:
|
||||
break
|
||||
default:
|
||||
return fmt.Errorf("error: 'cluster edit' does not (yet) support the '%s' opt/suffix for adding ports", suffix)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -32,6 +32,7 @@ import (
|
||||
"github.com/docker/go-connections/nat"
|
||||
cliutil "github.com/rancher/k3d/v4/cmd/util" // TODO: move parseapiport to pkg
|
||||
"github.com/rancher/k3d/v4/pkg/client"
|
||||
"github.com/rancher/k3d/v4/pkg/config/types"
|
||||
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
|
||||
"github.com/rancher/k3d/v4/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v4/pkg/types"
|
||||
@ -44,10 +45,6 @@ import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
DefaultTargetsNodefiltersPortMappings = []string{"servers:*:proxy", "agents:*:proxy"}
|
||||
)
|
||||
|
||||
// TransformSimpleToClusterConfig transforms a simple configuration to a full-fledged cluster configuration
|
||||
func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtime, simpleConfig conf.SimpleConfig) (*conf.ClusterConfig, error) {
|
||||
|
||||
@ -362,10 +359,10 @@ func addPortMappings(node *k3d.Node, portmappings []nat.PortMapping) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadbalancerAddPortConfigs(loadbalancer *k3d.Loadbalancer, pm nat.PortMapping, nodes []*k3d.Node) error {
|
||||
portconfig := fmt.Sprintf("%s.%s", pm.Port.Port(), pm.Port.Proto())
|
||||
func loadbalancerAddPortConfigs(loadbalancer *k3d.Loadbalancer, portmapping nat.PortMapping, targetNodes []*k3d.Node) error {
|
||||
portconfig := fmt.Sprintf("%s.%s", portmapping.Port.Port(), portmapping.Port.Proto())
|
||||
nodenames := []string{}
|
||||
for _, node := range nodes {
|
||||
for _, node := range targetNodes {
|
||||
if node.Role == k3d.LoadBalancerRole {
|
||||
return fmt.Errorf("error adding port config to loadbalancer: cannot add port config referencing the loadbalancer itself (loop)")
|
||||
}
|
||||
@ -398,14 +395,14 @@ func TransformPorts(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.
|
||||
for _, portWithNodeFilters := range portsWithNodeFilters {
|
||||
log.Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters)
|
||||
if len(portWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 {
|
||||
log.Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, DefaultTargetsNodefiltersPortMappings)
|
||||
portWithNodeFilters.NodeFilters = DefaultTargetsNodefiltersPortMappings
|
||||
log.Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings)
|
||||
portWithNodeFilters.NodeFilters = types.DefaultTargetsNodefiltersPortMappings
|
||||
}
|
||||
|
||||
for _, f := range portWithNodeFilters.NodeFilters {
|
||||
if strings.HasPrefix(f, "loadbalancer") {
|
||||
log.Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, DefaultTargetsNodefiltersPortMappings)
|
||||
portWithNodeFilters.NodeFilters = DefaultTargetsNodefiltersPortMappings
|
||||
log.Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings)
|
||||
portWithNodeFilters.NodeFilters = types.DefaultTargetsNodefiltersPortMappings
|
||||
break
|
||||
}
|
||||
}
|
||||
@ -415,12 +412,6 @@ func TransformPorts(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.
|
||||
return err
|
||||
}
|
||||
|
||||
nn := ""
|
||||
for _, n := range filteredNodes["proxy"] {
|
||||
nn = strings.Join([]string{nn, n.Name}, ",")
|
||||
}
|
||||
log.Debugf("Filtered nodes: %#v", nn)
|
||||
|
||||
for suffix, nodes := range filteredNodes {
|
||||
portmappings, err := nat.ParsePortSpec(portWithNodeFilters.Port)
|
||||
if err != nil {
|
||||
|
||||
@ -32,3 +32,8 @@ type Config interface {
|
||||
GetKind() string
|
||||
GetAPIVersion() string
|
||||
}
|
||||
|
||||
// Default Targets for NodeFilters
var (
	// DefaultTargetsNodefiltersPortMappings is the fallback set of nodefilters
	// applied to a port mapping that was given without an explicit nodefilter
	// (see TransformPorts): the port targets all servers and agents via the
	// ":proxy" suffix.
	DefaultTargetsNodefiltersPortMappings = []string{"servers:*:proxy", "agents:*:proxy"}
)
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user