feat(logging): using new package containing logrus.Logger instead of global scope logrus (closes #583) (#699, @Shanduur)
parent ac527e7c8a
commit 917c19eae5
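The hunks below all apply one mechanical change: call sites stop going through logrus' package-global logger (imported as log "github.com/sirupsen/logrus") and instead use the new github.com/rancher/k3d/v4/pkg/logger package, imported as l, whose Log() accessor hands out a dedicated *logrus.Logger. The logger package itself is not part of this diff; a minimal sketch of what such a package might look like (an assumption for illustration, not necessarily the verbatim k3d implementation):

// Package logger owns a dedicated logrus.Logger instance, so that k3d
// no longer configures or mutates logrus' global standard logger.
package logger

import "github.com/sirupsen/logrus"

// log is the package-owned instance handed out to all callers.
// (Assumption: the real package may also attach formatters/hooks here.)
var log = logrus.New()

// Log returns the shared *logrus.Logger used across k3d.
func Log() *logrus.Logger {
    return log
}

With that accessor in place, log.Fatalln(err) becomes l.Log().Fatalln(err); and since Log() returns a plain *logrus.Logger, level checks keep working against logrus' exported constants, e.g. l.Log().GetLevel() >= logrus.DebugLevel in the initConfig() hunk below.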
@@ -22,7 +22,8 @@ THE SOFTWARE.
 package cluster
 
 import (
-    log "github.com/sirupsen/logrus"
+    l "github.com/rancher/k3d/v4/pkg/logger"
+
     "github.com/spf13/cobra"
 )
 
@@ -36,8 +37,8 @@ func NewCmdCluster() *cobra.Command {
         Long: `Manage cluster(s)`,
         Run: func(cmd *cobra.Command, args []string) {
             if err := cmd.Help(); err != nil {
-                log.Errorln("Couldn't get help text")
-                log.Fatalln(err)
+                l.Log().Errorln("Couldn't get help text")
+                l.Log().Fatalln(err)
             }
         },
     }
@@ -32,6 +32,7 @@ import (
     "time"
 
     "github.com/docker/go-connections/nat"
+    "github.com/sirupsen/logrus"
 
     "github.com/spf13/cobra"
     "github.com/spf13/viper"
@@ -41,11 +42,10 @@ import (
     k3dCluster "github.com/rancher/k3d/v4/pkg/client"
     "github.com/rancher/k3d/v4/pkg/config"
     conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/rancher/k3d/v4/pkg/runtimes"
    k3d "github.com/rancher/k3d/v4/pkg/types"
     "github.com/rancher/k3d/v4/version"
-
-    log "github.com/sirupsen/logrus"
 )
 
 var configFile string
@@ -76,24 +76,24 @@ func initConfig() {
     if configFile != "" {
 
         if _, err := os.Stat(configFile); err != nil {
-            log.Fatalf("Failed to stat config file %s: %+v", configFile, err)
+            l.Log().Fatalf("Failed to stat config file %s: %+v", configFile, err)
         }
 
         // create temporary file to expand environment variables in the config without writing that back to the original file
         // we're doing it here, because this happens just before absolutely all other processing
         tmpfile, err := os.CreateTemp(os.TempDir(), fmt.Sprintf("k3d-config-tmp-%s", filepath.Base(configFile)))
         if err != nil {
-            log.Fatalf("error creating temp copy of configfile %s for variable expansion: %v", configFile, err)
+            l.Log().Fatalf("error creating temp copy of configfile %s for variable expansion: %v", configFile, err)
         }
         defer tmpfile.Close()
 
         originalcontent, err := ioutil.ReadFile(configFile)
         if err != nil {
-            log.Fatalf("error reading config file %s: %v", configFile, err)
+            l.Log().Fatalf("error reading config file %s: %v", configFile, err)
         }
         expandedcontent := os.ExpandEnv(string(originalcontent))
         if _, err := tmpfile.WriteString(expandedcontent); err != nil {
-            log.Fatalf("error writing expanded config file contents to temp file %s: %v", tmpfile.Name(), err)
+            l.Log().Fatalf("error writing expanded config file contents to temp file %s: %v", tmpfile.Name(), err)
         }
 
         // use temp file with expanded variables
@@ -102,29 +102,29 @@ func initConfig() {
         // try to read config into memory (viper map structure)
         if err := cfgViper.ReadInConfig(); err != nil {
             if _, ok := err.(viper.ConfigFileNotFoundError); ok {
-                log.Fatalf("Config file %s not found: %+v", configFile, err)
+                l.Log().Fatalf("Config file %s not found: %+v", configFile, err)
             }
             // config file found but some other error happened
-            log.Fatalf("Failed to read config file %s: %+v", configFile, err)
+            l.Log().Fatalf("Failed to read config file %s: %+v", configFile, err)
         }
 
         schema, err := config.GetSchemaByVersion(cfgViper.GetString("apiVersion"))
         if err != nil {
-            log.Fatalf("Cannot validate config file %s: %+v", configFile, err)
+            l.Log().Fatalf("Cannot validate config file %s: %+v", configFile, err)
         }
 
         if err := config.ValidateSchemaFile(configFile, schema); err != nil {
-            log.Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
+            l.Log().Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
         }
 
-        log.Infof("Using config file %s (%s#%s)", configFile, strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind")))
+        l.Log().Infof("Using config file %s (%s#%s)", configFile, strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind")))
     }
-    if log.GetLevel() >= log.DebugLevel {
+    if l.Log().GetLevel() >= logrus.DebugLevel {
         c, _ := yaml.Marshal(cfgViper.AllSettings())
-        log.Debugf("Configuration:\n%s", c)
+        l.Log().Debugf("Configuration:\n%s", c)
 
         c, _ = yaml.Marshal(ppViper.AllSettings())
-        log.Debugf("Additional CLI Configuration:\n%s", c)
+        l.Log().Debugf("Additional CLI Configuration:\n%s", c)
     }
 }
 
@@ -154,27 +154,27 @@ func NewCmdClusterCreate() *cobra.Command {
             }
             cfg, err := config.FromViper(cfgViper)
             if err != nil {
-                log.Fatalln(err)
+                l.Log().Fatalln(err)
             }
 
             if cfg.GetAPIVersion() != config.DefaultConfigApiVersion {
-                log.Warnf("Default config apiVersion is '%s', but you're using '%s': consider migrating.", config.DefaultConfigApiVersion, cfg.GetAPIVersion())
+                l.Log().Warnf("Default config apiVersion is '%s', but you're using '%s': consider migrating.", config.DefaultConfigApiVersion, cfg.GetAPIVersion())
                 cfg, err = config.Migrate(cfg, config.DefaultConfigApiVersion)
                 if err != nil {
-                    log.Fatalln(err)
+                    l.Log().Fatalln(err)
                 }
             }
 
             simpleCfg := cfg.(conf.SimpleConfig)
 
-            log.Debugf("========== Simple Config ==========\n%+v\n==========================\n", simpleCfg)
+            l.Log().Debugf("========== Simple Config ==========\n%+v\n==========================\n", simpleCfg)
 
             simpleCfg, err = applyCLIOverrides(simpleCfg)
             if err != nil {
-                log.Fatalf("Failed to apply CLI overrides: %+v", err)
+                l.Log().Fatalf("Failed to apply CLI overrides: %+v", err)
             }
 
-            log.Debugf("========== Merged Simple Config ==========\n%+v\n==========================\n", simpleCfg)
+            l.Log().Debugf("========== Merged Simple Config ==========\n%+v\n==========================\n", simpleCfg)
 
             /**************************************
              * Transform, Process & Validate Configuration *
@@ -187,18 +187,18 @@ func NewCmdClusterCreate() *cobra.Command {
 
             clusterConfig, err := config.TransformSimpleToClusterConfig(cmd.Context(), runtimes.SelectedRuntime, simpleCfg)
             if err != nil {
-                log.Fatalln(err)
+                l.Log().Fatalln(err)
             }
-            log.Debugf("===== Merged Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)
+            l.Log().Debugf("===== Merged Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)
 
             clusterConfig, err = config.ProcessClusterConfig(*clusterConfig)
             if err != nil {
-                log.Fatalln(err)
+                l.Log().Fatalln(err)
             }
-            log.Debugf("===== Processed Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)
+            l.Log().Debugf("===== Processed Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)
 
             if err := config.ValidateClusterConfig(cmd.Context(), runtimes.SelectedRuntime, *clusterConfig); err != nil {
-                log.Fatalln("Failed Cluster Configuration Validation: ", err)
+                l.Log().Fatalln("Failed Cluster Configuration Validation: ", err)
             }
 
             /**************************************
@@ -207,44 +207,44 @@ func NewCmdClusterCreate() *cobra.Command {
 
             // check if a cluster with that name exists already
             if _, err := k3dCluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster); err == nil {
-                log.Fatalf("Failed to create cluster '%s' because a cluster with that name already exists", clusterConfig.Cluster.Name)
+                l.Log().Fatalf("Failed to create cluster '%s' because a cluster with that name already exists", clusterConfig.Cluster.Name)
             }
 
             // create cluster
             if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig {
-                log.Debugln("'--kubeconfig-update-default set: enabling wait-for-server")
+                l.Log().Debugln("'--kubeconfig-update-default set: enabling wait-for-server")
                 clusterConfig.ClusterCreateOpts.WaitForServer = true
             }
             //if err := k3dCluster.ClusterCreate(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, &clusterConfig.ClusterCreateOpts); err != nil {
             if err := k3dCluster.ClusterRun(cmd.Context(), runtimes.SelectedRuntime, clusterConfig); err != nil {
                 // rollback if creation failed
-                log.Errorln(err)
+                l.Log().Errorln(err)
                 if simpleCfg.Options.K3dOptions.NoRollback { // TODO: move rollback mechanics to pkg/
-                    log.Fatalln("Cluster creation FAILED, rollback deactivated.")
+                    l.Log().Fatalln("Cluster creation FAILED, rollback deactivated.")
                 }
                 // rollback if creation failed
-                log.Errorln("Failed to create cluster >>> Rolling Back")
+                l.Log().Errorln("Failed to create cluster >>> Rolling Back")
                 if err := k3dCluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, k3d.ClusterDeleteOpts{SkipRegistryCheck: true}); err != nil {
-                    log.Errorln(err)
-                    log.Fatalln("Cluster creation FAILED, also FAILED to rollback changes!")
+                    l.Log().Errorln(err)
+                    l.Log().Fatalln("Cluster creation FAILED, also FAILED to rollback changes!")
                 }
-                log.Fatalln("Cluster creation FAILED, all changes have been rolled back!")
+                l.Log().Fatalln("Cluster creation FAILED, all changes have been rolled back!")
             }
-            log.Infof("Cluster '%s' created successfully!", clusterConfig.Cluster.Name)
+            l.Log().Infof("Cluster '%s' created successfully!", clusterConfig.Cluster.Name)
 
             /**************
              * Kubeconfig *
              **************/
 
             if !clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig && clusterConfig.KubeconfigOpts.SwitchCurrentContext {
-                log.Infoln("--kubeconfig-update-default=false --> sets --kubeconfig-switch-context=false")
+                l.Log().Infoln("--kubeconfig-update-default=false --> sets --kubeconfig-switch-context=false")
                 clusterConfig.KubeconfigOpts.SwitchCurrentContext = false
             }
 
             if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig {
-                log.Debugf("Updating default kubeconfig with a new context for cluster %s", clusterConfig.Cluster.Name)
+                l.Log().Debugf("Updating default kubeconfig with a new context for cluster %s", clusterConfig.Cluster.Name)
                 if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: simpleCfg.Options.KubeconfigOptions.SwitchCurrentContext}); err != nil {
-                    log.Warningln(err)
+                    l.Log().Warningln(err)
                 }
             }
 
@@ -253,7 +253,7 @@ func NewCmdClusterCreate() *cobra.Command {
             *****************/
 
             // print information on how to use the cluster with kubectl
-            log.Infoln("You can now use it like this:")
+            l.Log().Infoln("You can now use it like this:")
             if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig && !clusterConfig.KubeconfigOpts.SwitchCurrentContext {
                 fmt.Printf("kubectl config use-context %s\n", fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, clusterConfig.Cluster.Name))
             } else if !clusterConfig.KubeconfigOpts.SwitchCurrentContext {
@@ -273,7 +273,7 @@ func NewCmdClusterCreate() *cobra.Command {
 
     cmd.Flags().StringVarP(&configFile, "config", "c", "", "Path of a config file to use")
     if err := cmd.MarkFlagFilename("config", "yaml", "yml"); err != nil {
-        log.Fatalln("Failed to mark flag 'config' as filename flag")
+        l.Log().Fatalln("Failed to mark flag 'config' as filename flag")
     }
 
     /***********************
@@ -388,7 +388,7 @@ func NewCmdClusterCreate() *cobra.Command {
     cmd.Flags().String("registry-config", "", "Specify path to an extra registries.yaml file")
     _ = cfgViper.BindPFlag("registries.config", cmd.Flags().Lookup("registry-config"))
     if err := cmd.MarkFlagFilename("registry-config", "yaml", "yml"); err != nil {
-        log.Fatalln("Failed to mark flag 'config' as filename flag")
+        l.Log().Fatalln("Failed to mark flag 'config' as filename flag")
     }
 
     /* Subcommands */
@@ -424,7 +424,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
     // Overwrite if cli arg is set
     if ppViper.IsSet("cli.api-port") {
         if cfg.ExposeAPI.HostPort != "" {
-            log.Debugf("Overriding pre-defined kubeAPI Exposure Spec %+v with CLI argument %s", cfg.ExposeAPI, ppViper.GetString("cli.api-port"))
+            l.Log().Debugf("Overriding pre-defined kubeAPI Exposure Spec %+v with CLI argument %s", cfg.ExposeAPI, ppViper.GetString("cli.api-port"))
         }
         exposeAPI, err = cliutil.ParsePortExposureSpec(ppViper.GetString("cli.api-port"), k3d.DefaultAPIPort)
         if err != nil {
@@ -454,11 +454,11 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
         // split node filter from the specified volume
         volume, filters, err := cliutil.SplitFiltersFromFlag(volumeFlag)
         if err != nil {
-            log.Fatalln(err)
+            l.Log().Fatalln(err)
         }
 
         if strings.Contains(volume, k3d.DefaultRegistriesFilePath) && (cfg.Registries.Create || cfg.Registries.Config != "" || len(cfg.Registries.Use) != 0) {
-            log.Warnf("Seems like you're mounting a file at '%s' while also using a referenced registries config or k3d-managed registries: Your mounted file will probably be overwritten!", k3d.DefaultRegistriesFilePath)
+            l.Log().Warnf("Seems like you're mounting a file at '%s' while also using a referenced registries config or k3d-managed registries: Your mounted file will probably be overwritten!", k3d.DefaultRegistriesFilePath)
         }
 
         // create new entry or append filter to existing entry
@@ -476,7 +476,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
         })
     }
 
-    log.Tracef("VolumeFilterMap: %+v", volumeFilterMap)
+    l.Log().Tracef("VolumeFilterMap: %+v", volumeFilterMap)
 
     // -> PORTS
     portFilterMap := make(map[string][]string, 1)
@@ -484,12 +484,12 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
         // split node filter from the specified volume
         portmap, filters, err := cliutil.SplitFiltersFromFlag(portFlag)
         if err != nil {
-            log.Fatalln(err)
+            l.Log().Fatalln(err)
         }
 
         // create new entry or append filter to existing entry
         if _, exists := portFilterMap[portmap]; exists {
-            log.Fatalln("Same Portmapping can not be used for multiple nodes")
+            l.Log().Fatalln("Same Portmapping can not be used for multiple nodes")
         } else {
             portFilterMap[portmap] = filters
         }
@@ -502,7 +502,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
         })
     }
 
-    log.Tracef("PortFilterMap: %+v", portFilterMap)
+    l.Log().Tracef("PortFilterMap: %+v", portFilterMap)
 
     // --k3s-node-label
     // k3sNodeLabelFilterMap will add k3s node label to applied node filters
@@ -512,7 +512,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
         // split node filter from the specified label
         label, nodeFilters, err := cliutil.SplitFiltersFromFlag(labelFlag)
         if err != nil {
-            log.Fatalln(err)
+            l.Log().Fatalln(err)
         }
 
         // create new entry or append filter to existing entry
@@ -530,7 +530,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
         })
     }
 
-    log.Tracef("K3sNodeLabelFilterMap: %+v", k3sNodeLabelFilterMap)
+    l.Log().Tracef("K3sNodeLabelFilterMap: %+v", k3sNodeLabelFilterMap)
 
     // --runtime-label
     // runtimeLabelFilterMap will add container runtime label to applied node filters
@@ -540,7 +540,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
         // split node filter from the specified label
         label, nodeFilters, err := cliutil.SplitFiltersFromFlag(labelFlag)
         if err != nil {
-            log.Fatalln(err)
+            l.Log().Fatalln(err)
         }
 
         cliutil.ValidateRuntimeLabelKey(strings.Split(label, "=")[0])
@@ -560,7 +560,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
         })
     }
 
-    log.Tracef("RuntimeLabelFilterMap: %+v", runtimeLabelFilterMap)
+    l.Log().Tracef("RuntimeLabelFilterMap: %+v", runtimeLabelFilterMap)
 
     // --env
     // envFilterMap will add container env vars to applied node filters
@@ -570,7 +570,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
         // split node filter from the specified env var
         env, filters, err := cliutil.SplitFiltersFromFlag(envFlag)
         if err != nil {
-            log.Fatalln(err)
+            l.Log().Fatalln(err)
         }
 
         // create new entry or append filter to existing entry
@@ -588,7 +588,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
         })
     }
 
-    log.Tracef("EnvFilterMap: %+v", envFilterMap)
+    l.Log().Tracef("EnvFilterMap: %+v", envFilterMap)
 
     // --k3s-arg
     argFilterMap := make(map[string][]string, 1)
@@ -597,7 +597,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
         // split node filter from the specified arg
         arg, filters, err := cliutil.SplitFiltersFromFlag(argFlag)
         if err != nil {
-            log.Fatalln(err)
+            l.Log().Fatalln(err)
         }
 
         // create new entry or append filter to existing entry
@@ -28,10 +28,10 @@ import (
 
     "github.com/rancher/k3d/v4/cmd/util"
     "github.com/rancher/k3d/v4/pkg/client"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/rancher/k3d/v4/pkg/runtimes"
     k3d "github.com/rancher/k3d/v4/pkg/types"
     k3dutil "github.com/rancher/k3d/v4/pkg/util"
-    log "github.com/sirupsen/logrus"
 
     "github.com/spf13/cobra"
 )
@@ -51,31 +51,31 @@ func NewCmdClusterDelete() *cobra.Command {
             clusters := parseDeleteClusterCmd(cmd, args)
 
             if len(clusters) == 0 {
-                log.Infoln("No clusters found")
+                l.Log().Infoln("No clusters found")
             } else {
                 for _, c := range clusters {
                     if err := client.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, c, k3d.ClusterDeleteOpts{SkipRegistryCheck: false}); err != nil {
-                        log.Fatalln(err)
+                        l.Log().Fatalln(err)
                     }
-                    log.Infoln("Removing cluster details from default kubeconfig...")
+                    l.Log().Infoln("Removing cluster details from default kubeconfig...")
                     if err := client.KubeconfigRemoveClusterFromDefaultConfig(cmd.Context(), c); err != nil {
-                        log.Warnln("Failed to remove cluster details from default kubeconfig")
-                        log.Warnln(err)
+                        l.Log().Warnln("Failed to remove cluster details from default kubeconfig")
+                        l.Log().Warnln(err)
                     }
-                    log.Infoln("Removing standalone kubeconfig file (if there is one)...")
+                    l.Log().Infoln("Removing standalone kubeconfig file (if there is one)...")
                     configDir, err := k3dutil.GetConfigDirOrCreate()
                     if err != nil {
-                        log.Warnf("Failed to delete kubeconfig file: %+v", err)
+                        l.Log().Warnf("Failed to delete kubeconfig file: %+v", err)
                     } else {
                         kubeconfigfile := path.Join(configDir, fmt.Sprintf("kubeconfig-%s.yaml", c.Name))
                         if err := os.Remove(kubeconfigfile); err != nil {
                             if !os.IsNotExist(err) {
-                                log.Warnf("Failed to delete kubeconfig file '%s'", kubeconfigfile)
+                                l.Log().Warnf("Failed to delete kubeconfig file '%s'", kubeconfigfile)
                             }
                         }
                     }
 
-                    log.Infof("Successfully deleted cluster %s!", c.Name)
+                    l.Log().Infof("Successfully deleted cluster %s!", c.Name)
                 }
             }
 
@@ -98,12 +98,12 @@ func parseDeleteClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
     var clusters []*k3d.Cluster
 
     if all, err := cmd.Flags().GetBool("all"); err != nil {
-        log.Fatalln(err)
+        l.Log().Fatalln(err)
     } else if all {
-        log.Infoln("Deleting all clusters...")
+        l.Log().Infoln("Deleting all clusters...")
        clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
         if err != nil {
-            log.Fatalln(err)
+            l.Log().Fatalln(err)
         }
         return clusters
     }
@@ -119,7 +119,7 @@
             if err == client.ClusterGetNoNodesFoundError {
                 continue
             }
-            log.Fatalln(err)
+            l.Log().Fatalln(err)
         }
         clusters = append(clusters, c)
     }
@@ -26,9 +26,9 @@ import (
     cliutil "github.com/rancher/k3d/v4/cmd/util"
     "github.com/rancher/k3d/v4/pkg/client"
     conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/rancher/k3d/v4/pkg/runtimes"
     k3d "github.com/rancher/k3d/v4/pkg/types"
-    log "github.com/sirupsen/logrus"
     "github.com/spf13/cobra"
 )
 
@@ -47,13 +47,13 @@ func NewCmdClusterEdit() *cobra.Command {
 
             existingCluster, changeset := parseEditClusterCmd(cmd, args)
 
-            log.Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingCluster, changeset)
+            l.Log().Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingCluster, changeset)
 
             if err := client.ClusterEditChangesetSimple(cmd.Context(), runtimes.SelectedRuntime, existingCluster, changeset); err != nil {
-                log.Fatalf("Failed to update the cluster: %v", err)
+                l.Log().Fatalf("Failed to update the cluster: %v", err)
             }
 
-            log.Infof("Successfully updated %s", existingCluster.Name)
+            l.Log().Infof("Successfully updated %s", existingCluster.Name)
 
         },
     }
@@ -72,11 +72,11 @@ func parseEditClusterCmd(cmd *cobra.Command, args []string) (*k3d.Cluster, *conf
 
     existingCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: args[0]})
     if err != nil {
-        log.Fatalln(err)
+        l.Log().Fatalln(err)
     }
 
     if existingCluster == nil {
-        log.Infof("Cluster %s not found", args[0])
+        l.Log().Infof("Cluster %s not found", args[0])
         return nil, nil
     }
 
@@ -87,7 +87,7 @@
     */
     portFlags, err := cmd.Flags().GetStringArray("port-add")
     if err != nil {
-        log.Errorln(err)
+        l.Log().Errorln(err)
         return nil, nil
     }
 
@@ -100,12 +100,12 @@
         // split node filter from the specified volume
         portmap, filters, err := cliutil.SplitFiltersFromFlag(portFlag)
         if err != nil {
-            log.Fatalln(err)
+            l.Log().Fatalln(err)
         }
 
         // create new entry or append filter to existing entry
         if _, exists := portFilterMap[portmap]; exists {
-            log.Fatalln("Same Portmapping can not be used for multiple nodes")
+            l.Log().Fatalln("Same Portmapping can not be used for multiple nodes")
         } else {
             portFilterMap[portmap] = filters
         }
@@ -118,7 +118,7 @@
         })
     }
 
-    log.Tracef("PortFilterMap: %+v", portFilterMap)
+    l.Log().Tracef("PortFilterMap: %+v", portFilterMap)
 
     return existingCluster, &changeset
 }
@@ -30,13 +30,12 @@ import (
 
     "github.com/rancher/k3d/v4/cmd/util"
     k3cluster "github.com/rancher/k3d/v4/pkg/client"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/rancher/k3d/v4/pkg/runtimes"
     k3d "github.com/rancher/k3d/v4/pkg/types"
     "github.com/spf13/cobra"
     "gopkg.in/yaml.v2"
-
-    log "github.com/sirupsen/logrus"
 
     "github.com/liggitt/tabwriter"
 )
 
@@ -83,14 +82,14 @@ func buildClusterList(ctx context.Context, args []string) []*k3d.Cluster {
         // cluster name not specified : get all clusters
         clusters, err = k3cluster.ClusterList(ctx, runtimes.SelectedRuntime)
         if err != nil {
-            log.Fatalln(err)
+            l.Log().Fatalln(err)
         }
     } else {
         for _, clusterName := range args {
             // cluster name specified : get specific cluster
             retrievedCluster, err := k3cluster.ClusterGet(ctx, runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
             if err != nil {
-                log.Fatalln(err)
+                l.Log().Fatalln(err)
             }
             clusters = append(clusters, retrievedCluster)
         }
@@ -126,7 +125,7 @@ func PrintClusters(clusters []*k3d.Cluster, flags clusterFlags) {
         }
         _, err := fmt.Fprintf(tabwriter, "%s\n", strings.Join(headers, "\t"))
         if err != nil {
-            log.Fatalln("Failed to print headers")
+            l.Log().Fatalln("Failed to print headers")
         }
     }
 }
@@ -30,9 +30,8 @@ import (
     "github.com/rancher/k3d/v4/pkg/types"
     "github.com/spf13/cobra"
 
+    l "github.com/rancher/k3d/v4/pkg/logger"
     k3d "github.com/rancher/k3d/v4/pkg/types"
-
-    log "github.com/sirupsen/logrus"
 )
 
 // NewCmdClusterStart returns a new cobra command
@@ -49,11 +48,11 @@ func NewCmdClusterStart() *cobra.Command {
         Run: func(cmd *cobra.Command, args []string) {
             clusters := parseStartClusterCmd(cmd, args)
             if len(clusters) == 0 {
-                log.Infoln("No clusters found")
+                l.Log().Infoln("No clusters found")
             } else {
                 for _, c := range clusters {
                     if err := client.ClusterStart(cmd.Context(), runtimes.SelectedRuntime, c, startClusterOpts); err != nil {
-                        log.Fatalln(err)
+                        l.Log().Fatalln(err)
                     }
                 }
             }
@@ -77,11 +76,11 @@ func parseStartClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
     var clusters []*k3d.Cluster
 
     if all, err := cmd.Flags().GetBool("all"); err != nil {
-        log.Fatalln(err)
+        l.Log().Fatalln(err)
     } else if all {
         clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
         if err != nil {
-            log.Fatalln(err)
+            l.Log().Fatalln(err)
         }
         return clusters
     }
@@ -94,7 +93,7 @@
     for _, name := range clusternames {
         cluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
         if err != nil {
-            log.Fatalln(err)
+            l.Log().Fatalln(err)
         }
         clusters = append(clusters, cluster)
     }
@@ -26,10 +26,9 @@ import (
 
     "github.com/rancher/k3d/v4/cmd/util"
     "github.com/rancher/k3d/v4/pkg/client"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/rancher/k3d/v4/pkg/runtimes"
     k3d "github.com/rancher/k3d/v4/pkg/types"
-
-    log "github.com/sirupsen/logrus"
 )
 
 // NewCmdClusterStop returns a new cobra command
@@ -44,11 +43,11 @@ func NewCmdClusterStop() *cobra.Command {
         Run: func(cmd *cobra.Command, args []string) {
             clusters := parseStopClusterCmd(cmd, args)
             if len(clusters) == 0 {
-                log.Infoln("No clusters found")
+                l.Log().Infoln("No clusters found")
             } else {
                 for _, c := range clusters {
                     if err := client.ClusterStop(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
-                        log.Fatalln(err)
+                        l.Log().Fatalln(err)
                     }
                 }
             }
@@ -70,11 +69,11 @@ func parseStopClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
     var clusters []*k3d.Cluster
 
     if all, err := cmd.Flags().GetBool("all"); err != nil {
-        log.Fatalln(err)
+        l.Log().Fatalln(err)
     } else if all {
         clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
         if err != nil {
-            log.Fatalln(err)
+            l.Log().Fatalln(err)
         }
         return clusters
     }
@@ -87,7 +86,7 @@
     for _, name := range clusternames {
         cluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
         if err != nil {
-            log.Fatalln(err)
+            l.Log().Fatalln(err)
         }
         clusters = append(clusters, cluster)
     }
@@ -22,7 +22,8 @@ THE SOFTWARE.
 package config
 
 import (
-    log "github.com/sirupsen/logrus"
+    l "github.com/rancher/k3d/v4/pkg/logger"
+
     "github.com/spf13/cobra"
 )
 
@@ -34,8 +35,8 @@ func NewCmdConfig() *cobra.Command {
         Long: `Work with config file(s)`,
         Run: func(cmd *cobra.Command, args []string) {
             if err := cmd.Help(); err != nil {
-                log.Errorln("Couldn't get help text")
-                log.Fatalln(err)
+                l.Log().Errorln("Couldn't get help text")
+                l.Log().Fatalln(err)
             }
         },
     }
@@ -26,7 +26,7 @@ import (
     "os"
 
     config "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
-    log "github.com/sirupsen/logrus"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/spf13/cobra"
 )
 
@@ -39,7 +39,7 @@ func NewCmdConfigInit() *cobra.Command {
         Use:     "init",
         Aliases: []string{"create"},
         Run: func(cmd *cobra.Command, args []string) {
-            log.Infoln("COMING SOON: print a basic k3d config with default pre-filled.")
+            l.Log().Infoln("COMING SOON: print a basic k3d config with default pre-filled.")
             if output == "-" {
                 fmt.Println(config.DefaultConfig)
             } else {
@@ -51,16 +51,16 @@ func NewCmdConfigInit() *cobra.Command {
                     // create/overwrite file
                     file, err = os.Create(output)
                     if err != nil {
-                        log.Fatalf("Failed to create/overwrite output file: %s", err)
+                        l.Log().Fatalf("Failed to create/overwrite output file: %s", err)
                     }
                     // write content
                     if _, err = file.WriteString(config.DefaultConfig); err != nil {
-                        log.Fatalf("Failed to write to output file: %+v", err)
+                        l.Log().Fatalf("Failed to write to output file: %+v", err)
                     }
                 } else if err != nil {
-                    log.Fatalf("Failed to stat output file: %+v", err)
+                    l.Log().Fatalf("Failed to stat output file: %+v", err)
                 } else {
-                    log.Errorln("Output file exists and --force was not set")
+                    l.Log().Errorln("Output file exists and --force was not set")
                     os.Exit(1)
                 }
             }
@@ -69,7 +69,7 @@ func NewCmdConfigInit() *cobra.Command {
 
     cmd.Flags().StringVarP(&output, "output", "o", "k3d-default.yaml", "Write a default k3d config")
     if err := cmd.MarkFlagFilename("output", "yaml", "yml"); err != nil {
-        log.Fatalf("Failed to mark flag 'output' as filename flag: %v", err)
+        l.Log().Fatalf("Failed to mark flag 'output' as filename flag: %v", err)
     }
     cmd.Flags().BoolVarP(&force, "force", "f", false, "Force overwrite of target file")
 
@@ -26,7 +26,7 @@ import (
     "strings"
 
     "github.com/rancher/k3d/v4/pkg/config"
-    log "github.com/sirupsen/logrus"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/spf13/cobra"
     "github.com/spf13/viper"
     "gopkg.in/yaml.v2"
@@ -44,7 +44,7 @@ func NewCmdConfigMigrate() *cobra.Command {
             configFile := args[0]
 
             if _, err := os.Stat(configFile); err != nil {
-                log.Fatalf("Failed to stat config file %s: %+v", configFile, err)
+                l.Log().Fatalf("Failed to stat config file %s: %+v", configFile, err)
             }
 
             cfgViper := viper.New()
@@ -55,38 +55,38 @@ func NewCmdConfigMigrate() *cobra.Command {
             // try to read config into memory (viper map structure)
             if err := cfgViper.ReadInConfig(); err != nil {
                 if _, ok := err.(viper.ConfigFileNotFoundError); ok {
-                    log.Fatalf("Config file %s not found: %+v", configFile, err)
+                    l.Log().Fatalf("Config file %s not found: %+v", configFile, err)
                 }
                 // config file found but some other error happened
-                log.Fatalf("Failed to read config file %s: %+v", configFile, err)
+                l.Log().Fatalf("Failed to read config file %s: %+v", configFile, err)
             }
 
             schema, err := config.GetSchemaByVersion(cfgViper.GetString("apiVersion"))
             if err != nil {
-                log.Fatalf("Cannot validate config file %s: %+v", configFile, err)
+                l.Log().Fatalf("Cannot validate config file %s: %+v", configFile, err)
             }
 
             if err := config.ValidateSchemaFile(configFile, schema); err != nil {
-                log.Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
+                l.Log().Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
             }
 
-            log.Infof("Using config file %s (%s#%s)", cfgViper.ConfigFileUsed(), strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind")))
+            l.Log().Infof("Using config file %s (%s#%s)", cfgViper.ConfigFileUsed(), strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind")))
 
             cfg, err := config.FromViper(cfgViper)
             if err != nil {
-                log.Fatalln(err)
+                l.Log().Fatalln(err)
             }
 
             if cfg.GetAPIVersion() != config.DefaultConfigApiVersion {
                 cfg, err = config.Migrate(cfg, config.DefaultConfigApiVersion)
                 if err != nil {
-                    log.Fatalln(err)
+                    l.Log().Fatalln(err)
                 }
             }
 
             yamlout, err := yaml.Marshal(cfg)
             if err != nil {
-                log.Fatalln(err)
+                l.Log().Fatalln(err)
             }
 
             output := "-"
@@ -97,11 +97,11 @@ func NewCmdConfigMigrate() *cobra.Command {
 
             if output == "-" {
                 if _, err := os.Stdout.Write(yamlout); err != nil {
-                    log.Fatalln(err)
+                    l.Log().Fatalln(err)
                 }
             } else {
                 if err := os.WriteFile(output, yamlout, os.ModeAppend); err != nil {
-                    log.Fatalln(err)
+                    l.Log().Fatalln(err)
                 }
             }
 
@@ -26,9 +26,9 @@ import (
 
     "github.com/rancher/k3d/v4/cmd/util"
     "github.com/rancher/k3d/v4/pkg/client"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/rancher/k3d/v4/pkg/runtimes"
     "github.com/rancher/k3d/v4/pkg/types"
-    log "github.com/sirupsen/logrus"
     "github.com/spf13/cobra"
     "gopkg.in/yaml.v2"
 )
@@ -42,8 +42,8 @@ func NewCmdDebug() *cobra.Command {
         Long: `Debug k3d cluster(s)`,
         Run: func(cmd *cobra.Command, args []string) {
             if err := cmd.Help(); err != nil {
-                log.Errorln("Couldn't get help text")
-                log.Fatalln(err)
+                l.Log().Errorln("Couldn't get help text")
+                l.Log().Fatalln(err)
             }
         },
     }
@@ -61,8 +61,8 @@ func NewCmdDebugLoadbalancer() *cobra.Command {
         Long: `Debug the loadbalancer`,
         Run: func(cmd *cobra.Command, args []string) {
             if err := cmd.Help(); err != nil {
-                log.Errorln("Couldn't get help text")
-                log.Fatalln(err)
+                l.Log().Errorln("Couldn't get help text")
+                l.Log().Fatalln(err)
             }
         },
     }
@@ -74,16 +74,16 @@ func NewCmdDebugLoadbalancer() *cobra.Command {
         Run: func(cmd *cobra.Command, args []string) {
             c, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &types.Cluster{Name: args[0]})
             if err != nil {
-                log.Fatalln(err)
+                l.Log().Fatalln(err)
             }
 
             lbconf, err := client.GetLoadbalancerConfig(cmd.Context(), runtimes.SelectedRuntime, c)
             if err != nil {
-                log.Fatalln(err)
+                l.Log().Fatalln(err)
             }
             yamlized, err := yaml.Marshal(lbconf)
             if err != nil {
-                log.Fatalln(err)
+                l.Log().Fatalln(err)
             }
             fmt.Println(string(yamlized))
         },
@@ -22,7 +22,7 @@ THE SOFTWARE.
 package image
 
 import (
-    log "github.com/sirupsen/logrus"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/spf13/cobra"
 )
 
@@ -37,8 +37,8 @@ func NewCmdImage() *cobra.Command {
         Long: `Handle container images.`,
         Run: func(cmd *cobra.Command, args []string) {
             if err := cmd.Help(); err != nil {
-                log.Errorln("Couldn't get help text")
-                log.Fatalln(err)
+                l.Log().Errorln("Couldn't get help text")
+                l.Log().Fatalln(err)
             }
         },
     }
@@ -27,11 +27,10 @@ import (
     "github.com/spf13/cobra"
 
     "github.com/rancher/k3d/v4/cmd/util"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/rancher/k3d/v4/pkg/runtimes"
     "github.com/rancher/k3d/v4/pkg/tools"
     k3d "github.com/rancher/k3d/v4/pkg/types"
-
-    log "github.com/sirupsen/logrus"
 )
 
 // NewCmdImageImport returns a new cobra command
@@ -60,20 +59,20 @@ So if a file './rancher/k3d-tools' exists, k3d will try to import it instead of
         Args: cobra.MinimumNArgs(1),
         Run: func(cmd *cobra.Command, args []string) {
             images, clusters := parseLoadImageCmd(cmd, args)
-            log.Debugf("Importing image(s) [%+v] from runtime [%s] into cluster(s) [%+v]...", images, runtimes.SelectedRuntime, clusters)
+            l.Log().Debugf("Importing image(s) [%+v] from runtime [%s] into cluster(s) [%+v]...", images, runtimes.SelectedRuntime, clusters)
             errOccured := false
             for _, cluster := range clusters {
-                log.Infof("Importing image(s) into cluster '%s'", cluster.Name)
+                l.Log().Infof("Importing image(s) into cluster '%s'", cluster.Name)
                 if err := tools.ImageImportIntoClusterMulti(cmd.Context(), runtimes.SelectedRuntime, images, &cluster, loadImageOpts); err != nil {
-                    log.Errorf("Failed to import image(s) into cluster '%s': %+v", cluster.Name, err)
+                    l.Log().Errorf("Failed to import image(s) into cluster '%s': %+v", cluster.Name, err)
                     errOccured = true
                 }
             }
             if errOccured {
-                log.Warnln("At least one error occured while trying to import the image(s) into the selected cluster(s)")
+                l.Log().Warnln("At least one error occured while trying to import the image(s) into the selected cluster(s)")
                 os.Exit(1)
             }
-            log.Infof("Successfully imported %d image(s) into %d cluster(s)", len(images), len(clusters))
+            l.Log().Infof("Successfully imported %d image(s) into %d cluster(s)", len(images), len(clusters))
         },
     }
 
@@ -82,7 +81,7 @@ So if a file './rancher/k3d-tools' exists, k3d will try to import it instead of
     *********/
     cmd.Flags().StringArrayP("cluster", "c", []string{k3d.DefaultClusterName}, "Select clusters to load the image to.")
     if err := cmd.RegisterFlagCompletionFunc("cluster", util.ValidArgsAvailableClusters); err != nil {
-        log.Fatalln("Failed to register flag completion for '--cluster'", err)
+        l.Log().Fatalln("Failed to register flag completion for '--cluster'", err)
     }
 
     cmd.Flags().BoolVarP(&loadImageOpts.KeepTar, "keep-tarball", "k", false, "Do not delete the tarball containing the saved images from the shared volume")
@@ -100,7 +99,7 @@ func parseLoadImageCmd(cmd *cobra.Command, args []string) ([]string, []k3d.Clust
     // --cluster
     clusterNames, err := cmd.Flags().GetStringArray("cluster")
     if err != nil {
-        log.Fatalln(err)
+        l.Log().Fatalln(err)
     }
     clusters := []k3d.Cluster{}
     for _, clusterName := range clusterNames {
@@ -110,7 +109,7 @@
     // images
     images := args
     if len(images) == 0 {
-        log.Fatalln("No images specified!")
+        l.Log().Fatalln("No images specified!")
     }
 
     return images, clusters
@@ -22,7 +22,7 @@ THE SOFTWARE.
 package kubeconfig
 
 import (
-    log "github.com/sirupsen/logrus"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/spf13/cobra"
 )
 
@@ -36,8 +36,8 @@ func NewCmdKubeconfig() *cobra.Command {
         Long: `Manage kubeconfig(s)`,
         Run: func(cmd *cobra.Command, args []string) {
             if err := cmd.Help(); err != nil {
-                log.Errorln("Couldn't get help text")
-                log.Fatalln(err)
+                l.Log().Errorln("Couldn't get help text")
+                l.Log().Fatalln(err)
             }
         },
     }
@@ -27,11 +27,10 @@ import (
 
     "github.com/rancher/k3d/v4/cmd/util"
     "github.com/rancher/k3d/v4/pkg/client"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/rancher/k3d/v4/pkg/runtimes"
     k3d "github.com/rancher/k3d/v4/pkg/types"
     "github.com/spf13/cobra"
-
-    log "github.com/sirupsen/logrus"
 )
 
 type getKubeconfigFlags struct {
@@ -70,13 +69,13 @@ func NewCmdKubeconfigGet() *cobra.Command {
             if getKubeconfigFlags.all {
                 clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
                 if err != nil {
-                    log.Fatalln(err)
+                    l.Log().Fatalln(err)
                 }
             } else {
                 for _, clusterName := range args {
                     retrievedCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
                     if err != nil {
-                        log.Fatalln(err)
+                        l.Log().Fatalln(err)
                     }
                     clusters = append(clusters, retrievedCluster)
                 }
@@ -85,10 +84,10 @@
             // get kubeconfigs from all clusters
             errorGettingKubeconfig := false
             for _, c := range clusters {
-                log.Debugf("Getting kubeconfig for cluster '%s'", c.Name)
+                l.Log().Debugf("Getting kubeconfig for cluster '%s'", c.Name)
                 fmt.Println("---") // YAML document separator
                 if _, err := client.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, "-", &writeKubeConfigOptions); err != nil {
-                    log.Errorln(err)
+                    l.Log().Errorln(err)
                     errorGettingKubeconfig = true
                 }
             }
@@ -29,13 +29,12 @@ import (
 
     "github.com/rancher/k3d/v4/cmd/util"
     "github.com/rancher/k3d/v4/pkg/client"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/rancher/k3d/v4/pkg/runtimes"
     k3d "github.com/rancher/k3d/v4/pkg/types"
     k3dutil "github.com/rancher/k3d/v4/pkg/util"
     "github.com/spf13/cobra"
     "k8s.io/client-go/tools/clientcmd"
-
-    log "github.com/sirupsen/logrus"
 )
 
 type mergeKubeconfigFlags struct {
@@ -64,14 +63,14 @@ func NewCmdKubeconfigMerge() *cobra.Command {
             var err error
 
             if mergeKubeconfigFlags.targetDefault && mergeKubeconfigFlags.output != "" {
-                log.Fatalln("Cannot use both '--output' and '--kubeconfig-merge-default' at the same time")
+                l.Log().Fatalln("Cannot use both '--output' and '--kubeconfig-merge-default' at the same time")
             }
 
             // generate list of clusters
             if mergeKubeconfigFlags.all {
                 clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
                 if err != nil {
-                    log.Fatalln(err)
+                    l.Log().Fatalln(err)
                 }
             } else {
 
@@ -83,7 +82,7 @@
                 for _, clusterName := range clusternames {
                     retrievedCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
                     if err != nil {
-                        log.Fatalln(err)
+                        l.Log().Fatalln(err)
                    }
                     clusters = append(clusters, retrievedCluster)
                 }
@@ -94,18 +93,18 @@
             var outputs []string
             outputDir, err := k3dutil.GetConfigDirOrCreate()
             if err != nil {
-                log.Errorln(err)
-                log.Fatalln("Failed to save kubeconfig to local directory")
+                l.Log().Errorln(err)
+                l.Log().Fatalln("Failed to save kubeconfig to local directory")
             }
             for _, c := range clusters {
-                log.Debugf("Getting kubeconfig for cluster '%s'", c.Name)
+                l.Log().Debugf("Getting kubeconfig for cluster '%s'", c.Name)
                 output := mergeKubeconfigFlags.output
                 if output == "" && !mergeKubeconfigFlags.targetDefault {
                     output = path.Join(outputDir, fmt.Sprintf("kubeconfig-%s.yaml", c.Name))
                 }
                 output, err = client.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, output, &writeKubeConfigOptions)
                 if err != nil {
-                    log.Errorln(err)
+                    l.Log().Errorln(err)
                     errorGettingKubeconfig = true
                 } else {
                     outputs = append(outputs, output)
@@ -127,7 +126,7 @@
     // add flags
     cmd.Flags().StringVarP(&mergeKubeconfigFlags.output, "output", "o", "", fmt.Sprintf("Define output [ - | FILE ] (default from $KUBECONFIG or %s", clientcmd.RecommendedHomeFile))
     if err := cmd.MarkFlagFilename("output"); err != nil {
-        log.Fatalln("Failed to mark flag --output as filename")
+        l.Log().Fatalln("Failed to mark flag --output as filename")
     }
     cmd.Flags().BoolVarP(&mergeKubeconfigFlags.targetDefault, "kubeconfig-merge-default", "d", false, fmt.Sprintf("Merge into the default kubeconfig ($KUBECONFIG or %s)", clientcmd.RecommendedHomeFile))
     cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateExisting, "update", "u", true, "Update conflicting fields in existing kubeconfig")
@@ -22,7 +22,7 @@ THE SOFTWARE.
 package node
 
 import (
-    log "github.com/sirupsen/logrus"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/spf13/cobra"
 )
 
@@ -36,8 +36,8 @@ func NewCmdNode() *cobra.Command {
         Long: `Manage node(s)`,
         Run: func(cmd *cobra.Command, args []string) {
             if err := cmd.Help(); err != nil {
-                log.Errorln("Couldn't get help text")
-                log.Fatalln(err)
+                l.Log().Errorln("Couldn't get help text")
+                l.Log().Fatalln(err)
             }
         },
     }
@@ -32,10 +32,10 @@ import (
     "github.com/rancher/k3d/v4/cmd/util"
     cliutil "github.com/rancher/k3d/v4/cmd/util"
     k3dc "github.com/rancher/k3d/v4/pkg/client"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/rancher/k3d/v4/pkg/runtimes"
     k3d "github.com/rancher/k3d/v4/pkg/types"
     "github.com/rancher/k3d/v4/version"
-    log "github.com/sirupsen/logrus"
 )
 
 // NewCmdNodeCreate returns a new cobra command
@@ -52,10 +52,10 @@ func NewCmdNodeCreate() *cobra.Command {
         Run: func(cmd *cobra.Command, args []string) {
             nodes, cluster := parseCreateNodeCmd(cmd, args)
             if err := k3dc.NodeAddToClusterMulti(cmd.Context(), runtimes.SelectedRuntime, nodes, cluster, createNodeOpts); err != nil {
-                log.Errorf("Failed to add nodes to cluster '%s'", cluster.Name)
-                log.Fatalln(err)
+                l.Log().Errorf("Failed to add nodes to cluster '%s'", cluster.Name)
+                l.Log().Fatalln(err)
             }
-            log.Infof("Successfully created %d node(s)!", len(nodes))
+            l.Log().Infof("Successfully created %d node(s)!", len(nodes))
         },
     }
 
@@ -63,11 +63,11 @@ func NewCmdNodeCreate() *cobra.Command {
     cmd.Flags().Int("replicas", 1, "Number of replicas of this node specification.")
     cmd.Flags().String("role", string(k3d.AgentRole), "Specify node role [server, agent]")
     if err := cmd.RegisterFlagCompletionFunc("role", util.ValidArgsNodeRoles); err != nil {
-        log.Fatalln("Failed to register flag completion for '--role'", err)
+        l.Log().Fatalln("Failed to register flag completion for '--role'", err)
     }
     cmd.Flags().StringP("cluster", "c", k3d.DefaultClusterName, "Select the cluster that the node shall connect to.")
     if err := cmd.RegisterFlagCompletionFunc("cluster", util.ValidArgsAvailableClusters); err != nil {
-        log.Fatalln("Failed to register flag completion for '--cluster'", err)
+        l.Log().Fatalln("Failed to register flag completion for '--cluster'", err)
     }
 
     cmd.Flags().StringP("image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image used for the node(s)")
@@ -89,32 +89,32 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cl
     // --replicas
     replicas, err := cmd.Flags().GetInt("replicas")
     if err != nil {
-        log.Errorln("No replica count specified")
-        log.Fatalln(err)
+        l.Log().Errorln("No replica count specified")
+        l.Log().Fatalln(err)
     }
 
     // --role
     roleStr, err := cmd.Flags().GetString("role")
     if err != nil {
-        log.Errorln("No node role specified")
-        log.Fatalln(err)
+        l.Log().Errorln("No node role specified")
+        l.Log().Fatalln(err)
     }
     if _, ok := k3d.NodeRoles[roleStr]; !ok {
-        log.Fatalf("Unknown node role '%s'\n", roleStr)
+        l.Log().Fatalf("Unknown node role '%s'\n", roleStr)
     }
     role := k3d.NodeRoles[roleStr]
 
     // --image
     image, err := cmd.Flags().GetString("image")
     if err != nil {
-        log.Errorln("No image specified")
-        log.Fatalln(err)
+        l.Log().Errorln("No image specified")
+        l.Log().Fatalln(err)
     }
 
     // --cluster
     clusterName, err := cmd.Flags().GetString("cluster")
     if err != nil {
-        log.Fatalln(err)
+        l.Log().Fatalln(err)
     }
     cluster := &k3d.Cluster{
         Name: clusterName,
@@ -123,25 +123,25 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cl
     // --memory
     memory, err := cmd.Flags().GetString("memory")
     if err != nil {
-        log.Errorln("No memory specified")
-        log.Fatalln(err)
+        l.Log().Errorln("No memory specified")
+        l.Log().Fatalln(err)
     }
     if _, err := dockerunits.RAMInBytes(memory); memory != "" && err != nil {
-        log.Errorf("Provided memory limit value is invalid")
+        l.Log().Errorf("Provided memory limit value is invalid")
     }
 
     // --runtime-label
     runtimeLabelsFlag, err := cmd.Flags().GetStringSlice("runtime-label")
     if err != nil {
-        log.Errorln("No runtime-label specified")
-        log.Fatalln(err)
+        l.Log().Errorln("No runtime-label specified")
+        l.Log().Fatalln(err)
     }
 
     runtimeLabels := make(map[string]string, len(runtimeLabelsFlag)+1)
     for _, label := range runtimeLabelsFlag {
         labelSplitted := strings.Split(label, "=")
         if len(labelSplitted) != 2 {
-            log.Fatalf("unknown runtime-label format format: %s, use format \"foo=bar\"", label)
+            l.Log().Fatalf("unknown runtime-label format format: %s, use format \"foo=bar\"", label)
         }
         cliutil.ValidateRuntimeLabelKey(labelSplitted[0])
         runtimeLabels[labelSplitted[0]] = labelSplitted[1]
@@ -153,15 +153,15 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cl
     // --k3s-node-label
     k3sNodeLabelsFlag, err := cmd.Flags().GetStringSlice("k3s-node-label")
     if err != nil {
-        log.Errorln("No k3s-node-label specified")
-        log.Fatalln(err)
+        l.Log().Errorln("No k3s-node-label specified")
+        l.Log().Fatalln(err)
     }
 
     k3sNodeLabels := make(map[string]string, len(k3sNodeLabelsFlag))
     for _, label := range k3sNodeLabelsFlag {
         labelSplitted := strings.Split(label, "=")
         if len(labelSplitted) != 2 {
-            log.Fatalf("unknown k3s-node-label format format: %s, use format \"foo=bar\"", label)
+            l.Log().Fatalf("unknown k3s-node-label format format: %s, use format \"foo=bar\"", label)
         }
         k3sNodeLabels[labelSplitted[0]] = labelSplitted[1]
     }
@@ -24,9 +24,9 @@ package node
 import (
     "github.com/rancher/k3d/v4/cmd/util"
     "github.com/rancher/k3d/v4/pkg/client"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/rancher/k3d/v4/pkg/runtimes"
     k3d "github.com/rancher/k3d/v4/pkg/types"
-    log "github.com/sirupsen/logrus"
     "github.com/spf13/cobra"
 )
 
@@ -52,14 +52,14 @@ func NewCmdNodeDelete() *cobra.Command {
             nodeDeleteOpts := k3d.NodeDeleteOpts{SkipLBUpdate: flags.All} // do not update LB, if we're deleting all nodes anyway
 
             if len(nodes) == 0 {
-                log.Infoln("No nodes found")
+                l.Log().Infoln("No nodes found")
             } else {
                 for _, node := range nodes {
                     if err := client.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node, nodeDeleteOpts); err != nil {
-                        log.Fatalln(err)
+                        l.Log().Fatalln(err)
                     }
                 }
-                log.Infof("Successfully deleted %d node(s)!", len(nodes))
+                l.Log().Infof("Successfully deleted %d node(s)!", len(nodes))
             }
         },
     }
@@ -83,11 +83,11 @@ func parseDeleteNodeCmd(cmd *cobra.Command, args []string, flags *nodeDeleteFlag
     // --all
     if flags.All {
         if !flags.IncludeRegistries {
-            log.Infoln("Didn't set '--registries', so won't delete registries.")
+            l.Log().Infoln("Didn't set '--registries', so won't delete registries.")
         }
         nodes, err = client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
         if err != nil {
-            log.Fatalln(err)
+            l.Log().Fatalln(err)
         }
         include := k3d.ClusterInternalNodeRoles
         exclude := []k3d.Role{}
@@ -99,13 +99,13 @@
     }
 
     if !flags.All && len(args) < 1 {
-        log.Fatalln("Expecting at least one node name if `--all` is not set")
+        l.Log().Fatalln("Expecting at least one node name if `--all` is not set")
     }
 
     for _, name := range args {
         node, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
         if err != nil {
-            log.Fatalln(err)
+            l.Log().Fatalln(err)
         }
         nodes = append(nodes, node)
     }
@@ -25,9 +25,9 @@ import (
     "github.com/docker/go-connections/nat"
     "github.com/rancher/k3d/v4/cmd/util"
     "github.com/rancher/k3d/v4/pkg/client"
+    l "github.com/rancher/k3d/v4/pkg/logger"
     "github.com/rancher/k3d/v4/pkg/runtimes"
     k3d "github.com/rancher/k3d/v4/pkg/types"
-    log "github.com/sirupsen/logrus"
     "github.com/spf13/cobra"
 )
 
@@ -46,13 +46,13 @@ func NewCmdNodeEdit() *cobra.Command {
 
             existingNode, changeset := parseEditNodeCmd(cmd, args)
 
-            log.Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingNode, changeset)
+            l.Log().Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingNode, changeset)
 
             if err := client.NodeEdit(cmd.Context(), runtimes.SelectedRuntime, existingNode, changeset); err != nil {
-                log.Fatalln(err)
+                l.Log().Fatalln(err)
             }
 
-            log.Infof("Successfully updated %s", existingNode.Name)
+            l.Log().Infof("Successfully updated %s", existingNode.Name)
 
         },
     }
@@ -71,16 +71,16 @@ func parseEditNodeCmd(cmd *cobra.Command, args []string) (*k3d.Node, *k3d.Node)
 
     existingNode, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: args[0]})
     if err != nil {
-        log.Fatalln(err)
+        l.Log().Fatalln(err)
     }
 
     if existingNode == nil {
-        log.Infof("Node %s not found", args[0])
+        l.Log().Infof("Node %s not found", args[0])
         return nil, nil
     }
 
     if existingNode.Role != k3d.LoadBalancerRole {
-        log.Fatalln("Currently only the loadbalancer can be updated!")
+        l.Log().Fatalln("Currently only the loadbalancer can be updated!")
     }
 
     changeset := &k3d.Node{}
@@ -90,7 +90,7 @@
     */
     portFlags, err := cmd.Flags().GetStringArray("port-add")
     if err != nil {
-        log.Errorln(err)
+        l.Log().Errorln(err)
         return nil, nil
     }
 
@@ -101,7 +101,7 @@
 
         portmappings, err := nat.ParsePortSpec(flag)
         if err != nil {
-            log.Fatalf("Failed to parse port spec '%s': %+v", flag, err)
+            l.Log().Fatalf("Failed to parse port spec '%s': %+v", flag, err)
         }
 
         for _, pm := range portmappings {
@ -28,11 +28,10 @@ import (
"github.com/liggitt/tabwriter"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/spf13/cobra"

log "github.com/sirupsen/logrus"
)

type nodeListFlags struct {
@ -64,14 +63,14 @@ func NewCmdNodeList() *cobra.Command {
if len(nodes) == 0 { // Option a) no name specified -> get all nodes
found, err := client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
l.Log().Fatalln(err)
}
existingNodes = append(existingNodes, found...)
} else { // Option b) cluster name specified -> get specific cluster
for _, node := range nodes {
found, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
if err != nil {
log.Fatalln(err)
l.Log().Fatalln(err)
}
existingNodes = append(existingNodes, found)
}

@ -23,11 +23,10 @@ package node

import (
"github.com/rancher/k3d/v4/cmd/util"
l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/spf13/cobra"

log "github.com/sirupsen/logrus"
)

// NewCmdNodeStart returns a new cobra command
@ -42,7 +41,7 @@ func NewCmdNodeStart() *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
node := parseStartNodeCmd(cmd, args)
if err := runtimes.SelectedRuntime.StartNode(cmd.Context(), node); err != nil {
log.Fatalln(err)
l.Log().Fatalln(err)
}
},
}
@ -55,7 +54,7 @@ func NewCmdNodeStart() *cobra.Command {
func parseStartNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
// node name // TODO: startNode: allow node filters, e.g. `k3d node start mycluster@agent` to start all agent nodes of cluster 'mycluster'
if len(args) == 0 || len(args[0]) == 0 {
log.Fatalln("No node name given")
l.Log().Fatalln("No node name given")
}

return &k3d.Node{Name: args[0]}

@ -26,9 +26,8 @@ import (
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/spf13/cobra"

l "github.com/rancher/k3d/v4/pkg/logger"
k3d "github.com/rancher/k3d/v4/pkg/types"

log "github.com/sirupsen/logrus"
)

// NewCmdNodeStop returns a new cobra command
@ -43,7 +42,7 @@ func NewCmdNodeStop() *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
node := parseStopNodeCmd(cmd, args)
if err := runtimes.SelectedRuntime.StopNode(cmd.Context(), node); err != nil {
log.Fatalln(err)
l.Log().Fatalln(err)
}
},
}
@ -56,7 +55,7 @@ func NewCmdNodeStop() *cobra.Command {
func parseStopNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
// node name // TODO: allow node filters, e.g. `k3d node stop mycluster@agent` to stop all agent nodes of cluster 'mycluster'
if len(args) == 0 || len(args[0]) == 0 {
log.Fatalln("No node name given")
l.Log().Fatalln("No node name given")
}

return &k3d.Node{Name: args[0]}

@ -22,7 +22,7 @@ THE SOFTWARE.
package registry

import (
log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/spf13/cobra"
)

@ -37,8 +37,8 @@ func NewCmdRegistry() *cobra.Command {
Long: `Manage registry/registries`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
log.Errorln("Couldn't get help text")
log.Fatalln(err)
l.Log().Errorln("Couldn't get help text")
l.Log().Fatalln(err)
}
},
}

@ -24,7 +24,7 @@ package registry
import (
"fmt"

log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"

"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
@ -75,12 +75,12 @@ func NewCmdRegistryCreate() *cobra.Command {
reg, clusters := parseCreateRegistryCmd(cmd, args, flags, ppFlags)
regNode, err := client.RegistryRun(cmd.Context(), runtimes.SelectedRuntime, reg)
if err != nil {
log.Fatalln(err)
l.Log().Fatalln(err)
}
if err := client.RegistryConnectClusters(cmd.Context(), runtimes.SelectedRuntime, regNode, clusters); err != nil {
log.Errorln(err)
l.Log().Errorln(err)
}
log.Infof("Successfully created registry '%s'", reg.Host)
l.Log().Infof("Successfully created registry '%s'", reg.Host)
regString := fmt.Sprintf("%s:%s", reg.Host, reg.ExposureOpts.Binding.HostPort)
if !flags.NoHelp {
fmt.Println(fmt.Sprintf(helptext, regString, regString, regString, regString))
@ -93,10 +93,10 @@ func NewCmdRegistryCreate() *cobra.Command {
// TODO: connecting to clusters requires non-existing config reload functionality in containerd
cmd.Flags().StringArrayVarP(&ppFlags.Clusters, "cluster", "c", nil, "[NotReady] Select the cluster(s) that the registry shall connect to.")
if err := cmd.RegisterFlagCompletionFunc("cluster", cliutil.ValidArgsAvailableClusters); err != nil {
log.Fatalln("Failed to register flag completion for '--cluster'", err)
l.Log().Fatalln("Failed to register flag completion for '--cluster'", err)
}
if err := cmd.Flags().MarkHidden("cluster"); err != nil {
log.Fatalln("Failed to hide --cluster flag on registry create command")
l.Log().Fatalln("Failed to hide --cluster flag on registry create command")
}

cmd.Flags().StringVarP(&flags.Image, "image", "i", fmt.Sprintf("%s:%s", k3d.DefaultRegistryImageRepo, k3d.DefaultRegistryImageTag), "Specify image used for the registry")
@ -125,8 +125,8 @@ func parseCreateRegistryCmd(cmd *cobra.Command, args []string, flags *regCreateF
// --port
exposePort, err := cliutil.ParsePortExposureSpec(ppFlags.Port, k3d.DefaultRegistryPort)
if err != nil {
log.Errorln("Failed to parse registry port")
log.Fatalln(err)
l.Log().Errorln("Failed to parse registry port")
l.Log().Fatalln(err)
}

// set the name for the registry node

@ -24,9 +24,9 @@ package registry
import (
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

@ -51,11 +51,11 @@ func NewCmdRegistryDelete() *cobra.Command {
nodes := parseRegistryDeleteCmd(cmd, args, &flags)

if len(nodes) == 0 {
log.Infoln("No registries found")
l.Log().Infoln("No registries found")
} else {
for _, node := range nodes {
if err := client.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
log.Fatalln(err)
l.Log().Fatalln(err)
}
}
}
@ -80,18 +80,18 @@ func parseRegistryDeleteCmd(cmd *cobra.Command, args []string, flags *registryDe
if flags.All {
nodes, err = client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
l.Log().Fatalln(err)
}
}

if !flags.All && len(args) < 1 {
log.Fatalln("Expecting at least one registry name if `--all` is not set")
l.Log().Fatalln("Expecting at least one registry name if `--all` is not set")
}

for _, name := range args {
node, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
if err != nil {
log.Fatalln(err)
l.Log().Fatalln(err)
}
nodes = append(nodes, node)
}

@ -28,9 +28,9 @@ import (
"github.com/liggitt/tabwriter"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

@ -64,15 +64,15 @@ func NewCmdRegistryList() *cobra.Command {
if len(nodes) == 0 { // Option a) no name specified -> get all registries
found, err := client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
l.Log().Fatalln(err)
}
existingNodes = append(existingNodes, found...)
} else { // Option b) registry name(s) specified -> get specific registries
for _, node := range nodes {
log.Tracef("Node %s", node.Name)
l.Log().Tracef("Node %s", node.Name)
found, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
if err != nil {
log.Fatalln(err)
l.Log().Fatalln(err)
}
existingNodes = append(existingNodes, found)
}

65 cmd/root.go
@ -40,9 +40,10 @@ import (
"github.com/rancher/k3d/v4/cmd/node"
"github.com/rancher/k3d/v4/cmd/registry"
cliutil "github.com/rancher/k3d/v4/cmd/util"
l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/version"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/writer"
)

@ -71,7 +72,7 @@ All Nodes of a k3d cluster are part of the same docker network.`,
printVersion()
} else {
if err := cmd.Usage(); err != nil {
log.Fatalln(err)
l.Log().Fatalln(err)
}
}
},
@ -110,11 +111,11 @@ All Nodes of a k3d cluster are part of the same docker network.`,
Run: func(cmd *cobra.Command, args []string) {
info, err := runtimes.SelectedRuntime.Info()
if err != nil {
log.Fatalln(err)
l.Log().Fatalln(err)
}
err = yaml.NewEncoder(os.Stdout).Encode(info)
if err != nil {
log.Fatalln(err)
l.Log().Fatalln(err)
}
},
Hidden: true,
@ -136,58 +137,58 @@ func Execute() {
if _, _, err := cmd.Find(parts); err != nil {
pluginFound, err := cliutil.HandlePlugin(context.Background(), parts)
if err != nil {
log.Errorf("Failed to execute plugin '%+v'", parts)
log.Fatalln(err)
l.Log().Errorf("Failed to execute plugin '%+v'", parts)
l.Log().Fatalln(err)
} else if pluginFound {
os.Exit(0)
}
}
}
if err := cmd.Execute(); err != nil {
log.Fatalln(err)
l.Log().Fatalln(err)
}
}

// initLogging initializes the logger
func initLogging() {
if flags.traceLogging {
log.SetLevel(log.TraceLevel)
l.Log().SetLevel(logrus.TraceLevel)
} else if flags.debugLogging {
log.SetLevel(log.DebugLevel)
l.Log().SetLevel(logrus.DebugLevel)
} else {
switch logLevel := strings.ToUpper(os.Getenv("LOG_LEVEL")); logLevel {
case "TRACE":
log.SetLevel(log.TraceLevel)
l.Log().SetLevel(logrus.TraceLevel)
case "DEBUG":
log.SetLevel(log.DebugLevel)
l.Log().SetLevel(logrus.DebugLevel)
case "WARN":
log.SetLevel(log.WarnLevel)
l.Log().SetLevel(logrus.WarnLevel)
case "ERROR":
log.SetLevel(log.ErrorLevel)
l.Log().SetLevel(logrus.ErrorLevel)
default:
log.SetLevel(log.InfoLevel)
l.Log().SetLevel(logrus.InfoLevel)
}
}
log.SetOutput(ioutil.Discard)
log.AddHook(&writer.Hook{
l.Log().SetOutput(ioutil.Discard)
l.Log().AddHook(&writer.Hook{
Writer: os.Stderr,
LogLevels: []log.Level{
log.PanicLevel,
log.FatalLevel,
log.ErrorLevel,
log.WarnLevel,
LogLevels: []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
},
})
log.AddHook(&writer.Hook{
l.Log().AddHook(&writer.Hook{
Writer: os.Stdout,
LogLevels: []log.Level{
log.InfoLevel,
log.DebugLevel,
log.TraceLevel,
LogLevels: []logrus.Level{
logrus.InfoLevel,
logrus.DebugLevel,
logrus.TraceLevel,
},
})

formatter := &log.TextFormatter{
formatter := &logrus.TextFormatter{
ForceColors: true,
}

@ -195,18 +196,18 @@ func initLogging() {
formatter.FullTimestamp = true
}

log.SetFormatter(formatter)
l.Log().SetFormatter(formatter)

}

func initRuntime() {
runtime, err := runtimes.GetRuntime("docker")
if err != nil {
log.Fatalln(err)
l.Log().Fatalln(err)
}
runtimes.SelectedRuntime = runtime
if rtinfo, err := runtime.Info(); err == nil {
log.Debugf("Runtime Info:\n%+v", rtinfo)
l.Log().Debugf("Runtime Info:\n%+v", rtinfo)
}
}

@ -286,11 +287,11 @@ PowerShell:
Run: func(cmd *cobra.Command, args []string) {
if completionFunc, ok := completionFunctions[args[0]]; ok {
if err := completionFunc(os.Stdout); err != nil {
log.Fatalf("Failed to generate completion script for shell '%s'", args[0])
l.Log().Fatalf("Failed to generate completion script for shell '%s'", args[0])
}
return
}
log.Fatalf("Shell '%s' not supported for completion", args[0])
l.Log().Fatalf("Shell '%s' not supported for completion", args[0])
},
}
return cmd

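Note: the new pkg/logger package itself does not appear in the hunks shown here. Judging from the commit message ("new package containing logrus.Logger") and the l.Log() calls throughout, it presumably boils down to something like the following minimal sketch; the names and layout are assumptions, not taken from this diff:

// Hypothetical sketch of pkg/logger: a package-level *logrus.Logger that
// replaces logrus' global logger, so configuration is owned by k3d.
package logger

import "github.com/sirupsen/logrus"

// log is the shared instance, configured once (e.g. by initLogging above).
var log = logrus.New()

// Log returns the shared logger; callers write l.Log().Infof(...) etc.
func Log() *logrus.Logger {
	return log
}

One design consequence visible in initLogging above: level, hooks, and formatter are now set on this dedicated instance via l.Log() instead of mutating logrus' package-global state, which keeps k3d's logging isolated from other libraries that also use logrus.
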
@ -26,9 +26,9 @@ import (
"strings"

k3dcluster "github.com/rancher/k3d/v4/pkg/client"
l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

@ -39,7 +39,7 @@ func ValidArgsAvailableClusters(cmd *cobra.Command, args []string, toComplete st
var clusters []*k3d.Cluster
clusters, err := k3dcluster.ClusterList(context.Background(), runtimes.SelectedRuntime)
if err != nil {
log.Errorln("Failed to get list of clusters for shell completion")
l.Log().Errorln("Failed to get list of clusters for shell completion")
return nil, cobra.ShellCompDirectiveError
}

@ -64,7 +64,7 @@ func ValidArgsAvailableNodes(cmd *cobra.Command, args []string, toComplete strin
var nodes []*k3d.Node
nodes, err := k3dcluster.NodeList(context.Background(), runtimes.SelectedRuntime)
if err != nil {
log.Errorln("Failed to get list of nodes for shell completion")
l.Log().Errorln("Failed to get list of nodes for shell completion")
return nil, cobra.ShellCompDirectiveError
}

@ -89,7 +89,7 @@ func ValidArgsAvailableRegistries(cmd *cobra.Command, args []string, toComplete
var nodes []*k3d.Node
nodes, err := k3dcluster.NodeList(context.Background(), runtimes.SelectedRuntime)
if err != nil {
log.Errorln("Failed to get list of nodes for shell completion")
l.Log().Errorln("Failed to get list of nodes for shell completion")
return nil, cobra.ShellCompDirectiveError
}

@ -25,7 +25,7 @@ import (
"fmt"
"strings"

log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

// SplitFiltersFromFlag separates a flag's value from the node filter, if there is one
@ -50,10 +50,10 @@ func SplitFiltersFromFlag(flag string) (string, []string, error) {
// Case 1.1: Escaped backslash
if strings.HasSuffix(it, "\\\\") {
it = strings.TrimSuffix(it, "\\")
log.Warnf("The part '%s' of the flag input '%s' ends with a double backslash, so we assume you want to escape the backslash before the '@'. That's the only time we do this.", it, flag)
l.Log().Warnf("The part '%s' of the flag input '%s' ends with a double backslash, so we assume you want to escape the backslash before the '@'. That's the only time we do this.", it, flag)
} else {
// Case 1.2: Unescaped backslash -> Escaping the '@' -> remove suffix and append it to buffer, followed by the escaped @ sign
log.Tracef("Item '%s' just before an '@' ends with '\\', so we assume it's escaping a literal '@'", it)
l.Log().Tracef("Item '%s' just before an '@' ends with '\\', so we assume it's escaping a literal '@'", it)
buffer += strings.TrimSuffix(it, "\\") + "@"
continue
}

@ -29,8 +29,8 @@ import (
"strings"

"github.com/liggitt/tabwriter"
l "github.com/rancher/k3d/v4/pkg/logger"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)

@ -55,7 +55,7 @@ func PrintNodes(nodes []*k3d.Node, outputFormat string, headers *[]string, nodeP
if headers != nil {
_, err := fmt.Fprintf(tabwriter, "%s\n", strings.Join(*headers, "\t"))
if err != nil {
log.Fatalln("Failed to print headers")
l.Log().Fatalln("Failed to print headers")
}
}
}

@ -28,9 +28,9 @@ import (
"strconv"

"github.com/docker/go-connections/nat"
l "github.com/rancher/k3d/v4/pkg/logger"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/util"
log "github.com/sirupsen/logrus"
)

var apiPortRegexp = regexp.MustCompile(`^(?P<hostref>(?P<hostip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|(?P<hostname>\S+):)?(?P<port>(\d{1,5}|random))$`)
@ -55,7 +55,7 @@ func ParsePortExposureSpec(exposedPortSpec, internalPort string) (*k3d.ExposureO

// check if there's a host reference
if submatches["hostname"] != "" {
log.Tracef("Port Exposure: found hostname: %s", submatches["hostname"])
l.Log().Tracef("Port Exposure: found hostname: %s", submatches["hostname"])
addrs, err := net.LookupHost(submatches["hostname"])
if err != nil {
return nil, fmt.Errorf("Failed to lookup host '%s' specified for Port Exposure: %+v", submatches["hostname"], err)
@ -77,15 +77,15 @@ func ParsePortExposureSpec(exposedPortSpec, internalPort string) (*k3d.ExposureO

// port: get a free one if there's none defined or set to random
if submatches["port"] == "" || submatches["port"] == "random" {
log.Debugf("Port Exposure Mapping didn't specify hostPort, choosing one randomly...")
l.Log().Debugf("Port Exposure Mapping didn't specify hostPort, choosing one randomly...")
freePort, err := GetFreePort()
if err != nil || freePort == 0 {
log.Warnf("Failed to get random free port: %+v", err)
log.Warnf("Falling back to internal port %s (may be blocked though)...", internalPort)
l.Log().Warnf("Failed to get random free port: %+v", err)
l.Log().Warnf("Falling back to internal port %s (may be blocked though)...", internalPort)
submatches["port"] = internalPort
} else {
submatches["port"] = strconv.Itoa(freePort)
log.Debugf("Got free port for Port Exposure: '%d'", freePort)
l.Log().Debugf("Got free port for Port Exposure: '%d'", freePort)
}
}

@ -112,13 +112,13 @@ func ValidatePortMap(portmap string) (string, error) {
func GetFreePort() (int, error) {
tcpAddress, err := net.ResolveTCPAddr("tcp", "localhost:0")
if err != nil {
log.Errorln("Failed to resolve address")
l.Log().Errorln("Failed to resolve address")
return 0, err
}

tcpListener, err := net.ListenTCP("tcp", tcpAddress)
if err != nil {
log.Errorln("Failed to create TCP Listener")
l.Log().Errorln("Failed to create TCP Listener")
return 0, err
}
defer tcpListener.Close()

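GetFreePort above relies on the standard port-0 trick: binding to port 0 makes the kernel assign an unused ephemeral port, which the caller reads back from the listener before closing it. A minimal standalone sketch of the same technique, using only the Go standard library (independent of k3d's helpers):

package main

import (
	"fmt"
	"net"
)

// freePort asks the OS for an unused TCP port by listening on port 0
// and reading the actual port back from the listener's address.
func freePort() (int, error) {
	addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
	if err != nil {
		return 0, err
	}
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		return 0, err
	}
	defer listener.Close()
	return listener.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	port, err := freePort()
	if err != nil {
		panic(err)
	}
	// Note: the port is only reserved while the listener is open,
	// so there is a small race window between closing and re-binding.
	fmt.Println("free port:", port)
}
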
@ -24,12 +24,12 @@ package util
import (
"strings"

log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

// validateRuntimeLabelKey validates a given label key is not reserved for internal k3d usage
func ValidateRuntimeLabelKey(labelKey string) {
if strings.HasPrefix(labelKey, "k3s.") || strings.HasPrefix(labelKey, "k3d.") || labelKey == "app" {
log.Fatalf("runtime label \"%s\" is reserved for internal usage", labelKey)
l.Log().Fatalf("runtime label \"%s\" is reserved for internal usage", labelKey)
}
}

@ -29,7 +29,7 @@ import (

"github.com/rancher/k3d/v4/pkg/runtimes"

log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

// ValidateVolumeMount checks, if the source of volume mounts exists and if the destination is an absolute path
@ -81,7 +81,7 @@ func ValidateVolumeMount(runtime runtimes.Runtime, volumeMount string) (string,
}
if !isNamedVolume {
if _, err := os.Stat(src); err != nil {
log.Warnf("Failed to stat file/directory/named volume that you're trying to mount: '%s' in '%s' -> Please make sure it exists", src, volumeMount)
l.Log().Warnf("Failed to stat file/directory/named volume that you're trying to mount: '%s' in '%s' -> Please make sure it exists", src, volumeMount)
}
}
}

@ -1,9 +1,8 @@
package main

import (
log "github.com/sirupsen/logrus"

"github.com/rancher/k3d/v4/cmd"
l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/spf13/cobra/doc"
)

@ -12,6 +11,6 @@ func main() {
k3d.DisableAutoGenTag = true

if err := doc.GenMarkdownTree(k3d, "../docs/usage/commands"); err != nil {
log.Fatalln(err)
l.Log().Fatalln(err)
}
}

@ -38,6 +38,7 @@ import (
|
||||
copystruct "github.com/mitchellh/copystructure"
|
||||
"github.com/rancher/k3d/v4/pkg/actions"
|
||||
config "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
|
||||
l "github.com/rancher/k3d/v4/pkg/logger"
|
||||
k3drt "github.com/rancher/k3d/v4/pkg/runtimes"
|
||||
"github.com/rancher/k3d/v4/pkg/runtimes/docker"
|
||||
runtimeErr "github.com/rancher/k3d/v4/pkg/runtimes/errors"
|
||||
@ -45,7 +46,7 @@ import (
|
||||
k3d "github.com/rancher/k3d/v4/pkg/types"
|
||||
"github.com/rancher/k3d/v4/pkg/types/k3s"
|
||||
"github.com/rancher/k3d/v4/pkg/util"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
@ -101,7 +102,7 @@ func ClusterRun(ctx context.Context, runtime k3drt.Runtime, clusterConfig *confi
|
||||
// create the registry hosting configmap
|
||||
if len(clusterConfig.ClusterCreateOpts.Registries.Use) > 0 {
|
||||
if err := prepCreateLocalRegistryHostingConfigMap(ctx, runtime, &clusterConfig.Cluster); err != nil {
|
||||
log.Warnf("Failed to create LocalRegistryHosting ConfigMap: %+v", err)
|
||||
l.Log().Warnf("Failed to create LocalRegistryHosting ConfigMap: %+v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -148,7 +149,7 @@ func ClusterPrep(ctx context.Context, runtime k3drt.Runtime, clusterConfig *conf
|
||||
|
||||
// Ensure referenced registries
|
||||
for _, reg := range clusterConfig.ClusterCreateOpts.Registries.Use {
|
||||
log.Debugf("Trying to find registry %s", reg.Host)
|
||||
l.Log().Debugf("Trying to find registry %s", reg.Host)
|
||||
regNode, err := runtime.GetNode(ctx, &k3d.Node{Name: reg.Host})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to find registry node '%s': %+v", reg.Host, err)
|
||||
@ -173,7 +174,7 @@ func ClusterPrep(ctx context.Context, runtime k3drt.Runtime, clusterConfig *conf
|
||||
}
|
||||
|
||||
// Use existing registries (including the new one, if created)
|
||||
log.Tracef("Using Registries: %+v", clusterConfig.ClusterCreateOpts.Registries.Use)
|
||||
l.Log().Tracef("Using Registries: %+v", clusterConfig.ClusterCreateOpts.Registries.Use)
|
||||
|
||||
var registryConfig *k3s.Registry
|
||||
|
||||
@ -200,7 +201,7 @@ func ClusterPrep(ctx context.Context, runtime k3drt.Runtime, clusterConfig *conf
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to generate LocalRegistryHosting configmap: %+v", err)
|
||||
}
|
||||
log.Tracef("Writing LocalRegistryHosting YAML:\n%s", string(regCm))
|
||||
l.Log().Tracef("Writing LocalRegistryHosting YAML:\n%s", string(regCm))
|
||||
clusterConfig.ClusterCreateOpts.NodeHooks = append(clusterConfig.ClusterCreateOpts.NodeHooks, k3d.NodeHook{
|
||||
Stage: k3d.LifecycleStagePreStart,
|
||||
Action: actions.WriteFileAction{
|
||||
@ -220,7 +221,7 @@ func ClusterPrep(ctx context.Context, runtime k3drt.Runtime, clusterConfig *conf
|
||||
if err := RegistryMergeConfig(ctx, registryConfig, clusterConfig.ClusterCreateOpts.Registries.Config); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Tracef("Merged registry config: %+v", registryConfig)
|
||||
l.Log().Tracef("Merged registry config: %+v", registryConfig)
|
||||
} else {
|
||||
registryConfig = clusterConfig.ClusterCreateOpts.Registries.Config
|
||||
}
|
||||
@ -247,7 +248,7 @@ func ClusterPrep(ctx context.Context, runtime k3drt.Runtime, clusterConfig *conf
|
||||
|
||||
// ClusterPrepNetwork creates a new cluster network, if needed or sets everything up to re-use an existing network
|
||||
func ClusterPrepNetwork(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, clusterCreateOpts *k3d.ClusterCreateOpts) error {
|
||||
log.Infoln("Prep: Network")
|
||||
l.Log().Infoln("Prep: Network")
|
||||
|
||||
// error out if external cluster network should be used but no name was set
|
||||
if cluster.Network.Name == "" && cluster.Network.External {
|
||||
@ -273,7 +274,7 @@ func ClusterPrepNetwork(ctx context.Context, runtime k3drt.Runtime, cluster *k3d
|
||||
// create cluster network or use an existing one
|
||||
network, networkExists, err := runtime.CreateNetworkIfNotPresent(ctx, &cluster.Network)
|
||||
if err != nil {
|
||||
log.Errorln("Failed to create cluster network")
|
||||
l.Log().Errorln("Failed to create cluster network")
|
||||
return err
|
||||
}
|
||||
cluster.Network = *network
|
||||
@ -282,7 +283,7 @@ func ClusterPrepNetwork(ctx context.Context, runtime k3drt.Runtime, cluster *k3d
|
||||
clusterCreateOpts.GlobalLabels[k3d.LabelNetworkIPRange] = cluster.Network.IPAM.IPPrefix.String()
|
||||
clusterCreateOpts.GlobalLabels[k3d.LabelNetworkExternal] = strconv.FormatBool(cluster.Network.External)
|
||||
if networkExists {
|
||||
log.Infof("Re-using existing network '%s' (%s)", network.Name, network.ID)
|
||||
l.Log().Infof("Re-using existing network '%s' (%s)", network.Name, network.ID)
|
||||
clusterCreateOpts.GlobalLabels[k3d.LabelNetworkExternal] = "true" // if the network wasn't created, we say that it's managed externally (important for cluster deletion)
|
||||
}
|
||||
|
||||
@ -296,7 +297,7 @@ func ClusterPrepImageVolume(ctx context.Context, runtime k3drt.Runtime, cluster
|
||||
*/
|
||||
imageVolumeName := fmt.Sprintf("%s-%s-images", k3d.DefaultObjectNamePrefix, cluster.Name)
|
||||
if err := runtime.CreateVolume(ctx, imageVolumeName, map[string]string{k3d.LabelClusterName: cluster.Name}); err != nil {
|
||||
log.Errorf("Failed to create image volume '%s' for cluster '%s'", imageVolumeName, cluster.Name)
|
||||
l.Log().Errorf("Failed to create image volume '%s' for cluster '%s'", imageVolumeName, cluster.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -314,7 +315,7 @@ func ClusterPrepImageVolume(ctx context.Context, runtime k3drt.Runtime, cluster
|
||||
// - a docker network
|
||||
func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, clusterCreateOpts *k3d.ClusterCreateOpts) error {
|
||||
|
||||
log.Tracef(`
|
||||
l.Log().Tracef(`
|
||||
===== Creating Cluster =====
|
||||
|
||||
Runtime:
|
||||
@ -345,16 +346,16 @@ ClusterCreatOpts:
|
||||
*/
|
||||
if cluster.KubeAPI.Host == k3d.DefaultAPIHost && runtime == k3drt.Docker {
|
||||
if gort.GOOS == "windows" || gort.GOOS == "darwin" {
|
||||
log.Tracef("Running on %s: checking if it's using docker-machine", gort.GOOS)
|
||||
l.Log().Tracef("Running on %s: checking if it's using docker-machine", gort.GOOS)
|
||||
machineIP, err := runtime.(docker.Docker).GetDockerMachineIP()
|
||||
if err != nil {
|
||||
log.Warnf("Using docker-machine, but failed to get it's IP: %+v", err)
|
||||
l.Log().Warnf("Using docker-machine, but failed to get it's IP: %+v", err)
|
||||
} else if machineIP != "" {
|
||||
log.Infof("Using the docker-machine IP %s to connect to the Kubernetes API", machineIP)
|
||||
l.Log().Infof("Using the docker-machine IP %s to connect to the Kubernetes API", machineIP)
|
||||
cluster.KubeAPI.Host = machineIP
|
||||
cluster.KubeAPI.Binding.HostIP = machineIP
|
||||
} else {
|
||||
log.Traceln("Not using docker-machine")
|
||||
l.Log().Traceln("Not using docker-machine")
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -425,12 +426,12 @@ ClusterCreatOpts:
|
||||
node.GPURequest = clusterCreateOpts.GPURequest
|
||||
|
||||
// create node
|
||||
log.Infof("Creating node '%s'", node.Name)
|
||||
l.Log().Infof("Creating node '%s'", node.Name)
|
||||
if err := NodeCreate(clusterCreateCtx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
|
||||
log.Errorln("Failed to create node")
|
||||
l.Log().Errorln("Failed to create node")
|
||||
return err
|
||||
}
|
||||
log.Debugf("Created node '%s'", node.Name)
|
||||
l.Log().Debugf("Created node '%s'", node.Name)
|
||||
|
||||
// start node
|
||||
//return NodeStart(clusterCreateCtx, runtime, node, k3d.NodeStartOpts{PreStartActions: clusterCreateOpts.NodeHookActions})
|
||||
@ -442,7 +443,7 @@ ClusterCreatOpts:
|
||||
|
||||
// create init node first
|
||||
if cluster.InitNode != nil {
|
||||
log.Infoln("Creating initializing server node")
|
||||
l.Log().Infoln("Creating initializing server node")
|
||||
cluster.InitNode.Args = append(cluster.InitNode.Args, "--cluster-init")
|
||||
if cluster.InitNode.RuntimeLabels == nil {
|
||||
cluster.InitNode.RuntimeLabels = map[string]string{}
|
||||
@ -493,7 +494,7 @@ ClusterCreatOpts:
|
||||
|
||||
// WARN, if there are exactly two server nodes: that means we're using etcd, but don't have fault tolerance
|
||||
if serverCount == 2 {
|
||||
log.Warnln("You're creating 2 server nodes: Please consider creating at least 3 to achieve etcd quorum & fault tolerance")
|
||||
l.Log().Warnln("You're creating 2 server nodes: Please consider creating at least 3 to achieve etcd quorum & fault tolerance")
|
||||
}
|
||||
|
||||
/*
|
||||
@ -537,11 +538,11 @@ ClusterCreatOpts:
|
||||
|
||||
cluster.ServerLoadBalancer.Node.HookActions = append(cluster.ServerLoadBalancer.Node.HookActions, writeLbConfigAction)
|
||||
|
||||
log.Infof("Creating LoadBalancer '%s'", cluster.ServerLoadBalancer.Node.Name)
|
||||
l.Log().Infof("Creating LoadBalancer '%s'", cluster.ServerLoadBalancer.Node.Name)
|
||||
if err := NodeCreate(ctx, runtime, cluster.ServerLoadBalancer.Node, k3d.NodeCreateOpts{}); err != nil {
|
||||
return fmt.Errorf("error creating loadbalancer: %v", err)
|
||||
}
|
||||
log.Debugf("Created loadbalancer '%s'", cluster.ServerLoadBalancer.Node.Name)
|
||||
l.Log().Debugf("Created loadbalancer '%s'", cluster.ServerLoadBalancer.Node.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -551,18 +552,18 @@ ClusterCreatOpts:
|
||||
// ClusterDelete deletes an existing cluster
|
||||
func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, opts k3d.ClusterDeleteOpts) error {
|
||||
|
||||
log.Infof("Deleting cluster '%s'", cluster.Name)
|
||||
l.Log().Infof("Deleting cluster '%s'", cluster.Name)
|
||||
cluster, err := ClusterGet(ctx, runtime, cluster)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Cluster Details: %+v", cluster)
|
||||
l.Log().Debugf("Cluster Details: %+v", cluster)
|
||||
|
||||
failed := 0
|
||||
for _, node := range cluster.Nodes {
|
||||
// registry: only delete, if not connected to other networks
|
||||
if node.Role == k3d.RegistryRole && !opts.SkipRegistryCheck {
|
||||
log.Tracef("Registry Node has %d networks: %+v", len(node.Networks), node)
|
||||
l.Log().Tracef("Registry Node has %d networks: %+v", len(node.Networks), node)
|
||||
|
||||
// check if node is connected to other networks, that are not
|
||||
// - the cluster network
|
||||
@ -576,21 +577,21 @@ func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
|
||||
if net == "bridge" || net == "host" {
|
||||
continue
|
||||
}
|
||||
log.Tracef("net: %s", net)
|
||||
l.Log().Tracef("net: %s", net)
|
||||
connectedToOtherNet = true
|
||||
break
|
||||
}
|
||||
if connectedToOtherNet {
|
||||
log.Infof("Registry %s is also connected to other (non-default) networks (%+v), not deleting it...", node.Name, node.Networks)
|
||||
l.Log().Infof("Registry %s is also connected to other (non-default) networks (%+v), not deleting it...", node.Name, node.Networks)
|
||||
if err := runtime.DisconnectNodeFromNetwork(ctx, node, cluster.Network.Name); err != nil {
|
||||
log.Warnf("Failed to disconnect registry %s from cluster network %s", node.Name, cluster.Network.Name)
|
||||
l.Log().Warnf("Failed to disconnect registry %s from cluster network %s", node.Name, cluster.Network.Name)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if err := NodeDelete(ctx, runtime, node, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
|
||||
log.Warningf("Failed to delete node '%s': Try to delete it manually", node.Name)
|
||||
l.Log().Warningf("Failed to delete node '%s': Try to delete it manually", node.Name)
|
||||
failed++
|
||||
continue
|
||||
}
|
||||
@ -599,48 +600,48 @@ func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
|
||||
// Delete the cluster network, if it was created for/by this cluster (and if it's not in use anymore)
|
||||
if cluster.Network.Name != "" {
|
||||
if !cluster.Network.External {
|
||||
log.Infof("Deleting cluster network '%s'", cluster.Network.Name)
|
||||
l.Log().Infof("Deleting cluster network '%s'", cluster.Network.Name)
|
||||
if err := runtime.DeleteNetwork(ctx, cluster.Network.Name); err != nil {
|
||||
if errors.Is(err, runtimeErr.ErrRuntimeNetworkNotEmpty) { // there are still containers connected to that network
|
||||
|
||||
connectedNodes, err := runtime.GetNodesInNetwork(ctx, cluster.Network.Name) // check, if there are any k3d nodes connected to the cluster
|
||||
if err != nil {
|
||||
log.Warningf("Failed to check cluster network for connected nodes: %+v", err)
|
||||
l.Log().Warningf("Failed to check cluster network for connected nodes: %+v", err)
|
||||
}
|
||||
|
||||
if len(connectedNodes) > 0 { // there are still k3d-managed containers (aka nodes) connected to the network
|
||||
connectedRegistryNodes := util.FilterNodesByRole(connectedNodes, k3d.RegistryRole)
|
||||
if len(connectedRegistryNodes) == len(connectedNodes) { // only registry node(s) left in the network
|
||||
for _, node := range connectedRegistryNodes {
|
||||
log.Debugf("Disconnecting registry node %s from the network...", node.Name)
|
||||
l.Log().Debugf("Disconnecting registry node %s from the network...", node.Name)
|
||||
if err := runtime.DisconnectNodeFromNetwork(ctx, node, cluster.Network.Name); err != nil {
|
||||
log.Warnf("Failed to disconnect registry %s from network %s", node.Name, cluster.Network.Name)
|
||||
l.Log().Warnf("Failed to disconnect registry %s from network %s", node.Name, cluster.Network.Name)
|
||||
} else {
|
||||
if err := runtime.DeleteNetwork(ctx, cluster.Network.Name); err != nil {
|
||||
log.Warningf("Failed to delete cluster network, even after disconnecting registry node(s): %+v", err)
|
||||
l.Log().Warningf("Failed to delete cluster network, even after disconnecting registry node(s): %+v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else { // besides the registry node(s), there are still other nodes... maybe they still need a registry
|
||||
log.Debugf("There are some non-registry nodes left in the network")
|
||||
l.Log().Debugf("There are some non-registry nodes left in the network")
|
||||
}
|
||||
} else {
|
||||
log.Warningf("Failed to delete cluster network '%s' because it's still in use: is there another cluster using it?", cluster.Network.Name)
|
||||
l.Log().Warningf("Failed to delete cluster network '%s' because it's still in use: is there another cluster using it?", cluster.Network.Name)
|
||||
}
|
||||
} else {
|
||||
log.Warningf("Failed to delete cluster network '%s': '%+v'", cluster.Network.Name, err)
|
||||
l.Log().Warningf("Failed to delete cluster network '%s': '%+v'", cluster.Network.Name, err)
|
||||
}
|
||||
}
|
||||
} else if cluster.Network.External {
|
||||
log.Debugf("Skip deletion of cluster network '%s' because it's managed externally", cluster.Network.Name)
|
||||
l.Log().Debugf("Skip deletion of cluster network '%s' because it's managed externally", cluster.Network.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// delete image volume
|
||||
if cluster.ImageVolume != "" {
|
||||
log.Infof("Deleting image volume '%s'", cluster.ImageVolume)
|
||||
l.Log().Infof("Deleting image volume '%s'", cluster.ImageVolume)
|
||||
if err := runtime.DeleteVolume(ctx, cluster.ImageVolume); err != nil {
|
||||
log.Warningf("Failed to delete image volume '%s' of cluster '%s': Try to delete it manually", cluster.ImageVolume, cluster.Name)
|
||||
l.Log().Warningf("Failed to delete image volume '%s' of cluster '%s': Try to delete it manually", cluster.ImageVolume, cluster.Name)
|
||||
}
|
||||
}
|
||||
|
||||
@ -653,26 +654,26 @@ func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
|
||||
|
||||
// ClusterList returns a list of all existing clusters
|
||||
func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, error) {
|
||||
log.Traceln("Listing Clusters...")
|
||||
l.Log().Traceln("Listing Clusters...")
|
||||
nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultRuntimeLabels)
|
||||
if err != nil {
|
||||
log.Errorln("Failed to get clusters")
|
||||
l.Log().Errorln("Failed to get clusters")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugf("Found %d nodes", len(nodes))
|
||||
if log.GetLevel() == log.TraceLevel {
|
||||
l.Log().Debugf("Found %d nodes", len(nodes))
|
||||
if l.Log().GetLevel() == logrus.TraceLevel {
|
||||
for _, node := range nodes {
|
||||
log.Tracef("Found node %s of role %s", node.Name, node.Role)
|
||||
l.Log().Tracef("Found node %s of role %s", node.Name, node.Role)
|
||||
}
|
||||
}
|
||||
|
||||
nodes = NodeFilterByRoles(nodes, k3d.ClusterInternalNodeRoles, k3d.ClusterExternalNodeRoles)
|
||||
|
||||
log.Tracef("Found %d cluster-internal nodes", len(nodes))
|
||||
if log.GetLevel() == log.TraceLevel {
|
||||
l.Log().Tracef("Found %d cluster-internal nodes", len(nodes))
|
||||
if l.Log().GetLevel() == logrus.TraceLevel {
|
||||
for _, node := range nodes {
|
||||
log.Tracef("Found cluster-internal node %s of role %s belonging to cluster %s", node.Name, node.Role, node.RuntimeLabels[k3d.LabelClusterName])
|
||||
l.Log().Tracef("Found cluster-internal node %s of role %s belonging to cluster %s", node.Name, node.Role, node.RuntimeLabels[k3d.LabelClusterName])
|
||||
}
|
||||
}
|
||||
|
||||
@ -699,11 +700,11 @@ func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, er
|
||||
// enrich cluster structs with label values
|
||||
for _, cluster := range clusters {
|
||||
if err := populateClusterFieldsFromLabels(cluster); err != nil {
|
||||
log.Warnf("Failed to populate cluster fields from node label values for cluster '%s'", cluster.Name)
|
||||
log.Warnln(err)
|
||||
l.Log().Warnf("Failed to populate cluster fields from node label values for cluster '%s'", cluster.Name)
|
||||
l.Log().Warnln(err)
|
||||
}
|
||||
}
|
||||
log.Debugf("Found %d clusters", len(clusters))
|
||||
l.Log().Debugf("Found %d clusters", len(clusters))
|
||||
return clusters, nil
|
||||
}
|
||||
|
||||
@ -756,7 +757,7 @@ func ClusterGet(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster
|
||||
// get nodes that belong to the selected cluster
|
||||
nodes, err := runtime.GetNodesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name})
|
||||
if err != nil {
|
||||
log.Errorf("Failed to get nodes for cluster '%s': %v", cluster.Name, err)
|
||||
l.Log().Errorf("Failed to get nodes for cluster '%s': %v", cluster.Name, err)
|
||||
}
|
||||
|
||||
if len(nodes) == 0 {
|
||||
@ -797,15 +798,15 @@ func ClusterGet(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster
|
||||
if cluster.ServerLoadBalancer != nil && cluster.ServerLoadBalancer.Node != nil {
|
||||
lbcfg, err := GetLoadbalancerConfig(ctx, runtime, cluster)
|
||||
if err != nil {
|
||||
log.Errorf("error getting loadbalancer config from %s: %v", cluster.ServerLoadBalancer.Node.Name, err)
|
||||
l.Log().Errorf("error getting loadbalancer config from %s: %v", cluster.ServerLoadBalancer.Node.Name, err)
|
||||
}
|
||||
cluster.ServerLoadBalancer.Config = &lbcfg
|
||||
}
|
||||
}
|
||||
|
||||
if err := populateClusterFieldsFromLabels(cluster); err != nil {
|
||||
log.Warnf("Failed to populate cluster fields from node labels")
|
||||
log.Warnln(err)
|
||||
l.Log().Warnf("Failed to populate cluster fields from node labels")
|
||||
l.Log().Warnln(err)
|
||||
}
|
||||
|
||||
return cluster, nil
|
||||
@ -822,7 +823,7 @@ func GenerateNodeName(cluster string, role k3d.Role, suffix int) string {
|
||||
|
||||
// ClusterStart starts a whole cluster (i.e. all nodes of the cluster)
|
||||
func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, startClusterOpts types.ClusterStartOpts) error {
|
||||
log.Infof("Starting cluster '%s'", cluster.Name)
|
||||
l.Log().Infof("Starting cluster '%s'", cluster.Name)
|
||||
|
||||
if startClusterOpts.Timeout > 0*time.Second {
|
||||
var cancel context.CancelFunc
|
||||
@ -850,23 +851,23 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
|
||||
}
|
||||
|
||||
// TODO: remove trace logs below
|
||||
log.Traceln("Servers before sort:")
|
||||
l.Log().Traceln("Servers before sort:")
|
||||
for i, n := range servers {
|
||||
log.Tracef("Server %d - %s", i, n.Name)
|
||||
l.Log().Tracef("Server %d - %s", i, n.Name)
|
||||
}
|
||||
sort.Slice(servers, func(i, j int) bool {
|
||||
return servers[i].Name < servers[j].Name
|
||||
})
|
||||
log.Traceln("Servers after sort:")
|
||||
l.Log().Traceln("Servers after sort:")
|
||||
for i, n := range servers {
|
||||
log.Tracef("Server %d - %s", i, n.Name)
|
||||
l.Log().Tracef("Server %d - %s", i, n.Name)
|
||||
}
|
||||
|
||||
/*
|
||||
* Init Node
|
||||
*/
|
||||
if initNode != nil {
|
||||
log.Infoln("Starting the initializing server...")
|
||||
l.Log().Infoln("Starting the initializing server...")
|
||||
if err := NodeStart(ctx, runtime, initNode, k3d.NodeStartOpts{
|
||||
Wait: true, // always wait for the init node
|
||||
NodeHooks: startClusterOpts.NodeHooks,
|
||||
@ -879,7 +880,7 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
|
||||
/*
|
||||
* Server Nodes
|
||||
*/
|
||||
log.Infoln("Starting servers...")
|
||||
l.Log().Infoln("Starting servers...")
|
||||
nodeStartOpts := k3d.NodeStartOpts{
|
||||
Wait: true,
|
||||
NodeHooks: startClusterOpts.NodeHooks,
|
||||
@ -896,7 +897,7 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
|
||||
|
||||
agentWG, aCtx := errgroup.WithContext(ctx)
|
||||
|
||||
log.Infoln("Starting agents...")
|
||||
l.Log().Infoln("Starting agents...")
|
||||
for _, agentNode := range agents {
|
||||
currentAgentNode := agentNode
|
||||
agentWG.Go(func() error {
|
||||
@ -912,7 +913,7 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
|
||||
*/
|
||||
|
||||
helperWG, hCtx := errgroup.WithContext(ctx)
|
||||
log.Infoln("Starting helpers...")
|
||||
l.Log().Infoln("Starting helpers...")
|
||||
for _, helperNode := range aux {
|
||||
currentHelperNode := helperNode
|
||||
|
||||
@ -937,12 +938,12 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
|
||||
|
||||
// ClusterStop stops a whole cluster (i.e. all nodes of the cluster)
|
||||
func ClusterStop(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
|
||||
log.Infof("Stopping cluster '%s'", cluster.Name)
|
||||
l.Log().Infof("Stopping cluster '%s'", cluster.Name)
|
||||
|
||||
failed := 0
|
||||
for _, node := range cluster.Nodes {
|
||||
if err := runtime.StopNode(ctx, node); err != nil {
|
||||
log.Warningf("Failed to stop node '%s': Try to stop it manually", node.Name)
|
||||
l.Log().Warningf("Failed to stop node '%s': Try to stop it manually", node.Name)
|
||||
failed++
|
||||
continue
|
||||
}
|
||||
@ -964,19 +965,19 @@ func SortClusters(clusters []*k3d.Cluster) []*k3d.Cluster {
|
||||
|
||||
// prepInjectHostIP adds /etc/hosts and CoreDNS entry for host.k3d.internal, referring to the host system
|
||||
func prepInjectHostIP(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) {
|
||||
log.Infoln("(Optional) Trying to get IP of the docker host and inject it into the cluster as 'host.k3d.internal' for easy access")
|
||||
l.Log().Infoln("(Optional) Trying to get IP of the docker host and inject it into the cluster as 'host.k3d.internal' for easy access")
|
||||
hostIP, err := GetHostIP(ctx, runtime, cluster)
|
||||
if err != nil {
|
||||
log.Warnf("Failed to get HostIP: %+v", err)
|
||||
l.Log().Warnf("Failed to get HostIP: %+v", err)
|
||||
}
|
||||
if hostIP != nil {
|
||||
hostRecordSuccessMessage := ""
|
||||
etcHostsFailureCount := 0
|
||||
hostsEntry := fmt.Sprintf("%s %s", hostIP, k3d.DefaultK3dInternalHostRecord)
|
||||
log.Debugf("Adding extra host entry '%s'...", hostsEntry)
|
||||
l.Log().Debugf("Adding extra host entry '%s'...", hostsEntry)
|
||||
for _, node := range cluster.Nodes {
|
||||
if err := runtime.ExecInNode(ctx, node, []string{"sh", "-c", fmt.Sprintf("echo '%s' >> /etc/hosts", hostsEntry)}); err != nil {
|
||||
log.Warnf("Failed to add extra entry '%s' to /etc/hosts in node '%s'", hostsEntry, node.Name)
|
||||
l.Log().Warnf("Failed to add extra entry '%s' to /etc/hosts in node '%s'", hostsEntry, node.Name)
|
||||
etcHostsFailureCount++
|
||||
}
|
||||
}
|
||||
@ -997,22 +998,22 @@ func prepInjectHostIP(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.C
|
||||
msg := fmt.Sprintf("error patching the CoreDNS ConfigMap to include entry '%s': %+v", hostsEntry, err)
|
||||
readlogs, err := ioutil.ReadAll(logreader)
|
||||
if err != nil {
|
||||
log.Debugf("error reading the logs from failed CoreDNS patch exec process in node %s: %v", node.Name, err)
|
||||
l.Log().Debugf("error reading the logs from failed CoreDNS patch exec process in node %s: %v", node.Name, err)
|
||||
} else {
|
||||
msg += fmt.Sprintf("\nLogs: %s", string(readlogs))
|
||||
}
|
||||
log.Debugln(msg)
|
||||
l.Log().Debugln(msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
if successInjectCoreDNSEntry == false {
|
||||
log.Warnf("Failed to patch CoreDNS ConfigMap to include entry '%s' (see debug logs)", hostsEntry)
|
||||
l.Log().Warnf("Failed to patch CoreDNS ConfigMap to include entry '%s' (see debug logs)", hostsEntry)
|
||||
} else {
|
||||
hostRecordSuccessMessage += " and to the CoreDNS ConfigMap"
|
||||
}
|
||||
|
||||
if hostRecordSuccessMessage != "" {
|
||||
log.Infoln(hostRecordSuccessMessage)
|
||||
l.Log().Infoln(hostRecordSuccessMessage)
|
||||
}
|
||||
|
||||
}
|
||||
@ -1027,12 +1028,12 @@ func prepCreateLocalRegistryHostingConfigMap(ctx context.Context, runtime k3drt.
|
||||
success = true
|
||||
break
|
||||
} else {
|
||||
log.Debugf("Failed to create LocalRegistryHosting ConfigMap in node %s: %+v", node.Name, err)
|
||||
l.Log().Debugf("Failed to create LocalRegistryHosting ConfigMap in node %s: %+v", node.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
if success == false {
|
||||
log.Warnf("Failed to create LocalRegistryHosting ConfigMap")
|
||||
l.Log().Warnf("Failed to create LocalRegistryHosting ConfigMap")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -1089,7 +1090,7 @@ func ClusterEditChangesetSimple(ctx context.Context, runtime k3drt.Runtime, clus
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("ORIGINAL:\n> Ports: %+v\n> Config: %+v\nCHANGESET:\n> Ports: %+v\n> Config: %+v", existingLB.Node.Ports, existingLB.Config, lbChangeset.Node.Ports, lbChangeset.Config)
|
||||
l.Log().Debugf("ORIGINAL:\n> Ports: %+v\n> Config: %+v\nCHANGESET:\n> Ports: %+v\n> Config: %+v", existingLB.Node.Ports, existingLB.Config, lbChangeset.Node.Ports, lbChangeset.Config)
|
||||
|
||||
// prepare to write config to lb container
|
||||
configyaml, err := yaml.Marshal(lbChangeset.Config)
|
||||
|
@ -25,9 +25,9 @@ import (
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
l "github.com/rancher/k3d/v4/pkg/logger"
|
||||
"github.com/rancher/k3d/v4/pkg/runtimes"
|
||||
"github.com/rancher/k3d/v4/pkg/types/fixes"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// FIXME: FixCgroupV2 - to be removed when fixed upstream
|
||||
@ -35,18 +35,18 @@ func EnableCgroupV2FixIfNeeded(runtime runtimes.Runtime) {
|
||||
if _, isSet := os.LookupEnv(fixes.EnvFixCgroupV2); !isSet {
|
||||
runtimeInfo, err := runtime.Info()
|
||||
if err != nil {
|
||||
log.Warnf("Failed to get runtime information: %+v", err)
|
||||
l.Log().Warnf("Failed to get runtime information: %+v", err)
|
||||
return
|
||||
}
|
||||
cgroupVersion, err := strconv.Atoi(runtimeInfo.CgroupVersion)
|
||||
if err != nil {
|
||||
log.Debugf("Failed to parse cgroupVersion: %+v", err)
|
||||
l.Log().Debugf("Failed to parse cgroupVersion: %+v", err)
|
||||
return
|
||||
}
|
||||
if cgroupVersion == 2 {
|
||||
log.Debugf("Detected CgroupV2, enabling custom entrypoint (disable by setting %s=false)", fixes.EnvFixCgroupV2)
|
||||
l.Log().Debugf("Detected CgroupV2, enabling custom entrypoint (disable by setting %s=false)", fixes.EnvFixCgroupV2)
|
||||
if err := os.Setenv(fixes.EnvFixCgroupV2, "true"); err != nil {
|
||||
log.Errorf("Detected CgroupsV2 but failed to enable k3d's hotfix (try `export %s=true`): %+v", fixes.EnvFixCgroupV2, err)
|
||||
l.Log().Errorf("Detected CgroupsV2 but failed to enable k3d's hotfix (try `export %s=true`): %+v", fixes.EnvFixCgroupV2, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -29,10 +29,10 @@ import (
|
||||
"regexp"
|
||||
"runtime"
|
||||
|
||||
l "github.com/rancher/k3d/v4/pkg/logger"
|
||||
rt "github.com/rancher/k3d/v4/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v4/pkg/types"
|
||||
"github.com/rancher/k3d/v4/pkg/util"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var nsLookupAddressRegexp = regexp.MustCompile(`^Address:\s+(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$`)
|
||||
@ -44,7 +44,7 @@ func GetHostIP(ctx context.Context, rtime rt.Runtime, cluster *k3d.Cluster) (net
|
||||
// Docker Runtime
|
||||
if rtime == rt.Docker {
|
||||
|
||||
log.Tracef("Runtime GOOS: %s", runtime.GOOS)
|
||||
l.Log().Tracef("Runtime GOOS: %s", runtime.GOOS)
|
||||
|
||||
// "native" Docker on Linux
|
||||
if runtime.GOOS == "linux" {
|
||||
@ -94,28 +94,28 @@ func resolveHostnameFromInside(ctx context.Context, rtime rt.Runtime, node *k3d.
|
||||
return nil, fmt.Errorf("Failed to scan logs for host IP: Could not create scanner from logreader")
|
||||
}
|
||||
if scanner != nil && execErr != nil {
|
||||
log.Debugln("Exec Process Failed, but we still got logs, so we're at least trying to get the IP from there...")
|
||||
log.Tracef("-> Exec Process Error was: %+v", execErr)
|
||||
l.Log().Debugln("Exec Process Failed, but we still got logs, so we're at least trying to get the IP from there...")
|
||||
l.Log().Tracef("-> Exec Process Error was: %+v", execErr)
|
||||
}
|
||||
for scanner.Scan() {
|
||||
log.Tracef("Scanning Log Line '%s'", scanner.Text())
|
||||
l.Log().Tracef("Scanning Log Line '%s'", scanner.Text())
|
||||
match := nsLookupAddressRegexp.FindStringSubmatch(scanner.Text())
|
||||
if len(match) == 0 {
|
||||
continue
|
||||
}
|
||||
log.Tracef("-> Match(es): '%+v'", match)
|
||||
l.Log().Tracef("-> Match(es): '%+v'", match)
|
||||
submatches = util.MapSubexpNames(nsLookupAddressRegexp.SubexpNames(), match)
|
||||
log.Tracef(" -> Submatch(es): %+v", submatches)
|
||||
l.Log().Tracef(" -> Submatch(es): %+v", submatches)
|
||||
break
|
||||
}
|
||||
if _, ok := submatches["ip"]; !ok {
|
||||
if execErr != nil {
|
||||
log.Errorln(execErr)
|
||||
l.Log().Errorln(execErr)
|
||||
}
|
||||
return nil, fmt.Errorf("Failed to read address for '%s' from nslookup response", hostname)
|
||||
}
|
||||
|
||||
log.Debugf("Hostname '%s' -> Address '%s'", hostname, submatches["ip"])
|
||||
l.Log().Debugf("Hostname '%s' -> Address '%s'", hostname, submatches["ip"])
|
||||
|
||||
return net.ParseIP(submatches["ip"]), nil
|
||||
|
||||
|
@ -24,9 +24,9 @@ package client
|
||||
import (
|
||||
"context"
|
||||
|
||||
l "github.com/rancher/k3d/v4/pkg/logger"
|
||||
k3drt "github.com/rancher/k3d/v4/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v4/pkg/types"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"inet.af/netaddr"
|
||||
)
|
||||
|
||||
@ -53,7 +53,7 @@ func GetIP(ctx context.Context, runtime k3drt.Runtime, network *k3d.ClusterNetwo
|
||||
|
||||
ip := ipset.Ranges()[0].From
|
||||
|
||||
log.Debugf("Found free IP %s in network %s", ip.String(), network.Name)
|
||||
l.Log().Debugf("Found free IP %s in network %s", ip.String(), network.Name)
|
||||
|
||||
return ip, nil
|
||||
}
|
||||
|
@ -30,9 +30,9 @@ import (
"path/filepath"
"time"

l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
@ -78,18 +78,18 @@ func KubeconfigGetWrite(ctx context.Context, runtime runtimes.Runtime, cluster *

// the output file does not exist: try to create it and try again
if os.IsNotExist(err) && firstRun {
log.Debugf("Output path '%s' doesn't exist, trying to create it...", output)
l.Log().Debugf("Output path '%s' doesn't exist, trying to create it...", output)

// create directory path
if err := os.MkdirAll(filepath.Dir(output), 0755); err != nil {
log.Errorf("Failed to create output directory '%s'", filepath.Dir(output))
l.Log().Errorf("Failed to create output directory '%s'", filepath.Dir(output))
return output, err
}

// try create output file
f, err := os.Create(output)
if err != nil {
log.Errorf("Failed to create output file '%s'", output)
l.Log().Errorf("Failed to create output file '%s'", output)
return output, err
}
f.Close()
@ -98,7 +98,7 @@ func KubeconfigGetWrite(ctx context.Context, runtime runtimes.Runtime, cluster *
firstRun = false
continue
}
log.Errorf("Failed to open output file '%s' or it's not a KubeConfig", output)
l.Log().Errorf("Failed to open output file '%s' or it's not a KubeConfig", output)
return output, err
}
break
@ -117,7 +117,7 @@ func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.C
// TODO: getKubeconfig: we should make sure, that the server node we're trying to fetch from is actually running
serverNodes, err := runtime.GetNodesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name, k3d.LabelRole: string(k3d.ServerRole)})
if err != nil {
log.Errorln("Failed to get server nodes")
l.Log().Errorln("Failed to get server nodes")
return nil, err
}
if len(serverNodes) == 0 {
@ -147,14 +147,14 @@ func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.C
// get the kubeconfig from the first server node
reader, err := runtime.GetKubeconfig(ctx, chosenServer)
if err != nil {
log.Errorf("Failed to get kubeconfig from node '%s'", chosenServer.Name)
l.Log().Errorf("Failed to get kubeconfig from node '%s'", chosenServer.Name)
return nil, err
}
defer reader.Close()

readBytes, err := ioutil.ReadAll(reader)
if err != nil {
log.Errorln("Couldn't read kubeconfig file")
l.Log().Errorln("Couldn't read kubeconfig file")
return nil, err
}

@ -167,7 +167,7 @@ func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.C
*/
kc, err := clientcmd.Load(trimBytes)
if err != nil {
log.Errorln("Failed to parse the KubeConfig")
l.Log().Errorln("Failed to parse the KubeConfig")
return nil, err
}

@ -196,7 +196,7 @@ func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.C
// set current-context to new context name
kc.CurrentContext = newContextName

log.Tracef("Modified Kubeconfig: %+v", kc)
l.Log().Tracef("Modified Kubeconfig: %+v", kc)

return kc, nil
}
@ -212,7 +212,7 @@ func KubeconfigWriteToPath(ctx context.Context, kubeconfig *clientcmdapi.Config,
} else {
output, err = os.Create(path)
if err != nil {
log.Errorf("Failed to create file '%s'", path)
l.Log().Errorf("Failed to create file '%s'", path)
return err
}
defer output.Close()
@ -220,17 +220,17 @@ func KubeconfigWriteToPath(ctx context.Context, kubeconfig *clientcmdapi.Config,

kubeconfigBytes, err := clientcmd.Write(*kubeconfig)
if err != nil {
log.Errorln("Failed to write KubeConfig")
l.Log().Errorln("Failed to write KubeConfig")
return err
}

_, err = output.Write(kubeconfigBytes)
if err != nil {
log.Errorf("Failed to write to file '%s'", output.Name())
l.Log().Errorf("Failed to write to file '%s'", output.Name())
return err
}

log.Debugf("Wrote kubeconfig to '%s'", output.Name())
l.Log().Debugf("Wrote kubeconfig to '%s'", output.Name())

return nil

@ -239,7 +239,7 @@ func KubeconfigWriteToPath(ctx context.Context, kubeconfig *clientcmdapi.Config,
// KubeconfigMerge merges a new kubeconfig into an existing kubeconfig and returns the result
func KubeconfigMerge(ctx context.Context, newKubeConfig *clientcmdapi.Config, existingKubeConfig *clientcmdapi.Config, outPath string, overwriteConflicting bool, updateCurrentContext bool) error {

log.Tracef("Merging new Kubeconfig:\n%+v\n>>> into existing Kubeconfig:\n%+v", newKubeConfig, existingKubeConfig)
l.Log().Tracef("Merging new Kubeconfig:\n%+v\n>>> into existing Kubeconfig:\n%+v", newKubeConfig, existingKubeConfig)

// Overwrite values in existing kubeconfig
for k, v := range newKubeConfig.Clusters {
@ -274,7 +274,7 @@ func KubeconfigMerge(ctx context.Context, newKubeConfig *clientcmdapi.Config, ex
updateCurrentContext = true
}
if updateCurrentContext {
log.Debugf("Setting new current-context '%s'", newKubeConfig.CurrentContext)
l.Log().Debugf("Setting new current-context '%s'", newKubeConfig.CurrentContext)
existingKubeConfig.CurrentContext = newKubeConfig.CurrentContext
}

@ -285,17 +285,17 @@
func KubeconfigWrite(ctx context.Context, kubeconfig *clientcmdapi.Config, path string) error {
tempPath := fmt.Sprintf("%s.k3d_%s", path, time.Now().Format("20060102_150405.000000"))
if err := clientcmd.WriteToFile(*kubeconfig, tempPath); err != nil {
log.Errorf("Failed to write merged kubeconfig to temporary file '%s'", tempPath)
l.Log().Errorf("Failed to write merged kubeconfig to temporary file '%s'", tempPath)
return err
}

// Move temporary file over existing KubeConfig
if err := os.Rename(tempPath, path); err != nil {
log.Errorf("Failed to overwrite existing KubeConfig '%s' with new KubeConfig '%s'", path, tempPath)
l.Log().Errorf("Failed to overwrite existing KubeConfig '%s' with new KubeConfig '%s'", path, tempPath)
return err
}

log.Debugf("Wrote kubeconfig to '%s'", path)
l.Log().Debugf("Wrote kubeconfig to '%s'", path)

return nil
}
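KubeconfigWrite above persists the merged kubeconfig by writing to a sibling temp path and renaming it over the target, so readers never see a half-written file. A hedged standalone sketch of that pattern — writeAtomically is a hypothetical helper, not part of k3d, and it assumes os.Rename replaces the destination in one step when source and target share a filesystem:

package main

import (
	"fmt"
	"os"
	"time"
)

// writeAtomically mirrors the temp-file-then-rename flow of KubeconfigWrite.
func writeAtomically(path string, data []byte) error {
	tmp := fmt.Sprintf("%s.k3d_%s", path, time.Now().Format("20060102_150405.000000"))
	if err := os.WriteFile(tmp, data, 0600); err != nil {
		return err
	}
	// rename replaces the destination atomically on POSIX filesystems
	return os.Rename(tmp, path)
}

func main() {
	if err := writeAtomically("/tmp/kubeconfig.yaml", []byte("apiVersion: v1\n")); err != nil {
		fmt.Println(err)
	}
}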
@ -306,7 +306,7 @@ func KubeconfigGetDefaultFile() (*clientcmdapi.Config, error) {
if err != nil {
return nil, err
}
log.Debugf("Using default kubeconfig '%s'", path)
l.Log().Debugf("Using default kubeconfig '%s'", path)
return clientcmd.LoadFromFile(path)
}

@ -31,10 +31,10 @@ import (

"github.com/docker/go-connections/nat"
"github.com/go-test/deep"
l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)

@ -51,7 +51,7 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
// update cluster details to ensure that we have the latest node list
cluster, err = ClusterGet(ctx, runtime, cluster)
if err != nil {
log.Errorf("Failed to update details for cluster '%s'", cluster.Name)
l.Log().Errorf("Failed to update details for cluster '%s'", cluster.Name)
return err
}

@ -60,23 +60,23 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
return fmt.Errorf("error getting current config from loadbalancer: %w", err)
}

log.Tracef("Current loadbalancer config:\n%+v", currentConfig)
l.Log().Tracef("Current loadbalancer config:\n%+v", currentConfig)

newLBConfig, err := LoadbalancerGenerateConfig(cluster)
if err != nil {
return fmt.Errorf("error generating new loadbalancer config: %w", err)
}
log.Tracef("New loadbalancer config:\n%+v", newLBConfig)
l.Log().Tracef("New loadbalancer config:\n%+v", newLBConfig)

if diff := deep.Equal(currentConfig, newLBConfig); diff != nil {
log.Debugf("Updating the loadbalancer with this diff: %+v", diff)
l.Log().Debugf("Updating the loadbalancer with this diff: %+v", diff)
}

newLbConfigYaml, err := yaml.Marshal(&newLBConfig)
if err != nil {
return fmt.Errorf("error marshalling the new loadbalancer config: %w", err)
}
log.Debugf("Writing lb config:\n%s", string(newLbConfigYaml))
l.Log().Debugf("Writing lb config:\n%s", string(newLbConfigYaml))
startTime := time.Now().Truncate(time.Second).UTC()
if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer.Node); err != nil {
return fmt.Errorf("error writing new loadbalancer config to container: %w", err)
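The deep.Equal call above comes from github.com/go-test/deep: it returns a human-readable list of differences (nil when the values match), which is why the diff can be logged directly before the new config is written. A small standalone example (lbConfig is a stand-in struct, not k3d's type):

package main

import (
	"fmt"

	"github.com/go-test/deep"
)

type lbConfig struct {
	Ports []string
}

func main() {
	current := lbConfig{Ports: []string{"6443.tcp"}}
	desired := lbConfig{Ports: []string{"6443.tcp", "8080.tcp"}}

	// nil means "equal"; otherwise each entry describes one difference
	if diff := deep.Equal(current, desired); diff != nil {
		fmt.Printf("updating the loadbalancer with this diff: %+v\n", diff)
	}
}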
@ -91,18 +91,18 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
defer failureCtxCancel()
err = NodeWaitForLogMessage(failureCtx, runtime, cluster.ServerLoadBalancer.Node, "host not found in upstream", startTime)
if err != nil {
log.Warnf("Failed to check if the loadbalancer was configured correctly or if it broke. Please check it manually or try again: %v", err)
l.Log().Warnf("Failed to check if the loadbalancer was configured correctly or if it broke. Please check it manually or try again: %v", err)
return ErrLBConfigFailedTest
} else {
log.Warnln("Failed to configure loadbalancer because one of the nodes seems to be down! Run `k3d node list` to see which one it could be.")
l.Log().Warnln("Failed to configure loadbalancer because one of the nodes seems to be down! Run `k3d node list` to see which one it could be.")
return ErrLBConfigHostNotFound
}
} else {
log.Warnf("Failed to ensure that loadbalancer was configured correctly. Please check it manually or try again: %v", err)
l.Log().Warnf("Failed to ensure that loadbalancer was configured correctly. Please check it manually or try again: %v", err)
return ErrLBConfigFailedTest
}
}
log.Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Node.Name)
l.Log().Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Node.Name)

time.Sleep(1 * time.Second) // waiting for a second, to avoid issues with too fast lb updates which would screw up the log waits

@ -34,19 +34,20 @@ import (
"time"

copystruct "github.com/mitchellh/copystructure"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"

"github.com/docker/go-connections/nat"
dockerunits "github.com/docker/go-units"
"github.com/imdario/mergo"
"github.com/rancher/k3d/v4/pkg/actions"
l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/runtimes/docker"
runtimeErrors "github.com/rancher/k3d/v4/pkg/runtimes/errors"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/types/fixes"
"github.com/rancher/k3d/v4/pkg/util"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)

@ -55,7 +56,7 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
targetClusterName := cluster.Name
cluster, err := ClusterGet(ctx, runtime, cluster)
if err != nil {
log.Errorf("Failed to find specified cluster '%s'", targetClusterName)
l.Log().Errorf("Failed to find specified cluster '%s'", targetClusterName)
return err
}

@ -80,7 +81,7 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
}
// if we didn't find a node with the same role in the cluster, just choose any other node
if srcNode == nil {
log.Debugf("Didn't find node with role '%s' in cluster '%s'. Choosing any other node (and using defaults)...", node.Role, cluster.Name)
l.Log().Debugf("Didn't find node with role '%s' in cluster '%s'. Choosing any other node (and using defaults)...", node.Role, cluster.Name)
node.Cmd = k3d.DefaultRoleCmds[node.Role]
for _, existingNode := range cluster.Nodes {
if existingNode.Role != k3d.LoadBalancerRole { // any role except for the LoadBalancer role
@ -105,7 +106,7 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
for _, forbiddenMount := range util.DoNotCopyVolumeSuffices {
for i, mount := range node.Volumes {
if strings.Contains(mount, forbiddenMount) {
log.Tracef("Dropping copied volume mount %s to avoid issues...", mount)
l.Log().Tracef("Dropping copied volume mount %s to avoid issues...", mount)
node.Volumes = util.RemoveElementFromStringSlice(node.Volumes, i)
}
}
@ -120,29 +121,29 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
for i, cmd := range srcNode.Cmd {
// cut out the '--cluster-init' flag as this should only be done by the initializing server node
if cmd == forbiddenCmd {
log.Tracef("Dropping '%s' from source node's cmd", forbiddenCmd)
l.Log().Tracef("Dropping '%s' from source node's cmd", forbiddenCmd)
srcNode.Cmd = append(srcNode.Cmd[:i], srcNode.Cmd[i+1:]...)
}
}
for i, arg := range node.Args {
// cut out the '--cluster-init' flag as this should only be done by the initializing server node
if arg == forbiddenCmd {
log.Tracef("Dropping '%s' from source node's args", forbiddenCmd)
l.Log().Tracef("Dropping '%s' from source node's args", forbiddenCmd)
srcNode.Args = append(srcNode.Args[:i], srcNode.Args[i+1:]...)
}
}
}
}

log.Debugf("Adding node %s to cluster %s based on existing (sanitized) node %s", node.Name, cluster.Name, srcNode.Name)
log.Tracef("Sanitized Source Node: %+v\nNew Node: %+v", srcNode, node)
l.Log().Debugf("Adding node %s to cluster %s based on existing (sanitized) node %s", node.Name, cluster.Name, srcNode.Name)
l.Log().Tracef("Sanitized Source Node: %+v\nNew Node: %+v", srcNode, node)

// fetch registry config
registryConfigBytes := []byte{}
registryConfigReader, err := runtime.ReadFromNode(ctx, k3d.DefaultRegistriesFilePath, srcNode)
if err != nil {
if !errors.Is(err, runtimeErrors.ErrRuntimeFileNotFound) {
log.Warnf("Failed to read registry config from node %s: %+v", node.Name, err)
l.Log().Warnf("Failed to read registry config from node %s: %+v", node.Name, err)
}
} else {
defer registryConfigReader.Close()
@ -150,7 +151,7 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
var err error
registryConfigBytes, err = ioutil.ReadAll(registryConfigReader)
if err != nil {
log.Warnf("Failed to read registry config from node %s: %+v", node.Name, err)
l.Log().Warnf("Failed to read registry config from node %s: %+v", node.Name, err)
}
registryConfigReader.Close()
registryConfigBytes = bytes.Trim(registryConfigBytes[512:], "\x00") // trim control characters, etc.
@ -158,13 +159,13 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N

// merge node config of new node into existing node config
if err := mergo.MergeWithOverwrite(srcNode, *node); err != nil {
log.Errorln("Failed to merge new node config into existing node config")
l.Log().Errorln("Failed to merge new node config into existing node config")
return err
}

node = srcNode

log.Debugf("Resulting node %+v", node)
l.Log().Debugf("Resulting node %+v", node)

k3sURLFound := false
for _, envVar := range node.Env {
@ -177,7 +178,7 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
if url, ok := node.RuntimeLabels[k3d.LabelClusterURL]; ok {
node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", url))
} else {
log.Warnln("Failed to find K3S_URL value!")
l.Log().Warnln("Failed to find K3S_URL value!")
}
}

@ -207,7 +208,7 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N

// if it's a server node, then update the loadbalancer configuration
if node.Role == k3d.ServerRole {
log.Infoln("Updating loadbalancer config to include new server node(s)")
l.Log().Infoln("Updating loadbalancer config to include new server node(s)")
if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
if !errors.Is(err, ErrLBConfigHostNotFound) {
return fmt.Errorf("error updating loadbalancer: %w", err)
@ -251,25 +252,25 @@ func NodeCreateMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d
nodeWaitGroup, ctx := errgroup.WithContext(ctx)
for _, node := range nodes {
if err := NodeCreate(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
log.Error(err)
l.Log().Error(err)
}
if createNodeOpts.Wait {
currentNode := node
nodeWaitGroup.Go(func() error {
log.Debugf("Starting to wait for node '%s'", currentNode.Name)
l.Log().Debugf("Starting to wait for node '%s'", currentNode.Name)
readyLogMessage := k3d.ReadyLogMessageByRole[currentNode.Role]
if readyLogMessage != "" {
return NodeWaitForLogMessage(ctx, runtime, currentNode, readyLogMessage, time.Time{})
}
log.Warnf("NodeCreateMulti: Set to wait for node %s to get ready, but there's no target log message defined", currentNode.Name)
l.Log().Warnf("NodeCreateMulti: Set to wait for node %s to get ready, but there's no target log message defined", currentNode.Name)
return nil
})
}
}

if err := nodeWaitGroup.Wait(); err != nil {
log.Errorln("Failed to bring up all nodes in time. Check the logs:")
log.Errorf(">>> %+v", err)
l.Log().Errorln("Failed to bring up all nodes in time. Check the logs:")
l.Log().Errorf(">>> %+v", err)
return fmt.Errorf("Failed to create nodes")
}

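NodeCreateMulti above fans out one waiter per node via errgroup: the first error cancels the shared context and Wait surfaces it. A minimal runnable sketch of the same pattern (the timed wait stands in for NodeWaitForLogMessage):

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

func main() {
	nodeWaitGroup, ctx := errgroup.WithContext(context.Background())
	for _, name := range []string{"k3d-test-server-0", "k3d-test-agent-0"} {
		currentNode := name // capture the loop variable, as the code above does
		nodeWaitGroup.Go(func() error {
			select {
			case <-time.After(100 * time.Millisecond): // stand-in for the ready-log wait
				fmt.Printf("node '%s' is ready\n", currentNode)
				return nil
			case <-ctx.Done(): // another waiter failed first
				return ctx.Err()
			}
		})
	}
	if err := nodeWaitGroup.Wait(); err != nil {
		fmt.Printf("failed to bring up all nodes in time: %v\n", err)
	}
}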
@ -298,7 +299,7 @@ func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, no

// return early, if the node is already running
if node.State.Running {
log.Infof("Node %s is already running", node.Name)
l.Log().Infof("Node %s is already running", node.Name)
return nil
}

@ -324,33 +325,33 @@ func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, no
}

startTime := time.Now()
log.Debugf("Node %s Start Time: %+v", node.Name, startTime)
l.Log().Debugf("Node %s Start Time: %+v", node.Name, startTime)

// execute lifecycle hook actions
for _, hook := range nodeStartOpts.NodeHooks {
if hook.Stage == k3d.LifecycleStagePreStart {
log.Tracef("Node %s: Executing preStartAction '%s'", node.Name, reflect.TypeOf(hook))
l.Log().Tracef("Node %s: Executing preStartAction '%s'", node.Name, reflect.TypeOf(hook))
if err := hook.Action.Run(ctx, node); err != nil {
log.Errorf("Node %s: Failed executing preStartAction '%+v': %+v", node.Name, hook, err)
l.Log().Errorf("Node %s: Failed executing preStartAction '%+v': %+v", node.Name, hook, err)
}
}
}

// start the node
log.Tracef("Starting node '%s'", node.Name)
l.Log().Tracef("Starting node '%s'", node.Name)

if err := runtime.StartNode(ctx, node); err != nil {
log.Errorf("Failed to start node '%s'", node.Name)
l.Log().Errorf("Failed to start node '%s'", node.Name)
return err
}

if node.State.Started != "" {
ts, err := time.Parse("2006-01-02T15:04:05.999999999Z", node.State.Started)
if err != nil {
log.Debugf("Failed to parse '%s.State.Started' timestamp '%s', falling back to calulated time", node.Name, node.State.Started)
|
||||
l.Log().Debugf("Failed to parse '%s.State.Started' timestamp '%s', falling back to calulated time", node.Name, node.State.Started)
|
||||
}
startTime = ts.Truncate(time.Second)
log.Debugf("Truncated %s to %s", ts, startTime)
l.Log().Debugf("Truncated %s to %s", ts, startTime)
}

if nodeStartOpts.Wait {
@ -358,12 +359,12 @@ func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, no
nodeStartOpts.ReadyLogMessage = k3d.ReadyLogMessageByRole[node.Role]
}
if nodeStartOpts.ReadyLogMessage != "" {
log.Debugf("Waiting for node %s to get ready (Log: '%s')", node.Name, nodeStartOpts.ReadyLogMessage)
l.Log().Debugf("Waiting for node %s to get ready (Log: '%s')", node.Name, nodeStartOpts.ReadyLogMessage)
if err := NodeWaitForLogMessage(ctx, runtime, node, nodeStartOpts.ReadyLogMessage, startTime); err != nil {
return fmt.Errorf("Node %s failed to get ready: %+v", node.Name, err)
}
} else {
log.Warnf("NodeStart: Set to wait for node %s to be ready, but there's no target log message defined", node.Name)
l.Log().Warnf("NodeStart: Set to wait for node %s to be ready, but there's no target log message defined", node.Name)
}
}

@ -374,7 +375,7 @@ func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, no
func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, createNodeOpts k3d.NodeCreateOpts) error {
// FIXME: FixCgroupV2 - to be removed when fixed upstream
EnableCgroupV2FixIfNeeded(runtime)
log.Tracef("Creating node from spec\n%+v", node)
l.Log().Tracef("Creating node from spec\n%+v", node)

/*
* CONFIGURATION
@ -406,7 +407,7 @@ func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c
// memory limits
if node.Memory != "" {
if runtime != runtimes.Docker {
log.Warn("ignoring specified memory limits as runtime is not Docker")
l.Log().Warn("ignoring specified memory limits as runtime is not Docker")
} else {
memory, err := dockerunits.RAMInBytes(node.Memory)
if err != nil {
@ -424,7 +425,7 @@ func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c
return fmt.Errorf("Failed to check for the existence of edac folder: %+v", err)
}
if exists {
log.Debugln("Found edac folder")
l.Log().Debugln("Found edac folder")
fakeedacpath, err := util.MakeFakeEdac(node.Name)
if err != nil {
return fmt.Errorf("Failed to create fake edac: %+v", err)
@ -448,17 +449,17 @@
func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, opts k3d.NodeDeleteOpts) error {
// delete node
if err := runtime.DeleteNode(ctx, node); err != nil {
log.Error(err)
l.Log().Error(err)
}

// delete fake folder created for limits
if node.Memory != "" {
log.Debug("Cleaning fake files folder from k3d config dir for this node...")
l.Log().Debug("Cleaning fake files folder from k3d config dir for this node...")
filepath, err := util.GetNodeFakerDirOrCreate(node.Name)
err = os.RemoveAll(filepath)
if err != nil {
// this err prob should not be fatal, just log it
log.Errorf("Could not remove fake files folder for node %s: %+v", node.Name, err)
l.Log().Errorf("Could not remove fake files folder for node %s: %+v", node.Name, err)
}
}

@ -466,7 +467,7 @@ func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, o
if !opts.SkipLBUpdate && (node.Role == k3d.ServerRole || node.Role == k3d.AgentRole) {
cluster, err := ClusterGet(ctx, runtime, &k3d.Cluster{Name: node.RuntimeLabels[k3d.LabelClusterName]})
if err != nil {
log.Errorf("Failed to find cluster for node '%s'", node.Name)
l.Log().Errorf("Failed to find cluster for node '%s'", node.Name)
return err
}

@ -510,7 +511,7 @@ func patchServerSpec(node *k3d.Node, runtime runtimes.Runtime) error {
dockerHost := runtime.GetHost()
if dockerHost != "" {
dockerHost = strings.Split(dockerHost, ":")[0] // remove the port
log.Tracef("Using docker host %s", dockerHost)
l.Log().Tracef("Using docker host %s", dockerHost)
node.RuntimeLabels[k3d.LabelServerAPIHostIP] = dockerHost
node.RuntimeLabels[k3d.LabelServerAPIHost] = dockerHost
}
@ -525,7 +526,7 @@ func patchServerSpec(node *k3d.Node, runtime runtimes.Runtime) error {
func NodeList(ctx context.Context, runtime runtimes.Runtime) ([]*k3d.Node, error) {
nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultRuntimeLabels)
if err != nil {
log.Errorln("Failed to get nodes")
l.Log().Errorln("Failed to get nodes")
return nil, err
}

@ -537,7 +538,7 @@ func NodeGet(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3
// get node
node, err := runtime.GetNode(ctx, node)
if err != nil {
log.Errorf("Failed to get node '%s'", node.Name)
l.Log().Errorf("Failed to get node '%s'", node.Name)
return nil, err
}

@ -546,14 +547,14 @@ func NodeGet(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3

// NodeWaitForLogMessage follows the logs of a node container and returns if it finds a specific line in there (or timeout is reached)
func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string, since time.Time) error {
log.Tracef("NodeWaitForLogMessage: Node '%s' waiting for log message '%s' since '%+v'", node.Name, message, since)
l.Log().Tracef("NodeWaitForLogMessage: Node '%s' waiting for log message '%s' since '%+v'", node.Name, message, since)
for {
select {
case <-ctx.Done():
if ctx.Err() == context.DeadlineExceeded {
d, ok := ctx.Deadline()
if ok {
log.Debugf("NodeWaitForLogMessage: Context Deadline (%s) > Current Time (%s)", d, time.Now())
l.Log().Debugf("NodeWaitForLogMessage: Context Deadline (%s) > Current Time (%s)", d, time.Now())
}
return fmt.Errorf("Context deadline exceeded while waiting for log message '%s' of node %s: %w", message, node.Name, ctx.Err())
}
@ -577,15 +578,15 @@ func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node *
output := buf.String()

if nRead > 0 && strings.Contains(os.Getenv("K3D_LOG_NODE_WAIT_LOGS"), string(node.Role)) {
log.Tracef("=== Read logs since %s ===\n%s\n", since, output)
l.Log().Tracef("=== Read logs since %s ===\n%s\n", since, output)
}
// check if we can find the specified line in the log
if nRead > 0 && strings.Contains(output, message) {
if log.GetLevel() >= log.TraceLevel {
if l.Log().GetLevel() >= logrus.TraceLevel {
temp := strings.Split(output, "\n")
for _, l := range temp {
if strings.Contains(l, message) {
log.Tracef("Found target log line: `%s`", l)
for _, t := range temp {
if strings.Contains(t, message) {
l.Log().Tracef("Found target log line: `%s`", t)
}
}
}
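The loop variable rename from l to t above is required by this refactoring, not cosmetic: with the logger package imported under the alias l, a loop variable named l shadows the alias and makes l.Log() unusable inside the loop body. A small illustration of the fixed form (assumes the k3d module on the import path):

package main

import (
	"strings"

	l "github.com/rancher/k3d/v4/pkg/logger"
)

func main() {
	output := "starting\nk3s is up and running\ndone"
	// "t", not "l": naming this variable "l" would shadow the logger alias
	for _, t := range strings.Split(output, "\n") {
		if strings.Contains(t, "up and running") {
			l.Log().Infof("Found target log line: `%s`", t)
		}
	}
}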
@ -595,12 +596,12 @@ func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node *
// check if the container is restarting
running, status, _ := runtime.GetNodeStatus(ctx, node)
if running && status == k3d.NodeStatusRestarting && time.Now().Sub(since) > k3d.NodeWaitForLogMessageRestartWarnTime {
log.Warnf("Node '%s' is restarting for more than a minute now. Possibly it will recover soon (e.g. when it's waiting to join). Consider using a creation timeout to avoid waiting forever in a Restart Loop.", node.Name)
l.Log().Warnf("Node '%s' is restarting for more than a minute now. Possibly it will recover soon (e.g. when it's waiting to join). Consider using a creation timeout to avoid waiting forever in a Restart Loop.", node.Name)
}

time.Sleep(500 * time.Millisecond) // wait for half a second to avoid overloading docker (error `socket: too many open files`)
}
log.Debugf("Finished waiting for log message '%s' from node '%s'", message, node.Name)
l.Log().Debugf("Finished waiting for log message '%s' from node '%s'", message, node.Name)
return nil
}

@ -610,7 +611,7 @@ func NodeFilterByRoles(nodes []*k3d.Node, includeRoles, excludeRoles []k3d.Role)
for _, includeRole := range includeRoles {
for _, excludeRole := range excludeRoles {
if includeRole == excludeRole {
log.Warnf("You've specified the same role ('%s') for inclusion and exclusion. Exclusion precedes inclusion.", includeRole)
l.Log().Warnf("You've specified the same role ('%s') for inclusion and exclusion. Exclusion precedes inclusion.", includeRole)
}
}
}
@ -634,7 +635,7 @@ nodeLoop:
}
}

log.Tracef("Filtered %d nodes by roles (in: %+v | ex: %+v), got %d left", len(nodes), includeRoles, excludeRoles, len(resultList))
l.Log().Tracef("Filtered %d nodes by roles (in: %+v | ex: %+v), got %d left", len(nodes), includeRoles, excludeRoles, len(resultList))

return resultList
}
@ -666,11 +667,11 @@ func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, chang
// loop over existing portbindings to avoid port collisions (docker doesn't check for it)
for _, existingPB := range result.Ports[port] {
if util.IsPortBindingEqual(portbinding, existingPB) { // also matches on "equal" HostIPs (127.0.0.1, "", 0.0.0.0)
log.Tracef("Skipping existing PortBinding: %+v", existingPB)
l.Log().Tracef("Skipping existing PortBinding: %+v", existingPB)
continue loopChangesetPortbindings
}
}
log.Tracef("Adding portbinding %+v for port %s", portbinding, port.Port())
l.Log().Tracef("Adding portbinding %+v for port %s", portbinding, port.Port())
result.Ports[port] = append(result.Ports[port], portbinding)
}
}
@ -718,14 +719,14 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No
// rename existing node
oldNameTemp := fmt.Sprintf("%s-%s", old.Name, util.GenerateRandomString(5))
oldNameOriginal := old.Name
log.Infof("Renaming existing node %s to %s...", old.Name, oldNameTemp)
l.Log().Infof("Renaming existing node %s to %s...", old.Name, oldNameTemp)
if err := runtime.RenameNode(ctx, old, oldNameTemp); err != nil {
return err
}
old.Name = oldNameTemp

// create (not start) new node
log.Infof("Creating new node %s...", new.Name)
l.Log().Infof("Creating new node %s...", new.Name)
if err := NodeCreate(ctx, runtime, new, k3d.NodeCreateOpts{Wait: true}); err != nil {
if err := runtime.RenameNode(ctx, old, oldNameOriginal); err != nil {
return fmt.Errorf("Failed to create new node. Also failed to rename %s back to %s: %+v", old.Name, oldNameOriginal, err)
@ -734,13 +735,13 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No
}

// stop existing/old node
log.Infof("Stopping existing node %s...", old.Name)
l.Log().Infof("Stopping existing node %s...", old.Name)
if err := runtime.StopNode(ctx, old); err != nil {
return err
}

// start new node
log.Infof("Starting new node %s...", new.Name)
l.Log().Infof("Starting new node %s...", new.Name)
if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true, NodeHooks: new.HookActions}); err != nil {
if err := NodeDelete(ctx, runtime, new, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
return fmt.Errorf("Failed to start new node. Also failed to rollback: %+v", err)
@ -756,7 +757,7 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No
}

// cleanup: delete old node
log.Infof("Deleting old node %s...", old.Name)
l.Log().Infof("Deleting old node %s...", old.Name)
if err := NodeDelete(ctx, runtime, old, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
return err
}
@ -30,10 +30,11 @@ import (
"github.com/docker/go-connections/nat"
"github.com/rancher/k3d/v4/pkg/config/types"
config "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/util"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)

@ -46,15 +47,15 @@ func TransformPorts(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.
nodeList := cluster.Nodes

for _, portWithNodeFilters := range portsWithNodeFilters {
log.Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters)
l.Log().Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters)
if len(portWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 {
log.Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings)
l.Log().Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings)
portWithNodeFilters.NodeFilters = types.DefaultTargetsNodefiltersPortMappings
}

for _, f := range portWithNodeFilters.NodeFilters {
if strings.HasPrefix(f, "loadbalancer") {
log.Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings)
l.Log().Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings)
portWithNodeFilters.NodeFilters = types.DefaultTargetsNodefiltersPortMappings
break
}
@ -100,12 +101,12 @@ func TransformPorts(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.
}

// print generated loadbalancer config
if log.GetLevel() >= log.DebugLevel {
if l.Log().GetLevel() >= logrus.DebugLevel {
yamlized, err := yaml.Marshal(cluster.ServerLoadBalancer.Config)
if err != nil {
log.Errorf("error printing loadbalancer config: %v", err)
l.Log().Errorf("error printing loadbalancer config: %v", err)
} else {
log.Debugf("generated loadbalancer config:\n%s", string(yamlized))
l.Log().Debugf("generated loadbalancer config:\n%s", string(yamlized))
}
}
return nil
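One side effect of the refactoring is visible above: because l.Log() returns a plain *logrus.Logger, level constants now come from a separate, unaliased logrus import (logrus.DebugLevel instead of log.DebugLevel). A condensed runnable sketch of the level-gated debug dump (lbConfig is a stand-in for cluster.ServerLoadBalancer.Config; assumes the k3d module on the import path):

package main

import (
	l "github.com/rancher/k3d/v4/pkg/logger"
	"github.com/sirupsen/logrus"
	"gopkg.in/yaml.v2"
)

type lbConfig struct {
	Ports []string `yaml:"ports"`
}

func main() {
	l.Log().SetLevel(logrus.DebugLevel)

	cfg := lbConfig{Ports: []string{"6443.tcp"}}
	// only pay the marshalling cost when the output would actually be shown
	if l.Log().GetLevel() >= logrus.DebugLevel {
		yamlized, err := yaml.Marshal(cfg)
		if err != nil {
			l.Log().Errorf("error printing loadbalancer config: %v", err)
		} else {
			l.Log().Debugf("generated loadbalancer config:\n%s", string(yamlized))
		}
	}
}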
@ -28,12 +28,12 @@ import (

"github.com/docker/go-connections/nat"
"github.com/imdario/mergo"
l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/runtimes/docker"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/types/k3s"
"github.com/rancher/k3d/v4/pkg/types/k8s"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)

@ -58,8 +58,8 @@ func RegistryCreate(ctx context.Context, runtime runtimes.Runtime, reg *k3d.Regi
reg.Host = k3d.DefaultRegistryName
}
// if err := ValidateHostname(reg.Host); err != nil {
// log.Errorln("Invalid name for registry")
// log.Fatalln(err)
// l.Log().Errorln("Invalid name for registry")
// l.Log().Fatalln(err)
// }

registryNode := &k3d.Node{
@ -97,13 +97,13 @@ func RegistryCreate(ctx context.Context, runtime runtimes.Runtime, reg *k3d.Regi
registryNode.Ports[reg.ExposureOpts.Port] = []nat.PortBinding{reg.ExposureOpts.Binding}

// create the registry node
log.Infof("Creating node '%s'", registryNode.Name)
l.Log().Infof("Creating node '%s'", registryNode.Name)
if err := NodeCreate(ctx, runtime, registryNode, k3d.NodeCreateOpts{}); err != nil {
log.Errorln("Failed to create registry node")
l.Log().Errorln("Failed to create registry node")
return nil, err
}

log.Infof("Successfully created registry '%s'", registryNode.Name)
l.Log().Infof("Successfully created registry '%s'", registryNode.Name)

return registryNode, nil

@ -115,7 +115,7 @@ func RegistryConnectClusters(ctx context.Context, runtime runtimes.Runtime, regi
// find registry node
registryNode, err := NodeGet(ctx, runtime, registryNode)
if err != nil {
log.Errorf("Failed to find registry node '%s'", registryNode.Name)
l.Log().Errorf("Failed to find registry node '%s'", registryNode.Name)
return err
}

@ -124,13 +124,13 @@ func RegistryConnectClusters(ctx context.Context, runtime runtimes.Runtime, regi
for _, c := range clusters {
cluster, err := ClusterGet(ctx, runtime, c)
if err != nil {
log.Warnf("Failed to connect to cluster '%s': Cluster not found", c.Name)
l.Log().Warnf("Failed to connect to cluster '%s': Cluster not found", c.Name)
failed++
continue
}
if err := runtime.ConnectNodeToNetwork(ctx, registryNode, cluster.Network.Name); err != nil {
log.Warnf("Failed to connect to cluster '%s': Connection failed", cluster.Name)
log.Warnln(err)
l.Log().Warnf("Failed to connect to cluster '%s': Connection failed", cluster.Name)
l.Log().Warnln(err)
failed++
}
}
@ -148,7 +148,7 @@ func RegistryConnectNetworks(ctx context.Context, runtime runtimes.Runtime, regi
// find registry node
registryNode, err := NodeGet(ctx, runtime, registryNode)
if err != nil {
log.Errorf("Failed to find registry node '%s'", registryNode.Name)
l.Log().Errorf("Failed to find registry node '%s'", registryNode.Name)
return err
}

@ -156,8 +156,8 @@ func RegistryConnectNetworks(ctx context.Context, runtime runtimes.Runtime, regi
failed := 0
for _, net := range networks {
if err := runtime.ConnectNodeToNetwork(ctx, registryNode, net); err != nil {
log.Warnf("Failed to connect to network '%s': Connection failed", net)
log.Warnln(err)
l.Log().Warnf("Failed to connect to network '%s': Connection failed", net)
l.Log().Warnln(err)
failed++
}
}
@ -247,7 +247,7 @@ func RegistryFromNode(node *k3d.Node) (*k3d.Registry, error) {
}
}

log.Tracef("Got registry %+v from node %+v", registry, node)
l.Log().Tracef("Got registry %+v from node %+v", registry, node)

return registry, nil

@ -273,11 +273,11 @@ func RegistryGenerateLocalRegistryHostingConfigMapYAML(ctx context.Context, runt
}

if len(registries) > 1 {
log.Warnf("More than one registry specified, but the LocalRegistryHostingV1 spec only supports one -> Selecting the first one: %s", registries[0].Host)
l.Log().Warnf("More than one registry specified, but the LocalRegistryHostingV1 spec only supports one -> Selecting the first one: %s", registries[0].Host)
}

if len(registries) < 1 {
log.Debugln("No registry specified, not generating local registry hosting configmap")
l.Log().Debugln("No registry specified, not generating local registry hosting configmap")
return nil, nil
}

@ -290,15 +290,15 @@ func RegistryGenerateLocalRegistryHostingConfigMapYAML(ctx context.Context, runt
// if the host is now 0.0.0.0, check if we can set it to the IP of the docker-machine, if it's used
if host == k3d.DefaultAPIHost && runtime == runtimes.Docker {
if gort.GOOS == "windows" || gort.GOOS == "darwin" {
log.Tracef("Running on %s: checking if it's using docker-machine", gort.GOOS)
l.Log().Tracef("Running on %s: checking if it's using docker-machine", gort.GOOS)
machineIP, err := runtime.(docker.Docker).GetDockerMachineIP()
if err != nil {
log.Warnf("Using docker-machine, but failed to get it's IP for usage in LocalRegistryHosting Config Map: %+v", err)
|
||||
l.Log().Warnf("Using docker-machine, but failed to get it's IP for usage in LocalRegistryHosting Config Map: %+v", err)
|
||||
} else if machineIP != "" {
log.Infof("Using the docker-machine IP %s in the LocalRegistryHosting Config Map", machineIP)
l.Log().Infof("Using the docker-machine IP %s in the LocalRegistryHosting Config Map", machineIP)
host = machineIP
} else {
log.Traceln("Not using docker-machine")
l.Log().Traceln("Not using docker-machine")
}
}
}
@ -337,7 +337,7 @@ func RegistryGenerateLocalRegistryHostingConfigMapYAML(ctx context.Context, runt
return nil, err
}

log.Tracef("LocalRegistryHostingConfigMapYaml: %s", string(cmYaml))
l.Log().Tracef("LocalRegistryHostingConfigMapYaml: %s", string(cmYaml))

return cmYaml, nil
}
@ -25,7 +25,7 @@ import (
"fmt"
"strings"

log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"

"github.com/spf13/viper"

@ -59,7 +59,7 @@ func FromViper(config *viper.Viper) (types.Config, error) {
apiVersion := strings.ToLower(config.GetString("apiversion"))
kind := strings.ToLower(config.GetString("kind"))

log.Tracef("Trying to read config apiVersion='%s', kind='%s'", apiVersion, kind)
l.Log().Tracef("Trying to read config apiVersion='%s', kind='%s'", apiVersion, kind)

switch apiVersion {
case "k3d.io/v1alpha2":
@ -77,7 +77,7 @@ func FromViper(config *viper.Viper) (types.Config, error) {
}

if err := config.Unmarshal(&cfg); err != nil {
log.Errorln("Failed to unmarshal File config")
l.Log().Errorln("Failed to unmarshal File config")

return nil, err
}

@ -32,12 +32,12 @@ import (

"github.com/xeipuuv/gojsonschema"

log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

// ValidateSchemaFile takes a filepath, reads the file and validates it against a JSON schema
func ValidateSchemaFile(filepath string, schema []byte) error {
log.Debugf("Validating file %s against default JSONSchema...", filepath)
l.Log().Debugf("Validating file %s against default JSONSchema...", filepath)

fileContents, err := ioutil.ReadFile(filepath)
if err != nil {
@ -76,7 +76,7 @@ func ValidateSchema(content map[string]interface{}, schemaJSON []byte) error {
return err
}

log.Debugf("JSON Schema Validation Result: %+v", result)
l.Log().Debugf("JSON Schema Validation Result: %+v", result)

if !result.Valid() {
var sb strings.Builder

@ -25,15 +25,15 @@ package config
import (
"github.com/imdario/mergo"
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

// MergeSimple merges two simple configuration files with the values of the destination one having priority
func MergeSimple(dest, src conf.SimpleConfig) (*conf.SimpleConfig, error) {
log.Debugf("Merging %+v into %+v", src, dest)
l.Log().Debugf("Merging %+v into %+v", src, dest)

if err := mergo.Merge(&dest, src); err != nil {
log.Errorln("Failed to merge config")
l.Log().Errorln("Failed to merge config")

return nil, err
}

@ -24,7 +24,7 @@ package config

import (
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

// ProcessClusterConfig applies processing to the config sanitizing it and doing
@ -32,10 +32,10 @@ import (
func ProcessClusterConfig(clusterConfig conf.ClusterConfig) (*conf.ClusterConfig, error) {
cluster := clusterConfig.Cluster
if cluster.Network.Name == "host" {
log.Infoln("Hostnetwork selected - disabling injection of docker host into the cluster, server load balancer and setting the api port to the k3s default")
l.Log().Infoln("Hostnetwork selected - disabling injection of docker host into the cluster, server load balancer and setting the api port to the k3s default")
// if network is set to host, exposed api port must be the one imposed by k3s
k3sPort := cluster.KubeAPI.Port.Port()
log.Debugf("Host network was chosen, changing provided/random api port to k3s:%s", k3sPort)
l.Log().Debugf("Host network was chosen, changing provided/random api port to k3s:%s", k3sPort)
cluster.KubeAPI.PortMapping.Binding.HostPort = k3sPort

// if network is host, don't inject docker host into the cluster
@ -41,7 +41,7 @@ import (
"gopkg.in/yaml.v2"
"inet.af/netaddr"

log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

var (
@ -118,7 +118,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
}
newCluster.Nodes = append(newCluster.Nodes, newCluster.ServerLoadBalancer.Node)
} else {
log.Debugln("Disabling the load balancer")
l.Log().Debugln("Disabling the load balancer")
}

/*************
@ -299,7 +299,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
if err != nil {
return nil, fmt.Errorf("Failed to parse use-registry string '%s': %+v", usereg, err)
}
log.Tracef("Parsed registry reference: %+v", reg)
l.Log().Tracef("Parsed registry reference: %+v", reg)
clusterCreateOpts.Registries.Use = append(clusterCreateOpts.Registries.Use, reg)
}

@ -307,7 +307,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
var k3sRegistry *k3s.Registry

if strings.Contains(simpleConfig.Registries.Config, "\n") { // CASE 1: embedded registries.yaml (multiline string)
log.Debugf("Found multiline registries config embedded in SimpleConfig:\n%s", simpleConfig.Registries.Config)
l.Log().Debugf("Found multiline registries config embedded in SimpleConfig:\n%s", simpleConfig.Registries.Config)
if err := yaml.Unmarshal([]byte(simpleConfig.Registries.Config), &k3sRegistry); err != nil {
return nil, fmt.Errorf("Failed to read embedded registries config: %+v", err)
}
@ -326,7 +326,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
}
}

log.Tracef("Registry: read config from input:\n%+v", k3sRegistry)
l.Log().Tracef("Registry: read config from input:\n%+v", k3sRegistry)
clusterCreateOpts.Registries.Config = k3sRegistry
}

@ -27,7 +27,7 @@ import (

configtypes "github.com/rancher/k3d/v4/pkg/config/types"
"github.com/rancher/k3d/v4/pkg/config/v1alpha2"
log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

var Migrations = map[string]func(configtypes.Config) (configtypes.Config, error){
@ -35,7 +35,7 @@ var Migrations = map[string]func(configtypes.Config) (configtypes.Config, error)
}

func MigrateV1Alpha2(input configtypes.Config) (configtypes.Config, error) {
log.Debugln("Migrating v1alpha2 to v1alpha3")
l.Log().Debugln("Migrating v1alpha2 to v1alpha3")

injson, err := json.Marshal(input)
if err != nil {
@ -80,13 +80,13 @@ func MigrateV1Alpha2(input configtypes.Config) (configtypes.Config, error) {

cfg.APIVersion = ApiVersion

log.Debugf("Migrated config: %+v", cfg)
l.Log().Debugf("Migrated config: %+v", cfg)

return cfg, nil

}

log.Debugf("No migration needed for %s#%s -> %s#%s", input.GetAPIVersion(), input.GetKind(), ApiVersion, input.GetKind())
l.Log().Debugf("No migration needed for %s#%s -> %s#%s", input.GetAPIVersion(), input.GetKind(), ApiVersion, input.GetKind())

return input, nil

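MigrateV1Alpha2 above converts between config schema versions with a JSON round-trip: marshal the old struct, unmarshal into the new one, and let matching field names carry over. A hedged sketch of that mechanism — SimpleConfigOld and SimpleConfigNew are hypothetical stand-ins, not the real v1alpha2/v1alpha3 types:

package main

import (
	"encoding/json"
	"fmt"
)

type SimpleConfigOld struct {
	Name    string `json:"name"`
	Servers int    `json:"servers"`
}

type SimpleConfigNew struct {
	Name    string `json:"name"`
	Servers int    `json:"servers"`
	Agents  int    `json:"agents"` // new field: left zero-valued by the round-trip
}

func main() {
	old := SimpleConfigOld{Name: "mycluster", Servers: 3}

	injson, err := json.Marshal(old)
	if err != nil {
		panic(err)
	}

	var cfg SimpleConfigNew
	if err := json.Unmarshal(injson, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("Migrated config: %+v\n", cfg)
}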
@ -35,14 +35,14 @@ import (
"fmt"

dockerunits "github.com/docker/go-units"
log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

// ValidateClusterConfig checks a given cluster config for basic errors
func ValidateClusterConfig(ctx context.Context, runtime runtimes.Runtime, config conf.ClusterConfig) error {
// cluster name must be a valid host name
if err := k3dc.CheckName(config.Cluster.Name); err != nil {
log.Errorf("Provided cluster name '%s' does not match requirements", config.Cluster.Name)
l.Log().Errorf("Provided cluster name '%s' does not match requirements", config.Cluster.Name)

return err
}
39
pkg/logger/logger.go
Normal file
@ -0,0 +1,39 @@
/*
Copyright © 2020-2021 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package logger

import (
	"github.com/sirupsen/logrus"
)

// Logger is the default instance of logger used in all other packages
// instead of the global scope logrus.Logger.
var Logger *logrus.Logger

func init() {
	Logger = logrus.New()
}

// Log is used to return the default Logger.
func Log() *logrus.Logger {
	return Logger
}
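Every call site in this commit follows the same recipe: import the package under the alias l and go through Log() to reach the shared *logrus.Logger, so level and formatter are configured once for the whole process. A minimal consumer (assumes the k3d module on the import path):

package main

import (
	l "github.com/rancher/k3d/v4/pkg/logger"
	"github.com/sirupsen/logrus"
)

func main() {
	// configure the shared instance once...
	l.Log().SetLevel(logrus.DebugLevel)

	// ...and log through it from anywhere, exactly as the call sites above do
	l.Log().Debugf("running with log level %s", l.Log().GetLevel())
	l.Log().Infoln("logger package wired up")
}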
@ -33,19 +33,20 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
l "github.com/rancher/k3d/v4/pkg/logger"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus"
)

// createContainer creates a new docker container from translated specs
func createContainer(ctx context.Context, dockerNode *NodeInDocker, name string) (string, error) {

log.Tracef("Creating docker container with translated config\n%+v\n", dockerNode)
l.Log().Tracef("Creating docker container with translated config\n%+v\n", dockerNode)

// initialize docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return "", err
}
defer docker.Close()
@ -57,15 +58,15 @@ func createContainer(ctx context.Context, dockerNode *NodeInDocker, name string)
if err != nil {
if client.IsErrNotFound(err) {
if err := pullImage(ctx, docker, dockerNode.ContainerConfig.Image); err != nil {
log.Errorf("Failed to create container '%s'", name)
l.Log().Errorf("Failed to create container '%s'", name)
return "", err
}
continue
}
log.Errorf("Failed to create container '%s'", name)
l.Log().Errorf("Failed to create container '%s'", name)
return "", err
}
log.Debugf("Created container %s (ID: %s)", name, resp.ID)
l.Log().Debugf("Created container %s (ID: %s)", name, resp.ID)
break
}

@ -76,7 +77,7 @@ func startContainer(ctx context.Context, ID string) error {
// initialize docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return err
}
defer docker.Close()
@ -90,7 +91,7 @@ func removeContainer(ctx context.Context, ID string) error {
// (0) create docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return err
}
defer docker.Close()
@ -103,11 +104,11 @@ func removeContainer(ctx context.Context, ID string) error {

// (2) remove container
if err := docker.ContainerRemove(ctx, ID, options); err != nil {
log.Errorf("Failed to delete container '%s'", ID)
l.Log().Errorf("Failed to delete container '%s'", ID)
return err
}

log.Infoln("Deleted", ID)
l.Log().Infoln("Deleted", ID)

return nil
}
@ -117,22 +118,22 @@ func pullImage(ctx context.Context, docker *client.Client, image string) error {

resp, err := docker.ImagePull(ctx, image, types.ImagePullOptions{})
if err != nil {
log.Errorf("Failed to pull image '%s'", image)
l.Log().Errorf("Failed to pull image '%s'", image)
return err
}
defer resp.Close()

log.Infof("Pulling image '%s'", image)
l.Log().Infof("Pulling image '%s'", image)

// in debug mode (--verbose flag set), output pull progress
var writer io.Writer = ioutil.Discard
if log.GetLevel() == log.DebugLevel {
if l.Log().GetLevel() == logrus.DebugLevel {
writer = os.Stdout
}
_, err = io.Copy(writer, resp)
if err != nil {
log.Warningf("Couldn't get docker output")
log.Warningln(err)
l.Log().Warningf("Couldn't get docker output")
l.Log().Warningln(err)
}

return nil
@ -144,7 +145,7 @@ func getNodeContainer(ctx context.Context, node *k3d.Node) (*types.Container, er
// (0) create docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return nil, err
}
defer docker.Close()
@ -186,7 +187,7 @@ func getNodeContainer(ctx context.Context, node *k3d.Node) (*types.Container, er
func executeCheckInContainer(ctx context.Context, image string, cmd []string) (int64, error) {
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return -1, err
}
defer docker.Close()
@ -202,7 +203,7 @@ func executeCheckInContainer(ctx context.Context, image string, cmd []string) (i
Entrypoint: []string{},
}, nil, nil, nil, "")
if err != nil {
log.Errorf("Failed to create container from image %s with cmd %s", image, cmd)
l.Log().Errorf("Failed to create container from image %s with cmd %s", image, cmd)
return -1, err
}

@ -215,7 +216,7 @@ func executeCheckInContainer(ctx context.Context, image string, cmd []string) (i
select {
case err := <-errCh:
if err != nil {
log.Errorf("Error while waiting for container %s to exit", resp.ID)
l.Log().Errorf("Error while waiting for container %s to exit", resp.ID)
return -1, err
}
case status := <-statusCh:
@ -231,11 +232,11 @@ func executeCheckInContainer(ctx context.Context, image string, cmd []string) (i

// CheckIfDirectoryExists checks for the existence of a given path inside the docker environment
func CheckIfDirectoryExists(ctx context.Context, image string, dir string) (bool, error) {
log.Tracef("checking if dir %s exists in docker environment...", dir)
l.Log().Tracef("checking if dir %s exists in docker environment...", dir)
shellCmd := fmt.Sprintf("[ -d \"%s\" ] && exit 0 || exit 1", dir)
cmd := []string{"sh", "-c", shellCmd}
exitCode, err := executeCheckInContainer(ctx, image, cmd)
log.Tracef("check dir container returned %d exist code", exitCode)
|
||||
l.Log().Tracef("check dir container returned %d exist code", exitCode)
|
||||
return exitCode == 0, err
|
||||
|
||||
}
|
||||
|
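CheckIfDirectoryExists encodes its answer in a container exit code: the throwaway container runs sh -c '[ -d ... ] && exit 0 || exit 1' and executeCheckInContainer reports that code back. A hypothetical caller (image and path are illustrative, not taken from this commit):

// Assumes the docker runtime package from this diff and the logger l as above.
exists, err := docker.CheckIfDirectoryExists(ctx, "rancher/k3s:v1.20.4-k3s1", "/sys/fs/cgroup/unified")
if err != nil {
	l.Log().Fatalln(err)
}
l.Log().Debugf("cgroup v2 directory present: %t", exists)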
@ -26,7 +26,7 @@ import (
"net/url"
"os"

log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

type Docker struct{}

@ -43,7 +43,7 @@ func (d Docker) GetHost() string {
if err != nil {
return ""
}
log.Debugf("DockerHost: %s", url.Host)
l.Log().Debugf("DockerHost: %s", url.Host)
return url.Host
}

@ -25,7 +25,7 @@ import (
"context"

"github.com/docker/docker/api/types"
log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

// GetImages returns a list of images present in the runtime

@ -33,14 +33,14 @@ func (d Docker) GetImages(ctx context.Context) ([]string, error) {
// create docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return nil, err
}
defer docker.Close()

imageSummary, err := docker.ImageList(ctx, types.ImageListOptions{All: true})
if err != nil {
log.Errorln("Failed to list available docker images")
l.Log().Errorln("Failed to list available docker images")
return nil, err
}

@ -26,15 +26,15 @@ import (
"context"
"strings"

l "github.com/rancher/k3d/v4/pkg/logger"
runtimeTypes "github.com/rancher/k3d/v4/pkg/runtimes/types"
log "github.com/sirupsen/logrus"
)

func (d Docker) Info() (*runtimeTypes.RuntimeInfo, error) {
// create docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return nil, err
}
defer docker.Close()

@ -26,15 +26,15 @@ import (
"context"
"io"

l "github.com/rancher/k3d/v4/pkg/logger"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)

// GetKubeconfig grabs the kubeconfig from inside a k3d node
func (d Docker) GetKubeconfig(ctx context.Context, node *k3d.Node) (io.ReadCloser, error) {
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return nil, err
}
defer docker.Close()

@ -44,11 +44,11 @@ func (d Docker) GetKubeconfig(ctx context.Context, node *k3d.Node) (io.ReadClose
return nil, err
}

log.Tracef("Container Details: %+v", container)
l.Log().Tracef("Container Details: %+v", container)

reader, _, err := docker.CopyFromContainer(ctx, container.ID, "/output/kubeconfig.yaml")
if err != nil {
log.Errorf("Failed to copy from container '%s'", container.ID)
l.Log().Errorf("Failed to copy from container '%s'", container.ID)
return nil, err
}

@ -28,21 +28,21 @@ import (
"os/exec"
"strings"

log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

func (d Docker) GetDockerMachineIP() (string, error) {
machine := os.ExpandEnv("$DOCKER_MACHINE_NAME")
if machine == "" {
log.Tracef("Docker Machine not specified via DOCKER_MACHINE_NAME env var")
l.Log().Tracef("Docker Machine not specified via DOCKER_MACHINE_NAME env var")
return "", nil
}

log.Debugf("Docker Machine found: %s", machine)
l.Log().Debugf("Docker Machine found: %s", machine)
dockerMachinePath, err := exec.LookPath("docker-machine")
if err != nil {
if err == exec.ErrNotFound {
log.Debugf("DOCKER_MACHINE_NAME env var present, but executable docker-machine not found: %+v", err)
l.Log().Debugf("DOCKER_MACHINE_NAME env var present, but executable docker-machine not found: %+v", err)
}
return "", nil
}

@ -32,10 +32,10 @@ import (
"github.com/docker/docker/api/types/network"
"inet.af/netaddr"

l "github.com/rancher/k3d/v4/pkg/logger"
runtimeErr "github.com/rancher/k3d/v4/pkg/runtimes/errors"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/util"
log "github.com/sirupsen/logrus"
)

// GetNetwork returns a given network

@ -43,7 +43,7 @@ func (d Docker) GetNetwork(ctx context.Context, searchNet *k3d.ClusterNetwork) (
// (0) create new docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return nil, err
}
defer docker.Close()

@ -65,7 +65,7 @@ func (d Docker) GetNetwork(ctx context.Context, searchNet *k3d.ClusterNetwork) (
Filters: filter,
})
if err != nil {
log.Errorln("Failed to list docker networks")
l.Log().Errorln("Failed to list docker networks")
return nil, err
}

@ -102,7 +102,7 @@ func (d Docker) GetNetwork(ctx context.Context, searchNet *k3d.ClusterNetwork) (
network.IPAM.IPsUsed = append(network.IPAM.IPsUsed, used)
}
} else {
log.Debugf("Network %s does not have an IPAM config", network.Name)
l.Log().Debugf("Network %s does not have an IPAM config", network.Name)
}

// Only one Network allowed, but some functions don't care about this, so they can ignore the error and just use the first one returned

@ -121,7 +121,7 @@ func (d Docker) CreateNetworkIfNotPresent(ctx context.Context, inNet *k3d.Cluste
// (0) create new docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return nil, false, err
}
defer docker.Close()

@ -130,10 +130,10 @@ func (d Docker) CreateNetworkIfNotPresent(ctx context.Context, inNet *k3d.Cluste
if err != nil {
if err != runtimeErr.ErrRuntimeNetworkNotExists {
if existingNet == nil {
log.Errorln("Failed to check for duplicate networks")
l.Log().Errorln("Failed to check for duplicate networks")
return nil, false, err
} else if err == runtimeErr.ErrRuntimeNetworkMultiSameName {
log.Warnf("%+v, returning the first one: %s (%s)", err, existingNet.Name, existingNet.ID)
l.Log().Warnf("%+v, returning the first one: %s (%s)", err, existingNet.Name, existingNet.ID)
return existingNet, true, nil
} else {
return nil, false, fmt.Errorf("unhandled error while checking for existing networks: %+v", err)

@ -152,7 +152,7 @@ func (d Docker) CreateNetworkIfNotPresent(ctx context.Context, inNet *k3d.Cluste

// we want a managed (user-defined) network, but user didn't specify a subnet, so we try to auto-generate one
if inNet.IPAM.Managed && inNet.IPAM.IPPrefix.IsZero() {
log.Traceln("No subnet prefix given, but network should be managed: Trying to get a free subnet prefix...")
l.Log().Traceln("No subnet prefix given, but network should be managed: Trying to get a free subnet prefix...")
freeSubnetPrefix, err := d.getFreeSubnetPrefix(ctx)
if err != nil {
return nil, false, err

@ -174,17 +174,17 @@ func (d Docker) CreateNetworkIfNotPresent(ctx context.Context, inNet *k3d.Cluste

newNet, err := docker.NetworkCreate(ctx, inNet.Name, netCreateOpts)
if err != nil {
log.Errorln("Failed to create new network")
l.Log().Errorln("Failed to create new network")
return nil, false, err
}

networkDetails, err := docker.NetworkInspect(ctx, newNet.ID, types.NetworkInspectOptions{})
if err != nil {
log.Errorln("Failed to inspect newly created network")
l.Log().Errorln("Failed to inspect newly created network")
return nil, false, err
}

log.Infof("Created network '%s' (%s)", inNet.Name, networkDetails.ID)
l.Log().Infof("Created network '%s' (%s)", inNet.Name, networkDetails.ID)
prefix, err := netaddr.ParseIPPrefix(networkDetails.IPAM.Config[0].Subnet)
if err != nil {
return nil, false, err

@ -204,7 +204,7 @@ func (d Docker) DeleteNetwork(ctx context.Context, ID string) error {
// (0) create new docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return err
}
defer docker.Close()

@ -223,7 +223,7 @@ func (d Docker) DeleteNetwork(ctx context.Context, ID string) error {
func GetNetwork(ctx context.Context, ID string) (types.NetworkResource, error) {
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return types.NetworkResource{}, err
}
defer docker.Close()

@ -234,7 +234,7 @@ func GetNetwork(ctx context.Context, ID string) (types.NetworkResource, error) {
func GetGatewayIP(ctx context.Context, network string) (net.IP, error) {
bridgeNetwork, err := GetNetwork(ctx, network)
if err != nil {
log.Errorf("Failed to get bridge network with name '%s'", network)
l.Log().Errorf("Failed to get bridge network with name '%s'", network)
return nil, err
}

@ -251,7 +251,7 @@ func GetGatewayIP(ctx context.Context, network string) (net.IP, error) {
func (d Docker) ConnectNodeToNetwork(ctx context.Context, node *k3d.Node, networkName string) error {
// check that node was not attached to network before
if isAttachedToNetwork(node, networkName) {
log.Infof("Container '%s' is already connected to '%s'", node.Name, networkName)
l.Log().Infof("Container '%s' is already connected to '%s'", node.Name, networkName)
return nil
}

@ -264,7 +264,7 @@ func (d Docker) ConnectNodeToNetwork(ctx context.Context, node *k3d.Node, networ
// get docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return err
}
defer docker.Close()

@ -272,7 +272,7 @@ func (d Docker) ConnectNodeToNetwork(ctx context.Context, node *k3d.Node, networ
// get network
networkResource, err := GetNetwork(ctx, networkName)
if err != nil {
log.Errorf("Failed to get network '%s'", networkName)
l.Log().Errorf("Failed to get network '%s'", networkName)
return err
}

@ -282,7 +282,7 @@ func (d Docker) ConnectNodeToNetwork(ctx context.Context, node *k3d.Node, networ

// DisconnectNodeFromNetwork disconnects a node from a network (u don't say :O)
func (d Docker) DisconnectNodeFromNetwork(ctx context.Context, node *k3d.Node, networkName string) error {
log.Debugf("Disconnecting node %s from network %s...", node.Name, networkName)
l.Log().Debugf("Disconnecting node %s from network %s...", node.Name, networkName)
// get container
container, err := getNodeContainer(ctx, node)
if err != nil {

@ -292,7 +292,7 @@ func (d Docker) DisconnectNodeFromNetwork(ctx context.Context, node *k3d.Node, n
// get docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return err
}
defer docker.Close()

@ -300,7 +300,7 @@ func (d Docker) DisconnectNodeFromNetwork(ctx context.Context, node *k3d.Node, n
// get network
networkResource, err := GetNetwork(ctx, networkName)
if err != nil {
log.Errorf("Failed to get network '%s'", networkName)
l.Log().Errorf("Failed to get network '%s'", networkName)
return err
}

@ -327,7 +327,7 @@ func (d Docker) getFreeSubnetPrefix(ctx context.Context) (netaddr.IPPrefix, erro
return netaddr.IPPrefix{}, fmt.Errorf("failed to inspect fake network %s: %w", fakenetResp.ID, err)
}

log.Tracef("Created fake network %s (%s) with subnet prefix %s. Deleting it again to re-use that prefix...", fakenet.Name, fakenet.ID, fakenet.IPAM.IPPrefix.String())
l.Log().Tracef("Created fake network %s (%s) with subnet prefix %s. Deleting it again to re-use that prefix...", fakenet.Name, fakenet.ID, fakenet.IPAM.IPPrefix.String())

if err := d.DeleteNetwork(ctx, fakenet.ID); err != nil {
return netaddr.IPPrefix{}, fmt.Errorf("failed to delete fake network %s (%s): %w", fakenet.Name, fakenet.ID, err)
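getFreeSubnetPrefix does not guess at free address space; it lets Docker's own IPAM allocate it. The trick: create a throwaway network, read back the subnet Docker assigned, and delete the network again so the prefix is free for the real cluster network. A sketch of that round-trip against the raw moby client (network name and error wrapping are illustrative, not the commit's exact code):

fakenetResp, err := docker.NetworkCreate(ctx, "k3d-fakenet", types.NetworkCreate{})
if err != nil {
	return netaddr.IPPrefix{}, fmt.Errorf("failed to create fake network: %w", err)
}
res, err := docker.NetworkInspect(ctx, fakenetResp.ID, types.NetworkInspectOptions{})
if err != nil {
	return netaddr.IPPrefix{}, fmt.Errorf("failed to inspect fake network %s: %w", fakenetResp.ID, err)
}
prefix, err := netaddr.ParseIPPrefix(res.IPAM.Config[0].Subnet)
if err != nil {
	return netaddr.IPPrefix{}, err
}
// delete it again, so the prefix can be re-used by the network we actually want
if err := docker.NetworkRemove(ctx, fakenetResp.ID); err != nil {
	return netaddr.IPPrefix{}, err
}
return prefix, nil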
@ -339,9 +339,9 @@ func (d Docker) getFreeSubnetPrefix(ctx context.Context) (netaddr.IPPrefix, erro

// parseIPAM Returns an IPAM structure with the subnet and gateway filled in. If some of the values
// cannot be parsed, an error is returned. If gateway is empty, the function calculates the default gateway.
func (d Docker) parseIPAM(config network.IPAMConfig) (ipam k3d.IPAM, err error){
func (d Docker) parseIPAM(config network.IPAMConfig) (ipam k3d.IPAM, err error) {
var gateway netaddr.IP
ipam = k3d.IPAM{ IPsUsed: []netaddr.IP{}}
ipam = k3d.IPAM{IPsUsed: []netaddr.IP{}}

ipam.IPPrefix, err = netaddr.ParseIPPrefix(config.Subnet)
if err != nil {

@ -350,7 +350,7 @@ func (d Docker) parseIPAM(config network.IPAMConfig) (ipam k3d.IPAM, err error){

if config.Gateway == "" {
gateway = ipam.IPPrefix.IP.Next()
} else {
} else {
gateway, err = netaddr.ParseIP(config.Gateway)
}
ipam.IPsUsed = append(ipam.IPsUsed, gateway)
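The gateway fallback in parseIPAM is simply "first host address after the network address". Worked through the same netaddr API as the code above (the .IP field access matches the vendored inet.af/netaddr version used here):

prefix, err := netaddr.ParseIPPrefix("172.28.0.0/16")
if err != nil {
	panic(err)
}
gateway := prefix.IP.Next()
fmt.Println(gateway) // 172.28.0.1 — the default gateway Docker would pick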
@ -33,9 +33,9 @@ import (

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
l "github.com/rancher/k3d/v4/pkg/logger"
runtimeErr "github.com/rancher/k3d/v4/pkg/runtimes/errors"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)

// CreateNode creates a new container

@ -44,14 +44,14 @@ func (d Docker) CreateNode(ctx context.Context, node *k3d.Node) error {
// translate node spec to docker container specs
dockerNode, err := TranslateNodeToContainer(node)
if err != nil {
log.Errorln("Failed to translate k3d node specification to docker container specifications")
l.Log().Errorln("Failed to translate k3d node specification to docker container specifications")
return err
}

// create node
_, err = createContainer(ctx, dockerNode, node.Name)
if err != nil {
log.Errorf("Failed to create node '%s'", node.Name)
l.Log().Errorf("Failed to create node '%s'", node.Name)
return err
}

@ -60,7 +60,7 @@ func (d Docker) CreateNode(ctx context.Context, node *k3d.Node) error {

// DeleteNode deletes a node
func (d Docker) DeleteNode(ctx context.Context, nodeSpec *k3d.Node) error {
log.Debugf("Deleting node %s ...", nodeSpec.Name)
l.Log().Debugf("Deleting node %s ...", nodeSpec.Name)
return removeContainer(ctx, nodeSpec.Name)
}

@ -81,7 +81,7 @@ func (d Docker) GetNodesByLabel(ctx context.Context, labels map[string]string) (

containerDetails, err := getContainerDetails(ctx, container.ID)
if err != nil {
log.Warnf("Failed to get details for container %s", container.Names[0])
l.Log().Warnf("Failed to get details for container %s", container.Names[0])
node, err = TranslateContainerToNode(&container)
if err != nil {
return nil, err

@ -111,7 +111,7 @@ func (d Docker) StartNode(ctx context.Context, node *k3d.Node) error {
// get container which represents the node
nodeContainer, err := getNodeContainer(ctx, node)
if err != nil {
log.Errorf("Failed to get container for node '%s'", node.Name)
l.Log().Errorf("Failed to get container for node '%s'", node.Name)
return err
}

@ -121,7 +121,7 @@ func (d Docker) StartNode(ctx context.Context, node *k3d.Node) error {
}

// actually start the container
log.Infof("Starting Node '%s'", node.Name)
l.Log().Infof("Starting Node '%s'", node.Name)
if err := docker.ContainerStart(ctx, nodeContainer.ID, types.ContainerStartOptions{}); err != nil {
return err
}

@ -151,7 +151,7 @@ func (d Docker) StopNode(ctx context.Context, node *k3d.Node) error {
// get container which represents the node
nodeContainer, err := getNodeContainer(ctx, node)
if err != nil {
log.Errorf("Failed to get container for node '%s'", node.Name)
l.Log().Errorf("Failed to get container for node '%s'", node.Name)
return err
}

@ -207,7 +207,7 @@ func getContainerDetails(ctx context.Context, containerID string) (types.Contain

containerDetails, err := docker.ContainerInspect(ctx, containerID)
if err != nil {
log.Errorf("Failed to get details for container '%s'", containerID)
l.Log().Errorf("Failed to get details for container '%s'", containerID)
return types.ContainerJSON{}, err
}

@ -229,7 +229,7 @@ func (d Docker) GetNode(ctx context.Context, node *k3d.Node) (*k3d.Node, error)

node, err = TranslateContainerDetailsToNode(containerDetails)
if err != nil {
log.Errorf("Failed to translate container '%s' to node object", containerDetails.Name)
l.Log().Errorf("Failed to translate container '%s' to node object", containerDetails.Name)
return node, err
}

@ -252,7 +252,7 @@ func (d Docker) GetNodeStatus(ctx context.Context, node *k3d.Node) (bool, string
// create docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return running, stateString, err
}
defer docker.Close()

@ -288,14 +288,14 @@ func (d Docker) GetNodeLogs(ctx context.Context, node *k3d.Node, since time.Time
// create docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return nil, err
}
defer docker.Close()

containerInspectResponse, err := docker.ContainerInspect(ctx, container.ID)
if err != nil {
log.Errorf("Failed to inspect node '%s'(ID %s)", node.Name, container.ID)
l.Log().Errorf("Failed to inspect node '%s'(ID %s)", node.Name, container.ID)
return nil, err
}

@ -309,7 +309,7 @@ func (d Docker) GetNodeLogs(ctx context.Context, node *k3d.Node, since time.Time
}
logreader, err := docker.ContainerLogs(ctx, container.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Since: sinceStr})
if err != nil {
log.Errorf("Failed to get logs from node '%s' (container '%s')", node.Name, container.ID)
l.Log().Errorf("Failed to get logs from node '%s' (container '%s')", node.Name, container.ID)
return nil, err
}

@ -335,7 +335,7 @@ func (d Docker) ExecInNode(ctx context.Context, node *k3d.Node, cmd []string) er
if execConnection != nil && execConnection.Reader != nil {
logs, err := ioutil.ReadAll(execConnection.Reader)
if err != nil {
log.Errorf("Failed to get logs from errored exec process in node '%s'", node.Name)
l.Log().Errorf("Failed to get logs from errored exec process in node '%s'", node.Name)
return err
}
err = fmt.Errorf("%w: Logs from failed access process:\n%s", err, string(logs))

@ -346,7 +346,7 @@ func (d Docker) ExecInNode(ctx context.Context, node *k3d.Node, cmd []string) er

func executeInNode(ctx context.Context, node *k3d.Node, cmd []string) (*types.HijackedResponse, error) {

log.Debugf("Executing command '%+v' in node '%s'", cmd, node.Name)
l.Log().Debugf("Executing command '%+v' in node '%s'", cmd, node.Name)

// get the container for the given node
container, err := getNodeContainer(ctx, node)

@ -357,7 +357,7 @@ func executeInNode(ctx context.Context, node *k3d.Node, cmd []string) (*types.Hi
// create docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return nil, err
}
defer docker.Close()

@ -378,12 +378,12 @@ func executeInNode(ctx context.Context, node *k3d.Node, cmd []string) (*types.Hi
Tty: true,
})
if err != nil {
log.Errorf("Failed to connect to exec process in node '%s'", node.Name)
l.Log().Errorf("Failed to connect to exec process in node '%s'", node.Name)
return nil, err
}

if err := docker.ContainerExecStart(ctx, exec.ID, types.ExecStartCheck{Tty: true}); err != nil {
log.Errorf("Failed to start exec process in node '%s'", node.Name)
l.Log().Errorf("Failed to start exec process in node '%s'", node.Name)
return nil, err
}

@ -391,20 +391,20 @@ func executeInNode(ctx context.Context, node *k3d.Node, cmd []string) (*types.Hi
// get info about exec process inside container
execInfo, err := docker.ContainerExecInspect(ctx, exec.ID)
if err != nil {
log.Errorf("Failed to inspect exec process in node '%s'", node.Name)
l.Log().Errorf("Failed to inspect exec process in node '%s'", node.Name)
return &execConnection, err
}

// if still running, continue loop
if execInfo.Running {
log.Tracef("Exec process '%+v' still running in node '%s'.. sleeping for 1 second...", cmd, node.Name)
l.Log().Tracef("Exec process '%+v' still running in node '%s'.. sleeping for 1 second...", cmd, node.Name)
time.Sleep(1 * time.Second)
continue
}

// check exitcode
if execInfo.ExitCode == 0 { // success
log.Debugf("Exec process in node '%s' exited with '0'", node.Name)
l.Log().Debugf("Exec process in node '%s' exited with '0'", node.Name)
return &execConnection, nil
}
return &execConnection, fmt.Errorf("Exec process in node '%s' failed with exit code '%d'", node.Name, execInfo.ExitCode)

@ -416,7 +416,7 @@ func (d Docker) GetNodesInNetwork(ctx context.Context, network string) ([]*k3d.N
// create docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return nil, err
}
defer docker.Close()

@ -437,7 +437,7 @@ func (d Docker) GetNodesInNetwork(ctx context.Context, network string) ([]*k3d.N
node, err := TranslateContainerDetailsToNode(containerDetails)
if err != nil {
if errors.Is(err, runtimeErr.ErrRuntimeContainerUnknown) {
log.Tracef("GetNodesInNetwork: inspected non-k3d-managed container %s", containerDetails.Name)
l.Log().Tracef("GetNodesInNetwork: inspected non-k3d-managed container %s", containerDetails.Name)
continue
}
return nil, err

@ -458,7 +458,7 @@ func (d Docker) RenameNode(ctx context.Context, node *k3d.Node, newName string)
// create docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return err
}
defer docker.Close()

@ -31,10 +31,10 @@ import (
docker "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/go-connections/nat"
l "github.com/rancher/k3d/v4/pkg/logger"
runtimeErr "github.com/rancher/k3d/v4/pkg/runtimes/errors"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/types/fixes"
log "github.com/sirupsen/logrus"

dockercliopts "github.com/docker/cli/opts"
dockerunits "github.com/docker/go-units"

@ -145,7 +145,7 @@ func TranslateNodeToContainer(node *k3d.Node) (*NodeInDocker, error) {
if len(node.Networks) > 0 {
netInfo, err := GetNetwork(context.Background(), node.Networks[0]) // FIXME: only considering first network here, as that's the one k3d creates for a cluster
if err != nil {
log.Warnf("Failed to get network information: %v", err)
l.Log().Warnf("Failed to get network information: %v", err)
} else if netInfo.Driver == "host" {
hostConfig.NetworkMode = "host"
}

@ -175,7 +175,7 @@ func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d

// first, make sure, that it's actually a k3d managed container by checking if it has all the default labels
for k, v := range k3d.DefaultRuntimeLabels {
log.Tracef("TranslateContainerDetailsToNode: Checking for default object label %s=%s on container %s", k, v, containerDetails.Name)
l.Log().Tracef("TranslateContainerDetailsToNode: Checking for default object label %s=%s on container %s", k, v, containerDetails.Name)
found := false
for lk, lv := range containerDetails.Config.Labels {
if lk == k && lv == v {

@ -184,7 +184,7 @@ func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d
}
}
if !found {
log.Debugf("Container %s is missing default label %s=%s in label set %+v", containerDetails.Name, k, v, containerDetails.Config.Labels)
l.Log().Debugf("Container %s is missing default label %s=%s in label set %+v", containerDetails.Name, k, v, containerDetails.Config.Labels)
return nil, runtimeErr.ErrRuntimeContainerUnknown
}
}

@ -223,7 +223,7 @@ func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d
if serverIsInitLabel, ok := containerDetails.Config.Labels[k3d.LabelServerIsInit]; ok {
if serverIsInitLabel == "true" {
if !clusterInitFlagSet {
log.Errorf("Container %s has label %s=true, but the args do not contain the --cluster-init flag", containerDetails.Name, k3d.LabelServerIsInit)
l.Log().Errorf("Container %s has label %s=true, but the args do not contain the --cluster-init flag", containerDetails.Name, k3d.LabelServerIsInit)
} else {
serverOpts.IsInit = true
}

@ -37,9 +37,9 @@ import (
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
"github.com/pkg/errors"
l "github.com/rancher/k3d/v4/pkg/logger"
runtimeErrors "github.com/rancher/k3d/v4/pkg/runtimes/errors"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)

// GetDefaultObjectLabelsFilter returns docker type filters created from k3d labels

@ -57,27 +57,27 @@ func (d Docker) CopyToNode(ctx context.Context, src string, dest string, node *k
// create docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return err
}
defer docker.Close()

container, err := getNodeContainer(ctx, node)
if err != nil {
log.Errorf("Failed to find container for target node '%s'", node.Name)
l.Log().Errorf("Failed to find container for target node '%s'", node.Name)
return err
}

// source: docker/cli/cli/command/container/cp
srcInfo, err := archive.CopyInfoSourcePath(src, false)
if err != nil {
log.Errorln("Failed to copy info source path")
l.Log().Errorln("Failed to copy info source path")
return err
}

srcArchive, err := archive.TarResource(srcInfo)
if err != nil {
log.Errorln("Failed to create tar resource")
l.Log().Errorln("Failed to create tar resource")
return err
}
defer srcArchive.Close()

@ -90,7 +90,7 @@ func (d Docker) CopyToNode(ctx context.Context, src string, dest string, node *k

destDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, destInfo)
if err != nil {
log.Errorln("Failed to prepare archive")
l.Log().Errorln("Failed to prepare archive")
return err
}
defer preparedArchive.Close()

@ -109,7 +109,7 @@ func (d Docker) WriteToNode(ctx context.Context, content []byte, dest string, mo
// create docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return err
}
defer docker.Close()

@ -132,7 +132,7 @@ func (d Docker) WriteToNode(ctx context.Context, content []byte, dest string, mo
}

if err := tarWriter.Close(); err != nil {
log.Debugf("Failed to close tar writer: %+v", err)
l.Log().Debugf("Failed to close tar writer: %+v", err)
}

tarBytes := bytes.NewReader(buf.Bytes())
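WriteToNode has to wrap its payload in a tar stream, because the docker API only accepts archives for copying into containers. A minimal sketch of that mechanic (destination path, payload and containerID are placeholders, not values from this commit):

// content is the []byte payload; the single tar entry carries its size and mode.
var buf bytes.Buffer
tarWriter := tar.NewWriter(&buf)
if err := tarWriter.WriteHeader(&tar.Header{Name: "etc/example.conf", Mode: 0644, Size: int64(len(content))}); err != nil {
	return err
}
if _, err := tarWriter.Write(content); err != nil {
	return err
}
if err := tarWriter.Close(); err != nil {
	l.Log().Debugf("Failed to close tar writer: %+v", err)
}
// ship the archive to the container root; paths inside the tar are relative to "/"
return docker.CopyToContainer(ctx, containerID, "/", bytes.NewReader(buf.Bytes()), types.CopyToContainerOptions{})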
@ -145,7 +145,7 @@ func (d Docker) WriteToNode(ctx context.Context, content []byte, dest string, mo

// ReadFromNode reads from a given filepath inside the node container
func (d Docker) ReadFromNode(ctx context.Context, path string, node *k3d.Node) (io.ReadCloser, error) {
log.Tracef("Reading path %s from node %s...", path, node.Name)
l.Log().Tracef("Reading path %s from node %s...", path, node.Name)
nodeContainer, err := getNodeContainer(ctx, node)
if err != nil {
return nil, fmt.Errorf("Failed to find container for node '%s': %+v", node.Name, err)

@ -175,7 +175,7 @@ func GetDockerClient() (*client.Client, error) {
}

newClientOpts := flags.NewClientOptions()
newClientOpts.Common.LogLevel = log.GetLevel().String() // this is needed, as the following Initialize() call will set a new log level on the global logrus instance
newClientOpts.Common.LogLevel = l.Log().GetLevel().String() // this is needed, as the following Initialize() call will set a new log level on the global logrus instance

err = dockerCli.Initialize(newClientOpts)
if err != nil {
@ -27,8 +27,8 @@ import (

"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/volume"
l "github.com/rancher/k3d/v4/pkg/logger"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)

// CreateVolume creates a new named volume

@ -36,7 +36,7 @@ func (d Docker) CreateVolume(ctx context.Context, name string, labels map[string
// (0) create new docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return err
}
defer docker.Close()

@ -58,10 +58,10 @@ func (d Docker) CreateVolume(ctx context.Context, name string, labels map[string

vol, err := docker.VolumeCreate(ctx, volumeCreateOptions)
if err != nil {
log.Errorf("Failed to create volume '%s'", name)
l.Log().Errorf("Failed to create volume '%s'", name)
return err
}
log.Infof("Created volume '%s'", vol.Name)
l.Log().Infof("Created volume '%s'", vol.Name)
return nil
}

@ -70,7 +70,7 @@ func (d Docker) DeleteVolume(ctx context.Context, name string) error {
// (0) create new docker client
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return err
}
defer docker.Close()

@ -78,21 +78,21 @@ func (d Docker) DeleteVolume(ctx context.Context, name string) error {
// get volume and delete it
vol, err := docker.VolumeInspect(ctx, name)
if err != nil {
log.Errorf("Failed to find volume '%s'", name)
l.Log().Errorf("Failed to find volume '%s'", name)
return err
}

// check if volume is still in use
if vol.UsageData != nil {
if vol.UsageData.RefCount > 0 {
log.Errorf("Failed to delete volume '%s'", vol.Name)
l.Log().Errorf("Failed to delete volume '%s'", vol.Name)
return fmt.Errorf("Volume '%s' is still referenced by %d containers", name, vol.UsageData.RefCount)
}
}

// remove volume
if err := docker.VolumeRemove(ctx, name, true); err != nil {
log.Errorf("Failed to delete volume '%s'", name)
l.Log().Errorf("Failed to delete volume '%s'", name)
return err
}

@ -105,7 +105,7 @@ func (d Docker) GetVolume(name string) (string, error) {
ctx := context.Background()
docker, err := GetDockerClient()
if err != nil {
log.Errorln("Failed to create docker client")
l.Log().Errorln("Failed to create docker client")
return "", err
}
defer docker.Close()
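The RefCount guard above means deleting an in-use volume fails fast with a descriptive error instead of an opaque docker one. A hypothetical call (volume name is illustrative):

d := docker.Docker{} // the runtime type declared earlier in this diff
if err := d.DeleteVolume(ctx, "k3d-mycluster-images"); err != nil {
	// e.g. "Volume 'k3d-mycluster-images' is still referenced by 2 containers"
	l.Log().Errorln(err)
}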
@ -29,7 +29,7 @@ import (

"github.com/rancher/k3d/v4/pkg/runtimes"

log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

// ValidateVolumeMount checks, if the source of volume mounts exists and if the destination is an absolute path

@ -78,7 +78,7 @@ func ValidateVolumeMount(runtime runtimes.Runtime, volumeMount string) error {
}
if !isNamedVolume {
if _, err := os.Stat(src); err != nil {
log.Warnf("Failed to stat file/directory/named volume that you're trying to mount: '%s' in '%s' -> Please make sure it exists", src, volumeMount)
l.Log().Warnf("Failed to stat file/directory/named volume that you're trying to mount: '%s' in '%s' -> Please make sure it exists", src, volumeMount)
}
}
}
@ -32,10 +32,10 @@ import (
"time"

k3dc "github.com/rancher/k3d/v4/pkg/client"
l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/version"
log "github.com/sirupsen/logrus"
)

// ImageImportIntoClusterMulti starts up a k3d tools container for the selected cluster and uses it to export

@ -53,7 +53,7 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,

cluster, err = k3dc.ClusterGet(ctx, runtime, cluster)
if err != nil {
log.Errorf("Failed to find the specified cluster")
l.Log().Errorf("Failed to find the specified cluster")
return err
}

@ -74,13 +74,13 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,
return fmt.Errorf("Failed to find image volume for cluster '%s'", cluster.Name)
}

log.Debugf("Attaching to cluster's image volume '%s'", imageVolume)
l.Log().Debugf("Attaching to cluster's image volume '%s'", imageVolume)

// create tools node to export images
var toolsNode *k3d.Node
toolsNode, err = runtime.GetNode(ctx, &k3d.Node{Name: fmt.Sprintf("%s-%s-tools", k3d.DefaultObjectNamePrefix, cluster.Name)})
if err != nil || toolsNode == nil {
log.Infoln("Starting new tools node...")
l.Log().Infoln("Starting new tools node...")
toolsNode, err = runToolsNode( // TODO: re-use existing container
ctx,
runtime,

@ -91,10 +91,10 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,
fmt.Sprintf("%s:%s", runtime.GetRuntimePath(), runtime.GetRuntimePath()),
})
if err != nil {
log.Errorf("Failed to run tools container for cluster '%s'", cluster.Name)
l.Log().Errorf("Failed to run tools container for cluster '%s'", cluster.Name)
}
} else if !toolsNode.State.Running {
log.Infof("Starting existing tools node %s...", toolsNode.Name)
l.Log().Infof("Starting existing tools node %s...", toolsNode.Name)
if err := runtime.StartNode(ctx, toolsNode); err != nil {
return fmt.Errorf("error starting existing tools node %s: %v", toolsNode.Name, err)
}

@ -112,10 +112,10 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,

if len(imagesFromRuntime) > 0 {
// save image to tarfile in shared volume
log.Infof("Saving %d image(s) from runtime...", len(imagesFromRuntime))
l.Log().Infof("Saving %d image(s) from runtime...", len(imagesFromRuntime))
tarName := fmt.Sprintf("%s/k3d-%s-images-%s.tar", k3d.DefaultImageVolumeMountPath, cluster.Name, time.Now().Format("20060102150405"))
if err := runtime.ExecInNode(ctx, toolsNode, append([]string{"./k3d-tools", "save-image", "-d", tarName}, imagesFromRuntime...)); err != nil {
log.Errorf("Failed to save image(s) in tools container for cluster '%s'", cluster.Name)
l.Log().Errorf("Failed to save image(s) in tools container for cluster '%s'", cluster.Name)
return err
}
importTarNames = append(importTarNames, tarName)

@ -123,11 +123,11 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,

if len(imagesFromTar) > 0 {
// copy tarfiles to shared volume
log.Infof("Saving %d tarball(s) to shared image volume...", len(imagesFromTar))
l.Log().Infof("Saving %d tarball(s) to shared image volume...", len(imagesFromTar))
for _, file := range imagesFromTar {
tarName := fmt.Sprintf("%s/k3d-%s-images-%s-file-%s", k3d.DefaultImageVolumeMountPath, cluster.Name, time.Now().Format("20060102150405"), path.Base(file))
if err := runtime.CopyToNode(ctx, file, tarName, toolsNode); err != nil {
log.Errorf("Failed to copy image tar '%s' to tools node! Error below:\n%+v", file, err)
l.Log().Errorf("Failed to copy image tar '%s' to tools node! Error below:\n%+v", file, err)
continue
}
importTarNames = append(importTarNames, tarName)

@ -135,7 +135,7 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,
}

// import image in each node
log.Infoln("Importing images into nodes...")
l.Log().Infoln("Importing images into nodes...")
var importWaitgroup sync.WaitGroup
for _, tarName := range importTarNames {
for _, node := range cluster.Nodes {

@ -143,10 +143,10 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
importWaitgroup.Add(1)
go func(node *k3d.Node, wg *sync.WaitGroup, tarPath string) {
log.Infof("Importing images from tarball '%s' into node '%s'...", tarPath, node.Name)
l.Log().Infof("Importing images from tarball '%s' into node '%s'...", tarPath, node.Name)
if err := runtime.ExecInNode(ctx, node, []string{"ctr", "image", "import", tarPath}); err != nil {
log.Errorf("Failed to import images in node '%s'", node.Name)
log.Errorln(err)
l.Log().Errorf("Failed to import images in node '%s'", node.Name)
l.Log().Errorln(err)
}
wg.Done()
}(node, &importWaitgroup, tarName)

@ -157,22 +157,22 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,

// remove tarball
if !opts.KeepTar && len(importTarNames) > 0 {
log.Infoln("Removing the tarball(s) from image volume...")
l.Log().Infoln("Removing the tarball(s) from image volume...")
if err := runtime.ExecInNode(ctx, toolsNode, []string{"rm", "-f", strings.Join(importTarNames, " ")}); err != nil {
log.Errorf("Failed to delete one or more tarballs from '%+v'", importTarNames)
log.Errorln(err)
l.Log().Errorf("Failed to delete one or more tarballs from '%+v'", importTarNames)
l.Log().Errorln(err)
}
}

// delete tools container
if !opts.KeepToolsNode {
log.Infoln("Removing k3d-tools node...")
l.Log().Infoln("Removing k3d-tools node...")
if err := runtime.DeleteNode(ctx, toolsNode); err != nil {
log.Errorf("Failed to delete tools node '%s': Try to delete it manually", toolsNode.Name)
l.Log().Errorf("Failed to delete tools node '%s': Try to delete it manually", toolsNode.Name)
}
}

log.Infoln("Successfully imported image(s)")
l.Log().Infoln("Successfully imported image(s)")

return nil
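The tar names generated above embed a timestamp via Go's reference-time layout: the layout string "20060102150405" renders as yyyyMMddHHmmss. For example:

// Go layouts are written against the fixed reference time
// "Mon Jan 2 15:04:05 MST 2006".
fmt.Println(time.Now().Format("20060102150405")) // e.g. "20210416133027"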
@ -181,25 +181,25 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,
func findImages(ctx context.Context, runtime runtimes.Runtime, requestedImages []string) (imagesFromRuntime, imagesFromTar []string, err error) {
runtimeImages, err := runtime.GetImages(ctx)
if err != nil {
log.Errorln("Failed to fetch list of existing images from runtime")
l.Log().Errorln("Failed to fetch list of existing images from runtime")
return nil, nil, err
}

for _, requestedImage := range requestedImages {
if isFile(requestedImage) {
imagesFromTar = append(imagesFromTar, requestedImage)
log.Debugf("Selected image '%s' is a file", requestedImage)
l.Log().Debugf("Selected image '%s' is a file", requestedImage)
break
}

runtimeImage, found := findRuntimeImage(requestedImage, runtimeImages)
if found {
imagesFromRuntime = append(imagesFromRuntime, runtimeImage)
log.Debugf("Selected image '%s' (found as '%s') in runtime", requestedImage, runtimeImage)
l.Log().Debugf("Selected image '%s' (found as '%s') in runtime", requestedImage, runtimeImage)
break
}

log.Warnf("Image '%s' is not a file and couldn't be found in the container runtime", requestedImage)
l.Log().Warnf("Image '%s' is not a file and couldn't be found in the container runtime", requestedImage)
}
return imagesFromRuntime, imagesFromTar, err
}

@ -294,7 +294,7 @@ func runToolsNode(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cl
}
node.RuntimeLabels[k3d.LabelClusterName] = cluster.Name
if err := k3dc.NodeRun(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
log.Errorf("Failed to create tools container for cluster '%s'", cluster.Name)
l.Log().Errorf("Failed to create tools container for cluster '%s'", cluster.Name)
return node, err
}
@ -25,8 +25,8 @@ import (
"fmt"
"os"

l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/rancher/k3d/v4/version"
log "github.com/sirupsen/logrus"
)

// DefaultK3sImageRepo specifies the default image repository for the used k3s image

@ -46,7 +46,7 @@ const DefaultRegistryImageTag = "2"

func GetLoadbalancerImage() string {
if img := os.Getenv("K3D_IMAGE_LOADBALANCER"); img != "" {
log.Infof("Loadbalancer image set from env var $K3D_IMAGE_LOADBALANCER: %s", img)
l.Log().Infof("Loadbalancer image set from env var $K3D_IMAGE_LOADBALANCER: %s", img)
return img
}

@ -55,7 +55,7 @@ func GetLoadbalancerImage() string {

func GetToolsImage() string {
if img := os.Getenv("K3D_IMAGE_TOOLS"); img != "" {
log.Infof("Tools image set from env var $K3D_IMAGE_TOOLS: %s", img)
l.Log().Infof("Tools image set from env var $K3D_IMAGE_TOOLS: %s", img)
return img
}
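Both helpers resolve an environment override before falling back to the compiled-in default, which is useful for mirrored or air-gapped registries. A hypothetical override (image reference is illustrative):

os.Setenv("K3D_IMAGE_LOADBALANCER", "registry.local/k3d-proxy:mytag")
img := GetLoadbalancerImage() // logs the override and returns the mirrored image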
@ -26,7 +26,7 @@ import (
"path"

homedir "github.com/mitchellh/go-homedir"
log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

// GetConfigDirOrCreate will return the base path of the k3d config directory or create it if it doesn't exist yet

@ -36,14 +36,14 @@ func GetConfigDirOrCreate() (string, error) {
// build the path
homeDir, err := homedir.Dir()
if err != nil {
log.Errorln("Failed to get user's home directory")
l.Log().Errorln("Failed to get user's home directory")
return "", err
}
configDir := path.Join(homeDir, ".k3d")

// create directories if necessary
if err := createDirIfNotExists(configDir); err != nil {
log.Errorf("Failed to create config path '%s'", configDir)
l.Log().Errorf("Failed to create config path '%s'", configDir)
return "", err
}
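GetConfigDirOrCreate is the entry point for anything k3d persists under ~/.k3d. A hypothetical caller (assuming the function's package is imported alongside the logger):

configDir, err := GetConfigDirOrCreate()
if err != nil {
	l.Log().Fatalln(err)
}
l.Log().Debugf("Using k3d config directory %s", configDir)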
@ -27,8 +27,8 @@ import (
"strconv"
"strings"

l "github.com/rancher/k3d/v4/pkg/logger"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)

const (

@ -86,7 +86,7 @@ func FilterNodesWithSuffix(nodes []*k3d.Node, nodefilters []string) (map[string]
return nil, err
}

log.Tracef("Filtered %d nodes for suffix '%s' (filter: %s)", len(filteredNodes), suffix, nf)
l.Log().Tracef("Filtered %d nodes for suffix '%s' (filter: %s)", len(filteredNodes), suffix, nf)

result[suffix] = append(result[suffix], filteredNodes...)
}

@ -97,10 +97,10 @@ func FilterNodesWithSuffix(nodes []*k3d.Node, nodefilters []string) (map[string]
// FilterNodes takes a string filter to return a filtered list of nodes
func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {

log.Tracef("Filtering %d nodes by %s", len(nodes), filters)
l.Log().Tracef("Filtering %d nodes by %s", len(nodes), filters)

if len(filters) == 0 || len(filters[0]) == 0 {
log.Warnln("No node filter specified")
l.Log().Warnln("No node filter specified")
return nodes, nil
}

@ -137,7 +137,7 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
// if one of the filters is 'all', we only return this and drop all others
if submatches["group"] == "all" {
if len(filters) > 1 {
log.Warnf("Node filter 'all' set, but more were specified in '%+v'", filters)
l.Log().Warnf("Node filter 'all' set, but more were specified in '%+v'", filters)
}
return nodes, nil
}

@ -243,7 +243,7 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {

}

log.Tracef("Filtered %d nodes (filter: %s)", len(filteredNodes), filters)
l.Log().Tracef("Filtered %d nodes (filter: %s)", len(filteredNodes), filters)

return filteredNodes, nil
}
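Note the 'all' short-circuit above: mixing 'all' with more specific filters only emits a warning and still returns every node. A hypothetical call against the signature shown (filter strings are illustrative of k3d's server[0]-style syntax):

filtered, err := FilterNodes(cluster.Nodes, []string{"all", "server[0]"})
if err != nil {
	l.Log().Fatalln(err)
}
// warns: Node filter 'all' set, but more were specified in '[all server[0]]'
l.Log().Debugf("selected %d of %d nodes", len(filtered), len(cluster.Nodes))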
@ -28,7 +28,7 @@ import (
"strings"

dockerunits "github.com/docker/go-units"
log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

const (

@ -64,7 +64,7 @@ func GetNodeFakerDirOrCreate(name string) (string, error) {

// create directories if necessary
if err := createDirIfNotExists(fakeDir); err != nil {
log.Errorf("Failed to create fake files path '%s'", fakeDir)
l.Log().Errorf("Failed to create fake files path '%s'", fakeDir)
return "", err
}

@ -112,7 +112,7 @@ func MakeFakeEdac(nodeName string) (string, error) {
edacPath := path.Join(dir, "edac")
// create directories if necessary
if err := createDirIfNotExists(edacPath); err != nil {
log.Errorf("Failed to create fake edac path '%s'", edacPath)
l.Log().Errorf("Failed to create fake edac path '%s'", edacPath)
return "", err
}
@ -26,20 +26,20 @@ import (
"net"

"github.com/docker/go-connections/nat"
log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

// GetFreePort tries to fetch an open port from the OS-Kernel
func GetFreePort() (int, error) {
tcpAddress, err := net.ResolveTCPAddr("tcp", "localhost:0")
if err != nil {
log.Errorln("Failed to resolve address")
l.Log().Errorln("Failed to resolve address")
return 0, err
}

tcpListener, err := net.ListenTCP("tcp", tcpAddress)
if err != nil {
log.Errorln("Failed to create TCP Listener")
l.Log().Errorln("Failed to create TCP Listener")
return 0, err
}
defer tcpListener.Close()
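GetFreePort asks the kernel for an ephemeral port by binding localhost:0 and immediately closing the listener again. Hypothetical usage:

port, err := GetFreePort()
if err != nil {
	l.Log().Fatalln(err)
}
l.Log().Infof("Mapping the API port to free host port %d", port)

Note that the port is only probably free: another process may grab it between the listener closing and the port actually being bound again, so callers should still handle bind errors.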
@ -25,7 +25,7 @@ import (
"os"

"github.com/heroku/docker-registry-client/registry"
log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v4/pkg/logger"
)

// Version is the string that contains version

@ -48,7 +48,7 @@ func GetVersion() string {
// GetHelperImageVersion returns the CLI version or 'latest'
func GetHelperImageVersion() string {
if tag := os.Getenv("K3D_HELPER_IMAGE_TAG"); tag != "" {
log.Infoln("Helper image tag set from env var")
l.Log().Infoln("Helper image tag set from env var")
return tag
}
if len(HelperVersionOverride) > 0 {

@ -65,7 +65,7 @@ func GetK3sVersion(latest bool) string {
if latest {
version, err := fetchLatestK3sVersion()
if err != nil || version == "" {
log.Warnln("Failed to fetch latest K3s version from DockerHub, falling back to hardcoded version.")
l.Log().Warnln("Failed to fetch latest K3s version from DockerHub, falling back to hardcoded version.")
return K3sVersion
}
return version

@ -92,8 +92,8 @@ func fetchLatestK3sVersion() (string, error) {
return "", err
}

log.Debugln("Fetched the following tags for rancher/k3s from DockerHub:")
log.Debugln(tags)
l.Log().Debugln("Fetched the following tags for rancher/k3s from DockerHub:")
l.Log().Debugln(tags)

return "sampleTag", nil
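Beyond decoupling k3d from the docker CLI's habit of reconfiguring the global logrus instance (see the GetDockerClient hunk earlier), the instance-based logger makes output swappable. A sketch of capturing it in a test (test name and assertion are illustrative):

func TestFallbackWarning(t *testing.T) {
	var buf bytes.Buffer
	l.Log().SetOutput(&buf) // redirect the shared logger; logrus' global stays untouched
	l.Log().Warnln("Failed to fetch latest K3s version from DockerHub, falling back to hardcoded version.")
	if !strings.Contains(buf.String(), "falling back") {
		t.Fatal("expected fallback warning in log output")
	}
}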