Mirror of https://github.com/siderolabs/talos.git (synced 2025-11-01 17:01:10 +01:00)
fix: make output of upgrade-k8s command less scary
This removes `retrying error` messages while waiting for the API server pod state to reflect changes from the updated static pod definition, logs more lines to report progress, and skips `kube-proxy` when it is not found (as we allow it to be disabled).

```
$ talosctl upgrade-k8s -n 172.20.0.2 --from 1.21.0 --to 1.21.2
discovered master nodes ["172.20.0.2" "172.20.0.3" "172.20.0.4"]
updating "kube-apiserver" to version "1.21.2"
 > "172.20.0.2": starting update
 > "172.20.0.2": machine configuration patched
 > "172.20.0.2": waiting for API server state pod update
 < "172.20.0.2": successfully updated
 > "172.20.0.3": starting update
 > "172.20.0.3": machine configuration patched
 > "172.20.0.3": waiting for API server state pod update
 < "172.20.0.3": successfully updated
 > "172.20.0.4": starting update
 > "172.20.0.4": machine configuration patched
 > "172.20.0.4": waiting for API server state pod update
 < "172.20.0.4": successfully updated
updating "kube-controller-manager" to version "1.21.2"
 > "172.20.0.2": starting update
 > "172.20.0.2": machine configuration patched
 > "172.20.0.2": waiting for API server state pod update
 < "172.20.0.2": successfully updated
 > "172.20.0.3": starting update
 > "172.20.0.3": machine configuration patched
 > "172.20.0.3": waiting for API server state pod update
 < "172.20.0.3": successfully updated
 > "172.20.0.4": starting update
 > "172.20.0.4": machine configuration patched
 > "172.20.0.4": waiting for API server state pod update
 < "172.20.0.4": successfully updated
updating "kube-scheduler" to version "1.21.2"
 > "172.20.0.2": starting update
 > "172.20.0.2": machine configuration patched
 > "172.20.0.2": waiting for API server state pod update
 < "172.20.0.2": successfully updated
 > "172.20.0.3": starting update
 > "172.20.0.3": machine configuration patched
 > "172.20.0.3": waiting for API server state pod update
 < "172.20.0.3": successfully updated
 > "172.20.0.4": starting update
 > "172.20.0.4": machine configuration patched
 > "172.20.0.4": waiting for API server state pod update
 < "172.20.0.4": successfully updated
updating daemonset "kube-proxy" to version "1.21.2"
kube-proxy skipped as DaemonSet was not found
```

Signed-off-by: Andrey Smirnov <smirnov.andrey@gmail.com>
Parent: 7f8e50de4d
Commit: e883c12b31
```diff
@@ -13,6 +13,7 @@ import (
 	"github.com/cosi-project/runtime/pkg/resource"
 	"github.com/cosi-project/runtime/pkg/state"
 	"github.com/talos-systems/go-retry/retry"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/talos-systems/talos/pkg/cluster"
```
```diff
@@ -73,7 +74,11 @@ func UpgradeTalosManaged(ctx context.Context, cluster UpgradeProvider, options U
 	}
 
 	if err = hyperkubeUpgradeDs(ctx, k8sClient.Clientset, kubeProxy, options); err != nil {
-		return fmt.Errorf("error updating kube-proxy: %w", err)
+		if apierrors.IsNotFound(err) {
+			fmt.Println("kube-proxy skipped as DaemonSet was not found")
+		} else {
+			return fmt.Errorf("error updating kube-proxy: %w", err)
+		}
 	}
 
 	return nil
```
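For readers unfamiliar with the pattern above: `apierrors.IsNotFound` (from `k8s.io/apimachinery/pkg/api/errors`) inspects a Kubernetes API status error and reports whether its reason is `NotFound`, which is what lets the upgrade treat a deliberately disabled `kube-proxy` as a skip rather than a failure. A minimal sketch of the same pattern against client-go — the helper name, namespace, and clientset wiring are assumptions for illustration, not code from this commit:

```go
package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// checkKubeProxy is a hypothetical helper: a missing DaemonSet is reported
// and skipped, while any other API error is propagated to the caller.
func checkKubeProxy(ctx context.Context, clientset *kubernetes.Clientset) error {
	_, err := clientset.AppsV1().DaemonSets("kube-system").Get(ctx, "kube-proxy", metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		fmt.Println("kube-proxy skipped as DaemonSet was not found")

		return nil
	}

	return err // nil on success, fatal otherwise
}
```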
```diff
@@ -103,7 +108,7 @@ func upgradeNodeConfigPatch(ctx context.Context, cluster UpgradeProvider, option
 
 	ctx = client.WithNodes(ctx, node)
 
-	fmt.Printf(" > updating node %q\n", node)
+	fmt.Printf(" > %q: starting update\n", node)
 
 	watchClient, err := c.Resources.Watch(ctx, config.NamespaceName, config.K8sControlPlaneType, service)
 	if err != nil {
```
```diff
@@ -137,10 +142,15 @@ func upgradeNodeConfigPatch(ctx context.Context, cluster UpgradeProvider, option
 		}
 	}
 
+	fmt.Printf(" > %q: machine configuration patched\n", node)
+	fmt.Printf(" > %q: waiting for API server state pod update\n", node)
+
 	var expectedConfigVersion string
 
 	if !skipConfigWait {
-		watchUpdated, err := watchClient.Recv()
+		var watchUpdated client.WatchResponse
+
+		watchUpdated, err = watchClient.Recv()
 		if err != nil {
 			return fmt.Errorf("error watching config: %w", err)
 		}
```
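The switch from `watchUpdated, err := watchClient.Recv()` to a pre-declared `var watchUpdated client.WatchResponse` plus plain assignment is a standard Go shadowing fix: `:=` inside the `if !skipConfigWait` block would declare a new, block-scoped `err` instead of reusing the function-scoped one that the retry call below now assigns to. A self-contained sketch of the pitfall, with invented names for illustration:

```go
package main

import "fmt"

func shadowed(fail bool) (err error) {
	if fail {
		// `:=` declares a NEW `err` scoped to this block; the named
		// return value in the outer scope is never assigned.
		msg, err := "inner", fmt.Errorf("inner failure")
		_, _ = msg, err
	}

	return // returns nil even when fail is true
}

func main() {
	fmt.Println(shadowed(true)) // prints <nil>: the inner error was lost
}
```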
```diff
@@ -154,9 +164,15 @@ func upgradeNodeConfigPatch(ctx context.Context, cluster UpgradeProvider, option
 		expectedConfigVersion = watchInitial.Resource.Metadata().Version().String()
 	}
 
-	return retry.Constant(3*time.Minute, retry.WithUnits(10*time.Second), retry.WithErrorLogging(true)).Retry(func() error {
+	if err = retry.Constant(3*time.Minute, retry.WithUnits(10*time.Second)).Retry(func() error {
 		return checkPodStatus(ctx, cluster, service, node, expectedConfigVersion)
-	})
+	}); err != nil {
+		return err
+	}
+
+	fmt.Printf(" < %q: successfully updated\n", node)
+
+	return nil
 }
 
 var errUpdateSkipped = fmt.Errorf("update skipped")
```
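The last hunk is where the scary output actually disappears: `retry.WithErrorLogging(true)` is the go-retry option that prints each intermediate `retrying error: ...` line, and dropping it keeps the same 3-minute loop with 10-second units while staying quiet until the final result. A small sketch of the quiet form, with an invented flaky check standing in for `checkPodStatus`; `retry.ExpectedError` marks an error as retryable in go-retry:

```go
package main

import (
	"fmt"
	"time"

	"github.com/talos-systems/go-retry/retry"
)

func main() {
	attempts := 0

	// Invented flaky check: fails twice, then succeeds, mimicking a static
	// pod definition that takes a few polls to converge.
	check := func() error {
		attempts++
		if attempts < 3 {
			return retry.ExpectedError(fmt.Errorf("pod not updated yet"))
		}

		return nil
	}

	// Quiet loop, as in the patched code; adding retry.WithErrorLogging(true)
	// back would print every intermediate failure as `retrying error: ...`.
	err := retry.Constant(30*time.Second, retry.WithUnits(time.Second)).Retry(check)
	fmt.Printf("result after %d attempts: %v\n", attempts, err)
}
```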