mirror of https://github.com/siderolabs/sidero.git
Bump CAPI, controller-runtime, Talos dependencies, Kubernetes. Fix some compatibility issues, but should be no functional changes.

Signed-off-by: Andrey Smirnov <andrey.smirnov@siderolabs.com>
200 lines
6.6 KiB
Go
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package main

import (
	"context"
	"flag"
	"os"

	debug "github.com/siderolabs/go-debug"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	cgrecord "k8s.io/client-go/tools/record"
	capiv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/controllers/remote"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

	infrav1alpha2 "github.com/siderolabs/sidero/app/caps-controller-manager/api/v1alpha2"
	infrav1alpha3 "github.com/siderolabs/sidero/app/caps-controller-manager/api/v1alpha3"
	"github.com/siderolabs/sidero/app/caps-controller-manager/controllers"
	metalv1 "github.com/siderolabs/sidero/app/sidero-controller-manager/api/v1alpha2"
	// +kubebuilder:scaffold:imports
)

const (
	defaultMaxConcurrentReconciles = 10
	debugAddr                      = ":9994"
)

var (
	scheme   = runtime.NewScheme()
	setupLog = ctrl.Log.WithName("setup")
)

//nolint:wsl
func init() {
	_ = clientgoscheme.AddToScheme(scheme)
	_ = capiv1.AddToScheme(scheme)
	_ = infrav1alpha2.AddToScheme(scheme)
	_ = infrav1alpha3.AddToScheme(scheme)
	_ = metalv1.AddToScheme(scheme)
	// +kubebuilder:scaffold:scheme
}

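// main wires up the caps-controller-manager: it parses flags, configures logging,
// starts the debug server, constructs the controller-runtime manager, and registers
// the reconcilers, webhooks, and health checks.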
func main() {
	var (
		metricsAddr          string
		healthAddr           string
		enableLeaderElection bool
		webhookPort          int
	)

	flag.StringVar(&metricsAddr, "metrics-bind-addr", ":8080", "The address the metric endpoint binds to.")
	flag.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.")
	flag.BoolVar(&enableLeaderElection, "enable-leader-election", true,
		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
	flag.IntVar(&webhookPort, "webhook-port", 9443, "Webhook Server port, disabled by default. When enabled, the manager will only work as webhook server, no reconcilers are installed.")
	flag.Parse()

	ctrl.SetLogger(zap.New(func(o *zap.Options) {
		o.Development = true
	}))

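	// Serve the go-debug endpoint in the background; if it fails to start, exit rather than run without it.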
	go func() {
		debugLogFunc := func(msg string) {
			setupLog.Info(msg)
		}
		if err := debug.ListenAndServe(context.TODO(), debugAddr, debugLogFunc); err != nil {
			setupLog.Error(err, "failed to start debug server")
			os.Exit(1)
		}
	}()

	// Machine and cluster operations can create enough events to trigger the event recorder spam filter
	// Setting the burst size higher ensures all events will be recorded and submitted to the API
	broadcaster := cgrecord.NewBroadcasterWithCorrelatorOptions(cgrecord.CorrelatorOptions{
		BurstSize: 100,
	})

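	// Build the manager; leader election defaults to true, so only one replica reconciles at a time.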
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme:                 scheme,
		MetricsBindAddress:     metricsAddr,
		LeaderElection:         enableLeaderElection,
		LeaderElectionID:       "controller-leader-election-capm",
		Port:                   webhookPort,
		EventBroadcaster:       broadcaster,
		HealthProbeBindAddress: healthAddr,
	})
	if err != nil {
		setupLog.Error(err, "unable to start manager")
		os.Exit(1)
	}

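	// The returned context is canceled on SIGINT/SIGTERM and drives shutdown of the manager and controllers.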
	ctx := ctrl.SetupSignalHandler()

	// Set up a ClusterCacheTracker to provide to controllers
	// requiring a connection to a remote cluster
	tracker, err := remote.NewClusterCacheTracker(mgr, remote.ClusterCacheTrackerOptions{
		Indexes:               []remote.Index{remote.NodeProviderIDIndex},
		ClientUncachedObjects: []client.Object{},
	})
	if err != nil {
		setupLog.Error(err, "unable to create cluster cache tracker")
		os.Exit(1)
	}

	if err := (&remote.ClusterCacheReconciler{
		Client:  mgr.GetClient(),
		Tracker: tracker,
	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: defaultMaxConcurrentReconciles}); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "ClusterCacheReconciler")
		os.Exit(1)
	}

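	// Event recorder shared by the reconcilers registered below.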
	recorder := mgr.GetEventRecorderFor("caps-controller-manager")

	if err = (&controllers.MetalClusterReconciler{
		Client: mgr.GetClient(),
		Log:    ctrl.Log.WithName("controllers").WithName("MetalCluster"),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: defaultMaxConcurrentReconciles}); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "MetalCluster")
		os.Exit(1)
	}

	if err = (&controllers.MetalMachineReconciler{
		Client:   mgr.GetClient(),
		Log:      ctrl.Log.WithName("controllers").WithName("MetalMachine"),
		Scheme:   mgr.GetScheme(),
		Recorder: recorder,
		Tracker:  tracker,
	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: defaultMaxConcurrentReconciles}); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "MetalMachine")
		os.Exit(1)
	}

	if err = (&controllers.ServerBindingReconciler{
		Client:   mgr.GetClient(),
		Log:      ctrl.Log.WithName("controllers").WithName("ServerBinding"),
		Scheme:   mgr.GetScheme(),
		Recorder: recorder,
	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: defaultMaxConcurrentReconciles}); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "ServerBinding")
		os.Exit(1)
	}

	// +kubebuilder:scaffold:builder

	setupWebhooks(mgr)
	setupChecks(mgr)

	setupLog.Info("starting manager")

	if err := mgr.Start(ctx); err != nil {
		setupLog.Error(err, "problem running manager")
		os.Exit(1)
	}
}

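// setupWebhooks registers the admission webhooks for the infrastructure API types
// with the manager's webhook server; any registration failure is fatal.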
func setupWebhooks(mgr ctrl.Manager) {
	var err error

	if err = (&infrav1alpha3.MetalCluster{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "MetalCluster")
		os.Exit(1)
	}

	if err = (&infrav1alpha3.MetalMachine{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "MetalMachine")
		os.Exit(1)
	}

	if err = (&infrav1alpha3.MetalMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "MetalMachineTemplate")
		os.Exit(1)
	}

	if err = (&infrav1alpha3.ServerBinding{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "ServerBinding")
		os.Exit(1)
	}
}

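// setupChecks wires the readiness and liveness probes to the webhook server's
// started checker, so the manager reports healthy only once the webhook server is serving.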
func setupChecks(mgr ctrl.Manager) {
	if err := mgr.AddReadyzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil {
		setupLog.Error(err, "unable to create ready check")
		os.Exit(1)
	}

	if err := mgr.AddHealthzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil {
		setupLog.Error(err, "unable to create health check")
		os.Exit(1)
	}
}