mirror of
https://github.com/siderolabs/sidero.git
synced 2025-10-24 05:51:20 +02:00
Fixes #717. `caps-controller-manager` has proper webhook support, so use that for readiness/liveness checks (the standard CAPI way). `sidero-controller-manager` doesn't have webhooks (we should fix that eventually!), so it uses an iPXE check instead. Also: * bump Talos to 0.14.1 * use the Talos-provided default arguments for the agent environment (as the agent runs a Talos kernel, this makes sense). Signed-off-by: Andrey Smirnov <andrey.smirnov@talos-systems.com>
189 lines
6.2 KiB
Go
189 lines
6.2 KiB
Go
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
|
|
package main
|
|
|
|
import (
|
|
"context"
|
|
"flag"
|
|
"os"
|
|
|
|
debug "github.com/talos-systems/go-debug"
|
|
corev1 "k8s.io/api/core/v1"
|
|
"k8s.io/apimachinery/pkg/runtime"
|
|
"k8s.io/client-go/kubernetes"
|
|
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
|
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
|
cgrecord "k8s.io/client-go/tools/record"
|
|
capiv1 "sigs.k8s.io/cluster-api/api/v1beta1"
|
|
ctrl "sigs.k8s.io/controller-runtime"
|
|
"sigs.k8s.io/controller-runtime/pkg/controller"
|
|
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
|
|
|
infrav1alpha2 "github.com/talos-systems/sidero/app/caps-controller-manager/api/v1alpha2"
|
|
infrav1alpha3 "github.com/talos-systems/sidero/app/caps-controller-manager/api/v1alpha3"
|
|
"github.com/talos-systems/sidero/app/caps-controller-manager/controllers"
|
|
metalv1alpha1 "github.com/talos-systems/sidero/app/sidero-controller-manager/api/v1alpha1"
|
|
// +kubebuilder:scaffold:imports
|
|
)
|
|
|
|
const (
	// defaultMaxConcurrentReconciles bounds how many reconcile loops each
	// controller may run concurrently (passed to controller.Options below).
	defaultMaxConcurrentReconciles = 10

	// debugAddr is the listen address for the go-debug server started in main.
	debugAddr = ":9994"
)
|
|
|
|
var (
	// scheme aggregates every API type this manager works with; populated in init.
	scheme = runtime.NewScheme()

	// setupLog is the logger used during manager bootstrap (and by the debug server).
	setupLog = ctrl.Log.WithName("setup")
)
|
|
|
|
// init registers all API groups used by this manager into the shared scheme:
// client-go core types, Cluster API v1beta1, both Sidero infrastructure API
// versions (v1alpha2 and v1alpha3), and the sidero-controller-manager metal
// types. Errors are deliberately ignored: AddToScheme for generated types
// cannot fail in practice.
//nolint:wsl
func init() {
	_ = clientgoscheme.AddToScheme(scheme)
	_ = capiv1.AddToScheme(scheme)
	_ = infrav1alpha2.AddToScheme(scheme)
	_ = infrav1alpha3.AddToScheme(scheme)
	_ = metalv1alpha1.AddToScheme(scheme)
	// +kubebuilder:scaffold:scheme
}
|
|
|
|
func main() {
|
|
var (
|
|
metricsAddr string
|
|
healthAddr string
|
|
enableLeaderElection bool
|
|
webhookPort int
|
|
)
|
|
|
|
flag.StringVar(&metricsAddr, "metrics-bind-addr", ":8080", "The address the metric endpoint binds to.")
|
|
flag.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.")
|
|
flag.BoolVar(&enableLeaderElection, "enable-leader-election", true,
|
|
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
|
|
flag.IntVar(&webhookPort, "webhook-port", 9443, "Webhook Server port, disabled by default. When enabled, the manager will only work as webhook server, no reconcilers are installed.")
|
|
flag.Parse()
|
|
|
|
ctrl.SetLogger(zap.New(func(o *zap.Options) {
|
|
o.Development = true
|
|
}))
|
|
|
|
go func() {
|
|
debugLogFunc := func(msg string) {
|
|
setupLog.Info(msg)
|
|
}
|
|
if err := debug.ListenAndServe(context.TODO(), debugAddr, debugLogFunc); err != nil {
|
|
setupLog.Error(err, "failed to start debug server")
|
|
os.Exit(1)
|
|
}
|
|
}()
|
|
|
|
// Machine and cluster operations can create enough events to trigger the event recorder spam filter
|
|
// Setting the burst size higher ensures all events will be recorded and submitted to the API
|
|
broadcaster := cgrecord.NewBroadcasterWithCorrelatorOptions(cgrecord.CorrelatorOptions{
|
|
BurstSize: 100,
|
|
})
|
|
|
|
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
|
|
Scheme: scheme,
|
|
MetricsBindAddress: metricsAddr,
|
|
LeaderElection: enableLeaderElection,
|
|
LeaderElectionID: "controller-leader-election-capm",
|
|
Port: webhookPort,
|
|
EventBroadcaster: broadcaster,
|
|
HealthProbeBindAddress: healthAddr,
|
|
})
|
|
if err != nil {
|
|
setupLog.Error(err, "unable to start manager")
|
|
os.Exit(1)
|
|
}
|
|
|
|
clientset, err := kubernetes.NewForConfig(mgr.GetConfig())
|
|
if err != nil {
|
|
setupLog.Error(err, "unable to create k8s client")
|
|
os.Exit(1)
|
|
}
|
|
|
|
eventBroadcaster := cgrecord.NewBroadcaster()
|
|
eventBroadcaster.StartRecordingToSink(
|
|
&typedcorev1.EventSinkImpl{
|
|
Interface: clientset.CoreV1().Events(""),
|
|
})
|
|
|
|
recorder := eventBroadcaster.NewRecorder(
|
|
mgr.GetScheme(),
|
|
corev1.EventSource{Component: "caps-controller-manager"})
|
|
|
|
ctx := ctrl.SetupSignalHandler()
|
|
|
|
if err = (&controllers.MetalClusterReconciler{
|
|
Client: mgr.GetClient(),
|
|
Log: ctrl.Log.WithName("controllers").WithName("MetalCluster"),
|
|
Scheme: mgr.GetScheme(),
|
|
}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: defaultMaxConcurrentReconciles}); err != nil {
|
|
setupLog.Error(err, "unable to create controller", "controller", "MetalCluster")
|
|
os.Exit(1)
|
|
}
|
|
|
|
if err = (&controllers.MetalMachineReconciler{
|
|
Client: mgr.GetClient(),
|
|
Log: ctrl.Log.WithName("controllers").WithName("MetalMachine"),
|
|
Scheme: mgr.GetScheme(),
|
|
Recorder: recorder,
|
|
}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: defaultMaxConcurrentReconciles}); err != nil {
|
|
setupLog.Error(err, "unable to create controller", "controller", "MetalMachine")
|
|
os.Exit(1)
|
|
}
|
|
|
|
if err = (&controllers.ServerBindingReconciler{
|
|
Client: mgr.GetClient(),
|
|
Log: ctrl.Log.WithName("controllers").WithName("ServerBinding"),
|
|
Scheme: mgr.GetScheme(),
|
|
Recorder: recorder,
|
|
}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: defaultMaxConcurrentReconciles}); err != nil {
|
|
setupLog.Error(err, "unable to create controller", "controller", "ServerBinding")
|
|
os.Exit(1)
|
|
}
|
|
|
|
if err = (&infrav1alpha3.MetalCluster{}).SetupWebhookWithManager(mgr); err != nil {
|
|
setupLog.Error(err, "unable to create webhook", "webhook", "MetalCluster")
|
|
os.Exit(1)
|
|
}
|
|
|
|
if err = (&infrav1alpha3.MetalMachine{}).SetupWebhookWithManager(mgr); err != nil {
|
|
setupLog.Error(err, "unable to create webhook", "webhook", "MetalMachine")
|
|
os.Exit(1)
|
|
}
|
|
|
|
if err = (&infrav1alpha3.MetalMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil {
|
|
setupLog.Error(err, "unable to create webhook", "webhook", "MetalMachineTemplate")
|
|
os.Exit(1)
|
|
}
|
|
|
|
if err = (&infrav1alpha3.ServerBinding{}).SetupWebhookWithManager(mgr); err != nil {
|
|
setupLog.Error(err, "unable to create webhook", "webhook", "ServerBinding")
|
|
os.Exit(1)
|
|
}
|
|
// +kubebuilder:scaffold:builder
|
|
|
|
setupChecks(mgr)
|
|
|
|
setupLog.Info("starting manager")
|
|
|
|
if err := mgr.Start(ctx); err != nil {
|
|
setupLog.Error(err, "problem running manager")
|
|
os.Exit(1)
|
|
}
|
|
}
|
|
|
|
func setupChecks(mgr ctrl.Manager) {
|
|
if err := mgr.AddReadyzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil {
|
|
setupLog.Error(err, "unable to create ready check")
|
|
os.Exit(1)
|
|
}
|
|
|
|
if err := mgr.AddHealthzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil {
|
|
setupLog.Error(err, "unable to create health check")
|
|
os.Exit(1)
|
|
}
|
|
}
|