From f52fddddee43dbfb83ff332cb21fa0d77b6172af Mon Sep 17 00:00:00 2001
From: Aaron U'Ren
Date: Sat, 4 Sep 2021 16:28:09 -0500
Subject: [PATCH] feat(.golangci.yml): enable gocritic and remediate

---
 .golangci.yml                                  |  1 +
 pkg/cmd/kube-router.go                         | 13 +++---
 .../netpol/network_policy_controller.go        |  5 +--
 pkg/controllers/netpol/policy.go               | 30 ++++++-------
 .../proxy/network_service_graceful.go          |  8 ++--
 .../proxy/network_services_controller.go       | 44 ++++++++++---------
 .../proxy/service_endpoints_sync.go            |  7 +--
 pkg/controllers/routing/bgp_peers.go           |  2 +-
 pkg/controllers/routing/bgp_policies.go        |  1 +
 .../routing/network_routes_controller.go       |  9 ++--
 .../routing/network_routes_controller_test.go  | 15 +++----
 pkg/controllers/routing/utils.go               |  3 +-
 pkg/healthcheck/health_controller.go           | 20 ++++-----
 13 files changed, 78 insertions(+), 80 deletions(-)

diff --git a/.golangci.yml b/.golangci.yml
index 25d23e71..2dcfca16 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -10,6 +10,7 @@ linters:
     - exportloopref
     - gochecknoinits
     - goconst
+    - gocritic
     - gofmt
     - goimports
     - misspell
diff --git a/pkg/cmd/kube-router.go b/pkg/cmd/kube-router.go
index 2a0b6088..d7fe8f44 100644
--- a/pkg/cmd/kube-router.go
+++ b/pkg/cmd/kube-router.go
@@ -73,15 +73,16 @@ func (kr *KubeRouter) Run() error {
 	var err error
 	var ipsetMutex sync.Mutex
 	var wg sync.WaitGroup
-	healthChan := make(chan *healthcheck.ControllerHeartbeat, 10)
-	defer close(healthChan)
-	stopCh := make(chan struct{})
 
 	if !(kr.Config.RunFirewall || kr.Config.RunServiceProxy || kr.Config.RunRouter) {
 		klog.Info("Router, Firewall or Service proxy functionality must be specified. Exiting!")
 		os.Exit(0)
 	}
 
+	healthChan := make(chan *healthcheck.ControllerHeartbeat, 10)
+	defer close(healthChan)
+	stopCh := make(chan struct{})
+
 	hc, err := healthcheck.NewHealthController(kr.Config)
 	if err != nil {
 		return errors.New("Failed to create health controller: " + err.Error())
 	}
@@ -107,7 +108,7 @@ func (kr *KubeRouter) Run() error {
 	wg.Add(1)
 	go hc.RunCheck(healthChan, stopCh, &wg)
 
-	if kr.Config.MetricsPort > 0 {
+	if kr.Config.MetricsPort > 0 && kr.Config.MetricsPort < 65535 {
 		kr.Config.MetricsEnabled = true
 		mc, err := metrics.NewMetricsController(kr.Config)
 		if err != nil {
@@ -116,10 +117,8 @@ func (kr *KubeRouter) Run() error {
 		wg.Add(1)
 		go mc.Run(healthChan, stopCh, &wg)
 
-	} else if kr.Config.MetricsPort > 65535 {
-		klog.Errorf("Metrics port must be over 0 and under 65535, given port: %d", kr.Config.MetricsPort)
-		kr.Config.MetricsEnabled = false
 	} else {
+		klog.Errorf("Metrics port must be over 0 and under 65535, given port: %d", kr.Config.MetricsPort)
 		kr.Config.MetricsEnabled = false
 	}
 
diff --git a/pkg/controllers/netpol/network_policy_controller.go b/pkg/controllers/netpol/network_policy_controller.go
index f7a0f6f4..a4253a54 100644
--- a/pkg/controllers/netpol/network_policy_controller.go
+++ b/pkg/controllers/netpol/network_policy_controller.go
@@ -324,7 +324,7 @@ func (npc *NetworkPolicyController) ensureTopLevelChains() {
 
 	var ruleNo, ruleIndexOffset int
 	for i, rule := range rules {
-		rule = strings.Replace(rule, "\"", "", 2) //removes quote from comment string
+		rule = strings.Replace(rule, "\"", "", 2) // removes quote from comment string
 		if strings.HasPrefix(rule, "-P") || strings.HasPrefix(rule, "-N") {
 			// if this chain has a default policy, then it will show as rule #1 from iptablesCmdHandler.List so we
 			// need to account for this offset
@@ -581,7 +581,6 @@ func (npc *NetworkPolicyController) Cleanup() {
 		klog.Errorf("error encountered attempting to cleanup iptables rules: %v", err)
 		return
 	}
-	//klog.Infof("Final rules to save: %s", npc.filterTableRules)
 	// Restore (iptables-restore) npc's cleaned up version of the iptables filter chain
 	if err = utils.Restore("filter", npc.filterTableRules.Bytes()); err != nil {
 		klog.Errorf(
@@ -631,7 +630,7 @@ func NewNetworkPolicyController(clientset kubernetes.Interface,
 	}
 
 	if config.MetricsEnabled {
-		//Register the metrics for this controller
+		// Register the metrics for this controller
 		prometheus.MustRegister(metrics.ControllerIptablesSyncTime)
 		prometheus.MustRegister(metrics.ControllerPolicyChainsSyncTime)
 		prometheus.MustRegister(metrics.ControllerIptablesSyncTotalTime)
diff --git a/pkg/controllers/netpol/policy.go b/pkg/controllers/netpol/policy.go
index c516412b..d4851d05 100644
--- a/pkg/controllers/netpol/policy.go
+++ b/pkg/controllers/netpol/policy.go
@@ -415,11 +415,12 @@ func (npc *NetworkPolicyController) appendRuleToPolicyChain(policyChainName, com
 		}
 	}
 
+	// nolint:gocritic // we want to append to a separate array here so that we can re-use args below
 	markArgs := append(args, "-j", "MARK", "--set-xmark", "0x10000/0x10000", "\n")
 	npc.filterTableRules.WriteString(strings.Join(markArgs, " "))
 
-	returnArgs := append(args, "-m", "mark", "--mark", "0x10000/0x10000", "-j", "RETURN", "\n")
-	npc.filterTableRules.WriteString(strings.Join(returnArgs, " "))
+	args = append(args, "-m", "mark", "--mark", "0x10000/0x10000", "-j", "RETURN", "\n")
+	npc.filterTableRules.WriteString(strings.Join(args, " "))
 
 	return nil
 }
@@ -451,11 +452,12 @@ func (npc *NetworkPolicyController) buildNetworkPoliciesInfo() ([]networkPolicyI
 				egressType = true
 			}
 		}
-		if ingressType && egressType {
+		switch {
+		case ingressType && egressType:
 			newPolicy.policyType = kubeBothPolicyType
-		} else if egressType {
+		case egressType:
 			newPolicy.policyType = kubeEgressPolicyType
-		} else if ingressType {
+		case ingressType:
 			newPolicy.policyType = kubeIngressPolicyType
 		}
 
@@ -628,20 +630,18 @@ func (npc *NetworkPolicyController) processNetworkPolicyPorts(npPorts []networki
 		if npPort.Port == nil {
 			numericPorts = append(numericPorts, protocolAndPort{port: "", protocol: protocol})
 		} else if npPort.Port.Type == intstr.Int {
-			var portproto protocolAndPort
+			var portProto protocolAndPort
 			if npPort.EndPort != nil {
 				if *npPort.EndPort >= npPort.Port.IntVal {
-					portproto.endport = strconv.Itoa(int(*npPort.EndPort))
+					portProto.endport = strconv.Itoa(int(*npPort.EndPort))
 				}
 			}
-			portproto.protocol, portproto.port = protocol, npPort.Port.String()
-			numericPorts = append(numericPorts, portproto)
-		} else {
-			if protocol2eps, ok := namedPort2eps[npPort.Port.String()]; ok {
-				if numericPort2eps, ok := protocol2eps[protocol]; ok {
-					for _, eps := range numericPort2eps {
-						namedPorts = append(namedPorts, *eps)
-					}
+			portProto.protocol, portProto.port = protocol, npPort.Port.String()
+			numericPorts = append(numericPorts, portProto)
+		} else if protocol2eps, ok := namedPort2eps[npPort.Port.String()]; ok {
+			if numericPort2eps, ok := protocol2eps[protocol]; ok {
+				for _, eps := range numericPort2eps {
+					namedPorts = append(namedPorts, *eps)
 				}
 			}
 		}
diff --git a/pkg/controllers/proxy/network_service_graceful.go b/pkg/controllers/proxy/network_service_graceful.go
index 53488c36..a0992e45 100644
--- a/pkg/controllers/proxy/network_service_graceful.go
+++ b/pkg/controllers/proxy/network_service_graceful.go
@@ -102,12 +102,10 @@ func (nsc *NetworkServicesController) gracefulDeleteIpvsDestination(req graceful
 	aConn, iConn, err := nsc.getIpvsDestinationConnStats(req.ipvsSvc, req.ipvsDst)
 	if err != nil {
 		klog.V(1).Infof("Could not get connection stats for destination: %s", err.Error())
-	} else {
+	} else if aConn == 0 && iConn == 0 {
 		// Do we have active or inactive connections to this destination
 		// if we don't, proceed and delete the destination ahead of graceful period
-		if aConn == 0 && iConn == 0 {
-			deleteDestination = true
-		}
+		deleteDestination = true
 	}
 
 	// Check if our destinations graceful termination period has passed
@@ -115,7 +113,7 @@ func (nsc *NetworkServicesController) gracefulDeleteIpvsDestination(req graceful
 		deleteDestination = true
 	}
 
-	//Destination has has one or more conditions for deletion
+	// Destination has one or more conditions for deletion
 	if deleteDestination {
 		klog.V(2).Infof("Deleting IPVS destination: %s", ipvsDestinationString(req.ipvsDst))
 		if err := nsc.ln.ipvsDelDestination(req.ipvsSvc, req.ipvsDst); err != nil {
diff --git a/pkg/controllers/proxy/network_services_controller.go b/pkg/controllers/proxy/network_services_controller.go
index 4e07f07e..9e6bc519 100644
--- a/pkg/controllers/proxy/network_services_controller.go
+++ b/pkg/controllers/proxy/network_services_controller.go
@@ -477,14 +477,14 @@ func (nsc *NetworkServicesController) doSync() error {
 }
 
 // Lookup service ip, protocol, port by given fwmark value (reverse of generateFwmark)
-func (nsc *NetworkServicesController) lookupServiceByFWMark(FWMark uint32) (string, string, int, error) {
+func (nsc *NetworkServicesController) lookupServiceByFwMark(fwMark uint32) (string, string, int, error) {
 	for _, svc := range nsc.serviceMap {
 		for _, externalIP := range svc.externalIPs {
 			gfwmark, err := generateFwmark(externalIP, svc.protocol, fmt.Sprint(svc.port))
 			if err != nil {
 				return "", "", 0, err
 			}
-			if FWMark == gfwmark {
+			if fwMark == gfwmark {
 				return externalIP, svc.protocol, svc.port, nil
 			}
 		}
@@ -762,7 +762,7 @@ func (nsc *NetworkServicesController) syncIpvsFirewall() error {
 			}
 			port = int(ipvsService.Port)
 		} else if ipvsService.FWMark != 0 {
-			address, protocol, port, err = nsc.lookupServiceByFWMark(ipvsService.FWMark)
+			address, protocol, port, err = nsc.lookupServiceByFwMark(ipvsService.FWMark)
 			if err != nil {
 				klog.Errorf("failed to lookup %d by FWMark: %s", ipvsService.FWMark, err)
 			}
@@ -888,7 +888,7 @@ func unsortedListsEquivalent(a, b []endpointsInfo) bool {
 		values[val] = 1
 	}
 	for _, val := range b {
-		values[val] = values[val] + 1
+		values[val]++
 	}
 
 	for _, val := range values {
@@ -1147,15 +1147,16 @@ func (nsc *NetworkServicesController) buildServicesInfo() serviceInfoMap {
 			svcInfo.scheduler = ipvs.RoundRobin
 			schedulingMethod, ok := svc.ObjectMeta.Annotations[svcSchedulerAnnotation]
 			if ok {
-				if schedulingMethod == ipvs.RoundRobin {
+				switch {
+				case schedulingMethod == ipvs.RoundRobin:
 					svcInfo.scheduler = ipvs.RoundRobin
-				} else if schedulingMethod == ipvs.LeastConnection {
+				case schedulingMethod == ipvs.LeastConnection:
 					svcInfo.scheduler = ipvs.LeastConnection
-				} else if schedulingMethod == ipvs.DestinationHashing {
+				case schedulingMethod == ipvs.DestinationHashing:
 					svcInfo.scheduler = ipvs.DestinationHashing
-				} else if schedulingMethod == ipvs.SourceHashing {
+				case schedulingMethod == ipvs.SourceHashing:
 					svcInfo.scheduler = ipvs.SourceHashing
-				} else if schedulingMethod == IpvsMaglevHashing {
+				case schedulingMethod == IpvsMaglevHashing:
 					svcInfo.scheduler = IpvsMaglevHashing
 				}
 			}
@@ -1276,7 +1277,7 @@ func (nsc *NetworkServicesController) ensureMasqueradeIptablesRule() error {
 		}
 	}
 	if len(nsc.podCidr) > 0 {
-		//TODO: ipset should be used for destination podCidr(s) match after multiple podCidr(s) per node get supported
+		// TODO: ipset should be used for destination podCidr(s) match after multiple podCidr(s) per node get supported
 		args = []string{"-m", "ipvs", "--ipvs", "--vdir", "ORIGINAL", "--vmethod", "MASQ", "-m", "comment", "--comment", "",
 			"!", "-s", nsc.podCidr, "!", "-d", nsc.podCidr, "-j", "SNAT", "--to-source", nsc.nodeIP.String()}
 		if iptablesCmdHandler.HasRandomFully() {
@@ -1340,8 +1341,8 @@ func (nsc *NetworkServicesController) deleteBadMasqueradeIptablesRules() error {
 // enabled globally via CLI argument or a service has an annotation requesting
 // it.
 func (nsc *NetworkServicesController) syncHairpinIptablesRules() error {
-	//TODO: Use ipset?
-	//TODO: Log a warning that this will not work without hairpin sysctl set on veth
+	// TODO: Use ipset?
+	// TODO: Log a warning that this will not work without hairpin sysctl set on veth
 
 	// Key is a string that will match iptables.List() rules
 	// Value is a string[] with arguments that iptables transaction functions expect
@@ -1579,27 +1580,27 @@ func ipvsServiceString(s *ipvs.Service) string {
 	}
 
 	if s.Flags&0x0001 != 0 {
-		flags = flags + "[persistent port]"
+		flags += "[persistent port]"
 	}
 
 	if s.Flags&0x0002 != 0 {
-		flags = flags + "[hashed entry]"
+		flags += "[hashed entry]"
 	}
 
 	if s.Flags&0x0004 != 0 {
-		flags = flags + "[one-packet scheduling]"
+		flags += "[one-packet scheduling]"
 	}
 
 	if s.Flags&0x0008 != 0 {
-		flags = flags + "[flag-1(fallback)]"
+		flags += "[flag-1(fallback)]"
 	}
 
 	if s.Flags&0x0010 != 0 {
-		flags = flags + "[flag-2(port)]"
+		flags += "[flag-2(port)]"
 	}
 
 	if s.Flags&0x0020 != 0 {
-		flags = flags + "[flag-3]"
+		flags += "[flag-3]"
 	}
 
 	return fmt.Sprintf("%s:%s:%v (Flags: %s)", protocol, s.Address, s.Port, flags)
@@ -1743,11 +1744,12 @@ func generateFwmark(ip, protocol, port string) (uint32, error) {
 func (ln *linuxNetworking) ipvsAddFWMarkService(vip net.IP, protocol, port uint16, persistent bool,
 	persistentTimeout int32, scheduler string, flags schedFlags) (*ipvs.Service, error) {
 	var protocolStr string
-	if protocol == syscall.IPPROTO_TCP {
+	switch {
+	case protocol == syscall.IPPROTO_TCP:
 		protocolStr = tcpProtocol
-	} else if protocol == syscall.IPPROTO_UDP {
+	case protocol == syscall.IPPROTO_UDP:
 		protocolStr = udpProtocol
-	} else {
+	default:
 		protocolStr = "unknown"
 	}
diff --git a/pkg/controllers/proxy/service_endpoints_sync.go b/pkg/controllers/proxy/service_endpoints_sync.go
index de0389c8..c1ecc506 100644
--- a/pkg/controllers/proxy/service_endpoints_sync.go
+++ b/pkg/controllers/proxy/service_endpoints_sync.go
@@ -517,11 +517,12 @@ func (nsc *NetworkServicesController) cleanupStaleIPVSConfig(activeServiceEndpoi
 			protocol = udpProtocol
 		}
 		var key string
-		if ipvsSvc.Address != nil {
+		switch {
+		case ipvsSvc.Address != nil:
 			key = generateIPPortID(ipvsSvc.Address.String(), protocol, strconv.Itoa(int(ipvsSvc.Port)))
-		} else if ipvsSvc.FWMark != 0 {
+		case ipvsSvc.FWMark != 0:
 			key = fmt.Sprint(ipvsSvc.FWMark)
-		} else {
+		default:
 			continue
 		}
diff --git a/pkg/controllers/routing/bgp_peers.go b/pkg/controllers/routing/bgp_peers.go
index c4868598..662c79c8 100644
--- a/pkg/controllers/routing/bgp_peers.go
+++ b/pkg/controllers/routing/bgp_peers.go
@@ -140,7 +140,7 @@ func (nrc *NetworkRoutingController) syncInternalPeers() {
 		// we are rr-server peer with other rr-client with reflection enabled
 		if nrc.bgpRRServer {
 			if _, ok := node.ObjectMeta.Annotations[rrClientAnnotation]; ok {
-				//add rr options with clusterId
+				// add rr options with clusterId
 				n.RouteReflector = &gobgpapi.RouteReflector{
 					RouteReflectorClient:    true,
 					RouteReflectorClusterId: fmt.Sprint(nrc.bgpClusterID),
diff --git a/pkg/controllers/routing/bgp_policies.go b/pkg/controllers/routing/bgp_policies.go
index 11950cb0..123039fb 100644
--- a/pkg/controllers/routing/bgp_policies.go
+++ b/pkg/controllers/routing/bgp_policies.go
@@ -336,6 +336,7 @@ func (nrc *NetworkRoutingController) addAllBGPPeersDefinedSet(iBGPPeerCIDRs, ext
 	if err != nil {
 		return err
 	}
+	// nolint:gocritic // We intentionally append to a different array here so as to not change the passed in externalBGPPeerCIDRs
 	allBgpPeers := append(externalBGPPeerCIDRs, iBGPPeerCIDRs...)
 	if currentDefinedSet == nil {
 		allPeerNS := &gobgpapi.DefinedSet{
diff --git a/pkg/controllers/routing/network_routes_controller.go b/pkg/controllers/routing/network_routes_controller.go
index 5f31994b..f012fd1c 100644
--- a/pkg/controllers/routing/network_routes_controller.go
+++ b/pkg/controllers/routing/network_routes_controller.go
@@ -570,7 +570,8 @@ func (nrc *NetworkRoutingController) injectRoute(path *gobgpapi.Path) error {
 		nrc.cleanupTunnel(dst, tunnelName)
 	}
 
-	if link != nil {
+	switch {
+	case link != nil:
 		// if we setup an overlay tunnel link, then use it for destination routing
 		route = &netlink.Route{
 			LinkIndex: link.Attrs().Index,
@@ -578,7 +579,7 @@ func (nrc *NetworkRoutingController) injectRoute(path *gobgpapi.Path) error {
 			Dst:       dst,
 			Protocol:  0x11,
 		}
-	} else if sameSubnet {
+	case sameSubnet:
 		// if the nextHop is within the same subnet, add a route for the destination so that traffic can bet routed
 		// at layer 2 and minimize the need to traverse a router
 		route = &netlink.Route{
@@ -586,7 +587,7 @@ func (nrc *NetworkRoutingController) injectRoute(path *gobgpapi.Path) error {
 			Dst:      dst,
 			Gw:       nextHop,
 			Protocol: 0x11,
 		}
-	} else {
+	default:
 		// otherwise, let BGP do its thing, nothing to do here
 		return nil
 	}
@@ -1101,7 +1102,7 @@ func NewNetworkRoutingController(clientset kubernetes.Interface,
 	nrc := NetworkRoutingController{ipsetMutex: ipsetMutex}
 
 	if kubeRouterConfig.MetricsEnabled {
-		//Register the metrics for this controller
+		// Register the metrics for this controller
 		prometheus.MustRegister(metrics.ControllerBGPadvertisementsReceived)
 		prometheus.MustRegister(metrics.ControllerBGPInternalPeersSyncTime)
 		prometheus.MustRegister(metrics.ControllerBPGpeers)
diff --git a/pkg/controllers/routing/network_routes_controller_test.go b/pkg/controllers/routing/network_routes_controller_test.go
index bf454c05..fa523cd1 100644
--- a/pkg/controllers/routing/network_routes_controller_test.go
+++ b/pkg/controllers/routing/network_routes_controller_test.go
@@ -515,7 +515,7 @@ func Test_advertiseExternalIPs(t *testing.T) {
 		},
 	}
 
-	//nolint:dupl // There is no need to spend a lot of time de-duplicating test code
+	// nolint:dupl // There is no need to spend a lot of time de-duplicating test code
 	for _, testcase := range testcases {
 		t.Run(testcase.name, func(t *testing.T) {
 			go testcase.nrc.bgpServer.Serve()
@@ -705,7 +705,7 @@ func Test_advertiseAnnotationOptOut(t *testing.T) {
 		},
 	}
 
-	//nolint:dupl // There is no need to spend a lot of time de-duplicating test code
+	// nolint:dupl // There is no need to spend a lot of time de-duplicating test code
 	for _, testcase := range testcases {
 		t.Run(testcase.name, func(t *testing.T) {
 			go testcase.nrc.bgpServer.Serve()
@@ -928,7 +928,7 @@ func Test_advertiseAnnotationOptIn(t *testing.T) {
 		},
 	}
 
-	//nolint:dupl // There is no need to spend a lot of time de-duplicating test code
+	// nolint:dupl // There is no need to spend a lot of time de-duplicating test code
 	for _, testcase := range testcases {
 		t.Run(testcase.name, func(t *testing.T) {
 			go testcase.nrc.bgpServer.Serve()
@@ -1646,8 +1646,7 @@ func Test_routeReflectorConfiguration(t *testing.T) {
 					Name: "node-1",
 					Annotations: map[string]string{
 						"kube-router.io/node.asn": "100",
-						//rrServerAnnotation: "10_0_0_1",
-						rrServerAnnotation: "hello world",
+						rrServerAnnotation:        "hello world",
 					},
 				},
 			},
@@ -1712,10 +1711,8 @@ func Test_routeReflectorConfiguration(t *testing.T) {
 				if testcase.expectedClusterID != testcase.nrc.bgpClusterID {
 					t.Errorf("Node suppose to have cluster id '%s' but got %s", testcase.expectedClusterID, testcase.nrc.bgpClusterID)
 				}
-			} else {
-				if err == nil {
-					t.Fatal("misconfigured BGP server not suppose to start")
-				}
+			} else if err == nil {
+				t.Fatal("misconfigured BGP server not supposed to start")
 			}
 		})
 	}
diff --git a/pkg/controllers/routing/utils.go b/pkg/controllers/routing/utils.go
index 0d6c14b1..1b6fe0f7 100644
--- a/pkg/controllers/routing/utils.go
+++ b/pkg/controllers/routing/utils.go
@@ -119,7 +119,7 @@ func getNodeSubnet(nodeIP net.IP) (net.IPNet, string, error) {
 // is greater than 12 (after removing "."), then the interface name is tunXYZ
 // as opposed to tun-XYZ
 func generateTunnelName(nodeIP string) string {
-	hash := strings.Replace(nodeIP, ".", "", -1)
+	hash := strings.ReplaceAll(nodeIP, ".", "")
 
 	if len(hash) < 12 {
 		return "tun-" + hash
@@ -162,6 +162,7 @@ func parseBGPNextHop(path *gobgpapi.Path) (net.IP, error) {
 		if err := ptypes.UnmarshalAny(pAttr, &value); err != nil {
 			return nil, fmt.Errorf("failed to unmarshal path attribute: %s", err)
 		}
+		// nolint:gocritic // We can't change this to an if condition because it is a .(type) expression
 		switch a := value.Message.(type) {
 		case *gobgpapi.NextHopAttribute:
 			nextHop := net.ParseIP(a.NextHop).To4()
diff --git a/pkg/healthcheck/health_controller.go b/pkg/healthcheck/health_controller.go
index 963945c4..6fe89856 100644
--- a/pkg/healthcheck/health_controller.go
+++ b/pkg/healthcheck/health_controller.go
@@ -11,13 +11,13 @@ import (
 	"k8s.io/klog/v2"
 )
 
-//ControllerHeartbeat is the structure to hold the heartbeats sent by controllers
+// ControllerHeartbeat is the structure to hold the heartbeats sent by controllers
 type ControllerHeartbeat struct {
 	Component     string
 	LastHeartBeat time.Time
 }
 
-//HealthController reports the health of the controller loops as a http endpoint
+// HealthController reports the health of the controller loops as an HTTP endpoint
 type HealthController struct {
 	HealthPort  uint16
 	HTTPEnabled bool
@@ -25,7 +25,7 @@ type HealthController struct {
 	Status      HealthStats
 	Config      *options.KubeRouterConfig
 }
 
-//HealthStats is holds the latest heartbeats
+// HealthStats holds the latest heartbeats
 type HealthStats struct {
 	sync.Mutex
 	Healthy bool
@@ -38,7 +38,7 @@ type HealthStats struct {
 	NetworkServicesControllerAliveTTL time.Duration
 }
 
-//SendHeartBeat sends a heartbeat on the passed channel
+// SendHeartBeat sends a heartbeat on the passed channel
 func SendHeartBeat(channel chan<- *ControllerHeartbeat, controller string) {
 	heartbeat := ControllerHeartbeat{
 		Component:     controller,
@@ -47,7 +47,7 @@ func SendHeartBeat(channel chan<- *ControllerHeartbeat, controller string) {
 	channel <- &heartbeat
 }
 
-//Handler writes HTTP responses to the health path
+// Handler writes HTTP responses to the health path
 func (hc *HealthController) Handler(w http.ResponseWriter, _ *http.Request) {
 	if hc.Status.Healthy {
 		w.WriteHeader(http.StatusOK)
@@ -75,7 +75,7 @@ func (hc *HealthController) Handler(w http.ResponseWriter, _ *http.Request) {
 	}
 }
 
-//HandleHeartbeat handles received heartbeats on the health channel
+// HandleHeartbeat handles received heartbeats on the health channel
func (hc *HealthController) HandleHeartbeat(beat *ControllerHeartbeat) {
 	klog.V(3).Infof("Received heartbeat from %s", beat.Component)
 
@@ -143,7 +143,7 @@ func (hc *HealthController) CheckHealth() bool {
 	return health
 }
 
-//RunServer starts the HealthController's server
+// RunServer starts the HealthController's server
 func (hc *HealthController) RunServer(stopCh <-chan struct{}, wg *sync.WaitGroup) {
 	defer wg.Done()
 	srv := &http.Server{Addr: ":" + strconv.Itoa(int(hc.HealthPort)), Handler: http.DefaultServeMux}
@@ -156,8 +156,6 @@ func (hc *HealthController) RunServer(stopCh <-chan struct{}, wg *sync.WaitGroup
 				klog.Errorf("Health controller error: %s", err)
 			}
 		}()
-	} else if hc.Config.MetricsPort > 65535 {
-		klog.Errorf("Metrics port must be over 0 and under 65535, given port: %d", hc.Config.MetricsPort)
 	} else {
 		hc.HTTPEnabled = false
 	}
@@ -172,7 +170,7 @@ func (hc *HealthController) RunServer(stopCh <-chan struct{}, wg *sync.WaitGroup
 	}
 }
 
-//RunCheck starts the HealthController's check
+// RunCheck starts the HealthController's check
 func (hc *HealthController) RunCheck(healthChan <-chan *ControllerHeartbeat, stopCh <-chan struct{}, wg *sync.WaitGroup) {
 	t := time.NewTicker(5000 * time.Millisecond)
 	defer wg.Done()
@@ -200,7 +198,7 @@ func (hc *HealthController) SetAlive() {
 	hc.Status.NetworkServicesControllerAlive = now
 }
 
-//NewHealthController creates a new health controller and returns a reference to it
+// NewHealthController creates a new health controller and returns a reference to it
 func NewHealthController(config *options.KubeRouterConfig) (*HealthController, error) {
 	hc := HealthController{
 		Config: config,
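
A note on the two append-related nolint:gocritic suppressions above (gocritic's appendAssign check): assigning the result of append on a shared slice to a new variable is flagged because, when the source slice has spare capacity, both results alias the same backing array and a later append can overwrite an earlier one. The sketch below is illustrative only, with hypothetical names and values that are not part of this patch:

	package main

	import "fmt"

	func main() {
		// args has spare capacity, so both appends below write into the
		// same backing array instead of allocating a new one.
		args := make([]string, 0, 4)
		args = append(args, "-A", "CHAIN")

		markArgs := append(args, "MARK")     // writes "MARK" at index 2
		returnArgs := append(args, "RETURN") // also writes index 2, clobbering "MARK"

		fmt.Println(markArgs)   // [-A CHAIN RETURN] -- not what the name suggests
		fmt.Println(returnArgs) // [-A CHAIN RETURN]
	}

The pattern in appendRuleToPolicyChain appears safe despite this, because markArgs is consumed by strings.Join before args is appended to again; the linter cannot prove that ordering, hence the suppression rather than a rewrite.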
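
The other recurring remediation in this patch is gocritic's ifElseChain check, which prefers a tagless switch over an if/else-if ladder. A minimal sketch of the two switch forms involved, using hypothetical identifiers rather than code from this repository:

	package main

	import "fmt"

	// pickScheduler mirrors the buildServicesInfo rewrite: a tagless
	// switch replaces an if/else-if chain over the same variable.
	func pickScheduler(method string) string {
		switch {
		case method == "rr":
			return "round-robin"
		case method == "lc":
			return "least-connection"
		default:
			return "unknown"
		}
	}

	// describe shows why the switch in parseBGPNextHop keeps a nolint
	// instead of becoming an if: the v.(type) form is only legal as a
	// switch guard, so collapsing it would force a rewrite to plain
	// type assertions.
	func describe(v interface{}) string {
		switch a := v.(type) {
		case string:
			return fmt.Sprintf("string of length %d", len(a))
		default:
			return fmt.Sprintf("unhandled type %T", a)
		}
	}

	func main() {
		fmt.Println(pickScheduler("rr"), pickScheduler("wrr"))
		fmt.Println(describe("next-hop"), describe(42))
	}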