Mirror of https://github.com/cloudnativelabs/kube-router.git
feat(.golangci.yml): enable gocritic and remediate
This commit is contained in:
parent d6ccc22519
commit f52fddddee
@@ -10,6 +10,7 @@ linters:
     - exportloopref
     - gochecknoinits
     - goconst
+    - gocritic
     - gofmt
     - goimports
     - misspell
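Enabling gocritic in the linters list is what drives the code changes in the rest of this commit: if/else chains become switches, compound assignment replaces x = x + y, and intentional append patterns get nolint annotations. Below is a minimal, self-contained sketch (invented names, not kube-router code) of the two checks that appear most often here, ifElseChain and assignOp, written the way gocritic wants them.

// Illustrative only: a minimal file showing two patterns gocritic reports
// once it is enabled, ifElseChain and assignOp.
package main

import "fmt"

func describe(n int) string {
    // ifElseChain: rewrite if/else-if ladders as a switch.
    switch {
    case n < 0:
        return "negative"
    case n == 0:
        return "zero"
    default:
        return "positive"
    }
}

func main() {
    msg := "n is "
    msg += describe(3) // assignOp: prefer x += y over x = x + y
    fmt.Println(msg)
}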
@@ -73,15 +73,16 @@ func (kr *KubeRouter) Run() error {
 	var err error
 	var ipsetMutex sync.Mutex
 	var wg sync.WaitGroup
-	healthChan := make(chan *healthcheck.ControllerHeartbeat, 10)
-	defer close(healthChan)
-	stopCh := make(chan struct{})

 	if !(kr.Config.RunFirewall || kr.Config.RunServiceProxy || kr.Config.RunRouter) {
 		klog.Info("Router, Firewall or Service proxy functionality must be specified. Exiting!")
 		os.Exit(0)
 	}

+	healthChan := make(chan *healthcheck.ControllerHeartbeat, 10)
+	defer close(healthChan)
+	stopCh := make(chan struct{})
+
 	hc, err := healthcheck.NewHealthController(kr.Config)
 	if err != nil {
 		return errors.New("Failed to create health controller: " + err.Error())
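Moving the channel setup below the flag check matters because os.Exit terminates the process without running deferred functions, so the old defer close(healthChan) could never fire on that path; this is presumably what gocritic's exitAfterDefer check reported. A tiny sketch of the underlying Go behaviour:

// Minimal sketch (not kube-router code): os.Exit ends the program
// immediately and deferred calls never run.
package main

import (
    "fmt"
    "os"
)

func main() {
    defer fmt.Println("never printed") // skipped: os.Exit bypasses defers
    os.Exit(0)
}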
@@ -107,7 +108,7 @@ func (kr *KubeRouter) Run() error {
 	wg.Add(1)
 	go hc.RunCheck(healthChan, stopCh, &wg)

-	if kr.Config.MetricsPort > 0 {
+	if kr.Config.MetricsPort > 0 && kr.Config.MetricsPort < 65535 {
 		kr.Config.MetricsEnabled = true
 		mc, err := metrics.NewMetricsController(kr.Config)
 		if err != nil {
@@ -116,10 +117,8 @@ func (kr *KubeRouter) Run() error {
 		wg.Add(1)
 		go mc.Run(healthChan, stopCh, &wg)

-	} else if kr.Config.MetricsPort > 65535 {
-		klog.Errorf("Metrics port must be over 0 and under 65535, given port: %d", kr.Config.MetricsPort)
-		kr.Config.MetricsEnabled = false
 	} else {
+		klog.Errorf("Metrics port must be over 0 and under 65535, given port: %d", kr.Config.MetricsPort)
 		kr.Config.MetricsEnabled = false
 	}

@@ -581,7 +581,6 @@ func (npc *NetworkPolicyController) Cleanup() {
 		klog.Errorf("error encountered attempting to cleanup iptables rules: %v", err)
 		return
 	}
-	//klog.Infof("Final rules to save: %s", npc.filterTableRules)
 	// Restore (iptables-restore) npc's cleaned up version of the iptables filter chain
 	if err = utils.Restore("filter", npc.filterTableRules.Bytes()); err != nil {
 		klog.Errorf(
@@ -415,11 +415,12 @@ func (npc *NetworkPolicyController) appendRuleToPolicyChain(policyChainName, com
 		}
 	}

+	// nolint:gocritic // we want to append to a separate array here so that we can re-use args below
 	markArgs := append(args, "-j", "MARK", "--set-xmark", "0x10000/0x10000", "\n")
 	npc.filterTableRules.WriteString(strings.Join(markArgs, " "))

-	returnArgs := append(args, "-m", "mark", "--mark", "0x10000/0x10000", "-j", "RETURN", "\n")
-	npc.filterTableRules.WriteString(strings.Join(returnArgs, " "))
+	args = append(args, "-m", "mark", "--mark", "0x10000/0x10000", "-j", "RETURN", "\n")
+	npc.filterTableRules.WriteString(strings.Join(args, " "))

 	return nil
 }
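The nolint annotation and the returnArgs to args change both relate to gocritic's appendAssign check: assigning the result of append(args, ...) to a different variable can silently share, and later overwrite, args' backing array when it has spare capacity. A standalone demonstration (not kube-router code) of that aliasing:

// When the source slice has spare capacity, appending into a new variable
// reuses the same backing array, so a second append clobbers the first.
package main

import "fmt"

func main() {
    base := make([]string, 0, 4)
    base = append(base, "-p", "tcp")

    markArgs := append(base, "-j", "MARK")     // shares base's backing array
    returnArgs := append(base, "-j", "RETURN") // overwrites the same slots

    fmt.Println(markArgs)   // [-p tcp -j RETURN], "MARK" was clobbered
    fmt.Println(returnArgs) // [-p tcp -j RETURN]
}

As this hunk reads, markArgs is written into filterTableRules before args is extended again, so the sharing is harmless here; the nolint comment records that the separate variable is intentional.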
@@ -451,11 +452,12 @@ func (npc *NetworkPolicyController) buildNetworkPoliciesInfo() ([]networkPolicyI
 				egressType = true
 			}
 		}
-		if ingressType && egressType {
+		switch {
+		case ingressType && egressType:
 			newPolicy.policyType = kubeBothPolicyType
-		} else if egressType {
+		case egressType:
 			newPolicy.policyType = kubeEgressPolicyType
-		} else if ingressType {
+		case ingressType:
 			newPolicy.policyType = kubeIngressPolicyType
 		}

@@ -628,16 +630,15 @@ func (npc *NetworkPolicyController) processNetworkPolicyPorts(npPorts []networki
 		if npPort.Port == nil {
 			numericPorts = append(numericPorts, protocolAndPort{port: "", protocol: protocol})
 		} else if npPort.Port.Type == intstr.Int {
-			var portproto protocolAndPort
+			var portProto protocolAndPort
 			if npPort.EndPort != nil {
 				if *npPort.EndPort >= npPort.Port.IntVal {
-					portproto.endport = strconv.Itoa(int(*npPort.EndPort))
+					portProto.endport = strconv.Itoa(int(*npPort.EndPort))
 				}
 			}
-			portproto.protocol, portproto.port = protocol, npPort.Port.String()
-			numericPorts = append(numericPorts, portproto)
-		} else {
-			if protocol2eps, ok := namedPort2eps[npPort.Port.String()]; ok {
+			portProto.protocol, portProto.port = protocol, npPort.Port.String()
+			numericPorts = append(numericPorts, portProto)
+		} else if protocol2eps, ok := namedPort2eps[npPort.Port.String()]; ok {
 			if numericPort2eps, ok := protocol2eps[protocol]; ok {
 				for _, eps := range numericPort2eps {
 					namedPorts = append(namedPorts, *eps)
@@ -645,7 +646,6 @@ func (npc *NetworkPolicyController) processNetworkPolicyPorts(npPorts []networki
 			}
 		}
 	}
-	}
 	return
 }

@@ -102,13 +102,11 @@ func (nsc *NetworkServicesController) gracefulDeleteIpvsDestination(req graceful
 	aConn, iConn, err := nsc.getIpvsDestinationConnStats(req.ipvsSvc, req.ipvsDst)
 	if err != nil {
 		klog.V(1).Infof("Could not get connection stats for destination: %s", err.Error())
-	} else {
+	} else if aConn == 0 && iConn == 0 {
 		// Do we have active or inactive connections to this destination
 		// if we don't, proceed and delete the destination ahead of graceful period
-		if aConn == 0 && iConn == 0 {
 		deleteDestination = true
 	}
-	}

 	// Check if our destinations graceful termination period has passed
 	if time.Since(req.deletionTime) > req.gracefulTerminationPeriod {
@@ -477,14 +477,14 @@ func (nsc *NetworkServicesController) doSync() error {
 }

 // Lookup service ip, protocol, port by given fwmark value (reverse of generateFwmark)
-func (nsc *NetworkServicesController) lookupServiceByFWMark(FWMark uint32) (string, string, int, error) {
+func (nsc *NetworkServicesController) lookupServiceByFwMark(fwMark uint32) (string, string, int, error) {
 	for _, svc := range nsc.serviceMap {
 		for _, externalIP := range svc.externalIPs {
 			gfwmark, err := generateFwmark(externalIP, svc.protocol, fmt.Sprint(svc.port))
 			if err != nil {
 				return "", "", 0, err
 			}
-			if FWMark == gfwmark {
+			if fwMark == gfwmark {
 				return externalIP, svc.protocol, svc.port, nil
 			}
 		}
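The FWMark parameter becoming fwMark (and the method rename that follows from it) looks like a response to gocritic's captLocal check, which warns when a function parameter or local variable starts with an upper-case letter. An illustrative sketch with invented names:

// Illustrative only (not kube-router code): captLocal flags capitalized
// parameters such as `FWMark uint32`, since capitalized identifiers read as
// exported names in Go.
package main

import "fmt"

// before: func lookupByMark(FWMark uint32) string  <- captLocal warning
func lookupByMark(fwMark uint32) string { // after: lower-case parameter
    return fmt.Sprintf("fwmark=%#x", fwMark)
}

func main() {
    fmt.Println(lookupByMark(0x10000))
}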
@@ -762,7 +762,7 @@ func (nsc *NetworkServicesController) syncIpvsFirewall() error {
 			}
 			port = int(ipvsService.Port)
 		} else if ipvsService.FWMark != 0 {
-			address, protocol, port, err = nsc.lookupServiceByFWMark(ipvsService.FWMark)
+			address, protocol, port, err = nsc.lookupServiceByFwMark(ipvsService.FWMark)
 			if err != nil {
 				klog.Errorf("failed to lookup %d by FWMark: %s", ipvsService.FWMark, err)
 			}
@@ -888,7 +888,7 @@ func unsortedListsEquivalent(a, b []endpointsInfo) bool {
 		values[val] = 1
 	}
 	for _, val := range b {
-		values[val] = values[val] + 1
+		values[val]++
 	}

 	for _, val := range values {
@@ -1147,15 +1147,16 @@ func (nsc *NetworkServicesController) buildServicesInfo() serviceInfoMap {
 		svcInfo.scheduler = ipvs.RoundRobin
 		schedulingMethod, ok := svc.ObjectMeta.Annotations[svcSchedulerAnnotation]
 		if ok {
-			if schedulingMethod == ipvs.RoundRobin {
+			switch {
+			case schedulingMethod == ipvs.RoundRobin:
 				svcInfo.scheduler = ipvs.RoundRobin
-			} else if schedulingMethod == ipvs.LeastConnection {
+			case schedulingMethod == ipvs.LeastConnection:
 				svcInfo.scheduler = ipvs.LeastConnection
-			} else if schedulingMethod == ipvs.DestinationHashing {
+			case schedulingMethod == ipvs.DestinationHashing:
 				svcInfo.scheduler = ipvs.DestinationHashing
-			} else if schedulingMethod == ipvs.SourceHashing {
+			case schedulingMethod == ipvs.SourceHashing:
 				svcInfo.scheduler = ipvs.SourceHashing
-			} else if schedulingMethod == IpvsMaglevHashing {
+			case schedulingMethod == IpvsMaglevHashing:
 				svcInfo.scheduler = IpvsMaglevHashing
 			}
 		}
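The new tagless switch keeps the exact semantics of the old if/else ladder: cases are evaluated top to bottom and only the first match runs. Since every case is an equality test against schedulingMethod, the same logic could also be written as an expression switch on the value itself; a sketch with stand-in constants rather than the ipvs package:

// Equivalent form with stand-in constants: when every case compares the same
// variable for equality, switching on that value avoids repeating it.
package main

import "fmt"

const (
    roundRobin      = "rr"
    leastConnection = "lc"
    maglevHashing   = "mh"
)

func pickScheduler(annotation string) string {
    scheduler := roundRobin // default
    switch annotation {
    case roundRobin:
        scheduler = roundRobin
    case leastConnection:
        scheduler = leastConnection
    case maglevHashing:
        scheduler = maglevHashing
    }
    return scheduler
}

func main() {
    fmt.Println(pickScheduler("lc")) // lc
}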
@@ -1579,27 +1580,27 @@ func ipvsServiceString(s *ipvs.Service) string {
 	}

 	if s.Flags&0x0001 != 0 {
-		flags = flags + "[persistent port]"
+		flags += "[persistent port]"
 	}

 	if s.Flags&0x0002 != 0 {
-		flags = flags + "[hashed entry]"
+		flags += "[hashed entry]"
 	}

 	if s.Flags&0x0004 != 0 {
-		flags = flags + "[one-packet scheduling]"
+		flags += "[one-packet scheduling]"
 	}

 	if s.Flags&0x0008 != 0 {
-		flags = flags + "[flag-1(fallback)]"
+		flags += "[flag-1(fallback)]"
 	}

 	if s.Flags&0x0010 != 0 {
-		flags = flags + "[flag-2(port)]"
+		flags += "[flag-2(port)]"
 	}

 	if s.Flags&0x0020 != 0 {
-		flags = flags + "[flag-3]"
+		flags += "[flag-3]"
 	}

 	return fmt.Sprintf("%s:%s:%v (Flags: %s)", protocol, s.Address, s.Port, flags)
@@ -1743,11 +1744,12 @@ func generateFwmark(ip, protocol, port string) (uint32, error) {
 func (ln *linuxNetworking) ipvsAddFWMarkService(vip net.IP, protocol, port uint16, persistent bool, persistentTimeout int32, scheduler string, flags schedFlags) (*ipvs.Service, error) {

 	var protocolStr string
-	if protocol == syscall.IPPROTO_TCP {
+	switch {
+	case protocol == syscall.IPPROTO_TCP:
 		protocolStr = tcpProtocol
-	} else if protocol == syscall.IPPROTO_UDP {
+	case protocol == syscall.IPPROTO_UDP:
 		protocolStr = udpProtocol
-	} else {
+	default:
 		protocolStr = "unknown"
 	}

@@ -517,11 +517,12 @@ func (nsc *NetworkServicesController) cleanupStaleIPVSConfig(activeServiceEndpoi
 			protocol = udpProtocol
 		}
 		var key string
-		if ipvsSvc.Address != nil {
+		switch {
+		case ipvsSvc.Address != nil:
 			key = generateIPPortID(ipvsSvc.Address.String(), protocol, strconv.Itoa(int(ipvsSvc.Port)))
-		} else if ipvsSvc.FWMark != 0 {
+		case ipvsSvc.FWMark != 0:
 			key = fmt.Sprint(ipvsSvc.FWMark)
-		} else {
+		default:
 			continue
 		}

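One subtlety of this conversion: inside a switch, break would only leave the switch, but continue still applies to the innermost enclosing for loop, so default: continue behaves exactly like the old else branch. A small demonstration, unrelated to kube-router's types:

// continue inside a switch skips to the next iteration of the enclosing
// for loop, so `default: continue` keeps the old else-branch behaviour.
package main

import "fmt"

func main() {
    for i := 0; i < 5; i++ {
        switch {
        case i%2 == 0:
            fmt.Println("even:", i)
        default:
            continue // jumps to the next i; a plain break would not
        }
        fmt.Println("handled", i) // only reached for even i
    }
}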
@@ -336,6 +336,7 @@ func (nrc *NetworkRoutingController) addAllBGPPeersDefinedSet(iBGPPeerCIDRs, ext
 	if err != nil {
 		return err
 	}
+	// nolint:gocritic // We intentionally append to a different array here so as to not change the passed in externalBGPPeerCIDRs
 	allBgpPeers := append(externalBGPPeerCIDRs, iBGPPeerCIDRs...)
 	if currentDefinedSet == nil {
 		allPeerNS := &gobgpapi.DefinedSet{
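The nolint comment states the intent: the combined peer list is deliberately built by appending iBGPPeerCIDRs onto externalBGPPeerCIDRs into a new variable. As the aliasing demonstration earlier shows, that is only safe while the caller's slice is not written through afterwards; a defensive alternative, sketched here with plain string slices, is to copy into a freshly allocated slice:

// Hedged alternative with plain []string values: pre-allocating a new slice
// guarantees the caller's slice is never written to, which is the concern
// behind gocritic's appendAssign warning.
package main

import "fmt"

func combine(external, internal []string) []string {
    all := make([]string, 0, len(external)+len(internal))
    all = append(all, external...)
    all = append(all, internal...)
    return all
}

func main() {
    ext := []string{"10.0.0.0/24"}
    ibgp := []string{"192.168.0.0/16"}
    fmt.Println(combine(ext, ibgp)) // [10.0.0.0/24 192.168.0.0/16]
}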
@@ -570,7 +570,8 @@ func (nrc *NetworkRoutingController) injectRoute(path *gobgpapi.Path) error {
 		nrc.cleanupTunnel(dst, tunnelName)
 	}

-	if link != nil {
+	switch {
+	case link != nil:
 		// if we setup an overlay tunnel link, then use it for destination routing
 		route = &netlink.Route{
 			LinkIndex: link.Attrs().Index,
@@ -578,7 +579,7 @@ func (nrc *NetworkRoutingController) injectRoute(path *gobgpapi.Path) error {
 			Dst:      dst,
 			Protocol: 0x11,
 		}
-	} else if sameSubnet {
+	case sameSubnet:
 		// if the nextHop is within the same subnet, add a route for the destination so that traffic can bet routed
 		// at layer 2 and minimize the need to traverse a router
 		route = &netlink.Route{
@@ -586,7 +587,7 @@ func (nrc *NetworkRoutingController) injectRoute(path *gobgpapi.Path) error {
 			Gw:       nextHop,
 			Protocol: 0x11,
 		}
-	} else {
+	default:
 		// otherwise, let BGP do its thing, nothing to do here
 		return nil
 	}
@@ -1646,7 +1646,6 @@ func Test_routeReflectorConfiguration(t *testing.T) {
 				Name: "node-1",
 				Annotations: map[string]string{
 					"kube-router.io/node.asn": "100",
-					//rrServerAnnotation: "10_0_0_1",
 					rrServerAnnotation: "hello world",
 				},
 			},
@@ -1712,11 +1711,9 @@ func Test_routeReflectorConfiguration(t *testing.T) {
 				if testcase.expectedClusterID != testcase.nrc.bgpClusterID {
 					t.Errorf("Node suppose to have cluster id '%s' but got %s", testcase.expectedClusterID, testcase.nrc.bgpClusterID)
 				}
-			} else {
-				if err == nil {
+			} else if err == nil {
 				t.Fatal("misconfigured BGP server not suppose to start")
 			}
-			}
 		})
 	}

@@ -119,7 +119,7 @@ func getNodeSubnet(nodeIP net.IP) (net.IPNet, string, error) {
 // is greater than 12 (after removing "."), then the interface name is tunXYZ
 // as opposed to tun-XYZ
 func generateTunnelName(nodeIP string) string {
-	hash := strings.Replace(nodeIP, ".", "", -1)
+	hash := strings.ReplaceAll(nodeIP, ".", "")

 	if len(hash) < 12 {
 		return "tun-" + hash
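strings.ReplaceAll(s, old, new) is defined in the standard library as strings.Replace(s, old, new, -1), so this change is purely cosmetic; both forms strip every dot:

// Both calls remove every "." from the address string.
package main

import (
    "fmt"
    "strings"
)

func main() {
    ip := "10.1.2.3"
    fmt.Println(strings.Replace(ip, ".", "", -1)) // 10123
    fmt.Println(strings.ReplaceAll(ip, ".", ""))  // 10123
}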
@@ -162,6 +162,7 @@ func parseBGPNextHop(path *gobgpapi.Path) (net.IP, error) {
 		if err := ptypes.UnmarshalAny(pAttr, &value); err != nil {
 			return nil, fmt.Errorf("failed to unmarshal path attribute: %s", err)
 		}
+		// nolint:gocritic // We can't change this to an if condition because it is a .(type) expression
 		switch a := value.Message.(type) {
 		case *gobgpapi.NextHopAttribute:
 			nextHop := net.ParseIP(a.NextHop).To4()
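The nolint here presumably silences a gocritic suggestion (singleCaseSwitch or similar) to rewrite the switch as an if. For a simple type switch that binds a variable, an if with a comma-ok type assertion is the nearest equivalent, though the authors chose to keep the switch; a generic sketch with stand-in types rather than gobgpapi:

// Stand-in types only: a one-case type switch and the comma-ok if form
// behave the same; gocritic prefers the if, the nolint above keeps the switch.
package main

import "fmt"

type nextHop struct{ addr string }

func viaSwitch(v interface{}) string {
    switch a := v.(type) {
    case *nextHop:
        return "next hop " + a.addr
    }
    return "unknown"
}

func viaIf(v interface{}) string {
    if a, ok := v.(*nextHop); ok {
        return "next hop " + a.addr
    }
    return "unknown"
}

func main() {
    fmt.Println(viaSwitch(&nextHop{addr: "10.0.0.1"}))
    fmt.Println(viaIf(&nextHop{addr: "10.0.0.1"}))
}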
@@ -156,8 +156,6 @@ func (hc *HealthController) RunServer(stopCh <-chan struct{}, wg *sync.WaitGroup
 				klog.Errorf("Health controller error: %s", err)
 			}
 		}()
-	} else if hc.Config.MetricsPort > 65535 {
-		klog.Errorf("Metrics port must be over 0 and under 65535, given port: %d", hc.Config.MetricsPort)
 	} else {
 		hc.HTTPEnabled = false
 	}