mirror of https://github.com/flatcar/scripts.git
synced 2025-08-23 23:41:10 +02:00

commit eb25e130c8

Merge pull request #1814 from mischief/kubelet

app-admin/kubelet: bump go-dockerclient for v1.10.x support

sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/Manifest (new file, 1 line)
@@ -0,0 +1 @@
DIST 1.1.2_coreos.0.tar.gz 22873406 SHA256 bf608be06915d1d04c84e075a1cd26b9e18fec7b37411ac5ecf94ff3113cd7b5 SHA512 340f455c7cb284cfac98a6d06f69f8be99589f8faea8c4c90acc0afa6b8778059e9d36e864b6f02d63d115dba8e8fd067701093ecddfe31b775765faa1b6666e WHIRLPOOL 06eb82e4e0cb82ac8f21050c18388f0eab491554c66ea35d5469206e63ff75bc8bc5403e4e56e14df3fc4ac47d2a15f14cc05bd02d77baac8396800693491a22

sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/files/0001-kubelet-report-NodeReady-last-in-status-list.patch (deleted)
@@ -1,335 +0,0 @@
From 6195a477686ce589f0568d9f0e6e44e7a9e146e3 Mon Sep 17 00:00:00 2001
From: Aaron Levy <aaron.levy@coreos.com>
Date: Tue, 17 Nov 2015 13:05:53 -0800
Subject: [PATCH 1/4] kubelet: report NodeReady last in status list

Addresses a version skew issue where the last condition status is always
evaluated as the NodeReady status. As a workaround force the NodeReady
condition to be the last in the list of node conditions.

ref: https://github.com/kubernetes/kubernetes/issues/16961
---
 pkg/kubelet/kubelet.go      | 111 +++++++++++++++++++++++---------------------
 pkg/kubelet/kubelet_test.go |  89 +++++++++++++++++++++--------------
 2 files changed, 112 insertions(+), 88 deletions(-)

diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 913c375..df83ce4 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -2441,6 +2441,63 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
     containerRuntimeVersionRequirementMet := kl.containerRuntimeVersionRequirementMet()

     currentTime := unversioned.Now()
+    var nodeOODCondition *api.NodeCondition
+
+    // Check if NodeOutOfDisk condition already exists and if it does, just pick it up for update.
+    for i := range node.Status.Conditions {
+        if node.Status.Conditions[i].Type == api.NodeOutOfDisk {
+            nodeOODCondition = &node.Status.Conditions[i]
+        }
+    }
+
+    newOODCondition := false
+    // If the NodeOutOfDisk condition doesn't exist, create one.
+    if nodeOODCondition == nil {
+        nodeOODCondition = &api.NodeCondition{
+            Type:               api.NodeOutOfDisk,
+            Status:             api.ConditionUnknown,
+            LastTransitionTime: currentTime,
+        }
+        // nodeOODCondition cannot be appended to node.Status.Conditions here because it gets
+        // copied to the slice. So if we append nodeOODCondition to the slice here none of the
+        // updates we make to nodeOODCondition below are reflected in the slice.
+        newOODCondition = true
+    }
+
+    // Update the heartbeat time irrespective of all the conditions.
+    nodeOODCondition.LastHeartbeatTime = currentTime
+
+    // Note: The conditions below take care of the case when a new NodeOutOfDisk condition is
+    // created and as well as the case when the condition already exists. When a new condition
+    // is created its status is set to api.ConditionUnknown which matches either
+    // nodeOODCondition.Status != api.ConditionTrue or
+    // nodeOODCondition.Status != api.ConditionFalse in the conditions below depending on whether
+    // the kubelet is out of disk or not.
+    if kl.isOutOfDisk() {
+        if nodeOODCondition.Status != api.ConditionTrue {
+            nodeOODCondition.Status = api.ConditionTrue
+            nodeOODCondition.Reason = "KubeletOutOfDisk"
+            nodeOODCondition.Message = "out of disk space"
+            nodeOODCondition.LastTransitionTime = currentTime
+            kl.recordNodeStatusEvent("NodeOutOfDisk")
+        }
+    } else {
+        if nodeOODCondition.Status != api.ConditionFalse {
+            nodeOODCondition.Status = api.ConditionFalse
+            nodeOODCondition.Reason = "KubeletHasSufficientDisk"
+            nodeOODCondition.Message = "kubelet has sufficient disk space available"
+            nodeOODCondition.LastTransitionTime = currentTime
+            kl.recordNodeStatusEvent("NodeHasSufficientDisk")
+        }
+    }
+
+    if newOODCondition {
+        node.Status.Conditions = append(node.Status.Conditions, *nodeOODCondition)
+    }
+
+    // NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions.
+    // This is due to an issue with version skewed kubelet and master components.
+    // ref: https://github.com/kubernetes/kubernetes/issues/16961
     var newNodeReadyCondition api.NodeCondition
     var oldNodeReadyConditionStatus api.ConditionStatus
     if containerRuntimeUp && networkConfigured && containerRuntimeVersionRequirementMet {
@@ -2497,60 +2554,6 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
         }
     }

-    var nodeOODCondition *api.NodeCondition
-
-    // Check if NodeOutOfDisk condition already exists and if it does, just pick it up for update.
-    for i := range node.Status.Conditions {
-        if node.Status.Conditions[i].Type == api.NodeOutOfDisk {
-            nodeOODCondition = &node.Status.Conditions[i]
-        }
-    }
-
-    newOODCondition := false
-    // If the NodeOutOfDisk condition doesn't exist, create one.
-    if nodeOODCondition == nil {
-        nodeOODCondition = &api.NodeCondition{
-            Type:               api.NodeOutOfDisk,
-            Status:             api.ConditionUnknown,
-            LastTransitionTime: currentTime,
-        }
-        // nodeOODCondition cannot be appended to node.Status.Conditions here because it gets
-        // copied to the slice. So if we append nodeOODCondition to the slice here none of the
-        // updates we make to nodeOODCondition below are reflected in the slice.
-        newOODCondition = true
-    }
-
-    // Update the heartbeat time irrespective of all the conditions.
-    nodeOODCondition.LastHeartbeatTime = currentTime
-
-    // Note: The conditions below take care of the case when a new NodeOutOfDisk condition is
-    // created and as well as the case when the condition already exists. When a new condition
-    // is created its status is set to api.ConditionUnknown which matches either
-    // nodeOODCondition.Status != api.ConditionTrue or
-    // nodeOODCondition.Status != api.ConditionFalse in the conditions below depending on whether
-    // the kubelet is out of disk or not.
-    if kl.isOutOfDisk() {
-        if nodeOODCondition.Status != api.ConditionTrue {
-            nodeOODCondition.Status = api.ConditionTrue
-            nodeOODCondition.Reason = "KubeletOutOfDisk"
-            nodeOODCondition.Message = "out of disk space"
-            nodeOODCondition.LastTransitionTime = currentTime
-            kl.recordNodeStatusEvent("NodeOutOfDisk")
-        }
-    } else {
-        if nodeOODCondition.Status != api.ConditionFalse {
-            nodeOODCondition.Status = api.ConditionFalse
-            nodeOODCondition.Reason = "KubeletHasSufficientDisk"
-            nodeOODCondition.Message = "kubelet has sufficient disk space available"
-            nodeOODCondition.LastTransitionTime = currentTime
-            kl.recordNodeStatusEvent("NodeHasSufficientDisk")
-        }
-    }
-
-    if newOODCondition {
-        node.Status.Conditions = append(node.Status.Conditions, *nodeOODCondition)
-    }
-
     if oldNodeUnschedulable != node.Spec.Unschedulable {
         if node.Spec.Unschedulable {
             kl.recordNodeStatusEvent("NodeNotSchedulable")
diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
index 986cf7b..eed55df 100644
--- a/pkg/kubelet/kubelet_test.go
+++ b/pkg/kubelet/kubelet_test.go
@@ -2558,14 +2558,6 @@ func TestUpdateNewNodeStatus(t *testing.T) {
     Status: api.NodeStatus{
         Conditions: []api.NodeCondition{
             {
-                Type:               api.NodeReady,
-                Status:             api.ConditionTrue,
-                Reason:             "KubeletReady",
-                Message:            fmt.Sprintf("kubelet is posting ready status"),
-                LastHeartbeatTime:  unversioned.Time{},
-                LastTransitionTime: unversioned.Time{},
-            },
-            {
                 Type:   api.NodeOutOfDisk,
                 Status: api.ConditionFalse,
                 Reason: "KubeletHasSufficientDisk",
@@ -2573,6 +2565,14 @@ func TestUpdateNewNodeStatus(t *testing.T) {
                 LastHeartbeatTime:  unversioned.Time{},
                 LastTransitionTime: unversioned.Time{},
             },
+            {
+                Type:               api.NodeReady,
+                Status:             api.ConditionTrue,
+                Reason:             "KubeletReady",
+                Message:            fmt.Sprintf("kubelet is posting ready status"),
+                LastHeartbeatTime:  unversioned.Time{},
+                LastTransitionTime: unversioned.Time{},
+            },
         },
         NodeInfo: api.NodeSystemInfo{
             MachineID: "123",
@@ -2622,6 +2622,11 @@ func TestUpdateNewNodeStatus(t *testing.T) {
         updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
     }

+    // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
+    if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
+        t.Errorf("unexpected node condition order. NodeReady should be last.")
+    }
+
     if !reflect.DeepEqual(expectedNode, updatedNode) {
         t.Errorf("unexpected objects: %s", util.ObjectDiff(expectedNode, updatedNode))
     }
@@ -2673,14 +2678,6 @@ func TestDockerRuntimeVersion(t *testing.T) {
     Status: api.NodeStatus{
         Conditions: []api.NodeCondition{
             {
-                Type:               api.NodeReady,
-                Status:             api.ConditionTrue,
-                Reason:             "KubeletReady",
-                Message:            fmt.Sprintf("kubelet is posting ready status"),
-                LastHeartbeatTime:  unversioned.Time{},
-                LastTransitionTime: unversioned.Time{},
-            },
-            {
                 Type:   api.NodeOutOfDisk,
                 Status: api.ConditionFalse,
                 Reason: "KubeletHasSufficientDisk",
@@ -2688,6 +2685,14 @@ func TestDockerRuntimeVersion(t *testing.T) {
                 LastHeartbeatTime:  unversioned.Time{},
                 LastTransitionTime: unversioned.Time{},
             },
+            {
+                Type:               api.NodeReady,
+                Status:             api.ConditionTrue,
+                Reason:             "KubeletReady",
+                Message:            fmt.Sprintf("kubelet is posting ready status"),
+                LastHeartbeatTime:  unversioned.Time{},
+                LastTransitionTime: unversioned.Time{},
+            },
         },
         NodeInfo: api.NodeSystemInfo{
             MachineID: "123",
@@ -2736,6 +2741,12 @@ func TestDockerRuntimeVersion(t *testing.T) {
         updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
         updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
     }
+
+    // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
+    if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
+        t.Errorf("unexpected node condition order. NodeReady should be last.")
+    }
+
     if !reflect.DeepEqual(expectedNode, updatedNode) {
         t.Errorf("unexpected objects: %s", util.ObjectDiff(expectedNode, updatedNode))
     }
@@ -2775,18 +2786,18 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
     Status: api.NodeStatus{
         Conditions: []api.NodeCondition{
             {
-                Type:               api.NodeReady,
+                Type:               api.NodeOutOfDisk,
                 Status:             api.ConditionTrue,
-                Reason:             "KubeletReady",
-                Message:            fmt.Sprintf("kubelet is posting ready status"),
+                Reason:             "KubeletOutOfDisk",
+                Message:            "out of disk space",
                 LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                 LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
             },
             {
-                Type:               api.NodeOutOfDisk,
+                Type:               api.NodeReady,
                 Status:             api.ConditionTrue,
-                Reason:             "KubeletOutOfDisk",
-                Message:            "out of disk space",
+                Reason:             "KubeletReady",
+                Message:            fmt.Sprintf("kubelet is posting ready status"),
                 LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                 LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
             },
@@ -2836,18 +2847,18 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
     Status: api.NodeStatus{
         Conditions: []api.NodeCondition{
             {
-                Type:               api.NodeReady,
+                Type:               api.NodeOutOfDisk,
                 Status:             api.ConditionTrue,
-                Reason:             "KubeletReady",
-                Message:            fmt.Sprintf("kubelet is posting ready status"),
+                Reason:             "KubeletOutOfDisk",
+                Message:            "out of disk space",
                 LastHeartbeatTime:  unversioned.Time{}, // placeholder
                 LastTransitionTime: unversioned.Time{}, // placeholder
             },
             {
-                Type:               api.NodeOutOfDisk,
+                Type:               api.NodeReady,
                 Status:             api.ConditionTrue,
-                Reason:             "KubeletOutOfDisk",
-                Message:            "out of disk space",
+                Reason:             "KubeletReady",
+                Message:            fmt.Sprintf("kubelet is posting ready status"),
                 LastHeartbeatTime:  unversioned.Time{}, // placeholder
                 LastTransitionTime: unversioned.Time{}, // placeholder
             },
@@ -2902,6 +2913,11 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
         updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
     }

+    // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
+    if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
+        t.Errorf("unexpected node condition order. NodeReady should be last.")
+    }
+
     if !reflect.DeepEqual(expectedNode, updatedNode) {
         t.Errorf("expected \n%v\n, got \n%v", expectedNode, updatedNode)
     }
@@ -2957,18 +2973,18 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
     Status: api.NodeStatus{
         Conditions: []api.NodeCondition{
             {
-                Type:               api.NodeReady,
+                Type:               api.NodeOutOfDisk,
                 Status:             api.ConditionFalse,
-                Reason:             "KubeletNotReady",
-                Message:            fmt.Sprintf("container runtime is down"),
+                Reason:             "KubeletHasSufficientDisk",
+                Message:            "kubelet has sufficient disk space available",
                 LastHeartbeatTime:  unversioned.Time{},
                 LastTransitionTime: unversioned.Time{},
             },
             {
-                Type:               api.NodeOutOfDisk,
+                Type:               api.NodeReady,
                 Status:             api.ConditionFalse,
-                Reason:             "KubeletHasSufficientDisk",
-                Message:            "kubelet has sufficient disk space available",
+                Reason:             "KubeletNotReady",
+                Message:            fmt.Sprintf("container runtime is down"),
                 LastHeartbeatTime:  unversioned.Time{},
                 LastTransitionTime: unversioned.Time{},
             },
@@ -3023,6 +3039,11 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
         updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
     }

+    // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
+    if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
+        t.Errorf("unexpected node condition order. NodeReady should be last.")
+    }
+
     if !reflect.DeepEqual(expectedNode, updatedNode) {
         t.Errorf("unexpected objects: %s", util.ObjectDiff(expectedNode, updatedNode))
     }
--
2.3.8 (Apple Git-58)
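
The reasoning behind patch 1 is worth spelling out. As the commit message says, a version-skewed master evaluates the last entry of node.Status.Conditions as the NodeReady status, so a kubelet that appends a new condition type after NodeReady makes a healthy node look unready. A minimal, self-contained Go sketch of the hazard (simplified stand-in types, not the real pkg/api structs):

package main

import "fmt"

// Condition is a simplified stand-in for api.NodeCondition.
type Condition struct {
    Type   string
    Status string
}

func main() {
    // An older kubelet reported NodeReady as the only, and therefore last, condition.
    conditions := []Condition{{Type: "Ready", Status: "True"}}

    // A newer kubelet naively appends NodeOutOfDisk after NodeReady...
    conditions = append(conditions, Condition{Type: "OutOfDisk", Status: "False"})

    // ...so a skewed master that reads the last entry as readiness now sees
    // "OutOfDisk:False" and misjudges the node's ready state.
    last := conditions[len(conditions)-1]
    fmt.Printf("master reads readiness from %s:%s\n", last.Type, last.Status)
}

Keeping NodeReady last, as the patch does, preserves the old master's assumption while still allowing new condition types.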

sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/files/0002-explicitly-check-Ready-condition-in-validate-cluster.patch (deleted)
@@ -1,25 +0,0 @@
From 48d7f5314be4903c6ffc2fb4be542d61b7241b8b Mon Sep 17 00:00:00 2001
From: Aaron Levy <aaron.levy@coreos.com>
Date: Thu, 19 Nov 2015 19:06:39 -0800
Subject: [PATCH 2/4] explicitly check "Ready" condition in validate-cluster

---
 cluster/validate-cluster.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cluster/validate-cluster.sh b/cluster/validate-cluster.sh
index 7b26295..9756439 100755
--- a/cluster/validate-cluster.sh
+++ b/cluster/validate-cluster.sh
@@ -40,7 +40,7 @@ while true; do
   # Suppress errors from kubectl output because during cluster bootstrapping
   # for clusters where the master node is registered, the apiserver will become
   # available and then get restarted as the kubelet configures the docker bridge.
-  nodes_status=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o template --template='{{range .items}}{{with index .status.conditions 0}}{{.type}}:{{.status}},{{end}}{{end}}' --api-version=v1) || true
+  nodes_status=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o template --template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}}{{.type}}:{{.status}},{{end}}{{end}}{{end}}' --api-version=v1) || true
   found=$(echo "${nodes_status}" | tr "," "\n" | grep -c 'Ready:') || true
   ready=$(echo "${nodes_status}" | tr "," "\n" | grep -c 'Ready:True') || true

--
2.3.8 (Apple Git-58)
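
Before this change the template grabbed .status.conditions index 0 and reported whatever condition happened to sit first; once patch 1 reorders conditions, index 0 is no longer NodeReady. The patched template emits only Ready:<status> pairs, which the tr/grep pipeline then counts. A rough Go rendering of that counting logic (a hypothetical helper, not code from the patch):

package main

import (
    "fmt"
    "strings"
)

// countReady mirrors the shell pipeline: `grep -c 'Ready:'` counts nodes
// reporting a Ready condition at all, `grep -c 'Ready:True'` counts the
// subset that are actually ready.
func countReady(nodesStatus string) (found, ready int) {
    for _, tok := range strings.Split(nodesStatus, ",") {
        if strings.Contains(tok, "Ready:") {
            found++
        }
        if strings.Contains(tok, "Ready:True") {
            ready++
        }
    }
    return found, ready
}

func main() {
    // Example template output for a three-node cluster, one node not ready.
    fmt.Println(countReady("Ready:True,Ready:True,Ready:False,")) // 3 2
}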

sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/files/0003-kubelet-check-node-condition-by-type-rather-than-by-.patch (deleted)
@@ -1,33 +0,0 @@
From 2b2afd9fe6501150e2da0341fc8725260a42c74f Mon Sep 17 00:00:00 2001
From: Aaron Levy <aaron.levy@coreos.com>
Date: Mon, 30 Nov 2015 19:02:12 -0800
Subject: [PATCH 3/4] kubelet: check node condition by type rather than by
 index

---
 pkg/kubelet/kubelet_test.go | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
index eed55df..7777814 100644
--- a/pkg/kubelet/kubelet_test.go
+++ b/pkg/kubelet/kubelet_test.go
@@ -2769,9 +2769,12 @@ func TestDockerRuntimeVersion(t *testing.T) {
     if !ok {
         t.Errorf("unexpected object type")
     }
-    if updatedNode.Status.Conditions[0].Reason != "KubeletNotReady" &&
-        !strings.Contains(updatedNode.Status.Conditions[0].Message, "container runtime version is older than") {
-        t.Errorf("unexpect NodeStatus due to container runtime version")
+
+    for _, cond := range updatedNode.Status.Conditions {
+        if cond.Type == api.NodeReady && cond.Reason != "KubeletNotReady" &&
+            !strings.Contains(cond.Message, "container runtime version is older than") {
+            t.Errorf("unexpect NodeStatus due to container runtime version")
+        }
     }
 }

--
2.3.8 (Apple Git-58)
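
Patches 2 and 3 apply the same principle: never address a condition by its slice position; select it by Type. The lookup pattern they move to, as a small standalone sketch (hypothetical helper and simplified types, not code from the tree):

package main

import "fmt"

type Condition struct {
    Type   string
    Reason string
}

// findCondition returns the first condition of the given type, so callers
// stay correct no matter how the slice happens to be ordered.
func findCondition(conds []Condition, condType string) *Condition {
    for i := range conds {
        if conds[i].Type == condType {
            return &conds[i]
        }
    }
    return nil
}

func main() {
    conds := []Condition{
        {Type: "OutOfDisk", Reason: "KubeletHasSufficientDisk"},
        {Type: "Ready", Reason: "KubeletReady"},
    }
    if c := findCondition(conds, "Ready"); c != nil {
        fmt.Println(c.Reason) // KubeletReady, regardless of ordering
    }
}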

sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/files/0004-pkg-kubelet-force-NodeReady-condition-to-be-last-on-.patch (deleted)
@@ -1,75 +0,0 @@
From aaa856a8d73fc36f9b458498fdbde8445f99dd7b Mon Sep 17 00:00:00 2001
From: Aaron Levy <aaron.levy@coreos.com>
Date: Tue, 8 Dec 2015 14:36:49 -0800
Subject: [PATCH 4/4] pkg/kubelet: force NodeReady condition to be last on
 existing nodes

---
 pkg/kubelet/kubelet.go      | 14 +++++++++++---
 pkg/kubelet/kubelet_test.go | 12 ++++++------
 2 files changed, 17 insertions(+), 9 deletions(-)

diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index df83ce4..6928563 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -2495,9 +2495,6 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
         node.Status.Conditions = append(node.Status.Conditions, *nodeOODCondition)
     }

-    // NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions.
-    // This is due to an issue with version skewed kubelet and master components.
-    // ref: https://github.com/kubernetes/kubernetes/issues/16961
     var newNodeReadyCondition api.NodeCondition
     var oldNodeReadyConditionStatus api.ConditionStatus
     if containerRuntimeUp && networkConfigured && containerRuntimeVersionRequirementMet {
@@ -2562,6 +2559,17 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
         }
         oldNodeUnschedulable = node.Spec.Unschedulable
     }
+
+    // NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions.
+    // This is due to an issue with version skewed kubelet and master components.
+    // ref: https://github.com/kubernetes/kubernetes/issues/16961
+    lastIndex := len(node.Status.Conditions) - 1
+    for i := range node.Status.Conditions {
+        if node.Status.Conditions[i].Type == api.NodeReady && i < lastIndex {
+            node.Status.Conditions[i], node.Status.Conditions[lastIndex] = node.Status.Conditions[lastIndex], node.Status.Conditions[i]
+            break
+        }
+    }
     return nil
 }

diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
index 7777814..30081a7 100644
--- a/pkg/kubelet/kubelet_test.go
+++ b/pkg/kubelet/kubelet_test.go
@@ -2789,18 +2789,18 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
     Status: api.NodeStatus{
         Conditions: []api.NodeCondition{
             {
-                Type:               api.NodeOutOfDisk,
+                Type:               api.NodeReady,
                 Status:             api.ConditionTrue,
-                Reason:             "KubeletOutOfDisk",
-                Message:            "out of disk space",
+                Reason:             "KubeletReady",
+                Message:            fmt.Sprintf("kubelet is posting ready status"),
                 LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                 LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
             },
             {
-                Type:               api.NodeReady,
+                Type:               api.NodeOutOfDisk,
                 Status:             api.ConditionTrue,
-                Reason:             "KubeletReady",
-                Message:            fmt.Sprintf("kubelet is posting ready status"),
+                Reason:             "KubeletOutOfDisk",
+                Message:            "out of disk space",
                 LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                 LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
             },
--
2.3.8 (Apple Git-58)

sdk_container/src/third_party/coreos-overlay/app-admin/kubelet (ebuild, modified)
@@ -11,8 +11,9 @@ inherit flag-o-matic systemd toolchain-funcs
 DESCRIPTION="Kubernetes Container Manager"
 HOMEPAGE="http://kubernetes.io/"
 KEYWORDS="amd64"
-SRC_URI="https://github.com/GoogleCloudPlatform/kubernetes/archive/v${PV}.tar.gz -> ${P}.tar.gz"
-S="${WORKDIR}/kubernetes-${PV}"
+MY_PV="${PV/_p/_coreos.}"
+SRC_URI="https://github.com/coreos/kubernetes/archive/v${MY_PV}.tar.gz -> ${MY_PV}.tar.gz"
+S="${WORKDIR}/kubernetes-${MY_PV}"

 LICENSE="Apache-2.0"
 SLOT="0"
@@ -22,11 +23,6 @@ DEPEND="dev-lang/go"
 RDEPEND="net-misc/socat"

 src_prepare() {
-    epatch "${FILESDIR}/0001-kubelet-report-NodeReady-last-in-status-list.patch"
-    epatch "${FILESDIR}/0002-explicitly-check-Ready-condition-in-validate-cluster.patch"
-    epatch "${FILESDIR}/0003-kubelet-check-node-condition-by-type-rather-than-by-.patch"
-    epatch "${FILESDIR}/0004-pkg-kubelet-force-NodeReady-condition-to-be-last-on-.patch"
-
     if gcc-specs-pie; then
         append-ldflags -nopie
     fi
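
The net effect of the ebuild change: the source tarball now comes from the coreos/kubernetes fork, presumably because the fork already carries the four patches above (which is why the epatch calls and the patch files are dropped), and MY_PV="${PV/_p/_coreos.}" maps the Gentoo-style version to the fork's release tag. A quick Go illustration of that substitution; the example ${PV} value is an assumption inferred from the Manifest's 1.1.2_coreos.0 distfile:

package main

import (
    "fmt"
    "strings"
)

func main() {
    pv := "1.1.2_p0" // assumed ebuild ${PV}, inferred from the Manifest entry
    // Equivalent of the bash parameter expansion ${PV/_p/_coreos.}:
    // replace the first "_p" with "_coreos.".
    myPV := strings.Replace(pv, "_p", "_coreos.", 1)
    fmt.Println(myPV) // 1.1.2_coreos.0 -> fetches v1.1.2_coreos.0 from coreos/kubernetes
}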