diff --git a/sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/files/0001-Plumb-linker-flags-through-from-the-Makefile.patch b/sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/files/0001-Plumb-linker-flags-through-from-the-Makefile.patch
deleted file mode 100644
index c46538daea..0000000000
--- a/sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/files/0001-Plumb-linker-flags-through-from-the-Makefile.patch
+++ /dev/null
@@ -1,116 +0,0 @@
-From 15052509eab969b8ce5076be694ad28615a70dc9 Mon Sep 17 00:00:00 2001
-From: Alex Crawford
-Date: Wed, 5 Aug 2015 14:46:34 -0700
-Subject: [PATCH] Plumb linker flags through from the Makefile
-
----
- Makefile           |  6 ++++++
- hack/lib/golang.sh | 17 +++++++----------
- 2 files changed, 13 insertions(+), 10 deletions(-)
-
-diff --git a/Makefile b/Makefile
-index af0fc4f..3636ce5 100644
---- a/Makefile
-+++ b/Makefile
-@@ -12,6 +12,9 @@ GODEPS_PKG_DIR = Godeps/_workspace/pkg
- KUBE_GOFLAGS = $(GOFLAGS)
- export KUBE_GOFLAGS
- 
-+KUBE_GOLDFLAGS = $(GOLDFLAGS)
-+export KUBE_GOLDFLAGS
-+
- # Build code.
- #
- # Args:
-@@ -19,6 +22,7 @@ export KUBE_GOFLAGS
- #   package, the build will produce executable files under $(OUT_DIR)/go/bin.
- #   If not specified, "everything" will be built.
- #   GOFLAGS: Extra flags to pass to 'go' when building.
-+#   GOLDFLAGS: Extra linking flags to pass to 'go' when building.
- #
- # Example:
- #   make
-@@ -35,6 +39,7 @@ all:
- #   directories will be run. If not specified, "everything" will be tested.
- #   TESTS: Same as WHAT.
- #   GOFLAGS: Extra flags to pass to 'go' when building.
-+#   GOLDFLAGS: Extra linking flags to pass to 'go' when building.
- #
- # Example:
- #   make check
-@@ -78,6 +83,7 @@ clean:
- #   vetted.
- #   TESTS: Same as WHAT.
- #   GOFLAGS: Extra flags to pass to 'go' when building.
-+#   GOLDFLAGS: Extra linking flags to pass to 'go' when building.
- #
- # Example:
- #   make vet
-diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh
-index f0fe3bd..8f2c5d4 100644
---- a/hack/lib/golang.sh
-+++ b/hack/lib/golang.sh
-@@ -369,7 +369,7 @@ kube::golang::build_binaries_for_platform() {
-        local outfile=$(kube::golang::output_filename_for_binary "${binary}" "${platform}")
-        CGO_ENABLED=0 go build -o "${outfile}" \
-          "${goflags[@]:+${goflags[@]}}" \
--         -ldflags "${version_ldflags}" \
-+         -ldflags "${goldflags}" \
-          "${binary}"
-        kube::log::progress "*"
-      done
-@@ -377,7 +377,7 @@ kube::golang::build_binaries_for_platform() {
-        local outfile=$(kube::golang::output_filename_for_binary "${binary}" "${platform}")
-        go build -o "${outfile}" \
-          "${goflags[@]:+${goflags[@]}}" \
--         -ldflags "${version_ldflags}" \
-+         -ldflags "${goldflags}" \
-          "${binary}"
-        kube::log::progress "*"
-      done
-@@ -386,12 +386,12 @@ kube::golang::build_binaries_for_platform() {
-      # Use go install.
-      if [[ "${#nonstatics[@]}" != 0 ]]; then
-        go install "${goflags[@]:+${goflags[@]}}" \
--         -ldflags "${version_ldflags}" \
-+         -ldflags "${goldflags}" \
-          "${nonstatics[@]:+${nonstatics[@]}}"
-      fi
-      if [[ "${#statics[@]}" != 0 ]]; then
-        CGO_ENABLED=0 go install -installsuffix cgo "${goflags[@]:+${goflags[@]}}" \
--         -ldflags "${version_ldflags}" \
-+         -ldflags "${goldflags}" \
-          "${statics[@]:+${statics[@]}}"
-      fi
-    fi
-@@ -405,7 +405,7 @@ kube::golang::build_binaries_for_platform() {
-      pushd "$(dirname ${outfile})" >/dev/null
-      go test -c \
-        "${goflags[@]:+${goflags[@]}}" \
--       -ldflags "${version_ldflags}" \
-+       -ldflags "${goldflags}" \
-        "$(dirname ${test})"
-      popd >/dev/null
-    done
-@@ -447,16 +447,13 @@ kube::golang::build_binaries() {
-   # Check for `go` binary and set ${GOPATH}.
-   kube::golang::setup_env
-
--  # Fetch the version.
--  local version_ldflags
--  version_ldflags=$(kube::version::ldflags)
--
-   local host_platform
-   host_platform=$(kube::golang::host_platform)
-
-   # Use eval to preserve embedded quoted strings.
--  local goflags
-+  local goflags goldflags
-   eval "goflags=(${KUBE_GOFLAGS:-})"
-+  goldflags="${KUBE_GOLDFLAGS:-} $(kube::version::ldflags)"
-
-   local use_go_build
-   local -a targets=()
--- 
-2.3.6
-
diff --git a/sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/files/0001-kubelet-report-NodeReady-last-in-status-list.patch b/sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/files/0001-kubelet-report-NodeReady-last-in-status-list.patch
new file mode 100644
index 0000000000..ffcab06d47
--- /dev/null
+++ b/sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/files/0001-kubelet-report-NodeReady-last-in-status-list.patch
@@ -0,0 +1,335 @@
+From 0180f019c7575a5cfe8344ecab14e9c3da883ac7 Mon Sep 17 00:00:00 2001
+From: Aaron Levy
+Date: Tue, 17 Nov 2015 13:05:53 -0800
+Subject: [PATCH 1/3] kubelet: report NodeReady last in status list
+
+Addresses a version skew issue where the last condition status is always
+evaluated as the NodeReady status. As a workaround force the NodeReady
+condition to be the last in the list of node conditions.
+
+ref: https://github.com/kubernetes/kubernetes/issues/16961
+---
+ pkg/kubelet/kubelet.go      | 111 +++++++++++++++++++++++---------------------
+ pkg/kubelet/kubelet_test.go |  89 +++++++++++++++++++++--------------
+ 2 files changed, 112 insertions(+), 88 deletions(-)
+
+diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
+index 913c375..df83ce4 100644
+--- a/pkg/kubelet/kubelet.go
++++ b/pkg/kubelet/kubelet.go
+@@ -2441,6 +2441,63 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
+ 	containerRuntimeVersionRequirementMet := kl.containerRuntimeVersionRequirementMet()
+ 
+ 	currentTime := unversioned.Now()
++	var nodeOODCondition *api.NodeCondition
++
++	// Check if NodeOutOfDisk condition already exists and if it does, just pick it up for update.
++	for i := range node.Status.Conditions {
++		if node.Status.Conditions[i].Type == api.NodeOutOfDisk {
++			nodeOODCondition = &node.Status.Conditions[i]
++		}
++	}
++
++	newOODCondition := false
++	// If the NodeOutOfDisk condition doesn't exist, create one.
++	if nodeOODCondition == nil {
++		nodeOODCondition = &api.NodeCondition{
++			Type: api.NodeOutOfDisk,
++			Status: api.ConditionUnknown,
++			LastTransitionTime: currentTime,
++		}
++		// nodeOODCondition cannot be appended to node.Status.Conditions here because it gets
++		// copied to the slice. So if we append nodeOODCondition to the slice here none of the
++		// updates we make to nodeOODCondition below are reflected in the slice.
++		newOODCondition = true
++	}
++
++	// Update the heartbeat time irrespective of all the conditions.
++	nodeOODCondition.LastHeartbeatTime = currentTime
++
++	// Note: The conditions below take care of the case when a new NodeOutOfDisk condition is
++	// created and as well as the case when the condition already exists. When a new condition
++	// is created its status is set to api.ConditionUnknown which matches either
++	// nodeOODCondition.Status != api.ConditionTrue or
++	// nodeOODCondition.Status != api.ConditionFalse in the conditions below depending on whether
++	// the kubelet is out of disk or not.
++	if kl.isOutOfDisk() {
++		if nodeOODCondition.Status != api.ConditionTrue {
++			nodeOODCondition.Status = api.ConditionTrue
++			nodeOODCondition.Reason = "KubeletOutOfDisk"
++			nodeOODCondition.Message = "out of disk space"
++			nodeOODCondition.LastTransitionTime = currentTime
++			kl.recordNodeStatusEvent("NodeOutOfDisk")
++		}
++	} else {
++		if nodeOODCondition.Status != api.ConditionFalse {
++			nodeOODCondition.Status = api.ConditionFalse
++			nodeOODCondition.Reason = "KubeletHasSufficientDisk"
++			nodeOODCondition.Message = "kubelet has sufficient disk space available"
++			nodeOODCondition.LastTransitionTime = currentTime
++			kl.recordNodeStatusEvent("NodeHasSufficientDisk")
++		}
++	}
++
++	if newOODCondition {
++		node.Status.Conditions = append(node.Status.Conditions, *nodeOODCondition)
++	}
++
++	// NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions.
++	// This is due to an issue with version skewed kubelet and master components.
++	// ref: https://github.com/kubernetes/kubernetes/issues/16961
+ 	var newNodeReadyCondition api.NodeCondition
+ 	var oldNodeReadyConditionStatus api.ConditionStatus
+ 	if containerRuntimeUp && networkConfigured && containerRuntimeVersionRequirementMet {
+@@ -2497,60 +2554,6 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
+ 		}
+ 	}
+ 
+-	var nodeOODCondition *api.NodeCondition
+-
+-	// Check if NodeOutOfDisk condition already exists and if it does, just pick it up for update.
+-	for i := range node.Status.Conditions {
+-		if node.Status.Conditions[i].Type == api.NodeOutOfDisk {
+-			nodeOODCondition = &node.Status.Conditions[i]
+-		}
+-	}
+-
+-	newOODCondition := false
+-	// If the NodeOutOfDisk condition doesn't exist, create one.
+-	if nodeOODCondition == nil {
+-		nodeOODCondition = &api.NodeCondition{
+-			Type: api.NodeOutOfDisk,
+-			Status: api.ConditionUnknown,
+-			LastTransitionTime: currentTime,
+-		}
+-		// nodeOODCondition cannot be appended to node.Status.Conditions here because it gets
+-		// copied to the slice. So if we append nodeOODCondition to the slice here none of the
+-		// updates we make to nodeOODCondition below are reflected in the slice.
+-		newOODCondition = true
+-	}
+-
+-	// Update the heartbeat time irrespective of all the conditions.
+-	nodeOODCondition.LastHeartbeatTime = currentTime
+-
+-	// Note: The conditions below take care of the case when a new NodeOutOfDisk condition is
+-	// created and as well as the case when the condition already exists. When a new condition
+-	// is created its status is set to api.ConditionUnknown which matches either
+-	// nodeOODCondition.Status != api.ConditionTrue or
+-	// nodeOODCondition.Status != api.ConditionFalse in the conditions below depending on whether
+-	// the kubelet is out of disk or not.
+-	if kl.isOutOfDisk() {
+-		if nodeOODCondition.Status != api.ConditionTrue {
+-			nodeOODCondition.Status = api.ConditionTrue
+-			nodeOODCondition.Reason = "KubeletOutOfDisk"
+-			nodeOODCondition.Message = "out of disk space"
+-			nodeOODCondition.LastTransitionTime = currentTime
+-			kl.recordNodeStatusEvent("NodeOutOfDisk")
+-		}
+-	} else {
+-		if nodeOODCondition.Status != api.ConditionFalse {
+-			nodeOODCondition.Status = api.ConditionFalse
+-			nodeOODCondition.Reason = "KubeletHasSufficientDisk"
+-			nodeOODCondition.Message = "kubelet has sufficient disk space available"
+-			nodeOODCondition.LastTransitionTime = currentTime
+-			kl.recordNodeStatusEvent("NodeHasSufficientDisk")
+-		}
+-	}
+-
+-	if newOODCondition {
+-		node.Status.Conditions = append(node.Status.Conditions, *nodeOODCondition)
+-	}
+-
+ 	if oldNodeUnschedulable != node.Spec.Unschedulable {
+ 		if node.Spec.Unschedulable {
+ 			kl.recordNodeStatusEvent("NodeNotSchedulable")
+diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
+index 986cf7b..eed55df 100644
+--- a/pkg/kubelet/kubelet_test.go
++++ b/pkg/kubelet/kubelet_test.go
+@@ -2558,14 +2558,6 @@ func TestUpdateNewNodeStatus(t *testing.T) {
+ 		Status: api.NodeStatus{
+ 			Conditions: []api.NodeCondition{
+ 				{
+-					Type: api.NodeReady,
+-					Status: api.ConditionTrue,
+-					Reason: "KubeletReady",
+-					Message: fmt.Sprintf("kubelet is posting ready status"),
+-					LastHeartbeatTime: unversioned.Time{},
+-					LastTransitionTime: unversioned.Time{},
+-				},
+-				{
+ 					Type: api.NodeOutOfDisk,
+ 					Status: api.ConditionFalse,
+ 					Reason: "KubeletHasSufficientDisk",
+@@ -2573,6 +2565,14 @@ func TestUpdateNewNodeStatus(t *testing.T) {
+ 					LastHeartbeatTime: unversioned.Time{},
+ 					LastTransitionTime: unversioned.Time{},
+ 				},
++				{
++					Type: api.NodeReady,
++					Status: api.ConditionTrue,
++					Reason: "KubeletReady",
++					Message: fmt.Sprintf("kubelet is posting ready status"),
++					LastHeartbeatTime: unversioned.Time{},
++					LastTransitionTime: unversioned.Time{},
++				},
+ 			},
+ 			NodeInfo: api.NodeSystemInfo{
+ 				MachineID: "123",
+@@ -2622,6 +2622,11 @@ func TestUpdateNewNodeStatus(t *testing.T) {
+ 		updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
+ 	}
+ 
++	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
++	if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
++		t.Errorf("unexpected node condition order. NodeReady should be last.")
++	}
++
+ 	if !reflect.DeepEqual(expectedNode, updatedNode) {
+ 		t.Errorf("unexpected objects: %s", util.ObjectDiff(expectedNode, updatedNode))
+ 	}
+@@ -2673,14 +2678,6 @@ func TestDockerRuntimeVersion(t *testing.T) {
+ 		Status: api.NodeStatus{
+ 			Conditions: []api.NodeCondition{
+ 				{
+-					Type: api.NodeReady,
+-					Status: api.ConditionTrue,
+-					Reason: "KubeletReady",
+-					Message: fmt.Sprintf("kubelet is posting ready status"),
+-					LastHeartbeatTime: unversioned.Time{},
+-					LastTransitionTime: unversioned.Time{},
+-				},
+-				{
+ 					Type: api.NodeOutOfDisk,
+ 					Status: api.ConditionFalse,
+ 					Reason: "KubeletHasSufficientDisk",
+@@ -2688,6 +2685,14 @@ func TestDockerRuntimeVersion(t *testing.T) {
+ 					LastHeartbeatTime: unversioned.Time{},
+ 					LastTransitionTime: unversioned.Time{},
+ 				},
++				{
++					Type: api.NodeReady,
++					Status: api.ConditionTrue,
++					Reason: "KubeletReady",
++					Message: fmt.Sprintf("kubelet is posting ready status"),
++					LastHeartbeatTime: unversioned.Time{},
++					LastTransitionTime: unversioned.Time{},
++				},
+ 			},
+ 			NodeInfo: api.NodeSystemInfo{
+ 				MachineID: "123",
+@@ -2736,6 +2741,12 @@ func TestDockerRuntimeVersion(t *testing.T) {
+ 		updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
+ 		updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
+ 	}
++
++	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
++	if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
++		t.Errorf("unexpected node condition order. NodeReady should be last.")
++	}
++
+ 	if !reflect.DeepEqual(expectedNode, updatedNode) {
+ 		t.Errorf("unexpected objects: %s", util.ObjectDiff(expectedNode, updatedNode))
+ 	}
+@@ -2775,18 +2786,18 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
+ 		Status: api.NodeStatus{
+ 			Conditions: []api.NodeCondition{
+ 				{
+-					Type: api.NodeReady,
++					Type: api.NodeOutOfDisk,
+ 					Status: api.ConditionTrue,
+-					Reason: "KubeletReady",
+-					Message: fmt.Sprintf("kubelet is posting ready status"),
++					Reason: "KubeletOutOfDisk",
++					Message: "out of disk space",
+ 					LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+ 					LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+ 				},
+ 				{
+-					Type: api.NodeOutOfDisk,
++					Type: api.NodeReady,
+ 					Status: api.ConditionTrue,
+-					Reason: "KubeletOutOfDisk",
+-					Message: "out of disk space",
++					Reason: "KubeletReady",
++					Message: fmt.Sprintf("kubelet is posting ready status"),
+ 					LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+ 					LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+ 				},
+@@ -2836,18 +2847,18 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
+ 		Status: api.NodeStatus{
+ 			Conditions: []api.NodeCondition{
+ 				{
+-					Type: api.NodeReady,
++					Type: api.NodeOutOfDisk,
+ 					Status: api.ConditionTrue,
+-					Reason: "KubeletReady",
+-					Message: fmt.Sprintf("kubelet is posting ready status"),
++					Reason: "KubeletOutOfDisk",
++					Message: "out of disk space",
+ 					LastHeartbeatTime: unversioned.Time{}, // placeholder
+ 					LastTransitionTime: unversioned.Time{}, // placeholder
+ 				},
+ 				{
+-					Type: api.NodeOutOfDisk,
++					Type: api.NodeReady,
+ 					Status: api.ConditionTrue,
+-					Reason: "KubeletOutOfDisk",
+-					Message: "out of disk space",
++					Reason: "KubeletReady",
++					Message: fmt.Sprintf("kubelet is posting ready status"),
+ 					LastHeartbeatTime: unversioned.Time{}, // placeholder
+ 					LastTransitionTime: unversioned.Time{}, // placeholder
+ 				},
+@@ -2902,6 +2913,11 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
+ 		updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
+ 	}
+ 
++	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
++	if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
++		t.Errorf("unexpected node condition order. NodeReady should be last.")
++	}
++
+ 	if !reflect.DeepEqual(expectedNode, updatedNode) {
+ 		t.Errorf("expected \n%v\n, got \n%v", expectedNode, updatedNode)
+ 	}
+@@ -2957,18 +2973,18 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
+ 		Status: api.NodeStatus{
+ 			Conditions: []api.NodeCondition{
+ 				{
+-					Type: api.NodeReady,
++					Type: api.NodeOutOfDisk,
+ 					Status: api.ConditionFalse,
+-					Reason: "KubeletNotReady",
+-					Message: fmt.Sprintf("container runtime is down"),
++					Reason: "KubeletHasSufficientDisk",
++					Message: "kubelet has sufficient disk space available",
+ 					LastHeartbeatTime: unversioned.Time{},
+ 					LastTransitionTime: unversioned.Time{},
+ 				},
+ 				{
+-					Type: api.NodeOutOfDisk,
++					Type: api.NodeReady,
+ 					Status: api.ConditionFalse,
+-					Reason: "KubeletHasSufficientDisk",
+-					Message: "kubelet has sufficient disk space available",
++					Reason: "KubeletNotReady",
++					Message: fmt.Sprintf("container runtime is down"),
+ 					LastHeartbeatTime: unversioned.Time{},
+ 					LastTransitionTime: unversioned.Time{},
+ 				},
+@@ -3023,6 +3039,11 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
+ 		updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
+ 	}
+ 
++	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
++	if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
++		t.Errorf("unexpected node condition order. NodeReady should be last.")
++	}
++
+ 	if !reflect.DeepEqual(expectedNode, updatedNode) {
+ 		t.Errorf("unexpected objects: %s", util.ObjectDiff(expectedNode, updatedNode))
+ 	}
+-- 
+2.3.8 (Apple Git-58)
+
diff --git a/sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/files/0002-explicitly-check-Ready-condition-in-validate-cluster.patch b/sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/files/0002-explicitly-check-Ready-condition-in-validate-cluster.patch
new file mode 100644
index 0000000000..01ee7542aa
--- /dev/null
+++ b/sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/files/0002-explicitly-check-Ready-condition-in-validate-cluster.patch
@@ -0,0 +1,25 @@
+From 1fdd876287ed3454df939d15602d6f8037ff3156 Mon Sep 17 00:00:00 2001
+From: Aaron Levy
+Date: Thu, 19 Nov 2015 19:06:39 -0800
+Subject: [PATCH 2/3] explicitly check "Ready" condition in validate-cluster
+
+---
+ cluster/validate-cluster.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/cluster/validate-cluster.sh b/cluster/validate-cluster.sh
+index 7b26295..9756439 100755
+--- a/cluster/validate-cluster.sh
++++ b/cluster/validate-cluster.sh
+@@ -40,7 +40,7 @@ while true; do
+   # Suppress errors from kubectl output because during cluster bootstrapping
+   # for clusters where the master node is registered, the apiserver will become
+   # available and then get restarted as the kubelet configures the docker bridge.
+-  nodes_status=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o template --template='{{range .items}}{{with index .status.conditions 0}}{{.type}}:{{.status}},{{end}}{{end}}' --api-version=v1) || true
++  nodes_status=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o template --template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}}{{.type}}:{{.status}},{{end}}{{end}}{{end}}' --api-version=v1) || true
+   found=$(echo "${nodes_status}" | tr "," "\n" | grep -c 'Ready:') || true
+   ready=$(echo "${nodes_status}" | tr "," "\n" | grep -c 'Ready:True') || true
+ 
+-- 
+2.3.8 (Apple Git-58)
+
diff --git a/sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/files/0003-kubelet-check-node-condition-by-type-rather-than-by-.patch b/sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/files/0003-kubelet-check-node-condition-by-type-rather-than-by-.patch
new file mode 100644
index 0000000000..9adc2718d7
--- /dev/null
+++ b/sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/files/0003-kubelet-check-node-condition-by-type-rather-than-by-.patch
@@ -0,0 +1,33 @@
+From 26728c3a424fcd6ac3a3c23963bab3beac388ae6 Mon Sep 17 00:00:00 2001
+From: Aaron Levy
+Date: Mon, 30 Nov 2015 19:02:12 -0800
+Subject: [PATCH 3/3] kubelet: check node condition by type rather than by
+ index
+
+---
+ pkg/kubelet/kubelet_test.go | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
+index eed55df..7777814 100644
+--- a/pkg/kubelet/kubelet_test.go
++++ b/pkg/kubelet/kubelet_test.go
+@@ -2769,9 +2769,12 @@ func TestDockerRuntimeVersion(t *testing.T) {
+ 	if !ok {
+ 		t.Errorf("unexpected object type")
+ 	}
+-	if updatedNode.Status.Conditions[0].Reason != "KubeletNotReady" &&
+-		!strings.Contains(updatedNode.Status.Conditions[0].Message, "container runtime version is older than") {
+-		t.Errorf("unexpect NodeStatus due to container runtime version")
++
++	for _, cond := range updatedNode.Status.Conditions {
++		if cond.Type == api.NodeReady && cond.Reason != "KubeletNotReady" &&
++			!strings.Contains(cond.Message, "container runtime version is older than") {
++			t.Errorf("unexpect NodeStatus due to container runtime version")
++		}
+ 	}
+ }
+ 
+-- 
+2.3.8 (Apple Git-58)
+
diff --git a/sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/kubelet-1.0.7.ebuild b/sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/kubelet-1.1.2.ebuild
similarity index 79%
rename from sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/kubelet-1.0.7.ebuild
rename to sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/kubelet-1.1.2.ebuild
index bc14103c9d..5ffbe6470a 100644
--- a/sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/kubelet-1.0.7.ebuild
+++ b/sdk_container/src/third_party/coreos-overlay/app-admin/kubelet/kubelet-1.1.2.ebuild
@@ -22,7 +22,9 @@ DEPEND="dev-lang/go"
 RDEPEND="net-misc/socat"
 
 src_prepare() {
-	epatch "${FILESDIR}/0001-Plumb-linker-flags-through-from-the-Makefile.patch"
+	epatch "${FILESDIR}/0001-kubelet-report-NodeReady-last-in-status-list.patch"
+	epatch "${FILESDIR}/0002-explicitly-check-Ready-condition-in-validate-cluster.patch"
+	epatch "${FILESDIR}/0003-kubelet-check-node-condition-by-type-rather-than-by-.patch"
 
 	if gcc-specs-pie; then
 		append-ldflags -nopie