From 5329b847b4743be83af936f188373b0a5098ec47 Mon Sep 17 00:00:00 2001 From: Patrick Baxter Date: Fri, 2 Dec 2016 16:27:56 -0800 Subject: [PATCH 1/5] v1.5.1 addon updates --- multi-node/generic/controller-install.sh | 130 +++++++++++++++++------ single-node/user-data | 129 ++++++++++++++++------ 2 files changed, 199 insertions(+), 60 deletions(-) diff --git a/multi-node/generic/controller-install.sh b/multi-node/generic/controller-install.sh index a8565168a5..af60226c8a 100644 --- a/multi-node/generic/controller-install.sh +++ b/multi-node/generic/controller-install.sh @@ -382,37 +382,38 @@ spec: EOF fi - local TEMPLATE=/srv/kubernetes/manifests/kube-dns-rc.yaml + local TEMPLATE=/srv/kubernetes/manifests/kube-dns-de.yaml if [ ! -f $TEMPLATE ]; then echo "TEMPLATE: $TEMPLATE" mkdir -p $(dirname $TEMPLATE) cat << EOF > $TEMPLATE -apiVersion: v1 -kind: ReplicationController +apiVersion: extensions/v1beta1 +kind: Deployment metadata: - name: kube-dns-v20 + name: kube-dns namespace: kube-system labels: k8s-app: kube-dns - version: v20 kubernetes.io/cluster-service: "true" spec: - replicas: 1 + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 0 selector: - k8s-app: kube-dns - version: v20 + matchLabels: + k8s-app: kube-dns template: metadata: labels: k8s-app: kube-dns - version: v20 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' spec: containers: - name: kubedns - image: gcr.io/google_containers/kubedns-amd64:1.8 + image: gcr.io/google_containers/kubedns-amd64:1.9 resources: limits: memory: 170Mi @@ -438,6 +439,13 @@ spec: args: - --domain=cluster.local. - --dns-port=10053 + - --config-map=kube-dns + # This should be set to v=2 only after the new image (cut from 1.5) has + # been released, otherwise we will flood the logs. + - --v=2 + env: + - name: PROMETHEUS_PORT + value: "10055" ports: - containerPort: 10053 name: dns-local @@ -445,6 +453,9 @@ spec: - containerPort: 10053 name: dns-tcp-local protocol: TCP + - containerPort: 10055 + name: metrics + protocol: TCP - name: dnsmasq image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4 livenessProbe: @@ -468,6 +479,32 @@ spec: - containerPort: 53 name: dns-tcp protocol: TCP + # see: https://github.com/kubernetes/kubernetes/issues/29055 for details + resources: + requests: + cpu: 150m + memory: 10Mi + - name: dnsmasq-metrics + image: gcr.io/google_containers/dnsmasq-metrics-amd64:1.0 + livenessProbe: + httpGet: + path: /metrics + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - --v=2 + - --logtostderr + ports: + - containerPort: 10054 + name: metrics + protocol: TCP + resources: + requests: + memory: 10Mi - name: healthz image: gcr.io/google_containers/exechealthz-amd64:1.2 resources: @@ -487,6 +524,48 @@ spec: - containerPort: 8080 protocol: TCP dnsPolicy: Default + +EOF + fi + + local TEMPLATE=/srv/kubernetes/manifests/kube-dns-autoscaler-de.yaml + if [ ! 
-f $TEMPLATE ]; then + echo "TEMPLATE: $TEMPLATE" + mkdir -p $(dirname $TEMPLATE) + cat << EOF > $TEMPLATE +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: kube-dns-autoscaler + namespace: kube-system + labels: + k8s-app: kube-dns-autoscaler + kubernetes.io/cluster-service: "true" +spec: + template: + metadata: + labels: + k8s-app: kube-dns-autoscaler + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' + spec: + containers: + - name: autoscaler + image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0 + resources: + requests: + cpu: "20m" + memory: "10Mi" + command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=kube-dns-autoscaler + - --mode=linear + - --target=Deployment/kube-dns + - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"min":1}} + - --logtostderr=true + - --v=2 EOF fi @@ -557,14 +636,6 @@ spec: scheme: HTTP initialDelaySeconds: 180 timeoutSeconds: 5 - resources: - # keep request = limit to keep this container in guaranteed class - limits: - cpu: 80m - memory: 200Mi - requests: - cpu: 80m - memory: 200Mi command: - /heapster - --source=kubernetes.summary_api:'' @@ -622,38 +693,36 @@ spec: EOF fi - local TEMPLATE=/srv/kubernetes/manifests/kube-dashboard-rc.yaml + local TEMPLATE=/srv/kubernetes/manifests/kube-dashboard-de.yaml if [ ! -f $TEMPLATE ]; then echo "TEMPLATE: $TEMPLATE" mkdir -p $(dirname $TEMPLATE) cat << EOF > $TEMPLATE -apiVersion: v1 -kind: ReplicationController +apiVersion: extensions/v1beta1 +kind: Deployment metadata: - name: kubernetes-dashboard-v1.4.1 + name: kubernetes-dashboard namespace: kube-system labels: k8s-app: kubernetes-dashboard - version: v1.4.1 kubernetes.io/cluster-service: "true" spec: - replicas: 1 selector: - k8s-app: kubernetes-dashboard + matchLabels: + k8s-app: kubernetes-dashboard template: metadata: labels: k8s-app: kubernetes-dashboard - version: v1.4.1 - kubernetes.io/cluster-service: "true" annotations: scheduler.alpha.kubernetes.io/critical-pod: '' scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' spec: containers: - name: kubernetes-dashboard - image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.4.1 + image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.0 resources: + # keep request = limit to keep this container in guaranteed class limits: cpu: 100m memory: 50Mi @@ -954,13 +1023,14 @@ function start_addons { echo echo "K8S: DNS addon" - curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-rc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/replicationcontrollers" > /dev/null + curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-de.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" > /dev/null curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" > /dev/null + curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-autoscaler-de.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" > /dev/null echo "K8S: Heapster addon" curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat 
/srv/kubernetes/manifests/heapster-de.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" > /dev/null curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/heapster-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" > /dev/null echo "K8S: Dashboard addon" - curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dashboard-rc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/replicationcontrollers" > /dev/null + curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dashboard-de.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" > /dev/null curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dashboard-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" > /dev/null } diff --git a/single-node/user-data b/single-node/user-data index e9ca3d2816..3504ccbaa3 100644 --- a/single-node/user-data +++ b/single-node/user-data @@ -371,37 +371,38 @@ spec: EOF fi - local TEMPLATE=/srv/kubernetes/manifests/kube-dns-rc.yaml + local TEMPLATE=/srv/kubernetes/manifests/kube-dns-de.yaml if [ ! -f $TEMPLATE ]; then echo "TEMPLATE: $TEMPLATE" mkdir -p $(dirname $TEMPLATE) cat << EOF > $TEMPLATE -apiVersion: v1 -kind: ReplicationController +apiVersion: extensions/v1beta1 +kind: Deployment metadata: - name: kube-dns-v20 + name: kube-dns namespace: kube-system labels: k8s-app: kube-dns - version: v20 kubernetes.io/cluster-service: "true" spec: - replicas: 1 + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 0 selector: - k8s-app: kube-dns - version: v20 + matchLabels: + k8s-app: kube-dns template: metadata: labels: k8s-app: kube-dns - version: v20 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' spec: containers: - name: kubedns - image: gcr.io/google_containers/kubedns-amd64:1.8 + image: gcr.io/google_containers/kubedns-amd64:1.9 resources: limits: memory: 170Mi @@ -427,6 +428,13 @@ spec: args: - --domain=cluster.local. - --dns-port=10053 + - --config-map=kube-dns + # This should be set to v=2 only after the new image (cut from 1.5) has + # been released, otherwise we will flood the logs. + - --v=2 + env: + - name: PROMETHEUS_PORT + value: "10055" ports: - containerPort: 10053 name: dns-local @@ -434,6 +442,9 @@ spec: - containerPort: 10053 name: dns-tcp-local protocol: TCP + - containerPort: 10055 + name: metrics + protocol: TCP - name: dnsmasq image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4 livenessProbe: @@ -457,6 +468,32 @@ spec: - containerPort: 53 name: dns-tcp protocol: TCP + # see: https://github.com/kubernetes/kubernetes/issues/29055 for details + resources: + requests: + cpu: 150m + memory: 10Mi + - name: dnsmasq-metrics + image: gcr.io/google_containers/dnsmasq-metrics-amd64:1.0 + livenessProbe: + httpGet: + path: /metrics + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - --v=2 + - --logtostderr + ports: + - containerPort: 10054 + name: metrics + protocol: TCP + resources: + requests: + memory: 10Mi - name: healthz image: gcr.io/google_containers/exechealthz-amd64:1.2 resources: @@ -479,6 +516,47 @@ spec: EOF fi + local TEMPLATE=/srv/kubernetes/manifests/kube-dns-autoscaler-de.yaml + if [ ! 
-f $TEMPLATE ]; then + echo "TEMPLATE: $TEMPLATE" + mkdir -p $(dirname $TEMPLATE) + cat << EOF > $TEMPLATE +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: kube-dns-autoscaler + namespace: kube-system + labels: + k8s-app: kube-dns-autoscaler + kubernetes.io/cluster-service: "true" +spec: + template: + metadata: + labels: + k8s-app: kube-dns-autoscaler + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' + spec: + containers: + - name: autoscaler + image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0 + resources: + requests: + cpu: "20m" + memory: "10Mi" + command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=kube-dns-autoscaler + - --mode=linear + - --target=Deployment/kube-dns + - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"min":1}} + - --logtostderr=true + - --v=2 +EOF + fi + local TEMPLATE=/srv/kubernetes/manifests/kube-dns-svc.yaml if [ ! -f $TEMPLATE ]; then echo "TEMPLATE: $TEMPLATE" @@ -546,14 +624,6 @@ spec: scheme: HTTP initialDelaySeconds: 180 timeoutSeconds: 5 - resources: - # keep request = limit to keep this container in guaranteed class - limits: - cpu: 80m - memory: 200Mi - requests: - cpu: 80m - memory: 200Mi command: - /heapster - --source=kubernetes.summary_api:'' @@ -611,38 +681,36 @@ spec: EOF fi - local TEMPLATE=/srv/kubernetes/manifests/kube-dashboard-rc.yaml + local TEMPLATE=/srv/kubernetes/manifests/kube-dashboard-de.yaml if [ ! -f $TEMPLATE ]; then echo "TEMPLATE: $TEMPLATE" mkdir -p $(dirname $TEMPLATE) cat << EOF > $TEMPLATE -apiVersion: v1 -kind: ReplicationController +apiVersion: extensions/v1beta1 +kind: Deployment metadata: - name: kubernetes-dashboard-v1.4.1 + name: kubernetes-dashboard namespace: kube-system labels: k8s-app: kubernetes-dashboard - version: v1.4.1 kubernetes.io/cluster-service: "true" spec: - replicas: 1 selector: - k8s-app: kubernetes-dashboard + matchLabels: + k8s-app: kubernetes-dashboard template: metadata: labels: k8s-app: kubernetes-dashboard - version: v1.4.1 - kubernetes.io/cluster-service: "true" annotations: scheduler.alpha.kubernetes.io/critical-pod: '' scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' spec: containers: - name: kubernetes-dashboard - image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.4.1 + image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.0 resources: + # keep request = limit to keep this container in guaranteed class limits: cpu: 100m memory: 50Mi @@ -941,13 +1009,14 @@ function start_addons { done echo echo "K8S: DNS addon" - curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-rc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/replicationcontrollers" > /dev/null + curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-de.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" > /dev/null curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" > /dev/null + curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-autoscaler-de.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" > /dev/null echo 
"K8S: Heapster addon" curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/heapster-de.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" > /dev/null curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/heapster-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" > /dev/null echo "K8S: Dashboard addon" - curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dashboard-rc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/replicationcontrollers" > /dev/null + curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dashboard-de.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" > /dev/null curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dashboard-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" > /dev/null } From d134da83bbc4ab02c4d78591742fc0637c64f28d Mon Sep 17 00:00:00 2001 From: Patrick Baxter Date: Mon, 12 Dec 2016 18:20:32 -0800 Subject: [PATCH 2/5] contrib: update conformance for 1.5 --- contrib/conformance-test.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/conformance-test.sh b/contrib/conformance-test.sh index 436d41becd..120aa290b6 100755 --- a/contrib/conformance-test.sh +++ b/contrib/conformance-test.sh @@ -38,10 +38,10 @@ ssh ${SSH_OPTS} -i ${ssh_key} -p ${ssh_port} core@${ssh_host} \ ssh ${SSH_OPTS} -i ${ssh_key} -p ${ssh_port} core@${ssh_host} \ "[[ -f /home/core/kubeconfig ]] || echo '${kubeconfig}' > /home/core/kubeconfig" -# Init steps necessary to run conformance in docker://golang:1.6.3 container +# Init steps necessary to run conformance in docker://golang:1.7.4 container INIT="apt-get update && apt-get install -y rsync && go get -u github.com/jteeuwen/go-bindata/go-bindata" -TEST_FLAGS="-v --test -check_version_skew=false -check_node_count=${CHECK_NODE_COUNT} --test_args=\"ginkgo.focus='\[Conformance\]'\"" +TEST_FLAGS="-v --test -check_version_skew=false --test_args=\"ginkgo.focus='\[Conformance\]'\"" CONFORMANCE=$(echo \ "cd /go/src/k8s.io/kubernetes && " \ @@ -57,6 +57,6 @@ RKT_OPTS=$(echo \ "--mount volume=kc,target=/kubeconfig " \ "--mount volume=k8s,target=/go/src/k8s.io/kubernetes") -CMD="sudo rkt run --net=host --insecure-options=image ${RKT_OPTS} docker://golang:1.6.3 --exec /bin/bash -- -c \"${INIT} && ${CONFORMANCE}\"" +CMD="sudo rkt run --net=host --insecure-options=image ${RKT_OPTS} docker://golang:1.7.4 --exec /bin/bash -- -c \"${INIT} && ${CONFORMANCE}\"" ssh ${SSH_OPTS} -i ${ssh_key} -p ${ssh_port} core@${ssh_host} "${CMD}" From c6f9dd0910c4a9bc771160317e105b7f1de281b4 Mon Sep 17 00:00:00 2001 From: Patrick Baxter Date: Wed, 14 Dec 2016 13:34:37 -0800 Subject: [PATCH 3/5] all: bump to v1.5.1+coreos.0 --- Documentation/configure-kubectl.md | 4 ++-- Documentation/deploy-addons.md | 2 +- Documentation/deploy-master.md | 12 ++++++------ Documentation/deploy-workers.md | 4 ++-- Documentation/kubelet-wrapper.md | 10 +++++----- Documentation/kubernetes-on-vagrant-single.md | 4 ++-- Documentation/kubernetes-on-vagrant.md | 4 ++-- Documentation/kubernetes-upgrade.md | 2 +- contrib/bump-version.sh | 4 ++-- contrib/conformance-test.sh | 2 +- multi-node/generic/controller-install.sh | 2 +- multi-node/generic/worker-install.sh | 2 +- single-node/user-data | 2 +- 13 files changed, 27 
insertions(+), 27 deletions(-) diff --git a/Documentation/configure-kubectl.md b/Documentation/configure-kubectl.md index d135a3c1b2..275e89f11e 100644 --- a/Documentation/configure-kubectl.md +++ b/Documentation/configure-kubectl.md @@ -15,13 +15,13 @@ Download `kubectl` from the Kubernetes release artifact site with the `curl` too The linux `kubectl` binary can be fetched with a command like: ```sh -$ curl -O https://storage.googleapis.com/kubernetes-release/release/v1.4.6/bin/linux/amd64/kubectl +$ curl -O https://storage.googleapis.com/kubernetes-release/release/v1.5.1/bin/linux/amd64/kubectl ``` On an OS X workstation, replace `linux` in the URL above with `darwin`: ```sh -$ curl -O https://storage.googleapis.com/kubernetes-release/release/v1.4.6/bin/darwin/amd64/kubectl +$ curl -O https://storage.googleapis.com/kubernetes-release/release/v1.5.1/bin/darwin/amd64/kubectl ``` After downloading the binary, ensure it is executable and move it into your `PATH`: diff --git a/Documentation/deploy-addons.md b/Documentation/deploy-addons.md index a6b182c933..21221734e3 100644 --- a/Documentation/deploy-addons.md +++ b/Documentation/deploy-addons.md @@ -142,7 +142,7 @@ spec: *Note:* The above YAML definition is based on the upstream DNS addon in the [Kubernetes addon folder][k8s-dns-addon]. -[k8s-dns-addon]: https://github.com/kubernetes/kubernetes/tree/v1.4.6/cluster/addons/dns +[k8s-dns-addon]: https://github.com/kubernetes/kubernetes/tree/v1.5.1/cluster/addons/dns This single YAML file is actually creating 2 different Kubernetes objects, separated by `---`. diff --git a/Documentation/deploy-master.md b/Documentation/deploy-master.md index d14f68d72c..94473df2bb 100644 --- a/Documentation/deploy-master.md +++ b/Documentation/deploy-master.md @@ -123,7 +123,7 @@ Note that the kubelet running on a master node may log repeated attempts to post * Replace `${ADVERTISE_IP}` with this node's publicly routable IP. * Replace `${DNS_SERVICE_IP}` -* Replace `${K8S_VER}` This will map to: `quay.io/coreos/hyperkube:${K8S_VER}` release, e.g. `v1.4.6_coreos.0`. +* Replace `${K8S_VER}` This will map to: `quay.io/coreos/hyperkube:${K8S_VER}` release, e.g. `v1.5.1_coreos.0`. 
* If using Calico for network policy - Replace `${NETWORK_PLUGIN}` with `cni` - Add the following to `RKT_OPS=` @@ -194,7 +194,7 @@ spec: hostNetwork: true containers: - name: kube-apiserver - image: quay.io/coreos/hyperkube:v1.4.6_coreos.0 + image: quay.io/coreos/hyperkube:v1.5.1_coreos.0 command: - /hyperkube - apiserver @@ -260,7 +260,7 @@ spec: hostNetwork: true containers: - name: kube-proxy - image: quay.io/coreos/hyperkube:v1.4.6_coreos.0 + image: quay.io/coreos/hyperkube:v1.5.1_coreos.0 command: - /hyperkube - proxy @@ -299,7 +299,7 @@ spec: hostNetwork: true containers: - name: kube-controller-manager - image: quay.io/coreos/hyperkube:v1.4.6_coreos.0 + image: quay.io/coreos/hyperkube:v1.5.1_coreos.0 command: - /hyperkube - controller-manager @@ -352,7 +352,7 @@ spec: hostNetwork: true containers: - name: kube-scheduler - image: quay.io/coreos/hyperkube:v1.4.6_coreos.0 + image: quay.io/coreos/hyperkube:v1.5.1_coreos.0 command: - /hyperkube - scheduler @@ -640,7 +640,7 @@ A successful response should look something like: { "major": "1", "minor": "4", - "gitVersion": "v1.4.6+coreos.0", + "gitVersion": "v1.5.1+coreos.0", "gitCommit": "ec2b52fabadf824a42b66b6729fe4cff2c62af8c", "gitTreeState": "clean", "buildDate": "2016-11-14T19:42:00Z", diff --git a/Documentation/deploy-workers.md b/Documentation/deploy-workers.md index 4c6156563e..3d485f9e2b 100644 --- a/Documentation/deploy-workers.md +++ b/Documentation/deploy-workers.md @@ -105,7 +105,7 @@ Create `/etc/systemd/system/kubelet.service` and substitute the following variab * Replace `${MASTER_HOST}` * Replace `${ADVERTISE_IP}` with this node's publicly routable IP. * Replace `${DNS_SERVICE_IP}` -* Replace `${K8S_VER}` This will map to: `quay.io/coreos/hyperkube:${K8S_VER}` release, e.g. `v1.4.6_coreos.0`. +* Replace `${K8S_VER}` This will map to: `quay.io/coreos/hyperkube:${K8S_VER}` release, e.g. `v1.5.1_coreos.0`. 
* If using Calico for network policy - Replace `${NETWORK_PLUGIN}` with `cni` - Add the following to `RKT_OPS=` @@ -173,7 +173,7 @@ spec: hostNetwork: true containers: - name: kube-proxy - image: quay.io/coreos/hyperkube:v1.4.6_coreos.0 + image: quay.io/coreos/hyperkube:v1.5.1_coreos.0 command: - /hyperkube - proxy diff --git a/Documentation/kubelet-wrapper.md b/Documentation/kubelet-wrapper.md index 2ee070adb1..667937c0dc 100644 --- a/Documentation/kubelet-wrapper.md +++ b/Documentation/kubelet-wrapper.md @@ -19,7 +19,7 @@ An example systemd kubelet.service file which takes advantage of the kubelet-wra ```ini [Service] -Environment=KUBELET_VERSION=v1.4.6_coreos.0 +Environment=KUBELET_VERSION=v1.5.1_coreos.0 Environment="RKT_OPTS=--uuid-file-save=/var/run/kubelet-pod.uuid" ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid ExecStart=/usr/lib/coreos/kubelet-wrapper \ @@ -40,7 +40,7 @@ Mount the host's `/etc/resolv.conf` file directly into the container in order to ```ini [Service] -Environment=KUBELET_VERSION=v1.4.6_coreos.0 +Environment=KUBELET_VERSION=v1.5.1_coreos.0 Environment="RKT_OPTS=--volume=resolv,kind=host,source=/etc/resolv.conf \ --mount volume=resolv,target=/etc/resolv.conf \ --uuid-file-save=/var/run/kubelet-pod.uuid" @@ -58,7 +58,7 @@ Pods running in your cluster can reference remote storage volumes located on an ```ini [Service] -Environment=KUBELET_VERSION=v1.4.6_coreos.0 +Environment=KUBELET_VERSION=v1.5.1_coreos.0 Environment="RKT_OPTS=--volume iscsiadm,kind=host,source=/usr/sbin/iscsiadm \ --mount volume=iscsiadm,target=/usr/sbin/iscsiadm \ --uuid-file-save=/var/run/kubelet-pod.uuid" @@ -76,7 +76,7 @@ Pods using the [rbd volume plugin][rbd-example] to consume data from ceph must e ```ini [Service] -Environment=KUBELET_VERSION=v1.4.6_coreos.0 +Environment=KUBELET_VERSION=v1.5.1_coreos.0 Environment="RKT_OPTS=--volume modprobe,kind=host,source=/usr/sbin/modprobe \ --mount volume=modprobe,target=/usr/sbin/modprobe \ --volume lib-modules,kind=host,source=/lib/modules \ @@ -100,7 +100,7 @@ For example: ```ini [Service] -Environment=KUBELET_VERSION=v1.4.6_coreos.0 +Environment=KUBELET_VERSION=v1.5.1_coreos.0 ... 
ExecStart=/opt/bin/kubelet-wrapper \ --api-servers=http://127.0.0.1:8080 \ diff --git a/Documentation/kubernetes-on-vagrant-single.md b/Documentation/kubernetes-on-vagrant-single.md index 19ce01661b..bb414a098d 100644 --- a/Documentation/kubernetes-on-vagrant-single.md +++ b/Documentation/kubernetes-on-vagrant-single.md @@ -19,13 +19,13 @@ Navigate to the [Vagrant downloads page][vagrant-downloads] and grab the appropr The linux `kubectl` binary can be fetched with a command like: ```sh -$ curl -O https://storage.googleapis.com/kubernetes-release/release/v1.4.6/bin/linux/amd64/kubectl +$ curl -O https://storage.googleapis.com/kubernetes-release/release/v1.5.1/bin/linux/amd64/kubectl ``` On an OS X workstation, replace `linux` in the URL above with `darwin`: ```sh -$ curl -O https://storage.googleapis.com/kubernetes-release/release/v1.4.6/bin/darwin/amd64/kubectl +$ curl -O https://storage.googleapis.com/kubernetes-release/release/v1.5.1/bin/darwin/amd64/kubectl ``` After downloading the binary, ensure it is executable and move it into your PATH: diff --git a/Documentation/kubernetes-on-vagrant.md b/Documentation/kubernetes-on-vagrant.md index 54d6395847..9105d8cc32 100644 --- a/Documentation/kubernetes-on-vagrant.md +++ b/Documentation/kubernetes-on-vagrant.md @@ -18,13 +18,13 @@ Navigate to the [Vagrant downloads page][vagrant-downloads] and grab the appropr The linux `kubectl` binary can be fetched with a command like: ```sh -$ curl -O https://storage.googleapis.com/kubernetes-release/release/v1.4.6/bin/linux/amd64/kubectl +$ curl -O https://storage.googleapis.com/kubernetes-release/release/v1.5.1/bin/linux/amd64/kubectl ``` On an OS X workstation, replace `linux` in the URL above with `darwin`: ```sh -$ curl -O https://storage.googleapis.com/kubernetes-release/release/v1.4.6/bin/darwin/amd64/kubectl +$ curl -O https://storage.googleapis.com/kubernetes-release/release/v1.5.1/bin/darwin/amd64/kubectl ``` After downloading the binary, ensure it is executable and move it into your PATH: diff --git a/Documentation/kubernetes-upgrade.md b/Documentation/kubernetes-upgrade.md index 537641305c..9c0937f73b 100644 --- a/Documentation/kubernetes-upgrade.md +++ b/Documentation/kubernetes-upgrade.md @@ -15,7 +15,7 @@ For example, modifying the `KUBELET_VERSION` environment variable in the followi **/etc/systemd/system/kubelet.service** ``` -Environment=KUBELET_VERSION=v1.4.6_coreos.0 +Environment=KUBELET_VERSION=v1.5.1_coreos.0 ExecStart=/usr/lib/coreos/kubelet-wrapper \ --api-servers=https://master [...] 
``` diff --git a/contrib/bump-version.sh b/contrib/bump-version.sh index 7acd4590f2..fa95d7f334 100755 --- a/contrib/bump-version.sh +++ b/contrib/bump-version.sh @@ -6,11 +6,11 @@ if [ $# -ne 1 ] || [ `expr $1 : ".*_.*"` == 0 ]; then echo "USAGE: $0 " - echo " example: $0 'v1.4.6_coreos.0'" + echo " example: $0 'v1.5.1_coreos.0'" exit 1 fi -CURRENT_VERSION=${CURRENT_VERSION:-"v1.4.6_coreos.0"} +CURRENT_VERSION=${CURRENT_VERSION:-"v1.5.1_coreos.0"} TARGET_VERSION=${1} CURRENT_VERSION_BASE=${CURRENT_VERSION%%_*} diff --git a/contrib/conformance-test.sh b/contrib/conformance-test.sh index 120aa290b6..6311c2bd66 100755 --- a/contrib/conformance-test.sh +++ b/contrib/conformance-test.sh @@ -3,7 +3,7 @@ set -euo pipefail CHECK_NODE_COUNT=${CHECK_NODE_COUNT:-true} CONFORMANCE_REPO=${CONFORMANCE_REPO:-github.com/coreos/kubernetes} -CONFORMANCE_VERSION=${CONFORMANCE_VERSION:-v1.4.6+coreos.0} +CONFORMANCE_VERSION=${CONFORMANCE_VERSION:-v1.5.1+coreos.0} SSH_OPTS=${SSH_OPTS:-} usage() { diff --git a/multi-node/generic/controller-install.sh b/multi-node/generic/controller-install.sh index af60226c8a..e561c6a1cc 100644 --- a/multi-node/generic/controller-install.sh +++ b/multi-node/generic/controller-install.sh @@ -5,7 +5,7 @@ set -e export ETCD_ENDPOINTS= # Specify the version (vX.Y.Z) of Kubernetes assets to deploy -export K8S_VER=v1.4.6_coreos.0 +export K8S_VER=v1.5.1_coreos.0 # Hyperkube image repository to use. export HYPERKUBE_IMAGE_REPO=quay.io/coreos/hyperkube diff --git a/multi-node/generic/worker-install.sh b/multi-node/generic/worker-install.sh index 398cf0f516..850f09be5b 100644 --- a/multi-node/generic/worker-install.sh +++ b/multi-node/generic/worker-install.sh @@ -10,7 +10,7 @@ export ETCD_ENDPOINTS= export CONTROLLER_ENDPOINT= # Specify the version (vX.Y.Z) of Kubernetes assets to deploy -export K8S_VER=v1.4.6_coreos.0 +export K8S_VER=v1.5.1_coreos.0 # Hyperkube image repository to use. export HYPERKUBE_IMAGE_REPO=quay.io/coreos/hyperkube diff --git a/single-node/user-data b/single-node/user-data index 3504ccbaa3..8d1d255476 100644 --- a/single-node/user-data +++ b/single-node/user-data @@ -5,7 +5,7 @@ set -e export ETCD_ENDPOINTS="http://127.0.0.1:2379" # Specify the version (vX.Y.Z) of Kubernetes assets to deploy -export K8S_VER=v1.4.6_coreos.0 +export K8S_VER=v1.5.1_coreos.0 # Hyperkube image repository to use. 
export HYPERKUBE_IMAGE_REPO=quay.io/coreos/hyperkube From 34023d1ea3e149efdd8864cd4b2ea7b5138bde7f Mon Sep 17 00:00:00 2001 From: Patrick Baxter Date: Mon, 12 Dec 2016 18:36:32 -0800 Subject: [PATCH 4/5] set apiserver flag --anonymous-auth=false --- Documentation/deploy-master.md | 1 + multi-node/generic/controller-install.sh | 1 + single-node/user-data | 1 + 3 files changed, 3 insertions(+) diff --git a/Documentation/deploy-master.md b/Documentation/deploy-master.md index 94473df2bb..1bbae194b0 100644 --- a/Documentation/deploy-master.md +++ b/Documentation/deploy-master.md @@ -210,6 +210,7 @@ spec: - --client-ca-file=/etc/kubernetes/ssl/ca.pem - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem - --runtime-config=extensions/v1beta1/networkpolicies=true + - --anonymous-auth=false livenessProbe: httpGet: host: 127.0.0.1 diff --git a/multi-node/generic/controller-install.sh b/multi-node/generic/controller-install.sh index e561c6a1cc..9ece13a892 100644 --- a/multi-node/generic/controller-install.sh +++ b/multi-node/generic/controller-install.sh @@ -269,6 +269,7 @@ spec: - --client-ca-file=/etc/kubernetes/ssl/ca.pem - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem - --runtime-config=extensions/v1beta1/networkpolicies=true + - --anonymous-auth=false livenessProbe: httpGet: host: 127.0.0.1 diff --git a/single-node/user-data b/single-node/user-data index 8d1d255476..a2c933ff51 100644 --- a/single-node/user-data +++ b/single-node/user-data @@ -260,6 +260,7 @@ spec: - --client-ca-file=/etc/kubernetes/ssl/ca.pem - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem - --runtime-config=extensions/v1beta1/networkpolicies=true + - --anonymous-auth=false livenessProbe: httpGet: host: 127.0.0.1 From ffd2e78efdb8e9dfa1963f27a635b7a87638dba7 Mon Sep 17 00:00:00 2001 From: Patrick Baxter Date: Wed, 14 Dec 2016 14:32:56 -0800 Subject: [PATCH 5/5] multi-node/generic: remove grave accents --- multi-node/generic/controller-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/multi-node/generic/controller-install.sh b/multi-node/generic/controller-install.sh index 9ece13a892..927524a4b9 100644 --- a/multi-node/generic/controller-install.sh +++ b/multi-node/generic/controller-install.sh @@ -900,7 +900,7 @@ spec: # Choose the backend to use. - name: CALICO_NETWORKING_BACKEND value: "none" - # Disable file logging so `kubectl logs` works. + # Disable file logging so 'kubectl logs' works. - name: CALICO_DISABLE_FILE_LOGGING value: "true" - name: NO_DEFAULT_POOLS
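
As a quick smoke test after booting a cluster from these updated scripts — a minimal sketch, not part of the patches themselves, assuming `kubectl` is already configured against the cluster as described in Documentation/configure-kubectl.md and that `start_addons` has posted the manifests:

```sh
# The add-ons are now Deployments rather than ReplicationControllers, so the
# kube-dns, kube-dns-autoscaler, heapster, and kubernetes-dashboard add-ons
# should all appear in the kube-system Deployment listing.
kubectl get deployments --namespace=kube-system

# The kube-dns pod should report four ready containers: kubedns, dnsmasq,
# dnsmasq-metrics, and healthz.
kubectl get pods --namespace=kube-system -l k8s-app=kube-dns

# The server gitVersion reported here should match the v1.5.1+coreos.0 bump.
kubectl version
```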