From 2e31d6adfb2125d3ec3871cb1a287fad58ab3192 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Thu, 16 Jun 2016 22:26:04 -0400 Subject: [PATCH 1/8] Enable petsets --- .../server/bootstrappolicy/infra_sa_policy.go | 60 +++++++++++ pkg/cmd/server/bootstrappolicy/policy.go | 6 ++ pkg/cmd/server/kubernetes/master.go | 7 ++ pkg/cmd/server/start/start_master.go | 6 ++ test/cmd/basicresources.sh | 8 ++ test/extended/setup.sh | 1 + .../bootstrap_cluster_roles.yaml | 100 ++++++++++++++++++ 7 files changed, 188 insertions(+) diff --git a/pkg/cmd/server/bootstrappolicy/infra_sa_policy.go b/pkg/cmd/server/bootstrappolicy/infra_sa_policy.go index 4bbc60d3e68f..4cd02b2cea1b 100644 --- a/pkg/cmd/server/bootstrappolicy/infra_sa_policy.go +++ b/pkg/cmd/server/bootstrappolicy/infra_sa_policy.go @@ -4,6 +4,7 @@ import ( "fmt" kapi "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/extensions" @@ -55,6 +56,9 @@ const ( InfraServiceLoadBalancerControllerServiceAccountName = "service-load-balancer-controller" ServiceLoadBalancerControllerRoleName = "system:service-load-balancer-controller" + InfraPetSetControllerServiceAccountName = "pet-set-controller" + PetSetControllerRoleName = "system:pet-set-controller" + ServiceServingCertServiceAccountName = "service-serving-cert-controller" ServiceServingCertControllerRoleName = "system:service-serving-cert-controller" @@ -710,6 +714,62 @@ func init() { panic(err) } + err = InfraSAs.addServiceAccount( + InfraPetSetControllerServiceAccountName, + authorizationapi.ClusterRole{ + ObjectMeta: kapi.ObjectMeta{ + Name: PetSetControllerRoleName, + }, + Rules: []authorizationapi.PolicyRule{ + // PetSetController.podCache.ListWatch + { + APIGroups: []string{kapi.GroupName}, + Verbs: sets.NewString("list", "watch"), + Resources: sets.NewString("pods"), + }, + // PetSetController.cache.ListWatch + { + APIGroups: []string{apps.GroupName}, + Verbs: sets.NewString("list", "watch"), + Resources: sets.NewString("petsets"), + }, + // PetSetController.petClient + { + APIGroups: []string{apps.GroupName}, + Verbs: sets.NewString("get"), + Resources: sets.NewString("petsets"), + }, + { + APIGroups: []string{apps.GroupName}, + Verbs: sets.NewString("update"), + Resources: sets.NewString("petsets/status"), + }, + // PetSetController.podClient + { + APIGroups: []string{kapi.GroupName}, + Verbs: sets.NewString("get", "create", "delete", "update"), + Resources: sets.NewString("pods"), + }, + // PetSetController.petClient (PVC) + // This is an escalating client and we must admission check the petset + { + APIGroups: []string{kapi.GroupName}, + Verbs: sets.NewString("get", "create"), // future "delete" + Resources: sets.NewString("persistentvolumeclaims"), + }, + // PetSetController.eventRecorder + { + APIGroups: []string{kapi.GroupName}, + Verbs: sets.NewString("create", "update", "patch"), + Resources: sets.NewString("events"), + }, + }, + }, + ) + if err != nil { + panic(err) + } + err = InfraSAs.addServiceAccount( ServiceServingCertServiceAccountName, authorizationapi.ClusterRole{ diff --git a/pkg/cmd/server/bootstrappolicy/policy.go b/pkg/cmd/server/bootstrappolicy/policy.go index 8b598f236c50..897180d463eb 100644 --- a/pkg/cmd/server/bootstrappolicy/policy.go +++ b/pkg/cmd/server/bootstrappolicy/policy.go @@ -222,6 +222,8 @@ func GetBootstrapClusterRoles() []authorizationapi.ClusterRole { "replicasets", "replicasets/scale", "deployments", "deployments/scale").RuleOrDie(), 
authorizationapi.NewRule(read...).Groups(extensionsGroup).Resources("daemonsets").RuleOrDie(), + authorizationapi.NewRule(readWrite...).Groups(appsGroup).Resources("petsets").RuleOrDie(), + authorizationapi.NewRule(readWrite...).Groups(authzGroup).Resources("roles", "rolebindings").RuleOrDie(), authorizationapi.NewRule("create").Groups(authzGroup).Resources("localresourceaccessreviews", "localsubjectaccessreviews").RuleOrDie(), authorizationapi.NewRule(read...).Groups(authzGroup).Resources("policies", "policybindings").RuleOrDie(), @@ -276,6 +278,8 @@ func GetBootstrapClusterRoles() []authorizationapi.ClusterRole { "replicasets", "replicasets/scale", "deployments", "deployments/scale").RuleOrDie(), authorizationapi.NewRule(read...).Groups(extensionsGroup).Resources("daemonsets").RuleOrDie(), + authorizationapi.NewRule(readWrite...).Groups(appsGroup).Resources("petsets").RuleOrDie(), + authorizationapi.NewRule(readWrite...).Groups(buildGroup).Resources("builds", "buildconfigs", "buildconfigs/webhooks").RuleOrDie(), authorizationapi.NewRule(read...).Groups(buildGroup).Resources("builds/log").RuleOrDie(), authorizationapi.NewRule("create").Groups(buildGroup).Resources("buildconfigs/instantiate", "buildconfigs/instantiatebinary", "builds/clone").RuleOrDie(), @@ -322,6 +326,8 @@ func GetBootstrapClusterRoles() []authorizationapi.ClusterRole { "deployments", "deployments/scale").RuleOrDie(), authorizationapi.NewRule(read...).Groups(extensionsGroup).Resources("daemonsets").RuleOrDie(), + authorizationapi.NewRule(read...).Groups(appsGroup).Resources("petsets").RuleOrDie(), + authorizationapi.NewRule(read...).Groups(buildGroup).Resources("builds", "buildconfigs", "buildconfigs/webhooks").RuleOrDie(), authorizationapi.NewRule(read...).Groups(buildGroup).Resources("builds/log").RuleOrDie(), diff --git a/pkg/cmd/server/kubernetes/master.go b/pkg/cmd/server/kubernetes/master.go index b0f8c819a65a..c1ae32f33495 100644 --- a/pkg/cmd/server/kubernetes/master.go +++ b/pkg/cmd/server/kubernetes/master.go @@ -37,6 +37,7 @@ import ( jobcontroller "k8s.io/kubernetes/pkg/controller/job" namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace" nodecontroller "k8s.io/kubernetes/pkg/controller/node" + petsetcontroller "k8s.io/kubernetes/pkg/controller/petset" podautoscalercontroller "k8s.io/kubernetes/pkg/controller/podautoscaler" "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics" replicasetcontroller "k8s.io/kubernetes/pkg/controller/replicaset" @@ -371,6 +372,12 @@ func (c *MasterConfig) RunServiceLoadBalancerController(client *client.Client) { } } +// RunPetSetController starts the PetSet controller +func (c *MasterConfig) RunPetSetController(client *client.Client) { + ps := petsetcontroller.NewPetSetController(c.Informers.Pods().Informer(), client, kctrlmgr.ResyncPeriod(c.ControllerManager)()) + go ps.Run(1, utilwait.NeverStop) +} + func (c *MasterConfig) createSchedulerConfig() (*scheduler.Config, error) { var policy schedulerapi.Policy var configData []byte diff --git a/pkg/cmd/server/start/start_master.go b/pkg/cmd/server/start/start_master.go index 281e6b910b11..5318297fea75 100644 --- a/pkg/cmd/server/start/start_master.go +++ b/pkg/cmd/server/start/start_master.go @@ -598,6 +598,11 @@ func startControllers(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) erro glog.Fatalf("Could not get client for pod gc controller: %v", err) } + _, _, petSetClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPetSetControllerServiceAccountName) + if err != nil { + glog.Fatalf("Could not get 
client for pet set controller: %v", err) + } + namespaceControllerClientConfig, _, namespaceControllerKubeClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraNamespaceControllerServiceAccountName) if err != nil { glog.Fatalf("Could not get client for namespace controller: %v", err) @@ -639,6 +644,7 @@ func startControllers(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) erro kc.RunGCController(gcClient) kc.RunServiceLoadBalancerController(serviceLoadBalancerClient) + kc.RunPetSetController(petSetClient) glog.Infof("Started Kubernetes Controllers") } diff --git a/test/cmd/basicresources.sh b/test/cmd/basicresources.sh index 65d6a8e73233..0525b12ce8ad 100755 --- a/test/cmd/basicresources.sh +++ b/test/cmd/basicresources.sh @@ -164,6 +164,14 @@ os::cmd::expect_success 'oc delete clusterquota/limit-bob' echo "create subcommands: ok" os::test::junit::declare_suite_end +os::test::junit::declare_suite_start "cmd/basicresources/petsets" +os::cmd::expect_success 'oc create -f examples/pets/zookeeper/zookeeper.yaml' +os::cmd::try_until_success 'oc get pods zoo-0' +os::cmd::expect_success 'oc get pvc datadir-zoo-2' +os::cmd::expect_success_and_text 'oc describe petset zoo' 'app=zk' +os::cmd::expect_success 'oc delete -f examples/pets/zookeeper/zookeeper.yaml' +echo "petsets: ok" +os::test::junit::declare_suite_end os::test::junit::declare_suite_start "cmd/basicresources/routes" os::cmd::expect_success 'oc get routes' diff --git a/test/extended/setup.sh b/test/extended/setup.sh index ec0de230de03..43ea857391fa 100644 --- a/test/extended/setup.sh +++ b/test/extended/setup.sh @@ -206,6 +206,7 @@ readonly CONFORMANCE_TESTS=( "\[networking\]\[router\]" "Ensure supplemental groups propagate to docker" "EmptyDir" + "PetSet" "PrivilegedPod should test privileged pod" "Pods should support remote command execution" "Pods should support retrieving logs from the container" diff --git a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml index 3d6b61a220a7..107ebcfda2ca 100644 --- a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml +++ b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml @@ -509,6 +509,20 @@ items: - get - list - watch + - apiGroups: + - apps + attributeRestrictions: null + resources: + - petsets + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch - apiGroups: - "" attributeRestrictions: null @@ -870,6 +884,20 @@ items: - get - list - watch + - apiGroups: + - apps + attributeRestrictions: null + resources: + - petsets + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch - apiGroups: - "" attributeRestrictions: null @@ -1132,6 +1160,15 @@ items: - get - list - watch + - apiGroups: + - apps + attributeRestrictions: null + resources: + - petsets + verbs: + - get + - list + - watch - apiGroups: - "" attributeRestrictions: null @@ -2580,6 +2617,69 @@ items: - deletecollection - get - list +- apiVersion: v1 + kind: ClusterRole + metadata: + creationTimestamp: null + name: system:pet-set-controller + rules: + - apiGroups: + - "" + attributeRestrictions: null + resources: + - pods + verbs: + - list + - watch + - apiGroups: + - apps + attributeRestrictions: null + resources: + - petsets + verbs: + - list + - watch + - apiGroups: + - apps + attributeRestrictions: null + resources: + - petsets + verbs: + - get + - apiGroups: + - apps + attributeRestrictions: null + resources: + - petsets/status + verbs: + - 
update + - apiGroups: + - "" + attributeRestrictions: null + resources: + - pods + verbs: + - create + - delete + - get + - update + - apiGroups: + - "" + attributeRestrictions: null + resources: + - persistentvolumeclaims + verbs: + - create + - get + - apiGroups: + - "" + attributeRestrictions: null + resources: + - events + verbs: + - create + - patch + - update - apiVersion: v1 kind: ClusterRole metadata: From b9e2091fd7acce99cc0401ee1ef8f999df7eb34d Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Fri, 17 Jun 2016 14:12:32 -0400 Subject: [PATCH 2/8] PetSet examples --- examples/pets/README.md | 10 ++ examples/pets/mysql/galera/README.md | 33 +++++ examples/pets/mysql/galera/init/Dockerfile | 29 +++++ examples/pets/mysql/galera/init/Makefile | 27 ++++ examples/pets/mysql/galera/init/install.sh | 50 +++++++ examples/pets/mysql/galera/init/my-galera.cnf | 22 ++++ examples/pets/mysql/galera/init/on-start.sh | 52 ++++++++ examples/pets/mysql/galera/mysql-galera.yaml | 120 +++++++++++++++++ examples/pets/mysql/galera/test.sh | 21 +++ examples/pets/mysql/healthz/Dockerfile | 22 ++++ examples/pets/mysql/healthz/Makefile | 31 +++++ examples/pets/mysql/healthz/README.md | 3 + examples/pets/mysql/healthz/main.go | 96 ++++++++++++++ examples/pets/mysql/healthz/pod.yaml | 19 +++ examples/pets/peer-finder/Dockerfile | 23 ++++ examples/pets/peer-finder/Makefile | 33 +++++ examples/pets/peer-finder/README.md | 15 +++ examples/pets/peer-finder/peer-finder.go | 110 ++++++++++++++++ examples/pets/redis/README.md | 23 ++++ examples/pets/redis/init/Dockerfile | 29 +++++ examples/pets/redis/init/Makefile | 27 ++++ examples/pets/redis/init/install.sh | 64 +++++++++ examples/pets/redis/init/on-start.sh | 49 +++++++ examples/pets/redis/redis.yaml | 115 ++++++++++++++++ examples/pets/redis/test.sh | 19 +++ examples/pets/zookeeper/README.md | 85 ++++++++++++ examples/pets/zookeeper/init/Dockerfile | 29 +++++ examples/pets/zookeeper/init/Makefile | 27 ++++ examples/pets/zookeeper/init/install.sh | 76 +++++++++++ examples/pets/zookeeper/init/on-change.sh | 49 +++++++ examples/pets/zookeeper/init/on-start.sh | 73 +++++++++++ examples/pets/zookeeper/test.sh | 19 +++ examples/pets/zookeeper/zookeeper.yaml | 123 ++++++++++++++++++ 33 files changed, 1523 insertions(+) create mode 100644 examples/pets/README.md create mode 100644 examples/pets/mysql/galera/README.md create mode 100644 examples/pets/mysql/galera/init/Dockerfile create mode 100644 examples/pets/mysql/galera/init/Makefile create mode 100644 examples/pets/mysql/galera/init/install.sh create mode 100644 examples/pets/mysql/galera/init/my-galera.cnf create mode 100755 examples/pets/mysql/galera/init/on-start.sh create mode 100644 examples/pets/mysql/galera/mysql-galera.yaml create mode 100755 examples/pets/mysql/galera/test.sh create mode 100644 examples/pets/mysql/healthz/Dockerfile create mode 100644 examples/pets/mysql/healthz/Makefile create mode 100644 examples/pets/mysql/healthz/README.md create mode 100644 examples/pets/mysql/healthz/main.go create mode 100644 examples/pets/mysql/healthz/pod.yaml create mode 100644 examples/pets/peer-finder/Dockerfile create mode 100644 examples/pets/peer-finder/Makefile create mode 100644 examples/pets/peer-finder/README.md create mode 100644 examples/pets/peer-finder/peer-finder.go create mode 100644 examples/pets/redis/README.md create mode 100644 examples/pets/redis/init/Dockerfile create mode 100644 examples/pets/redis/init/Makefile create mode 100755 examples/pets/redis/init/install.sh create mode 100755 
examples/pets/redis/init/on-start.sh
 create mode 100644 examples/pets/redis/redis.yaml
 create mode 100755 examples/pets/redis/test.sh
 create mode 100644 examples/pets/zookeeper/README.md
 create mode 100644 examples/pets/zookeeper/init/Dockerfile
 create mode 100644 examples/pets/zookeeper/init/Makefile
 create mode 100755 examples/pets/zookeeper/init/install.sh
 create mode 100755 examples/pets/zookeeper/init/on-change.sh
 create mode 100755 examples/pets/zookeeper/init/on-start.sh
 create mode 100755 examples/pets/zookeeper/test.sh
 create mode 100644 examples/pets/zookeeper/zookeeper.yaml

diff --git a/examples/pets/README.md b/examples/pets/README.md
new file mode 100644
index 000000000000..309eba2c9eb2
--- /dev/null
+++ b/examples/pets/README.md
@@ -0,0 +1,10 @@
+# PetSet examples
+
+These examples are tracked from the [Kubernetes contrib project @d6e4be](https://github.com/kubernetes/contrib/tree/d6e4be066cc076fbb91ff69691819e117711b30b/pets).
+
+Note that some of these examples require the ability to run root containers, which may not be possible for all users in all environments. To grant
+access to run containers as root to a service account in your project, run:
+
+    oadm policy add-scc-to-user anyuid -z default
+
+which allows the `default` service account to run root containers.
\ No newline at end of file
diff --git a/examples/pets/mysql/galera/README.md b/examples/pets/mysql/galera/README.md
new file mode 100644
index 000000000000..c19ed05956ce
--- /dev/null
+++ b/examples/pets/mysql/galera/README.md
@@ -0,0 +1,33 @@
+# MySQL Galera
+
+This example runs MySQL Galera through a petset.
+
+## Bootstrap
+
+Create the petset in this directory
+```
+$ kubectl create -f mysql-galera.yaml
+```
+
+This example requires manual intervention.
+Once you have all 3 nodes in Running, you can run the "test.sh" script in this directory.
+
+## Caveats
+
+Starting up all galera nodes at once leads to an issue where all the mysqls
+believe they're in the primary component because they don't see the others in
+the DNS. For the bootstrapping to work: mysql-0 needs to see itself, mysql-1
+needs to see itself and mysql-0, and so on, because the first node that sees
+a peer list of 1 will assume it's the leader.
+
+## TODO
+
+Expect better solutions for the following as petset matures.
+
+* Failover
+* Scaling Up
+* Scaling Down
+* Image Upgrade
+* Maintenance
+
diff --git a/examples/pets/mysql/galera/init/Dockerfile b/examples/pets/mysql/galera/init/Dockerfile
new file mode 100644
index 000000000000..238d12f7eaac
--- /dev/null
+++ b/examples/pets/mysql/galera/init/Dockerfile
@@ -0,0 +1,29 @@
+# Copyright 2016 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: get rid of bash dependency and switch to plain busybox.
+# The tar in busybox also doesn't seem to understand compression.
+FROM debian:jessie +MAINTAINER Prashanth.B + +RUN apt-get update && apt-get install -y wget + +ADD on-start.sh / +ADD my-galera.cnf / +# See contrib/pets/peer-finder for details +RUN wget -qO /peer-finder https://storage.googleapis.com/kubernetes-release/pets/peer-finder + +ADD install.sh / +RUN chmod -c 755 /install.sh /on-start.sh /peer-finder +Entrypoint ["/install.sh"] diff --git a/examples/pets/mysql/galera/init/Makefile b/examples/pets/mysql/galera/init/Makefile new file mode 100644 index 000000000000..b240c38b03e5 --- /dev/null +++ b/examples/pets/mysql/galera/init/Makefile @@ -0,0 +1,27 @@ +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +all: push + +TAG = 0.1 +PREFIX = gcr.io/google_containers/galera-install + +container: + docker build -t $(PREFIX):$(TAG) . + +push: container + gcloud docker push $(PREFIX):$(TAG) + +clean: + docker rmi $(PREFIX):$(TAG) diff --git a/examples/pets/mysql/galera/init/install.sh b/examples/pets/mysql/galera/init/install.sh new file mode 100644 index 000000000000..473ed770dddf --- /dev/null +++ b/examples/pets/mysql/galera/init/install.sh @@ -0,0 +1,50 @@ +#! /bin/bash + +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This volume is assumed to exist and is shared with parent of the init +# container. It contains the mysq config. +CONFIG_VOLUME="/etc/mysql" + +# This volume is assumed to exist and is shared with the peer-finder +# init container. It contains on-start/change configuration scripts. +WORKDIR_VOLUME="/work-dir" + +for i in "$@" +do +case $i in + -c=*|--config=*) + CONFIG_VOLUME="${i#*=}" + shift + ;; + -w=*|--work-dir=*) + WORKDIR_VOLUME="${i#*=}" + shift + ;; + *) + # unknown option + ;; +esac +done + +echo installing config scripts into "${WORKDIR_VOLUME}" +mkdir -p "${WORKDIR_VOLUME}" +cp /on-start.sh "${WORKDIR_VOLUME}"/ +cp /peer-finder "${WORKDIR_VOLUME}"/ + +echo installing my-galera.cnf into "${CONFIG_VOLUME}" +mkdir -p "${CONFIG_VOLUME}" +chown -R mysql:mysql "${CONFIG_VOLUME}" +cp /my-galera.cnf "${CONFIG_VOLUME}"/ diff --git a/examples/pets/mysql/galera/init/my-galera.cnf b/examples/pets/mysql/galera/init/my-galera.cnf new file mode 100644 index 000000000000..75c477dbf9fa --- /dev/null +++ b/examples/pets/mysql/galera/init/my-galera.cnf @@ -0,0 +1,22 @@ +[mysqld] +user = mysql +bind-address = 0.0.0.0 +wsrep_provider = /usr/lib/galera/libgalera_smm.so +# TODO: is rsync the best option? 
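+# rsync blocks the donor node for the duration of the state transfer; an
+# xtrabackup-based SST would avoid that but needs extra packages in the image.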
+wsrep_sst_method = rsync +default_storage_engine = innodb +binlog_format = row +innodb_autoinc_lock_mode = 2 +innodb_flush_log_at_trx_commit = 0 +query_cache_size = 0 +query_cache_type = 0 + +# By default every node is standalone +wsrep_cluster_address=gcomm:// +wsrep_cluster_name=galera +wsrep_node_address=127.0.0.1 + +# TODO: Enable use privileges. This doesn't work +# on mysql restart, for some reason after the SST +# permissions are not setup correctly. +skip-grant-tables diff --git a/examples/pets/mysql/galera/init/on-start.sh b/examples/pets/mysql/galera/init/on-start.sh new file mode 100755 index 000000000000..69fccda8f432 --- /dev/null +++ b/examples/pets/mysql/galera/init/on-start.sh @@ -0,0 +1,52 @@ +#! /bin/bash + +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script writes out a mysql galera config using a list of newline seperated +# peer DNS names it accepts through stdin. + +# /etc/mysql is assumed to be a shared volume so we can modify my.cnf as required +# to keep the config up to date, without wrapping mysqld in a custom pid1. +# The config location is intentionally not /etc/mysql/my.cnf because the +# standard base image clobbers that location. +CFG=/etc/mysql/my-galera.cnf + +function join { + local IFS="$1"; shift; echo "$*"; +} + +HOSTNAME=$(hostname) +# Parse out cluster name, formatted as: petset_name-index +IFS='-' read -ra ADDR <<< "$(hostname)" +CLUSTER_NAME="${ADDR[0]}" + +while read -ra LINE; do + if [[ "${LINE}" == *"${HOSTNAME}"* ]]; then + MY_NAME=$LINE + fi + PEERS=("${PEERS[@]}" $LINE) +done + +if [ "${#PEERS[@]}" = 1 ]; then + WSREP_CLUSTER_ADDRESS="" +else + WSREP_CLUSTER_ADDRESS=$(join , "${PEERS[@]}") +fi +sed -i -e "s|^wsrep_node_address=.*$|wsrep_node_address=${MY_NAME}|" ${CFG} +sed -i -e "s|^wsrep_cluster_name=.*$|wsrep_cluster_name=${CLUSTER_NAME}|" ${CFG} +sed -i -e "s|^wsrep_cluster_address=.*$|wsrep_cluster_address=gcomm://${WSREP_CLUSTER_ADDRESS}|" ${CFG} + +# don't need a restart, we're just writing the conf in case there's an +# unexpected restart on the node. 
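Once all three pets are Running, one quick way to confirm the ensemble actually formed is to check Galera's cluster-size status variable on any node (a sketch, not part of the example scripts; it assumes the `mysql-<ordinal>` pod names from the PetSet below, and relies on the `skip-grant-tables` setting above to allow a plain `mysql -u root`):

```
# Expect wsrep_cluster_size to match the PetSet replica count (3 below).
kubectl exec mysql-0 -- mysql -u root -e "SHOW STATUS LIKE 'wsrep_cluster_size';"
```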
diff --git a/examples/pets/mysql/galera/mysql-galera.yaml b/examples/pets/mysql/galera/mysql-galera.yaml new file mode 100644 index 000000000000..35884c11436a --- /dev/null +++ b/examples/pets/mysql/galera/mysql-galera.yaml @@ -0,0 +1,120 @@ +# A headless service to create DNS records +apiVersion: v1 +kind: Service +metadata: + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + name: galera + labels: + app: mysql +spec: + ports: + - port: 3306 + name: mysql + # *.galear.default.svc.cluster.local + clusterIP: None + selector: + app: mysql +--- +apiVersion: apps/v1alpha1 +kind: PetSet +metadata: + name: mysql +spec: + serviceName: "galera" + replicas: 3 + template: + metadata: + labels: + app: mysql + annotations: + pod.alpha.kubernetes.io/initialized: "true" + pod.alpha.kubernetes.io/init-containers: '[ + { + "name": "install", + "image": "gcr.io/google_containers/galera-install:0.1", + "imagePullPolicy": "Always", + "args": ["--work-dir=/work-dir"], + "volumeMounts": [ + { + "name": "workdir", + "mountPath": "/work-dir" + }, + { + "name": "config", + "mountPath": "/etc/mysql" + } + ] + }, + { + "name": "bootstrap", + "image": "debian:jessie", + "command": ["/work-dir/peer-finder"], + "args": ["-on-start=\"/work-dir/on-start.sh\"", "-service=galera"], + "env": [ + { + "name": "POD_NAMESPACE", + "valueFrom": { + "fieldRef": { + "apiVersion": "v1", + "fieldPath": "metadata.namespace" + } + } + } + ], + "volumeMounts": [ + { + "name": "workdir", + "mountPath": "/work-dir" + }, + { + "name": "config", + "mountPath": "/etc/mysql" + } + ] + } + ]' + spec: + containers: + - name: mysql + image: erkules/galera:basic + ports: + - containerPort: 3306 + name: mysql + - containerPort: 4444 + name: sst + - containerPort: 4567 + name: replication + - containerPort: 4568 + name: ist + args: + - --defaults-file=/etc/mysql/my-galera.cnf + - --user=root + readinessProbe: + exec: + command: + - sh + - -c + - "mysql -u root -e 'show databases;'" + initialDelaySeconds: 15 + timeoutSeconds: 5 + volumeMounts: + - name: datadir + mountPath: /var/lib/ + - name: config + mountPath: /etc/mysql + volumes: + - name: config + emptyDir: {} + - name: workdir + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + volume.alpha.kubernetes.io/storage-class: anything + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 10Gi diff --git a/examples/pets/mysql/galera/test.sh b/examples/pets/mysql/galera/test.sh new file mode 100755 index 000000000000..fbd6a7165814 --- /dev/null +++ b/examples/pets/mysql/galera/test.sh @@ -0,0 +1,21 @@ +#! /bin/bash + +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
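+
+# Assumes all 3 pets are Running. Each statement below targets a different pet,
+# so the final SELECT on mysql-2 succeeds only if Galera replication is working.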
+ +kubectl exec mysql-0 -- mysql -u root -e "create database test;" +kubectl exec mysql-1 -- mysql -u root -e "use test; create table pet (id int(10), name varchar(20));" +kubectl exec mysql-1 -- mysql -u root -e "use test; insert into pet (id, name) values (1, \"galera\");" +kubectl exec mysql-2 -- mysql -u root -e "use test; select * from pet;" + diff --git a/examples/pets/mysql/healthz/Dockerfile b/examples/pets/mysql/healthz/Dockerfile new file mode 100644 index 000000000000..565916b1cfd7 --- /dev/null +++ b/examples/pets/mysql/healthz/Dockerfile @@ -0,0 +1,22 @@ +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TODO: get rid of bash dependency and switch to plain busybox. +# The tar in busybox also doesn't seem to understand compression. +FROM debian:jessie +MAINTAINER Prashanth.B + +RUN apt-get update && apt-get install -y mysql-client wget +ADD mysql_healthz /mysql_healthz +Entrypoint ["/mysql_healthz"] diff --git a/examples/pets/mysql/healthz/Makefile b/examples/pets/mysql/healthz/Makefile new file mode 100644 index 000000000000..2bbad7ed8b43 --- /dev/null +++ b/examples/pets/mysql/healthz/Makefile @@ -0,0 +1,31 @@ +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +all: push + +# 0.0 shouldn't clobber any released builds +TAG = 0.1 +PREFIX = gcr.io/google_containers/mysql-healthz + +server: main.go + CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-w' -o mysql_healthz ./main.go + +container: server + docker build -t $(PREFIX):$(TAG) . + +push: container + gcloud docker push $(PREFIX):$(TAG) + +clean: + rm -f mysql_healthz diff --git a/examples/pets/mysql/healthz/README.md b/examples/pets/mysql/healthz/README.md new file mode 100644 index 000000000000..960167a2c643 --- /dev/null +++ b/examples/pets/mysql/healthz/README.md @@ -0,0 +1,3 @@ +# Mysql healthz + +This is a simple exec-over-http healthz probe for mysql. Mainly intended as a work-around for (#25456)[https://github.com/kubernetes/kubernetes/issues/25456]. diff --git a/examples/pets/mysql/healthz/main.go b/examples/pets/mysql/healthz/main.go new file mode 100644 index 000000000000..8ea4977a3be9 --- /dev/null +++ b/examples/pets/mysql/healthz/main.go @@ -0,0 +1,96 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This is a simple health monitor for mysql. Only needed till #25456 is fixed. +package main + +import ( + "fmt" + "log" + "net/http" + "os" + osExec "os/exec" + "strings" + + flag "github.com/spf13/pflag" +) + +var ( + flags = flag.NewFlagSet("a http healthz server for mysql.", flag.ExitOnError) + port = flags.Int("port", 8080, "port to use for healthz server.") + verbose = flags.Bool("verbose", false, "log verbose output?") + pass = flags.String("password", "", "mysql password.") + host = flags.String("host", "", "mysql host.") + + mysqlChecks = map[string]string{ + "/healthz": "show databases;", + } +) + +type mysqlManager struct { + host, pass string +} + +func (m *mysqlManager) exec(cmd string) ([]byte, error) { + var password string + if m.pass != "" { + password = fmt.Sprintf("-p %v", m.pass) + } + mysqlCmd := fmt.Sprintf("/usr/bin/mysql -u root %v -h %v -B -e '%v'", password, m.host, cmd) + return osExec.Command("sh", "-c", mysqlCmd).CombinedOutput() +} + +func registerHandlers(verbose bool, m *mysqlManager) { + var str string + for endpoint, cmd := range mysqlChecks { + str += fmt.Sprintf("\t%v: %q\n", endpoint, cmd) + http.HandleFunc(endpoint, func(w http.ResponseWriter, r *http.Request) { + output, err := m.exec(cmd) + if err != nil { + w.WriteHeader(http.StatusServiceUnavailable) + } else { + w.WriteHeader(http.StatusOK) + } + if verbose { + log.Printf("Output of %v:\n%v\n", cmd, string(output)) + } + w.Write(output) + }) + } + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf("Available handlers:\n%v", str))) + }) +} + +func getHostnameOrDie() string { + output, err := osExec.Command("hostname").CombinedOutput() + if err != nil { + log.Fatalf("%v", err) + } + return strings.Trim(string(output), "\n") +} + +func main() { + flags.Parse(os.Args) + hostname := *host + if hostname == "" { + hostname = getHostnameOrDie() + } + registerHandlers(*verbose, &mysqlManager{pass: *pass, host: hostname}) + log.Printf("Starting mysql healthz server on port %v", *port) + log.Fatalf(fmt.Sprintf("%v", http.ListenAndServe(fmt.Sprintf(":%v", *port), nil))) +} diff --git a/examples/pets/mysql/healthz/pod.yaml b/examples/pets/mysql/healthz/pod.yaml new file mode 100644 index 000000000000..cb047100ec4f --- /dev/null +++ b/examples/pets/mysql/healthz/pod.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Pod +metadata: + name: mysql + namespace: default +spec: + containers: + - image: mysql:5.6 + name: server + env: + - name: MYSQL_ALLOW_EMPTY_PASSWORD + value: "yes" + readinessProbe: + httpGet: + path: /healthz + port: 8080 + - name: healthz + image: gcr.io/google_containers/mysql-healthz:1.0 + imagePullPolicy: Always diff --git a/examples/pets/peer-finder/Dockerfile b/examples/pets/peer-finder/Dockerfile new file mode 100644 index 000000000000..137d2b42c797 --- /dev/null +++ b/examples/pets/peer-finder/Dockerfile @@ -0,0 +1,23 @@ +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM gcr.io/google_containers/ubuntu-slim:0.2 +MAINTAINER Prashanth.B + +RUN apt-get update && apt-get install -y wget bash dnsutils +ADD peer-finder /peer-finder +ADD peer-finder.go /peer-finder.go + +EXPOSE 9376 +ENTRYPOINT ["/peer-finder"] diff --git a/examples/pets/peer-finder/Makefile b/examples/pets/peer-finder/Makefile new file mode 100644 index 000000000000..5cc743c21475 --- /dev/null +++ b/examples/pets/peer-finder/Makefile @@ -0,0 +1,33 @@ +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +all: push + +TAG = 0.1 +PREFIX = gcr.io/google_containers/peer-finder + +server: peer-finder.go + CGO_ENABLED=0 go build -a -installsuffix cgo --ldflags '-w' ./peer-finder.go + +release: server + gsutil cp peer-finder gs://kubernetes-release/pets/peer-finder + +container: server + docker build -t $(PREFIX):$(TAG) . + +push: container + gcloud docker push $(PREFIX):$(TAG) + +clean: + rm -f peer-finder diff --git a/examples/pets/peer-finder/README.md b/examples/pets/peer-finder/README.md new file mode 100644 index 000000000000..54f2100846fa --- /dev/null +++ b/examples/pets/peer-finder/README.md @@ -0,0 +1,15 @@ +# Peer finder + +This is a simple peer finder daemon that runs as pid 1 in a petset. +It is expected to be a temporary solution till the main Kubernetes repo supports: +1. Init containers to replace on-start scripts +2. A notification delivery mechanism that allows external controllers to + declaratively execute on-change scripts in containers. + +Though we don't expect this container to always run as pid1, it will be +necessary in some form. All it does is resolve DNS. Even when we get (2) +the most natural way to update the input for the on-change script is through +a sidecar that runs the peer-finder. + + + diff --git a/examples/pets/peer-finder/peer-finder.go b/examples/pets/peer-finder/peer-finder.go new file mode 100644 index 000000000000..93ec34143411 --- /dev/null +++ b/examples/pets/peer-finder/peer-finder.go @@ -0,0 +1,110 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// A small utility program to look up hostnames of endpoints in a service.
+package main
+
+import (
+	"flag"
+	"fmt"
+	"log"
+	"net"
+	"os"
+	"os/exec"
+	"sort"
+	"strings"
+	"time"
+
+	"k8s.io/kubernetes/pkg/util/sets"
+)
+
+const (
+	svcLocalSuffix = "svc.cluster.local"
+	pollPeriod     = 1 * time.Second
+)
+
+var (
+	onChange  = flag.String("on-change", "", "Script to run on change, must accept a newline separated list of peers via stdin.")
+	onStart   = flag.String("on-start", "", "Script to run on start, must accept a newline separated list of peers via stdin.")
+	svc       = flag.String("service", "", "Governing service responsible for the DNS records of the domain this pod is in.")
+	namespace = flag.String("ns", "", "The namespace this pod is running in. If unspecified, the POD_NAMESPACE env var is used.")
+)
+
+func lookup(svcName string) (sets.String, error) {
+	endpoints := sets.NewString()
+	_, srvRecords, err := net.LookupSRV("", "", svcName)
+	if err != nil {
+		return endpoints, err
+	}
+	for _, srvRecord := range srvRecords {
+		// The SRV records end in a "." for the root domain
+		ep := fmt.Sprintf("%v", srvRecord.Target[:len(srvRecord.Target)-1])
+		endpoints.Insert(ep)
+	}
+	return endpoints, nil
+}
+
+func shellOut(sendStdin, script string) {
+	log.Printf("execing: %v with stdin: %v", script, sendStdin)
+	// TODO: Switch to sending stdin from go
+	out, err := exec.Command("bash", "-c", fmt.Sprintf("echo -e '%v' | %v", sendStdin, script)).CombinedOutput()
+	if err != nil {
+		log.Fatalf("Failed to execute %v: %v, err: %v", script, string(out), err)
+	}
+	log.Print(string(out))
+}
+
+func main() {
+	flag.Parse()
+
+	ns := *namespace
+	if ns == "" {
+		ns = os.Getenv("POD_NAMESPACE")
+	}
+	if *svc == "" || ns == "" || (*onChange == "" && *onStart == "") {
+		log.Fatalf("Incomplete args, require -on-change and/or -on-start, -service and -ns or an env var for POD_NAMESPACE.")
+	}
+
+	hostname, err := os.Hostname()
+	if err != nil {
+		log.Fatalf("Failed to get hostname: %s", err)
+	}
+
+	myName := strings.Join([]string{hostname, *svc, ns, svcLocalSuffix}, ".")
+	script := *onStart
+	if script == "" {
+		script = *onChange
+		log.Printf("No on-start supplied, on-change %v will be applied on start.", script)
+	}
+	for newPeers, peers := sets.NewString(), sets.NewString(); script != ""; time.Sleep(pollPeriod) {
+		newPeers, err = lookup(*svc)
+		if err != nil {
+			log.Printf("%v", err)
+			continue
+		}
+		if newPeers.Equal(peers) || !newPeers.Has(myName) {
+			continue
+		}
+		peerList := newPeers.List()
+		sort.Strings(peerList)
+		log.Printf("Peer list updated\nwas %v\nnow %v", peers.List(), newPeers.List())
+		shellOut(strings.Join(peerList, "\n"), script)
+		peers = newPeers
+		script = *onChange
+	}
+	// TODO: Exit if there's no on-change?
+	log.Printf("Peer finder exiting")
+}
diff --git a/examples/pets/redis/README.md b/examples/pets/redis/README.md
new file mode 100644
index 000000000000..d5acbf108160
--- /dev/null
+++ b/examples/pets/redis/README.md
@@ -0,0 +1,23 @@
+# Redis
+
+This example runs redis through a petset.
+
+## Master/slave
+
+### Bootstrap
+
+Create the petset in this directory
+```
+$ kubectl create -f redis.yaml
+```
+
+Once you have all 3 nodes in Running, you can run the "test.sh" script in this directory.
+
+## TODO
+
+Expect cleaner solutions for the following as petset matures.
+ +* Scaling Up/down +* Image Upgrade +* Periodic maintenance +* Sentinel failover diff --git a/examples/pets/redis/init/Dockerfile b/examples/pets/redis/init/Dockerfile new file mode 100644 index 000000000000..62b975631b4c --- /dev/null +++ b/examples/pets/redis/init/Dockerfile @@ -0,0 +1,29 @@ +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TODO: get rid of bash dependency and switch to plain busybox. +# The tar in busybox also doesn't seem to understand compression. +FROM debian:jessie +MAINTAINER Prashanth.B + +# TODO: just use standard redis when there is one for 3.2.0. +RUN apt-get update && apt-get install -y wget make gcc + +ADD on-start.sh / +# See contrib/pets/peer-finder for details +RUN wget -qO /peer-finder https://storage.googleapis.com/kubernetes-release/pets/peer-finder + +ADD install.sh / +RUN chmod -c 755 /install.sh /on-start.sh /peer-finder +Entrypoint ["/install.sh"] diff --git a/examples/pets/redis/init/Makefile b/examples/pets/redis/init/Makefile new file mode 100644 index 000000000000..ff217dd12bb8 --- /dev/null +++ b/examples/pets/redis/init/Makefile @@ -0,0 +1,27 @@ +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +all: push + +TAG = 0.1 +PREFIX = gcr.io/google_containers/redis-install + +container: + docker build -t $(PREFIX):$(TAG) . + +push: container + gcloud docker push $(PREFIX):$(TAG) + +clean: + docker rmi $(PREFIX):$(TAG) diff --git a/examples/pets/redis/init/install.sh b/examples/pets/redis/init/install.sh new file mode 100755 index 000000000000..94786d485f29 --- /dev/null +++ b/examples/pets/redis/init/install.sh @@ -0,0 +1,64 @@ +#! /bin/bash + +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This volume is assumed to exist and is shared with parent of the init +# container. It contains the redis installation. 
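+# (In redis.yaml this is the emptyDir volume named "opt", mounted at /opt by the
+# init containers and the redis server container alike.)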
+INSTALL_VOLUME="/opt" + +# This volume is assumed to exist and is shared with the peer-finder +# init container. It contains on-start/change configuration scripts. +WORK_DIR="/work-dir" + +TEMP_DIR="/tmp" + +VERSION="3.2.0" + +for i in "$@" +do +case $i in + -v=*|--version=*) + VERSION="${i#*=}" + shift + ;; + -i=*|--install-into=*) + INSTALL_VOLUME="${i#*=}" + shift + ;; + -w=*|--work-dir=*) + WORK_DIR="${i#*=}" + shift + ;; + *) + # unknown option + ;; +esac +done + +echo installing config scripts into "${WORK_DIR}" +mkdir -p "${WORK_DIR}" +cp /on-start.sh "${WORK_DIR}"/ +cp /peer-finder "${WORK_DIR}"/ + +echo installing redis-"${VERSION}" into "${INSTALL_VOLUME}" +mkdir -p "${TEMP_DIR}" "${INSTALL_VOLUME}"/redis +wget -q -O - http://download.redis.io/releases/redis-"${VERSION}".tar.gz | tar -xzf - -C "${TEMP_DIR}" + +cd "${TEMP_DIR}"/redis-"${VERSION}"/ +# Clean out existing deps, see https://github.com/antirez/redis/issues/722 +make distclean +make install INSTALL_BIN="${INSTALL_VOLUME}"/redis +cp "${TEMP_DIR}"/redis-"${VERSION}"/redis.conf ${INSTALL_VOLUME}/redis/redis.conf + diff --git a/examples/pets/redis/init/on-start.sh b/examples/pets/redis/init/on-start.sh new file mode 100755 index 000000000000..37d3be032601 --- /dev/null +++ b/examples/pets/redis/init/on-start.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +CFG=/opt/redis/redis.conf +HOSTNAME=$(hostname) +DATADIR="/data" +# Port on which redis listens for connections. +PORT=6379 + +# Ping everyone but ourself to see if there's a master. Only one pet starts at +# a time, so if we don't see a master we can assume the position is ours. +while read -ra LINE; do + if [[ "${LINE}" == *"${HOSTNAME}"* ]]; then + sed -i -e "s|^bind.*$|bind ${LINE}|" ${CFG} + elif [ "$(/opt/redis/redis-cli -h $LINE info | grep role | sed 's,\r$,,')" = "role:master" ]; then + # TODO: More restrictive regex? + sed -i -e "s|^.*slaveof.*$|slaveof ${LINE} ${PORT}|" ${CFG} + fi +done + +# Set the data directory for append only log and snapshot files. This should +# be a persistent volume for consistency. +sed -i -e "s|^.*dir .*$|dir ${DATADIR}|" ${CFG} + +# The append only log is written for every SET operation. Without this setting, +# redis just snapshots periodically which is only safe for a cache. This will +# produce an appendonly.aof file in the configured data dir. +sed -i -e "s|^appendonly .*$|appendonly yes|" ${CFG} + +# Every write triggers an fsync. Recommended default is "everysec", which +# is only safe for AP applications. 
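+# "appendfsync always" trades write throughput for durability: every write is
+# fsynced before being acknowledged, the CP-leaning counterpart to "everysec".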
+sed -i -e "s|^appendfsync .*$|appendfsync always|" ${CFG} + + diff --git a/examples/pets/redis/redis.yaml b/examples/pets/redis/redis.yaml new file mode 100644 index 000000000000..01fd99d616ad --- /dev/null +++ b/examples/pets/redis/redis.yaml @@ -0,0 +1,115 @@ +# A headless service to create DNS records +apiVersion: v1 +kind: Service +metadata: + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + name: redis + labels: + app: redis +spec: + ports: + - port: 6379 + name: peer + # *.redis.default.svc.cluster.local + clusterIP: None + selector: + app: redis +--- +apiVersion: apps/v1alpha1 +kind: PetSet +metadata: + name: rd +spec: + serviceName: "redis" + replicas: 3 + template: + metadata: + labels: + app: redis + annotations: + pod.alpha.kubernetes.io/initialized: "true" + pod.alpha.kubernetes.io/init-containers: '[ + { + "name": "install", + "image": "gcr.io/google_containers/redis-install:0.1", + "imagePullPolicy": "Always", + "args": ["--version=3.2.0", "--install-into=/opt", "--work-dir=/work-dir"], + "volumeMounts": [ + { + "name": "opt", + "mountPath": "/opt" + }, + { + "name": "workdir", + "mountPath": "/work-dir" + } + ] + }, + { + "name": "bootstrap", + "image": "debian:jessie", + "command": ["/work-dir/peer-finder"], + "args": ["-on-start=\"/work-dir/on-start.sh\"", "-service=redis"], + "env": [ + { + "name": "POD_NAMESPACE", + "valueFrom": { + "fieldRef": { + "apiVersion": "v1", + "fieldPath": "metadata.namespace" + } + } + } + ], + "volumeMounts": [ + { + "name": "opt", + "mountPath": "/opt" + }, + { + "name": "workdir", + "mountPath": "/work-dir" + } + ] + } + ]' + spec: + containers: + - name: redis + image: debian:jessie + ports: + - containerPort: 6379 + name: peer + command: + - /opt/redis/redis-server + args: + - /opt/redis/redis.conf + readinessProbe: + exec: + command: + - sh + - -c + - "/opt/redis/redis-cli -h $(hostname) ping" + initialDelaySeconds: 15 + timeoutSeconds: 5 + volumeMounts: + - name: datadir + mountPath: /data + - name: opt + mountPath: /opt + volumes: + - name: opt + emptyDir: {} + - name: workdir + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + volume.alpha.kubernetes.io/storage-class: anything + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 20Gi diff --git a/examples/pets/redis/test.sh b/examples/pets/redis/test.sh new file mode 100755 index 000000000000..52a167f96add --- /dev/null +++ b/examples/pets/redis/test.sh @@ -0,0 +1,19 @@ +#! /bin/bash + +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
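+
+# Assumes rd-0 bootstrapped as the master: the first pet sees only itself in DNS,
+# so on-start.sh never writes a slaveof line for it. The SET on rd-0 should then
+# be readable from the rd-2 slave.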
+
+kubectl exec rd-0 -- /opt/redis/redis-cli -h rd-0.redis SET replicated:test true
+kubectl exec rd-2 -- /opt/redis/redis-cli -h rd-2.redis GET replicated:test
+
diff --git a/examples/pets/zookeeper/README.md b/examples/pets/zookeeper/README.md
new file mode 100644
index 000000000000..99217d009265
--- /dev/null
+++ b/examples/pets/zookeeper/README.md
@@ -0,0 +1,85 @@
+# Zookeeper
+
+This example runs zookeeper through a petset.
+
+## Bootstrap
+
+Create the petset in this directory
+```
+$ kubectl create -f zookeeper.yaml
+```
+
+Once you have all 3 nodes in Running, you can run the "test.sh" script in this directory.
+
+## Failover
+
+You can test failover by killing the leader. Insert a key:
+```console
+$ kubectl exec zoo-0 -- /opt/zookeeper/bin/zkCli.sh create /foo bar;
+$ kubectl exec zoo-2 -- /opt/zookeeper/bin/zkCli.sh get /foo;
+```
+
+Watch existing members:
+```console
+$ kubectl run --attach bbox --image=busybox --restart=Never -- sh -c 'while true; do for i in 0 1 2; do echo zoo-$i $(echo stats | nc zoo-$i.zk:2181 | grep Mode); sleep 1; done; done';
+zoo-2 Mode: follower
+zoo-0 Mode: follower
+zoo-1 Mode: leader
+zoo-2 Mode: follower
+```
+
+Delete pets and wait for the petset controller to bring them back up:
+```console
+$ kubectl delete po -l app=zk
+$ kubectl get po --watch-only
+NAME      READY     STATUS            RESTARTS   AGE
+zoo-0     0/1       Init:0/2          0          16s
+zoo-0     0/1       Init:0/2          0          21s
+zoo-0     0/1       PodInitializing   0          23s
+zoo-0     1/1       Running           0          41s
+zoo-1     0/1       Pending           0          0s
+zoo-1     0/1       Init:0/2          0          0s
+zoo-1     0/1       Init:0/2          0          14s
+zoo-1     0/1       PodInitializing   0          17s
+zoo-1     0/1       Running           0          18s
+zoo-2     0/1       Pending           0          0s
+zoo-2     0/1       Init:0/2          0          0s
+zoo-2     0/1       Init:0/2          0          12s
+zoo-2     0/1       Init:0/2          0          28s
+zoo-2     0/1       PodInitializing   0          31s
+zoo-2     0/1       Running           0          32s
+...
+
+zoo-0 Mode: follower
+zoo-1 Mode: leader
+zoo-2 Mode: follower
+```
+
+Check the previously inserted key:
+```console
+$ kubectl exec zoo-1 -- /opt/zookeeper/bin/zkCli.sh get /foo
+ionid = 0x354887858e80035, negotiated timeout = 30000
+
+WATCHER::
+
+WatchedEvent state:SyncConnected type:None path:null
+bar
+```
+
+## Scaling
+
+You can scale up by modifying the number of replicas on the PetSet.
+
+## Image Upgrade
+
+TODO: Add details
+
+## Maintenance
+
+TODO: Add details
+
+## Limitations
+* Both petset and init containers are in alpha
+* Look through the on-start and on-change scripts for TODOs
+* Doesn't support the addition of observers through the petset
+* Only supports storage options that have backends for persistent volume claims
+
diff --git a/examples/pets/zookeeper/init/Dockerfile b/examples/pets/zookeeper/init/Dockerfile
new file mode 100644
index 000000000000..d3e8aaeef645
--- /dev/null
+++ b/examples/pets/zookeeper/init/Dockerfile
@@ -0,0 +1,29 @@
+# Copyright 2016 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: get rid of bash dependency and switch to plain busybox.
+# The tar in busybox also doesn't seem to understand compression.
+FROM debian:jessie
+MAINTAINER Prashanth.B
+
+RUN apt-get update && apt-get install -y wget netcat
+
+ADD on-start.sh /
+ADD on-change.sh /
+# See contrib/pets/peer-finder for details
+RUN wget -qO /peer-finder https://storage.googleapis.com/kubernetes-release/pets/peer-finder
+
+ADD install.sh /
+RUN chmod -c 755 /install.sh /on-start.sh /on-change.sh /peer-finder
+Entrypoint ["/install.sh"]
diff --git a/examples/pets/zookeeper/init/Makefile b/examples/pets/zookeeper/init/Makefile
new file mode 100644
index 000000000000..bfb697899e7f
--- /dev/null
+++ b/examples/pets/zookeeper/init/Makefile
@@ -0,0 +1,27 @@
+# Copyright 2016 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+all: push
+
+TAG = 0.1
+PREFIX = gcr.io/google_containers/zookeeper-install
+
+container:
+	docker build -t $(PREFIX):$(TAG) .
+
+push: container
+	gcloud docker push $(PREFIX):$(TAG)
+
+clean:
+	docker rmi $(PREFIX):$(TAG)
diff --git a/examples/pets/zookeeper/init/install.sh b/examples/pets/zookeeper/init/install.sh
new file mode 100755
index 000000000000..6ed72696be0c
--- /dev/null
+++ b/examples/pets/zookeeper/init/install.sh
@@ -0,0 +1,76 @@
+#! /bin/bash
+
+# Copyright 2016 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This volume is assumed to exist and is shared with the parent of the init
+# container. It contains the zookeeper installation.
+INSTALL_VOLUME="/opt"
+
+# This volume is assumed to exist and is shared with the peer-finder
+# init container. It contains on-start/change configuration scripts.
+WORKDIR_VOLUME="/work-dir"
+
+# As of April 2016, 3.4.8 is the latest stable release, but versions 3.5.0 onward
+# allow dynamic reconfiguration.
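+# (on-start.sh in this directory depends on that: it joins new members with
+# "zkCli.sh reconfig -add" rather than rewriting the static config.)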
+VERSION="3.5.0-alpha"
+
+for i in "$@"
+do
+case $i in
+    -v=*|--version=*)
+    VERSION="${i#*=}"
+    shift
+    ;;
+    -i=*|--install-into=*)
+    INSTALL_VOLUME="${i#*=}"
+    shift
+    ;;
+    -w=*|--work-dir=*)
+    WORKDIR_VOLUME="${i#*=}"
+    shift
+    ;;
+    *)
+    # unknown option
+    ;;
+esac
+done
+
+echo installing config scripts into "${WORKDIR_VOLUME}"
+mkdir -p "${WORKDIR_VOLUME}"
+cp /on-start.sh "${WORKDIR_VOLUME}"/
+cp /on-change.sh "${WORKDIR_VOLUME}"/
+cp /peer-finder "${WORKDIR_VOLUME}"/
+
+echo installing zookeeper-"${VERSION}" into "${INSTALL_VOLUME}"
+mkdir -p "${INSTALL_VOLUME}"
+wget -q -O - http://apache.mirrors.pair.com/zookeeper/zookeeper-"${VERSION}"/zookeeper-"${VERSION}".tar.gz | tar -xzf - -C "${INSTALL_VOLUME}"
+mv "${INSTALL_VOLUME}"/zookeeper-"${VERSION}" "${INSTALL_VOLUME}"/zookeeper
+cp "${INSTALL_VOLUME}"/zookeeper/conf/zoo_sample.cfg "${INSTALL_VOLUME}"/zookeeper/conf/zoo.cfg
+
+# TODO: Should dynamic config be tied to the version?
+IFS="." read -ra RELEASE <<< "${VERSION}"
+if [ $(expr "${RELEASE[1]}") -gt 4 ]; then
+  echo zookeeper-"${VERSION}" supports dynamic reconfiguration, enabling it
+  echo "standaloneEnabled=false" >> "${INSTALL_VOLUME}"/zookeeper/conf/zoo.cfg
+  echo "dynamicConfigFile=${INSTALL_VOLUME}/zookeeper/conf/zoo.cfg.dynamic" >> "${INSTALL_VOLUME}"/zookeeper/conf/zoo.cfg
+fi
+
+# TODO: This is a hack; netcat is convenient to have in the zookeeper container,
+# and we want to avoid a custom zookeeper image just for this, so copy it in.
+NC=$(which nc)
+if [ "${NC}" != "" ]; then
+  echo copying nc into "${INSTALL_VOLUME}"
+  cp "${NC}" "${INSTALL_VOLUME}"
+fi
diff --git a/examples/pets/zookeeper/init/on-change.sh b/examples/pets/zookeeper/init/on-change.sh
new file mode 100755
index 000000000000..bb9a5049e401
--- /dev/null
+++ b/examples/pets/zookeeper/init/on-change.sh
@@ -0,0 +1,51 @@
+#! /bin/bash
+
+# Copyright 2016 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script configures zookeeper cluster membership for versions of zookeeper
+# < 3.5.0. It should not be used with the on-start.sh script in this example.
+# As of April 2016, 3.4.8 is the latest stable release.
+
+CFG=/opt/zookeeper/conf/zoo.cfg
+CFG_BAK=/opt/zookeeper/conf/zoo.cfg.bak
+MY_ID=/tmp/zookeeper/myid
+
+# write myid
+IFS='-' read -ra ADDR <<< "$(hostname)"
+echo $(expr "1" + "${ADDR[1]}") > "${MY_ID}"
+
+# TODO: Rewriting the whole config and restarting is a dumb way to reconfigure
+# zookeeper compared to dynamic reconfig (3.5.0+), but it's simple.
+i=0
+echo "
+tickTime=2000
+initLimit=10
+syncLimit=5
+dataDir=/tmp/zookeeper
+clientPort=2181
+" > "${CFG_BAK}"
+
+while read -ra LINE; do
+    let i=i+1
+    echo "server.${i}=${LINE}:2888:3888" >> "${CFG_BAK}"
+done
+cp ${CFG_BAK} ${CFG}
+
+# TODO: Typically one needs to first add a new member as an "observer" then
+# promote it to "participant", but that requirement is relaxed if we never
+# start > 1 at a time.
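+# Restart so the server picks up the rewritten config; zookeeper >= 3.5.0
+# would use the dynamic "reconfig" flow in on-start.sh instead.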
+/opt/zookeeper/bin/zkServer.sh restart
diff --git a/examples/pets/zookeeper/init/on-start.sh b/examples/pets/zookeeper/init/on-start.sh
new file mode 100755
index 000000000000..392b82aa9dad
--- /dev/null
+++ b/examples/pets/zookeeper/init/on-start.sh
@@ -0,0 +1,73 @@
+#! /bin/bash
+
+# Copyright 2016 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script configures zookeeper cluster membership for versions of zookeeper
+# >= 3.5.0. It should not be used with the on-change.sh script in this example.
+# As of April 2016, 3.4.8 is the latest stable release.
+
+# Both /opt and /tmp/zookeeper are assumed to be volumes shared with the parent.
+CFG=/opt/zookeeper/conf/zoo.cfg.dynamic
+CFG_BAK=/opt/zookeeper/conf/zoo.cfg.bak
+MY_ID_FILE=/tmp/zookeeper/myid
+HOSTNAME=$(hostname)
+
+while read -ra LINE; do
+    PEERS=("${PEERS[@]}" $LINE)
+done
+
+# Don't add the first member as an observer
+if [ ${#PEERS[@]} -eq 1 ]; then
+    # We need to write our index in this list of servers into MY_ID_FILE.
+    # Note that this may not always coincide with the hostname id.
+    echo 1 > "${MY_ID_FILE}"
+    echo "server.1=${PEERS[0]}:2888:3888;2181" > "${CFG}"
+    # TODO: zkServer-initialize is the safe way to handle changes to datadir
+    # because simply starting will create a new datadir, BUT if the user changed
+    # pod template they might end up with 2 datadirs and brief split brain.
+    exit
+fi
+
+# Every subsequent member is added as an observer and promoted to a participant
+echo "" > "${CFG_BAK}"
+i=0
+for peer in "${PEERS[@]}"; do
+    let i=i+1
+    if [[ "${peer}" == *"${HOSTNAME}"* ]]; then
+      MY_ID=$i
+      MY_NAME=${peer}
+      echo $i > "${MY_ID_FILE}"
+      echo "server.${i}=${peer}:2888:3888:observer;2181" >> "${CFG_BAK}"
+    else
+      echo "server.${i}=${peer}:2888:3888:participant;2181" >> "${CFG_BAK}"
+    fi
+done
+
+# Once the dynamic config file is written it shouldn't be modified, so the final
+# reconfigure needs to happen through the "reconfig" command.
+cp ${CFG_BAK} ${CFG}
+
+# TODO: zkServer-initialize is the safe way to handle changes to datadir
+# because simply starting will create a new datadir, BUT if the user changed
+# pod template they might end up with 2 datadirs and brief split brain.
+/opt/zookeeper/bin/zkServer.sh start
+
+# TODO: We shouldn't need to specify the address of the master as long as
+# there's quorum. According to the docs the new server is just not allowed to
+# vote, it's still allowed to propose config changes, and it knows the
+# existing members of the ensemble from *its* config. This works as expected,
+# but we should confirm it with more empirical evidence.
+/opt/zookeeper/bin/zkCli.sh reconfig -add "server.$MY_ID=$MY_NAME:2888:3888:participant;2181"
+/opt/zookeeper/bin/zkServer.sh stop
diff --git a/examples/pets/zookeeper/test.sh b/examples/pets/zookeeper/test.sh
new file mode 100755
index 000000000000..fbe12ff25ca1
--- /dev/null
+++ b/examples/pets/zookeeper/test.sh
@@ -0,0 +1,19 @@
+#! 
/bin/bash + +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kubectl exec zoo-0 -- /opt/zookeeper/bin/zkCli.sh create /foo bar; +kubectl exec zoo-2 -- /opt/zookeeper/bin/zkCli.sh get /foo; + diff --git a/examples/pets/zookeeper/zookeeper.yaml b/examples/pets/zookeeper/zookeeper.yaml new file mode 100644 index 000000000000..ada1af378b8e --- /dev/null +++ b/examples/pets/zookeeper/zookeeper.yaml @@ -0,0 +1,123 @@ +# A headless service to create DNS records +apiVersion: v1 +kind: Service +metadata: + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + name: zk + labels: + app: zk +spec: + ports: + - port: 2888 + name: peer + - port: 3888 + name: leader-election + # *.zk.default.svc.cluster.local + clusterIP: None + selector: + app: zk +--- +apiVersion: apps/v1alpha1 +kind: PetSet +metadata: + name: zoo +spec: + serviceName: "zk" + replicas: 3 + template: + metadata: + labels: + app: zk + annotations: + pod.alpha.kubernetes.io/initialized: "true" + pod.alpha.kubernetes.io/init-containers: '[ + { + "name": "install", + "image": "gcr.io/google_containers/zookeeper-install:0.1", + "imagePullPolicy": "Always", + "args": ["--version=3.5.0-alpha", "--install-into=/opt", "--work-dir=/work-dir"], + "volumeMounts": [ + { + "name": "opt", + "mountPath": "/opt/" + }, + { + "name": "workdir", + "mountPath": "/work-dir" + } + ] + }, + { + "name": "bootstrap", + "image": "java:openjdk-8-jre", + "command": ["/work-dir/peer-finder"], + "args": ["-on-start=\"/work-dir/on-start.sh\"", "-service=zk"], + "env": [ + { + "name": "POD_NAMESPACE", + "valueFrom": { + "fieldRef": { + "apiVersion": "v1", + "fieldPath": "metadata.namespace" + } + } + } + ], + "volumeMounts": [ + { + "name": "opt", + "mountPath": "/opt/" + }, + { + "name": "workdir", + "mountPath": "/work-dir" + }, + { + "name": "datadir", + "mountPath": "/tmp/zookeeper" + } + ] + } + ]' + spec: + containers: + - name: zk + image: java:openjdk-8-jre + ports: + - containerPort: 2888 + name: peer + - containerPort: 3888 + name: leader-election + command: + - /opt/zookeeper/bin/zkServer.sh + args: + - start-foreground + readinessProbe: + exec: + command: + - sh + - -c + - "/opt/zookeeper/bin/zkCli.sh ls /" + initialDelaySeconds: 15 + timeoutSeconds: 5 + volumeMounts: + - name: datadir + mountPath: /tmp/zookeeper + - name: opt + mountPath: /opt/ + volumes: + - name: opt + emptyDir: {} + - name: workdir + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + volume.alpha.kubernetes.io/storage-class: anything + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 20Gi From 140c6c94c58c72621d62bbf5ccb337b211774ad3 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Thu, 21 Jul 2016 22:02:41 -0400 Subject: [PATCH 3/8] Debug should be able to skip init containers --- pkg/cmd/cli/cmd/debug.go | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/pkg/cmd/cli/cmd/debug.go 
b/pkg/cmd/cli/cmd/debug.go index e8c527c19382..7f228b8479a1 100644 --- a/pkg/cmd/cli/cmd/debug.go +++ b/pkg/cmd/cli/cmd/debug.go @@ -42,19 +42,20 @@ type DebugOptions struct { Filename string Timeout time.Duration - Command []string - Annotations map[string]string - AsRoot bool - AsNonRoot bool - AsUser int64 - KeepLabels bool // TODO: evaluate selecting the right labels automatically - KeepAnnotations bool - KeepLiveness bool - KeepReadiness bool - OneContainer bool - NodeName string - AddEnv []kapi.EnvVar - RemoveEnv []string + Command []string + Annotations map[string]string + AsRoot bool + AsNonRoot bool + AsUser int64 + KeepLabels bool // TODO: evaluate selecting the right labels automatically + KeepAnnotations bool + KeepLiveness bool + KeepReadiness bool + KeepInitContainers bool + OneContainer bool + NodeName string + AddEnv []kapi.EnvVar + RemoveEnv []string } const ( @@ -148,6 +149,7 @@ func NewCmdDebug(fullName string, f *clientcmd.Factory, in io.Reader, out, errou cmd.Flags().StringVarP(&options.Attach.ContainerName, "container", "c", "", "Container name; defaults to first container") cmd.Flags().BoolVar(&options.KeepAnnotations, "keep-annotations", false, "Keep the original pod annotations") cmd.Flags().BoolVar(&options.KeepLiveness, "keep-liveness", false, "Keep the original pod liveness probes") + cmd.Flags().BoolVar(&options.KeepInitContainers, "keep-init-containers", true, "Run the init containers for the pod. Defaults to true.") cmd.Flags().BoolVar(&options.KeepReadiness, "keep-readiness", false, "Keep the original pod readiness probes") cmd.Flags().BoolVar(&options.OneContainer, "one-container", false, "Run only the selected container, remove all others") cmd.Flags().StringVar(&options.NodeName, "node-name", "", "Set a specific node to run on - by default the pod will run on any valid node") @@ -389,6 +391,10 @@ func (o *DebugOptions) Debug() error { func (o *DebugOptions) transformPodForDebug(annotations map[string]string) (*kapi.Pod, []string) { pod := o.Attach.Pod + if !o.KeepInitContainers { + pod.Spec.InitContainers = nil + } + // reset the container container := containerForName(pod, o.Attach.ContainerName) From 9d296e46c8bbb7c366330b1004489daa84a8c2ac Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Tue, 26 Jul 2016 17:21:09 -0400 Subject: [PATCH 4/8] generate: debug command --- contrib/completions/bash/oc | 1 + contrib/completions/bash/openshift | 1 + contrib/completions/zsh/oc | 1 + contrib/completions/zsh/openshift | 1 + docs/man/man1/oc-debug.1 | 4 ++++ docs/man/man1/openshift-cli-debug.1 | 4 ++++ 6 files changed, 12 insertions(+) diff --git a/contrib/completions/bash/oc b/contrib/completions/bash/oc index bdcb7132ed06..99c39a45574d 100644 --- a/contrib/completions/bash/oc +++ b/contrib/completions/bash/oc @@ -4067,6 +4067,7 @@ _oc_debug() flags_with_completion+=("-f") flags_completion+=("__handle_filename_extension_flag yaml|yml|json") flags+=("--keep-annotations") + flags+=("--keep-init-containers") flags+=("--keep-liveness") flags+=("--keep-readiness") flags+=("--no-headers") diff --git a/contrib/completions/bash/openshift b/contrib/completions/bash/openshift index c967340839fa..8c71a941309c 100644 --- a/contrib/completions/bash/openshift +++ b/contrib/completions/bash/openshift @@ -8569,6 +8569,7 @@ _openshift_cli_debug() flags_with_completion+=("-f") flags_completion+=("__handle_filename_extension_flag yaml|yml|json") flags+=("--keep-annotations") + flags+=("--keep-init-containers") flags+=("--keep-liveness") flags+=("--keep-readiness") 
flags+=("--no-headers") diff --git a/contrib/completions/zsh/oc b/contrib/completions/zsh/oc index bbfcb1649915..61487f6a9799 100644 --- a/contrib/completions/zsh/oc +++ b/contrib/completions/zsh/oc @@ -4228,6 +4228,7 @@ _oc_debug() flags_with_completion+=("-f") flags_completion+=("__handle_filename_extension_flag yaml|yml|json") flags+=("--keep-annotations") + flags+=("--keep-init-containers") flags+=("--keep-liveness") flags+=("--keep-readiness") flags+=("--no-headers") diff --git a/contrib/completions/zsh/openshift b/contrib/completions/zsh/openshift index a966ff85d0a7..f8329d7304d9 100644 --- a/contrib/completions/zsh/openshift +++ b/contrib/completions/zsh/openshift @@ -8730,6 +8730,7 @@ _openshift_cli_debug() flags_with_completion+=("-f") flags_completion+=("__handle_filename_extension_flag yaml|yml|json") flags+=("--keep-annotations") + flags+=("--keep-init-containers") flags+=("--keep-liveness") flags+=("--keep-readiness") flags+=("--no-headers") diff --git a/docs/man/man1/oc-debug.1 b/docs/man/man1/oc-debug.1 index 5bee92b80441..3d80bf90ec89 100644 --- a/docs/man/man1/oc-debug.1 +++ b/docs/man/man1/oc-debug.1 @@ -60,6 +60,10 @@ the shell. \fB\-\-keep\-annotations\fP=false Keep the original pod annotations +.PP +\fB\-\-keep\-init\-containers\fP=true + Run the init containers for the pod. Defaults to true. + .PP \fB\-\-keep\-liveness\fP=false Keep the original pod liveness probes diff --git a/docs/man/man1/openshift-cli-debug.1 b/docs/man/man1/openshift-cli-debug.1 index da9d590a3e89..e0e06882c379 100644 --- a/docs/man/man1/openshift-cli-debug.1 +++ b/docs/man/man1/openshift-cli-debug.1 @@ -60,6 +60,10 @@ the shell. \fB\-\-keep\-annotations\fP=false Keep the original pod annotations +.PP +\fB\-\-keep\-init\-containers\fP=true + Run the init containers for the pod. Defaults to true. 
+ .PP \fB\-\-keep\-liveness\fP=false Keep the original pod liveness probes From aa0cae0b4ed7a7e3e13bbd0af384c9f2b45f74b6 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Thu, 21 Jul 2016 22:02:56 -0400 Subject: [PATCH 5/8] DNS should support hostname annotations --- pkg/dns/serviceresolver.go | 156 ++++++++++++++++++++++++----------- test/cmd/dns.sh | 55 ++++++++++++ test/extended/dns/dns.go | 3 +- test/integration/dns_test.go | 23 ++++-- test/testdata/services.yaml | 49 +++++++++++ 5 files changed, 228 insertions(+), 58 deletions(-) create mode 100755 test/cmd/dns.sh create mode 100644 test/testdata/services.yaml diff --git a/pkg/dns/serviceresolver.go b/pkg/dns/serviceresolver.go index a08e45093cd1..333ee7a64deb 100644 --- a/pkg/dns/serviceresolver.go +++ b/pkg/dns/serviceresolver.go @@ -1,6 +1,7 @@ package dns import ( + "encoding/json" "fmt" "hash/fnv" "net" @@ -10,8 +11,10 @@ import ( "github.com/golang/glog" kapi "k8s.io/kubernetes/pkg/api" + kendpoints "k8s.io/kubernetes/pkg/api/endpoints" "k8s.io/kubernetes/pkg/api/errors" kclient "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/util/validation" "github.com/skynetservices/skydns/msg" "github.com/skynetservices/skydns/server" @@ -133,6 +136,8 @@ func (b *ServiceResolver) Records(dnsName string, exact bool) ([]msg.Service, er endpointPrefix := base == "endpoints" retrieveEndpoints := endpointPrefix || (len(segments) > 3 && segments[3] == "_endpoints") + includePorts := len(segments) > 3 && hasAllPrefixedSegments(segments[3:], "_") && segments[3] != "_endpoints" + // if has a portal IP and looking at svc if svc.Spec.ClusterIP != kapi.ClusterIPNone && !retrieveEndpoints { defaultService := msg.Service{ @@ -147,41 +152,41 @@ func (b *ServiceResolver) Records(dnsName string, exact bool) ([]msg.Service, er defaultName := buildDNSName(subdomain, defaultHash) defaultService.Key = msg.Path(defaultName) - if len(svc.Spec.Ports) == 0 { + if len(svc.Spec.Ports) == 0 || !includePorts { return []msg.Service{defaultService}, nil } services := []msg.Service{} - if len(segments) == 3 { - for _, p := range svc.Spec.Ports { - port := p.Port - if port == 0 { - port = int32(p.TargetPort.IntVal) - } - if port == 0 { - continue - } - if len(p.Protocol) == 0 { - p.Protocol = kapi.ProtocolTCP - } - portName := p.Name - if len(portName) == 0 { - portName = fmt.Sprintf("unknown-port-%d", port) - } - keyName := buildDNSName(subdomain, "_"+strings.ToLower(string(p.Protocol)), "_"+portName) - services = append(services, - msg.Service{ - Host: svc.Spec.ClusterIP, - Port: int(port), - - Priority: 10, - Weight: 10, - Ttl: 30, - - Key: msg.Path(keyName), - }, - ) + protocolMatch, portMatch := segments[3], "*" + if len(segments) > 4 { + portMatch = segments[4] + } + for _, p := range svc.Spec.Ports { + portSegment, protocolSegment, ok := matchesPortAndProtocol(p.Name, string(p.Protocol), portMatch, protocolMatch) + if !ok { + continue } + + port := p.Port + if port == 0 { + port = int32(p.TargetPort.IntVal) + } + + keyName := buildDNSName(defaultName, protocolSegment, portSegment) + services = append(services, + msg.Service{ + Host: svc.Spec.ClusterIP, + Port: int(port), + + Priority: 10, + Weight: 10, + Ttl: 30, + + TargetStrip: 2, + + Key: msg.Path(keyName), + }, + ) } if len(services) == 0 { services = append(services, defaultService) @@ -196,6 +201,16 @@ func (b *ServiceResolver) Records(dnsName string, exact bool) ([]msg.Service, er return nil, errNoSuchName } + hostnameMappings := noHostnameMappings + if savedHostnames := 
endpoints.Annotations[kendpoints.PodHostnamesAnnotation]; len(savedHostnames) > 0 {
+		mapped := make(map[string]kendpoints.HostRecord)
+		if err = json.Unmarshal([]byte(savedHostnames), &mapped); err == nil {
+			hostnameMappings = mapped
+		}
+	}
+
+	matchHostname := len(segments) > 3 && !hasAllPrefixedSegments(segments[3:4], "_")
+
 	services := make([]msg.Service, 0, len(endpoints.Subsets)*4)
 	for _, s := range endpoints.Subsets {
 		for _, a := range s.Addresses {
@@ -207,38 +222,47 @@
 				Weight:   10,
 				Ttl:      30,
 			}
-			defaultHash := getHash(defaultService.Host)
-			defaultName := buildDNSName(subdomain, defaultHash)
+			var endpointName string
+			if hostname, ok := getHostname(&a, hostnameMappings); ok {
+				endpointName = hostname
+			} else {
+				endpointName = getHash(defaultService.Host)
+			}
+			if matchHostname && endpointName != segments[3] {
+				continue
+			}
+
+			defaultName := buildDNSName(subdomain, endpointName)
 			defaultService.Key = msg.Path(defaultName)
 
+			if !includePorts {
+				services = append(services, defaultService)
+				continue
+			}
+
+			protocolMatch, portMatch := segments[3], "*"
+			if len(segments) > 4 {
+				portMatch = segments[4]
+			}
 			for _, p := range s.Ports {
-				port := p.Port
-				if port == 0 {
+				portSegment, protocolSegment, ok := matchesPortAndProtocol(p.Name, string(p.Protocol), portMatch, protocolMatch)
+				if !ok || p.Port == 0 {
 					continue
 				}
-				if len(p.Protocol) == 0 {
-					p.Protocol = kapi.ProtocolTCP
-				}
-				portName := p.Name
-				if len(portName) == 0 {
-					portName = fmt.Sprintf("unknown-port-%d", port)
-				}
-
-				keyName := buildDNSName(subdomain, "_"+strings.ToLower(string(p.Protocol)), "_"+portName, defaultHash)
+				keyName := buildDNSName(defaultName, protocolSegment, portSegment)
 				services = append(services,
 					msg.Service{
 						Host: a.IP,
-						Port: int(port),
+						Port: int(p.Port),
 
 						Priority: 10,
 						Weight:   10,
 						Ttl:      30,
 
+						TargetStrip: 2,
+
 						Key: msg.Path(keyName),
 					})
 			}
-			if len(services) == 0 {
-				services = append(services, defaultService)
-			}
 		}
 	}
 	glog.V(4).Infof("Answered %s:%t with %#v", dnsName, exact, services)
@@ -279,6 +303,21 @@
 // arpaSuffix is the standard suffix for PTR IP reverse lookups.
 const arpaSuffix = ".in-addr.arpa."
 
+func matchesPortAndProtocol(name, protocol, matchPortSegment, matchProtocolSegment string) (portSegment string, protocolSegment string, match bool) {
+	if len(name) == 0 {
+		return "", "", false
+	}
+	portSegment = "_" + name
+	if portSegment != matchPortSegment && matchPortSegment != "*" {
+		return "", "", false
+	}
+	protocolSegment = "_" + strings.ToLower(protocol)
+	if protocolSegment != matchProtocolSegment && matchProtocolSegment != "*" {
+		return "", "", false
+	}
+	return portSegment, protocolSegment, true
+}
+
 // extractIP turns a standard PTR reverse record lookup name
 // into an IP address
 func extractIP(reverseName string) (string, bool) {
@@ -309,6 +348,19 @@
 	return res
 }
 
+// getHostname returns the hostname for the provided address and true if one exists.
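+// For example, an address with Hostname "test2" resolves to ("test2", true);
+// the headless endpoints in test/testdata/services.yaml below exercise this.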
+func getHostname(address *kapi.EndpointAddress, podHostnames map[string]kendpoints.HostRecord) (string, bool) { + if len(address.Hostname) > 0 { + return address.Hostname, true + } + if hostRecord, exists := podHostnames[address.IP]; exists && len(validation.IsDNS1123Label(hostRecord.HostName)) == 0 { + return hostRecord.HostName, true + } + return "", false +} + // return a hash for the key name func getHash(text string) string { h := fnv.New32a() @@ -321,3 +371,15 @@ func getHash(text string) string { func convertDashIPToIP(ip string) string { return strings.Join(strings.Split(ip, "-"), ".") } + +// hasAllPrefixedSegments returns true if all provided segments have the given prefix. +func hasAllPrefixedSegments(segments []string, prefix string) bool { + for _, s := range segments { + if !strings.HasPrefix(s, prefix) { + return false + } + } + return true +} + +var noHostnameMappings = map[string]kendpoints.HostRecord{} diff --git a/test/cmd/dns.sh b/test/cmd/dns.sh new file mode 100755 index 000000000000..b4a7c76c7da2 --- /dev/null +++ b/test/cmd/dns.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +set -o errexit +set -o nounset +set -o pipefail + +OS_ROOT=$(dirname "${BASH_SOURCE}")/../.. +source "${OS_ROOT}/hack/lib/init.sh" +os::log::stacktrace::install +trap os::test::junit::reconcile_output EXIT + +# Cleanup cluster resources created by this test +( + set +e + oc delete svc,endpoints --all + exit 0 +) &>/dev/null + + +os::test::junit::declare_suite_start "cmd/dns" +# This test validates DNS behavior + +ns="$(oc project -q)" +dig="dig @${API_HOST} -p 8053" +if [[ -z "$(which dig)" ]]; then + dig="echo SKIPPED TEST: dig is not installed: " +fi + +os::cmd::expect_success 'oc create -f test/testdata/services.yaml' +os::cmd::try_until_success "${dig} +short headless.${ns}.svc.cluster.local" + +ip="$( oc get svc/clusterip --template '{{ .spec.clusterIP }}' )" + +os::cmd::expect_success_and_text "${dig} +short headless.${ns}.svc.cluster.local | wc -l" "2" +os::cmd::expect_success_and_text "${dig} +short headless.${ns}.svc.cluster.local" "10.1.2.3" +os::cmd::expect_success_and_text "${dig} +short headless.${ns}.svc.cluster.local" "10.1.2.4" +os::cmd::expect_success_and_text "${dig} +short test2.headless.${ns}.svc.cluster.local" "^10.1.2.4$" +os::cmd::expect_success_and_text "${dig} +short _endpoints.headless.${ns}.svc.cluster.local | wc -l" "2" +os::cmd::expect_success_and_text "${dig} +short _endpoints.headless.${ns}.svc.cluster.local" "10.1.2.3" +os::cmd::expect_success_and_text "${dig} +short _endpoints.headless.${ns}.svc.cluster.local" "10.1.2.4" +os::cmd::expect_success_and_text "${dig} +short headless.${ns}.svc.cluster.local SRV" "^10 50 0 3987d90a.headless.${ns}.svc.cluster.local" +os::cmd::expect_success_and_text "${dig} +short headless.${ns}.svc.cluster.local SRV" "^10 50 0 test2.headless.${ns}.svc.cluster.local" +os::cmd::expect_success_and_text "${dig} +short test2.headless.${ns}.svc.cluster.local SRV" "^10 100 0 test2.headless.${ns}.svc.cluster.local" +os::cmd::expect_success_and_text "${dig} +short _http._tcp.headless.${ns}.svc.cluster.local SRV" "^10 50 80 3987d90a.headless.${ns}.svc.cluster.local" +os::cmd::expect_success_and_text "${dig} +short _http._tcp.headless.${ns}.svc.cluster.local SRV" "^10 50 80 test2.headless.${ns}.svc.cluster.local" + +os::cmd::expect_success_and_text "${dig} +short clusterip.${ns}.svc.cluster.local" "^${ip}$" +os::cmd::expect_success_and_text "${dig} +short clusterip.${ns}.svc.cluster.local SRV" "^10 100 0 [0-9a-f]+.clusterip.${ns}.svc.cluster.local" 
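+# SRV answers read "priority weight port target"; the [0-9a-f]+ label is the
+# hash the resolver falls back to when an endpoint has no hostname.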
+os::cmd::expect_success_and_text "${dig} +short _http._tcp.clusterip.${ns}.svc.cluster.local SRV" "^10 100 80 [0-9a-f]+.clusterip.${ns}.svc.cluster.local" +os::cmd::expect_success_and_text "${dig} +short _endpoints.clusterip.${ns}.svc.cluster.local | wc -l" "2" +os::cmd::expect_success_and_text "${dig} +short _endpoints.clusterip.${ns}.svc.cluster.local" "10.1.2.3" +os::cmd::expect_success_and_text "${dig} +short _endpoints.clusterip.${ns}.svc.cluster.local" "10.1.2.4" + +echo "dns: ok" +os::test::junit::declare_suite_end diff --git a/test/extended/dns/dns.go b/test/extended/dns/dns.go index 806a51ff5ce9..b4379076f3b7 100644 --- a/test/extended/dns/dns.go +++ b/test/extended/dns/dns.go @@ -266,8 +266,7 @@ var _ = Describe("DNS", func() { "prefix.kubernetes.default.svc", "prefix.kubernetes.default.svc.cluster.local", - // answer wildcards on cluster service - fmt.Sprintf("prefix.headless.%s", f.Namespace.Name), + // answer wildcards on clusterIP services fmt.Sprintf("prefix.clusterip.%s", f.Namespace.Name), }, expect), diff --git a/test/integration/dns_test.go b/test/integration/dns_test.go index 7467d63ac87a..02ae5554c77e 100644 --- a/test/integration/dns_test.go +++ b/test/integration/dns_test.go @@ -194,8 +194,17 @@ func TestDNS(t *testing.T) { dnsQuestionName: "headless.default.svc.cluster.local.", srv: []*dns.SRV{ { - Target: headlessIPHash + "._unknown-port-2345._tcp.headless.default.svc.cluster.local.", - Port: 2345, + Target: headlessIPHash + ".headless.default.svc.cluster.local.", + Port: 0, + }, + }, + }, + { // SRV record for a port + dnsQuestionName: "_http._tcp.headless2.default.svc.cluster.local.", + srv: []*dns.SRV{ + { + Target: headless2IPHash + ".headless2.default.svc.cluster.local.", + Port: 2346, }, }, }, @@ -211,17 +220,13 @@ func TestDNS(t *testing.T) { dnsQuestionName: "headless2.default.svc.cluster.local.", srv: []*dns.SRV{ { - Target: headless2IPHash + "._http._tcp.headless2.default.svc.cluster.local.", - Port: 2346, - }, - { - Target: headless2IPHash + "._other._tcp.headless2.default.svc.cluster.local.", - Port: 2345, + Target: headless2IPHash + ".headless2.default.svc.cluster.local.", + Port: 0, }, }, }, { // the SRV record resolves to the IP - dnsQuestionName: "other.e1.headless2.default.svc.cluster.local.", + dnsQuestionName: headless2IPHash + ".headless2.default.svc.cluster.local.", expect: []*net.IP{&headless2IP}, }, { diff --git a/test/testdata/services.yaml b/test/testdata/services.yaml new file mode 100644 index 000000000000..8d522149f6d3 --- /dev/null +++ b/test/testdata/services.yaml @@ -0,0 +1,49 @@ +kind: List +apiVersion: v1 +items: +- kind: Service + apiVersion: v1 + metadata: + name: clusterip + spec: + ports: + - name: http + protocol: TCP + port: 80 +- kind: Endpoints + apiVersion: v1 + metadata: + name: clusterip + annotations: + "endpoints.beta.kubernetes.io/hostnames-map": '{"10.1.2.4":{"HostName": "test2"}}' + subsets: + - addresses: + - ip: 10.1.2.3 + - ip: 10.1.2.4 + ports: + - name: http + protocol: TCP + port: 80 +- kind: Service + apiVersion: v1 + metadata: + name: headless + spec: + clusterIP: None + ports: + - name: http + protocol: TCP + port: 80 +- kind: Endpoints + apiVersion: v1 + metadata: + name: headless + subsets: + - addresses: + - ip: 10.1.2.3 + - ip: 10.1.2.4 + hostname: test2 + ports: + - name: http + protocol: TCP + port: 80 \ No newline at end of file From df50db25c843ad1dda888a809d9bd41b8701c328 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Wed, 27 Jul 2016 00:16:12 -0400 Subject: [PATCH 6/8] Show PetSets in oc 
status Make a minor change to prepare for generic controller references (since PetSets and RCs could conflict over pods). --- pkg/api/graph/graphview/dc_pipeline.go | 2 +- pkg/api/graph/graphview/petset.go | 51 +++ pkg/api/graph/graphview/rc.go | 4 +- pkg/api/graph/graphview/service_group.go | 18 +- pkg/api/graph/graphview/veneering_test.go | 6 +- pkg/api/kubegraph/analysis/rc.go | 4 +- pkg/api/kubegraph/analysis/rc_test.go | 2 +- pkg/api/kubegraph/edge_test.go | 16 +- pkg/api/kubegraph/edges.go | 29 +- pkg/api/kubegraph/nodes/nodes.go | 31 ++ pkg/api/kubegraph/nodes/types.go | 62 ++- pkg/cmd/cli/describe/projectstatus.go | 102 +++-- pkg/cmd/cli/describe/projectstatus_test.go | 16 + pkg/deploy/graph/analysis/dc.go | 2 +- pkg/deploy/graph/edges.go | 2 +- pkg/deploy/graph/helpers.go | 2 +- pkg/deploy/graph/nodes/types.go | 2 +- test/testdata/app-scenarios/petset.yaml | 496 +++++++++++++++++++++ 18 files changed, 789 insertions(+), 58 deletions(-) create mode 100644 pkg/api/graph/graphview/petset.go create mode 100644 test/testdata/app-scenarios/petset.yaml diff --git a/pkg/api/graph/graphview/dc_pipeline.go b/pkg/api/graph/graphview/dc_pipeline.go index 3584d464dba6..4cf39de9774b 100644 --- a/pkg/api/graph/graphview/dc_pipeline.go +++ b/pkg/api/graph/graphview/dc_pipeline.go @@ -80,5 +80,5 @@ type SortedDeploymentConfigPipeline []DeploymentConfigPipeline func (m SortedDeploymentConfigPipeline) Len() int { return len(m) } func (m SortedDeploymentConfigPipeline) Swap(i, j int) { m[i], m[j] = m[j], m[i] } func (m SortedDeploymentConfigPipeline) Less(i, j int) bool { - return CompareObjectMeta(&m[i].Deployment.ObjectMeta, &m[j].Deployment.ObjectMeta) + return CompareObjectMeta(&m[i].Deployment.DeploymentConfig.ObjectMeta, &m[j].Deployment.DeploymentConfig.ObjectMeta) } diff --git a/pkg/api/graph/graphview/petset.go b/pkg/api/graph/graphview/petset.go new file mode 100644 index 000000000000..b06d76f5cf7d --- /dev/null +++ b/pkg/api/graph/graphview/petset.go @@ -0,0 +1,51 @@ +package graphview + +import ( + osgraph "github.com/openshift/origin/pkg/api/graph" + kubeedges "github.com/openshift/origin/pkg/api/kubegraph" + kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" +) + +type PetSet struct { + PetSet *kubegraph.PetSetNode + + OwnedPods []*kubegraph.PodNode + CreatedPods []*kubegraph.PodNode + + // TODO: handle conflicting once controller refs are present, not worth it yet +} + +// AllPetSets returns all the PetSets that aren't in the excludes set and the set of covered NodeIDs +func AllPetSets(g osgraph.Graph, excludeNodeIDs IntSet) ([]PetSet, IntSet) { + covered := IntSet{} + views := []PetSet{} + + for _, uncastNode := range g.NodesByKind(kubegraph.PetSetNodeKind) { + if excludeNodeIDs.Has(uncastNode.ID()) { + continue + } + + view, covers := NewPetSet(g, uncastNode.(*kubegraph.PetSetNode)) + covered.Insert(covers.List()...) 
+ views = append(views, view) + } + + return views, covered +} + +// NewPetSet returns the PetSet and a set of all the NodeIDs covered by the PetSet +func NewPetSet(g osgraph.Graph, node *kubegraph.PetSetNode) (PetSet, IntSet) { + covered := IntSet{} + covered.Insert(node.ID()) + + view := PetSet{} + view.PetSet = node + + for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(node, kubeedges.ManagedByControllerEdgeKind) { + podNode := uncastPodNode.(*kubegraph.PodNode) + covered.Insert(podNode.ID()) + view.OwnedPods = append(view.OwnedPods, podNode) + } + + return view, covered +} diff --git a/pkg/api/graph/graphview/rc.go b/pkg/api/graph/graphview/rc.go index 9f767e1dba78..59d57dec2af9 100644 --- a/pkg/api/graph/graphview/rc.go +++ b/pkg/api/graph/graphview/rc.go @@ -60,13 +60,13 @@ func NewReplicationController(g osgraph.Graph, rcNode *kubegraph.ReplicationCont rcView.RC = rcNode rcView.ConflictingRCIDToPods = map[int][]*kubegraph.PodNode{} - for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByRCEdgeKind) { + for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByControllerEdgeKind) { podNode := uncastPodNode.(*kubegraph.PodNode) covered.Insert(podNode.ID()) rcView.OwnedPods = append(rcView.OwnedPods, podNode) // check to see if this pod is managed by more than one RC - uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByRCEdgeKind) + uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByControllerEdgeKind) if len(uncastOwningRCs) > 1 { for _, uncastOwningRC := range uncastOwningRCs { if uncastOwningRC.ID() == rcNode.ID() { diff --git a/pkg/api/graph/graphview/service_group.go b/pkg/api/graph/graphview/service_group.go index dc6168c63b8d..5706dbc4f34f 100644 --- a/pkg/api/graph/graphview/service_group.go +++ b/pkg/api/graph/graphview/service_group.go @@ -21,10 +21,13 @@ type ServiceGroup struct { DeploymentConfigPipelines []DeploymentConfigPipeline ReplicationControllers []ReplicationController + PetSets []PetSet - FulfillingDCs []*deploygraph.DeploymentConfigNode - FulfillingRCs []*kubegraph.ReplicationControllerNode - FulfillingPods []*kubegraph.PodNode + // TODO: this has to stop + FulfillingPetSets []*kubegraph.PetSetNode + FulfillingDCs []*deploygraph.DeploymentConfigNode + FulfillingRCs []*kubegraph.ReplicationControllerNode + FulfillingPods []*kubegraph.PodNode ExposingRoutes []*routegraph.RouteNode } @@ -66,6 +69,8 @@ func NewServiceGroup(g osgraph.Graph, serviceNode *kubegraph.ServiceNode) (Servi service.FulfillingRCs = append(service.FulfillingRCs, castContainer) case *kubegraph.PodNode: service.FulfillingPods = append(service.FulfillingPods, castContainer) + case *kubegraph.PetSetNode: + service.FulfillingPetSets = append(service.FulfillingPetSets, castContainer) default: utilruntime.HandleError(fmt.Errorf("unrecognized container: %v", castContainer)) } @@ -97,6 +102,13 @@ func NewServiceGroup(g osgraph.Graph, serviceNode *kubegraph.ServiceNode) (Servi service.ReplicationControllers = append(service.ReplicationControllers, rcView) } + for _, fulfillingPetSet := range service.FulfillingPetSets { + view, covers := NewPetSet(g, fulfillingPetSet) + + covered.Insert(covers.List()...) + service.PetSets = append(service.PetSets, view) + } + for _, fulfillingPod := range service.FulfillingPods { _, podCovers := NewPod(g, fulfillingPod) covered.Insert(podCovers.List()...) 
diff --git a/pkg/api/graph/graphview/veneering_test.go b/pkg/api/graph/graphview/veneering_test.go index cb73e89f5da8..c052b79f106f 100644 --- a/pkg/api/graph/graphview/veneering_test.go +++ b/pkg/api/graph/graphview/veneering_test.go @@ -78,7 +78,7 @@ func TestBareRCGroup(t *testing.T) { kubeedges.AddAllExposedPodTemplateSpecEdges(g) kubeedges.AddAllExposedPodEdges(g) - kubeedges.AddAllManagedByRCPodEdges(g) + kubeedges.AddAllManagedByControllerPodEdges(g) coveredNodes := IntSet{} @@ -399,7 +399,7 @@ func TestGraph(t *testing.T) { } for _, bareDCPipeline := range bareDCPipelines { - t.Logf("from %s", bareDCPipeline.Deployment.Name) + t.Logf("from %s", bareDCPipeline.Deployment.DeploymentConfig.Name) for _, path := range bareDCPipeline.Images { t.Logf(" %v", path) } @@ -413,7 +413,7 @@ func TestGraph(t *testing.T) { indent := " " for _, deployment := range serviceGroup.DeploymentConfigPipelines { - t.Logf("%sdeployment %s", indent, deployment.Deployment.Name) + t.Logf("%sdeployment %s", indent, deployment.Deployment.DeploymentConfig.Name) for _, image := range deployment.Images { t.Logf("%s image %s", indent, image.Image.ImageSpec()) if image.Build != nil { diff --git a/pkg/api/kubegraph/analysis/rc.go b/pkg/api/kubegraph/analysis/rc.go index 253cfe585100..a0b1644355ee 100644 --- a/pkg/api/kubegraph/analysis/rc.go +++ b/pkg/api/kubegraph/analysis/rc.go @@ -21,11 +21,11 @@ func FindDuelingReplicationControllers(g osgraph.Graph, f osgraph.Namer) []osgra for _, uncastRCNode := range g.NodesByKind(kubegraph.ReplicationControllerNodeKind) { rcNode := uncastRCNode.(*kubegraph.ReplicationControllerNode) - for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByRCEdgeKind) { + for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByControllerEdgeKind) { podNode := uncastPodNode.(*kubegraph.PodNode) // check to see if this pod is managed by more than one RC - uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByRCEdgeKind) + uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByControllerEdgeKind) if len(uncastOwningRCs) > 1 { involvedRCNames := []string{} relatedNodes := []graph.Node{uncastPodNode} diff --git a/pkg/api/kubegraph/analysis/rc_test.go b/pkg/api/kubegraph/analysis/rc_test.go index 02c52a5da93c..ae6ca419b4e4 100644 --- a/pkg/api/kubegraph/analysis/rc_test.go +++ b/pkg/api/kubegraph/analysis/rc_test.go @@ -14,7 +14,7 @@ func TestDuelingRC(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - kubeedges.AddAllManagedByRCPodEdges(g) + kubeedges.AddAllManagedByControllerPodEdges(g) markers := FindDuelingReplicationControllers(g, osgraph.DefaultNamer) if e, a := 2, len(markers); e != a { diff --git a/pkg/api/kubegraph/edge_test.go b/pkg/api/kubegraph/edge_test.go index feb15963050a..2dd9512b4c36 100644 --- a/pkg/api/kubegraph/edge_test.go +++ b/pkg/api/kubegraph/edge_test.go @@ -8,6 +8,8 @@ import ( kapi "k8s.io/kubernetes/pkg/api" _ "k8s.io/kubernetes/pkg/api/install" + "k8s.io/kubernetes/pkg/api/unversioned" + kapps "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/runtime" @@ -38,6 +40,14 @@ func TestNamespaceEdgeMatching(t *testing.T) { rc.Spec.Selector = map[string]string{"a": "1"} kubegraph.EnsureReplicationControllerNode(g, rc) + p := &kapps.PetSet{} + p.Namespace = namespace + p.Name = "the-petset" + p.Spec.Selector = &unversioned.LabelSelector{ + MatchLabels: map[string]string{"a": "1"}, + } + kubegraph.EnsurePetSetNode(g, p) + svc 
:= &kapi.Service{} svc.Namespace = namespace svc.Name = "the-svc" @@ -49,7 +59,7 @@ func TestNamespaceEdgeMatching(t *testing.T) { fn("other", g) AddAllExposedPodEdges(g) AddAllExposedPodTemplateSpecEdges(g) - AddAllManagedByRCPodEdges(g) + AddAllManagedByControllerPodEdges(g) for _, edge := range g.Edges() { nsTo, err := namespaceFor(edge.To()) @@ -79,6 +89,10 @@ func namespaceFor(node graph.Node) (string, error) { return node.(*kubegraph.PodSpecNode).Namespace, nil case *kapi.ReplicationControllerSpec: return node.(*kubegraph.ReplicationControllerSpecNode).Namespace, nil + case *kapps.PetSetSpec: + return node.(*kubegraph.PetSetSpecNode).Namespace, nil + case *kapi.PodTemplateSpec: + return node.(*kubegraph.PodTemplateSpecNode).Namespace, nil default: return "", fmt.Errorf("unknown object: %#v", obj) } diff --git a/pkg/api/kubegraph/edges.go b/pkg/api/kubegraph/edges.go index eafce6721bc6..053a63fbff98 100644 --- a/pkg/api/kubegraph/edges.go +++ b/pkg/api/kubegraph/edges.go @@ -20,8 +20,8 @@ import ( const ( // ExposedThroughServiceEdgeKind goes from a PodTemplateSpec or a Pod to Service. The head should make the service's selector. ExposedThroughServiceEdgeKind = "ExposedThroughService" - // ManagedByRCEdgeKind goes from Pod to ReplicationController when the Pod satisfies the ReplicationController's label selector - ManagedByRCEdgeKind = "ManagedByRC" + // ManagedByControllerEdgeKind goes from Pod to controller when the Pod satisfies a controller's label selector + ManagedByControllerEdgeKind = "ManagedByController" // MountedSecretEdgeKind goes from PodSpec to Secret indicating that is or will be a request to mount a volume with the Secret. MountedSecretEdgeKind = "MountedSecret" // MountableSecretEdgeKind goes from ServiceAccount to Secret indicating that the SA allows the Secret to be mounted @@ -91,31 +91,36 @@ func AddAllExposedPodEdges(g osgraph.MutableUniqueGraph) { } } -// AddManagedByRCPodEdges ensures that a directed edge exists between an RC and all the pods +// AddManagedByControllerPodEdges ensures that a directed edge exists between a controller and all the pods // in the graph that match the label selector -func AddManagedByRCPodEdges(g osgraph.MutableUniqueGraph, rcNode *kubegraph.ReplicationControllerNode) { - if rcNode.Spec.Selector == nil { +func AddManagedByControllerPodEdges(g osgraph.MutableUniqueGraph, to graph.Node, namespace string, selector map[string]string) { + if selector == nil { return } - query := labels.SelectorFromSet(rcNode.Spec.Selector) + query := labels.SelectorFromSet(selector) for _, n := range g.(graph.Graph).Nodes() { switch target := n.(type) { case *kubegraph.PodNode: - if target.Namespace != rcNode.Namespace { + if target.Namespace != namespace { continue } if query.Matches(labels.Set(target.Labels)) { - g.AddEdge(target, rcNode, ManagedByRCEdgeKind) + g.AddEdge(target, to, ManagedByControllerEdgeKind) } } } } -// AddAllManagedByRCPodEdges calls AddManagedByRCPodEdges for every ServiceNode in the graph -func AddAllManagedByRCPodEdges(g osgraph.MutableUniqueGraph) { +// AddAllManagedByControllerPodEdges calls AddManagedByControllerPodEdges for every node in the graph +// TODO: should do this through an interface (selects pods) +func AddAllManagedByControllerPodEdges(g osgraph.MutableUniqueGraph) { for _, node := range g.(graph.Graph).Nodes() { - if rcNode, ok := node.(*kubegraph.ReplicationControllerNode); ok { - AddManagedByRCPodEdges(g, rcNode) + switch cast := node.(type) { + case *kubegraph.ReplicationControllerNode: + 
AddManagedByControllerPodEdges(g, cast, cast.ReplicationController.Namespace, cast.ReplicationController.Spec.Selector) + case *kubegraph.PetSetNode: + // TODO: refactor to handle expanded selectors (along with ReplicaSets and Deployments) + AddManagedByControllerPodEdges(g, cast, cast.PetSet.Namespace, cast.PetSet.Spec.Selector.MatchLabels) } } } diff --git a/pkg/api/kubegraph/nodes/nodes.go b/pkg/api/kubegraph/nodes/nodes.go index a94c5d15f334..5b7368f3801d 100644 --- a/pkg/api/kubegraph/nodes/nodes.go +++ b/pkg/api/kubegraph/nodes/nodes.go @@ -4,6 +4,7 @@ import ( "github.com/gonum/graph" kapi "k8s.io/kubernetes/pkg/api" + kapps "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" osgraph "github.com/openshift/origin/pkg/api/graph" @@ -154,3 +155,33 @@ func EnsureHorizontalPodAutoscalerNode(g osgraph.MutableUniqueGraph, hpa *autosc }, ).(*HorizontalPodAutoscalerNode) } + +func EnsurePetSetNode(g osgraph.MutableUniqueGraph, petset *kapps.PetSet) *PetSetNode { + nodeName := PetSetNodeName(petset) + node := osgraph.EnsureUnique(g, + nodeName, + func(node osgraph.Node) graph.Node { + return &PetSetNode{node, petset} + }, + ).(*PetSetNode) + + specNode := EnsurePetSetSpecNode(g, &petset.Spec, petset.Namespace, nodeName) + g.AddEdge(node, specNode, osgraph.ContainsEdgeKind) + + return node +} + +func EnsurePetSetSpecNode(g osgraph.MutableUniqueGraph, spec *kapps.PetSetSpec, namespace string, ownerName osgraph.UniqueName) *PetSetSpecNode { + specName := PetSetSpecNodeName(spec, ownerName) + specNode := osgraph.EnsureUnique(g, + specName, + func(node osgraph.Node) graph.Node { + return &PetSetSpecNode{node, spec, namespace, ownerName} + }, + ).(*PetSetSpecNode) + + ptSpecNode := EnsurePodTemplateSpecNode(g, &spec.Template, namespace, specName) + g.AddEdge(specNode, ptSpecNode, osgraph.ContainsEdgeKind) + + return specNode +} diff --git a/pkg/api/kubegraph/nodes/types.go b/pkg/api/kubegraph/nodes/types.go index 57fee9de52ca..78f4c452c30c 100644 --- a/pkg/api/kubegraph/nodes/types.go +++ b/pkg/api/kubegraph/nodes/types.go @@ -5,6 +5,7 @@ import ( "reflect" kapi "k8s.io/kubernetes/pkg/api" + kapps "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" osgraph "github.com/openshift/origin/pkg/api/graph" @@ -20,6 +21,8 @@ var ( ServiceAccountNodeKind = reflect.TypeOf(kapi.ServiceAccount{}).Name() SecretNodeKind = reflect.TypeOf(kapi.Secret{}).Name() HorizontalPodAutoscalerNodeKind = reflect.TypeOf(autoscaling.HorizontalPodAutoscaler{}).Name() + PetSetNodeKind = reflect.TypeOf(kapps.PetSet{}).Name() + PetSetSpecNodeKind = reflect.TypeOf(kapps.PetSetSpec{}).Name() ) func ServiceNodeName(o *kapi.Service) osgraph.UniqueName { @@ -108,7 +111,7 @@ func ReplicationControllerNodeName(o *kapi.ReplicationController) osgraph.Unique type ReplicationControllerNode struct { osgraph.Node - *kapi.ReplicationController + ReplicationController *kapi.ReplicationController IsFound bool } @@ -139,8 +142,8 @@ func ReplicationControllerSpecNodeName(o *kapi.ReplicationControllerSpec, ownerN type ReplicationControllerSpecNode struct { osgraph.Node - *kapi.ReplicationControllerSpec - Namespace string + ReplicationControllerSpec *kapi.ReplicationControllerSpec + Namespace string OwnerName osgraph.UniqueName } @@ -267,3 +270,56 @@ func (*HorizontalPodAutoscalerNode) Kind() string { func (n HorizontalPodAutoscalerNode) UniqueName() osgraph.UniqueName { return HorizontalPodAutoscalerNodeName(n.HorizontalPodAutoscaler) } + +func PetSetNodeName(o *kapps.PetSet) osgraph.UniqueName { + 
return osgraph.GetUniqueRuntimeObjectNodeName(PetSetNodeKind, o) +} + +type PetSetNode struct { + osgraph.Node + PetSet *kapps.PetSet +} + +func (n PetSetNode) Object() interface{} { + return n.PetSet +} + +func (n PetSetNode) String() string { + return string(n.UniqueName()) +} + +func (n PetSetNode) UniqueName() osgraph.UniqueName { + return PetSetNodeName(n.PetSet) +} + +func (*PetSetNode) Kind() string { + return PetSetNodeKind +} + +func PetSetSpecNodeName(o *kapps.PetSetSpec, ownerName osgraph.UniqueName) osgraph.UniqueName { + return osgraph.UniqueName(fmt.Sprintf("%s|%v", PetSetSpecNodeKind, ownerName)) +} + +type PetSetSpecNode struct { + osgraph.Node + PetSetSpec *kapps.PetSetSpec + Namespace string + + OwnerName osgraph.UniqueName +} + +func (n PetSetSpecNode) Object() interface{} { + return n.PetSetSpec +} + +func (n PetSetSpecNode) String() string { + return string(n.UniqueName()) +} + +func (n PetSetSpecNode) UniqueName() osgraph.UniqueName { + return PetSetSpecNodeName(n.PetSetSpec, n.OwnerName) +} + +func (*PetSetSpecNode) Kind() string { + return PetSetSpecNodeKind +} diff --git a/pkg/cmd/cli/describe/projectstatus.go b/pkg/cmd/cli/describe/projectstatus.go index 48688ec15c86..dbec6187261c 100644 --- a/pkg/cmd/cli/describe/projectstatus.go +++ b/pkg/cmd/cli/describe/projectstatus.go @@ -11,6 +11,7 @@ import ( kapi "k8s.io/kubernetes/pkg/api" kapierrors "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/unversioned" + kapps "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" kclient "k8s.io/kubernetes/pkg/client/unversioned" utilerrors "k8s.io/kubernetes/pkg/util/errors" @@ -69,6 +70,7 @@ func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, set &secretLoader{namespace: namespace, lister: d.K}, &rcLoader{namespace: namespace, lister: d.K}, &podLoader{namespace: namespace, lister: d.K}, + &petsetLoader{namespace: namespace, lister: d.K.Apps()}, &horizontalPodAutoscalerLoader{namespace: namespace, lister: d.K.Autoscaling()}, // TODO check swagger for feature enablement and selectively add bcLoader and buildLoader // then remove errors.TolerateNotFoundError method. @@ -108,7 +110,7 @@ func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, set kubeedges.AddAllExposedPodTemplateSpecEdges(g) kubeedges.AddAllExposedPodEdges(g) - kubeedges.AddAllManagedByRCPodEdges(g) + kubeedges.AddAllManagedByControllerPodEdges(g) kubeedges.AddAllRequestedServiceAccountEdges(g) kubeedges.AddAllMountableSecretEdges(g) kubeedges.AddAllMountedSecretEdges(g) @@ -188,6 +190,10 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error printLines(out, indent, 1, describeDeploymentInServiceGroup(local, dcPipeline, graphview.MaxRecentContainerRestartsForRC(g, dcPipeline.ActiveDeployment))...) } + for _, node := range service.FulfillingPetSets { + printLines(out, indent, 1, describePetSetInServiceGroup(local, node)...) 
+ } + rcNode: for _, rcNode := range service.FulfillingRCs { for _, coveredDC := range service.FulfillingDCs { @@ -199,14 +205,20 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error } pod: - for _, podNode := range service.FulfillingPods { + for _, node := range service.FulfillingPods { // skip pods that have been displayed in a roll-up of RCs and DCs (by implicit usage of RCs) for _, coveredRC := range service.FulfillingRCs { - if g.Edge(podNode, coveredRC) != nil { + if g.Edge(node, coveredRC) != nil { + continue pod + } + } + // TODO: collapse into FulfillingControllers + for _, covered := range service.FulfillingPetSets { + if g.Edge(node, covered) != nil { continue pod } } - printLines(out, indent, 1, describePodInServiceGroup(local, podNode)...) + printLines(out, indent, 1, describePodInServiceGroup(local, node)...) } } @@ -423,9 +435,11 @@ func (f namespacedFormatter) ResourceName(obj interface{}) string { case *kubegraph.ServiceAccountNode: return namespaceNameWithType("sa", t.Name, t.Namespace, f.currentNamespace, f.hideNamespace) case *kubegraph.ReplicationControllerNode: - return namespaceNameWithType("rc", t.Name, t.Namespace, f.currentNamespace, f.hideNamespace) + return namespaceNameWithType("rc", t.ReplicationController.Name, t.ReplicationController.Namespace, f.currentNamespace, f.hideNamespace) case *kubegraph.HorizontalPodAutoscalerNode: return namespaceNameWithType("hpa", t.HorizontalPodAutoscaler.Name, t.HorizontalPodAutoscaler.Namespace, f.currentNamespace, f.hideNamespace) + case *kubegraph.PetSetNode: + return namespaceNameWithType("petset", t.PetSet.Name, t.PetSet.Namespace, f.currentNamespace, f.hideNamespace) case *imagegraph.ImageStreamNode: return namespaceNameWithType("is", t.ImageStream.Name, t.ImageStream.Namespace, f.currentNamespace, f.hideNamespace) @@ -467,15 +481,15 @@ func describeAllProjectsOnServer(f formatter, server string) string { } func describeDeploymentInServiceGroup(f formatter, deploy graphview.DeploymentConfigPipeline, restartCount int32) []string { - local := namespacedFormatter{currentNamespace: deploy.Deployment.Namespace} + local := namespacedFormatter{currentNamespace: deploy.Deployment.DeploymentConfig.Namespace} includeLastPass := deploy.ActiveDeployment == nil if len(deploy.Images) == 1 { format := "%s deploys %s %s" - if deploy.Deployment.Spec.Test { + if deploy.Deployment.DeploymentConfig.Spec.Test { format = "%s test deploys %s %s" } - lines := []string{fmt.Sprintf(format, f.ResourceName(deploy.Deployment), describeImageInPipeline(local, deploy.Images[0], deploy.Deployment.Namespace), describeDeploymentConfigTrigger(deploy.Deployment.DeploymentConfig))} + lines := []string{fmt.Sprintf(format, f.ResourceName(deploy.Deployment), describeImageInPipeline(local, deploy.Images[0], deploy.Deployment.DeploymentConfig.Namespace), describeDeploymentConfigTrigger(deploy.Deployment.DeploymentConfig))} if len(lines[0]) > 120 && strings.Contains(lines[0], " <- ") { segments := strings.SplitN(lines[0], " <- ", 2) lines[0] = segments[0] + " <-" @@ -487,18 +501,27 @@ func describeDeploymentInServiceGroup(f formatter, deploy graphview.DeploymentCo } format := "%s deploys %s" - if deploy.Deployment.Spec.Test { + if deploy.Deployment.DeploymentConfig.Spec.Test { format = "%s test deploys %s" } lines := []string{fmt.Sprintf(format, f.ResourceName(deploy.Deployment), describeDeploymentConfigTrigger(deploy.Deployment.DeploymentConfig))} for _, image := range deploy.Images { - lines = append(lines, 
describeImageInPipeline(local, image, deploy.Deployment.Namespace)) + lines = append(lines, describeImageInPipeline(local, image, deploy.Deployment.DeploymentConfig.Namespace)) lines = append(lines, indentLines(" ", describeAdditionalBuildDetail(image.Build, image.LastSuccessfulBuild, image.LastUnsuccessfulBuild, image.ActiveBuilds, image.DestinationResolved, includeLastPass)...)...) lines = append(lines, describeDeployments(local, deploy.Deployment, deploy.ActiveDeployment, deploy.InactiveDeployments, restartCount, maxDisplayDeployments)...) } return lines } +func describePetSetInServiceGroup(f formatter, node *kubegraph.PetSetNode) []string { + images := []string{} + for _, container := range node.PetSet.Spec.Template.Spec.Containers { + images = append(images, container.Image) + } + + return []string{fmt.Sprintf("%s manages %s, %s", f.ResourceName(node), strings.Join(images, ", "), describePetSetStatus(node.PetSet))} +} + func describeRCInServiceGroup(f formatter, rcNode *kubegraph.ReplicationControllerNode) []string { if rcNode.ReplicationController.Spec.Template == nil { return []string{} @@ -916,7 +939,7 @@ func describeDeployments(f formatter, dcNode *deploygraph.DeploymentConfigNode, switch { case count == -1: - if deployutil.DeploymentStatusFor(deployment) == deployapi.DeploymentStatusComplete { + if deployutil.DeploymentStatusFor(deployment.ReplicationController) == deployapi.DeploymentStatusComplete { return out } default: @@ -944,48 +967,51 @@ func describeDeploymentStatus(deploy *kapi.ReplicationController, first, test bo reason = fmt.Sprintf(": %s", reason) } // TODO: encode fail time in the rc - return fmt.Sprintf("deployment #%d failed %s ago%s%s", version, timeAt, reason, describePodSummaryInline(deploy, false, restartCount)) + return fmt.Sprintf("deployment #%d failed %s ago%s%s", version, timeAt, reason, describePodSummaryInline(deploy.Status.Replicas, deploy.Spec.Replicas, false, restartCount)) case deployapi.DeploymentStatusComplete: // TODO: pod status output if test { return fmt.Sprintf("test deployment #%d deployed %s ago", version, timeAt) } - return fmt.Sprintf("deployment #%d deployed %s ago%s", version, timeAt, describePodSummaryInline(deploy, first, restartCount)) + return fmt.Sprintf("deployment #%d deployed %s ago%s", version, timeAt, describePodSummaryInline(deploy.Status.Replicas, deploy.Spec.Replicas, first, restartCount)) case deployapi.DeploymentStatusRunning: format := "deployment #%d running%s for %s%s" if test { format = "test deployment #%d running%s for %s%s" } - return fmt.Sprintf(format, version, maybeCancelling, timeAt, describePodSummaryInline(deploy, false, restartCount)) + return fmt.Sprintf(format, version, maybeCancelling, timeAt, describePodSummaryInline(deploy.Status.Replicas, deploy.Spec.Replicas, false, restartCount)) default: - return fmt.Sprintf("deployment #%d %s%s %s ago%s", version, strings.ToLower(string(status)), maybeCancelling, timeAt, describePodSummaryInline(deploy, false, restartCount)) + return fmt.Sprintf("deployment #%d %s%s %s ago%s", version, strings.ToLower(string(status)), maybeCancelling, timeAt, describePodSummaryInline(deploy.Status.Replicas, deploy.Spec.Replicas, false, restartCount)) } } +func describePetSetStatus(p *kapps.PetSet) string { + timeAt := strings.ToLower(formatRelativeTime(p.CreationTimestamp.Time)) + return fmt.Sprintf("created %s ago%s", timeAt, describePodSummaryInline(int32(p.Status.Replicas), int32(p.Spec.Replicas), false, 0)) +} + func describeRCStatus(rc *kapi.ReplicationController) string { 
timeAt := strings.ToLower(formatRelativeTime(rc.CreationTimestamp.Time)) - return fmt.Sprintf("rc/%s created %s ago%s", rc.Name, timeAt, describePodSummaryInline(rc, false, 0)) + return fmt.Sprintf("rc/%s created %s ago%s", rc.Name, timeAt, describePodSummaryInline(rc.Status.Replicas, rc.Spec.Replicas, false, 0)) } -func describePodSummaryInline(rc *kapi.ReplicationController, includeEmpty bool, restartCount int32) string { - s := describePodSummary(rc, includeEmpty, restartCount) +func describePodSummaryInline(actual, requested int32, includeEmpty bool, restartCount int32) string { + s := describePodSummary(actual, requested, includeEmpty, restartCount) if len(s) == 0 { return s } change := "" - desired := rc.Spec.Replicas switch { - case desired < rc.Status.Replicas: - change = fmt.Sprintf(" reducing to %d", desired) - case desired > rc.Status.Replicas: - change = fmt.Sprintf(" growing to %d", desired) + case requested < actual: + change = fmt.Sprintf(" reducing to %d", requested) + case requested > actual: + change = fmt.Sprintf(" growing to %d", requested) } return fmt.Sprintf(" - %s%s", s, change) } -func describePodSummary(rc *kapi.ReplicationController, includeEmpty bool, restartCount int32) string { - actual, requested := rc.Status.Replicas, rc.Spec.Replicas - restartWarn := "" +func describePodSummary(actual, requested int32, includeEmpty bool, restartCount int32) string { + var restartWarn string if restartCount > 0 { restartWarn = fmt.Sprintf(" (warning: %d restarts)", restartCount) } @@ -1192,6 +1218,30 @@ func (l *podLoader) AddToGraph(g osgraph.Graph) error { return nil } +type petsetLoader struct { + namespace string + lister kclient.PetSetNamespacer + items []kapps.PetSet +} + +func (l *petsetLoader) Load() error { + list, err := l.lister.PetSets(l.namespace).List(kapi.ListOptions{}) + if err != nil { + return err + } + + l.items = list.Items + return nil +} + +func (l *petsetLoader) AddToGraph(g osgraph.Graph) error { + for i := range l.items { + kubegraph.EnsurePetSetNode(g, &l.items[i]) + } + + return nil +} + type horizontalPodAutoscalerLoader struct { namespace string lister kclient.HorizontalPodAutoscalersNamespacer diff --git a/pkg/cmd/cli/describe/projectstatus_test.go b/pkg/cmd/cli/describe/projectstatus_test.go index 74e08a5f274a..ca57d7a09860 100644 --- a/pkg/cmd/cli/describe/projectstatus_test.go +++ b/pkg/cmd/cli/describe/projectstatus_test.go @@ -270,6 +270,22 @@ func TestProjectStatus(t *testing.T) { }, Time: mustParseTime("2015-04-07T04:12:25Z"), }, + "with pet sets": { + Path: "../../../../test/testdata/app-scenarios/petset.yaml", + Extra: []runtime.Object{ + &projectapi.Project{ + ObjectMeta: kapi.ObjectMeta{Name: "example", Namespace: ""}, + }, + }, + ErrFn: func(err error) bool { return err == nil }, + Contains: []string{ + "In project example on server https://example.com:8443\n", + "svc/galera[default] (headless):3306", + "petset/mysql manages erkules/galera:basic, created less than a second ago - 3 pods", + "* pod/mysql-1[default] has restarted 7 times", + }, + Time: mustParseTime("2015-04-07T04:12:25Z"), + }, "restarting pod": { Path: "../../../api/graph/test/restarting-pod.yaml", Extra: []runtime.Object{ diff --git a/pkg/deploy/graph/analysis/dc.go b/pkg/deploy/graph/analysis/dc.go index 7fb0fbd870c6..ca1c20ad9b66 100644 --- a/pkg/deploy/graph/analysis/dc.go +++ b/pkg/deploy/graph/analysis/dc.go @@ -103,7 +103,7 @@ func FindDeploymentConfigReadinessWarnings(g osgraph.Graph, f osgraph.Namer, set Node: for _, uncastDcNode := range 
g.NodesByKind(deploygraph.DeploymentConfigNodeKind) { dcNode := uncastDcNode.(*deploygraph.DeploymentConfigNode) - if t := dcNode.Spec.Template; t != nil && len(t.Spec.Containers) > 0 { + if t := dcNode.DeploymentConfig.Spec.Template; t != nil && len(t.Spec.Containers) > 0 { for _, container := range t.Spec.Containers { if container.ReadinessProbe != nil { continue Node diff --git a/pkg/deploy/graph/edges.go b/pkg/deploy/graph/edges.go index 6bf4f0e0d364..ead9a385ff38 100644 --- a/pkg/deploy/graph/edges.go +++ b/pkg/deploy/graph/edges.go @@ -64,7 +64,7 @@ func AddAllTriggerEdges(g osgraph.MutableUniqueGraph) { func AddDeploymentEdges(g osgraph.MutableUniqueGraph, node *deploygraph.DeploymentConfigNode) *deploygraph.DeploymentConfigNode { for _, n := range g.(graph.Graph).Nodes() { if rcNode, ok := n.(*kubegraph.ReplicationControllerNode); ok { - if rcNode.Namespace != node.Namespace { + if rcNode.ReplicationController.Namespace != node.DeploymentConfig.Namespace { continue } if BelongsToDeploymentConfig(node.DeploymentConfig, rcNode.ReplicationController) { diff --git a/pkg/deploy/graph/helpers.go b/pkg/deploy/graph/helpers.go index 018dfec1a29e..6f99c6227c6d 100644 --- a/pkg/deploy/graph/helpers.go +++ b/pkg/deploy/graph/helpers.go @@ -26,7 +26,7 @@ func RelevantDeployments(g osgraph.Graph, dcNode *deploygraph.DeploymentConfigNo sort.Sort(RecentDeploymentReferences(allDeployments)) - if dcNode.DeploymentConfig.Status.LatestVersion == deployutil.DeploymentVersionFor(allDeployments[0]) { + if dcNode.DeploymentConfig.Status.LatestVersion == deployutil.DeploymentVersionFor(allDeployments[0].ReplicationController) { return allDeployments[0], allDeployments[1:] } diff --git a/pkg/deploy/graph/nodes/types.go b/pkg/deploy/graph/nodes/types.go index 4d5ef227562c..ce847ad3dff1 100644 --- a/pkg/deploy/graph/nodes/types.go +++ b/pkg/deploy/graph/nodes/types.go @@ -17,7 +17,7 @@ func DeploymentConfigNodeName(o *deployapi.DeploymentConfig) osgraph.UniqueName type DeploymentConfigNode struct { osgraph.Node - *deployapi.DeploymentConfig + DeploymentConfig *deployapi.DeploymentConfig IsFound bool } diff --git a/test/testdata/app-scenarios/petset.yaml b/test/testdata/app-scenarios/petset.yaml new file mode 100644 index 000000000000..9b500e5f2263 --- /dev/null +++ b/test/testdata/app-scenarios/petset.yaml @@ -0,0 +1,496 @@ +apiVersion: v1 +items: +- apiVersion: apps/v1alpha1 + kind: PetSet + metadata: + creationTimestamp: 2016-07-21T15:53:09Z + generation: 3 + labels: + app: mysql + name: mysql + namespace: default + resourceVersion: "6790" + selfLink: /apis/apps/v1alpha1/namespaces/default/petsets/mysql + uid: 3900c985-4f5b-11e6-b8a1-080027242396 + spec: + replicas: 3 + selector: + matchLabels: + app: mysql + serviceName: galera + template: + metadata: + annotations: + pod.alpha.kubernetes.io/init-containers: 
'[{"name":"install","image":"gcr.io/google_containers/galera-install:0.1","args":["--work-dir=/work-dir"],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"Always"},{"name":"bootstrap","image":"debian:jessie","command":["/work-dir/peer-finder"],"args":["-on-start=\"/work-dir/on-start.sh\"","-service=galera"],"env":[{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}}],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"IfNotPresent"}]' + pod.alpha.kubernetes.io/initialized: "true" + creationTimestamp: null + labels: + app: mysql + spec: + containers: + - args: + - --defaults-file=/etc/mysql/my-galera.cnf + - --user=root + image: erkules/galera:basic + imagePullPolicy: IfNotPresent + name: mysql + ports: + - containerPort: 3306 + name: mysql + protocol: TCP + - containerPort: 4444 + name: sst + protocol: TCP + - containerPort: 4567 + name: replication + protocol: TCP + - containerPort: 4568 + name: ist + protocol: TCP + readinessProbe: + exec: + command: + - sh + - -c + - mysql -u root -e 'show databases;' + failureThreshold: 3 + initialDelaySeconds: 15 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + resources: {} + terminationMessagePath: /dev/termination-log + volumeMounts: + - mountPath: /var/lib/ + name: datadir + - mountPath: /etc/mysql + name: config + dnsPolicy: ClusterFirst + restartPolicy: Always + securityContext: {} + terminationGracePeriodSeconds: 30 + volumes: + - emptyDir: {} + name: config + - emptyDir: {} + name: workdir + volumeClaimTemplates: + - metadata: + annotations: + volume.alpha.kubernetes.io/storage-class: anything + creationTimestamp: null + name: datadir + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + status: + phase: Pending + status: + replicas: 3 +- apiVersion: v1 + kind: Service + metadata: + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + creationTimestamp: 2016-07-21T15:53:09Z + labels: + app: mysql + name: galera + namespace: default + resourceVersion: "343" + selfLink: /api/v1/namespaces/default/services/galera + uid: 38fb3915-4f5b-11e6-b8a1-080027242396 + spec: + clusterIP: None + portalIP: None + ports: + - name: mysql + port: 3306 + protocol: TCP + targetPort: 3306 + selector: + app: mysql + sessionAffinity: None + type: ClusterIP + status: + loadBalancer: {} +- apiVersion: v1 + kind: Pod + metadata: + annotations: + kubernetes.io/created-by: | + {"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"PetSet","namespace":"default","name":"mysql","uid":"3900c985-4f5b-11e6-b8a1-080027242396","apiVersion":"apps","resourceVersion":"6784"}} + openshift.io/scc: anyuid + pod.alpha.kubernetes.io/init-container-statuses: 
'[{"name":"install","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T02:41:12Z","finishedAt":"2016-07-27T02:41:12Z","containerID":"docker://5c727d8732899605fcfe3eecbeeb02576f18f5b989496073340427a8d2134622"}},"lastState":{},"ready":true,"restartCount":0,"image":"gcr.io/google_containers/galera-install:0.1","imageID":"docker://sha256:56ef857005d0ce479f2db0e4ee0ece05e0766ebfa7e79e27e1513915262a18ec","containerID":"docker://5c727d8732899605fcfe3eecbeeb02576f18f5b989496073340427a8d2134622"},{"name":"bootstrap","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T02:41:14Z","finishedAt":"2016-07-27T02:41:15Z","containerID":"docker://ab4ca0b3b6ec4860cd55c615534e1e2b11f4c3a33746783aab145919feb2446e"}},"lastState":{},"ready":true,"restartCount":0,"image":"debian:jessie","imageID":"docker://sha256:1b088884749bd93867ddb48ff404d4bbff09a17af8d95bc863efa5d133f87b78","containerID":"docker://ab4ca0b3b6ec4860cd55c615534e1e2b11f4c3a33746783aab145919feb2446e"}]' + pod.alpha.kubernetes.io/init-containers: '[{"name":"install","image":"gcr.io/google_containers/galera-install:0.1","args":["--work-dir=/work-dir"],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"Always"},{"name":"bootstrap","image":"debian:jessie","command":["/work-dir/peer-finder"],"args":["-on-start=\"/work-dir/on-start.sh\"","-service=galera"],"env":[{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}}],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"IfNotPresent"}]' + pod.alpha.kubernetes.io/initialized: "true" + pod.beta.kubernetes.io/hostname: mysql-0 + pod.beta.kubernetes.io/subdomain: galera + creationTimestamp: 2016-07-27T02:41:09Z + generateName: mysql- + labels: + app: mysql + name: mysql-0 + namespace: default + resourceVersion: "7191" + selfLink: /api/v1/namespaces/default/pods/mysql-0 + uid: 92e49e79-53a3-11e6-b45a-080027242396 + spec: + containers: + - args: + - --defaults-file=/etc/mysql/my-galera.cnf + - --user=root + image: erkules/galera:basic + imagePullPolicy: IfNotPresent + name: mysql + ports: + - containerPort: 3306 + name: mysql + protocol: TCP + - containerPort: 4444 + name: sst + protocol: TCP + - containerPort: 4567 + name: replication + protocol: TCP + - containerPort: 4568 + name: ist + protocol: TCP + readinessProbe: + exec: + command: + - sh + - -c + - mysql -u root -e 'show databases;' + failureThreshold: 3 + initialDelaySeconds: 15 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + resources: {} + securityContext: + capabilities: + drop: + - MKNOD + - SYS_CHROOT + privileged: false + seLinuxOptions: + level: s0:c5,c0 + terminationMessagePath: /dev/termination-log + volumeMounts: + - mountPath: /var/lib/ + name: datadir + - mountPath: /etc/mysql + name: config + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: default-token-au2xq + readOnly: true + dnsPolicy: ClusterFirst + host: localhost.localdomain + imagePullSecrets: + - name: default-dockercfg-pzhsj + nodeName: localhost.localdomain + 
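+      # Stable identity: the hostname/subdomain annotations above give this pod
+      # the DNS name mysql-0.galera, and the claim referenced below
+      # (datadir-mysql-0) is derived from the volumeClaimTemplate name, the
+      # PetSet name, and the pod's ordinal.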
restartPolicy: Always + securityContext: + seLinuxOptions: + level: s0:c5,c0 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: datadir + persistentVolumeClaim: + claimName: datadir-mysql-0 + - emptyDir: {} + name: config + - emptyDir: {} + name: workdir + - name: default-token-au2xq + secret: + secretName: default-token-au2xq + status: + conditions: + - lastProbeTime: null + lastTransitionTime: 2016-07-27T02:41:15Z + status: "True" + type: Initialized + - lastProbeTime: null + lastTransitionTime: 2016-07-27T03:00:47Z + status: "True" + type: Ready + - lastProbeTime: null + lastTransitionTime: 2016-07-27T02:41:09Z + status: "True" + type: PodScheduled + containerStatuses: + - containerID: docker://f2406b0f697c525df44b64aec6b1f6024ab88d9df80256426247dc6e9a92cb30 + image: erkules/galera:basic + imageID: docker://sha256:b4780e247a38c12612f539ce1ac8e0988e1781d56fddf719c80fb8d4d7b8bbde + lastState: {} + name: mysql + ready: true + restartCount: 0 + state: + running: + startedAt: 2016-07-27T02:41:16Z + hostIP: 10.0.2.15 + phase: Running + podIP: 172.17.0.2 + startTime: 2016-07-27T02:41:09Z +- apiVersion: v1 + kind: Pod + metadata: + annotations: + kubernetes.io/created-by: | + {"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"PetSet","namespace":"default","name":"mysql","uid":"3900c985-4f5b-11e6-b8a1-080027242396","apiVersion":"apps","resourceVersion":"6790"}} + openshift.io/scc: anyuid + pod.alpha.kubernetes.io/init-container-statuses: '[{"name":"install","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T02:41:42Z","finishedAt":"2016-07-27T02:41:42Z","containerID":"docker://2538c65f65557955c02745ef4021181cf322c8dc0db62144dd1e1f8ea9f7fa54"}},"lastState":{},"ready":true,"restartCount":0,"image":"gcr.io/google_containers/galera-install:0.1","imageID":"docker://sha256:56ef857005d0ce479f2db0e4ee0ece05e0766ebfa7e79e27e1513915262a18ec","containerID":"docker://2538c65f65557955c02745ef4021181cf322c8dc0db62144dd1e1f8ea9f7fa54"},{"name":"bootstrap","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T02:41:44Z","finishedAt":"2016-07-27T02:41:45Z","containerID":"docker://4df7188d37033c182e675d45179941766bd1e6a013469038f43fa3fecc2cc06d"}},"lastState":{},"ready":true,"restartCount":0,"image":"debian:jessie","imageID":"docker://sha256:1b088884749bd93867ddb48ff404d4bbff09a17af8d95bc863efa5d133f87b78","containerID":"docker://4df7188d37033c182e675d45179941766bd1e6a013469038f43fa3fecc2cc06d"}]' + pod.alpha.kubernetes.io/init-containers: 
'[{"name":"install","image":"gcr.io/google_containers/galera-install:0.1","args":["--work-dir=/work-dir"],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"Always"},{"name":"bootstrap","image":"debian:jessie","command":["/work-dir/peer-finder"],"args":["-on-start=\"/work-dir/on-start.sh\"","-service=galera"],"env":[{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}}],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"IfNotPresent"}]' + pod.alpha.kubernetes.io/initialized: "true" + pod.beta.kubernetes.io/hostname: mysql-1 + pod.beta.kubernetes.io/subdomain: galera + creationTimestamp: 2016-07-27T02:41:39Z + generateName: mysql- + labels: + app: mysql + name: mysql-1 + namespace: default + resourceVersion: "7195" + selfLink: /api/v1/namespaces/default/pods/mysql-1 + uid: a4da4725-53a3-11e6-b45a-080027242396 + spec: + containers: + - args: + - --defaults-file=/etc/mysql/my-galera.cnf + - --user=root + image: erkules/galera:basic + imagePullPolicy: IfNotPresent + name: mysql + ports: + - containerPort: 3306 + name: mysql + protocol: TCP + - containerPort: 4444 + name: sst + protocol: TCP + - containerPort: 4567 + name: replication + protocol: TCP + - containerPort: 4568 + name: ist + protocol: TCP + readinessProbe: + exec: + command: + - sh + - -c + - mysql -u root -e 'show databases;' + failureThreshold: 3 + initialDelaySeconds: 15 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + resources: {} + securityContext: + capabilities: + drop: + - MKNOD + - SYS_CHROOT + privileged: false + seLinuxOptions: + level: s0:c5,c0 + terminationMessagePath: /dev/termination-log + volumeMounts: + - mountPath: /var/lib/ + name: datadir + - mountPath: /etc/mysql + name: config + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: default-token-au2xq + readOnly: true + dnsPolicy: ClusterFirst + host: localhost.localdomain + imagePullSecrets: + - name: default-dockercfg-pzhsj + nodeName: localhost.localdomain + restartPolicy: Always + securityContext: + seLinuxOptions: + level: s0:c5,c0 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: datadir + persistentVolumeClaim: + claimName: datadir-mysql-1 + - emptyDir: {} + name: config + - emptyDir: {} + name: workdir + - name: default-token-au2xq + secret: + secretName: default-token-au2xq + status: + conditions: + - lastProbeTime: null + lastTransitionTime: 2016-07-27T02:41:46Z + status: "True" + type: Initialized + - lastProbeTime: null + lastTransitionTime: 2016-07-27T03:00:58Z + status: "True" + type: Ready + - lastProbeTime: null + lastTransitionTime: 2016-07-27T02:41:39Z + status: "True" + type: PodScheduled + containerStatuses: + - containerID: docker://be1d5be42ab23d1db23f4552141e9068e2385ba19c3e84596e047eb6d2762d1c + image: erkules/galera:basic + imageID: docker://sha256:b4780e247a38c12612f539ce1ac8e0988e1781d56fddf719c80fb8d4d7b8bbde + lastState: + terminated: + containerID: 
docker://9a662fa5b74a962fa362c6a5d632fe3642b12fefde36c8158ab1a50d8fa4e33e + exitCode: 1 + finishedAt: 2016-07-27T02:51:40Z + reason: Error + startedAt: 2016-07-27T02:51:05Z + name: mysql + ready: true + restartCount: 7 + state: + running: + startedAt: 2016-07-27T03:00:39Z + hostIP: 10.0.2.15 + phase: Running + podIP: 172.17.0.3 + startTime: 2016-07-27T02:41:39Z +- apiVersion: v1 + kind: Pod + metadata: + annotations: + kubernetes.io/created-by: | + {"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"PetSet","namespace":"default","name":"mysql","uid":"3900c985-4f5b-11e6-b8a1-080027242396","apiVersion":"apps","resourceVersion":"6790"}} + openshift.io/scc: anyuid + pod.alpha.kubernetes.io/init-container-statuses: '[{"name":"install","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T03:01:01Z","finishedAt":"2016-07-27T03:01:01Z","containerID":"docker://af008b4ce59d36695fbabf40ae2f7431b51441eb2e9c6962378937c06ac69a35"}},"lastState":{},"ready":true,"restartCount":0,"image":"gcr.io/google_containers/galera-install:0.1","imageID":"docker://sha256:56ef857005d0ce479f2db0e4ee0ece05e0766ebfa7e79e27e1513915262a18ec","containerID":"docker://af008b4ce59d36695fbabf40ae2f7431b51441eb2e9c6962378937c06ac69a35"},{"name":"bootstrap","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T03:01:02Z","finishedAt":"2016-07-27T03:01:03Z","containerID":"docker://ee97005854130335b54a65429865956260b7729e51e6363ab05e63d5c7c9ee48"}},"lastState":{},"ready":true,"restartCount":0,"image":"debian:jessie","imageID":"docker://sha256:1b088884749bd93867ddb48ff404d4bbff09a17af8d95bc863efa5d133f87b78","containerID":"docker://ee97005854130335b54a65429865956260b7729e51e6363ab05e63d5c7c9ee48"}]' + pod.alpha.kubernetes.io/init-containers: '[{"name":"install","image":"gcr.io/google_containers/galera-install:0.1","args":["--work-dir=/work-dir"],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"Always"},{"name":"bootstrap","image":"debian:jessie","command":["/work-dir/peer-finder"],"args":["-on-start=\"/work-dir/on-start.sh\"","-service=galera"],"env":[{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}}],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"IfNotPresent"}]' + pod.alpha.kubernetes.io/initialized: "true" + pod.beta.kubernetes.io/hostname: mysql-2 + pod.beta.kubernetes.io/subdomain: galera + creationTimestamp: 2016-07-27T03:00:58Z + generateName: mysql- + labels: + app: mysql + name: mysql-2 + namespace: default + resourceVersion: "7226" + selfLink: /api/v1/namespaces/default/pods/mysql-2 + uid: 57e618f1-53a6-11e6-b215-080027242396 + spec: + containers: + - args: + - --defaults-file=/etc/mysql/my-galera.cnf + - --user=root + image: erkules/galera:basic + imagePullPolicy: IfNotPresent + name: mysql + ports: + - containerPort: 3306 + name: mysql + protocol: TCP + - containerPort: 4444 + name: sst + protocol: TCP + - containerPort: 4567 + name: replication + protocol: TCP + - containerPort: 4568 + name: ist + protocol: TCP + 
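+      # mysql-2 was created at 03:00:58, the same instant mysql-1 reported
+      # Ready (19 minutes after mysql-1's own creation, which crash-looped
+      # through 7 restarts) - consistent with the PetSet controller starting
+      # each pet only after its predecessor passes the readiness probe below.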
readinessProbe: + exec: + command: + - sh + - -c + - mysql -u root -e 'show databases;' + failureThreshold: 3 + initialDelaySeconds: 15 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + resources: {} + securityContext: + capabilities: + drop: + - MKNOD + - SYS_CHROOT + privileged: false + seLinuxOptions: + level: s0:c5,c0 + terminationMessagePath: /dev/termination-log + volumeMounts: + - mountPath: /var/lib/ + name: datadir + - mountPath: /etc/mysql + name: config + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: default-token-au2xq + readOnly: true + dnsPolicy: ClusterFirst + host: localhost.localdomain + imagePullSecrets: + - name: default-dockercfg-pzhsj + nodeName: localhost.localdomain + restartPolicy: Always + securityContext: + seLinuxOptions: + level: s0:c5,c0 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: datadir + persistentVolumeClaim: + claimName: datadir-mysql-2 + - emptyDir: {} + name: config + - emptyDir: {} + name: workdir + - name: default-token-au2xq + secret: + secretName: default-token-au2xq + status: + conditions: + - lastProbeTime: null + lastTransitionTime: 2016-07-27T03:01:03Z + status: "True" + type: Initialized + - lastProbeTime: null + lastTransitionTime: 2016-07-27T03:01:28Z + status: "True" + type: Ready + - lastProbeTime: null + lastTransitionTime: 2016-07-27T03:00:58Z + status: "True" + type: PodScheduled + containerStatuses: + - containerID: docker://82b774855cdb5d12d98e7bc34f4f9d4e88e757e9cc2da1593e2e2f66e3241e5f + image: erkules/galera:basic + imageID: docker://sha256:b4780e247a38c12612f539ce1ac8e0988e1781d56fddf719c80fb8d4d7b8bbde + lastState: {} + name: mysql + ready: true + restartCount: 0 + state: + running: + startedAt: 2016-07-27T03:01:04Z + hostIP: 10.0.2.15 + phase: Running + podIP: 172.17.0.4 + startTime: 2016-07-27T03:00:58Z +kind: List +metadata: {} From eb0dc7917a66c5a61c1c231745b1109310b2c944 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Wed, 27 Jul 2016 00:16:42 -0400 Subject: [PATCH 7/8] UPSTREAM: 29655: No PetSet client --- .../pkg/client/unversioned/client.go | 1 + .../unversioned/testclient/fake_petsets.go | 83 +++++++++++++++++++ .../unversioned/testclient/testclient.go | 17 ++++ 3 files changed, 101 insertions(+) create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_petsets.go diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/client.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/client.go index 00c0ed118c25..21ed099804da 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/client.go +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/client.go @@ -44,6 +44,7 @@ type Interface interface { PersistentVolumeClaimsNamespacer ComponentStatusesInterface ConfigMapsNamespacer + Apps() AppsInterface Autoscaling() AutoscalingInterface Authentication() AuthenticationInterface Batch() BatchInterface diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_petsets.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_petsets.go new file mode 100644 index 000000000000..274f0143be12 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_petsets.go @@ -0,0 +1,83 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testclient + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/watch" +) + +// FakePetSets implements PetSetsInterface. Meant to be embedded into a struct to get a default +// implementation. This makes faking out just the method you want to test easier. +type FakePetSets struct { + Fake *FakeApps + Namespace string +} + +func (c *FakePetSets) Get(name string) (*apps.PetSet, error) { + obj, err := c.Fake.Invokes(NewGetAction("petsets", c.Namespace, name), &apps.PetSet{}) + if obj == nil { + return nil, err + } + + return obj.(*apps.PetSet), err +} + +func (c *FakePetSets) List(opts api.ListOptions) (*apps.PetSetList, error) { + obj, err := c.Fake.Invokes(NewListAction("petsets", c.Namespace, opts), &apps.PetSetList{}) + if obj == nil { + return nil, err + } + return obj.(*apps.PetSetList), err +} + +func (c *FakePetSets) Create(rs *apps.PetSet) (*apps.PetSet, error) { + obj, err := c.Fake.Invokes(NewCreateAction("petsets", c.Namespace, rs), rs) + if obj == nil { + return nil, err + } + + return obj.(*apps.PetSet), err +} + +func (c *FakePetSets) Update(rs *apps.PetSet) (*apps.PetSet, error) { + obj, err := c.Fake.Invokes(NewUpdateAction("petsets", c.Namespace, rs), rs) + if obj == nil { + return nil, err + } + + return obj.(*apps.PetSet), err +} + +func (c *FakePetSets) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake.Invokes(NewDeleteAction("petsets", c.Namespace, name), &apps.PetSet{}) + return err +} + +func (c *FakePetSets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake.InvokesWatch(NewWatchAction("petsets", c.Namespace, opts)) +} + +func (c *FakePetSets) UpdateStatus(rs *apps.PetSet) (result *apps.PetSet, err error) { + obj, err := c.Fake.Invokes(NewUpdateSubresourceAction("petsets", "status", c.Namespace, rs), rs) + if obj == nil { + return nil, err + } + + return obj.(*apps.PetSet), err +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/testclient/testclient.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/testclient/testclient.go index 2277961c33d0..a10bd741c624 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/testclient/testclient.go +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/testclient/testclient.go @@ -281,6 +281,10 @@ func (c *Fake) Namespaces() client.NamespaceInterface { return &FakeNamespaces{Fake: c} } +func (c *Fake) Apps() client.AppsInterface { + return &FakeApps{c} +} + func (c *Fake) Autoscaling() client.AutoscalingInterface { return &FakeAutoscaling{c} } @@ -327,6 +331,19 @@ func (c *Fake) SwaggerSchema(version unversioned.GroupVersion) (*swagger.ApiDecl return &swagger.ApiDeclaration{}, nil } +// NewSimpleFakeApps returns a client that will respond with the provided objects +func NewSimpleFakeApps(objects ...runtime.Object) *FakeApps { + return &FakeApps{Fake: NewSimpleFake(objects...)} +} + +type FakeApps struct { + *Fake +} + +func (c *FakeApps) PetSets(namespace string) client.PetSetInterface { + return &FakePetSets{Fake: c, Namespace: namespace} +} + // NewSimpleFakeAutoscaling returns a client that will respond with 
the provided objects func NewSimpleFakeAutoscaling(objects ...runtime.Object) *FakeAutoscaling { return &FakeAutoscaling{Fake: NewSimpleFake(objects...)} From cc585097d4706f1d49fb9a451760f38e9863a794 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Sun, 31 Jul 2016 23:05:24 -0400 Subject: [PATCH 8/8] Remove unsupported DNS calls from tests --- test/extended/dns/dns.go | 8 +++++--- test/integration/dns_test.go | 6 +++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/test/extended/dns/dns.go b/test/extended/dns/dns.go index b4379076f3b7..b80a8f6aadbc 100644 --- a/test/extended/dns/dns.go +++ b/test/extended/dns/dns.go @@ -204,7 +204,7 @@ func createEndpointSpec(name string) *api.Endpoints { Subsets: []api.EndpointSubset{ { Addresses: []api.EndpointAddress{ - {IP: "1.1.1.1"}, + {IP: "1.1.1.1", Hostname: "endpoint1"}, {IP: "1.1.1.2"}, }, NotReadyAddresses: []api.EndpointAddress{ @@ -272,12 +272,14 @@ var _ = Describe("DNS", func() { // the DNS pod should be able to look up endpoints for names and wildcards digForARecords(map[string][]string{ - "kubernetes.default.endpoints": kubeEndpoints, - "prefix.kubernetes.default.endpoints.cluster.local": kubeEndpoints, + "kubernetes.default.endpoints": kubeEndpoints, fmt.Sprintf("headless.%s.svc", f.Namespace.Name): readyEndpoints, fmt.Sprintf("headless.%s.endpoints", f.Namespace.Name): readyEndpoints, fmt.Sprintf("clusterip.%s.endpoints", f.Namespace.Name): readyEndpoints, + + fmt.Sprintf("endpoint1.headless.%s.endpoints", f.Namespace.Name): {"1.1.1.1"}, + fmt.Sprintf("endpoint1.clusterip.%s.endpoints", f.Namespace.Name): {"1.1.1.1"}, }, expect), // the DNS pod should respond to its own request diff --git a/test/integration/dns_test.go b/test/integration/dns_test.go index 02ae5554c77e..c9b74f1eb53e 100644 --- a/test/integration/dns_test.go +++ b/test/integration/dns_test.go @@ -118,7 +118,7 @@ func TestDNS(t *testing.T) { Subsets: []kapi.EndpointSubset{{ Addresses: []kapi.EndpointAddress{{IP: "172.0.0.1"}}, Ports: []kapi.EndpointPort{ - {Port: 2345}, + {Port: 2345, Name: "http"}, }, }}, }); err != nil { @@ -187,7 +187,7 @@ func TestDNS(t *testing.T) { expect: []*net.IP{&headlessIP}, }, { // specific port of a headless service - dnsQuestionName: "unknown-port-2345.e1.headless.default.svc.cluster.local.", + dnsQuestionName: "_http._tcp.headless.default.svc.cluster.local.", expect: []*net.IP{&headlessIP}, }, { // SRV record for that service @@ -209,7 +209,7 @@ func TestDNS(t *testing.T) { }, }, { // the SRV record resolves to the IP - dnsQuestionName: "unknown-port-2345.e1.headless.default.svc.cluster.local.", + dnsQuestionName: "_http._tcp.headless.default.svc.cluster.local.", expect: []*net.IP{&headlessIP}, }, { // headless 2 service