Prepared for K8s version 1.20 (Q2).
Curriculum of CKAD is changing from Q3. Details here
- CKAD Notes and Imperative Commands
-
wc -l <filename>
prints the line count -
k config set-context --current --namespace <namespace>
: use namespace for current context -
Alias and autocomplete
alias k=kubectl alias kx=kubectl explain alias kd=kubectl describe source <(kubectl completion bash) complete -F __start_kubectl k alias kcs='k config set-context --namespace' # Even better alias: alias kns='kubectl config set-context --current --namespace' # example: kns <mynamespace>
-
Another way of configuring alias
export NS=default alias k='kubectl -n $NS' alias ka='kubectl -n $NS apply -f' alias kgp='kubectl get pod -n $NS'
-
Vim editor settings
vi ~/.vimrc set nu #for numbering set expandtab #Expand TABs to spaces set shiftwidth=2 set tabstop=2 #tab width syntax on colorscheme desert
-
Create manifest using the
cat
command: cat <<EOF > file.yaml <your content> EOF
-
If using tmux
tmux CTRL+B % (Split tmux screen vertically) CTRL+B " (Split tmux screen horizontally) CTRL+B <- -> (Switch between panes) CTRL+B x (Exit)
-
netcat to check connectivity from one pod to another:
nc -z -v -w 1 <name of service> <port>
kubectl config set-context --current --namespace=cloud-app-gateway
k config set-context --namespace=ingress-space --current
kcs ingress-namespace --current
: using alias (saves a lot of time)
kubectl run nginx --image=nginx
kubectl run nginx --image=nginx --dry-run=client -o yaml
k explain pod --recursive | grep -A8 -B4 envFrom
kubectl get pods -o wide --show-labels | grep -i finance | wc -l
- word count
kubectl get pods --selector bu=finance
- filter based on label
kubectl get all --selector env=prod
- get all based on labels
kubectl get all --selector env=prod,bu=finance,tier=frontend
- multiple selectors
kubectl get po -l env=dev --no-headers | wc -l
- No headers - for not displaying the first line of the result.
k run nginx --image=nginx --port=80 --expose
: Creates a service
spec:
containers:
- image: nginx
name: nginx
ports:
- containerPort: 80
resources: # At container level
limits:
cpu: 200m
memory: 512Mi
requests:
cpu: 100m
memory: 256Mi
kubectl run nginx --image=nginx --restart=Never --port=80 --namespace=myname --command --serviceaccount=mysa1 --env=HOSTNAME=local --labels=bu=finance,env=dev --requests='cpu=100m,memory=256Mi' --limits='cpu=200m,memory=512Mi' --dry-run -o yaml -- /bin/sh -c 'echo hello world'
spec:
containers:
- command:
- /bin/sh
- -c
- echo hello world
kubectl run nginx --image=nginx --restart=Never --port=80 --namespace=myname --serviceaccount=mysa1 --env=HOSTNAME=local --labels=bu=finance,env=dev --requests='cpu=100m,memory=256Mi' --limits='cpu=200m,memory=512Mi' --dry-run -o yaml -- /bin/sh -c 'echo hello world'
spec:
containers:
- args:
- /bin/sh
- -c
- echo hello world
apiVersion: v1
kind: Pod
metadata:
labels:
run: box
name: box
spec:
initContainers: #
- args: #
- /bin/sh #
- -c #
- wget -O /work-dir/index.html http://neverssl.com/online #
image: busybox #
name: box #
volumeMounts: #
- name: vol #
mountPath: /work-dir #
containers:
- image: nginx
name: nginx
ports:
- containerPort: 80
volumeMounts: #
- name: vol #
mountPath: /usr/share/nginx/html #
volumes: #
- name: vol #
emptyDir: {} #
k annotate pod nginx1 nginx2 nginx3 description='my description'
k annotate pod nginx1 nginx2 nginx3 description-
k label pod nginx1 app=v2 --overwrite
k label pod nginx3 app-
kubectl label po nginx1 nginx2 nginx3 app-
: Remove labels at once
-
kubectl create deployment --image=nginx nginx
-
kubectl create deployment --image=nginx nginx --dry-run=client -o yaml
-
kubectl create deployment nginx --image=nginx --replicas=4
-
kubectl scale deployment nginx --replicas=4
-
kubectl create deployment nginx --image=nginx --dry-run=client -o yaml > nginx-deployment.yaml
-
kubectl set image deployment/nginx busybox=busybox nginx=nginx:1.9.1
-
When asked for specific image or when looking of something specific try to use grep command
-
kubectl expose deployment my-dep --name=my-service --target-port=8080 --type=NodePort --port=80
-
kubectl rollout undo deployment/myapp-deployment
-
kubectl autoscale deployment nginx --min=5 --max=10 --cpu-percent=80
: Autoscale deployment -
kubectl rollout status deployment nginx
-
kubectl rollout history deployment nginx
-
kubectl rollout history deployment nginx --revision=1
-
kubectl set image deployment nginx nginx=nginx:1.17 --record
-
kubectl rollout undo deployment nginx --to-revision=2
: To a specific revision -
k get rs nginx-67dfd6c8f9 -o yaml --show-managed-fields=false
: To avoid extra fields in yaml -
.spec.strategy.type==RollingUpdate
-
.spec.strategy.rollingUpdate.maxUnavailable
-
.spec.strategy.rollingUpdate.maxSurge
kubectl create cronjob throw-dice-cron-job --image=kodekloud/throw-dice --schedule="30 21 * * *"
k create job busybox --image=busybox -- /bin/sh -c 'echo hello;sleep 30;echo world'
apiVersion: batch/v1
kind: CronJob
metadata:
name: hello
spec:
## Cron Spec
schedule: "*/1 * * * *"
jobTemplate:
spec:
## Job Spec
template:
## Pod spec
spec:
containers:
- name: hello
image: busybox
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- date; echo Hello from the Kubernetes cluster
restartPolicy: OnFailure
## Few important properties:
- completions: Number of jobs to run
- parallelism: Parallel executions of jobs
- backoffLimit: number of retries before the Job is marked as failed.
- activeDeadlineSeconds: maximum duration (in seconds) the Job may run before it is terminated.
- ttlSecondsAfterFinished: automatically clean up the Job n seconds after it finishes.
kubectl run nginx --image=nginx --restart=Never --port=80 --namespace=myname --command --serviceaccount=mysa1 --env=HOSTNAME=local --labels=bu=finance,env=dev --requests='cpu=100m,memory=256Mi' --limits='cpu=200m,memory=512Mi' --dry-run -o yaml -- /bin/sh -c 'echo hello world'
spec:
containers:
- command:
- /bin/sh
- -c
- echo hello world
kubectl run nginx --image=nginx --restart=Never --port=80 --namespace=myname --serviceaccount=mysa1 --env=HOSTNAME=local --labels=bu=finance,env=dev --requests='cpu=100m,memory=256Mi' --limits='cpu=200m,memory=512Mi' --dry-run -o yaml -- /bin/sh -c 'echo hello world'
spec:
containers:
- args:
- /bin/sh
- -c
- echo hello world
spec:
containers:
- image: nginx
name: nginx
ports:
- containerPort: 80
resources: # At container level
limits:
cpu: 200m
memory: 512Mi
requests:
cpu: 100m
memory: 256Mi
-
Default limits by LimitRange
apiVersion: v1 kind: LimitRange metadata: name: mem-limit-range spec: limits: - default: memory: 512Mi defaultRequest: memory: 256Mi type: Container
spec:
containers:
- name: nginx
image: nginx
env:
    - name: APP_COLOR # env entries use "name", not "key"
valueFrom:
configMapKeyRef:
name:
key:
spec:
containers:
- name: nginx
image: nginx
env:
    - name: APP_COLOR # env entries use "name", not "key"
valueFrom:
secretKeyRef:
name:
key:
spec:
containers:
- name: nginx
image: nginx
envFrom:
- configMapRef:
name:
spec:
containers:
- name: nginx
image: nginx
envFrom:
- secretRef:
name:
spec:
containers:
- name: nginx
image: nginx
volumeMounts:
- name: config-volume
mountPath: /etc/config
volumes:
- name: config-volume
configMap:
name: special-config
spec:
containers:
- name: nginx
image: nginx
volumeMounts: # container level
- name: secret-volume
mountPath: /etc/secrets
volumes: # Pod level
- name: secret-volume
secret:
secretName: secret-values
spec:
securityContext: # Pod level
runAsUser: 1000
containers:
- image: nginx
name: nginx
securityContext: # Container level
runAsUser:
capabilities:
add: ["MAC_ADMIN"]
spec:
containers:
- name: nginx
image: nginx
    serviceAccountName: dashboard-sa # At pod level (serviceAccount is deprecated)
k create sa myuser
k run nginx1 --image=nginx --port=80 --serviceaccount=myuser
kubectl set serviceaccount deployment frontend myuser
-
Taints on node
-
Toleration on pod
-
Does not prevent tolerated pods to be scheduled on other nodes
-
Taint effects:
- NoSchedule : No new pods will be scheduled
- PreferNoSchedule: Try to avoid new pods schedule
- NoExecute : no new pods will be scheduled, and existing pods that do not tolerate the taint will be evicted from the node.
-
k taint nodes node-name key=value:taint-effect
spec:
containers:
- name: nginx
image: nginx
tolerations: # Pod level
- key: "app"
    operator: "Equal" # valid values are Equal and Exists (not "Equals")
value: "blue"
effect: "NoSchedule"
- Schedule pod on specific node.
spec:
nodeSelector:
size: large ## These are labels assigned to nodes
containers:
- image: nginx
name: nginx
- Limitations:
- Does not support OR conditions like Large or Medium but not small
- For this node affinity and anti-affinity features were introduced.
- Support advanced expressions
spec:
containers:
- image: nginx
name: nginx
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: disktype
operator: In
values:
- ssd
-
k run nginx --image=nginx --port=80 --expose
: creates pod and exposes a service -
kubectl expose pod redis --port=6379 --name redis-service --dry-run=client -o yaml
: Create a Service named redis-service of type ClusterIP to expose pod redis on port 6379 (This will automatically use the pod's labels as selectors)OR
-
kubectl create service clusterip redis --tcp=6379:6379 --dry-run=client -o yaml
: (This will not use the pods labels as selectors, instead it will assume selectors as app=redis. You cannot pass in selectors as an option. So it does not work very well if your pod has a different label set. So generate the file and modify the selectors before creating the service) -
kubectl expose pod nginx --port=80 --name nginx-service --type=NodePort --dry-run=client -o yaml
: Create a Service named nginx of type NodePort to expose pod nginx's port 80 on port 30080 on the nodes. (This will automatically use the pod's labels as selectors, but you cannot specify the node port. You have to generate a definition file and then add the node port in manually before creating the service with the pod.)OR
-
kubectl create service nodeport nginx --tcp=80:80 --node-port=30080 --dry-run=client -o yaml
(This will not use the pods labels as selectors) -
Both the above commands have their own challenges. While one of it cannot accept a selector the other cannot accept a node port. I would recommend going with the
kubectl expose
command. If you need to specify a node port, generate a definition file using the same command and manually input the nodeport before creating the service. -
kubectl expose deployment simple-web-app --name=my-service --target-port=8080 --type=NodePort --port=8080
-
kubectl run frontend --replicas=2 --labels=run=load-balancer-example --image=busybox --port=8080
-
kubectl expose deployment frontend --type=NodePort --name=frontend-service --port=6262 --target-port=8080
-
kubectl create service clusterip my-cs --tcp=5678:8080 --dry-run=client -o yaml
-
To access a service from temporary pod using service name (if service is running in different namespace than temporary pod).
-
k run tmp --restart=Never --rm -i --image=nginx:alpine -- curl -m 5 <svc-name>.<namespace>:<port>
-
k run tmp --restart=Never --rm -i --image=nginx:alpine -- curl -m 5 <svc-name>.<namespace>.svc.cluster.local:<port>
kubectl create ingress ingress1 --class=default --rule="foo.com/path*=svc:8080" --rule="bar.com/admin*=svc2:http" -o yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ingress-wildcard-host
spec:
rules:
- host: "foo.bar.com"
http:
paths:
- pathType: Prefix
path: "/bar"
backend:
service:
name: service1
port:
number: 80
- host: "*.foo.com"
http:
paths:
- pathType: Prefix
path: "/foo"
backend:
service:
name: service2
port:
number: 80
# Single host multiple paths
- host: foo.bar.com
http:
paths:
- path: /foo
pathType: Prefix
backend:
service:
name: service1
port:
number: 4200
- path: /bar
pathType: Prefix
backend:
service:
name: service2
port:
number: 8080
spec:
podSelector:
matchLabels:
role: db
policyTypes:
- Ingress
ingress:
- from:
- podSelector:
matchLabels:
name: api-prod
namespaceSelector: # If you want to restrict for pods in a specific namespace.
matchLabels:
name: prod
ports:
- protocol: TCP
port: 3306
kubectl logs question-13-pod -c question-thirteen --v 4
- logs with verbose levels
kubectl get events | grep -i error
kubectl logs nginx --previous
k top pods -A | sort -k 4n
k top pod --no-headers -A --sort-by=cpu | awk '{print $2}' | head -1
: get name of the pod with max cpu utilization.
## Running a command
readinessProbe:
exec:
command:
- cat
- /tmp/healthy
initialDelaySeconds: 5
periodSeconds: 5
failureThreshold: 8
## HTTP Get
readinessProbe:
httpGet:
path: /
port: 80
### Sample
apiVersion: v1
kind: Pod
metadata:
labels:
test: liveness
name: liveness-exec
spec:
containers:
- name: liveness
image: k8s.gcr.io/busybox
args:
- /bin/sh
- -c
- touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
livenessProbe:
exec:
command:
- cat
- /tmp/healthy
initialDelaySeconds: 5
periodSeconds: 5
- To find the storage class that does not support dynamic provisioning Look for the storage class name that uses
no-provisioner
kubectl cp busybox:etc/passwd ./passwd
: copy from container to local
$ cat etc/passwd
root:x:0:0:root:/root:/bin/sh
daemon:x:1:1:daemon:/usr/sbin:/bin/false
bin:x:2:2:bin:/bin:/bin/false
sys:x:3:3:sys:/dev:/bin/false
sync:x:4:100:sync:/bin:/bin/sync
mail:x:8:8:mail:/var/spool/mail:/bin/false
www-data:x:33:33:www-data:/var/www:/bin/false
operator:x:37:37:Operator:/var:/bin/false
nobody:x:65534:65534:nobody:/home:/bin/false
# Cut first column
cat /etc/passwd | cut -f 1 -d ':' > /etc/foo/passwd
apiVersion: v1
kind: PersistentVolume
metadata:
name: task-pv-volume
labels:
type: local
spec:
storageClassName: manual
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
hostPath:
path: "/mnt/data"
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: task-pv-claim
spec:
storageClassName: manual
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 3Gi
apiVersion: v1
kind: Pod
metadata:
name: task-pv-pod
spec:
volumes:
- name: task-pv-storage
persistentVolumeClaim:
claimName: task-pv-claim
containers:
- name: task-pv-container
image: nginx
ports:
- containerPort: 80
name: "http-server"
volumeMounts:
- mountPath: "/usr/share/nginx/html"
name: task-pv-storage
- If you are new to
vim
then use vimtutor
to get your hands dirty with the vim
editor. - Udemy Course - Mumshad Mannambeth
- Udemy Course - Zeal Vora
- CKAD-Exercises.
- CKAD Simulator. You get access to one free simulator for 36 hours using Linux foundation single sign on.
- 5 Sample Questions - CKAD
- Answers to 5 sample questions