From c04274e7b73f36c0807e0e2358089a558efddf40 Mon Sep 17 00:00:00 2001
From: seifrajhi
+
+```bash
+kubectl exec -it kube-apiserver-controlplane -n kube-system -- kube-apiserver -h | grep 'enable-admission-plugins'
+```
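+
+Alternatively, inspect the flags of the running process directly on the control plane node (a quick sketch, assuming a kubeadm-provisioned cluster):
+
+```bash
+# show the admission plugin flags passed to the running kube-apiserver
+ps -ef | grep kube-apiserver | grep -o 'enable-admission-plugins=[^ ]*'
+```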
+
+
+
+#### Check the `--enable-admission-plugins` property in the `/etc/kubernetes/manifests/kube-apiserver.yaml` file
+
+
+
+#### Add `--disable-admission-plugins=DefaultStorageClass` to the `/etc/kubernetes/manifests/kube-apiserver.yaml` file
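+
+The flag goes into the kube-apiserver command list; a minimal sketch of the relevant manifest fragment (other flags omitted):
+
+```yaml
+spec:
+  containers:
+  - command:
+    - kube-apiserver
+    - --disable-admission-plugins=DefaultStorageClass
+    # ... existing flags remain unchanged
+```
+
+The kubelet detects the change to the static pod manifest and restarts the kube-apiserver automatically.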
+
+
+
+```bash
+kubectl run nginx-annotations --image nginx
+kubectl annotate pod nginx-annotations description='my description'
+```
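+
+To verify the annotation (output shown is a sketch):
+
+```bash
+kubectl get pod nginx-annotations -o jsonpath='{.metadata.annotations.description}'
+# my description
+```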
+
+
+
+```yaml
+apiVersion: apps/v1 # Update from apps/v1beta1 to apps/v1 and apply
+kind: Deployment
+metadata:
+ labels:
+ app: nginx
+ name: nginx-deployment
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - image: nginx:1.20
+ name: nginx
+```
+
+
+
+```bash
+kubectl api-resources
+```
+
+
+```bash
+kubectl api-resources | grep jobs
+# cronjobs   cj   batch/v1beta1   true   CronJob
+# jobs            batch/v1        true   Job
+```
+
+
+
+Add `--runtime-config=rbac.authorization.k8s.io/v1alpha1` to the `/etc/kubernetes/manifests/kube-apiserver.yaml` file and let the kube-apiserver restart
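+
+A sketch of the resulting manifest fragment:
+
+```yaml
+spec:
+  containers:
+  - command:
+    - kube-apiserver
+    - --runtime-config=rbac.authorization.k8s.io/v1alpha1
+```
+
+Once the apiserver is back up, `kubectl api-versions | grep rbac` should list `rbac.authorization.k8s.io/v1alpha1`.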
+
+
+
+#### Load the AppArmor profile
+
+**NOTE**: The profile needs to be loaded on all nodes.
+
+```bash
+apparmor_parser -q k8s-apparmor-example-deny-write # load the apparmor profile
+
+aa-status | grep k8s-apparmor-example-deny-write # verify it's loaded
+# k8s-apparmor-example-deny-write
+```
+
+#### Enable AppArmor for the pod
+
+```yaml
+cat << EOF > hello-apparmor.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: hello-apparmor
+ annotations: # add apparmor annotations
+ container.apparmor.security.beta.kubernetes.io/hello: localhost/k8s-apparmor-example-deny-write # add this
+spec:
+ containers:
+ - name: hello
+ image: busybox
+ command: [ "sh", "-c", "echo 'Hello AppArmor!' && sleep 1h" ]
+EOF
+
+kubectl apply -f hello-apparmor.yaml
+```
+
+#### Verify
+
+```bash
+kubectl exec hello-apparmor -- cat /proc/1/attr/current
+# k8s-apparmor-example-deny-write (enforce)
+```
+
+
+
+#### Create the audit policy file
+
+```yaml
+cat << EOF > /etc/kubernetes/audit-policy.yaml
+apiVersion: audit.k8s.io/v1 # This is required.
+kind: Policy
+rules:
+ # Log pod changes at RequestResponse level
+ - level: RequestResponse
+ resources:
+ - group: ""
+ resources: ["pods"]
+
+ # Log secret delete events in prod namespaces at the Metadata level.
+ - level: Metadata
+ verbs: ["delete"]
+ resources:
+ - group: "" # core API group
+ resources: ["secrets"]
+ namespaces: ["prod"]
+EOF
+```
+
+#### Back up the original file: `cp kube-apiserver.yaml kube-apiserver.yaml_org`
+
+#### Update the `/etc/kubernetes/manifests/kube-apiserver.yaml` to add audit configs and volume mounts.
+
+```yaml
+- --audit-policy-file=/etc/kubernetes/audit-policy.yaml
+- --audit-log-path=/var/log/kubernetes/audit/audit.log
+- --audit-log-maxage=30
+```
+
+```yaml
+volumeMounts:
+ - mountPath: /etc/kubernetes/audit-policy.yaml
+ name: audit
+ readOnly: true
+ - mountPath: /var/log/kubernetes/audit/
+ name: audit-log
+ readOnly: false
+
+volumes:
+- name: audit
+ hostPath:
+ path: /etc/kubernetes/audit-policy.yaml
+ type: File
+- name: audit-log
+ hostPath:
+ path: /var/log/kubernetes/audit/
+ type: DirectoryOrCreate
+```
+
+#### Check the `/var/log/kubernetes/audit/audit.log` for audit log entries
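+
+For example, a secret deletion in the `prod` namespace should appear as a Metadata-level event. Each log line is a JSON document, so a rough filter (a sketch) is:
+
+```bash
+# audit events are one JSON object per line; look for secret deletions
+grep '"resource":"secrets"' /var/log/kubernetes/audit/audit.log | grep '"verb":"delete"' | tail -1
+```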
+
+
+
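+The `request` field is the base64-encoded PEM CSR. One way to produce it (a sketch; the key and CSR file names are illustrative):
+
+```bash
+openssl genrsa -out normal.key 2048
+openssl req -new -key normal.key -subj "/CN=normal" -out normal.csr
+cat normal.csr | base64 | tr -d "\n" # paste this output into spec.request below
+```
+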
+```yaml
+cat << EOF > normal-csr.yaml
+apiVersion: certificates.k8s.io/v1
+kind: CertificateSigningRequest
+metadata:
+ name: normal-csr
+spec:
+ request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ2lqQ0NBWElDQVFBd1JURUxNQWtHQTFVRUJoTUNRVlV4RXpBUkJnTlZCQWdNQ2xOdmJXVXRVM1JoZEdVeApJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MFpEQ0NBU0l3RFFZSktvWklodmNOCkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNellTKzhhTXdBVmkwWHovaVp2Z2k0eGtNWTkyMWZRSmd1bGM2eDYKS0Q4UjNteEMyRkxlWklJSHRYTDZadG5KSHYxY0g0eWtMUEZtR2hDRURVNnRxQ2FpczNaWWV3MVBzVG5nd1Jzego3TG1oeDV4dzVRc3lRaFBkNjRuY3h1MFRJZmFGbmducU9UT0NGWERyaXBtZzJ5TExvbTIxL1ZxbjNQMVJQeE51CjZJdDlBOHB6aURlTVg5VTlaTHhzT0Jld2FzaFJzM29jb3NIcHp5cXN1SnQralVvUjNmaGducVB3UkNBZmQ3YUUKaUhKOWFxblhHVVNUWENXb2g2OEtPL3VkU3p2djNmcExhV1JxUUdHWi9HSWpjM1ZiZzNHN0FqNWNITUp2WHV3bwp3M0JkV1pZaEpycU9Ld21sMW9QVHJRNlhMQ2FBTFZ2NnFqZWVOSFNvOVZyVmM0OENBd0VBQWFBQU1BMEdDU3FHClNJYjNEUUVCQ3dVQUE0SUJBUUFEZGNmMHZVSnVtcmRwcGxOa0pwSERSVFI2ZlFzYk84OFM3cnlndC9vcFEvOCsKNVkyUVVjVzhSUUdpVGdvQjFGUG1FeERVcFRna2p1SEtDQ0l3RWdjc3pPRm5YdC95N1FsWXBuc0E3dG01V1ppUAozbG1xSFpQMU9tQlRBRU45L2swSFpKdjc4Rytmcm0xNnRJbWtzUHpSK2lBajZ2WDZtT1RNVEk3Y1U5cmIvSElLCmVOTTZjV2dYQzYrbU9PbDFqM3BjS1hlVlB0YS9MbDZEVFc0VWdnR0J1NVJPb3FWRS9sTDNQNnc4K2R3M0lWQngKWlBrK0JDNVQrMkZLMFNzd3VvSCtaKzhtbi8weHR2bk1nL3FPTWIwdXVvcDNSTklVZmFhR1pRSjRmSnVrMGdkQwpXZHFselJMREsydXZYcWVFUXFjMENxZmVVdXRGdzVuOWNWZVdvRFVwCi0tLS0tRU5EIENFUlRJRklDQVRFIFJFUVVFU1QtLS0tLQo= # use base64 encoded value of normal.csr file
+ signerName: kubernetes.io/kube-apiserver-client
+ usages:
+ - client auth
+EOF
+
+kubectl apply -f normal-csr.yaml
+```
+
+#### Verify it's submitted and in Pending status
+
+```bash
+kubectl get csr normal-csr
+# NAME AGE SIGNERNAME REQUESTOR CONDITION
+# normal-csr 37s kubernetes.io/kube-apiserver-client kubernetes-admin Pending
+```
+
+
+
+```bash
+kubectl certificate approve normal-csr
+# certificatesigningrequest.certificates.k8s.io/normal-csr approved
+```
+
+#### Verify it's in Approved,Issued status
+
+```bash
+kubectl get csr normal-csr
+# NAME AGE SIGNERNAME REQUESTOR CONDITION
+# normal-csr 4m15s kubernetes.io/kube-apiserver-client kubernetes-admin Approved,Issued
+```
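+
+Once approved, the signed certificate can be extracted from the CSR object's status (a sketch; the output file name is illustrative):
+
+```bash
+kubectl get csr normal-csr -o jsonpath='{.status.certificate}' | base64 -d > normal.crt
+openssl x509 -in normal.crt -noout -dates # sanity-check the issued certificate
+```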
+
+
+
+```bash
+kubectl certificate deny hacker-csr
+# certificatesigningrequest.certificates.k8s.io/hacker-csr denied
+
+```
+
+#### Verify it's in Denied status
+
+```bash
+kubectl get csr hacker-csr
+# NAME AGE SIGNERNAME REQUESTOR CONDITION
+# hacker-csr 16s kubernetes.io/kube-apiserver-client kubernetes-admin Denied
+```
+
+
+
+```bash
+curl https://dl.k8s.io/v1.22.0/kubernetes.tar.gz -L -o kubernetes.tar.gz
+shasum -a 512 kubernetes.tar.gz
+# d1145ec29a8581a4c94a83cefa3658a73bfc7d8e2624d31e735d53551718c9212e477673f74cfa4e430a8367a47bba65e2573162711613e60db54563dc912f00 kubernetes.tar.gz
+```
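+
+To compare against the published checksum instead of eyeballing the hash, a `.sha512` file is published alongside the release artifact (a sketch, assuming it exists for this tarball):
+
+```bash
+curl -sL https://dl.k8s.io/v1.22.0/kubernetes.tar.gz.sha512 -o kubernetes.tar.gz.sha512
+echo "$(cat kubernetes.tar.gz.sha512)  kubernetes.tar.gz" | shasum -a 512 --check
+# kubernetes.tar.gz: OK
+```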
+
+
+
+```bash
+kubectl get configmaps
+# OR
+kubectl get cm
+```
+
+
+
+```bash
+kubectl get configmaps --all-namespaces
+# OR
+kubectl get configmaps -A
+```
+
+
+```bash
+kubectl run nginx-1 --image=nginx --env="DB_HOST=db.example.com" --env="DB_USER=development" --env="DB_PASSWD=password"
+```
+
+```bash
+# verify env variables
+kubectl exec nginx-1 -- env | grep DB_
+# DB_HOST=db.example.com
+# DB_USER=development
+# DB_PASSWD=password
+```
+
+
+
+```bash
+kubectl create configmap db-config-1 --from-literal=DB_HOST=db.example.com --from-literal=DB_USER=development --from-literal=DB_PASSWD=password
+```
+
+OR
+
+```yaml
+cat << EOF > db-config-1.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: db-config-1
+data:
+ DB_HOST: db.example.com
+ DB_PASSWD: password
+ DB_USER: development
+EOF
+
+kubectl apply -f db-config-1.yaml
+```
+
+```bash
+# verify
+kubectl describe configmap db-config-1
+# Name: db-config-1
+# Namespace: default
+# Labels:       <none>
+```
+
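+`--from-file` reads from a file on disk; an assumed `db.properties` along these lines:
+
+```bash
+cat << EOF > db.properties
+DB_HOST=db.example.com
+DB_USER=development
+DB_PASSWD=password
+EOF
+```
+
+Note that `--from-file` stores the entire file content under a single key named after the file, unlike `--from-literal`, which creates one key per flag.
+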
+```bash
+kubectl create configmap db-config-2 --from-file=db.properties
+```
+
+```bash
+# verify
+kubectl describe configmap db-config-2
+# Name: db-config-2
+# Namespace: default
+# Labels:       <none>
+```
+
+```yaml
+cat << EOF > nginx-2.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: nginx-2
+spec:
+ containers:
+ - image: nginx
+ name: nginx-2
+ env:
+ - name: DB_HOST
+ valueFrom:
+ configMapKeyRef:
+ name: db-config-1
+ key: DB_HOST
+EOF
+
+kubectl apply -f nginx-2.yaml
+
+kubectl exec nginx-2 -- env | grep DB_HOST # verify env variables
+# DB_HOST=db.example.com
+```
+
+
+
+```yaml
+cat << EOF > nginx-3.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: nginx-3
+spec:
+ containers:
+ - image: nginx
+ name: nginx-3
+ envFrom:
+ - configMapRef:
+ name: db-config-1
+EOF
+
+kubectl apply -f nginx-3.yaml
+
+kubectl exec nginx-3 -- env | grep DB_ # verify env variables
+# DB_HOST=db.example.com
+# DB_PASSWD=password
+# DB_USER=development
+```
+
+
+
+```yaml
+cat << EOF > nginx-4.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: nginx-4
+spec:
+ containers:
+ - image: nginx
+ name: nginx-4
+ volumeMounts:
+ - name: db-config
+ mountPath: "/config"
+ readOnly: true
+ volumes:
+ - name: db-config
+ configMap:
+ name: db-config-1
+EOF
+
+kubectl apply -f nginx-4.yaml
+
+kubectl exec nginx-4 -- cat /config/DB_HOST # verify the mounted file
+# db.example.com
+```
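+
+Every key in the ConfigMap is projected as a file under the mount path:
+
+```bash
+kubectl exec nginx-4 -- ls /config
+# DB_HOST
+# DB_PASSWD
+# DB_USER
+```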
+
+
+
+```bash
+kubectl get daemonsets --all-namespaces
+# OR
+kubectl get ds -A
+```
+
+
+
+```bash
+kubectl create deploy nginx --image=nginx --dry-run=client -o yaml > nginx-ds.yaml
+```
+
+#### Edit the deployment to daemonset
+
+```yaml
+cat << EOF > nginx-ds.yaml
+apiVersion: apps/v1
+kind: DaemonSet # Update from Deployment to DaemonSet
+metadata:
+ labels:
+ app: nginx
+ name: nginx
+spec:
+# replicas: 1 - remove replicas
+ selector:
+ matchLabels:
+ app: nginx
+# strategy: {} - remove strategy
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - image: nginx
+ name: nginx
+ resources: {}
+EOF
+
+kubectl apply -f nginx-ds.yaml
+
+kubectl get pods -o wide
+# NAME          READY   STATUS    RESTARTS   AGE     IP           NODE     NOMINATED NODE   READINESS GATES
+# nginx-5k7dk   1/1     Running   0          6m10s   10.244.1.3   node01   <none>           <none>
+```
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ kind: frontend
+ name: nginx-deployment
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: nginx # Update the selector label to app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - image: nginx
+ name: nginx
+```
+
+
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ kind: frontend
+ name: frontend-svc
+spec:
+ ports:
+ - port: 8080 # Update the port to 80
+ protocol: TCP
+ targetPort: 8080 # Update the port to 80
+ selector:
+ kind: frontend # Update the selector label to app: nginx
+status:
+ loadBalancer: {}
+```
+
+
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: web
+ name: web-svc
+spec:
+ ports:
+ - port: 80
+ protocol: TCP
+ targetPort: 8080 # Update target port to 8080 as exposed by the deployment
+ selector:
+ app: web
+ type: ClusterIP
+status:
+ loadBalancer: {}
+```
+
+**NOTE**: The ingress will not work if there is no ingress controller deployed on the cluster.
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: web-ingress
+ annotations:
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+spec:
+ rules:
+ - host: hello-world.info # add host entry
+ http:
+ paths:
+ - backend:
+ service:
+ name: web # update to web-svc
+ port:
+ number: 80
+ path: /
+ pathType: Prefix
+status:
+ loadBalancer: {}
+```
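+
+A quick connectivity check once an ingress controller is serving the rule (a sketch; `NODE_IP` stands for whatever address the controller is exposed on):
+
+```bash
+curl --resolve hello-world.info:80:$NODE_IP http://hello-world.info/
+```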
+
+
+
+```bash
+kubectl get deployments
+# OR
+kubectl get deploy
+```
+
+
+
+```bash
+kubectl create deploy nginx-deployment --image nginx:1.20 && kubectl scale deploy nginx-deployment --replicas 3
+# deployment.apps/nginx-deployment created
+# deployment.apps/nginx-deployment scaled
+
+kubectl get replicaset # check the replica set created
+# NAME DESIRED CURRENT READY AGE
+# nginx-deployment-bd78d5dc6 3 3 3 37s
+
+```
+
+OR
+
+```yaml
+cat << EOF > nginx-deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: nginx
+ name: nginx-deployment
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - image: nginx:1.20
+ name: nginx
+EOF
+
+kubectl apply -f nginx-deployment.yaml
+
+```
+
+
+
+```bash
+kubectl get deploy nginx-deployment -o yaml
+```
+
+
+
+```bash
+kubectl create deployment frontend --replicas=4 --image=nginx:1.21 --dry-run=client -o yaml > frontend.yaml
+# --replicas was introduced recently; if it is not supported, create the deployment without it and scale afterwards
+kubectl create deployment frontend --image=nginx:1.21 --dry-run=client -o yaml > frontend.yaml
+```
+
+#### Edit the frontend.yaml for replicas, port and env variable
+
+```yaml
+cat << EOF > frontend.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ creationTimestamp: null
+ labels:
+ app: frontend
+ name: frontend
+spec:
+ replicas: 4
+ selector:
+ matchLabels:
+ app: frontend
+ strategy: {}
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ app: frontend
+ spec:
+ containers:
+ - image: nginx:1.21
+ name: nginx
+ ports:
+ - containerPort: 8080
+ env:
+ - name: NGINX_PORT
+ value: "8080"
+ resources: {}
+status: {}
+EOF
+
+kubectl apply -f frontend.yaml
+```
+
+
+
+```bash
+kubectl create deployment nginx-random --image=nginx
+kubectl expose deployment nginx-random --name=nginx-random --port=80 --target-port=80
+```
+
+#### Verify that nslookup works. (Recent busybox images have issues with service nslookup, so dnsutils is used instead.)
+
+```yaml
+cat << EOF > dnsutils.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: dnsutils
+spec:
+ containers:
+ - name: dnsutils
+ image: k8s.gcr.io/e2e-test-images/jessie-dnsutils:1.3
+ command:
+ - sleep
+ - "3600"
+ imagePullPolicy: IfNotPresent
+ restartPolicy: Always
+EOF
+
+kubectl apply -f dnsutils.yaml
+
+kubectl exec dnsutils -- nslookup nginx-random
+# Server: 10.96.0.10
+# Address: 10.96.0.10#53
+
+# Name: nginx-random.default.svc.cluster.local
+# Address: 10.110.119.135
+
+kubectl get pods -l app=nginx-random -o wide
+# NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+# nginx-random-77fb464776-sbp8v   1/1     Running   0          8m25s   10.50.0.8   controlplane   <none>           <none>
+```
+
+#### Create deployment
+
+```bash
+kubectl create deployment nginx-ha --image=nginx --replicas=3
+
+kubectl get pods -l app=nginx-ha
+# NAME READY STATUS RESTARTS AGE
+# nginx-ha-684994c76-2j4w8 1/1 Running 0 57s
+# nginx-ha-684994c76-7ssm8 1/1 Running 0 57s
+# nginx-ha-684994c76-kdp28 1/1 Running 0 57s
+```
+
+#### Delete all the pods and check behaviour as new pods are created.
+
+```bash
+kubectl delete pods -l app=nginx-ha --force
+
+kubectl get pods -l app=nginx-ha -w
+# NAME READY STATUS RESTARTS AGE
+# nginx-ha-684994c76-m5n28 0/1 ContainerCreating 0 3s
+# nginx-ha-684994c76-pqfj4 0/1 ContainerCreating 0 3s
+# nginx-ha-684994c76-qxgfl 0/1 ContainerCreating 0 2s
+# nginx-ha-684994c76-pqfj4 1/1 Running 0 7s
+# nginx-ha-684994c76-m5n28 1/1 Running 0 9s
+# nginx-ha-684994c76-qxgfl 1/1 Running 0 8s
+```
+
+
+
+```bash
+kubectl scale deployment nginx-deployment --replicas=5
+```
+
+OR
+
+#### Edit the deployment definition file and use `kubectl apply -f nginx-deployment.yaml`
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: nginx
+ name: nginx-deployment
+spec:
+ replicas: 5 # Update the replicas count
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - image: nginx:1.20
+ name: nginx
+```
+
+
+
+```bash
+kubectl scale deployment nginx-deployment --replicas=3
+```
+
+OR
+
+#### Edit the deployment definition file and use `kubectl apply -f nginx-deployment.yaml`
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: nginx
+ name: nginx-deployment
+spec:
+ replicas: 3 # Update the replicas count
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - image: nginx:1.20
+ name: nginx
+```
+
+
+
+#### Create namespace `ha`
+
+```bash
+kubectl create namespace ha
+```
+
+#### Edit the deployment specs for 4 replicas and label
+
+```yaml
+cat << EOF > ha-deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: ha-deployment
+ name: ha-deployment
+spec:
+ replicas: 4 # 4 replicas
+ selector:
+ matchLabels:
+ app: ha-deployment
+ strategy: {}
+ template:
+ metadata:
+ labels:
+ app: ha-deployment
+ func: frontend # label added to pod
+ spec:
+ containers:
+ - image: nginx
+ name: nginx
+ resources: {}
+status: {}
+EOF
+
+kubectl apply -f ha-deployment.yaml -n ha
+
+kubectl get pods -n ha
+# NAME READY STATUS RESTARTS AGE
+# ha-deployment-66b7f8d45b-4pndp 1/1 Running 0 22s
+# ha-deployment-66b7f8d45b-5r77r 1/1 Running 0 22s
+# ha-deployment-66b7f8d45b-7hq7q 1/1 Running 0 22s
+# ha-deployment-66b7f8d45b-szklj 1/1 Running 0 22s
+```
+
+#### Expose the deployment as a service with name cherry
+
+```bash
+kubectl expose deployment ha-deployment --name cherry --type NodePort --port 8080 --target-port 80 --namespace ha
+
+kubectl get svc -n ha
+# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+# cherry   NodePort   10.104.241.152   <none>
+```
+
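+The service should answer on its allocated node port (a sketch, run from one of the nodes):
+
+```bash
+NODE_PORT=$(kubectl get svc cherry -n ha -o jsonpath='{.spec.ports[0].nodePort}')
+curl -s http://localhost:$NODE_PORT | grep title
+# <title>Welcome to nginx!</title>
+```
+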
+```bash
+kubectl rollout status deploy nginx-deployment
+# deployment "nginx-deployment" successfully rolled out
+```
+
+
+
+```bash
+kubectl set image deploy nginx-deployment nginx=nginx:1.20.2
+# deployment.apps/nginx-deployment image updated
+```
+
+OR
+
+#### Update the `nginx-deployment.yaml` file and `kubectl apply -f nginx-deployment.yaml`
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: nginx
+ name: nginx-deployment
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - image: nginx:1.20.2 # Update the image
+ name: nginx
+```
+
+
+
+```bash
+kubectl rollout history deploy nginx-deployment
+# deployment.apps/nginx-deployment
+# REVISION CHANGE-CAUSE
+# 1          <none>
+# 2          <none>
+```
+
+```bash
+kubectl rollout undo deploy nginx-deployment # wait a bit
+# deployment.apps/nginx-deployment rolled back
+
+# verify the rollback
+
+kubectl rollout history deploy nginx-deployment
+# deployment.apps/nginx-deployment
+# REVISION CHANGE-CAUSE
+# 2          <none>
+# 3          <none>
+```
+
+```bash
+kubectl set image deploy nginx-deployment nginx=nginx:1.202.333
+```
+
+OR
+
+#### Update the `nginx-deployment.yaml` file and `kubectl apply -f nginx-deployment.yaml`
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: nginx
+ name: nginx-deployment
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - image: nginx:1.202.333 # Update the image
+ name: nginx
+```
+
+
+
+```bash
+kubectl rollout status deploy nginx-deployment # would show 'Waiting for deployment "nginx-deployment" rollout to finish: 1 out of 3 new replicas have been updated...'
+
+kubectl get pod # would show status as 'ErrImagePull' or 'ImagePullBackOff'
+# NAME READY STATUS RESTARTS AGE
+# nginx-deployment-68b88f4dcf-8drvq 0/1 ErrImagePull 0 71s
+# nginx-deployment-bd78d5dc6-59x4r 1/1 Running 0 7m16s
+# nginx-deployment-bd78d5dc6-cxg7l 1/1 Running 0 7m19s
+# nginx-deployment-bd78d5dc6-xxkdj 1/1 Running 0 7m14s
+```
+
+
+
+```bash
+kubectl rollout undo deploy nginx-deployment --to-revision=2
+
+# verify
+kubectl rollout history deploy nginx-deployment
+# deployment.apps/nginx-deployment
+# REVISION CHANGE-CAUSE
+# 3          <none>
+# 4          <none>
+# 5          <none>
+```
+
+```bash
+kubectl rollout history deploy nginx-deployment --revision=4 # check the wrong image displayed here
+# deployment.apps/nginx-deployment with revision #4
+# Pod Template:
+# Labels: app=nginx-deployment
+# pod-template-hash=68b88f4dcf
+# Containers:
+# nginx:
+# Image: nginx:1.202.333
+# Port:       <none>
+```
+
+```bash
+kubectl delete deployment nginx-deployment
+# OR
+kubectl delete -f nginx-deployment.yaml
+```
+
+
+
+#### Edit the deployment to update the rolling update strategy for maxSurge & maxUnavailable
+
+```yaml
+cat << EOF > web1.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: web1
+ name: web1
+spec:
+ replicas: 10
+ selector:
+ matchLabels:
+ app: web1
+ strategy:
+ rollingUpdate: # update the rolling update strategy for maxSurge & maxUnavailable
+ maxSurge: 5%
+ maxUnavailable: 2%
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: web1
+ spec:
+ containers:
+ - image: nginx:1.20-alpine
+ name: web1
+EOF
+
+kubectl apply -f web1.yaml
+```
+
+#### Update the image to 1.21-alpine
+
+```bash
+kubectl set image deployment web1 web1=nginx:1.21-alpine
+```
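+
+With 10 replicas, `maxSurge: 5%` rounds up to 1 extra pod and `maxUnavailable: 2%` rounds down to 0, so the rollout proceeds one pod at a time while all 10 replicas stay available. Watching the rollout makes this visible (output is a sketch):
+
+```bash
+kubectl rollout status deployment web1
+# Waiting for deployment "web1" rollout to finish: 1 out of 10 new replicas have been updated...
+# ...
+# deployment "web1" successfully rolled out
+```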
+
+#### Check rollout history and undo rollout
+
+```bash
+kubectl rollout history deployment web1
+# deployment.apps/web1
+# REVISION CHANGE-CAUSE
+# 1          <none>
+# 2          <none>
+
+kubectl rollout undo deployment web1
+# deployment.apps/web1 rolled back
+```
+
+## Manage role based access control (RBAC)
+
+
+
+Refer [RBAC](../topics/rbac.md)
+
+
+
+## Use Kubeadm to install a basic cluster
+
+
+
+Refer [Creating cluster using Kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/)
+
+
+
+## Manage a highly-available Kubernetes cluster
+
+
+
+Refer [Creating HA Kubernete cluster](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/)
+
+
+
+## Provision underlying infrastructure to deploy a Kubernetes cluster
+
+
+
+TBD
+
+
+
+## Perform a version upgrade on a Kubernetes cluster using Kubeadm
+
+
+
+Refer [Upgrading Kubeadm Clusters](../topics/cluster_upgrade.md)
+
+
+
+## Implement etcd backup and restore
+
+
+
+Refer [ETCD](../topics/etcd.md)
+
diff --git a/k8s-certifications/cka/2.workloads_scheduling.md b/k8s-certifications/cka/2.workloads_scheduling.md
new file mode 100644
index 0000000..bbf0571
--- /dev/null
+++ b/k8s-certifications/cka/2.workloads_scheduling.md
@@ -0,0 +1,52 @@
+# Workloads & Scheduling - 15%
+
+
+
+## Understand deployments and how to perform rolling update and rollbacks
+
+
+
+Refer [Deployment Rollouts](../topics/deployments.md#deployment-rollout)
+
+
+
+## Use ConfigMaps and Secrets to configure applications
+
+
+
+Refer [ConfigMaps](../topics/configmaps.md)
+Refer [Secrets](../topics/secrets.md)
+
+
+
+## Know how to scale applications
+
+
+
+Refer [Deployment Scaling](../topics/deployments.md#deployment-scaling)
+
+
+
+## Understand the primitives used to create robust, self-healing, application deployments
+
+
+
+Refer [Deployment Self Healing](../topics/deployments.md#deployment-self-healing)
+
+
+
+## Understand how resource limits can affect Pod scheduling
+
+
+
+Refer [Resources - Requests & Limits](../topics/pods.md#resources)
+
+
+
+## Awareness of manifest management and common templating tools
+
+
+
+TBD
+
+
diff --git a/k8s-certifications/cka/3.services_networking.md b/k8s-certifications/cka/3.services_networking.md
new file mode 100644
index 0000000..e24ae63
--- /dev/null
+++ b/k8s-certifications/cka/3.services_networking.md
@@ -0,0 +1,43 @@
+# Services & Networking - 20%
+
+
+
+## Understand host networking configuration on the cluster nodes
+
+
+
+TBD
+
+
+
+## Understand connectivity between Pods
+
+
+
+Refer [Cluster Networking](https://kubernetes.io/docs/concepts/cluster-administration/networking/)
+
+
+
+## Understand ClusterIP, NodePort, LoadBalancer service types and endpoints
+
+Refer [Services](../topics/services.md)
+
+## Know how to use Ingress controllers and Ingress resources
+
+Refer [Ingress](../topics/ingress.md)
+
+## Know how to configure and use CoreDNS
+
+
+
+Refer [CoreDNS for Service Discovery](https://kubernetes.io/docs/tasks/administer-cluster/coredns/)
+
+
+
+## Choose an appropriate container network interface plugin
+
+
+
+Refer [Network Plugins](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/)
+
+
diff --git a/k8s-certifications/cka/4.storage.md b/k8s-certifications/cka/4.storage.md
new file mode 100644
index 0000000..560f887
--- /dev/null
+++ b/k8s-certifications/cka/4.storage.md
@@ -0,0 +1,35 @@
+# Storage - 10%
+
+
+
+## Understand storage classes, persistent volumes
+
+
+
+Refer [Volumes](../topics/volumes.md)
+
+
+
+## Understand volume mode, access modes and reclaim policies for volumes
+
+
+
+Refer [PV Volume mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#volume-mode)
+Refer [PV Access modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes)
+Refer [PV Reclaim policies](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaim-policy)
+
+
+
+## Understand persistent volume claims primitive
+
+
+
+Refer [Persistent Volume Claims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
+
+
+
+## Know how to configure applications with persistent storage
+
+
+
+Refer [Volumes](../topics/volumes.md)
+
+
\ No newline at end of file
diff --git a/k8s-certifications/cka/5.troubleshooting.md b/k8s-certifications/cka/5.troubleshooting.md
new file mode 100644
index 0000000..3a5a445
--- /dev/null
+++ b/k8s-certifications/cka/5.troubleshooting.md
@@ -0,0 +1,54 @@
+# Troubleshooting - 30%
+
+
+
+## Evaluate cluster and node logging
+
+
+
+Refer [Cluster Logging](https://kubernetes.io/docs/tasks/debug-application-cluster/debug-cluster/#looking-at-logs)
+
+
+
+## Understand how to monitor applications
+
+
+
+Refer [Monitoring](../topics/monitoring.md)
+
+
+
+## Manage container stdout & stderr logs
+
+
+
+TBD
+
+
+
+## Troubleshoot application failure
+
+
+
+Refer [Deployment Troubleshooting](../topics/deployments.md#troubleshooting)
+Refer [Probes Troubleshooting](../topics/probes.md#troubleshooting)
+Refer [Application Troubleshooting](https://kubernetes.io/docs/tasks/debug-application-cluster/debug-application/)
+
+
+
+## Troubleshoot cluster component failure
+
+
+
+TBD
+
+
+
+## Troubleshoot networking
+
+
+
+TBD
+
+
+
diff --git a/k8s-certifications/cka/README.md b/k8s-certifications/cka/README.md
new file mode 100644
index 0000000..211909f
--- /dev/null
+++ b/k8s-certifications/cka/README.md
@@ -0,0 +1,16 @@
+# Certified Kubernetes Administrator (CKA)
+
+## [CKA Curriculum](https://github.com/cncf/curriculum/blob/master/CKA_Curriculum_v1.22.pdf)
+
+1. [Cluster Architecture, Installation & Configuration - 25%](1.cluster_architecture_installation_configuration.md)
+2. [Workloads & Scheduling - 15%](2.workloads_scheduling.md)
+3. [Services & Networking - 20%](3.services_networking.md)
+4. [Storage - 10%](4.storage.md)
+5. [Troubleshooting - 30%](5.troubleshooting.md)
+
+## Resources
+
+ - [Certified Kubernetes Administrator - CKA learning path](https://jayendrapatil.com/certified-kubernetes-administrator-cka-learning-path/)
+ - [KodeKloud Certified Kubernetes Administrator Course](https://shareasale.com/r.cfm?b=2319101&u=2367365&m=132199&urllink=&afftrack=)
+
+
\ No newline at end of file
diff --git a/k8s-certifications/ckad/1.application_design_build.md b/k8s-certifications/ckad/1.application_design_build.md
new file mode 100644
index 0000000..bc4948f
--- /dev/null
+++ b/k8s-certifications/ckad/1.application_design_build.md
@@ -0,0 +1,35 @@
+# Application Design and Build - 20%
+
+
+
+## Define, build and modify container images
+
+
+
+Refer [Docker](../topics/docker.md)
+
+
+
+## Understand Jobs and CronJobs
+
+
+
+Refer [Jobs & Cron Jobs](../topics/jobs.md)
+
+
+
+## Understand multi-container Pod design patterns (e.g. sidecar, init and others)
+
+
+
+Refer [Multi-container Pods](../topics/multi_container_pods.md)
+
+
+
+## Utilize persistent and ephemeral volumes
+
+
+
+Refer [Volumes](../topics/volumes.md)
+
+
\ No newline at end of file
diff --git a/k8s-certifications/ckad/2.application_deployment.md b/k8s-certifications/ckad/2.application_deployment.md
new file mode 100644
index 0000000..3d5b791
--- /dev/null
+++ b/k8s-certifications/ckad/2.application_deployment.md
@@ -0,0 +1,25 @@
+# Application Deployment - 20%
+
+
+
+## Use Kubernetes primitives to implement common deployment strategies (e.g. blue/green or canary)
+
+
+
+- Kubernetes natively supports only the Recreate and RollingUpdate deployment strategies within the same cluster.
+- A service mesh like Istio can be used for [traffic management and canary deployments](https://istio.io/latest/docs/tasks/traffic-management/traffic-shifting/).
+
+
+
+## Understand Deployments and how to perform rolling updates
+
+Refer [Deployment Rollouts](../topics/deployments.md#deployment-rollout)
+
+## Use the Helm package manager to deploy existing packages
+
+
+
+ - [Helm](https://helm.sh/) can be used for templating and deployment.
+
+
+
diff --git a/k8s-certifications/ckad/3.application_observability_maintenance.md b/k8s-certifications/ckad/3.application_observability_maintenance.md
new file mode 100644
index 0000000..9854cc1
--- /dev/null
+++ b/k8s-certifications/ckad/3.application_observability_maintenance.md
@@ -0,0 +1,32 @@
+# Application Observability and Maintenance - 15%
+
+
+
+## Understand API deprecations
+
+
+
+Refer [API Deprecations](../topics/api_deprecations.md)
+
+
+
+## Implement probes and health checks
+
+Refer [Readiness & Liveness probes](../topics/probes.md)
+
+## Use provided tools to monitor Kubernetes applications
+
+Refer [Monitoring](../topics/monitoring.md)
+
+## Utilize container logs
+
+Refer [Logging](../topics/logging.md)
+
+## Debugging in Kubernetes
+
+
+
+TBD
+
+
+
diff --git a/k8s-certifications/ckad/4.application_environment_configuration_security.md b/k8s-certifications/ckad/4.application_environment_configuration_security.md
new file mode 100644
index 0000000..48be2d8
--- /dev/null
+++ b/k8s-certifications/ckad/4.application_environment_configuration_security.md
@@ -0,0 +1,53 @@
+# Application Environment, Configuration and Security
+
+
+
+## Discover and use resources that extend Kubernetes (CRD)
+
+
+
+Refer [Custom Resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
+
+
+
+## Understand authentication, authorization and admission control
+
+Refer [Authentication](../topics/authentication.md)
+Refer [RBAC](../topics/rbac.md)
+Refer [Admission Controllers](../topics/admission_controllers.md)
+
+## Understanding and defining resource requirements, limits and quotas
+
+
+
+Refer [Resources - Requests & Limits](../topics/pods.md#resources)
+
+
+
+## Understand ConfigMaps
+
+
+
+Refer [ConfigMaps](../topics/configmaps.md)
+
+
+
+## Create & consume Secrets
+
+Refer [Secrets](../topics/secrets.md)
+
+## Understand ServiceAccounts
+
+
+
+Refer [Service Accounts](../topics/service_accounts.md)
+
+
+
+## Understand SecurityContexts
+
+
+
+Refer [Security Context](../topics/pod_security_context.md)
+
+
\ No newline at end of file
diff --git a/k8s-certifications/ckad/5.services_networking.md b/k8s-certifications/ckad/5.services_networking.md
new file mode 100644
index 0000000..bf774df
--- /dev/null
+++ b/k8s-certifications/ckad/5.services_networking.md
@@ -0,0 +1,27 @@
+# Services and Networking
+
+
+
+## Demonstrate basic understanding of NetworkPolicies
+
+
+
+Refer [Network Policies](../topics/network_policies.md)
+
+
+
+## Provide and troubleshoot access to applications via services
+
+
+
+Refer [Services](../topics/services.md)
+
+
+
+## Use Ingress rules to expose applications
+
+
+
+Refer [Ingress](../topics/ingress.md)
+
+
\ No newline at end of file
diff --git a/k8s-certifications/ckad/README.md b/k8s-certifications/ckad/README.md
new file mode 100644
index 0000000..a26371d
--- /dev/null
+++ b/k8s-certifications/ckad/README.md
@@ -0,0 +1,17 @@
+# Certified Kubernetes Application Developer (CKAD)
+
+## [CKAD Curriculum](https://github.com/cncf/curriculum/blob/master/CKAD_Curriculum_v1.28.pdf)
+
+ - [Application Design and Build - 20%](1.application_design_build.md)
+ - [Application Deployment - 20%](2.application_deployment.md)
+ - [Application observability and maintenance - 15%](3.application_observability_maintenance.md)
+ - [Application Environment, Configuration and Security - 25%](4.application_environment_configuration_security.md)
+ - [Services & Networking - 20%](5.services_networking.md)
+
+## Resources
+
+ - [Certified Kubernetes Application Developer - CKAD learning path](https://jayendrapatil.com/certified-kubernetes-application-developer-ckad-learning-path/)
+ - [KodeKloud Certified Kubernetes Application Developer Course](https://shareasale.com/r.cfm?b=2319509&u=2367365&m=132199&urllink=&afftrack=)
+
+
+
\ No newline at end of file
diff --git a/k8s-certifications/cks/1.cluster_setup.md b/k8s-certifications/cks/1.cluster_setup.md
new file mode 100644
index 0000000..169c107
--- /dev/null
+++ b/k8s-certifications/cks/1.cluster_setup.md
@@ -0,0 +1,51 @@
+# Cluster Setup - 10%
+
+
+
+## Use Network security policies to restrict cluster level access
+
+
+
+Refer [Network Policies](../topics/network_policies.md)
+
+
+
+## Use CIS benchmark to review the security configuration of Kubernetes components (etcd, kubelet, kubedns, kubeapi)
+
+
+
+Refer [Kube-bench](../topics/kube-bench.md)
+
+
+
+## Properly set up Ingress objects with security control
+
+
+
+Refer [Ingress with tls cert](../topics/ingress.md#ingress-security)
+
+
+
+## Protect node metadata and endpoints
+
+
+
+Refer [Kubelet Security](../topics/kubelet_security.md)
+
+
+
+## Minimize use of, and access to, GUI elements
+
+
+
+Applies primarily to the Kubernetes Dashboard: restrict access to it with RBAC and avoid exposing it publicly.
+
+
+
+## Verify platform binaries before deploying
+
+
+
+Refer [Platform Binary Verification](../topics/binary_verification.md)
+
+
\ No newline at end of file
diff --git a/k8s-certifications/cks/2.cluster_hardening.md b/k8s-certifications/cks/2.cluster_hardening.md
new file mode 100644
index 0000000..fe269e9
--- /dev/null
+++ b/k8s-certifications/cks/2.cluster_hardening.md
@@ -0,0 +1,35 @@
+# Cluster Hardening - 15%
+
+
+
+## Restrict access to Kubernetes API
+
+
+
+Refer [Controlling Access to Kubernetes API](https://kubernetes.io/docs/concepts/security/controlling-access/)
+
+
+
+## Use Role Based Access Controls to minimize exposure
+
+
+
+Refer [RBAC](../topics/rbac.md)
+
+
+
+## Exercise caution in using service accounts e.g. disable defaults, minimize permissions on newly created ones
+
+
+
+Refer [Service Accounts](../topics/service_accounts.md)
+
+
+
+## Update Kubernetes frequently
+
+
+
+Refer [Upgrading Kubeadm Clusters](../topics/cluster_upgrade.md)
+
+
\ No newline at end of file
diff --git a/k8s-certifications/cks/3.system_hardening.md b/k8s-certifications/cks/3.system_hardening.md
new file mode 100644
index 0000000..19a2e6b
--- /dev/null
+++ b/k8s-certifications/cks/3.system_hardening.md
@@ -0,0 +1,36 @@
+# System Hardening - 15%
+
+
+
+## Minimize host OS footprint (reduce attack surface)
+
+
+
+Refer [Docker](../topics/docker.md)
+
+
+
+## Minimize IAM roles
+
+
+
+IAM roles mainly apply to cloud environments and should follow the principle of least privilege.
+
+
+
+## Minimize external access to the network
+
+
+
+Refer [Network Policies](../topics/network_policies.md)
+
+
+
+## Appropriately use kernel hardening tools such as AppArmor, seccomp
+
+
+
+Refer [Seccomp - Secure Computing](../topics/seccomp.md)
+Refer [AppArmor](../topics/apparmor.md)
+
+
\ No newline at end of file
diff --git a/k8s-certifications/cks/4.minimize_microservice_vulnerabilities.md b/k8s-certifications/cks/4.minimize_microservice_vulnerabilities.md
new file mode 100644
index 0000000..1ee83ef
--- /dev/null
+++ b/k8s-certifications/cks/4.minimize_microservice_vulnerabilities.md
@@ -0,0 +1,37 @@
+# Minimize Microservice Vulnerabilities - 20%
+
+## Setup appropriate OS level security domains e.g. using PSP, OPA, security contexts
+
+
+
+Refer [Pod Security Policies](../topics/pod_security_policies.md)
+
+Refer [Pod Security Context](../topics/pod_security_context.md)
+
+Refer [Open Policy Agent](https://kubernetes.io/blog/2019/08/06/opa-gatekeeper-policy-and-governance-for-kubernetes/)
+
+
+
+## Manage kubernetes secrets
+
+
+
+Refer [Secrets](../topics/secrets.md)
+
+
+
+## Use container runtime sandboxes in multi-tenant environments (e.g. gvisor, kata containers)
+
+
+
+Refer [Runtime Class](../topics/runtimes.md)
+
+
+
+## Implement pod to pod encryption by use of mTLS
+
+
+
+Refer [Istio MTLS](https://istio.io/latest/docs/tasks/security/authentication/authn-policy/#auto-mutual-tls)
+
+
\ No newline at end of file
diff --git a/k8s-certifications/cks/5.supply_chain_security.md b/k8s-certifications/cks/5.supply_chain_security.md
new file mode 100644
index 0000000..d317dcb
--- /dev/null
+++ b/k8s-certifications/cks/5.supply_chain_security.md
@@ -0,0 +1,33 @@
+# Supply Chain Security - 20%
+
+## Minimize base image footprint
+
+
+
+Refer [Docker best practices](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/)
+
+
+
+## Secure your supply chain: whitelist allowed image registries, sign and validate images
+
+
+
+Refer [Admission Controllers ImagePolicyWebhook](../topics/admission_controllers.md#imagepolicywebhook)
+
+
+
+## Use static analysis of user workloads (e.g. kubernetes resources, docker files)
+
+
+
+Refer [Kubesec](../topics/kubesec.md)
+
+
+
+## Scan images for known vulnerabilities
+
+
+
+Refer [Trivy](../topics/trivy.md)
+
+
\ No newline at end of file
diff --git a/k8s-certifications/cks/6.monitoring_logging_runtime_security.md b/k8s-certifications/cks/6.monitoring_logging_runtime_security.md
new file mode 100644
index 0000000..1cf3ef6
--- /dev/null
+++ b/k8s-certifications/cks/6.monitoring_logging_runtime_security.md
@@ -0,0 +1,53 @@
+# Monitoring, Logging and Runtime Security - 20%
+
+
+
+## Perform behavioral analytics of syscall process and file activities at the host and container level to detect malicious activities
+
+
+
+Refer [Falco](../topics/falco.md)
+
+Other tools include `strace` and `tracee`.
+
+
+
+## Detect threats within physical infrastructure, apps, networks, data, users and workloads
+
+
+
+TBD
+
+
+
+## Detect all phases of attack regardless where it occurs and how it spreads
+
+
+
+TBD
+
+
+
+## Perform deep analytical investigation and identification of bad actors within environment
+
+
+
+TBD
+
+
+
+## Ensure immutability of containers at runtime
+
+
+
+Refer [Pod Security Context Immutability](../topics/pod_security_context.md#immutability)
+
+
+
+## Use Audit Logs to monitor access
+
+
+
+Refer [Kubernetes Auditing](../topics/auditing.md)
+
+
\ No newline at end of file
diff --git a/k8s-certifications/cks/README.md b/k8s-certifications/cks/README.md
new file mode 100644
index 0000000..491847d
--- /dev/null
+++ b/k8s-certifications/cks/README.md
@@ -0,0 +1,21 @@
+# Certified Kubernetes Security Specialist (CKS)
+
+## [CKS Curriculum](https://github.com/cncf/curriculum/blob/master/CKS_Curriculum_v1.22.pdf)
+
+- [Cluster Setup - 10%](1.cluster_setup.md)
+- [Cluster Hardening - 15%](2.cluster_hardening.md)
+- [System Hardening - 15%](3.system_hardening.md)
+- [Minimize Microservice Vulnerabilities - 20%](4.minimize_microservice_vulnerabilities.md)
+- [Supply Chain Security - 20%](5.supply_chain_security.md)
+- [Monitoring, Logging and Runtime Security - 20%](6.monitoring_logging_runtime_security.md)
+
+## Resources
+
+- [Certified Kubernetes Security Specialist - CKS learning path](https://jayendrapatil.com/certified-kubernetes-security-specialist-cks-learning-path/)
+- [KodeKloud Certified Kubernetes Security Specialist Course](https://shareasale.com/r.cfm?b=2319531&u=2367365&m=132199&urllink=&afftrack=)
+- [Udemy Kubernetes CKS 2021 Complete Course – Theory – Practice](https://click.linksynergy.com/link?id=l7C703x9gqw&offerid=507388.3573079&type=2&murl=https%3A%2F%2Fwww.udemy.com%2Fcourse%2Fcertified-kubernetes-security-specialist%2F)
+
+
+
+
+
diff --git a/k8s-certifications/data/ImagePolicyWebhook/webhook.crt b/k8s-certifications/data/ImagePolicyWebhook/webhook.crt
new file mode 100644
index 0000000..103176b
--- /dev/null
+++ b/k8s-certifications/data/ImagePolicyWebhook/webhook.crt
@@ -0,0 +1,23 @@
+-----BEGIN CERTIFICATE-----
+MIID5TCCAs2gAwIBAgIUL1k7p/ksn6VRIuAKmeDMctyCUwcwDQYJKoZIhvcNAQEL
+BQAwRjFEMEIGA1UEAww7c3lzdGVtOm5vZGU6aW1hZ2UtYm91bmNlci13ZWJob29r
+LmRlZmF1bHQucG9kLmNsdXN0ZXIubG9jYWwwHhcNMjExMjE1MDY1MTMyWhcNMzEx
+MjEzMDY1MTMyWjBGMUQwQgYDVQQDDDtzeXN0ZW06bm9kZTppbWFnZS1ib3VuY2Vy
+LXdlYmhvb2suZGVmYXVsdC5wb2QuY2x1c3Rlci5sb2NhbDCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAK2/gOl+AEJjnbc5DG4iFg2WvD8JAjgwXHd3zQ6A
+HujxMz1EjJmDksc6S7aKrCJmP42tDdzQatVINMFHBR/8kb5bVN+f0LSNEM3iktfE
+KmB7VsfEk6gaPJg8VOitA/7KpVDyZ4yJZmb2iaGLFzFF41XwiCP2pzihBUTj669Q
+6MWDKxbONSrUpA60vvfhpWbnZxTbX8BfB1xDXOK51kK7rnXRfiJt6NHg+n87+1Lk
+SFcUoZ/BRarSfweHorCu8c/agZfN9rKyj5tPNb3ZCvp3WJs3ZElK2+j/abZwW6cY
+PIorQM0Zl3BZMFCdhoBEcqkeccb1DFjz0RB09SbH8WHCH3cCAwEAAaOByjCBxzAd
+BgNVHQ4EFgQUgcvgsxHiAEkdgZgWa6XWuEApS6swHwYDVR0jBBgwFoAUgcvgsxHi
+AEkdgZgWa6XWuEApS6swDwYDVR0TAQH/BAUwAwEB/zB0BgNVHREEbTBrghVpbWFn
+ZS1ib3VuY2VyLXdlYmhvb2uCIWltYWdlLWJvdW5jZXItd2ViaG9vay5kZWZhdWx0
+LnN2Y4IvaW1hZ2UtYm91bmNlci13ZWJob29rLmRlZmF1bHQuc3ZjLmNsdXN0ZXIu
+bG9jYWwwDQYJKoZIhvcNAQELBQADggEBAAofI9qArTMFQ4W19OsE3Sp1GLdTie2P
+GIVFoiyedYwF+mJWbSgBxklnAKkJf7/sj0PHUEPP4cs7BUM6YHUrjC3OUPhbiH9f
+CB8cVjVJhrI4mWDbAXiPa1mvo44x5eZeWDoz+DkUK+nna1/6ik40yOlonoyPXS/y
+1qEWPijRr/3nJ6Vfy6823UNasEQN6mqeUWAO29M1vrYvq0rzUGiU4xTUvWH3JA26
+1sk+ZYAWyZe2/kOTRMjTnKAaki+dnWt14ed1ipuyHxfR6vHKS80eZuJEd2hmytoE
+PRljY4asLiazIAP5j9/T4Xj66n0fvgTh75iUwAMkQHS2swC4ZjVS7nc=
+-----END CERTIFICATE-----
\ No newline at end of file
diff --git a/k8s-certifications/data/ImagePolicyWebhook/webhook.key b/k8s-certifications/data/ImagePolicyWebhook/webhook.key
new file mode 100644
index 0000000..1c03ca3
--- /dev/null
+++ b/k8s-certifications/data/ImagePolicyWebhook/webhook.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCtv4DpfgBCY523
+OQxuIhYNlrw/CQI4MFx3d80OgB7o8TM9RIyZg5LHOku2iqwiZj+NrQ3c0GrVSDTB
+RwUf/JG+W1Tfn9C0jRDN4pLXxCpge1bHxJOoGjyYPFTorQP+yqVQ8meMiWZm9omh
+ixcxReNV8Igj9qc4oQVE4+uvUOjFgysWzjUq1KQOtL734aVm52cU21/AXwdcQ1zi
+udZCu6510X4ibejR4Pp/O/tS5EhXFKGfwUWq0n8Hh6KwrvHP2oGXzfayso+bTzW9
+2Qr6d1ibN2RJStvo/2m2cFunGDyKK0DNGZdwWTBQnYaARHKpHnHG9QxY89EQdPUm
+x/Fhwh93AgMBAAECggEAfW5S0j10Unk30p4MqzVQVl8LZzZJs+a12klSb7VumxwF
+saVbGzgxLkKXhiB2RB8sokrcRxzvAyota5qpyH29eX7VttrZAH8WMovvFnU3Yo+o
+Bm+TaTgHpp9nbNH6oGYLEnTs7DgFBS/WDBktlRSvGcubfNsDvY4BD8q6ysXORUdL
+Mji+JiPgIxlvHLZleP5zAyLWesSvKpUZxvE3/8G0M6rJD70Ufq9w3O2/UbrXoOEK
+vdKn3MIarI8x3O7dDauFdA+LbBMMG3Pl+GbkRuG5eFwMhUHzqks+sx0M8vz5YDzw
+mUxO1gzktvmSDiEcnIS5aINXgItviQp545KCCd+RAQKBgQDgFg8c5yUk9pMSIrIC
+kUT6uWfi0rnHREBfrCZUkso4acIt1PBEOOYKJoLwbdjE1w7fuysk6Ok7o5rg9Cch
+qen7hIFoWwKhfNO7dcwozs6gnT7QVUpHnID3t23m8wGtf7d2QRAXqCDRmaQUfHRc
+zupj7LPRsbrrc1ZBCI3i9g1mDwKBgQDGfivUe5n9+W6215SR3ofHrzkr6GQBl1bb
+H9WRhmvxNpLARdbKoGeBYMggdFte/SlzHdN6c5gaXIM7OJZj3NMSU/Flaqe/drOR
+76zN1nACvNZazpxHLnVklgSesRdFYZkvzhwnuS3sPiBEseV/Zi/Hp+Lc9XguqH5a
+LZHmGMJYGQKBgCZOPwkezi+yYtOv0KQ1twfxF7wjb5SLq0FviSHd8emQ0pvJEcVn
+wJMtoCZ/cJW9eZJvSWHG2s/SGNCpi+LqS9AuB30SSbHXR858xYiYSaQVHT65xbfW
+Hgm6dnQLSFcjRPZXCuwwVmPeErlZyP5wdIreVKLc8en7zlvRnYeVrharAoGBAIf9
+QUIePG6ISZXzNNKLRzNDlUPDv2BnsxYFRWiiU6m63efk8Td5lfBJwlKZ5U+62n8H
+3C90qqzE3RPhvQdF70YLRMNawvql9HjzX8zWMX9uqN0l2GPcLIlxTlD6uxrJtw3N
+g/SjJhdIqQrnZnhWJj3/g6omcuRkg8x8lAy0wdFhAoGAS2dEds2M9/OtAHSvGScr
+Pb7hXWT+5cX3PqgPiLc1R0TRTjCzUEJYtuSpwb6/JHuVNXmpek1xzLfkykXu7LsG
+sy0GXILOBAX5lxYrIgHIMv4a3pjI4UbwB1OzvthRc4kJXyBBT7L7LlPgaJ97xelf
+L4TAluWzris5Xa7Y53IfkhE=
+-----END PRIVATE KEY-----
\ No newline at end of file
diff --git a/k8s-certifications/data/Seccomp/audit.json b/k8s-certifications/data/Seccomp/audit.json
new file mode 100644
index 0000000..1f2d5df
--- /dev/null
+++ b/k8s-certifications/data/Seccomp/audit.json
@@ -0,0 +1,3 @@
+{
+ "defaultAction": "SCMP_ACT_LOG"
+}
\ No newline at end of file
diff --git a/k8s-certifications/data/kubeconfig.yaml b/k8s-certifications/data/kubeconfig.yaml
new file mode 100644
index 0000000..5bc7cf4
--- /dev/null
+++ b/k8s-certifications/data/kubeconfig.yaml
@@ -0,0 +1,67 @@
+apiVersion: v1
+current-context: kubernetes-admin@kubernetes
+kind: Config
+preferences: {}
+clusters:
+- cluster:
+ certificate-authority: /etc/kubernetes/pki/ca.crt
+ server: https://controlplane:6443
+ name: kubernetes
+- name: labs
+ cluster:
+ certificate-authority: /etc/kubernetes/pki/ca.crt
+ server: https://controlplane:6443
+- name: development
+ cluster:
+ certificate-authority: /etc/kubernetes/pki/ca.crt
+ server: https://controlplane:6443
+- name: qa
+ cluster:
+ certificate-authority: /etc/kubernetes/pki/ca.crt
+ server: https://controlplane:6443
+- name: production
+ cluster:
+ certificate-authority: /etc/kubernetes/pki/ca.crt
+ server: https://controlplane:6443
+users:
+- name: kubernetes-admin
+ user:
+ client-certificate: /etc/kubernetes/pki/users/user/user.crt
+ client-key: /etc/kubernetes/pki/users/user/user.key
+- name: labs-user
+ user:
+ client-certificate: /etc/kubernetes/pki/users/test-user/labs-user.crt
+ client-key: /etc/kubernetes/pki/users/test-user/labs-user.key
+- name: dev-user
+ user:
+ client-certificate: /etc/kubernetes/pki/users/dev-user/dev-user.crt
+ client-key: /etc/kubernetes/pki/users/dev-user/dev-user.key
+- name: qa-user
+ user:
+ client-certificate: /etc/kubernetes/pki/users/qa-user/qa-user.crt
+ client-key: /etc/kubernetes/pki/users/qa-user/qa-user.key
+- name: prod-user
+ user:
+ client-certificate: /etc/kubernetes/pki/users/prod-user/prod-user.crt
+ client-key: /etc/kubernetes/pki/users/prod-user/prod-user.key
+contexts:
+- context:
+ cluster: kubernetes
+ user: kubernetes-admin
+ name: kubernetes-admin@kubernetes
+- name: labs-user@labs
+ context:
+ cluster: labs
+ user: labs-user
+- name: development-user@labs
+ context:
+ cluster: development
+ user: development-user
+- name: qa-user@qa
+ context:
+ cluster: qa
+ user: qa-user
+- name: prod-user@prod
+ context:
+ cluster: prod
+ user: prod-user
\ No newline at end of file
diff --git a/k8s-certifications/data/tls.crt b/k8s-certifications/data/tls.crt
new file mode 100644
index 0000000..8b2e031
--- /dev/null
+++ b/k8s-certifications/data/tls.crt
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFZDCCA0wCCQCLkCF9TN02ITANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJJ
+TjELMAkGA1UECAwCTUgxDTALBgNVBAcMBGNpdHkxEDAOBgNVBAoMB2NvbXBhbnkx
+EDAOBgNVBAsMB3NlY3Rpb24xCzAJBgNVBAMMAkRLMRgwFgYJKoZIhvcNAQkBFglh
+LmJAYy5jb20wHhcNMjExMjEwMTMzMTA0WhcNMjIxMjEwMTMzMTA0WjB0MQswCQYD
+VQQGEwJJTjELMAkGA1UECAwCTUgxDTALBgNVBAcMBGNpdHkxEDAOBgNVBAoMB2Nv
+bXBhbnkxEDAOBgNVBAsMB3NlY3Rpb24xCzAJBgNVBAMMAkRLMRgwFgYJKoZIhvcN
+AQkBFglhLmJAYy5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDc
+56DteFTMyJeLn2qP+5AIuumvW3B4ndk/h8p7489J5EH6KNlL4gp5P4q0rZRXJqaX
+sRzBZD2nM2kWDwRC+KjgffQHxTESZOe8jLBl4kz2iPWLIsa2nfIVgoi0U9qZ6bGN
+LU4yxYWyliKgD0xweTV9EsUHCgYjLO8lkwRPcMCAHNPckcXooOO/PLKHz5Kzg4J/
+au6TNF3GqzX5ECpArgZOd+67rM1ZFg9jxGyQZmfAnklILOBuN9DCsHqVHScdcATi
+Y105KLFAg8KCJ8+BSPzBVRNjuWhmfzmHBqPAWg4N50D10IHgeJWcdg51VbgC0aYO
+sbx4JSCUfvjKHDAQfd0PhQDpfvam2tERc1HQfKFAa89SWPVblRE4szaI+uSqJdIg
+P+XJ3YVqIHJUblC1mM85EAfSRmEv3Tn2C+gwi65gpYLkjvJr4ucRs+vCvF/s3qYA
+QnP87FyXa7GEZSpLop/lVb2J5o7muc69FKNOpUHYDkVxjlmMs+T5RZgOXL7lvjnN
+c09rjVs+lVZ/fW+Ej4p0lF4HJuG+vaGU79w8SJz7nUQiU+A9ayoJfbld7BgCv4UQ
+yS0G2uuKlxRVw+NZGCNSmthDAvytNBR2C4qpXw8pK+BrAc7jibOOvJWg1Zl7KY89
+taD0RLpd9WE+6QTvyXnS88p+uY6fjhAivS85tW+7LwIDAQABMA0GCSqGSIb3DQEB
+CwUAA4ICAQAZ0lH73nsPbm40JtqElGCzdf/OjlbfiPPATOy+6FvR5e2myg2hnDu8
+nPYSKs3F5hRdYm90a6r3q4+Cyej58259WOK5r0gW6GTJFoT/A/cKyqsolXZ4jjK6
+RPT0a5Vll0M8uRMPysRc8hGI1s06DFOfRWYDwtAfn20UpHjmLvjRYjXDS4FNLAh1
+c4G1GGGFVTpQo6yL881m+iErDUqU9pOR3Yu+NbOG7FFQXQtSuy7tFlRL65oyASHx
+I3REB6VL7CL37E9LDhdGoLRAWARRFWCGvZLRj9IBF/dQKXGjeD8BGnmNEUIMA9JW
+KiXmx41Rnf41v1v77LonCBveU2oubuc4YfnNcbAQHnoiN7sjcNIkIBFWspbhSstc
+761G7bejMgP8HUYp0NZySABRsL+3bXtkVX8tmOx7/riR4TxMVjyPp8wGg/cuo8AJ
+DpizNmUQAg1YEo+5xe9tQV+C7ScvbbtTDkrWm+vXci4qXaXaJZv4VFvDCnQnfhL1
+mKbLZp7L7vpoWfezE0jNw7NV1Ys75AZDJBcOp2RyNaP+MCWf6/EQs2/UL0YntexE
+c7eqGREkFsxyaF960B2K73qbMlxahCwK3h7Q2Z7udmWGvayaIr7V3V2sBHDr8u36
+99bwdR/h/t8Y2slP3kuuIteJSYpKAtQqt/FvoFtTDc91ZZ6ugYqnVg==
+-----END CERTIFICATE-----
diff --git a/k8s-certifications/data/tls.key b/k8s-certifications/data/tls.key
new file mode 100644
index 0000000..a175c0f
--- /dev/null
+++ b/k8s-certifications/data/tls.key
@@ -0,0 +1,52 @@
+-----BEGIN PRIVATE KEY-----
+MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDc56DteFTMyJeL
+n2qP+5AIuumvW3B4ndk/h8p7489J5EH6KNlL4gp5P4q0rZRXJqaXsRzBZD2nM2kW
+DwRC+KjgffQHxTESZOe8jLBl4kz2iPWLIsa2nfIVgoi0U9qZ6bGNLU4yxYWyliKg
+D0xweTV9EsUHCgYjLO8lkwRPcMCAHNPckcXooOO/PLKHz5Kzg4J/au6TNF3GqzX5
+ECpArgZOd+67rM1ZFg9jxGyQZmfAnklILOBuN9DCsHqVHScdcATiY105KLFAg8KC
+J8+BSPzBVRNjuWhmfzmHBqPAWg4N50D10IHgeJWcdg51VbgC0aYOsbx4JSCUfvjK
+HDAQfd0PhQDpfvam2tERc1HQfKFAa89SWPVblRE4szaI+uSqJdIgP+XJ3YVqIHJU
+blC1mM85EAfSRmEv3Tn2C+gwi65gpYLkjvJr4ucRs+vCvF/s3qYAQnP87FyXa7GE
+ZSpLop/lVb2J5o7muc69FKNOpUHYDkVxjlmMs+T5RZgOXL7lvjnNc09rjVs+lVZ/
+fW+Ej4p0lF4HJuG+vaGU79w8SJz7nUQiU+A9ayoJfbld7BgCv4UQyS0G2uuKlxRV
+w+NZGCNSmthDAvytNBR2C4qpXw8pK+BrAc7jibOOvJWg1Zl7KY89taD0RLpd9WE+
+6QTvyXnS88p+uY6fjhAivS85tW+7LwIDAQABAoICAGSN62stQyyUgqdDwbYYxM+0
+hXsVHHVLJQEORtVuNYVlKcM9pOwt0KawjesAuG2TYnHaZUSC5K2fcU5hN4dkuTq3
+GsYOtO+yjun9AK7f/Dicz2iuQ9YMv42bBa9QHEnDXtbssJPb5agNP2WskRcBlZ+B
+U76IiZKpeZKZAXVH1dh7RtU4ZeYmloUOlBXOHvEoA9cMTd0kESvF86OUACfBD43Y
+egtj9XV/3TGE0AZLFx9O7fy0sNR7A8QboTEPPCbiPtbudBj4tPaxA3FLveET4DoB
+B/p1A1jkwML9+rwsQgmCIsfCSdxsB25ZLuuqQUDHPdeigDAQdmwiAA3AFwDqyhzV
+wuBeQH7OitOq7kBZAZ1Sv6jT3IkeM53ysMOfCa0LCvOCZt+GYtxxlH3XGQVKjBPi
+mm9txjpbpxBdYfi15lr+SXfy48YUCXiihNkIQ2XevQlFEn4c8axW7l34j9eF1vnf
+d1IQ6cBbNP8QQVHnMr/xH+EJa4D4EBBwGDclEanCXgeVuhcJU+qi92gUnVnKKqA2
+EHseNJhgrNEff6od2xlDC2NiM8DskaHCSG15E8mVMr+N1WKjZEJPG5kjjyQ5DU/8
+v/pqHzwOK6hG2D7fJiuSVTaEClF72qWHCIEG8M46h+lpZ6DaQvzkMoQ/ga1Ebc28
+b3ghJdkt4JwtizIC9tQBAoIBAQD2f+maNqHR2InJKJ/e2s8C3e3+kxXpgZiGujr+
+06Whhz2a073zx8UX21PBlro1J0JdlIF3DVSUshSk3qu7KQZPCQr4x8IH5JPFdDi0
+ZRXshm5ByUSyWnmmDVku3pvl521Gwd8XdEKHNmFaq/wpT/aRkrLs++vapx41iLcr
+qBa7grh/0wGej/ec3xtfGClymfqNuLQIPpLCmJG+gMM9Kcoc3s5L1oA6yM1NneZB
+7rYjNG9HraF17wXY+wp1/pqu5dhCwwhRAahifvuYMRirPX2J9dqAAsSoePHf2CkF
+HA7ToDyXIa6GmpdSng2sE2A/GgXD2X0ev0QmO8b4iGChe3fvAoIBAQDlay9AzcZu
++OxCAC1T0jJZzAPeN3Wz08K4RTE4tWbsBj8j/GenimgaFn4jXYo4vMdFPa6ET9+p
+Lem9YVcGfRtp3a8N3Lx2KkT8SZTD+itMt8UPmbxIJviO1Z/KdqlNyxNt6tWlMbKA
+z72CWvwvbXXPFMKIROS2xRgXmx7r0C0750IXYEtsIColjXh5ME6faECgsWWqsCi1
+cnH1awrzGkw5BwPeGYB/pmGRtd2q1kb5BoP7GuME8/T1T/A2I0ltjU9rX8qjeyMv
+S43tEFWHxTijNsKK/UvLFn2K/lfCVQMQnKhpHKuJOtTsFkGg2Ukwe8rnDtSQdgWg
+3P2p0IXjerDBAoIBADWx/2z8YZuYk8sh8lFVUKrLNUCzQZ6wAE2424kPCZF6KE1F
+uqcT6TcdK82Ly9wwRSClbN5GJRqPADg52SbX9OvaiG1Q9k9J13a3rnJ9Yp03W2Ux
+NqmzU7R8S+UN0N/v3boAGVy+ko9ppSNfO3q0VH25ewhsiCAFL2tx8JSt9OW7v/z4
+Ne4YZlPhtdCtLrosGIwuo+j32HhTS8w3uE/mfoRzdHTIsP4dJ7u0nafXHA3nKiZv
+CDDsdFWjuc+iOofGwakpWvJqbgemqZ+pcjo7FtGqoIIqGDSqw+WC7MyUJBatXQV+
+7Mmde0Ef9NJ7Fggo3wCeq8a621mIw/r3mjUS9DkCggEAUYA8bzcrEW1Y8TGC6M45
+mPEDRsRJCjNmb3QVQmIfSCYH9E7MvBZNWUc4VHP8kJ9v40dAYjzF5iIrcV3NPr7f
+KELa13/da9UkYMP7F4weKcj3Ns2Ut8Uwc/2sII77ImnMYzYT4/W9xkkGt/J+uJKY
+UZK8cRCYd92Y63nuCDQSfb9wGUHaSXU7w894RwVESRkOLIgY6ARg0eTwWxFF+IsV
+HQVC+HnyzmZbLxp+vxwUZo9L/77Te4T3NtbJLVJn2YVj+28yW9V48GpU5yzwVaVY
+s5LWle3aKTG6M9CbeKwexJ4CriTDQ6Mk1SIq+mt2tsSjlmYMWa2z3ivj6Znslp2V
+gQKCAQEAlwe8NW+4NhvhXL1dSx4iJNfZwTeuZdTRjja9en9CtY82v2cSdRRILH57
+hfPjva5T/hqAFIkKzCkAFzgkBSF2s1oVx5tJ7fdSzUEPAMqvfWCQF06lHeUPbPM4
+fDblgStfNcCfIXKBN7LJXw2GymKK5NUqrrN8j3oT1QVGwGvQPsZyJ59mKTwX801M
+/0Qy2SRTT+97nIAHYV9iBCrw8zXGaUCevn4Jn76ps0BJ1deTZe+MHpF8mb3g/WTC
+cY/4JoaCfM6l8zjuopayxlRYaW80H6HXgUvbXZfCJFZPbJkGEHO8OnJnkAUTn08q
+Lf/09ItIfuMr+ifYGoRA2pQwUulv4g==
+-----END PRIVATE KEY-----
diff --git a/k8s-certifications/topics/README.md b/k8s-certifications/topics/README.md
new file mode 100644
index 0000000..074663f
--- /dev/null
+++ b/k8s-certifications/topics/README.md
@@ -0,0 +1,45 @@
+# Topics
+
+Topics cover test exercises for each topic.
+
+ - [Admission Controllers](./admission_controllers.md)
+ - [Annotations](./annotations.md)
+ - [APIs](./apis.md)
+ - [AppArmor](./apparmor.md)
+ - [Auditing](./auditing.md)
+ - [Authentication](./authentication.md)
+ - [Platform Binary Verification](./binary_verification.md)
+ - [Cluster Upgrade](./cluster_upgrade.md)
+ - [ConfigMaps](./configmaps.md)
+ - [DaemonSets](./daemonsets.md)
+ - [Deployments](./deployments.md)
+ - [ETCD](./etcd.md)
+ - [Falco](./falco.md)
+ - [Ingress](./ingress.md)
+ - [Init Containers](./init_containers.md)
+ - [Jobs](./jobs.md)
+ - [Kubectl Jsonpath](./jsonpath.md)
+ - [kube-bench](./kube-bench.md)
+ - [Kubeconfig](./kubeconfig.md)
+ - [Kubelet Security](./kubelet_security.md)
+ - [Kubesec](./kubesec.md)
+ - [Labels](./labels.md)
+ - [Logging](./logging.md)
+ - [Monitoring](./monitoring.md)
+ - [Namespaces](./namespaces.md)
+ - [Network Policies](./network_policies.md)
+ - [Nodes](./nodes.md)
+ - [Pod Security Context](./pod_security_context.md)
+ - [Pod Security Policies](./pod_security_policies.md)
+ - [Pods](./pods.md)
+ - [Readiness & Liveness Probes](./probes.md)
+ - [RBAC](./rbac.md)
+ - [ReplicaSets](./replica_set.md)
+ - [Runtime Classes](./runtimes.md)
+ - [Seccomp](./seccomp.md)
+ - [Secrets](./secrets.md)
+ - [Service Accounts](./service_accounts.md)
+ - [Services](./services.md)
+ - [Taints & Tolerations](./taints_tolerations.md)
+ - [Trivy](./trivy.md)
+ - [Volumes](./volumes.md)
\ No newline at end of file
diff --git a/k8s-certifications/topics/admission_controllers.md b/k8s-certifications/topics/admission_controllers.md
new file mode 100644
index 0000000..65cdaa9
--- /dev/null
+++ b/k8s-certifications/topics/admission_controllers.md
@@ -0,0 +1,214 @@
+# [Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/)
+
+An admission controller is a piece of code that intercepts requests to the Kubernetes API server prior to persistence of the object, but after the request is authenticated and authorized.
+
+ - [ImagePolicyWebhook](#imagepolicywebhook)
+ - [PodSecurityPolicy](#podsecuritypolicy)
+
+
+
+## Basics
+
+
+
+### Check the admission controllers enabled by default
+
+
+### Check the admission controllers enabled explicitly.
+
+
+### Disable `DefaultStorageClass` admission controller
+
+
+## [ImagePolicyWebhook](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#imagepolicywebhook)
+
+
+
+### [Set Up](https://github.com/kainlite/kube-image-bouncer)
+
+```bash
+# add image-bouncer-webhook to the host file
+echo "127.0.0.1 image-bouncer-webhook" >> /etc/hosts
+
+# make directory to host the keys - using /etc/kubernetes/pki as the volume is already mounted
+mkdir -p /etc/kubernetes/pki/kube-image-bouncer
+cd /etc/kubernetes/pki/kube-image-bouncer
+
+# generate webhook certificate OR use the one in data folder
+openssl req -x509 -new -days 3650 -nodes \
+ -keyout webhook.key -out webhook.crt -subj "/CN=system:node:image-bouncer-webhook.default.pod.cluster.local" \
+ -addext "subjectAltName=DNS:image-bouncer-webhook,DNS:image-bouncer-webhook.default.svc,DNS:image-bouncer-webhook.default.svc.cluster.local"
+
+# create secret
+kubectl create secret tls tls-image-bouncer-webhook --cert=/etc/kubernetes/pki/kube-image-bouncer/webhook.crt --key=/etc/kubernetes/pki/kube-image-bouncer/webhook.key
+
+# create webhook deployment exposed as node port service
+cat << EOF > image-bouncer-webhook.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: image-bouncer-webhook
+ name: image-bouncer-webhook
+spec:
+ type: NodePort
+ ports:
+ - name: https
+ port: 443
+ targetPort: 1323
+ protocol: "TCP"
+ nodePort: 30080
+ selector:
+ app: image-bouncer-webhook
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: image-bouncer-webhook
+spec:
+ selector:
+ matchLabels:
+ app: image-bouncer-webhook
+ template:
+ metadata:
+ labels:
+ app: image-bouncer-webhook
+ spec:
+ containers:
+ - name: image-bouncer-webhook
+ imagePullPolicy: Always
+ image: "kainlite/kube-image-bouncer:latest"
+ args:
+ - "--cert=/etc/admission-controller/tls/tls.crt"
+ - "--key=/etc/admission-controller/tls/tls.key"
+ - "--debug"
+ - "--registry-whitelist=docker.io,k8s.gcr.io"
+ volumeMounts:
+ - name: tls
+ mountPath: /etc/admission-controller/tls
+ volumes:
+ - name: tls
+ secret:
+ secretName: tls-image-bouncer-webhook
+EOF
+
+kubectl apply -f image-bouncer-webhook.yaml
+
+# define the admission configuration file @ /etc/kubernetes/pki/kube-image-bouncer/admission_configuration.yaml
+cat << EOF > admission_configuration.yaml
+apiVersion: apiserver.config.k8s.io/v1
+kind: AdmissionConfiguration
+plugins:
+- name: ImagePolicyWebhook
+ configuration:
+ imagePolicy:
+ kubeConfigFile: /etc/kubernetes/pki/kube-image-bouncer/kube-image-bouncer.yml
+ allowTTL: 50
+ denyTTL: 50
+ retryBackoff: 500
+ defaultAllow: false
+EOF
+
+OR
+
+# Define the admission configuration file in json format @ /etc/kubernetes/admission_configuration.json
+cat << EOF > admission_configuration.json
+{
+ "imagePolicy": {
+ "kubeConfigFile": "/etc/kubernetes/pki/kube-image-bouncer/kube-image-bouncer.yml",
+ "allowTTL": 50,
+ "denyTTL": 50,
+ "retryBackoff": 500,
+ "defaultAllow": false
+ }
+}
+EOF
+
+# Define the kube config file @ /etc/kubernetes/pki/kube-image-bouncer/kube-image-bouncer.yml
+
+cat << EOF > kube-image-bouncer.yml
+apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+ certificate-authority: /etc/kubernetes/pki/kube-image-bouncer/webhook.crt
+ server: https://image-bouncer-webhook:30080/image_policy
+ name: bouncer_webhook
+contexts:
+- context:
+ cluster: bouncer_webhook
+ user: api-server
+ name: bouncer_validator
+current-context: bouncer_validator
+preferences: {}
+users:
+- name: api-server
+ user:
+ client-certificate: /etc/kubernetes/pki/apiserver.crt
+ client-key: /etc/kubernetes/pki/apiserver.key
+EOF
+
+```
+
+#### Check if pods can be created with the nginx:latest image
+
+```bash
+kubectl create deploy nginx --image nginx
+# deployment.apps/nginx created
+kubectl get pods -w
+# NAME READY STATUS RESTARTS AGE
+# nginx-f89759699-5qbv5 1/1 Running 0 13s
+kubectl delete deploy nginx
+# deployment.apps "nginx" deleted
+```
+
+#### Enable the admission controller
+
+Edit the `/etc/kubernetes/manifests/kube-apiserver.yaml` file as below.
+
+```yaml
+ - --enable-admission-plugins=NodeRestriction,ImagePolicyWebhook # update
+ - --admission-control-config-file=/etc/kubernetes/pki/kube-image-bouncer/admission_configuration.yaml # add
+```
+
+#### Verify
+
+Wait for the kube-apiserver to restart, then try creating a deployment with the nginx:latest image.
+
+```bash
+kubectl create deploy nginx --image nginx
+# deployment.apps/nginx created
+
+kubectl get deploy nginx
+# NAME READY UP-TO-DATE AVAILABLE AGE
+# nginx 0/1 0 0 12s
+
+kubectl get events
+# 7s Warning FailedCreate replicaset/nginx-f89759699 (combined from similar events): Error creating: pods "nginx-f89759699-b2r4k" is forbidden: image policy webhook backend denied one or more images: Images using latest tag are not allowed
+```
+
+
+
+## PodSecurityPolicy
+
+Refer [Pod Security Policy Admission Controller](./pod_security_policies.md)
\ No newline at end of file
diff --git a/k8s-certifications/topics/annotations.md b/k8s-certifications/topics/annotations.md
new file mode 100644
index 0000000..602425f
--- /dev/null
+++ b/k8s-certifications/topics/annotations.md
@@ -0,0 +1,32 @@
+# [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+
+
+
+### Create pod `nginx-annotations` and Annotate it with `description='my description'` value
+
+
+
+show
+
+
\ No newline at end of file
diff --git a/k8s-certifications/topics/api_deprecations.md b/k8s-certifications/topics/api_deprecations.md
new file mode 100644
index 0000000..097b293
--- /dev/null
+++ b/k8s-certifications/topics/api_deprecations.md
@@ -0,0 +1,59 @@
+# [Kubernetes API deprecations policy](https://kubernetes.io/docs/reference/using-api/deprecation-policy/)
+
+
+
+### Given the deployment definition `nginx-deployment` for an older version of Kubernetes. Fix any API deprecation issues in the manifest so that the application can be deployed on a recent Kubernetes cluster.
+
+```yaml
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: nginx
+ name: nginx-deployment
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - image: nginx:1.20
+ name: nginx
+```
+
+
+
+show
+
+
diff --git a/k8s-certifications/topics/apis.md b/k8s-certifications/topics/apis.md
new file mode 100644
index 0000000..07859c2
--- /dev/null
+++ b/k8s-certifications/topics/apis.md
@@ -0,0 +1,36 @@
+# [APIs](https://kubernetes.io/docs/concepts/overview/kubernetes-api/)
+
+
+
+### Get all `api-resources` and check their short names and API versions.
+
+show
+
+### Get the API group for the `jobs` API
+
+show
+
+### Enable the `v1alpha1` version for `rbac.authorization.k8s.io` API group on the controlplane node.
+
+show
+
+### Check if AppArmor is available on the cluster
+
+
+
+```bash
+systemctl status apparmor
+# ● apparmor.service - AppArmor initialization
+# Loaded: loaded (/lib/systemd/system/apparmor.service; enabled; vendor preset: enabled)
+# Active: active (exited) since Thu 2021-12-16 02:19:57 UTC; 40s ago
+# Docs: man:apparmor(7)
+# http://wiki.apparmor.net/
+# Main PID: 312 (code=exited, status=0/SUCCESS)
+# Tasks: 0 (limit: 2336)
+# CGroup: /system.slice/apparmor.service
+
+# Dec 16 02:19:57 controlplane systemd[1]: Starting AppArmor initialization...
+# Dec 16 02:19:57 controlplane apparmor[312]: * Starting AppArmor profiles
+# Dec 16 02:19:57 controlplane apparmor[312]: Skipping profile in /etc/apparmor.d/disable: usr.sbin.rsyslogd
+# Dec 16 02:19:57 controlplane apparmor[312]: ...done.
+# Dec 16 02:19:57 controlplane systemd[1]: Started AppArmor initialization.
+```
+
+
+
+### Check if the AppArmor module is loaded and the profiles loaded by AppArmor in different modes.
+
+
+
+```bash
+aa-status
+# apparmor module is loaded.
+# 12 profiles are loaded.
+# 12 profiles are in enforce mode.
+# /sbin/dhclient
+# /usr/bin/man
+# /usr/lib/NetworkManager/nm-dhcp-client.action
+# /usr/lib/NetworkManager/nm-dhcp-helper
+# /usr/lib/connman/scripts/dhclient-script
+# /usr/lib/snapd/snap-confine
+# /usr/lib/snapd/snap-confine//mount-namespace-capture-helper
+# /usr/sbin/ntpd
+# /usr/sbin/tcpdump
+# docker-default
+# man_filter
+# man_groff
+# 0 profiles are in complain mode.
+# 9 processes have profiles defined.
+# 9 processes are in enforce mode.
+# /sbin/dhclient (639)
+# docker-default (2008)
+# docker-default (2026)
+# docker-default (2044)
+# docker-default (2058)
+# docker-default (2260)
+# docker-default (2277)
+# docker-default (2321)
+# docker-default (2334)
+# 0 processes are in complain mode.
+# 0 processes are unconfined but have a profile defined.
+```
+
+
+
+### Use the following `k8s-apparmor-example-deny-write` AppArmor profile with the `hello-apparmor` pod.
+
+
+
+```bash
+cat << EOF > k8s-apparmor-example-deny-write
+#include <tunables/global>
+
+profile k8s-apparmor-example-deny-write flags=(attach_disconnected) {
+  #include <abstractions/base>
+
+  file,
+
+  # Deny all file writes.
+  deny /** w,
+}
+EOF
+```
+
+show
+
+### Enable Auditing on the Kubernetes cluster
+ - Capture all events for `pods` at the `RequestResponse` level
+ - Capture `delete` events for `secrets` in the `prod` namespace at the `Metadata` level
+ - Define the policy at `/etc/kubernetes/audit-policy.yaml`
+ - Redirect the log to `/var/log/kubernetes/audit/audit.log`
+ - Keep the logs for a maximum of `30` days
+
+
+
+show
+
+## [Certificates API](https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/)
+
+
+
+### Create a user certificate signing request using certs and specs as below and submit it for approval.
+
+#### Create user certs
+
+```bash
+openssl genrsa -out normal.key 2048
+openssl req -new -key normal.key -out normal.csr
+```
+
+#### Use below CertificateSigningRequest specs
+
+```yaml
+cat << EOF > normal-csr.yaml
+apiVersion: certificates.k8s.io/v1
+kind: CertificateSigningRequest
+metadata:
+ name: normal-csr
+spec:
+ request: ??
+ signerName: kubernetes.io/kube-apiserver-client
+ usages:
+ - client auth
+EOF
+```
+
+
+
+show
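+
+A minimal sketch of the usual flow, where the `request` field is assumed to be the base64-encoded CSR generated above:
+
+```bash
+# base64-encode the CSR and paste the output into the request field
+cat normal.csr | base64 | tr -d "\n"
+
+kubectl apply -f normal-csr.yaml
+kubectl get csr normal-csr
+```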
+
+### Approve the `normal-csr` request
+
+
+
+show
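+
+A one-line sketch using the standard kubectl subcommand:
+
+```bash
+kubectl certificate approve normal-csr
+# certificatesigningrequest.certificates.k8s.io/normal-csr approved
+```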
+
+### Create the below CSR request and reject it.
+
+
+
+```yaml
+cat << EOF > hacker-csr.yaml
+apiVersion: certificates.k8s.io/v1
+kind: CertificateSigningRequest
+metadata:
+ name: hacker-csr
+spec:
+ groups:
+ - system:masters
+ - system:authenticated
+ request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ2lqQ0NBWElDQVFBd1JURUxNQWtHQTFVRUJoTUNRVlV4RXpBUkJnTlZCQWdNQ2xOdmJXVXRVM1JoZEdVeApJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MFpEQ0NBU0l3RFFZSktvWklodmNOCkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNellTKzhhTXdBVmkwWHovaVp2Z2k0eGtNWTkyMWZRSmd1bGM2eDYKS0Q4UjNteEMyRkxlWklJSHRYTDZadG5KSHYxY0g0eWtMUEZtR2hDRURVNnRxQ2FpczNaWWV3MVBzVG5nd1Jzego3TG1oeDV4dzVRc3lRaFBkNjRuY3h1MFRJZmFGbmducU9UT0NGWERyaXBtZzJ5TExvbTIxL1ZxbjNQMVJQeE51CjZJdDlBOHB6aURlTVg5VTlaTHhzT0Jld2FzaFJzM29jb3NIcHp5cXN1SnQralVvUjNmaGducVB3UkNBZmQ3YUUKaUhKOWFxblhHVVNUWENXb2g2OEtPL3VkU3p2djNmcExhV1JxUUdHWi9HSWpjM1ZiZzNHN0FqNWNITUp2WHV3bwp3M0JkV1pZaEpycU9Ld21sMW9QVHJRNlhMQ2FBTFZ2NnFqZWVOSFNvOVZyVmM0OENBd0VBQWFBQU1BMEdDU3FHClNJYjNEUUVCQ3dVQUE0SUJBUUFEZGNmMHZVSnVtcmRwcGxOa0pwSERSVFI2ZlFzYk84OFM3cnlndC9vcFEvOCsKNVkyUVVjVzhSUUdpVGdvQjFGUG1FeERVcFRna2p1SEtDQ0l3RWdjc3pPRm5YdC95N1FsWXBuc0E3dG01V1ppUAozbG1xSFpQMU9tQlRBRU45L2swSFpKdjc4Rytmcm0xNnRJbWtzUHpSK2lBajZ2WDZtT1RNVEk3Y1U5cmIvSElLCmVOTTZjV2dYQzYrbU9PbDFqM3BjS1hlVlB0YS9MbDZEVFc0VWdnR0J1NVJPb3FWRS9sTDNQNnc4K2R3M0lWQngKWlBrK0JDNVQrMkZLMFNzd3VvSCtaKzhtbi8weHR2bk1nL3FPTWIwdXVvcDNSTklVZmFhR1pRSjRmSnVrMGdkQwpXZHFselJMREsydXZYcWVFUXFjMENxZmVVdXRGdzVuOWNWZVdvRFVwCi0tLS0tRU5EIENFUlRJRklDQVRFIFJFUVVFU1QtLS0tLQo=
+ signerName: kubernetes.io/kube-apiserver-client
+ usages:
+ - digital signature
+ - key encipherment
+ - server auth
+EOF
+
+kubectl apply -f hacker-csr.yaml
+```
+
+show
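+
+A sketch of the standard rejection flow:
+
+```bash
+kubectl certificate deny hacker-csr
+# certificatesigningrequest.certificates.k8s.io/hacker-csr denied
+```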
\ No newline at end of file
diff --git a/k8s-certifications/topics/binary_verification.md b/k8s-certifications/topics/binary_verification.md
new file mode 100644
index 0000000..89f61dd
--- /dev/null
+++ b/k8s-certifications/topics/binary_verification.md
@@ -0,0 +1,22 @@
+# Verify Platform Binaries
+
+- Kubernetes provides the binaries and their checksum hashes for us to verify their authenticity.
+- Check the Kubernetes [CHANGELOG](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG)
+
+
+
+### Download the https://dl.k8s.io/v1.22.0/kubernetes.tar.gz and verify the sha matches with `d1145ec29a8581a4c94a83cefa3658a73bfc7d8e2624d31e735d53551718c9212e477673f74cfa4e430a8367a47bba65e2573162711613e60db54563dc912f00`.
+
+
+
+show
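+
+A minimal sketch, assuming `curl` and `sha512sum` are available on the host:
+
+```bash
+curl -LO https://dl.k8s.io/v1.22.0/kubernetes.tar.gz
+echo "d1145ec29a8581a4c94a83cefa3658a73bfc7d8e2624d31e735d53551718c9212e477673f74cfa4e430a8367a47bba65e2573162711613e60db54563dc912f00 kubernetes.tar.gz" | sha512sum --check
+# kubernetes.tar.gz: OK
+```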
\ No newline at end of file
diff --git a/k8s-certifications/topics/cluster_upgrade.md b/k8s-certifications/topics/cluster_upgrade.md
new file mode 100644
index 0000000..2233a79
--- /dev/null
+++ b/k8s-certifications/topics/cluster_upgrade.md
@@ -0,0 +1,328 @@
+# [Cluster Upgrade](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)
+
+~~**NOTE** - This was performed on the [katacoda playground](https://www.katacoda.com/courses/kubernetes/playground) with a two node cluster at version v1.18.0. It was upgraded to version 1.19.3. Version 1.19.4 was not used as it had issues upgrading on the worker node.~~
+
+
+
+### Upgrade Control Plane nodes
+
+
+
+#### Check current version
+
+```bash
+kubectl get nodes
+
+# NAME STATUS ROLES AGE VERSION
+# controlplane Ready master 4m53s v1.18.0
+# node01         Ready    <none>   ...     v1.18.0
+```
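+
+A sketch of the standard kubeadm control plane upgrade flow, assuming the same 1.19.3-00 package version used for the workers below:
+
+```bash
+apt-get update && \
+apt-get install -y --allow-change-held-packages kubeadm=1.19.3-00
+
+kubeadm upgrade plan
+kubeadm upgrade apply v1.19.3
+
+kubectl drain controlplane --ignore-daemonsets
+
+apt-get install -y --allow-change-held-packages kubelet=1.19.3-00 kubectl=1.19.3-00
+sudo systemctl daemon-reload
+sudo systemctl restart kubelet
+
+kubectl uncordon controlplane
+```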
+
+### Upgrade worker nodes
+
+
+
+#### Upgrade kubeadm
+```bash
+apt update
+apt-cache madison kubeadm
+
+apt-get update && \
+apt-get install -y --allow-change-held-packages kubeadm=1.19.3-00
+# Unpacking kubeadm (1.19.3-00) over (1.18.0-00) ...
+# Setting up kubernetes-cni (0.8.7-00) ...
+# Setting up kubeadm (1.19.3-00) ...
+```
+
+#### Upgrade the kubelet configuration
+
+```bash
+sudo kubeadm upgrade node
+
+# [upgrade] Reading configuration from the cluster...
+# [upgrade] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
+# [preflight] Running pre-flight checks
+# [preflight] Skipping prepull. Not a control plane node.
+# [upgrade] Skipping phase. Not a control plane node.
+# [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
+# [upgrade] The configuration for this node was successfully updated!
+# [upgrade] Now you should go ahead and upgrade the kubelet package using your package manager.
+```
+
+#### Drain the node - Execute this on the control plane node
+
+```bash
+kubectl drain node01 --ignore-daemonsets
+
+# node/node01 cordoned
+# WARNING: ignoring DaemonSet-managed Pods: kube-system/kube-flannel-ds-amd64-26gz5, kube-system/kube-keepalived-vip-dskqw, kube-system/kube-proxy-jwpgs
+# evicting pod kube-system/coredns-f9fd979d6-gjfpn
+# evicting pod kube-system/coredns-f9fd979d6-xvh8h
+# evicting pod kube-system/katacoda-cloud-provider-5f5fc5786f-565r6
+# pod/katacoda-cloud-provider-5f5fc5786f-565r6 evicted
+# pod/coredns-f9fd979d6-gjfpn evicted
+# pod/coredns-f9fd979d6-xvh8h evicted
+# node/node01 evicted
+```
+
+#### Upgrade kubelet and kubectl
+
+```bash
+apt-get update && \
+apt-get install -y --allow-change-held-packages kubelet=1.19.3-00 kubectl=1.19.3-00
+
+# ....
+# kubectl is already the newest version (1.19.3-00).
+# kubelet is already the newest version (1.19.3-00).
+# The following packages were automatically installed and are no longer required:
+# libc-ares2 libhttp-parser2.7.1 libnetplan0 libuv1 nodejs-doc python3-netifaces
+# Use 'apt autoremove' to remove them.
+# 0 upgraded, 0 newly installed, 0 to remove and 201 not upgraded.
+
+sudo systemctl daemon-reload
+sudo systemctl restart kubelet
+```
+
+#### Uncordon the node - Execute this on the control plane node
+
+```bash
+kubectl uncordon node01
+# node/node01 uncordoned
+```
+
+#### Verify nodes are upgraded
+
+```bash
+kubectl get nodes
+# NAME           STATUS   ROLES    AGE   VERSION
+# controlplane   Ready    master   22m   v1.19.3
+# node01         Ready    <none>   ...   v1.19.3
+```
+
+### Check the configmaps on the cluster in the default namespace
+
+
+
+show
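+
+A minimal sketch:
+
+```bash
+kubectl get configmaps
+```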
+
+### Check the configmaps on the cluster in all the namespaces
+
+
+
+show
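+
+A minimal sketch:
+
+```bash
+kubectl get configmaps --all-namespaces
+```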
+
+### Create a new pod `nginx-1` with `nginx` image and add env variable for `DB_HOST=db.example.com`, `DB_USER=development`, `DB_PASSWD=password`
+
+
+
+show
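+
+A minimal sketch using imperative flags:
+
+```bash
+kubectl run nginx-1 --image=nginx --env=DB_HOST=db.example.com --env=DB_USER=development --env=DB_PASSWD=password
+```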
+
+### Create a configmap named `db-config-1` with data `DB_HOST=db.example.com`, `DB_USER=development`, `DB_PASSWD=password`
+
+
+
+show
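+
+A minimal sketch:
+
+```bash
+kubectl create configmap db-config-1 --from-literal=DB_HOST=db.example.com --from-literal=DB_USER=development --from-literal=DB_PASSWD=password
+```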
+
+### Create a configmap named `db-config-2` with data from file `db.properties`
+
+
+
+```bash
+cat << EOF > db.properties
+DB_HOST=db.example.com
+DB_USER=development
+DB_PASSWD=password
+EOF
+```
+
+show
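+
+A minimal sketch:
+
+```bash
+kubectl create configmap db-config-2 --from-file=db.properties
+```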
+
+### Create a new pod `nginx-2` with `nginx` image and add env variable for `DB_HOST` from configmap map `db-config-1`
+
+
+
+show
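+
+A minimal sketch of the pod spec, showing only the relevant `env` stanza:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx-2
+spec:
+  containers:
+  - image: nginx
+    name: nginx-2
+    env:
+    - name: DB_HOST
+      valueFrom:
+        configMapKeyRef:
+          name: db-config-1
+          key: DB_HOST
+```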
+
+### Create a new pod `nginx-3` with `nginx` image and add all env variables from configmap `db-config-1`
+
+
+
+show
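+
+A minimal sketch using `envFrom`:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx-3
+spec:
+  containers:
+  - image: nginx
+    name: nginx-3
+    envFrom:
+    - configMapRef:
+        name: db-config-1
+```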
+
+### Create a new pod `nginx-4` with `nginx` image and mount the configmap `db-config-1` as a volume named `db-config` and mount path `/config`
+
+show
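+
+A minimal sketch mounting the configmap as a volume:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx-4
+spec:
+  containers:
+  - image: nginx
+    name: nginx-4
+    volumeMounts:
+    - name: db-config
+      mountPath: /config
+  volumes:
+  - name: db-config
+    configMap:
+      name: db-config-1
+```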
+
+### Clean up
+
+```bash
+kubectl delete pod nginx-1 nginx-2 nginx-3 nginx-4 --force --grace-period=0
+kubectl delete configmap db-config-1 db-config-2
+rm db.properties nginx-2.yaml nginx-3.yaml nginx-4.yaml
+```
diff --git a/k8s-certifications/topics/daemonsets.md b/k8s-certifications/topics/daemonsets.md
new file mode 100644
index 0000000..eebc959
--- /dev/null
+++ b/k8s-certifications/topics/daemonsets.md
@@ -0,0 +1,74 @@
+# [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/)
+
+A DaemonSet ensures that all (or some) Nodes run a copy of a Pod. As nodes are added to the cluster, Pods are added to them. As nodes are removed from the cluster, those Pods are garbage collected. Deleting a DaemonSet will clean up the Pods it created.
+
+
+
+### Get the daemonset in all namespaces
+
+show
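+
+A minimal sketch:
+
+```bash
+kubectl get daemonsets --all-namespaces
+```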
+
+### Ensure a single instance of pod nginx is running on each node of the Kubernetes cluster, where nginx also represents the image name which has to be used. Do not override any taints currently in place.
+
+show
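+
+A minimal DaemonSet sketch; a DaemonSet schedules one pod per schedulable node without overriding taints:
+
+```yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: nginx
+spec:
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - image: nginx
+        name: nginx
+```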
\ No newline at end of file
diff --git a/k8s-certifications/topics/debugging.md b/k8s-certifications/topics/debugging.md
new file mode 100644
index 0000000..3ee1839
--- /dev/null
+++ b/k8s-certifications/topics/debugging.md
@@ -0,0 +1,205 @@
+# [Kubernetes Debugging](https://kubernetes.io/docs/tasks/debug/)
+
+
+
+### The given deployment definition `nginx-deployment` does not work. Identify and fix the problems by updating the associated resources so that the Deployment works.
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ kind: frontend
+ name: nginx-deployment
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ kind: frontend
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - image: nginx
+ name: nginx
+```
+
+
+
+show
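+
+A sketch of the likely diagnosis: the selector `matchLabels: kind: frontend` does not match the pod template labels (`app: nginx`), and Kubernetes rejects a Deployment whose selector does not select its own template. One fix is to align the template labels:
+
+```yaml
+  template:
+    metadata:
+      labels:
+        kind: frontend
+```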
+
+### The given deployment definition `nginx-deployment` is exposed using the Service `frontend-svc`. Identify and fix the problems by updating the associated resources so that the Service works.
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ kind: frontend
+ name: nginx-deployment
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ kind: frontend
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - image: nginx
+ name: nginx
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ kind: frontend
+ name: frontend-svc
+spec:
+ ports:
+ - port: 8080
+ protocol: TCP
+ targetPort: 8080
+ selector:
+ kind: frontend
+status:
+ loadBalancer: {}
+```
+
+
+
+show
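+
+A sketch of the likely fixes: as above, the pod template labels must carry `kind: frontend` so that both the Deployment selector and the Service selector match, and the Service `targetPort` should be 80, the port nginx actually listens on:
+
+```yaml
+  ports:
+  - port: 8080
+    protocol: TCP
+    targetPort: 80
+```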
+
+### A Deployment named `web` is exposed via Ingress `web-ingress`. The Deployment is supposed to be reachable at http://dk8s.local/web-ingress, but requesting this URL is currently returning an error. Identify and fix the problems by updating the associated resources so that the Deployment becomes externally reachable as planned.
+
+```bash
+kubectl create deployment web --image=gcr.io/google-samples/hello-app:1.0
+kubectl expose deployment web --name web-svc --port 80
+```
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: web-ingress
+ annotations:
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+spec:
+ rules:
+ - http:
+ paths:
+ - backend:
+ service:
+ name: web
+ port:
+ number: 80
+ path: /
+ pathType: Prefix
+status:
+ loadBalancer: {}
+```
+
+
+
+show
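+
+A sketch of the likely fixes, assuming `dk8s.local` resolves to the ingress controller: the backend must reference the actual Service name `web-svc`, the rule needs the `dk8s.local` host and the `/web-ingress` path, and the rewrite annotation needs a capture group to strip the prefix:
+
+```yaml
+metadata:
+  annotations:
+    nginx.ingress.kubernetes.io/rewrite-target: /$2
+spec:
+  rules:
+  - host: dk8s.local
+    http:
+      paths:
+      - backend:
+          service:
+            name: web-svc
+            port:
+              number: 80
+        path: /web-ingress(/|$)(.*)
+        pathType: ImplementationSpecific
+```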
+
diff --git a/k8s-certifications/topics/deployments.md b/k8s-certifications/topics/deployments.md
new file mode 100644
index 0000000..4cc5465
--- /dev/null
+++ b/k8s-certifications/topics/deployments.md
@@ -0,0 +1,905 @@
+# [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/)
+
+A Deployment provides declarative updates for Pods and ReplicaSets.
+
+ 1. [Basics](#basics)
+ 2. [Deployment HA & Self Healing](#deployment-self-healing)
+ 3. [Deployment Scaling](#deployment-scaling)
+ 4. [Deployment Rollout](#deployment-rollout)
+ 5. [Deployment Deletion](#deployment-deletion)
+ 6. [HPA](#hpa)
+
+
+
+## Basics
+
+
+
+### Check number of deployments in the default namespace
+
+
+
+show
+
+### Create deployment named `nginx-deployment` with `nginx:1.20` image with `3` replicas
+
+
+
+show
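+
+A minimal sketch:
+
+```bash
+kubectl create deployment nginx-deployment --image=nginx:1.20 --replicas=3
+```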
+
+### View the YAML of `nginx-deployment` deployment
+
+
+
+show
+
+### Create a new deployment for running nginx with the following parameters
+- Name the deployment `frontend` and configure with 4 replicas
+- Configure the pod with a container image of nginx:1.21
+- Set an environment variable of NGINX_PORT=8080 and also expose that port for the container above
+
+
+
+show
+
+### Create a deployment as follows:
+- Name: nginx-random using the nginx image
+- Exposed via a service nginx-random
+- Ensure that the service & pod are accessible via their respective DNS records
+- Use the utility nslookup to lookup the DNS records of the service & pod
+
+
+
+show
+
+## Deployment Self healing
+
+
+
+### Create a deployment named nginx-ha using nginx image with 3 replicas to test self-healing properties. Delete all pod and check the behaviour.
+
+
+
+show
+
+## Deployment Scaling
+
+
+
+### Scale up the `nginx-deployment` from 3 replica to 5 replicas
+
+
+
+show
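+
+A minimal sketch:
+
+```bash
+kubectl scale deployment nginx-deployment --replicas=5
+```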
+
+### Scale down the `nginx-deployment` from 5 replica to 3 replicas
+
+
+
+show
+
+### Scale the deployment with below specs for availability, and create a service to expose the deployment within your infrastructure. Start with the deployment named `ha-deployment`, which has already been deployed to the namespace `ha`.
+Edit it to:
+- create namespace ha
+- Add the func=frontend key/value label to the pod template metadata to identify the pod for the service definition
+- Have 4 replicas
+- Exposes the service on TCP port 8080
+- is mapped to the pods defined by the specification of ha-deployment
+- Is of type NodePort
+- Has a name of cherry
+
+```yaml
+cat << EOF > ha-deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: ha-deployment
+ name: ha-deployment
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: ha-deployment
+ strategy: {}
+ template:
+ metadata:
+ labels:
+ app: ha-deployment
+ spec:
+ containers:
+ - image: nginx
+ name: nginx
+ resources: {}
+status: {}
+EOF
+```
+
+show
+
+## Deployment Rollout
+
+
+
+### Check the rollout for `nginx-deployment` deployment
+
+
+
+show
+
+### Update the `nginx-deployment` deployment image to `nginx:1.20.2`
+
+
+
+show
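+
+A minimal sketch, assuming the container is named `nginx`, the default chosen by `kubectl create deployment`:
+
+```bash
+kubectl set image deployment/nginx-deployment nginx=nginx:1.20.2
+```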
+
+### Check the rollout history for `nginx-deployment` deployment and confirm that the replicas are OK
+
+
+
+show
+
+### Undo the latest rollout and verify that new pods have the old image (nginx:1.20)
+
+
+
+show
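+
+A minimal sketch:
+
+```bash
+kubectl rollout undo deployment nginx-deployment
+kubectl get pods -o jsonpath='{.items[*].spec.containers[*].image}'
+```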
+
+### Do an on purpose update of the deployment with a wrong image `nginx:1.202.333`
+
+
+
+show
+
+### Verify that something's wrong with the rollout
+
+
+
+show
+
+### Return the deployment to the second revision (number 2) and verify the image is nginx:1.20.2
+
+
+
+show
+
+### Check the details of the fourth revision (number 4)
+
+
+
+show
+
+## Deployment Deletion
+
+
+
+### Delete the `nginx-deployment` deployment
+
+
+
+show
+
+### As a Kubernetes application developer you will often find yourself needing to update a running application. Please complete the following using the following specs:
+- Update the web1 deployment with a maxSurge of 5% and a maxUnavailable of 2%
+- Perform a rolling update of the web1 deployment, changing the nginx image version to 1.21
+- Roll back the web1 deployment to the previous version
+
+```yaml
+cat << EOF > web1.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: web1
+ name: web1
+spec:
+ replicas: 10
+ selector:
+ matchLabels:
+ app: web1
+ template:
+ metadata:
+ labels:
+ app: web1
+ spec:
+ containers:
+ - image: nginx:1.12-alpine
+ name: web1
+EOF
+
+kubectl apply -f web1.yaml
+```
+
+show
+
+### Create a deployment as follows:
+ - Name: nginx-app
+ - Using container nginx with version 1.20-alpine
+ - The deployment should contain 3 replicas
+ - Next, deploy the application with new version 1.21.4-alpine, by performing a rolling update.
+ - Finally, rollback that update to the previous version 1.20-alpine.
+
+
+
+show
+
+#### The image nqinx is invalid. Change the image to nginx.
+
+```bash
+kubectl get deploy nginx-fix
+# NAME        READY   UP-TO-DATE   AVAILABLE   AGE
+# nginx-fix   0/3     3            0           16s
+
+kubectl get pods -l app=nginx
+# NAME                         READY   STATUS         RESTARTS   AGE
+# nginx-fix-7cf9964fc7-9bkln   0/1     ErrImagePull   0          38s
+# nginx-fix-7cf9964fc7-jcz6p   0/1     ErrImagePull   0          38s
+# nginx-fix-7cf9964fc7-n5dqh   0/1     ErrImagePull   0          38s
+
+kubectl describe pod nginx-fix-7cf9964fc7-9bkln
+# Warning  Failed  1s (x4 over 97s)  kubelet, node01  Failed to pull image "nqinx": rpc error: code = Unknown desc = Error response from daemon: pull access denied for nqinx, repository does not exist or may require 'docker login': denied: requested access to the resource is denied
+# Warning  Failed  1s (x4 over 97s)  kubelet, node01  Error: ErrImagePull
+
+# fix the image
+kubectl set image deployment.v1.apps/nginx-fix nqinx=nginx
+
+kubectl get pods -l app=nginx
+# NAME                        READY   STATUS    RESTARTS   AGE
+# nginx-fix-f89759699-gn8q9   1/1     Running   0          27s
+# nginx-fix-f89759699-lmwpc   1/1     Running   0          30s
+# nginx-fix-f89759699-vbpln   1/1     Running   0          38s
+```
+
+
+```bash
+docker build . -t nginxer:3.0
+docker save nginxer:3.0 -o nginxer-3.0.tar
+docker run --name nginxer-go -p 80:80 nginxer:3.0
+```
+
+
+```dockerfile
+FROM alpine:3.12
+RUN adduser -D myuser && chown -R myuser /myapp-data
+# Avoid unnecessary privileges - run as a custom user.
+USER myuser
+ENTRYPOINT ["/myapp"]
+# Exposed ports - expose only the necessary port.
+EXPOSE 80
+```
+
+
+#### Install ETCD Client
+
+```bash
+snap install etcd # version 3.4.5, or
+apt install etcd-client
+```
+
+#### Create deployment before backup for testing
+
+```bash
+kubectl create deploy nginx --image=nginx --replicas=3
+```
+
+#### Backup ETCD
+
+```bash
+ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt \
+    --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key \
+    snapshot save /opt/snapshot-pre-boot.db
+# Snapshot saved at /opt/snapshot-pre-boot.db
+```
+
+#### Delete the deployment
+
+```bash
+kubectl delete deployment nginx
+```
+
+#### Restore ETCD Snapshot to a new folder
+
+```bash
+ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt \
+    --name=master \
+    --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key \
+    --data-dir /var/lib/etcd-from-backup \
+    --initial-cluster=master=https://127.0.0.1:2380 \
+    --initial-cluster-token etcd-cluster-1 \
+    --initial-advertise-peer-urls=https://127.0.0.1:2380 \
+    snapshot restore /opt/snapshot-pre-boot.db
+# 2021-12-21 13:56:56.460862 I | mvcc: restore compact to 1288
+# 2021-12-21 13:56:56.716540 I | etcdserver/membership: added member e92d66acd89ecf29 [https://127.0.0.1:2380] to cluster 7581d6eb2d25405b
+```
+
+#### Modify /etc/kubernetes/manifests/etcd.yaml
+
+```bash
+# Update --data-dir to use new target location
+--data-dir=/var/lib/etcd-from-backup
+
+# Update new initial-cluster-token to specify new cluster
+--initial-cluster-token=etcd-cluster-1
+
+# Update volumes and volume mounts to point to new path
+    volumeMounts:
+    - mountPath: /var/lib/etcd-from-backup
+      name: etcd-data
+    - mountPath: /etc/kubernetes/pki/etcd
+      name: etcd-certs
+  volumes:
+  - hostPath:
+      path: /var/lib/etcd-from-backup
+      type: DirectoryOrCreate
+    name: etcd-data
+```
+
+#### Verify the deployment exists after restoration
+
+```bash
+kubectl get deployment nginx
+```
+
+
+```bash
+kubectl create deployment web --image=gcr.io/google-samples/hello-app:1.0
+kubectl expose deployment web --type=NodePort --port=8080
+kubectl get service web
+# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+# web    NodePort   10.104.218.215   ...
+```
+
+```bash
+kubectl create secret tls testsecret-tls --cert=tls.crt --key=tls.key
+```
+
+```yaml
+cat << EOF > tls-example-ingress.yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tls-example-ingress
+spec:
+  tls: # add tls entry
+  - hosts:
+    - https-example.foo.com
+    secretName: testsecret-tls
+  rules:
+  - host: https-example.foo.com
+    http:
+      paths:
+      - path: /
+        pathType: Prefix
+        backend:
+          service:
+            name: service1
+            port:
+              number: 80
+EOF
+
+kubectl apply -f tls-example-ingress.yaml
+```
+
+```bash
+# verification
+kubectl get secret testsecret-tls
+kubectl get ingress tls-example-ingress
+```
+
+
+```yaml
+cat << EOF > init-demo.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: init-demo
+spec:
+  containers:
+  - name: nginx
+    image: nginx
+    ports:
+    - containerPort: 80
+    volumeMounts:
+    - name: workdir
+      mountPath: /usr/share/nginx/html
+  # Add the init container
+  initContainers:
+  - name: install
+    image: busybox
+    command:
+    - wget
+    - "-O"
+    - "/work-dir/index.html"
+    - http://info.cern.ch
+    volumeMounts:
+    - name: workdir
+      mountPath: "/work-dir"
+  dnsPolicy: Default
+  volumes:
+  - name: workdir
+    emptyDir: {}
+EOF
+
+kubectl apply -f init-demo.yaml
+
+kubectl exec init-demo -- curl http://localhost
+#   % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
+#                                  Dload  Upload   Total   Spent    Left  Speed
+# 100   646  100   646    0     0  34000      0 --:--:-- --:--:-- --:--:-- 34000
+# ... (the info.cern.ch landing page HTML follows: "From here you can:" ...)
+```
+
+ +`kubectl create job pi --image=perl -- perl -Mbignum=bpi -wle 'print bpi(2000)'` + +OR + +```bash +cat << EOF > pi.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: pi +spec: + template: + spec: + containers: + - name: pi + image: perl + command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] + restartPolicy: Never +EOF + +kubectl apply -f pi.yaml +``` + +
+ +```bash +kubectl get jobs -w # wait till 'SUCCESSFUL' is 1 (will take some time, perl image might be big) +# NAME COMPLETIONS DURATION AGE +# pi 1/1 2m18s 2m47s +kubectl get pod # get the pod name +# NAME READY STATUS RESTARTS AGE +# pi-vkj8b 0/1 Completed 0 2m50s +kubectl logs pi-vkj8b # get the pi numbers +# 3.141592653589793238462643383279502884........ +kubectl delete job pi +``` +OR + +```bash +kubectl get jobs -w # wait till 'SUCCESSFUL' is 1 (will take some time, perl image might be big) +kubectl logs job/pi +kubectl delete job pi +``` +OR + +```bash +kubectl wait --for=condition=complete --timeout=300s job pi +kubectl logs job/pi +kubectl delete job pi +``` + +
+ +```bash +kubectl create job busybox --image=busybox --dry-run=client -o yaml -- /bin/sh -c 'while true; do echo hello; sleep 10;done' > busybox-job.yaml +``` + +#### Edit `busybox-job.yaml` to add `job.spec.activeDeadlineSeconds=30` and apply `kubectl apply -f busybox-job.yaml` + +```yaml +cat << EOF > busybox-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + creationTimestamp: null + name: busybox +spec: + activeDeadlineSeconds: 30 # add this line + template: + metadata: + creationTimestamp: null + spec: + containers: + - command: + - /bin/sh + - -c + - while true; do echo hello; sleep 10;done + image: busybox + name: busybox + resources: {} + restartPolicy: Never +status: {} +EOF + +kubectl apply -f busybox-job.yaml +``` + +#### Describe the job with the statement `Warning DeadlineExceeded xxs job-controller Job was active longer than specified deadline` + +
+ +```bash +kubectl delete job busybox +``` + +
+ +```bash +kubectl create job busybox-completions-job --image=busybox --dry-run=client -o yaml -- /bin/sh -c 'echo hello;sleep 10;echo world' > busybox-completions-job.yaml +``` + +#### Edit `busybox-completions-job.yaml` to add `job.spec.completions=5` and apply `kubectl apply -f busybox-completions-job.yaml` + +```yaml +cat << EOF > busybox-completions-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + creationTimestamp: null + name: busybox-completions-job +spec: + completions: 5 # add this line + template: + metadata: + creationTimestamp: null + spec: + containers: + - command: + - /bin/sh + - -c + - echo hello;sleep 10;echo world + image: busybox + name: busybox-completions-job + resources: {} + restartPolicy: Never +status: {} +EOF + +kubectl apply -f busybox-completions-job.yaml +``` + +#### Check the job pod status `kk get pods -l job-name=busybox-completions-job` or `kubectl get jobs -w` are in completed status after 2-3 minutes. + +```bash +kubectl get jobs -w +# NAME COMPLETIONS DURATION AGE +# busybox-completions-job 0/5 7s 7s +# busybox-completions-job 1/5 15s 15s +# busybox-completions-job 2/5 28s 28s +# busybox-completions-job 3/5 42s 42s +# busybox-completions-job 4/5 56s 56s +# busybox-completions-job 5/5 70s 70s +``` + +
+ +```bash +kubectl create job busybox-parallelism-job --image=busybox --dry-run=client -o yaml -- /bin/sh -c 'echo hello;sleep 10;echo world' > busybox-parallelism-job.yaml +``` + +#### Edit `busybox-parallelism-job.yaml` to add `job.spec.parallelism=5` and apply `kubectl apply -f busybox-parallelism-job.yaml` + +```yaml +cat << EOF > busybox-parallelism-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + creationTimestamp: null + name: busybox-parallelism-job +spec: + parallelism: 5 # add this line + template: + metadata: + creationTimestamp: null + spec: + containers: + - command: + - /bin/sh + - -c + - echo hello;sleep 10;echo world + image: busybox + name: busybox-parallelism-job + resources: {} + restartPolicy: Never +status: {} +EOF + +kubectl apply -f busybox-parallelism-job.yaml +``` + +#### Check the job pod status `kk get pods -l job-name=busybox-parallelism-job` or `kubectl get jobs -w` are in completed status after 1 minute, as it would quicker as compared to before. + +```bash +kubectl get jobs -w +# NAME COMPLETIONS DURATION AGE +# busybox-parallelism-job 1/1 of 5 15s 15s +# busybox-parallelism-job 2/1 of 5 16s 16s +# busybox-parallelism-job 3/1 of 5 17s 17s +# busybox-parallelism-job 4/1 of 5 18s 18s +# busybox-parallelism-job 5/1 of 5 19s 19s +``` + +
+ +```bash +kubectl create cronjob busybox-cron-job --image=busybox --schedule="*/1 * * * *" -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster' +``` + +
+ +```bash +kubectl get cj +kubectl get jobs --watch # Bear in mind that Kubernetes will run a new job/pod for each new cron job +# NAME COMPLETIONS DURATION AGE +# busybox-cron-job-1639638720 0/1 0s +# busybox-cron-job-1639638720 0/1 0s 0s +# busybox-cron-job-1639638720 1/1 8s 9s +# busybox-cron-job-1639638780 0/1 0s +# busybox-cron-job-1639638780 0/1 1s 1s +# busybox-cron-job-1639638780 1/1 9s 9s +kubectl get pod --show-labels # observe that the pods have a label that mentions their 'parent' job +kubectl logs busybox-1529745840-m867r +kubectl delete cj busybox +``` + +
+ +```bash +kubectl create cronjob time-limited-job --image=busybox --restart=Never --dry-run=client --schedule="* * * * *" -o yaml -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster' > time-limited-job.yaml +vi time-limited-job.yaml +``` + +#### Add `cronjob.spec.startingDeadlineSeconds=17` and apply + +```bash +cat << EOF > time-limited-job.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + creationTimestamp: null + name: time-limited-job +spec: + startingDeadlineSeconds: 17 # add this line + jobTemplate: + metadata: + creationTimestamp: null + name: time-limited-job + spec: + template: + metadata: + creationTimestamp: null + spec: + containers: + - args: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster + image: busybox + name: time-limited-job + resources: {} + restartPolicy: Never + schedule: '* * * * *' +status: {} +EOF + +kubectl apply -f time-limited-job.yaml +``` + +
+ +```bash +kubectl create cronjob time-limited-job --image=busybox --restart=Never --dry-run=client --schedule="* * * * *" -o yaml -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster' > time-limited-job.yaml +vi time-limited-job.yaml +``` + +#### Add cronjob.spec.jobTemplate.spec.activeDeadlineSeconds=12 + +```bash +cat << EOF > time-limited-job.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + creationTimestamp: null + name: time-limited-job +spec: + jobTemplate: + metadata: + creationTimestamp: null + name: time-limited-job + spec: + activeDeadlineSeconds: 12 # add this line + template: + metadata: + creationTimestamp: null + spec: + containers: + - args: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster + image: busybox + name: time-limited-job + resources: {} + restartPolicy: Never + schedule: '* * * * *' +status: {} +EOF + +kubectl apply -f time-limited-job.yaml +``` + +
+ +```bash +kubectl create cronjob hello --image=busybox --restart=Never --dry-run=client --schedule="*/2 * * * *" -o yaml -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster' > hello-cronjob.yaml +vi hello-cronjob.yaml +``` + +#### Add the following specs. + +```yaml +cat << EOF > hello-cronjob.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + creationTimestamp: null + name: hello +spec: + jobTemplate: + metadata: + creationTimestamp: null + name: hello + spec: + activeDeadlineSeconds: 10 # Terminate Pods after 10 seconds + template: + metadata: + creationTimestamp: null + spec: + containers: + - command: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster + image: busybox + name: hello + resources: {} + restartPolicy: Never # Never restart Pods + schedule: '*/2 * * * *' # Execute once every 2 minutes + successfulJobsHistoryLimit: 3 # Keep 3 completed Job + failedJobsHistoryLimit: 3 # Keep 3 failed job +status: {} +EOF + +kubectl apply -f hello-cronjob.yaml + +# Trigger the job manually +kubectl create job --from=cronjob/hello hello-test +``` + +
+ +```bash +$ kubectl get nodes -o=custom-columns=NODE_NAME:.metadata.name,CPU_COUNT:.status.capacity.cpu +# NODE_NAME CPU_COUNT +# controlplane 2 +# node01 2 +``` + +
+ +```bash +kubectl get pods --all-namespaces -o jsonpath='{.items[*].spec.containers[*].image}}' | tr " " "\n" +# nginx:1.21.4-alpine +# nginx:1.21 +# nginx:1.21 +# k8s.gcr.io/coredns:1.6.7 +# k8s.gcr.io/coredns:1.6.7 +# k8s.gcr.io/etcd:3.4.3-0 +# katacoda/katacoda-cloud-provider:0.0.1 +# k8s.gcr.io/kube-apiserver:v1.18.0 +# k8s.gcr.io/kube-controller-manager:v1.18.0 +# quay.io/coreos/flannel:v0.12.0-amd64 +# quay.io/coreos/flannel:v0.12.0-amd64 +# gcr.io/google_containers/kube-keepalived-vip:0.9 +# k8s.gcr.io/kube-proxy:v1.18.0 +# k8s.gcr.io/kube-proxy:v1.18.0 +# k8s.gcr.io/kube-scheduler:v1.18.0} +``` + +
+ +```bash +kubectl get pods --sort-by=.metadata.name +# NAME READY STATUS RESTARTS AGE +# nginx-dev 1/1 Running 0 91s +# nginx-prod 1/1 Running 0 91s +# nginx-qa 1/1 Running 0 91s +``` + +
+ +```bash +kubectl get pod nginx-dev -o jsonpath='{.spec.containers[0].image}' +# nginx:1.21.4-alpine +``` + +
+ +```bash +kubectl get po -o=custom-columns="POD_NAME:.metadata.name, POD_STATUS:.status.containerStatuses[].state" | tr " " "\n" +``` + +
+ +```bash +kubectl config view --kubeconfig kubeconfig.yaml +``` + +
+ +```bash +kubectl config get-clusters --kubeconfig kubeconfig.yaml +# NAME +# development +# qa +# production +# kubernetes +# labs +``` + +
+ +```bash +kubectl config get-users --kubeconfig kubeconfig.yaml # will not work for older versions +# NAME +# dev-user +# kubernetes-admin +# labs-user +# prod-user +# qa-user +``` + +
+ +```bash +kubectl config get-contexts --kubeconfig kubeconfig.yaml +# CURRENT NAME CLUSTER AUTHINFO NAMESPACE +# development-user@labs development development-user +# * kubernetes-admin@kubernetes kubernetes kubernetes-admin +# labs-user@labs labs labs-user +# prod-user@prod prod prod-user +# qa-user@qa qa qa-user +``` + +
+ +```bash +kubectl config current-context --kubeconfig kubeconfig.yaml +# kubernetes-admin@kubernetes +``` + +
+ +```bash +kubectl config use-context prod-user@prod --kubeconfig kubeconfig.yaml +# Switched to context "prod-user@prod". +kubectl config current-context --kubeconfig kubeconfig.yaml +# prod-user@prod +``` + +
+
+```bash
+kubesec scan unsecured.yaml
+
+# [
+#   {
+#     "object": "Pod/nginx.default",
+#     "valid": true,
+#     "fileName": "unsecured.yaml",
+#     "message": "Failed with a score of -30 points",
+#     "score": -30,
+#     "scoring": {
+#       "critical": [
+#         {
+#           "id": "Privileged",
+#           "selector": "containers[] .securityContext .privileged == true",
+#           "reason": "Privileged containers can allow almost completely unrestricted host access",
+#           "points": -30
+#         }
+#       ],
+#       "advise": [
+#         {
+#           "id": "ApparmorAny",
+#           "selector": ".metadata .annotations .\"container.apparmor.security.beta.kubernetes.io/nginx\"",
+#           "reason": "Well defined AppArmor policies may provide greater protection from unknown threats. WARNING: NOT PRODUCTION READY",
+#           "points": 3
+#         },
+#         {
+#           "id": "ServiceAccountName",
+#           "selector": ".spec .serviceAccountName",
+#           "reason": "Service accounts restrict Kubernetes API access and should be configured with least privilege",
+#           "points": 3
+#         },
+#         {
+#           "id": "SeccompAny",
+#           "selector": ".metadata .annotations .\"container.seccomp.security.alpha.kubernetes.io/pod\"",
+#           "reason": "Seccomp profiles set minimum privilege and secure against unknown threats",
+#           "points": 1
+#         },
+#         {
+#           "id": "LimitsCPU",
+#           "selector": "containers[] .resources .limits .cpu",
+#           "reason": "Enforcing CPU limits prevents DOS via resource exhaustion",
+#           "points": 1
+#         },
+#         {
+#           "id": "RequestsMemory",
+#           "selector": "containers[] .resources .limits .memory",
+#           "reason": "Enforcing memory limits prevents DOS via resource exhaustion",
+#           "points": 1
+#         },
+#         {
+#           "id": "RequestsCPU",
+#           "selector": "containers[] .resources .requests .cpu",
+#           "reason": "Enforcing CPU requests aids a fair balancing of resources across the cluster",
+#           "points": 1
+#         },
+#         {
+#           "id": "RequestsMemory",
+#           "selector": "containers[] .resources .requests .memory",
+#           "reason": "Enforcing memory requests aids a fair balancing of resources across the cluster",
+#           "points": 1
+#         },
+#         {
+#           "id": "CapDropAny",
+#           "selector": "containers[] .securityContext .capabilities .drop",
+#           "reason": "Reducing kernel capabilities available to a container limits its attack surface",
+#           "points": 1
+#         },
+#         {
+#           "id": "CapDropAll",
+#           "selector": "containers[] .securityContext .capabilities .drop | index(\"ALL\")",
+#           "reason": "Drop all capabilities and add only those required to reduce syscall attack surface",
+#           "points": 1
+#         },
+#         {
+#           "id": "ReadOnlyRootFilesystem",
+#           "selector": "containers[] .securityContext .readOnlyRootFilesystem == true",
+#           "reason": "An immutable root filesystem can prevent malicious binaries being added to PATH and increase attack cost",
+#           "points": 1
+#         },
+#         {
+#           "id": "RunAsNonRoot",
+#           "selector": "containers[] .securityContext .runAsNonRoot == true",
+#           "reason": "Force the running image to run as a non-root user to ensure least privilege",
+#           "points": 1
+#         },
+#         {
+#           "id": "RunAsUser",
+#           "selector": "containers[] .securityContext .runAsUser -gt 10000",
+#           "reason": "Run as a high-UID user to avoid conflicts with the host's user table",
+#           "points": 1
+#         }
+#       ]
+#     }
+#   }
+# ]
+```
+
+#### Edit the specs to remove the below
+
+```yaml
+    securityContext:
+      privileged: true # security issue
+      readOnlyRootFilesystem: false # security issue
+```
+
+```yaml
+cat << EOF > unsecured.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  creationTimestamp: null
+  labels:
+    run: nginx
+  name: nginx
+spec:
+  containers:
+  - image: nginx
+    name: nginx
+    resources: {}
+  dnsPolicy: ClusterFirst
+  restartPolicy: Never
+EOF
+```
+
+```bash
+kubesec scan unsecured.yaml
+
+# [
+#   {
+#     "object": "Pod/nginx.default",
+#     "valid": true,
+#     "fileName": "unsecured.yaml",
+#     "message": "Passed with a score of 0 points",
+#     "score": 0,
+#     "scoring": {
+#       "advise": [
+#         ... (the same advise entries as the previous scan) ...
+#       ]
+#     }
+#   }
+# ]
+```
+
+
+```bash
+kubectl get nodes node01 --show-labels
+# NAME STATUS ROLES AGE VERSION LABELS
+# node01   Ready    <none>   ...
+```
+
+```bash
+kubectl label node node01 type=critical
+# node/node01 labeled
+```
+
+
+```bash
+kubectl label node node01 type-
+# node/node01 unlabeled
+```
+
+
+```bash
+kubectl create namespace alpha
+kubectl label namespace alpha type=critical
+
+kubectl get namespace alpha --show-labels
+# NAME    STATUS   AGE   LABELS
+# alpha   Active   70s   type=critical
+```
+
+
+```bash
+kubectl run nginx-labels --image=nginx --labels=tier=frontend
+```
+
+```bash
+# verification
+kubectl get pod nginx-labels --show-labels
+# NAME           READY   STATUS    RESTARTS   AGE   LABELS
+# nginx-labels   1/1     Running   0          16s   tier=frontend
+```
+
+
+```bash
+kubectl run nginx-labels --image=nginx --labels=name=nginx,tier=frontend,env=dev,version=v1
+```
+
+OR
+
+```yaml
+cat << EOF > nginx-labels.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    env: dev
+    name: nginx
+    tier: frontend
+    version: v1
+  name: nginx-labels
+spec:
+  containers:
+  - image: nginx
+    name: nginx
+EOF
+
+kubectl apply -f nginx-labels.yaml
+```
+
+
+```bash
+kubectl get pod nginx-labels --show-labels
+# NAME           READY   STATUS    RESTARTS   AGE   LABELS
+# nginx-labels   1/1     Running   0          26s   env=dev,name=nginx,tier=frontend,version=v1
+```
+
+
+```bash
+kubectl label pod nginx-labels version=v2 --overwrite
+
+kubectl get pod nginx-labels --show-labels
+# NAME           READY   STATUS    RESTARTS   AGE    LABELS
+# nginx-labels   1/1     Running   0          110s   env=dev,name=nginx,tier=frontend,version=v2
+```
+
+
+```bash
+kubectl get pod -L env
+# OR
+kubectl get pod --label-columns=env
+# NAME           READY   STATUS    RESTARTS   AGE   ENV
+# nginx-labels   1/1     Running   0          25s   dev
+```
+
+
+```bash
+kubectl get pod -l version=v2
+# OR
+kubectl get pod -l 'version in (v2)'
+# OR
+kubectl get pod --selector=version=v2
+```
+
+
+```bash
+kubectl label pod nginx-labels name-
+
+kubectl get pod nginx-labels --show-labels
+# NAME           READY   STATUS    RESTARTS   AGE     LABELS
+# nginx-labels   1/1     Running   0          4m49s   env=dev,tier=frontend,version=v2
+```
+
+
+```bash
+kubectl logs counter
+# OR
+kubectl logs counter -f # for tailing the logs
+```
+
+#### Copy the logs to the /tmp/counter.log file.
+
+```bash
+kubectl logs counter > /tmp/counter.log
+```
+
+
+`kubectl logs nginx-counter counter` OR `kubectl logs nginx-counter -c counter`
+
+
+```yaml
+cat << EOF > multi-container-pod.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: multi-container-pod
+spec:
+  containers:
+  - image: nginx
+    name: nginx
+  - image: redis
+    name: redis
+EOF
+
+kubectl apply -f multi-container-pod.yaml
+```
+
+
+```yaml
+cat << EOF > multi-container-nrm.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: multi-container-nrm
+spec:
+  containers:
+  - image: nginx
+    name: nginx
+  - image: redis
+    name: redis
+  - image: memcached
+    name: memcached
+EOF
+
+kubectl apply -f multi-container-nrm.yaml
+```
+
+
+```yaml
+cat << EOF > sidecar-pod.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: sidecar-pod
+spec:
+  restartPolicy: Always
+  containers:
+  - name: myapp
+    image: alpine:latest
+    command: ['sh', '-c', 'while true; do echo "logging" >> /opt/logs.txt; sleep 1; done']
+    volumeMounts:
+    - name: data
+      mountPath: /opt
+  - name: sidecar
+    image: busybox
+    command: ['sh', '-c', 'tail -F /opt/logs.txt']
+    volumeMounts:
+    - name: data
+      mountPath: /opt
+  volumes:
+  - name: data
+    emptyDir: {}
+EOF
+
+kubectl apply -f sidecar-pod.yaml
+```
+
+
+```yaml
+# quote EOF so that $i and $(date) are not expanded by the shell
+cat << 'EOF' > two-files-counter-pod-agent-sidecar.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: counter
+spec:
+  containers:
+  - name: count
+    image: busybox
+    args:
+    - /bin/sh
+    - -c
+    - >
+      i=0;
+      while true;
+      do
+        echo "$i: $(date)" >> /var/log/1.log;
+        echo "$(date) INFO $i" >> /var/log/2.log;
+        i=$((i+1));
+        sleep 1;
+      done
+    volumeMounts:
+    - name: varlog # mount the varlog volume as the /var/log path
+      mountPath: /var/log
+  - name: count-agent
+    image: k8s.gcr.io/fluentd-gcp:1.30
+    env:
+    - name: FLUENTD_ARGS
+      value: -c /etc/fluentd-config/fluentd.conf
+    volumeMounts:
+    - name: varlog # mount the varlog volume as the /var/log path
+      mountPath: /var/log
+    - name: config-volume
+      mountPath: /etc/fluentd-config
+  volumes:
+  - name: varlog # define varlog volume as empty dir which does not persist when the pod is deleted.
+    emptyDir: {}
+  - name: config-volume
+    configMap:
+      name: fluentd-config
+EOF
+
+kubectl apply -f two-files-counter-pod-agent-sidecar.yaml
+```
+
+```bash
+kubectl get pod counter
+# NAME      READY   STATUS    RESTARTS   AGE
+# counter   2/2     Running   0          24s
+
+kubectl exec counter -c count -- cat /var/log/1.log
+# : Sat Dec 18 02:34:35 UTC 2021
+# : Sat Dec 18 02:34:35 UTC 2021
+
+kubectl exec counter -c count-agent -- cat /var/log/1.log
+# : Sat Dec 18 02:34:35 UTC 2021
+# : Sat Dec 18 02:34:35 UTC 2021
+```
+
+
+```bash
+kubectl get namespaces
+```
+
+
+```bash
+kubectl create namespace alpha
+```
+
+
+```bash
+kubectl get pods --namespace=alpha
+# OR
+kubectl get pods -n=alpha
+```
+
+
+```bash
+kubectl get pods --all-namespaces
+# OR
+kubectl get pods -A
+```
+
+
+```bash
+kubectl label namespace alpha type=critical
+
+kubectl get namespace alpha --show-labels
+# NAME    STATUS   AGE   LABELS
+# alpha   Active   70s   type=critical
+```
+
+
+```bash
+kubectl delete namespace alpha
+```
+
+
+```bash
+kubectl get networkpolicy
+```
+
+
+```yaml
+cat << EOF > deny-all.yaml
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  name: deny-all
+spec:
+  podSelector: {} # select all pods
+  policyTypes: # deny all ingress and egress by not defining any ingress/egress rules
+  - Ingress
+  - Egress
+EOF
+
+kubectl apply -f deny-all.yaml
+```
+
+
+#### Create the deployments and expose as service
+
+```bash
+kubectl run consumer --image=nginx && kubectl expose pod consumer --port=80
+kubectl run producer --image=nginx && kubectl expose pod producer --port=80
+kubectl run web --image=nginx && kubectl expose pod web --port=80
+```
+
+#### Verify the communication
+
+```bash
+# verify if web and producer can access consumer
+kubectl exec producer -- curl http://consumer:80 # success
+kubectl exec web -- curl http://consumer:80 # success
+```
+
+#### Create and apply the network policy
+
+```yaml
+cat << EOF > limit-consumer.yaml
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  name: limit-consumer
+spec:
+  podSelector:
+    matchLabels:
+      run: consumer # selector for the pods
+  policyTypes:
+  - Ingress
+  ingress: # allow ingress traffic only from producer pods
+  - from:
+    - podSelector: # from pods
+        matchLabels: # with this label
+          run: producer
+EOF
+
+kubectl apply -f limit-consumer.yaml
+```
+
+#### Verify the communication
+
+```bash
+# verify if web and producer can access consumer
+kubectl exec producer -- curl http://consumer:80 # success
+kubectl exec web -- curl http://consumer:80 # failure
+```
+
+```bash
+# Cleanup
+kubectl delete pod web producer consumer --force
+kubectl delete svc web producer consumer
+rm limit-consumer.yaml
+```
+
+
+#### Create and apply the network policy
+
+```yaml
+cat << EOF > limit-web.yaml
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  name: limit-web
+spec:
+  podSelector:
+    matchLabels:
+      name: web # selector for the pods
+  policyTypes:
+  - Ingress
+  - Egress
+  ingress:
+  - {}
+  egress: # allow egress traffic only to backend & storage pods
+  - to:
+    - podSelector: # from pods
+        matchLabels: # with backend label
+          name: backend
+    - podSelector: # from pods
+        matchLabels: # with storage label
+          name: storage
+    ports:
+    - protocol: TCP
+      port: 80
+EOF
+
+kubectl apply -f limit-web.yaml
+```
+
+#### Verify the previous curl commands work. Create a dummy pod and verify that the web pod cannot reach it.
+
+```bash
+# verify if web can access backend and storage, but not dummy
+kubectl exec web -- curl http://backend:80 # success
+kubectl exec web -- curl http://storage:80 # success
+kubectl exec web -- curl http://dummy:80 # failure
+```
+
+```bash
+# Cleanup
+kubectl label namespace kube-system name-
+kubectl delete networkpolicy default-deny-all-egress limit-web
+kubectl delete pod web backend storage dummy --force
+kubectl delete svc web backend storage dummy
+rm limit-web.yaml egress-deny-all.yaml
+```
+
+
+```bash
+kubectl get nodes
+# NAME STATUS ROLES AGE VERSION
+# controlplane Ready master 62m v1.18.0
+# node01         Ready    <none>   ...   v1.18.0
+```
+
+```bash
+kubectl get nodes node01 --show-labels
+# NAME STATUS ROLES AGE VERSION LABELS
+# node01   Ready    <none>   ...
+```
+
+```bash
+kubectl label node node01 type=critical
+# node/node01 labeled
+```
+
+
+```bash
+kubectl label node node01 type-
+# node/node01 unlabeled
+```
+
+
+```bash
+kubectl top nodes
+```
+
+
+```bash
+kubectl drain node01 --ignore-daemonsets --force # drain will cordon the node as well
+# node/node01 cordoned
+# WARNING: deleting Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet: default/multi-container-nrm; ignoring DaemonSet-managed Pods: kube-system/kube-flannel-ds-amd64-6mrm2, kube-system/kube-keepalived-vip-zchjw, kube-system/kube-proxy-ms2mf
+# evicting pod default/multi-container-nrm
+# evicting pod kube-system/katacoda-cloud-provider-6c46f89b5c-jvb7g
+# pod/multi-container-nrm evicted
+# pod/katacoda-cloud-provider-6c46f89b5c-jvb7g evicted
+# node/node01 evicted
+```
+
+
+```yaml
+cat << EOF > busybox-user.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: busybox-user
+spec:
+  securityContext: # add this
+    runAsUser: 1000 # add user
+    runAsGroup: 3000 # add group
+  containers:
+  - image: busybox
+    name: busybox-user
+    command: ["sh", "-c", "sleep 600"]
+EOF
+
+kubectl apply -f busybox-user.yaml
+```
+
+```bash
+# verify - will show a proper user name only if the user exists
+kubectl exec busybox-user -- whoami
+# whoami: unknown uid 1000
+# command terminated with exit code 1
+```
+
+
+```yaml
+cat << EOF > nginx.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx
+spec:
+  containers:
+  - image: nginx
+    name: nginx
+    securityContext:
+      capabilities:
+        add: ["SYS_TIME", "NET_ADMIN"]
+EOF
+
+kubectl apply -f nginx.yaml
+```
+
+
+```yaml
+cat << EOF > busybox-immutable.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: busybox-immutable
+spec:
+  containers:
+  - image: busybox
+    name: busybox-immutable
+    command: ["sh", "-c", "sleep 600"]
+    securityContext: # add this
+      readOnlyRootFilesystem: true # add this to make container immutable
+      privileged: false # add this to prevent container making any node changes
+EOF
+
+kubectl apply -f busybox-immutable.yaml
+```
+
+```bash
+# verify
+kubectl exec busybox-immutable -- touch echo.txt
+# touch: echo.txt: Read-only file system
+# command terminated with exit code 1
+```
+
+
+#### Create Pod Security Policy
+
+```yaml
+cat << EOF > psp.yaml
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: psp-example
+spec:
+  privileged: false
+  seLinux:
+    rule: RunAsAny
+  runAsUser:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: RunAsAny
+  fsGroup:
+    rule: RunAsAny
+EOF
+
+kubectl apply -f psp.yaml
+```
+
+#### Pods need to have access to use Pod Security Policies, and the Service Account i.e. default needs to have access to the same.
+
+```yaml
+cat << EOF > role-psp.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: role-psp
+rules:
+- apiGroups: ['policy']
+  resources: ['podsecuritypolicies']
+  verbs: ['use']
+EOF
+
+kubectl apply -f role-psp.yaml
+
+cat << EOF > role-psp-binding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: role-psp-binding
+roleRef:
+  kind: ClusterRole
+  name: role-psp
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: default
+EOF
+
+kubectl apply -f role-psp-binding.yaml
+```
+
+#### Update `/etc/kubernetes/manifests/kube-apiserver.yaml` to enable `PodSecurityPolicy`
+
+```yaml
+--enable-admission-plugins=NodeRestriction,PodSecurityPolicy # update the admission plugins
+```
+
+#### Verify
+
+```bash
+kubectl apply -f nginx.yaml
+# Error from server (Forbidden): error when creating "nginx.yaml": pods "nginx" is forbidden: PodSecurityPolicy: unable to admit pod: [spec.volumes[0]: Invalid value: "secret": secret volumes are not allowed to be used spec.containers[0].securityContext.privileged: Invalid value: true: Privileged containers are not allowed]
+```
+
+
+```yaml
+cat << EOF > psp.yaml
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: psp-example
+spec:
+  privileged: false
+  seLinux:
+    rule: RunAsAny
+  runAsUser:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: RunAsAny
+  fsGroup:
+    rule: RunAsAny
+  volumes: # add the volumes
+  - 'configMap'
+  - 'secret'
+EOF
+
+kubectl apply -f psp.yaml
+```
+
+#### Verify
+
+```bash
+kubectl apply -f nginx.yaml
+# Error from server (Forbidden): error when creating "nginx.yaml": pods "nginx" is forbidden: PodSecurityPolicy: unable to admit pod: [spec.volumes[0]: Invalid value: "emptyDir": emptyDir volumes are not allowed to be used]
+
+# NOTE : If the pod is created check for other psp which allows the creation and delete the same.
+```
+
+ +```yaml +cat << EOF > psp.yaml +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: psp-example +spec: + privileged: false + seLinux: + rule: RunAsAny + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + fsGroup: + rule: RunAsAny + volumes: + - 'configMap' + - 'secret' + - 'hostPath' + allowedHostPaths: # add the allowed host paths + - pathPrefix: "/data" + readOnly: true +EOF + +kubectl apply -f psp.yaml +``` + +#### Verify + +```bash +kubectl apply -f nginx.yaml +# Error from server (Forbidden): error when creating "nginx.yaml": pods "nginx" is forbidden: PodSecurityPolicy: unable to admit pod: [spec.containers[0].volumeMounts[0].readOnly: Invalid value: false: must be read-only] +``` + +```yaml +cat << EOF > nginx.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + containers: + - image: nginx + name: nginx + resources: {} + volumeMounts: + - mountPath: /test-pd + name: test-volume + readOnly: true # add this + volumes: + - name: test-volume + hostPath: + path: /data + type: Directory +EOF +``` + +#### Verify + +```bash +kubectl apply -f nginx.yaml +# pod/nginx created +``` + +
+ +```bash +kubectl get pods +# OR +kubectl get po +``` +
+ +```bash +kubectl run nginx --image=nginx +``` + +
+ +```bash +kubectl create namespace my-website +kubectl run mongo --image=mongo --namespace=my-website +``` + +
+ +```bash +kubectl create namespace alpha +kubectl run nginx --image=nginx --namespace=alpha +``` + +
+ +```bash +kubectl run custom-nginx --image=nginx --port=8080 +``` + +
+ +```bash +kubectl get pods -o wide +``` + +
+ +```bash +kubectl get pods -o name +``` + +
+ +```bash +kubectl delete pod nginx +``` + +
+ +```bash +kubectl delete pod nginx --namespace=alpha +``` + +
+ +```bash +kubectl run nginx-labels --image=nginx --labels=name=nginx,tier=frontend,env=dev,version=v1 +``` + +OR + +```yaml +cat << EOF > nginx-labels.yaml +apiVersion: v1 +kind: Pod +metadata: + labels: + env: dev + name: nginx + tier: frontend + version: v1 + name: nginx-labels +spec: + containers: + - image: nginx + name: nginx +EOF + +kubectl apply -f nginx-labels.yaml +``` + +
+ +```bash +kubectl delete pod nginx-labels --force --grace-period=0 +``` + +
+ +```bash +kubectl run nginx-file --image=nginx --dry-run=client -o yaml > nginx-file.yaml +kubectl apply -f nginx-file.yaml +``` +
+
+```bash
+kubectl run nginx --image=nginx
+kubectl get pod nginx -o yaml > nginx_definition.yaml
+```
+
+
+```bash
+kubectl run ubuntu-1 --image=ubuntu --command -- sleep 4800
+```
+
+ +```bash +kubectl create namespace web +kubectl run cache --image redis:3.2 --port 6379 --namespace web +``` + +
+ +Add the label to a node: + +```bash +kubectl label nodes node01 accelerator=nvidia-tesla-p100 +``` + +We can use the 'nodeSelector' property on the Pod YAML: + +```yaml +cat << EOF > nginx-node-selector.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-node-selector +spec: + containers: + - name: nginx-node-selector + image: nginx + nodeSelector: # add this + accelerator: nvidia-tesla-p100 # the selection label +EOF + +kubectl apply -f nginx-node-selector.yaml +``` + +OR + +Use node affinity (https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/#schedule-a-pod-using-required-node-affinity) + +```yaml +cat << EOF > nginx-node-selector.yaml +apiVersion: v1 +kind: Pod +metadata: + name: affinity-pod +spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: accelerator + operator: In + values: + - nvidia-tesla-p100 + containers: + - name: nginx-node-selector + image: nginx +EOF + +kubectl apply -f nginx-node-selector.yaml + +``` + +
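+To confirm scheduling, check which node each pod landed on (assuming `node01` is the only node carrying the label):
+
+```bash
+kubectl get pods nginx-node-selector affinity-pod -o wide
+# the NODE column should show node01 for both pods
+```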
+ +```bash +kubectl annotate pod nginx-annotations description- +``` + +
+ +```bash +kubectl delete pod nginx-annotations --force +``` + +
+ +```bash +kubectl run nginx-resources --image=nginx --restart=Never --requests='cpu=100m,memory=256Mi' --limits='cpu=200m,memory=512Mi' +``` + +OR + +```yaml +cat << EOF > nginx-resources.yaml +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + run: nginx-resources + name: nginx-resources +spec: + containers: + - image: nginx + name: nginx-resources + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + dnsPolicy: ClusterFirst + restartPolicy: Never +status: {} +EOF + +kubectl apply -f nginx-resources.yaml +``` + +
+
+#### Check the static pod path in the kubelet config file
+
+```bash
+ps -ef | grep kubelet
+# root 2794 1 3 07:43 ? 00:01:05 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --config=/var/lib/kubelet/config.yaml --cgroup-driver=systemd --network-plugin=cni --pod-infra-container-image=k8s.gcr.io/pause:3.2 --resolv-conf=/run/systemd/resolve/resolv.conf
+
+# Check the config file @ /var/lib/kubelet/config.yaml for the staticPodPath property
+staticPodPath: /etc/kubernetes/manifests
+```
+
+#### Execute the below on node01
+
+```yaml
+mkdir /etc/kubernetes/manifests # create the static pod path, if it does not exist.
+
+cat << EOF > /etc/kubernetes/manifests/webtool.yaml # the manifest must be placed in the static pod path
+apiVersion: v1
+kind: Pod
+metadata:
+  creationTimestamp: null
+  labels:
+    run: webtool
+  name: webtool
+spec:
+  containers:
+  - image: httpd
+    name: webtool
+    resources: {}
+  dnsPolicy: ClusterFirst
+  restartPolicy: Always
+status: {}
+EOF
+
+systemctl restart kubelet # if required
+```
+
+#### Check on the controlplane node
+
+```bash
+kubectl get pods
+# NAME             READY   STATUS    RESTARTS   AGE
+# webtool-node01   1/1     Running   0          11s
+```
+
+ +```bash +kubectl run nginx-readiness --image=nginx --restart=Never --dry-run=client -o yaml > nginx-readiness.yaml +``` + +Edit `nginx-readiness.yaml` file to add `readinessProbe` probe as below and apply `kubectl apply -f nginx-readiness.yaml` + +```YAML +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + run: nginx + name: nginx +spec: + containers: + - image: nginx + imagePullPolicy: IfNotPresent + name: nginx + resources: {} + readinessProbe: # declare the readiness probe + httpGet: # add this line + path: / # + port: 80 # + dnsPolicy: ClusterFirst + restartPolicy: Never +status: {} +``` + +
+ +```bash +kubectl run nginx-liveness --image=nginx --restart=Never --dry-run=client -o yaml > nginx-liveness.yaml +``` + +Edit `nginx-liveness.yaml` file to add `livenessProbe` probe as below and apply `kubectl apply -f nginx-liveness.yaml` + +```YAML +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + run: nginx + name: nginx +spec: + containers: + - image: nginx + imagePullPolicy: IfNotPresent + name: nginx + resources: {} + livenessProbe: # add liveness probe + exec: # add this line + command: # command definition + - ls # ls command + dnsPolicy: ClusterFirst + restartPolicy: Never +status: {} +``` +
+ +#### Edit `nginx-liveness.yaml` file to update `livenessProbe` probe as below. Delete and recreate pod using `kubectl apply -f nginx-liveness.yaml` + +```YAML +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + run: nginx + name: nginx +spec: + containers: + - image: nginx + imagePullPolicy: IfNotPresent + name: nginx + resources: {} + livenessProbe: + initialDelaySeconds: 30 # add this line + periodSeconds: 5 # add this line as well + exec: + command: + - ls + dnsPolicy: ClusterFirst + restartPolicy: Never +status: {} +``` + +
+ +```bash +kubectl get pod liveness-exec -w # pod restarts due to failed liveness check +# NAME READY STATUS RESTARTS AGE +# liveness-exec 1/1 Running 0 17s +# liveness-exec 1/1 Running 1 76s + +kubectl describe pod liveness-exec + +# Normal Started 69s (x2 over 2m22s) kubelet, node01 Started container liveness +# Warning Unhealthy 25s (x6 over 110s) kubelet, node01 Liveness probe failed: cat: can't open '/tmp/healthy': No such file or directory +# Normal Killing 25s (x2 over 100s) kubelet, node01 Container liveness failed liveness probe, will be restarted +``` + +
+ +Check the `/etc/kubernetes/manifests/kube-apiserver.yaml` for the `--authorization-mode=Node,RBAC` + +
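+For example, to check without opening the file:
+
+```bash
+grep authorization-mode /etc/kubernetes/manifests/kube-apiserver.yaml
+# - --authorization-mode=Node,RBAC
+```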
+ +```bash +kubectl create role pods-read --verb=get,create,list,delete --resource=pods +``` + +OR + +```yaml +cat << EOF > pods-read.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pods-read +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - create + - list + - delete +EOF + +kubectl apply -f pods-read.yaml +``` + +```bash +# verify +kubectl get role pods-read +# NAME CREATED AT +# pods-read 2021-12-13T01:35:10Z +``` + +
+ +```bash +kubectl create sa sample-sa +``` + +OR + +```yaml +cat << EOF > sample-sa.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + name: sample-sa +EOF + +kubectl apply -f sample-sa.yaml +``` + +```bash +# verify +kubectl get serviceaccount sample-sa +# NAME SECRETS AGE +# sample-sa 1 14s +``` + +
+ +```bash +kubectl create rolebinding sample-sa-pods-read-role-binding --serviceaccount=default:sample-sa --role=pods-read +``` + +OR + +```yaml +cat << EOF > sample-sa-pods-read-role-binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + name: sample-sa-pods-read-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pods-read +subjects: +- kind: ServiceAccount + name: sample-sa + namespace: default +EOF + +kubectl apply -f sample-sa-pods-read-role-binding.yaml +``` + +```bash +# verify +kubectl get rolebinding sample-sa-pods-read-role-binding +# NAME ROLE AGE +# sample-sa-pods-read-role-binding Role/pods-read 18s +``` + +
+ +```bash +# verify +kubectl auth can-i get pods --as system:serviceaccount:default:sample-sa +# yes +``` +
+ +```bash +kubectl create clusterrole proxy-admin-role --resource=nodes --verb=get,list,create,update +kubectl create clusterrolebinding proxy-admin-role-binding --user=proxy-admin --clusterrole=proxy-admin-role +``` + +```bash +# verify +kubectl auth can-i get nodes --as proxy-admin +# yes +``` + +
+
+```bash
+kubectl create serviceaccount cicd-token -n finance
+kubectl create role deployment-role --resource=deployments --verb=get,list,create,update -n finance
+kubectl create rolebinding deployment-role-binding --serviceaccount=finance/cicd-token --role=deployment-role -n finance
+```
+
+```bash
+# verify
+kubectl auth can-i create deployments --as system:serviceaccount:finance:cicd-token -n finance
+# yes
+```
+
+ +```bash +kubectl get replicasets +# OR +kubectl get rs +``` + +
+ +```yaml +cat << EOF > replica-set-demo.yaml +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: replica-set-demo +spec: + replicas: 1 + selector: + matchLabels: + tier: frontend + template: + metadata: + labels: + tier: frontend + spec: + containers: + - name: nginx + image: nginx +EOF + +kubectl apply -f replica-set-demo.yaml +``` + +
+
+```bash
+kubectl scale replicaset replica-set-demo --replicas=2
+```
+
+OR
+
+Edit the replica set definition file `replica-set-demo.yaml` and apply `kubectl apply -f replica-set-demo.yaml`
+
+```yaml
+apiVersion: apps/v1
+kind: ReplicaSet
+metadata:
+  name: replica-set-demo
+spec:
+  replicas: 2 # update this
+  selector:
+    matchLabels:
+      tier: frontend
+  template:
+    metadata:
+      labels:
+        tier: frontend
+    spec:
+      containers:
+      - name: nginx
+        image: nginx
+```
+
+
+```bash
+kubectl scale replicaset replica-set-demo --replicas=1
+```
+
+OR
+
+#### Edit the replica set definition file `replica-set-demo.yaml` and apply `kubectl apply -f replica-set-demo.yaml`
+
+```yaml
+apiVersion: apps/v1
+kind: ReplicaSet
+metadata:
+  name: replica-set-demo
+spec:
+  replicas: 1 # update this
+  selector:
+    matchLabels:
+      tier: frontend
+  template:
+    metadata:
+      labels:
+        tier: frontend
+    spec:
+      containers:
+      - name: nginx
+        image: nginx
+```
+
+
+#### Check the correct apiVersion using `kubectl explain replicasets`, which is `apps/v1`.
+Update the version in the manifest and apply again.
+
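+For example:
+
+```bash
+kubectl explain replicasets | grep VERSION
+# VERSION:  apps/v1
+```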
+ +The replica set selector field `tier: frontend` does not match the pod labels `tier: nginx`. Correct either of them and reapply. + +
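+A minimal sketch of the fix, aligning the pod template labels with the selector:
+
+```yaml
+spec:
+  selector:
+    matchLabels:
+      tier: frontend # must match the pod template labels below
+  template:
+    metadata:
+      labels:
+        tier: frontend # was: tier: nginx
+```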
+ +```yaml +cat << EOF > nginx.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + runtimeClassName: gvisor + containers: + - image: nginx + name: nginx + restartPolicy: Always +EOF + +kubectl apply -f nginx.yaml + +# NOTE : Pod may not come up as the runtime does not actually exist +``` +
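+
+For the pod to actually start, a matching `RuntimeClass` object must also exist and the node's container runtime must have a handler configured for it. A minimal sketch, assuming the CRI handler for gVisor is named `runsc`:
+
+```yaml
+cat << EOF > gvisor-runtime-class.yaml
+apiVersion: node.k8s.io/v1
+kind: RuntimeClass
+metadata:
+  name: gvisor # referenced by runtimeClassName in the pod spec
+handler: runsc # the CRI handler configured on the node (assumption)
+EOF
+
+kubectl apply -f gvisor-runtime-class.yaml
+```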
+
+#### Apply Seccomp security context
+
+```yaml
+cat << EOF > amicontained.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: amicontained
+spec:
+  securityContext: # add the security context with seccomp profile
+    seccompProfile:
+      type: RuntimeDefault
+  containers:
+  - args:
+    - amicontained
+    image: jess/amicontained
+    name: amicontained
+  restartPolicy: Always
+EOF
+
+kubectl apply -f amicontained.yaml
+```
+
+#### Verify
+
+```bash
+kubectl logs amicontained
+# Container Runtime: kube
+# Has Namespaces:
+# pid: true
+# user: false
+# AppArmor Profile: docker-default (enforce)
+# Capabilities:
+# BOUNDING -> chown dac_override fowner fsetid kill setgid setuid setpcap net_bind_service net_raw sys_chroot mknod audit_write setfcap
+# Seccomp: filtering
+# Blocked Syscalls (62):
+# SYSLOG SETPGID SETSID USELIB USTAT SYSFS VHANGUP PIVOT_ROOT _SYSCTL ACCT SETTIMEOFDAY MOUNT UMOUNT2 SWAPON SWAPOFF REBOOT SETHOSTNAME SETDOMAINNAME IOPL IOPERM CREATE_MODULE INIT_MODULE DELETE_MODULE GET_KERNEL_SYMS QUERY_MODULE QUOTACTL NFSSERVCTL GETPMSG PUTPMSG AFS_SYSCALL TUXCALL SECURITY LOOKUP_DCOOKIE CLOCK_SETTIME VSERVER MBIND SET_MEMPOLICY GET_MEMPOLICY KEXEC_LOAD ADD_KEY REQUEST_KEY KEYCTL MIGRATE_PAGES UNSHARE MOVE_PAGES PERF_EVENT_OPEN FANOTIFY_INIT NAME_TO_HANDLE_AT OPEN_BY_HANDLE_AT CLOCK_ADJTIME SETNS PROCESS_VM_READV PROCESS_VM_WRITEV KCMP FINIT_MODULE KEXEC_FILE_LOAD BPF USERFAULTFD MEMBARRIER PKEY_MPROTECT PKEY_ALLOC PKEY_FREE
+# Looking for Docker.sock
+```
+
+
+#### Copy the audit.json file to the default profiles location `/var/lib/kubelet/seccomp/`
+
+```bash
+mkdir -p /var/lib/kubelet/seccomp/profiles
+cp audit.json /var/lib/kubelet/seccomp/profiles
+```
+
+#### Create nginx pod using the seccomp profile
+
+```yaml
+cat << EOF > audit-pod.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: audit-pod
+  labels:
+    app: audit-pod
+spec:
+  securityContext:
+    seccompProfile:
+      type: Localhost
+      localhostProfile: profiles/audit.json
+  containers:
+  - name: audit-pod
+    image: nginx
+EOF
+
+kubectl apply -f audit-pod.yaml
+```
+
+#### Verify
+
+```bash
+tail -f /var/log/syslog
+# Dec 16 02:07:21 vagrant kernel: [ 2253.183862] audit: type=1326 audit(1639620441.516:20): auid=4294967295 uid=0 gid=0 ses=4294967295 pid=20123 comm="runc:[2:INIT]" exe="/" sig=0 arch=c000003e syscall=233 compat=0 ip=0x55e57ef09bc8 code=0x7ffc0000
+# Dec 16 02:07:21 vagrant kernel: [ 2253.183864] audit: type=1326 audit(1639620441.516:21): auid=4294967295 uid=0 gid=0 ses=4294967295 pid=20123 comm="runc:[2:INIT]" exe="/" sig=0 arch=c000003e syscall=138 compat=0 ip=0x55e57ef5e230 code=0x7ffc0000
+```
+
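+For reference, a minimal sketch of what `audit.json` could contain, assuming it is the log-everything profile from the Kubernetes seccomp tutorial:
+
+```json
+{
+    "defaultAction": "SCMP_ACT_LOG"
+}
+```
+
+With `SCMP_ACT_LOG` no syscalls are blocked; they are only logged, which is what the syslog entries above show.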
+ +```bash +kubectl get secrets +``` + +
+ +```bash +kubectl get secrets --all-namespaces +# OR +kubectl get secrets -A +``` + +
+
+```bash
+kubectl create secret generic db-secret-1 --from-literal=DB_HOST=db.example.com --from-literal=DB_USER=development --from-literal=DB_PASSWD=password
+```
+
+OR
+
+```yaml
+cat << EOF > db-secret-1.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: db-secret-1
+data:
+ DB_HOST: ZGIuZXhhbXBsZS5jb20=
+ DB_PASSWD: cGFzc3dvcmQ=
+ DB_USER: ZGV2ZWxvcG1lbnQ=
+EOF
+
+kubectl apply -f db-secret-1.yaml
+```
+
+```bash
+kubectl describe secret db-secret-1 # verify
+Name: db-secret-1
+Namespace: default
+Labels:       <none>
+Annotations:  <none>
+
+Type:  Opaque
+
+Data
+====
+DB_HOST:    14 bytes
+DB_PASSWD:  8 bytes
+DB_USER:    11 bytes
+```
+
+```bash
+kubectl create secret generic db-secret-2 --from-file=secret.properties
+```
+
+```bash
+kubectl describe secret db-secret-2 # verify
+Name: db-secret-2
+Namespace: default
+Labels:       <none>
+Annotations:  <none>
+
+Type:  Opaque
+
+Data
+====
+secret.properties:  ... bytes
+```
+ +```yaml +cat << EOF > nginx-2.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-2 +spec: + containers: + - image: nginx + name: nginx-2 + env: + - name: DB_HOST + valueFrom: + secretKeyRef: + name: db-secret-1 + key: DB_HOST +EOF + +kubectl apply -f nginx-2.yaml +``` + +```bash +kubectl exec nginx-2 -- env | grep DB_HOST # verify env variables +# DB_HOST=db.example.com +``` + +
+
+```bash
+kubectl create secret generic another-secret --from-literal=key1=value4
+```
+
+```yaml
+cat << EOF > nginx-secret.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx-secret
+spec:
+  containers:
+  - image: nginx
+    name: nginx-secret
+    env:
+    - name: COOL_VARIABLE
+      valueFrom:
+        secretKeyRef:
+          name: another-secret
+          key: key1
+EOF
+
+kubectl apply -f nginx-secret.yaml
+```
+
+```bash
+kubectl exec nginx-secret -- env | grep COOL_VARIABLE # verify env variables
+# COOL_VARIABLE=value4
+```
+
+
+```yaml
+cat << EOF > nginx-3.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx-3
+spec:
+  containers:
+  - image: nginx
+    name: nginx-3
+    envFrom:
+    - secretRef:
+        name: db-secret-1
+EOF
+
+kubectl apply -f nginx-3.yaml
+```
+
+```bash
+kubectl exec nginx-3 -- env | grep DB_ # verify env variables
+# DB_HOST=db.example.com
+# DB_PASSWD=password
+# DB_USER=development
+```
+
+ +```yaml +cat << EOF > nginx-4.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-4 +spec: + containers: + - image: nginx + name: nginx-4 + volumeMounts: + - name: db-secret + mountPath: "/secret" + readOnly: true + volumes: + - name: db-secret + secret: + secretName: db-secret-1 +EOF + +kubectl apply -f nginx-4.yaml +``` + +```bash +kubectl exec nginx-4 -- cat /secret/DB_HOST # verify env variables +# db.example.com +``` + +
+
+```bash
+kubectl create secret tls my-tls-secret --cert=../data/tls.crt --key=../data/tls.key
+```
+
+```bash
+kubectl describe secret my-tls-secret #verify
+Name: my-tls-secret
+Namespace: default
+Labels:       <none>
+Annotations:  <none>
+
+Type:  kubernetes.io/tls
+
+Data
+====
+tls.crt:  ... bytes
+tls.key:  ... bytes
+```
+ +```bash +kubectl create secret docker-registry regcred --docker-server=example.com --docker-username=user_name --docker-password=password --docker-email=user_name@example.com +``` + +```yaml +cat << EOF > nginx-5.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-5 +spec: + containers: + - name: nginx-5 + image: nginx + imagePullSecrets: + - name: regcred +EOF + +kubectl apply -f nginx-5.yaml +``` + +
+
+```bash
+kubectl create serviceaccount sample-sa
+# OR
+kubectl create sa sample-sa
+```
+
+OR
+
+```yaml
+cat << EOF > sample-sa.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: sample-sa
+EOF
+
+kubectl apply -f sample-sa.yaml
+```
+
+```bash
+kubectl describe serviceaccount sample-sa # Verify, no secret and token are created automatically
+Name: sample-sa
+Namespace: default
+Labels:              <none>
+Annotations:         <none>
+Image pull secrets:  <none>
+Mountable secrets:   <none>
+Tokens:              <none>
+Events:              <none>
+```
+ +```yaml +cat << EOF > sample-sa-no-auto-mount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: sample-sa-no-auto-mount +automountServiceAccountToken: false +EOF + +kubectl apply -f sample-sa-no-auto-mount.yaml +``` + +
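+A quick check that the flag is set on the ServiceAccount object:
+
+```bash
+kubectl get serviceaccount sample-sa-no-auto-mount -o jsonpath='{.automountServiceAccountToken}'
+# false
+```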
+ +```bash +kubectl run nginx-sa --image=nginx --serviceaccount=sample-sa +``` + +OR + +```yaml +cat << EOF > nginx-sa.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-sa +spec: + containers: + - image: nginx + name: nginx-sa + serviceAccountName: sample-sa +EOF + +kubectl apply -f nginx-sa.yaml +``` + +
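+Verify the pod is using the service account:
+
+```bash
+kubectl get pod nginx-sa -o jsonpath='{.spec.serviceAccountName}'
+# sample-sa
+```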
+
+```bash
+kubectl run nginx-clusterip --image=nginx --restart=Never --port=80 --expose
+
+kubectl get service nginx-clusterip # verification
+# NAME              TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
+# nginx-clusterip   ClusterIP   10.104.163.30   <none>        80/TCP    ...
+```
+
+```bash
+kubectl run nginx-nodeport --image=nginx --restart=Never --port=80
+kubectl expose pod nginx-nodeport --name nginx-nodeport-svc --type NodePort --port 80 --target-port 80
+```
+
+OR
+
+```yaml
+cat << EOF > nginx-nodeport.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ creationTimestamp: null
+ labels:
+ run: nginx-nodeport
+ name: nginx-nodeport-svc
+spec:
+ ports:
+ - port: 80
+ protocol: TCP
+ targetPort: 80
+ selector:
+ run: nginx-nodeport
+ type: NodePort
+status:
+ loadBalancer: {}
+EOF
+
+kubectl apply -f nginx-nodeport.yaml
+```
+
+```bash
+# verification - port expose might change
+kubectl get svc nginx-nodeport-svc
+# NAME                 TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
+# nginx-nodeport-svc   NodePort   10.106.55.131   <none>        80:3XXXX/TCP   ...
+```
+
+```bash
+kubectl create deploy nginx-deployment --image nginx && kubectl scale deploy nginx-deployment --replicas 3
+kubectl expose deployment nginx-deployment --type NodePort --port 80 --target-port 80 --dry-run=client -o yaml > nginx-deployment-svc.yaml
+```
+
+Edit `nginx-deployment-svc.yaml` to add `nodePort: 30080` and apply `kubectl apply -f nginx-deployment-svc.yaml`
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ creationTimestamp: null
+ labels:
+ app: nginx-deployment
+ name: nginx-deployment
+spec:
+ ports:
+ - port: 80
+ protocol: TCP
+ targetPort: 80
+ nodePort: 30080 # add node port
+ selector:
+ app: nginx-deployment
+ type: NodePort
+status:
+ loadBalancer: {}
+```
+
+```bash
+# verification
+kubectl get service nginx-deployment
+# NAME               TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
+# nginx-deployment   NodePort   10.43.166.122   <none>        80:30080/TCP   ...
+```
+
+```bash
+kubectl taint node node01 app=critical:NoSchedule
+```
+
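+Verify the taint was applied:
+
+```bash
+kubectl describe node node01 | grep -i taint
+# Taints:             app=critical:NoSchedule
+```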
+ +```bash +docker pull nginx:1.21.4 +trivy image --severity CRITICAL nginx:1.21.4 +# nginx:1.21.4 (debian 11.1) +# ========================== +# Total: 7 (CRITICAL: 7) + +docker pull nginx:1.21.4-alpine +trivy image --severity CRITICAL nginx:1.21.4-alpine +# nginx:1.21.4-alpine (alpine 3.14.3) +# =================================== +# Total: 0 (CRITICAL: 0) + +docker pull amazonlinux:2.0.20211201.0 +trivy image --severity CRITICAL amazonlinux:2.0.20211201.0 +# amazonlinux:2.0.20211201.0 (amazon 2 (Karoo)) +# ============================================= +# Total: 0 (CRITICAL: 0) + +``` + +
+
+```bash
+docker pull nginx:1.21.4
+trivy image --severity HIGH --format json --output /root/nginx.json nginx:1.21.4
+# the scan report is written as JSON to /root/nginx.json; only HIGH severity findings are included
+```
+
+
+```yaml
+cat << EOF > nginx-3.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx-3
+spec:
+  containers:
+  - image: nginx
+    name: nginx-3
+    volumeMounts:
+    - name: db-config
+      mountPath: "/config"
+      readOnly: true
+  volumes:
+  - name: db-config
+    configMap:
+      name: db-config-1
+EOF
+
+kubectl apply -f nginx-3.yaml
+
+kubectl exec nginx-3 -- cat /config/DB_HOST # verify the mounted config
+# db.example.com
+```
+
+ +```yaml +cat << EOF > nginx-4.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-4 +spec: + containers: + - image: nginx + name: nginx-4 + volumeMounts: + - name: db-secret + mountPath: "/secret" + readOnly: true + volumes: + - name: db-secret + secret: + secretName: db-secret-1 +EOF + +kubectl apply -f nginx-4.yaml +``` + +```bash +kubectl exec nginx-4 -- cat /secret/DB_HOST # verify env variables +# db.example.com +``` + +
+ +```yaml +cat << EOF > redis.yaml +apiVersion: v1 +kind: Pod +metadata: + name: redis +spec: + containers: + - name: redis + image: redis + volumeMounts: + - name: redis-storage + mountPath: /data/redis + volumes: + - name: redis-storage + emptyDir: {} # Ephemeral storage +EOF + +kubectl apply -f redis.yaml +``` + +
+ +```yaml +kubectl create namespace staging + +cat << EOF > non-persistent-redis.yaml +apiVersion: v1 +kind: Pod +metadata: + name: non-persistent-redis + namespace: staging +spec: + containers: + - name: redis + image: redis + volumeMounts: + - name: cache-control + mountPath: /data/redis + volumes: + - name: cache-control + emptyDir: {} +EOF + +kubectl apply -f non-persistent-redis.yaml +``` + +
+ +```yaml +cat << EOF > app-data.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: app-data +spec: + storageClassName: manual + capacity: + storage: 200Mi + accessModes: + - ReadWriteMany + hostPath: + path: "/srv/app-data" +EOF + +kubectl apply -f app-data.yaml + +kubectl get pv +# NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE +# app-data 200Mi RWX Retain Available manual +``` + +
+ +```yaml +cat << EOF > task-pv-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: task-pv-volume +spec: + storageClassName: manual + capacity: + storage: 10Mi + accessModes: + - ReadWriteOnce + hostPath: + path: "/mnt/data" +EOF + +kubectl apply -f task-pv-volume.yaml + +kubectl get pv +# NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE +# task-pv-volume 10Mi RWO Retain Available manual 6s +``` + +```yaml +cat << EOF > task-pv-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: task-pv-claim +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Mi +EOF + +kubectl apply -f task-pv-claim.yaml + +kubectl get pvc +#NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +#task-pv-claim Bound task-pv-volume 10Mi RWO manual 12s +kubectl get pv # check status bound +#NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE +#task-pv-volume 10Mi RWO Retain Bound default/task-pv-claim manual 64s +``` + +```yaml +cat << EOF > task-pv-pod.yaml +apiVersion: v1 +kind: Pod +metadata: + name: task-pv-pod +spec: + volumes: + - name: task-pv-storage + persistentVolumeClaim: + claimName: task-pv-claim + containers: + - name: task-pv-pod + image: nginx + ports: + - containerPort: 80 + name: "http-server" + volumeMounts: + - mountPath: "/usr/share/nginx/html" + name: task-pv-storage +EOF + +kubectl apply -f task-pv-pod.yaml +``` + +
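+As a quick check that the claim is mounted, write a file through the mount and read it back (a sketch; the pod serves whatever lands in `/mnt/data` on the node):
+
+```bash
+kubectl exec task-pv-pod -- sh -c 'echo "hello from pv" > /usr/share/nginx/html/index.html'
+kubectl exec task-pv-pod -- cat /usr/share/nginx/html/index.html
+# hello from pv
+```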
+ +```bash +kubectl get storageclass +# OR +kubectl get sc +``` +
+ +```bash +rm nginx-3.yaml nginx-4.yaml redis.yaml +kubectl delete pod task-pv-pod redis nginx-3 nginx-4 --force +kubectl delete pvc task-pv-claim +kubectl delete pv task-pv-volume +``` + +