From c04274e7b73f36c0807e0e2358089a558efddf40 Mon Sep 17 00:00:00 2001 From: seifrajhi Date: Sat, 9 Mar 2024 08:36:58 +0100 Subject: [PATCH] feat: add k8s-certifications --- k8s-certifications/README.md | 44 + ...architecture_installation_configuration.md | 50 + .../cka/2.workloads_scheduling.md | 52 + .../cka/3.services_networking.md | 43 + k8s-certifications/cka/4.storage.md | 35 + k8s-certifications/cka/5.troubleshooting.md | 54 ++ k8s-certifications/cka/README.md | 16 + .../ckad/1.application_design_build.md | 35 + .../ckad/2.application_deployment.md | 25 + ...3.application_observability_maintenance.md | 32 + ...tion_environment_configuration_security.md | 53 + .../ckad/5.services_networking.md | 27 + k8s-certifications/ckad/README.md | 17 + k8s-certifications/cks/1.cluster_setup.md | 51 + k8s-certifications/cks/2.cluster_hardening.md | 35 + k8s-certifications/cks/3.system_hardening.md | 36 + ...4.minimize_microservice_vulnerabilities.md | 37 + .../cks/5.supply_chain_security.md | 33 + .../6.monitoring_logging_runtime_security.md | 53 + k8s-certifications/cks/README.md | 21 + .../data/ImagePolicyWebhook/webhook.crt | 23 + .../data/ImagePolicyWebhook/webhook.key | 28 + k8s-certifications/data/Seccomp/audit.json | 3 + k8s-certifications/data/kubeconfig.yaml | 67 ++ k8s-certifications/data/tls.crt | 31 + k8s-certifications/data/tls.key | 52 + k8s-certifications/topics/README.md | 45 + .../topics/admission_controllers.md | 214 +++++ k8s-certifications/topics/annotations.md | 32 + k8s-certifications/topics/api_deprecations.md | 59 ++ k8s-certifications/topics/apis.md | 36 + k8s-certifications/topics/apparmor.md | 136 +++ k8s-certifications/topics/auditing.md | 74 ++ k8s-certifications/topics/authentication.md | 132 +++ .../topics/binary_verification.md | 22 + k8s-certifications/topics/cluster_upgrade.md | 328 +++++++ k8s-certifications/topics/configmaps.md | 260 +++++ k8s-certifications/topics/daemonsets.md | 74 ++ k8s-certifications/topics/debugging.md | 205 ++++ k8s-certifications/topics/deployments.md | 905 ++++++++++++++++++ k8s-certifications/topics/docker.md | 59 ++ k8s-certifications/topics/etcd.md | 99 ++ k8s-certifications/topics/falco.md | 31 + k8s-certifications/topics/ingress.md | 179 ++++ k8s-certifications/topics/init_containers.md | 170 ++++ k8s-certifications/topics/jobs.md | 475 +++++++++ k8s-certifications/topics/jsonpath.md | 94 ++ k8s-certifications/topics/kube-bench.md | 80 ++ k8s-certifications/topics/kubeconfig.md | 109 +++ k8s-certifications/topics/kubelet_security.md | 57 ++ k8s-certifications/topics/kubesec.md | 263 +++++ k8s-certifications/topics/labels.md | 223 +++++ k8s-certifications/topics/logging.md | 79 ++ k8s-certifications/topics/monitoring.md | 30 + .../topics/multi_container_pods.md | 265 +++++ k8s-certifications/topics/namespaces.md | 95 ++ k8s-certifications/topics/network_policies.md | 219 +++++ k8s-certifications/topics/nodes.md | 90 ++ .../topics/pod_security_context.md | 145 +++ .../topics/pod_security_policies.md | 255 +++++ k8s-certifications/topics/pods.md | 464 +++++++++ k8s-certifications/topics/probes.md | 181 ++++ k8s-certifications/topics/rbac.md | 221 +++++ k8s-certifications/topics/replica_set.md | 206 ++++ k8s-certifications/topics/runtimes.md | 42 + k8s-certifications/topics/seccomp.md | 205 ++++ k8s-certifications/topics/secrets.md | 323 +++++++ k8s-certifications/topics/service_accounts.md | 108 +++ k8s-certifications/topics/services.md | 115 +++ .../topics/taints_tolerations.md | 18 + k8s-certifications/topics/trivy.md | 74 ++ 
k8s-certifications/topics/volumes.md | 308 ++++++ 72 files changed, 8757 insertions(+) create mode 100644 k8s-certifications/README.md create mode 100644 k8s-certifications/cka/1.cluster_architecture_installation_configuration.md create mode 100644 k8s-certifications/cka/2.workloads_scheduling.md create mode 100644 k8s-certifications/cka/3.services_networking.md create mode 100644 k8s-certifications/cka/4.storage.md create mode 100644 k8s-certifications/cka/5.troubleshooting.md create mode 100644 k8s-certifications/cka/README.md create mode 100644 k8s-certifications/ckad/1.application_design_build.md create mode 100644 k8s-certifications/ckad/2.application_deployment.md create mode 100644 k8s-certifications/ckad/3.application_observability_maintenance.md create mode 100644 k8s-certifications/ckad/4.application_environment_configuration_security.md create mode 100644 k8s-certifications/ckad/5.services_networking.md create mode 100644 k8s-certifications/ckad/README.md create mode 100644 k8s-certifications/cks/1.cluster_setup.md create mode 100644 k8s-certifications/cks/2.cluster_hardening.md create mode 100644 k8s-certifications/cks/3.system_hardening.md create mode 100644 k8s-certifications/cks/4.minimize_microservice_vulnerabilities.md create mode 100644 k8s-certifications/cks/5.supply_chain_security.md create mode 100644 k8s-certifications/cks/6.monitoring_logging_runtime_security.md create mode 100644 k8s-certifications/cks/README.md create mode 100644 k8s-certifications/data/ImagePolicyWebhook/webhook.crt create mode 100644 k8s-certifications/data/ImagePolicyWebhook/webhook.key create mode 100644 k8s-certifications/data/Seccomp/audit.json create mode 100644 k8s-certifications/data/kubeconfig.yaml create mode 100644 k8s-certifications/data/tls.crt create mode 100644 k8s-certifications/data/tls.key create mode 100644 k8s-certifications/topics/README.md create mode 100644 k8s-certifications/topics/admission_controllers.md create mode 100644 k8s-certifications/topics/annotations.md create mode 100644 k8s-certifications/topics/api_deprecations.md create mode 100644 k8s-certifications/topics/apis.md create mode 100644 k8s-certifications/topics/apparmor.md create mode 100644 k8s-certifications/topics/auditing.md create mode 100644 k8s-certifications/topics/authentication.md create mode 100644 k8s-certifications/topics/binary_verification.md create mode 100644 k8s-certifications/topics/cluster_upgrade.md create mode 100644 k8s-certifications/topics/configmaps.md create mode 100644 k8s-certifications/topics/daemonsets.md create mode 100644 k8s-certifications/topics/debugging.md create mode 100644 k8s-certifications/topics/deployments.md create mode 100644 k8s-certifications/topics/docker.md create mode 100644 k8s-certifications/topics/etcd.md create mode 100644 k8s-certifications/topics/falco.md create mode 100644 k8s-certifications/topics/ingress.md create mode 100644 k8s-certifications/topics/init_containers.md create mode 100644 k8s-certifications/topics/jobs.md create mode 100644 k8s-certifications/topics/jsonpath.md create mode 100644 k8s-certifications/topics/kube-bench.md create mode 100644 k8s-certifications/topics/kubeconfig.md create mode 100644 k8s-certifications/topics/kubelet_security.md create mode 100644 k8s-certifications/topics/kubesec.md create mode 100644 k8s-certifications/topics/labels.md create mode 100644 k8s-certifications/topics/logging.md create mode 100644 k8s-certifications/topics/monitoring.md create mode 100644 k8s-certifications/topics/multi_container_pods.md 
create mode 100644 k8s-certifications/topics/namespaces.md create mode 100644 k8s-certifications/topics/network_policies.md create mode 100644 k8s-certifications/topics/nodes.md create mode 100644 k8s-certifications/topics/pod_security_context.md create mode 100644 k8s-certifications/topics/pod_security_policies.md create mode 100644 k8s-certifications/topics/pods.md create mode 100644 k8s-certifications/topics/probes.md create mode 100644 k8s-certifications/topics/rbac.md create mode 100644 k8s-certifications/topics/replica_set.md create mode 100644 k8s-certifications/topics/runtimes.md create mode 100644 k8s-certifications/topics/seccomp.md create mode 100644 k8s-certifications/topics/secrets.md create mode 100644 k8s-certifications/topics/service_accounts.md create mode 100644 k8s-certifications/topics/services.md create mode 100644 k8s-certifications/topics/taints_tolerations.md create mode 100644 k8s-certifications/topics/trivy.md create mode 100644 k8s-certifications/topics/volumes.md diff --git a/k8s-certifications/README.md b/k8s-certifications/README.md new file mode 100644 index 0000000..b6f2304 --- /dev/null +++ b/k8s-certifications/README.md @@ -0,0 +1,44 @@ +[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg?style=flat-square)](LICENSE) +[![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat-square)](http://makeapullrequest.com) + +# Kubernetes Exercises + +This collection covers a set of exercises that are categorized by topic and mapped back to the individual Kubernetes certification exams. +Although the exam pattern keeps changing, the topics remain more or less the same, so the exercises are organized per topic and referenced from each exam's curriculum. + +## Kubernetes Playground + +Try out the Killercoda Kubernetes playground, which provides a two-node Kubernetes cluster that is good enough to complete almost all of the exercises. + +[Killercoda](https://killercoda.com/playgrounds/scenario/kubernetes) +~~[Katacoda Kubernetes Playground](https://www.katacoda.com/courses/kubernetes/playground)~~ + + + + +## Structure + + - [Certified Kubernetes Administrator (CKA)](cka) covers topics for the CKA exam. - [Certified Kubernetes Application Developer (CKAD)](ckad) covers topics for the CKAD exam. - [Certified Kubernetes Security Specialist (CKS)](cks) covers topics for the CKS exam. - [Data](data) provides any data required for the exercises. - [Topics](topics) covers the individual topics. + +## Exam Pattern & Tips + + - CKA/CKAD/CKS are open-book exams. - Exams are regularly updated to the latest Kubernetes version and are currently based on 1.28. - Exams require you to solve 15-20 questions in 2 hours. - Make use of imperative commands as much as possible. - You will have an online notepad in the right corner for notes. I hardly used it, but it can be useful for typing and modifying text instead of using the Vi editor. - You are allowed to open one additional browser tab with documentation from kubernetes.io or other products such as Falco. Do not open any other windows. - Exam questions can be attempted in any order and don't have to be solved sequentially, so be sure to move ahead and come back later.
+ + + + + + + + + diff --git a/k8s-certifications/cka/1.cluster_architecture_installation_configuration.md b/k8s-certifications/cka/1.cluster_architecture_installation_configuration.md new file mode 100644 index 0000000..d2cdc77 --- /dev/null +++ b/k8s-certifications/cka/1.cluster_architecture_installation_configuration.md @@ -0,0 +1,50 @@ +# Cluster Architecture, Installation & Configuration - 25% + +
+ +## Manage role based access control (RBAC) + +
+ +Refer [RBAC](../topics/rbac.md) + +
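+
+As a quick illustration (the role name `pod-reader` and the user `jane` are placeholders, not part of the linked exercises), a Role and RoleBinding can be created imperatively and verified with `kubectl auth can-i`:
+
+```bash
+# allow read-only access to pods in the default namespace
+kubectl create role pod-reader --verb=get,list,watch --resource=pods -n default
+kubectl create rolebinding pod-reader-binding --role=pod-reader --user=jane -n default
+# verify the binding works
+kubectl auth can-i list pods -n default --as jane
+```
+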
+ +## Use Kubeadm to install a basic cluster + +
+ +Refer [Creating cluster using Kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) + +
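+
+A rough sketch of the kubeadm flow (the pod network CIDR, token and hash below are placeholders; the real join command is printed by `kubeadm init`):
+
+```bash
+# on the control plane node
+kubeadm init --pod-network-cidr=10.244.0.0/16
+mkdir -p $HOME/.kube && sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+# install a CNI plugin, then on each worker node run the join command printed by kubeadm init
+kubeadm join <control-plane-ip>:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>
+```
+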
+ +## Manage a highly-available Kubernetes cluster + +
+ +Refer [Creating HA Kubernetes cluster](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) + +
+ +## Provision underlying infrastructure to deploy a Kubernetes cluster + +
+ +TBD + +
+ +## Perform a version upgrade on a Kubernetes cluster using Kubeadm + +
+ +Refer [Upgrading Kubeadm Clusters](../topics/cluster_upgrade.md) + +
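+
+A condensed sketch of the control plane upgrade (the target version `1.28.1` and the apt package pinning are only examples; worker nodes follow a similar drain/upgrade/uncordon cycle):
+
+```bash
+apt-get update && apt-get install -y kubeadm=1.28.1-00   # example target version
+kubeadm upgrade plan
+kubeadm upgrade apply v1.28.1
+kubectl drain controlplane --ignore-daemonsets
+apt-get install -y kubelet=1.28.1-00 kubectl=1.28.1-00
+systemctl daemon-reload && systemctl restart kubelet
+kubectl uncordon controlplane
+```
+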
+ +## Implement etcd backup and restore + +
+ +Refer [ETCD](../topics/etcd.md) + diff --git a/k8s-certifications/cka/2.workloads_scheduling.md b/k8s-certifications/cka/2.workloads_scheduling.md new file mode 100644 index 0000000..bbf0571 --- /dev/null +++ b/k8s-certifications/cka/2.workloads_scheduling.md @@ -0,0 +1,52 @@ +# Workloads & Scheduling - 15% + +
+ +## Understand deployments and how to perform rolling update and rollbacks + +
+ +Refer [Deployment Rollouts](../topics/deployments.md#deployment-rollout) + +
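+
+For example (assuming a deployment named `nginx`):
+
+```bash
+kubectl set image deployment/nginx nginx=nginx:1.21   # trigger a rolling update
+kubectl rollout status deployment/nginx               # watch the rollout progress
+kubectl rollout history deployment/nginx              # list revisions
+kubectl rollout undo deployment/nginx                 # roll back to the previous revision
+```
+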
+ +## Use ConfigMaps and Secrets to configure applications + +
+ +Refer [ConfigMaps](../topics/configmaps.md) +Refer [Secrets](../topics/secrets.md) + +
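+
+A minimal sketch (object names and values are placeholders): create both objects imperatively and consume them as environment variables.
+
+```bash
+kubectl create configmap app-config --from-literal=APP_MODE=prod
+kubectl create secret generic app-secret --from-literal=DB_PASSWORD=example
+```
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: app-pod
+spec:
+  containers:
+  - name: app
+    image: nginx
+    envFrom:
+    - configMapRef:
+        name: app-config
+    - secretRef:
+        name: app-secret
+```
+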
+ +## Know how to scale applications + +
+ +Refer [Deployment Scaling](../topics/deployments.md#deployment-scaling) + +
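+
+For example (deployment name assumed):
+
+```bash
+kubectl scale deployment nginx --replicas=5
+kubectl autoscale deployment nginx --min=2 --max=10 --cpu-percent=80   # HPA, requires metrics-server
+```
+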
+ +## Understand the primitives used to create robust, self-healing, application deployments + +
+ +Refer [Deployment Self Healing](../topics/deployments.md#deployment-self-healing) + +
+ +## Understand how resource limits can affect Pod scheduling + +
+ +Refer [Resources - Requests & Limits](../topics/pods.md#resources) + +
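+
+Scheduling is driven by the *requests*: a pod stays `Pending` if no node has enough unreserved CPU and memory to satisfy them. A minimal sketch (values are arbitrary):
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: resource-demo
+spec:
+  containers:
+  - name: app
+    image: nginx
+    resources:
+      requests:
+        cpu: 250m        # used by the scheduler to pick a node
+        memory: 64Mi
+      limits:
+        cpu: 500m        # enforced at runtime
+        memory: 128Mi
+```
+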
+ +## Awareness of manifest management and common templating tools + +
+ +TBD + +
diff --git a/k8s-certifications/cka/3.services_networking.md b/k8s-certifications/cka/3.services_networking.md new file mode 100644 index 0000000..e24ae63 --- /dev/null +++ b/k8s-certifications/cka/3.services_networking.md @@ -0,0 +1,43 @@ +# Services & Networking - 20% + +
+ +## Understand host networking configuration on the cluster nodes + +
+ +TBD + +
+ +## Understand connectivity between Pods + +
+ +Refer [Cluster Networking](https://kubernetes.io/docs/concepts/cluster-administration/networking/) + +
+ +## Understand ClusterIP, NodePort, LoadBalancer service types and endpoints + +Refer [Services](../topics/services.md) + +## Know how to use Ingress controllers and Ingress resources + +Refer [Ingress](../topics/ingress.md) + +## Know how to configure and use CoreDNS + +
+ +Refer [CoreDNS for Service Discovery](https://kubernetes.io/docs/tasks/administer-cluster/coredns/) + +
+ +## Choose an appropriate container network interface plugin + +
+ +Refer [Network Plugins](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) + +
diff --git a/k8s-certifications/cka/4.storage.md b/k8s-certifications/cka/4.storage.md new file mode 100644 index 0000000..560f887 --- /dev/null +++ b/k8s-certifications/cka/4.storage.md @@ -0,0 +1,35 @@ +# Storage - 10% + +
+ +## Understand storage classes, persistent volumes + +
+ +Refer [Volumes](../topics/volumes.md) + +
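+
+A minimal sketch of a StorageClass and a manually provisioned PersistentVolume (names, size and the hostPath are placeholders):
+
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: local-storage
+provisioner: kubernetes.io/no-provisioner   # no dynamic provisioning
+volumeBindingMode: WaitForFirstConsumer
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: pv-log
+spec:
+  capacity:
+    storage: 100Mi
+  accessModes:
+    - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: local-storage
+  hostPath:
+    path: /pv/log
+```
+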
+ +## Understand volume mode, access modes and reclaim policies for volumes + +
+ +Refer [PV Volume mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#volume-mode), [PV Access modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) and [PV Reclaim policies](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaim-policy) + +
+ +## Understand persistent volume claims primitive + +
+ +Refer [Persistent Volume Claims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) + +
+ +## Know how to configure applications with persistent storage + +
+ +Refer [Volumes](../topics/volumes.md) + +
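+
+A minimal sketch wiring a claim into a pod (names and sizes are placeholders):
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: pvc-log
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 50Mi
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: webapp
+spec:
+  containers:
+  - name: webapp
+    image: nginx
+    volumeMounts:
+    - name: log-volume
+      mountPath: /var/log/nginx
+  volumes:
+  - name: log-volume
+    persistentVolumeClaim:
+      claimName: pvc-log
+```
+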
\ No newline at end of file diff --git a/k8s-certifications/cka/5.troubleshooting.md b/k8s-certifications/cka/5.troubleshooting.md new file mode 100644 index 0000000..3a5a445 --- /dev/null +++ b/k8s-certifications/cka/5.troubleshooting.md @@ -0,0 +1,54 @@ +# Troubleshooting - 30% + +
+ +## Evaluate cluster and node logging + +
+ +Refer [Cluster Logging](https://kubernetes.io/docs/tasks/debug-application-cluster/debug-cluster/#looking-at-logs) + +
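+
+Typical starting points (the pod name below matches a kubeadm control plane; adjust to your cluster):
+
+```bash
+kubectl logs -n kube-system kube-apiserver-controlplane   # control plane pods (static pods on kubeadm)
+journalctl -u kubelet                                     # kubelet runs as a systemd service on each node
+ls /var/log/pods/                                         # raw container log files kept on the node
+```
+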
+ +## Understand how to monitor applications + +
+ +Refer [Monitoring](../topics/monitoring.md) + +
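+
+For example, with the metrics-server installed:
+
+```bash
+kubectl top nodes
+kubectl top pods -A --sort-by=memory
+```
+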
+ +## Manage container stdout & stderr logs + +
+ +TBD + +
+ +## Troubleshoot application failure + +
+ +Refer [Deployment Troubleshooting](../topics/deployments.md#troubleshooting) +Refer [Probes Troubleshooting](../topics/probes.md#troubleshooting) +Refer [Application Troubleshooting](https://kubernetes.io/docs/tasks/debug-application-cluster/debug-application/) + +
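+
+A typical first pass over a failing application (the pod name is a placeholder):
+
+```bash
+kubectl get pods -o wide                                   # status, restarts, node placement
+kubectl describe pod <pod-name>                            # events: image pull, scheduling, probe failures
+kubectl logs <pod-name> --previous                         # logs from the last crashed container
+kubectl get events --sort-by=.metadata.creationTimestamp
+```
+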
+ +## Troubleshoot cluster component failure + +
+ +TBD + +
+ +## Troubleshoot networking + +
+ +TBD + +
+ diff --git a/k8s-certifications/cka/README.md b/k8s-certifications/cka/README.md new file mode 100644 index 0000000..211909f --- /dev/null +++ b/k8s-certifications/cka/README.md @@ -0,0 +1,16 @@ +# Certified Kubernetes Administrator (CKA) + +## [CKA Curriculum](https://github.com/cncf/curriculum/blob/master/CKA_Curriculum_v1.22.pdf) + +1. [Cluster Architecture, Installation & Configuration - 25%](1.cluster_architecture_installation_configuration.md) +2. [Workloads & Scheduling - 15%](2.workloads_scheduling.md) +3. [Services & Networking - 20%](3.services_networking.md) +4. [Storage - 10%](4.storage.md) +5. [Troubleshooting - 30%](5.troubleshooting.md) + +## Resources + + - [Certified Kubernetes Administrator - CKA learning path](https://jayendrapatil.com/certified-kubernetes-administrator-cka-learning-path/) + - [KodeKloud Certified Kubernetes Administrator Course](https://shareasale.com/r.cfm?b=2319101&u=2367365&m=132199&urllink=&afftrack=) + + \ No newline at end of file diff --git a/k8s-certifications/ckad/1.application_design_build.md b/k8s-certifications/ckad/1.application_design_build.md new file mode 100644 index 0000000..bc4948f --- /dev/null +++ b/k8s-certifications/ckad/1.application_design_build.md @@ -0,0 +1,35 @@ +# Application Design and Build - 20% + +
+ +## Define, build and modify container images + +
+ +Refer [Docker](../topics/docker.md) + +
+ +## Understand Jobs and CronJobs + +
+ +Refer [Jobs & Cron Jobs](../topics/jobs.md) + +
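+
+For example, created imperatively (images, schedule and commands are arbitrary):
+
+```bash
+kubectl create job pi --image=perl:5.34 -- perl -Mbignum=bpi -wle 'print bpi(100)'
+kubectl create cronjob backup --image=busybox --schedule="*/5 * * * *" -- sh -c 'date; echo backup run'
+kubectl get jobs,cronjobs
+```
+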
+ +## Understand multi-container Pod design patterns (e.g. sidecar, init and others) + +
+ +Refer [Multi-container Pods](../topics/multi_container_pods.md) + +
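+
+A minimal sidecar sketch: the main container writes to a shared `emptyDir` volume and a sidecar streams it (names, image and commands are placeholders):
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: app-with-sidecar
+spec:
+  volumes:
+  - name: shared-logs
+    emptyDir: {}
+  containers:
+  - name: app                        # main container writes the log
+    image: busybox
+    command: ["sh", "-c", "while true; do date >> /var/log/app.log; sleep 5; done"]
+    volumeMounts:
+    - name: shared-logs
+      mountPath: /var/log
+  - name: log-sidecar                # sidecar streams the shared log
+    image: busybox
+    command: ["sh", "-c", "tail -F /var/log/app.log"]
+    volumeMounts:
+    - name: shared-logs
+      mountPath: /var/log
+```
+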
+ +## Utilize persistent and ephemeral volumes + +
+ +Refer [Volumes](../topics/volumes.md) + +
\ No newline at end of file diff --git a/k8s-certifications/ckad/2.application_deployment.md b/k8s-certifications/ckad/2.application_deployment.md new file mode 100644 index 0000000..3d5b791 --- /dev/null +++ b/k8s-certifications/ckad/2.application_deployment.md @@ -0,0 +1,25 @@ +# Application Deployment - 20% + +
+ +## Use Kubernetes primitives to implement common deployment strategies (e.g. blue/green or canary) + +
+ +- Deployments natively support only the Recreate and RollingUpdate strategies; blue/green and canary releases can be approximated with multiple Deployments behind a common Service selector (see the sketch below). +- A service mesh like Istio can be used for [traffic management and canary deployments](https://istio.io/latest/docs/tasks/traffic-management/traffic-shifting/). + +
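+
+A rough sketch of the replica-ratio approach: run a stable Deployment (e.g. 9 replicas) and a canary Deployment (e.g. 1 replica) whose pods share a common label, and front both with one Service, so roughly 10% of requests hit the canary. Names and labels below are placeholders.
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: web
+spec:
+  selector:
+    app: web          # matches pods of both the stable and the canary Deployment
+  ports:
+  - port: 80
+    targetPort: 80
+```
+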
+ +## Understand Deployments and how to perform rolling updates + +Refer [Deployment Rollouts](../topics/deployments.md#deployment-rollout) + +## Use the Helm package manager to deploy existing packages + +
+ + - [Helm](https://helm.sh/) can be used for templating and deployment. + +
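+
+Typical commands (the bitnami repository and chart are only examples):
+
+```bash
+helm repo add bitnami https://charts.bitnami.com/bitnami
+helm repo update
+helm install my-release bitnami/nginx
+helm list
+helm upgrade my-release bitnami/nginx --set replicaCount=2
+helm uninstall my-release
+```
+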
+ diff --git a/k8s-certifications/ckad/3.application_observability_maintenance.md b/k8s-certifications/ckad/3.application_observability_maintenance.md new file mode 100644 index 0000000..9854cc1 --- /dev/null +++ b/k8s-certifications/ckad/3.application_observability_maintenance.md @@ -0,0 +1,32 @@ +# Application Observability and Maintenance - 15% + +
+ +## Understand API deprecations + +
+ +Refer [API Deprecations](../topics/api_deprecations.md) + +
+ +## Implement probes and health checks + +Refer [Readiness & Liveness probes](../topics/probes.md) + +## Use provided tools to monitor Kubernetes applications + +Refer [Monitoring](../topics/monitoring.md) + +## Utilize container logs + +Refer [Logging](../topics/logging.md) + +## Debugging in Kubernetes + +
+ +TBD + +
+ diff --git a/k8s-certifications/ckad/4.application_environment_configuration_security.md b/k8s-certifications/ckad/4.application_environment_configuration_security.md new file mode 100644 index 0000000..48be2d8 --- /dev/null +++ b/k8s-certifications/ckad/4.application_environment_configuration_security.md @@ -0,0 +1,53 @@ +# Application Environment, Configuration and Security + +
+ +## Discover and use resources that extend Kubernetes (CRD) + +
+ +Refer [Custom Resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) + +
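+
+A minimal CustomResourceDefinition sketch (group, kind and schema are made up for illustration); once applied, `kubectl get backups` works like any built-in resource:
+
+```yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: backups.example.com        # must be <plural>.<group>
+spec:
+  group: example.com
+  scope: Namespaced
+  names:
+    plural: backups
+    singular: backup
+    kind: Backup
+    shortNames: ["bk"]
+  versions:
+  - name: v1
+    served: true
+    storage: true
+    schema:
+      openAPIV3Schema:
+        type: object
+        properties:
+          spec:
+            type: object
+            properties:
+              schedule:
+                type: string
+```
+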
+ +## Understand authentication, authorization and admission control + +Refer [Authentication](../topics/authentication.md) +Refer [RBAC](../topics/rbac.md) +Refer [Admission Controllers](../topics/admission_controllers.md) + +## Understanding and defining resource requirements, limits and quotas + +
+ +Refer [Resources - Requests & Limits](../topics/pods.md#resources) + +
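+
+For the quota part, a minimal ResourceQuota sketch (namespace and values are placeholders):
+
+```yaml
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: compute-quota
+  namespace: dev
+spec:
+  hard:
+    pods: "10"
+    requests.cpu: "2"
+    requests.memory: 2Gi
+    limits.cpu: "4"
+    limits.memory: 4Gi
+```
+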
+ +## Understand ConfigMaps + +
+ +Refer [ConfigMaps](../topics/configmaps.md) + +
+ +## Create & consume Secrets + +Refer [Secrets](../topics/secrets.md) + +## Understand ServiceAccounts + +
+ +Refer [Service Accounts](../topics/service_accounts.md) + +
+ +## Understand SecurityContexts + +
+ +Refer [Security Context](../topics/pod_security_context.md) + +
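+
+A minimal sketch combining pod-level and container-level settings (UIDs, image and command are arbitrary):
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: secure-pod
+spec:
+  securityContext:                 # pod level, applies to all containers
+    runAsUser: 1000
+    runAsGroup: 3000
+    fsGroup: 2000
+  containers:
+  - name: app
+    image: busybox
+    command: ["sh", "-c", "sleep 3600"]
+    securityContext:               # container level, overrides the pod level
+      allowPrivilegeEscalation: false
+      readOnlyRootFilesystem: true
+      capabilities:
+        drop: ["ALL"]
+```
+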
\ No newline at end of file diff --git a/k8s-certifications/ckad/5.services_networking.md b/k8s-certifications/ckad/5.services_networking.md new file mode 100644 index 0000000..bf774df --- /dev/null +++ b/k8s-certifications/ckad/5.services_networking.md @@ -0,0 +1,27 @@ +# Services and Networking + +
+ +## Demonstrate basic understanding of NetworkPolicies + +
+ +Refer [Network Policies](../topics/network_policies.md) + +
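+
+A minimal sketch allowing only frontend pods to reach database pods on port 3306 (labels and port are placeholders):
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: allow-frontend-to-db
+spec:
+  podSelector:
+    matchLabels:
+      role: db
+  policyTypes:
+  - Ingress
+  ingress:
+  - from:
+    - podSelector:
+        matchLabels:
+          role: frontend
+    ports:
+    - protocol: TCP
+      port: 3306
+```
+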
+ +## Provide and troubleshoot access to applications via services + +
+ +Refer [Services](../topics/services.md) + +
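+
+A quick sketch for exposing and verifying a service (deployment, names and ports are placeholders):
+
+```bash
+kubectl expose deployment webapp --name=webapp-svc --port=80 --target-port=8080
+kubectl get svc webapp-svc
+kubectl get endpoints webapp-svc    # empty endpoints usually mean a selector/label mismatch
+kubectl run tmp --rm -it --image=busybox -- wget -qO- webapp-svc:80   # in-cluster connectivity check
+```
+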
+ +## Use Ingress rules to expose applications + +
+ +Refer [Ingress](../topics/ingress.md) + +
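+
+A minimal Ingress sketch routing a host to an existing Service (host, service name and port are placeholders); `kubectl create ingress webapp --rule="webapp.example.com/*=webapp-svc:80"` is the imperative equivalent:
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: webapp-ingress
+spec:
+  rules:
+  - host: webapp.example.com
+    http:
+      paths:
+      - path: /
+        pathType: Prefix
+        backend:
+          service:
+            name: webapp-svc
+            port:
+              number: 80
+```
+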
\ No newline at end of file diff --git a/k8s-certifications/ckad/README.md b/k8s-certifications/ckad/README.md new file mode 100644 index 0000000..a26371d --- /dev/null +++ b/k8s-certifications/ckad/README.md @@ -0,0 +1,17 @@ +# Certified Kubernetes Application Developer (CKAD) + +## [CKAD Curriculum](https://github.com/cncf/curriculum/blob/master/CKAD_Curriculum_v1.28.pdf) + + - [Application Design and Build - 20%](1.application_design_build.md) + - [Application Deployment - 20%](2.application_deployment.md) + - [Application observability and maintenance - 15%](3.application_observability_maintenance.md) + - [Application Environment, Configuration and Security - 25%](4.application_environment_configuration_security.md) + - [Services & Networking - 20%](5.services_networking.md) + +## Resources + + - [Certified Kubernetes Application Developer - CKAD learning path](https://jayendrapatil.com/certified-kubernetes-application-developer-ckad-learning-path/) + - [KodeKloud Certified Kubernetes Application Developer Course](https://shareasale.com/r.cfm?b=2319509&u=2367365&m=132199&urllink=&afftrack=) + + + \ No newline at end of file diff --git a/k8s-certifications/cks/1.cluster_setup.md b/k8s-certifications/cks/1.cluster_setup.md new file mode 100644 index 0000000..169c107 --- /dev/null +++ b/k8s-certifications/cks/1.cluster_setup.md @@ -0,0 +1,51 @@ +# Cluster Setup - 10% + +
+ +## Use Network security policies to restrict cluster level access + +
+ +Refer [Network Policies](../topics/network_policies.md) + +
+ +## Use CIS benchmark to review the security configuration of Kubernetes components (etcd, kubelet, kubedns, kubeapi) + +
+ +Refer [Kube-bench](../topics/kube-bench.md) + +
+ +## Properly set up Ingress objects with security control + +
+ +Refer [Ingress with tls cert](../topics/ingress.md#ingress-security) + +
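+
+A minimal sketch of terminating TLS at the Ingress (host and service names are placeholders; the sample `tls.crt`/`tls.key` in the [data](../data) folder can be used for practice):
+
+```bash
+kubectl create secret tls webapp-tls --cert=tls.crt --key=tls.key
+```
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: webapp-ingress-tls
+spec:
+  tls:
+  - hosts:
+    - webapp.example.com
+    secretName: webapp-tls
+  rules:
+  - host: webapp.example.com
+    http:
+      paths:
+      - path: /
+        pathType: Prefix
+        backend:
+          service:
+            name: webapp-svc
+            port:
+              number: 80
+```
+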
+ +## Protect node metadata and endpoints + +
+ +Refer [Kubelet Security](../topics/kubelet_security.md) + +
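+
+The settings that matter most live in the kubelet configuration (on kubeadm clusters usually `/var/lib/kubelet/config.yaml`); a hedged sketch of the hardened values:
+
+```yaml
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+authentication:
+  anonymous:
+    enabled: false        # reject unauthenticated requests to the kubelet API
+  webhook:
+    enabled: true
+authorization:
+  mode: Webhook           # delegate authorization decisions to the API server
+readOnlyPort: 0           # disable the unauthenticated read-only port (10255)
+```
+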
+ +## Minimize use of, and access to, GUI elements + +
+ +Refer [Kubernetes Dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/) - avoid exposing the Dashboard publicly and grant it only the minimum RBAC permissions it needs. + +
+ +## Verify platform binaries before deploying + +
+ +Refer [Platform Binary Verification](../topics/binary_verification.md) + +
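+
+For example, verifying a downloaded `kubectl` binary against its published checksum (the version is only an example):
+
+```bash
+VERSION=v1.28.0
+curl -LO "https://dl.k8s.io/release/${VERSION}/bin/linux/amd64/kubectl"
+curl -LO "https://dl.k8s.io/release/${VERSION}/bin/linux/amd64/kubectl.sha256"
+echo "$(cat kubectl.sha256)  kubectl" | sha256sum --check   # expect: kubectl: OK
+```
+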
\ No newline at end of file diff --git a/k8s-certifications/cks/2.cluster_hardening.md b/k8s-certifications/cks/2.cluster_hardening.md new file mode 100644 index 0000000..fe269e9 --- /dev/null +++ b/k8s-certifications/cks/2.cluster_hardening.md @@ -0,0 +1,35 @@ +# Cluster Hardening - 15% + +
+ +## Restrict access to Kubernetes API + +
+ +Refer [Controlling Access to Kubernetes API](https://kubernetes.io/docs/concepts/security/controlling-access/) + +
+ +## Use Role Based Access Controls to minimize exposure + +
+ +Refer [RBAC](../topics/rbac.md) + +
+ +## Exercise caution in using service accounts e.g. disable defaults, minimize permissions on newly created ones + +
+ +Refer [Service Accounts](../topics/service_accounts.md) + +
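+
+A minimal sketch of the hardening points (names are placeholders): create a dedicated ServiceAccount and disable token automounting unless the workload really needs to call the API.
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: app-sa
+automountServiceAccountToken: false
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: app
+spec:
+  serviceAccountName: app-sa
+  automountServiceAccountToken: false   # can also be set per pod
+  containers:
+  - name: app
+    image: nginx
+```
+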
+ +## Update Kubernetes frequently + +
+ +Refer [Upgrading Kubeadm Clusters](../topics/cluster_upgrade.md) + +
\ No newline at end of file diff --git a/k8s-certifications/cks/3.system_hardening.md b/k8s-certifications/cks/3.system_hardening.md new file mode 100644 index 0000000..19a2e6b --- /dev/null +++ b/k8s-certifications/cks/3.system_hardening.md @@ -0,0 +1,36 @@ +# System Hardening - 15% + +
+ +## Minimize host OS footprint (reduce attack surface) + +
+ +Refer [Docker](../topics/docker.md) + +
+ +## Minimize IAM roles + +
+ +IAM roles are mainly relevant for cloud-hosted clusters and should follow the principle of least privilege. + +
+ +## Minimize external access to the network + +
+ +Refer [Network Policies](../topics/network_policies.md) + +
+ +## Appropriately use kernel hardening tools such as AppArmor, seccomp + +
+ +Refer [Seccomp - Secure Computing](../topics/seccomp.md) +Refer [AppArmor](../topics/apparmor.md) + +
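+
+A minimal seccomp sketch: run a pod with the container runtime's default profile (for custom profiles use `type: Localhost` with a `localhostProfile` path relative to the kubelet's seccomp directory):
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: seccomp-default
+spec:
+  securityContext:
+    seccompProfile:
+      type: RuntimeDefault
+  containers:
+  - name: app
+    image: nginx
+```
+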
\ No newline at end of file diff --git a/k8s-certifications/cks/4.minimize_microservice_vulnerabilities.md b/k8s-certifications/cks/4.minimize_microservice_vulnerabilities.md new file mode 100644 index 0000000..1ee83ef --- /dev/null +++ b/k8s-certifications/cks/4.minimize_microservice_vulnerabilities.md @@ -0,0 +1,37 @@ +# Minimize Microservice Vulnerabilities - 20% + +## Setup appropriate OS level security domains e.g. using PSP, OPA, security contexts + +
+ +Refer [Pod Security Policies](../topics/pod_security_policies.md) + +Refer [Pod Security Context](../topics/pod_security_context.md) + +Refer [Open Policy Agent](https://kubernetes.io/blog/2019/08/06/opa-gatekeeper-policy-and-governance-for-kubernetes/) + +
+ +## Manage kubernetes secrets + +
+ +Refer [Secrets](../topics/secrets.md) + +
+ +## Use container runtime sandboxes in multi-tenant environments (e.g. gvisor, kata containers) + +
+ +Refer [Runtime Class](../topics/runtimes.md) + +
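+
+A minimal sketch (assumes the `runsc` handler, i.e. gVisor, is already configured in the container runtime):
+
+```yaml
+apiVersion: node.k8s.io/v1
+kind: RuntimeClass
+metadata:
+  name: gvisor
+handler: runsc
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: sandboxed-pod
+spec:
+  runtimeClassName: gvisor
+  containers:
+  - name: app
+    image: nginx
+```
+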
+ +## Implement pod to pod encryption by use of mTLS + +
+ +Refer [Istio MTLS](https://istio.io/latest/docs/tasks/security/authentication/authn-policy/#auto-mutual-tls) + +
\ No newline at end of file diff --git a/k8s-certifications/cks/5.supply_chain_security.md b/k8s-certifications/cks/5.supply_chain_security.md new file mode 100644 index 0000000..d317dcb --- /dev/null +++ b/k8s-certifications/cks/5.supply_chain_security.md @@ -0,0 +1,33 @@ +# Supply Chain Security - 20% + +## Minimize base image footprint + +
+ +Refer [Docker best practices](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/) + +
+ +## Secure your supply chain: whitelist allowed image registries, sign and validate images + +
+ +Refer [Admission Controllers ImagePolicyWebhook](../topics/admission_controllers.md#imagepolicywebhook) + +
+ +## Use static analysis of user workloads (e.g. kubernetes resources, docker files) + +
+ +Refer [Kubesec](../topics/kubesec.md) + +
+ +## Scan images for known vulnerabilities + +
+ +Refer [Trivy](../topics/trivy.md) + +
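+
+Typical usage (the image name is an example; flags as in current Trivy releases, check `trivy image --help` if they differ):
+
+```bash
+trivy image nginx:1.20
+trivy image --severity HIGH,CRITICAL nginx:1.20
+```
+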
\ No newline at end of file diff --git a/k8s-certifications/cks/6.monitoring_logging_runtime_security.md b/k8s-certifications/cks/6.monitoring_logging_runtime_security.md new file mode 100644 index 0000000..1cf3ef6 --- /dev/null +++ b/k8s-certifications/cks/6.monitoring_logging_runtime_security.md @@ -0,0 +1,53 @@ +# Monitoring, Logging and Runtime Security - 20% + +
+ +## Perform behavioral analytics of syscall process and file activities at the host and container level to detect malicious activities + +
+ +Refer [Falco](../topics/falco.md) + +Other tools include `strace` and `tracee`. + +
+ +## Detect threats within physical infrastructure, apps, networks, data, users and workloads + +
+ +TBD + +
+ +## Detect all phases of attack regardless where it occurs and how it spreads + +
+ +TBD + +
+ +## Perform deep analytical investigation and identification of bad actors within environment + +
+ +TBD + +
+ +## Ensure immutability of containers at runtime + +
+ +Refer [Pod Security Context Immutability](../topics/pod_security_context.md#immutability) + +
+ +## Use Audit Logs to monitor access + +
+ +Refer [Kubernetes Auditing](../topics/auditing.md) + +
\ No newline at end of file diff --git a/k8s-certifications/cks/README.md b/k8s-certifications/cks/README.md new file mode 100644 index 0000000..491847d --- /dev/null +++ b/k8s-certifications/cks/README.md @@ -0,0 +1,21 @@ +# Certified Kubernetes Security Specialist (CKS) + +## [CKA Curriculum](https://github.com/cncf/curriculum/blob/master/CKA_Curriculum_v1.22.pdf) + +- [Cluster Setup - 10%](1.cluster_setup.md) +- [Cluster Hardening - 15%](2.cluster_hardening.md) +- [System Hardening - 15%](3.system_hardening.md) +- [Minimize Microservice Vulnerabilities - 20%](4.minimize_microservice_vulnerabilities.md) +- [Supply Chain Security - 20%](5.supply_chain_security.md) +- [Monitoring, Logging and Runtime Security - 20%](6.monitoring_logging_runtime_security.md) + +## Resources + +- [Certified Kubernetes Security Specialist - CKS learning path](https://jayendrapatil.com/certified-kubernetes-security-specialist-cks-learning-path/) +- [KodeKloud Certified Kubernetes Security Specialist Course](https://shareasale.com/r.cfm?b=2319531&u=2367365&m=132199&urllink=&afftrack=) +- [Udemy Kubernetes CKS 2021 Complete Course – Theory – Practice](https://click.linksynergy.com/link?id=l7C703x9gqw&offerid=507388.3573079&type=2&murl=https%3A%2F%2Fwww.udemy.com%2Fcourse%2Fcertified-kubernetes-security-specialist%2F) + + + + + diff --git a/k8s-certifications/data/ImagePolicyWebhook/webhook.crt b/k8s-certifications/data/ImagePolicyWebhook/webhook.crt new file mode 100644 index 0000000..103176b --- /dev/null +++ b/k8s-certifications/data/ImagePolicyWebhook/webhook.crt @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID5TCCAs2gAwIBAgIUL1k7p/ksn6VRIuAKmeDMctyCUwcwDQYJKoZIhvcNAQEL +BQAwRjFEMEIGA1UEAww7c3lzdGVtOm5vZGU6aW1hZ2UtYm91bmNlci13ZWJob29r +LmRlZmF1bHQucG9kLmNsdXN0ZXIubG9jYWwwHhcNMjExMjE1MDY1MTMyWhcNMzEx +MjEzMDY1MTMyWjBGMUQwQgYDVQQDDDtzeXN0ZW06bm9kZTppbWFnZS1ib3VuY2Vy +LXdlYmhvb2suZGVmYXVsdC5wb2QuY2x1c3Rlci5sb2NhbDCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAK2/gOl+AEJjnbc5DG4iFg2WvD8JAjgwXHd3zQ6A +HujxMz1EjJmDksc6S7aKrCJmP42tDdzQatVINMFHBR/8kb5bVN+f0LSNEM3iktfE +KmB7VsfEk6gaPJg8VOitA/7KpVDyZ4yJZmb2iaGLFzFF41XwiCP2pzihBUTj669Q +6MWDKxbONSrUpA60vvfhpWbnZxTbX8BfB1xDXOK51kK7rnXRfiJt6NHg+n87+1Lk +SFcUoZ/BRarSfweHorCu8c/agZfN9rKyj5tPNb3ZCvp3WJs3ZElK2+j/abZwW6cY +PIorQM0Zl3BZMFCdhoBEcqkeccb1DFjz0RB09SbH8WHCH3cCAwEAAaOByjCBxzAd +BgNVHQ4EFgQUgcvgsxHiAEkdgZgWa6XWuEApS6swHwYDVR0jBBgwFoAUgcvgsxHi +AEkdgZgWa6XWuEApS6swDwYDVR0TAQH/BAUwAwEB/zB0BgNVHREEbTBrghVpbWFn +ZS1ib3VuY2VyLXdlYmhvb2uCIWltYWdlLWJvdW5jZXItd2ViaG9vay5kZWZhdWx0 +LnN2Y4IvaW1hZ2UtYm91bmNlci13ZWJob29rLmRlZmF1bHQuc3ZjLmNsdXN0ZXIu +bG9jYWwwDQYJKoZIhvcNAQELBQADggEBAAofI9qArTMFQ4W19OsE3Sp1GLdTie2P +GIVFoiyedYwF+mJWbSgBxklnAKkJf7/sj0PHUEPP4cs7BUM6YHUrjC3OUPhbiH9f +CB8cVjVJhrI4mWDbAXiPa1mvo44x5eZeWDoz+DkUK+nna1/6ik40yOlonoyPXS/y +1qEWPijRr/3nJ6Vfy6823UNasEQN6mqeUWAO29M1vrYvq0rzUGiU4xTUvWH3JA26 +1sk+ZYAWyZe2/kOTRMjTnKAaki+dnWt14ed1ipuyHxfR6vHKS80eZuJEd2hmytoE +PRljY4asLiazIAP5j9/T4Xj66n0fvgTh75iUwAMkQHS2swC4ZjVS7nc= +-----END CERTIFICATE----- \ No newline at end of file diff --git a/k8s-certifications/data/ImagePolicyWebhook/webhook.key b/k8s-certifications/data/ImagePolicyWebhook/webhook.key new file mode 100644 index 0000000..1c03ca3 --- /dev/null +++ b/k8s-certifications/data/ImagePolicyWebhook/webhook.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCtv4DpfgBCY523 +OQxuIhYNlrw/CQI4MFx3d80OgB7o8TM9RIyZg5LHOku2iqwiZj+NrQ3c0GrVSDTB +RwUf/JG+W1Tfn9C0jRDN4pLXxCpge1bHxJOoGjyYPFTorQP+yqVQ8meMiWZm9omh 
+ixcxReNV8Igj9qc4oQVE4+uvUOjFgysWzjUq1KQOtL734aVm52cU21/AXwdcQ1zi +udZCu6510X4ibejR4Pp/O/tS5EhXFKGfwUWq0n8Hh6KwrvHP2oGXzfayso+bTzW9 +2Qr6d1ibN2RJStvo/2m2cFunGDyKK0DNGZdwWTBQnYaARHKpHnHG9QxY89EQdPUm +x/Fhwh93AgMBAAECggEAfW5S0j10Unk30p4MqzVQVl8LZzZJs+a12klSb7VumxwF +saVbGzgxLkKXhiB2RB8sokrcRxzvAyota5qpyH29eX7VttrZAH8WMovvFnU3Yo+o +Bm+TaTgHpp9nbNH6oGYLEnTs7DgFBS/WDBktlRSvGcubfNsDvY4BD8q6ysXORUdL +Mji+JiPgIxlvHLZleP5zAyLWesSvKpUZxvE3/8G0M6rJD70Ufq9w3O2/UbrXoOEK +vdKn3MIarI8x3O7dDauFdA+LbBMMG3Pl+GbkRuG5eFwMhUHzqks+sx0M8vz5YDzw +mUxO1gzktvmSDiEcnIS5aINXgItviQp545KCCd+RAQKBgQDgFg8c5yUk9pMSIrIC +kUT6uWfi0rnHREBfrCZUkso4acIt1PBEOOYKJoLwbdjE1w7fuysk6Ok7o5rg9Cch +qen7hIFoWwKhfNO7dcwozs6gnT7QVUpHnID3t23m8wGtf7d2QRAXqCDRmaQUfHRc +zupj7LPRsbrrc1ZBCI3i9g1mDwKBgQDGfivUe5n9+W6215SR3ofHrzkr6GQBl1bb +H9WRhmvxNpLARdbKoGeBYMggdFte/SlzHdN6c5gaXIM7OJZj3NMSU/Flaqe/drOR +76zN1nACvNZazpxHLnVklgSesRdFYZkvzhwnuS3sPiBEseV/Zi/Hp+Lc9XguqH5a +LZHmGMJYGQKBgCZOPwkezi+yYtOv0KQ1twfxF7wjb5SLq0FviSHd8emQ0pvJEcVn +wJMtoCZ/cJW9eZJvSWHG2s/SGNCpi+LqS9AuB30SSbHXR858xYiYSaQVHT65xbfW +Hgm6dnQLSFcjRPZXCuwwVmPeErlZyP5wdIreVKLc8en7zlvRnYeVrharAoGBAIf9 +QUIePG6ISZXzNNKLRzNDlUPDv2BnsxYFRWiiU6m63efk8Td5lfBJwlKZ5U+62n8H +3C90qqzE3RPhvQdF70YLRMNawvql9HjzX8zWMX9uqN0l2GPcLIlxTlD6uxrJtw3N +g/SjJhdIqQrnZnhWJj3/g6omcuRkg8x8lAy0wdFhAoGAS2dEds2M9/OtAHSvGScr +Pb7hXWT+5cX3PqgPiLc1R0TRTjCzUEJYtuSpwb6/JHuVNXmpek1xzLfkykXu7LsG +sy0GXILOBAX5lxYrIgHIMv4a3pjI4UbwB1OzvthRc4kJXyBBT7L7LlPgaJ97xelf +L4TAluWzris5Xa7Y53IfkhE= +-----END PRIVATE KEY----- \ No newline at end of file diff --git a/k8s-certifications/data/Seccomp/audit.json b/k8s-certifications/data/Seccomp/audit.json new file mode 100644 index 0000000..1f2d5df --- /dev/null +++ b/k8s-certifications/data/Seccomp/audit.json @@ -0,0 +1,3 @@ +{ + "defaultAction": "SCMP_ACT_LOG" +} \ No newline at end of file diff --git a/k8s-certifications/data/kubeconfig.yaml b/k8s-certifications/data/kubeconfig.yaml new file mode 100644 index 0000000..5bc7cf4 --- /dev/null +++ b/k8s-certifications/data/kubeconfig.yaml @@ -0,0 +1,67 @@ +apiVersion: v1 +current-context: kubernetes-admin@kubernetes +kind: Config +preferences: {} +clusters: +- cluster: + certificate-authority: /etc/kubernetes/pki/ca.crt + server: https://controlplane:6443 + name: kubernetes +- name: labs + cluster: + certificate-authority: /etc/kubernetes/pki/ca.crt + server: https://controlplane:6443 +- name: development + cluster: + certificate-authority: /etc/kubernetes/pki/ca.crt + server: https://controlplane:6443 +- name: qa + cluster: + certificate-authority: /etc/kubernetes/pki/ca.crt + server: https://controlplane:6443 +- name: production + cluster: + certificate-authority: /etc/kubernetes/pki/ca.crt + server: https://controlplane:6443 +users: +- name: kubernetes-admin + user: + client-certificate: /etc/kubernetes/pki/users/user/user.crt + client-key: /etc/kubernetes/pki/users/user/user.key +- name: labs-user + user: + client-certificate: /etc/kubernetes/pki/users/test-user/labs-user.crt + client-key: /etc/kubernetes/pki/users/test-user/labs-user.key +- name: dev-user + user: + client-certificate: /etc/kubernetes/pki/users/dev-user/dev-user.crt + client-key: /etc/kubernetes/pki/users/dev-user/dev-user.key +- name: qa-user + user: + client-certificate: /etc/kubernetes/pki/users/qa-user/qa-user.crt + client-key: /etc/kubernetes/pki/users/qa-user/qa-user.key +- name: prod-user + user: + client-certificate: /etc/kubernetes/pki/users/prod-user/prod-user.crt + client-key: /etc/kubernetes/pki/users/prod-user/prod-user.key +contexts: +- context: + 
cluster: kubernetes + user: kubernetes-admin + name: kubernetes-admin@kubernetes +- name: labs-user@labs + context: + cluster: labs + user: labs-user +- name: development-user@labs + context: + cluster: development + user: development-user +- name: qa-user@qa + context: + cluster: qa + user: qa-user +- name: prod-user@prod + context: + cluster: prod + user: prod-user \ No newline at end of file diff --git a/k8s-certifications/data/tls.crt b/k8s-certifications/data/tls.crt new file mode 100644 index 0000000..8b2e031 --- /dev/null +++ b/k8s-certifications/data/tls.crt @@ -0,0 +1,31 @@ +-----BEGIN CERTIFICATE----- +MIIFZDCCA0wCCQCLkCF9TN02ITANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJJ +TjELMAkGA1UECAwCTUgxDTALBgNVBAcMBGNpdHkxEDAOBgNVBAoMB2NvbXBhbnkx +EDAOBgNVBAsMB3NlY3Rpb24xCzAJBgNVBAMMAkRLMRgwFgYJKoZIhvcNAQkBFglh +LmJAYy5jb20wHhcNMjExMjEwMTMzMTA0WhcNMjIxMjEwMTMzMTA0WjB0MQswCQYD +VQQGEwJJTjELMAkGA1UECAwCTUgxDTALBgNVBAcMBGNpdHkxEDAOBgNVBAoMB2Nv +bXBhbnkxEDAOBgNVBAsMB3NlY3Rpb24xCzAJBgNVBAMMAkRLMRgwFgYJKoZIhvcN +AQkBFglhLmJAYy5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDc +56DteFTMyJeLn2qP+5AIuumvW3B4ndk/h8p7489J5EH6KNlL4gp5P4q0rZRXJqaX +sRzBZD2nM2kWDwRC+KjgffQHxTESZOe8jLBl4kz2iPWLIsa2nfIVgoi0U9qZ6bGN +LU4yxYWyliKgD0xweTV9EsUHCgYjLO8lkwRPcMCAHNPckcXooOO/PLKHz5Kzg4J/ +au6TNF3GqzX5ECpArgZOd+67rM1ZFg9jxGyQZmfAnklILOBuN9DCsHqVHScdcATi +Y105KLFAg8KCJ8+BSPzBVRNjuWhmfzmHBqPAWg4N50D10IHgeJWcdg51VbgC0aYO +sbx4JSCUfvjKHDAQfd0PhQDpfvam2tERc1HQfKFAa89SWPVblRE4szaI+uSqJdIg +P+XJ3YVqIHJUblC1mM85EAfSRmEv3Tn2C+gwi65gpYLkjvJr4ucRs+vCvF/s3qYA +QnP87FyXa7GEZSpLop/lVb2J5o7muc69FKNOpUHYDkVxjlmMs+T5RZgOXL7lvjnN +c09rjVs+lVZ/fW+Ej4p0lF4HJuG+vaGU79w8SJz7nUQiU+A9ayoJfbld7BgCv4UQ +yS0G2uuKlxRVw+NZGCNSmthDAvytNBR2C4qpXw8pK+BrAc7jibOOvJWg1Zl7KY89 +taD0RLpd9WE+6QTvyXnS88p+uY6fjhAivS85tW+7LwIDAQABMA0GCSqGSIb3DQEB +CwUAA4ICAQAZ0lH73nsPbm40JtqElGCzdf/OjlbfiPPATOy+6FvR5e2myg2hnDu8 +nPYSKs3F5hRdYm90a6r3q4+Cyej58259WOK5r0gW6GTJFoT/A/cKyqsolXZ4jjK6 +RPT0a5Vll0M8uRMPysRc8hGI1s06DFOfRWYDwtAfn20UpHjmLvjRYjXDS4FNLAh1 +c4G1GGGFVTpQo6yL881m+iErDUqU9pOR3Yu+NbOG7FFQXQtSuy7tFlRL65oyASHx +I3REB6VL7CL37E9LDhdGoLRAWARRFWCGvZLRj9IBF/dQKXGjeD8BGnmNEUIMA9JW +KiXmx41Rnf41v1v77LonCBveU2oubuc4YfnNcbAQHnoiN7sjcNIkIBFWspbhSstc +761G7bejMgP8HUYp0NZySABRsL+3bXtkVX8tmOx7/riR4TxMVjyPp8wGg/cuo8AJ +DpizNmUQAg1YEo+5xe9tQV+C7ScvbbtTDkrWm+vXci4qXaXaJZv4VFvDCnQnfhL1 +mKbLZp7L7vpoWfezE0jNw7NV1Ys75AZDJBcOp2RyNaP+MCWf6/EQs2/UL0YntexE +c7eqGREkFsxyaF960B2K73qbMlxahCwK3h7Q2Z7udmWGvayaIr7V3V2sBHDr8u36 +99bwdR/h/t8Y2slP3kuuIteJSYpKAtQqt/FvoFtTDc91ZZ6ugYqnVg== +-----END CERTIFICATE----- diff --git a/k8s-certifications/data/tls.key b/k8s-certifications/data/tls.key new file mode 100644 index 0000000..a175c0f --- /dev/null +++ b/k8s-certifications/data/tls.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDc56DteFTMyJeL +n2qP+5AIuumvW3B4ndk/h8p7489J5EH6KNlL4gp5P4q0rZRXJqaXsRzBZD2nM2kW +DwRC+KjgffQHxTESZOe8jLBl4kz2iPWLIsa2nfIVgoi0U9qZ6bGNLU4yxYWyliKg +D0xweTV9EsUHCgYjLO8lkwRPcMCAHNPckcXooOO/PLKHz5Kzg4J/au6TNF3GqzX5 +ECpArgZOd+67rM1ZFg9jxGyQZmfAnklILOBuN9DCsHqVHScdcATiY105KLFAg8KC +J8+BSPzBVRNjuWhmfzmHBqPAWg4N50D10IHgeJWcdg51VbgC0aYOsbx4JSCUfvjK +HDAQfd0PhQDpfvam2tERc1HQfKFAa89SWPVblRE4szaI+uSqJdIgP+XJ3YVqIHJU +blC1mM85EAfSRmEv3Tn2C+gwi65gpYLkjvJr4ucRs+vCvF/s3qYAQnP87FyXa7GE +ZSpLop/lVb2J5o7muc69FKNOpUHYDkVxjlmMs+T5RZgOXL7lvjnNc09rjVs+lVZ/ +fW+Ej4p0lF4HJuG+vaGU79w8SJz7nUQiU+A9ayoJfbld7BgCv4UQyS0G2uuKlxRV +w+NZGCNSmthDAvytNBR2C4qpXw8pK+BrAc7jibOOvJWg1Zl7KY89taD0RLpd9WE+ 
+6QTvyXnS88p+uY6fjhAivS85tW+7LwIDAQABAoICAGSN62stQyyUgqdDwbYYxM+0 +hXsVHHVLJQEORtVuNYVlKcM9pOwt0KawjesAuG2TYnHaZUSC5K2fcU5hN4dkuTq3 +GsYOtO+yjun9AK7f/Dicz2iuQ9YMv42bBa9QHEnDXtbssJPb5agNP2WskRcBlZ+B +U76IiZKpeZKZAXVH1dh7RtU4ZeYmloUOlBXOHvEoA9cMTd0kESvF86OUACfBD43Y +egtj9XV/3TGE0AZLFx9O7fy0sNR7A8QboTEPPCbiPtbudBj4tPaxA3FLveET4DoB +B/p1A1jkwML9+rwsQgmCIsfCSdxsB25ZLuuqQUDHPdeigDAQdmwiAA3AFwDqyhzV +wuBeQH7OitOq7kBZAZ1Sv6jT3IkeM53ysMOfCa0LCvOCZt+GYtxxlH3XGQVKjBPi +mm9txjpbpxBdYfi15lr+SXfy48YUCXiihNkIQ2XevQlFEn4c8axW7l34j9eF1vnf +d1IQ6cBbNP8QQVHnMr/xH+EJa4D4EBBwGDclEanCXgeVuhcJU+qi92gUnVnKKqA2 +EHseNJhgrNEff6od2xlDC2NiM8DskaHCSG15E8mVMr+N1WKjZEJPG5kjjyQ5DU/8 +v/pqHzwOK6hG2D7fJiuSVTaEClF72qWHCIEG8M46h+lpZ6DaQvzkMoQ/ga1Ebc28 +b3ghJdkt4JwtizIC9tQBAoIBAQD2f+maNqHR2InJKJ/e2s8C3e3+kxXpgZiGujr+ +06Whhz2a073zx8UX21PBlro1J0JdlIF3DVSUshSk3qu7KQZPCQr4x8IH5JPFdDi0 +ZRXshm5ByUSyWnmmDVku3pvl521Gwd8XdEKHNmFaq/wpT/aRkrLs++vapx41iLcr +qBa7grh/0wGej/ec3xtfGClymfqNuLQIPpLCmJG+gMM9Kcoc3s5L1oA6yM1NneZB +7rYjNG9HraF17wXY+wp1/pqu5dhCwwhRAahifvuYMRirPX2J9dqAAsSoePHf2CkF +HA7ToDyXIa6GmpdSng2sE2A/GgXD2X0ev0QmO8b4iGChe3fvAoIBAQDlay9AzcZu ++OxCAC1T0jJZzAPeN3Wz08K4RTE4tWbsBj8j/GenimgaFn4jXYo4vMdFPa6ET9+p +Lem9YVcGfRtp3a8N3Lx2KkT8SZTD+itMt8UPmbxIJviO1Z/KdqlNyxNt6tWlMbKA +z72CWvwvbXXPFMKIROS2xRgXmx7r0C0750IXYEtsIColjXh5ME6faECgsWWqsCi1 +cnH1awrzGkw5BwPeGYB/pmGRtd2q1kb5BoP7GuME8/T1T/A2I0ltjU9rX8qjeyMv +S43tEFWHxTijNsKK/UvLFn2K/lfCVQMQnKhpHKuJOtTsFkGg2Ukwe8rnDtSQdgWg +3P2p0IXjerDBAoIBADWx/2z8YZuYk8sh8lFVUKrLNUCzQZ6wAE2424kPCZF6KE1F +uqcT6TcdK82Ly9wwRSClbN5GJRqPADg52SbX9OvaiG1Q9k9J13a3rnJ9Yp03W2Ux +NqmzU7R8S+UN0N/v3boAGVy+ko9ppSNfO3q0VH25ewhsiCAFL2tx8JSt9OW7v/z4 +Ne4YZlPhtdCtLrosGIwuo+j32HhTS8w3uE/mfoRzdHTIsP4dJ7u0nafXHA3nKiZv +CDDsdFWjuc+iOofGwakpWvJqbgemqZ+pcjo7FtGqoIIqGDSqw+WC7MyUJBatXQV+ +7Mmde0Ef9NJ7Fggo3wCeq8a621mIw/r3mjUS9DkCggEAUYA8bzcrEW1Y8TGC6M45 +mPEDRsRJCjNmb3QVQmIfSCYH9E7MvBZNWUc4VHP8kJ9v40dAYjzF5iIrcV3NPr7f +KELa13/da9UkYMP7F4weKcj3Ns2Ut8Uwc/2sII77ImnMYzYT4/W9xkkGt/J+uJKY +UZK8cRCYd92Y63nuCDQSfb9wGUHaSXU7w894RwVESRkOLIgY6ARg0eTwWxFF+IsV +HQVC+HnyzmZbLxp+vxwUZo9L/77Te4T3NtbJLVJn2YVj+28yW9V48GpU5yzwVaVY +s5LWle3aKTG6M9CbeKwexJ4CriTDQ6Mk1SIq+mt2tsSjlmYMWa2z3ivj6Znslp2V +gQKCAQEAlwe8NW+4NhvhXL1dSx4iJNfZwTeuZdTRjja9en9CtY82v2cSdRRILH57 +hfPjva5T/hqAFIkKzCkAFzgkBSF2s1oVx5tJ7fdSzUEPAMqvfWCQF06lHeUPbPM4 +fDblgStfNcCfIXKBN7LJXw2GymKK5NUqrrN8j3oT1QVGwGvQPsZyJ59mKTwX801M +/0Qy2SRTT+97nIAHYV9iBCrw8zXGaUCevn4Jn76ps0BJ1deTZe+MHpF8mb3g/WTC +cY/4JoaCfM6l8zjuopayxlRYaW80H6HXgUvbXZfCJFZPbJkGEHO8OnJnkAUTn08q +Lf/09ItIfuMr+ifYGoRA2pQwUulv4g== +-----END PRIVATE KEY----- diff --git a/k8s-certifications/topics/README.md b/k8s-certifications/topics/README.md new file mode 100644 index 0000000..074663f --- /dev/null +++ b/k8s-certifications/topics/README.md @@ -0,0 +1,45 @@ +# Topics + +Topics cover test exercises for each topics + + - [Admission Controllers](./admission_controllers.md) + - [Annotations](./annotations.md) + - [APIs](./apis.md) + - [AppArmor](./apparmor.md) + - [Auditing](./auditing.md) + - [Authentication](../authentication.md) + - [Platform Binary Verfication](./binary_verification.md) + - [Cluster Upgrade](./cluster_upgrade.md) + - [ConfigMaps](./configmaps.md) + - [DaemonSets](./daemonsets.md) + - [Deployments](./deployments.md) + - [ETCD](./etcd.md) + - [Falco](./falco.md) + - [Ingress](./ingress.md) + - [Init Containers](../init_containers.md) + - [Jobs](./jobs.md) + - [Kubectl Jsonpath](./jsonpath.md) + - [kube-bench](./kube-bench.md) + - [Kubeconfig](./kubeconfig.md) . 
+ - [Kubelet Security](./kubelet_security.md) + - [Kubesec](./kubesec.md) + - [Labels](./labels.md) + - [Logging](./logging.md) + - [Monitoring](./monitoring.md) + - [Namespaces](./namespaces.md) + - [Network Policies](./network_policies.md) + - [Nodes](./nodes.md) + - [Pod Security Context](./pod_security_context.md) + - [Pod Security Policies](./pod_security_policies.md) + - [Pods](./pods.md) + - [Readiness & Liveness Probes](./probes.md) + - [RBAC](./rbac.md) + - [ReplicaSets](./replica_set.md) + - [Runtime Classes](./runtimes.md) + - [Seccomp](./seccomp.md) + - [Secrets](./secrets.md) + - [Service Accounts](./service_accounts.md) + - [Services](./services.md) + - [Taints & Tolerations](./taints_tolerations.md) + - [Trivy](./trivy.md) + - [Volumes](./volumes.md) \ No newline at end of file diff --git a/k8s-certifications/topics/admission_controllers.md b/k8s-certifications/topics/admission_controllers.md new file mode 100644 index 0000000..65cdaa9 --- /dev/null +++ b/k8s-certifications/topics/admission_controllers.md @@ -0,0 +1,214 @@ +# [Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/) + +An admission controller is a piece of code that intercepts requests to the Kubernetes API server prior to persistence of the object, but after the request is authenticated and authorized. + + - [ImagePolicyWebhook](#imagepolicywebhook) + - [PodSecurityPolicy](#podsecuritypolicy) + +
+ +## Basics + +
+ +### Check the admission controllers enabled by default + +
show

+ +```bash +kubectl exec -it kube-apiserver-controlplane -n kube-system -- kube-apiserver -h | grep 'enable-admission-plugins' +``` + +

+ +
+ +### Check the admission controllers enabled explicitly. + +
show

+ +#### Check the `--enable-admission-plugins` property in the `/etc/kubernetes/manifests/kube-apiserver.yaml` file + +

+ +
+ +### Disable `DefaultStorageClass` admission controller + +
show

+ +#### Add `--disable-admission-plugins=DefaultStorageClass` to the `/etc/kubernetes/manifests/kube-apiserver.yaml` file + +

+ +
+ +## [ImagePolicyWebhook](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#imagepolicywebhook) + +
+ +### [Set Up](https://github.com/kainlite/kube-image-bouncer) + +```bash +# add image-bouncer-webhook to the host file +echo "127.0.0.1 image-bouncer-webhook" >> /etc/hosts + +# make directory to host the keys - using /etc/kubernetes/pki as the volume is already mounted +mkdir -p /etc/kubernetes/pki/kube-image-bouncer +cd /etc/kubernetes/pki/kube-image-bouncer + +# generate webhook certificate OR use the one in data folder +openssl req -x509 -new -days 3650 -nodes \ + -keyout webhook.key -out webhook.crt -subj "/CN=system:node:image-bouncer-webhook.default.pod.cluster.local" \ + -addext "subjectAltName=DNS:image-bouncer-webhook,DNS:image-bouncer-webhook.default.svc,DNS:image-bouncer-webhook.default.svc.cluster.local" + +# create secret +kubectl create secret tls tls-image-bouncer-webhook --cert=/etc/kubernetes/pki/kube-image-bouncer/webhook.crt --key=/etc/kubernetes/pki/kube-image-bouncer/webhook.key + +# create webhook deployment exposed as node port service +cat << EOF > image-bouncer-webhook.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + app: image-bouncer-webhook + name: image-bouncer-webhook +spec: + type: NodePort + ports: + - name: https + port: 443 + targetPort: 1323 + protocol: "TCP" + nodePort: 30080 + selector: + app: image-bouncer-webhook +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: image-bouncer-webhook +spec: + selector: + matchLabels: + app: image-bouncer-webhook + template: + metadata: + labels: + app: image-bouncer-webhook + spec: + containers: + - name: image-bouncer-webhook + imagePullPolicy: Always + image: "kainlite/kube-image-bouncer:latest" + args: + - "--cert=/etc/admission-controller/tls/tls.crt" + - "--key=/etc/admission-controller/tls/tls.key" + - "--debug" + - "--registry-whitelist=docker.io,k8s.gcr.io" + volumeMounts: + - name: tls + mountPath: /etc/admission-controller/tls + volumes: + - name: tls + secret: + secretName: tls-image-bouncer-webhook +EOF + +kubectl apply -f image-bouncer-webhook.yaml + +# define the admission configuration file @ /etc/kubernetes/pki/kube-image-bouncer/admission_configuration.yaml +cat << EOF > admission_configuration.yaml +apiVersion: apiserver.config.k8s.io/v1 +kind: AdmissionConfiguration +plugins: +- name: ImagePolicyWebhook + configuration: + imagePolicy: + kubeConfigFile: /etc/kubernetes/pki/kube-image-bouncer/kube-image-bouncer.yml + allowTTL: 50 + denyTTL: 50 + retryBackoff: 500 + defaultAllow: false +EOF + +OR + +# Define the admission configuration file in json format @ /etc/kubernetes/admission_configuration.json +cat << EOF > admission_configuration.json +{ + "imagePolicy": { + "kubeConfigFile": "/etc/kubernetes/pki/kube-image-bouncer/kube-image-bouncer.yml", + "allowTTL": 50, + "denyTTL": 50, + "retryBackoff": 500, + "defaultAllow": false + } +} +EOF + +# Define the kube config file @ /etc/kubernetes/pki/kube-image-bouncer/kube-image-bouncer.yml + +cat << EOF > kube-image-bouncer.yml +apiVersion: v1 +kind: Config +clusters: +- cluster: + certificate-authority: /etc/kubernetes/pki/kube-image-bouncer/webhook.crt + server: https://image-bouncer-webhook:30080/image_policy + name: bouncer_webhook +contexts: +- context: + cluster: bouncer_webhook + user: api-server + name: bouncer_validator +current-context: bouncer_validator +preferences: {} +users: +- name: api-server + user: + client-certificate: /etc/kubernetes/pki/apiserver.crt + client-key: /etc/kubernetes/pki/apiserver.key +EOF + +``` + +#### Check if can create pods with nginx:latest image + +```bash +kubectl create deploy nginx 
--image nginx +# deployment.apps/nginx created +kubectl get pods -w +# NAME READY STATUS RESTARTS AGE +# nginx-f89759699-5qbv5 1/1 Running 0 13s +kubectl delete deploy nginx +# deployment.apps "nginx" deleted +``` + +#### Enable the admission controller. + +Edit the `/etc/kubernetes/manifests/kube-apiserver.yaml` file as below. + +```yaml + - --enable-admission-plugins=NodeRestriction,ImagePolicyWebhook # update + - --admission-control-config-file=/etc/kubernetes/pki/kube-image-bouncer/admission_configuration.yaml # add +``` + +#### Verify + +Wait for the kube-apiserver to restart and then try creating a deployment with the nginx:latest image. + +```bash +kubectl get deploy nginx +# NAME READY UP-TO-DATE AVAILABLE AGE +# nginx 0/1 0 0 12s + +kubectl get events +# 7s Warning FailedCreate replicaset/nginx-f89759699 (combined from similar events): Error creating: pods "nginx-f89759699-b2r4k" is forbidden: image policy webhook backend denied one or more images: Images using latest tag are not allowed +``` + +
+ +## PodSecurityPolicy + +Refer [Pod Security Policy Admission Controller](./pod_security_policies.md) \ No newline at end of file diff --git a/k8s-certifications/topics/annotations.md b/k8s-certifications/topics/annotations.md new file mode 100644 index 0000000..602425f --- /dev/null +++ b/k8s-certifications/topics/annotations.md @@ -0,0 +1,32 @@ +# [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + +
+ +### Create pod `nginx-annotations` and annotate it with the `description='my description'` value + +
+ +
show

+ +```bash +kubectl run nginx-annotations --image nginx +kubectl annotate pod nginx-annotations description='my description' +``` + +

+ +
+ + \ No newline at end of file diff --git a/k8s-certifications/topics/api_deprecations.md b/k8s-certifications/topics/api_deprecations.md new file mode 100644 index 0000000..097b293 --- /dev/null +++ b/k8s-certifications/topics/api_deprecations.md @@ -0,0 +1,59 @@ +# [Kubernetes API deprecations policy](https://kubernetes.io/docs/reference/using-api/deprecation-policy/) + +
+ +### Given the deployment definition `nginx-deployment` written for an older version of Kubernetes, fix any API deprecation issues in the manifest so that the application can be deployed on a recent Kubernetes cluster. + +```yaml +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + labels: + app: nginx + name: nginx-deployment +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx:1.20 + name: nginx +``` + +
+ +
show

+ +```yaml +apiVersion: apps/v1 # Update from apps/v1beta1 to apps/v1 and apply +kind: Deployment +metadata: + labels: + app: nginx + name: nginx-deployment +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx:1.20 + name: nginx +``` + +

+ +
+ + diff --git a/k8s-certifications/topics/apis.md b/k8s-certifications/topics/apis.md new file mode 100644 index 0000000..07859c2 --- /dev/null +++ b/k8s-certifications/topics/apis.md @@ -0,0 +1,36 @@ +# [APIs](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) + +
+ +### Get all `api-resources` and check their short names and API versions. + +
show

+ +```bash +kubectl api-resources +``` +

+ +
+ +### Get the API group for the `jobs` API + +
show

+ +```bash +kubectl api-resources | grep jobs +#cronjobs cj batch/v1beta1 true CronJob +#jobs batch/v1 true Job +``` + +

+ +
+ +### Enable the `v1alpha1` version for `rbac.authorization.k8s.io` API group on the controlplane node. + +
show

+ +Add `--runtime-config=rbac.authorization.k8s.io/v1alpha1` to the `/etc/kubernetes/manifests/kube-apiserver.yaml` file and let the kube-apiserver restart + +

\ No newline at end of file diff --git a/k8s-certifications/topics/apparmor.md b/k8s-certifications/topics/apparmor.md new file mode 100644 index 0000000..6ad5357 --- /dev/null +++ b/k8s-certifications/topics/apparmor.md @@ -0,0 +1,136 @@ +# [AppArmor](https://kubernetes.io/docs/tutorials/clusters/apparmor/) + +
+ +### Check if AppArmor is available on the cluster + +
+ +```bash +systemctl status apparmor +# ● apparmor.service - AppArmor initialization +# Loaded: loaded (/lib/systemd/system/apparmor.service; enabled; vendor preset: enabled) +# Active: active (exited) since Thu 2021-12-16 02:19:57 UTC; 40s ago +# Docs: man:apparmor(7) +# http://wiki.apparmor.net/ +# Main PID: 312 (code=exited, status=0/SUCCESS) +# Tasks: 0 (limit: 2336) +# CGroup: /system.slice/apparmor.service + +# Dec 16 02:19:57 controlplane systemd[1]: Starting AppArmor initialization... +# Dec 16 02:19:57 controlplane apparmor[312]: * Starting AppArmor profiles +# Dec 16 02:19:57 controlplane apparmor[312]: Skipping profile in /etc/apparmor.d/disable: usr.sbin.rsyslogd +# Dec 16 02:19:57 controlplane apparmor[312]: ...done. +# Dec 16 02:19:57 controlplane systemd[1]: Started AppArmor initialization. +``` + +
+ +### Check if the AppArmor module is loaded and the profiles loaded by AppArmor in different modes. + +
+ +```bash +aa-status +# apparmor module is loaded. +# 12 profiles are loaded. +# 12 profiles are in enforce mode. +# /sbin/dhclient +# /usr/bin/man +# /usr/lib/NetworkManager/nm-dhcp-client.action +# /usr/lib/NetworkManager/nm-dhcp-helper +# /usr/lib/connman/scripts/dhclient-script +# /usr/lib/snapd/snap-confine +# /usr/lib/snapd/snap-confine//mount-namespace-capture-helper +# /usr/sbin/ntpd +# /usr/sbin/tcpdump +# docker-default +# man_filter +# man_groff +# 0 profiles are in complain mode. +# 9 processes have profiles defined. +# 9 processes are in enforce mode. +# /sbin/dhclient (639) +# docker-default (2008) +# docker-default (2026) +# docker-default (2044) +# docker-default (2058) +# docker-default (2260) +# docker-default (2277) +# docker-default (2321) +# docker-default (2334) +# 0 processes are in complain mode. +# 0 processes are unconfined but have a profile defined. +``` + +
+ +### Use the following `k8s-apparmor-example-deny-write` AppArmor profile with the `hello-apparmor` pod. + +
+ +```cpp +cat << EOF > k8s-apparmor-example-deny-write +#include <tunables/global> +profile k8s-apparmor-example-deny-write flags=(attach_disconnected) { + #include <abstractions/base> + file, + # Deny all file writes. + deny /** w, +} +EOF +``` + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: hello-apparmor +spec: + containers: + - name: hello + image: busybox + command: [ "sh", "-c", "echo 'Hello AppArmor!' && sleep 1h" ] +``` + +
show

+ +#### Load the AppArmor profile + +**NOTE** : Profile needs to be loaded on all the nodes. + +```bash +apparmor_parser -q k8s-apparmor-example-deny-write # load the apparmor profile + +aa-status | grep k8s-apparmor-example-deny-write # verify its loaded +# k8s-apparmor-example-deny-write +``` + +#### Enable AppArmor for the pod + +```yaml +cat << EOF > hello-apparmor.yaml +apiVersion: v1 +kind: Pod +metadata: + name: hello-apparmor + annotations: # add apparmor annotations + container.apparmor.security.beta.kubernetes.io/hello: localhost/k8s-apparmor-example-deny-write # add this +spec: + containers: + - name: hello + image: busybox + command: [ "sh", "-c", "echo 'Hello AppArmor!' && sleep 1h" ] +EOF + +kubectl apply -f hello-apparmor.yaml +``` + +#### Verify + +```bash +kubectl exec hello-apparmor -- cat /proc/1/attr/current +# k8s-apparmor-example-deny-write (enforce) +``` + +

\ No newline at end of file diff --git a/k8s-certifications/topics/auditing.md b/k8s-certifications/topics/auditing.md new file mode 100644 index 0000000..0d7cc95 --- /dev/null +++ b/k8s-certifications/topics/auditing.md @@ -0,0 +1,74 @@ +# [Auditing](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/) + + - Kubernetes auditing provides a security-relevant, chronological set of records documenting the sequence of actions in a cluster. + - The cluster audits the activities generated by users, by applications that use the Kubernetes API, and by the control plane itself. + +
+ +### Enable Auditing with the Kubernetes cluster + - Capture all events for `pods` at `RequestResponse` level + - Capture `delete` events for `secrets` in `prod namespace` at `Metadata` level + - Define policy at `/etc/kubernetes/audit-policy.yaml` + - Log should be redirected to `/var/log/kubernetes/audit/audit.log` + - Maximum days to keep the logs is `30` + +
+ +
show

+ +#### Create the audit policy file + +```yaml +cat << EOF > /etc/kubernetes/audit-policy.yaml +apiVersion: audit.k8s.io/v1 # This is required. +kind: Policy +rules: + # Log pod changes at RequestResponse level + - level: RequestResponse + resources: + - group: "" + resources: ["pods"] + + # Log secret delete events in prod namespaces at the Metadata level. + - level: Metadata + verbs: ["delete"] + resources: + - group: "" # core API group + resources: ["secrets"] + namespaces: ["prod"] +EOF +``` + +#### Backup the original file `cp kube-apiserver.yaml kube-apiserver.yaml_org` + +#### Update the `/etc/kubernetes/manifests/kube-apiserver.yaml` to add audit configs and volume mounts. + +```yaml +- --audit-policy-file=/etc/kubernetes/audit-policy.yaml +- --audit-log-path=/var/log/kubernetes/audit/audit.log +- --audit-log-maxage=30 +``` + +```yaml +volumeMounts: + - mountPath: /etc/kubernetes/audit-policy.yaml + name: audit + readOnly: true + - mountPath: /var/log/kubernetes/audit/ + name: audit-log + readOnly: false + +volumes: +- name: audit + hostPath: + path: /etc/kubernetes/audit-policy.yaml + type: File +- name: audit-log + hostPath: + path: /var/log/kubernetes/audit/ + type: DirectoryOrCreate +``` + +#### Check the `/var/log/kubernetes/audit/audit.log` for audit log entries + +

\ No newline at end of file diff --git a/k8s-certifications/topics/authentication.md b/k8s-certifications/topics/authentication.md new file mode 100644 index 0000000..9c972fc --- /dev/null +++ b/k8s-certifications/topics/authentication.md @@ -0,0 +1,132 @@ +# [Authentication](https://kubernetes.io/docs/reference/access-authn-authz/authentication/) + +
+ +## [Certificates API](https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/) + +
+ +### Create a user certificate signing request using certs and specs as below and submit it for approval. + +#### Create user certs + +```bash +openssl genrsa -out normal.key 2048 +openssl req -new -key normal.key -out normal.csr +``` + +#### Use below CertificateSigningRequest specs + +```yaml +cat << EOF > normal-csr.yaml +apiVersion: certificates.k8s.io/v1 +kind: CertificateSigningRequest +metadata: + name: normal-csr +spec: + request: ?? + signerName: kubernetes.io/kube-apiserver-client + usages: + - client auth +EOF +``` + +
+ +
show

+ +```yaml +cat << EOF > normal-csr.yaml +apiVersion: certificates.k8s.io/v1 +kind: CertificateSigningRequest +metadata: + name: normal-csr +spec: + request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ2lqQ0NBWElDQVFBd1JURUxNQWtHQTFVRUJoTUNRVlV4RXpBUkJnTlZCQWdNQ2xOdmJXVXRVM1JoZEdVeApJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MFpEQ0NBU0l3RFFZSktvWklodmNOCkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNellTKzhhTXdBVmkwWHovaVp2Z2k0eGtNWTkyMWZRSmd1bGM2eDYKS0Q4UjNteEMyRkxlWklJSHRYTDZadG5KSHYxY0g0eWtMUEZtR2hDRURVNnRxQ2FpczNaWWV3MVBzVG5nd1Jzego3TG1oeDV4dzVRc3lRaFBkNjRuY3h1MFRJZmFGbmducU9UT0NGWERyaXBtZzJ5TExvbTIxL1ZxbjNQMVJQeE51CjZJdDlBOHB6aURlTVg5VTlaTHhzT0Jld2FzaFJzM29jb3NIcHp5cXN1SnQralVvUjNmaGducVB3UkNBZmQ3YUUKaUhKOWFxblhHVVNUWENXb2g2OEtPL3VkU3p2djNmcExhV1JxUUdHWi9HSWpjM1ZiZzNHN0FqNWNITUp2WHV3bwp3M0JkV1pZaEpycU9Ld21sMW9QVHJRNlhMQ2FBTFZ2NnFqZWVOSFNvOVZyVmM0OENBd0VBQWFBQU1BMEdDU3FHClNJYjNEUUVCQ3dVQUE0SUJBUUFEZGNmMHZVSnVtcmRwcGxOa0pwSERSVFI2ZlFzYk84OFM3cnlndC9vcFEvOCsKNVkyUVVjVzhSUUdpVGdvQjFGUG1FeERVcFRna2p1SEtDQ0l3RWdjc3pPRm5YdC95N1FsWXBuc0E3dG01V1ppUAozbG1xSFpQMU9tQlRBRU45L2swSFpKdjc4Rytmcm0xNnRJbWtzUHpSK2lBajZ2WDZtT1RNVEk3Y1U5cmIvSElLCmVOTTZjV2dYQzYrbU9PbDFqM3BjS1hlVlB0YS9MbDZEVFc0VWdnR0J1NVJPb3FWRS9sTDNQNnc4K2R3M0lWQngKWlBrK0JDNVQrMkZLMFNzd3VvSCtaKzhtbi8weHR2bk1nL3FPTWIwdXVvcDNSTklVZmFhR1pRSjRmSnVrMGdkQwpXZHFselJMREsydXZYcWVFUXFjMENxZmVVdXRGdzVuOWNWZVdvRFVwCi0tLS0tRU5EIENFUlRJRklDQVRFIFJFUVVFU1QtLS0tLQo= # use base64 encoded value of normal.csr file + signerName: kubernetes.io/kube-apiserver-client + usages: + - client auth +EOF + +kubectl apply -f normal-csr.yaml +``` + +#### Verify its submitted and in Pending status + +```bash +kubectl get csr normal-csr +# NAME AGE SIGNERNAME REQUESTOR CONDITION +# normal-csr 37s kubernetes.io/kube-apiserver-client kubernetes-admin Pending +``` + +
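The base64 value placed in `spec.request` above can be generated from the CSR file created earlier (single line, no wrapping):

```bash
cat normal.csr | base64 | tr -d "\n"
```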

+ +
+ +### Approve the `normal-csr` request + +
+ +
show

+ +```bash +kubectl certificate approve normal-csr +# certificatesigningrequest.certificates.k8s.io/normal-csr approved +``` + +#### Verify it's in Approved,Issued status + +```bash +kubectl get csr normal-csr +# NAME AGE SIGNERNAME REQUESTOR CONDITION +# normal-csr 4m15s kubernetes.io/kube-apiserver-client kubernetes-admin Approved,Issued +``` + +

+ +
+ +### Create the below csr request and reject the same. + +
+ +```yaml +cat << EOF > hacker-csr.yaml +apiVersion: certificates.k8s.io/v1 +kind: CertificateSigningRequest +metadata: + name: hacker-csr +spec: + groups: + - system:masters + - system:authenticated + request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ2lqQ0NBWElDQVFBd1JURUxNQWtHQTFVRUJoTUNRVlV4RXpBUkJnTlZCQWdNQ2xOdmJXVXRVM1JoZEdVeApJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MFpEQ0NBU0l3RFFZSktvWklodmNOCkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNellTKzhhTXdBVmkwWHovaVp2Z2k0eGtNWTkyMWZRSmd1bGM2eDYKS0Q4UjNteEMyRkxlWklJSHRYTDZadG5KSHYxY0g0eWtMUEZtR2hDRURVNnRxQ2FpczNaWWV3MVBzVG5nd1Jzego3TG1oeDV4dzVRc3lRaFBkNjRuY3h1MFRJZmFGbmducU9UT0NGWERyaXBtZzJ5TExvbTIxL1ZxbjNQMVJQeE51CjZJdDlBOHB6aURlTVg5VTlaTHhzT0Jld2FzaFJzM29jb3NIcHp5cXN1SnQralVvUjNmaGducVB3UkNBZmQ3YUUKaUhKOWFxblhHVVNUWENXb2g2OEtPL3VkU3p2djNmcExhV1JxUUdHWi9HSWpjM1ZiZzNHN0FqNWNITUp2WHV3bwp3M0JkV1pZaEpycU9Ld21sMW9QVHJRNlhMQ2FBTFZ2NnFqZWVOSFNvOVZyVmM0OENBd0VBQWFBQU1BMEdDU3FHClNJYjNEUUVCQ3dVQUE0SUJBUUFEZGNmMHZVSnVtcmRwcGxOa0pwSERSVFI2ZlFzYk84OFM3cnlndC9vcFEvOCsKNVkyUVVjVzhSUUdpVGdvQjFGUG1FeERVcFRna2p1SEtDQ0l3RWdjc3pPRm5YdC95N1FsWXBuc0E3dG01V1ppUAozbG1xSFpQMU9tQlRBRU45L2swSFpKdjc4Rytmcm0xNnRJbWtzUHpSK2lBajZ2WDZtT1RNVEk3Y1U5cmIvSElLCmVOTTZjV2dYQzYrbU9PbDFqM3BjS1hlVlB0YS9MbDZEVFc0VWdnR0J1NVJPb3FWRS9sTDNQNnc4K2R3M0lWQngKWlBrK0JDNVQrMkZLMFNzd3VvSCtaKzhtbi8weHR2bk1nL3FPTWIwdXVvcDNSTklVZmFhR1pRSjRmSnVrMGdkQwpXZHFselJMREsydXZYcWVFUXFjMENxZmVVdXRGdzVuOWNWZVdvRFVwCi0tLS0tRU5EIENFUlRJRklDQVRFIFJFUVVFU1QtLS0tLQo= + signerName: kubernetes.io/kube-apiserver-client + usages: + - digital signature + - key encipherment + - server auth +EOF + +kubectl apply -f hacker-csr.yaml +``` + +
show

+ +```bash +kubectl certificate deny hacker-csr +# certificatesigningrequest.certificates.k8s.io/hacker-csr denied + +``` + +#### Verify it's in Denied status + +```bash +kubectl get csr hacker-csr +# NAME AGE SIGNERNAME REQUESTOR CONDITION +# hacker-csr 16s kubernetes.io/kube-apiserver-client kubernetes-admin Denied +``` + +

+ +
\ No newline at end of file diff --git a/k8s-certifications/topics/binary_verification.md b/k8s-certifications/topics/binary_verification.md new file mode 100644 index 0000000..89f61dd --- /dev/null +++ b/k8s-certifications/topics/binary_verification.md @@ -0,0 +1,22 @@ +# Verify Platform Binaries + +- Kubernetes provides the binaries and their checksum hashes so that we can verify their authenticity. +- Check the Kubernetes [CHANGELOG](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG) + +
+ +### Download the https://dl.k8s.io/v1.22.0/kubernetes.tar.gz and verify the sha matches with `d1145ec29a8581a4c94a83cefa3658a73bfc7d8e2624d31e735d53551718c9212e477673f74cfa4e430a8367a47bba65e2573162711613e60db54563dc912f00`. + +
+ +
show

+ +```bash +curl https://dl.k8s.io/v1.22.0/kubernetes.tar.gz -L -o kubernetes.tar.gz +shasum -a 512 kubernetes.tar.gz +# d1145ec29a8581a4c94a83cefa3658a73bfc7d8e2624d31e735d53551718c9212e477673f74cfa4e430a8367a47bba65e2573162711613e60db54563dc912f00 kubernetes.tar.gz +``` + +
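An equivalent one-liner (assuming GNU coreutils' `sha512sum` is available; note the two spaces between hash and filename) that fails loudly on a mismatch:

```bash
echo "d1145ec29a8581a4c94a83cefa3658a73bfc7d8e2624d31e735d53551718c9212e477673f74cfa4e430a8367a47bba65e2573162711613e60db54563dc912f00  kubernetes.tar.gz" | sha512sum --check
# kubernetes.tar.gz: OK
```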

+ +
\ No newline at end of file diff --git a/k8s-certifications/topics/cluster_upgrade.md b/k8s-certifications/topics/cluster_upgrade.md new file mode 100644 index 0000000..2233a79 --- /dev/null +++ b/k8s-certifications/topics/cluster_upgrade.md @@ -0,0 +1,328 @@ +# [Cluster Upgrade](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) + +~~**NOTE** - This was performed on the [katacoda playground](https://www.katacoda.com/courses/kubernetes/playground) with two node cluster at v1.18.0 version. It was upgrade to 1.19.3 version. Version 1.19.4 was not used as it had issues upgrading on the worker node.~~ + +
+ +### Upgrade control plane nodes + +
+ +#### Check current version + +```bash +kubectl get nodes + +# NAME STATUS ROLES AGE VERSION +# controlplane Ready master 4m53s v1.18.0 +# node01 Ready 4m25s v1.18.0 +``` + +#### Determine which version to upgrade to - Choosing 1.19.3 + +```bash +apt update +apt-cache madison kubeadm +# kubeadm | 1.19.3-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages +``` + +#### Upgrading control plane nodes + +```bash +# upgrade kubeadm +apt-get update && \ +apt-get install -y --allow-change-held-packages kubeadm=1.19.3-00 + +# Setting up kubernetes-cni (0.8.7-00) ... +# Setting up kubeadm (1.19.3-00) ... +``` + +```bash +# Verify that the download works and has the expected version: +kubeadm version + +# kubeadm version: &version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.3", GitCommit:"1e11e4a2108024935ecfcb2912226cedeafd99df", GitTreeState:"clean", BuildDate:"2020-10-14T12:47:53Z", GoVersion:"go1.15.2", Compiler:"gc", Platform:"linux/amd64"} +``` + +```bash +sudo kubeadm upgrade plan + +# [upgrade/config] Making sure the configuration is correct: +# [upgrade/config] Reading configuration from the cluster... +# [upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' +# [preflight] Running pre-flight checks. +# [upgrade] Running cluster health checks +# [upgrade] Fetching available versions to upgrade to +# [upgrade/versions] Cluster version: v1.18.0 +# [upgrade/versions] kubeadm version: v1.19.3 +# I1217 07:14:14.966727 9206 version.go:252] remote version is much newer: v1.23.1; falling back to: stable-1.19 +# [upgrade/versions] Latest stable version: v1.19.16 +# [upgrade/versions] Latest stable version: v1.19.16 +# [upgrade/versions] Latest version in the v1.18 series: v1.18.20 +# [upgrade/versions] Latest version in the v1.18 series: v1.18.20 + +# Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply': +# COMPONENT CURRENT AVAILABLE +# kubelet 2 x v1.18.0 v1.18.20 + +# Upgrade to the latest version in the v1.18 series: + +# COMPONENT CURRENT AVAILABLE +# kube-apiserver v1.18.0 v1.18.20 +# kube-controller-manager v1.18.0 v1.18.20 +# kube-scheduler v1.18.0 v1.18.20 +# kube-proxy v1.18.0 v1.18.20 +# CoreDNS 1.6.7 1.7.0 +# etcd 3.4.3-0 3.4.3-0 + +# You can now apply the upgrade by executing the following command: + +# kubeadm upgrade apply v1.18.20 + +# _____________________________________________________________________ + +# Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply': +# COMPONENT CURRENT AVAILABLE +# kubelet 2 x v1.18.0 v1.19.16 + +# Upgrade to the latest stable version: + +# COMPONENT CURRENT AVAILABLE +# kube-apiserver v1.18.0 v1.19.16 +# kube-controller-manager v1.18.0 v1.19.16 +# kube-scheduler v1.18.0 v1.19.16 +# kube-proxy v1.18.0 v1.19.16 +# CoreDNS 1.6.7 1.7.0 +# etcd 3.4.3-0 3.4.13-0 + +# You can now apply the upgrade by executing the following command: + +# kubeadm upgrade apply v1.19.16 + +# Note: Before you can perform this upgrade, you have to update kubeadm to v1.19.16. + +# _____________________________________________________________________ + + +# The table below shows the current state of component configs as understood by this version of kubeadm. +# Configs that have a "yes" mark in the "MANUAL UPGRADE REQUIRED" column require manual config upgrade or +# resetting to kubeadm defaults before a successful upgrade can be performed. 
The version to manually +# upgrade to is denoted in the "PREFERRED VERSION" column. + +# API GROUP CURRENT VERSION PREFERRED VERSION MANUAL UPGRADE REQUIRED +# kubeproxy.config.k8s.io v1alpha1 v1alpha1 no +# kubelet.config.k8s.io v1beta1 v1beta1 no +# _____________________________________________________________________ +``` + +```bash +sudo kubeadm upgrade apply v1.19.3 + +# [upgrade/config] Making sure the configuration is correct: +# [upgrade/config] Reading configuration from the cluster... +# [upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' +# [preflight] Running pre-flight checks. +# [upgrade] Running cluster health checks +# [upgrade/version] You have chosen to change the cluster version to "v1.19.3" +# [upgrade/versions] Cluster version: v1.18.0 +# [upgrade/versions] kubeadm version: v1.19.3 +# [upgrade/confirm] Are you sure you want to proceed with the upgrade? [y/N]: y +# [upgrade/prepull] Pulling images required for setting up a Kubernetes cluster +# [upgrade/prepull] This might take a minute or two, depending on the speed of your internet connection +# [upgrade/prepull] You can also perform this action in beforehand using 'kubeadm config images pull' +# [upgrade/apply] Upgrading your Static Pod-hosted control plane to version "v1.19.3"... +# Static pod: kube-apiserver-controlplane hash: 32d269b25126efaf2f4d5b79beada591 +# Static pod: kube-controller-manager-controlplane hash: f9b9c6969be80756638e9cf4927b5881 +# Static pod: kube-scheduler-controlplane hash: 5795d0c442cb997ff93c49feeb9f6386 +# [upgrade/etcd] Upgrading to TLS for etcd +# Static pod: etcd-controlplane hash: 7831b536f3a79e96fe34049ff61c499b +# [upgrade/staticpods] Preparing for "etcd" upgrade +# [upgrade/staticpods] Renewing etcd-server certificate +# [upgrade/staticpods] Renewing etcd-peer certificate +# [upgrade/staticpods] Renewing etcd-healthcheck-client certificate +# [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/etcd.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2021-12-17-07-15-37/etcd.yaml" +# [upgrade/staticpods] Waiting for the kubelet to restart the component +# [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s) +# Static pod: etcd-controlplane hash: 7831b536f3a79e96fe34049ff61c499b +# Static pod: etcd-controlplane hash: 7831b536f3a79e96fe34049ff61c499b +# Static pod: etcd-controlplane hash: f291ed490602f9995ce3fae0c7278fde +# [apiclient] Found 1 Pods for label selector component=etcd +# [upgrade/staticpods] Component "etcd" upgraded successfully! 
+# [upgrade/etcd] Waiting for etcd to become available +# [upgrade/staticpods] Writing new Static Pod manifests to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests684409789" +# [upgrade/staticpods] Preparing for "kube-apiserver" upgrade +# [upgrade/staticpods] Renewing apiserver certificate +# [upgrade/staticpods] Renewing apiserver-kubelet-client certificate +# [upgrade/staticpods] Renewing front-proxy-client certificate +# [upgrade/staticpods] Renewing apiserver-etcd-client certificate +# [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-apiserver.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2021-12-17-07-15-37/kube-apiserver.yaml" +# [upgrade/staticpods] Waiting for the kubelet to restart the component +# [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s) +# Static pod: kube-apiserver-controlplane hash: 32d269b25126efaf2f4d5b79beada591 +# Static pod: kube-apiserver-controlplane hash: 5bd0c975123753bb782dc1caf5ae2380 +# [apiclient] Found 1 Pods for label selector component=kube-apiserver +# [upgrade/staticpods] Component "kube-apiserver" upgraded successfully! +# [upgrade/staticpods] Preparing for "kube-controller-manager" upgrade +# [upgrade/staticpods] Renewing controller-manager.conf certificate +# [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-controller-manager.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2021-12-17-07-15-37/kube-controller-manager.yaml" +# [upgrade/staticpods] Waiting for the kubelet to restart the component +# [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s) +# Static pod: kube-controller-manager-controlplane hash: f9b9c6969be80756638e9cf4927b5881 +# Static pod: kube-controller-manager-controlplane hash: 27ef001ee9e1781a258a9c2a188cd888 +# [apiclient] Found 1 Pods for label selector component=kube-controller-manager +# [upgrade/staticpods] Component "kube-controller-manager" upgraded successfully! +# [upgrade/staticpods] Preparing for "kube-scheduler" upgrade +# [upgrade/staticpods] Renewing scheduler.conf certificate +# [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-scheduler.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2021-12-17-07-15-37/kube-scheduler.yaml" +# [upgrade/staticpods] Waiting for the kubelet to restart the component +# [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s) +# Static pod: kube-scheduler-controlplane hash: 5795d0c442cb997ff93c49feeb9f6386 +# Static pod: kube-scheduler-controlplane hash: c4e7975f4329949f35219b973dfc69c5 +# [apiclient] Found 1 Pods for label selector component=kube-scheduler +# [upgrade/staticpods] Component "kube-scheduler" upgraded successfully! 
+# [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace +# [kubelet] Creating a ConfigMap "kubelet-config-1.19" in namespace kube-system with the configuration for the kubelets in the cluster +# [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" +# [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes +# [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials +# [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token +# [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster +# [addons] Applied essential addon: CoreDNS +# [addons] Applied essential addon: kube-proxy + +# [upgrade/successful] SUCCESS! Your cluster was upgraded to "v1.19.3". Enjoy! + +# [upgrade/kubelet] Now that your control plane is upgraded, please proceed with upgrading your kubelets if you haven't already done so. +``` + +#### Upgrade additional control plane nodes + +```bash +# for any additional control panel nodes (if any) - currently none +sudo kubeadm upgrade node +``` + +#### Drain the control plane node + +```bash +kubectl drain controlplane --ignore-daemonsets + +# node/controlplane cordoned +# WARNING: ignoring DaemonSet-managed Pods: kube-system/kube-flannel-ds-amd64-jjlnt, kube-system/kube-proxy-np9zl +# evicting pod kube-system/coredns-f9fd979d6-j6w5s +# pod/coredns-f9fd979d6-j6w5s evicted +# node/controlplane evicted +``` + +#### Upgrade kubelet and kubectl + +```bash +apt-get update && \ +apt-get install -y --allow-change-held-packages kubelet=1.19.3-00 kubectl=1.19.3-00 + +# Unpacking kubelet (1.19.3-00) over (1.18.0-00) ... +# Setting up kubelet (1.19.3-00) ... +# Setting up kubectl (1.19.3-00) ... + +sudo systemctl daemon-reload +sudo systemctl restart kubelet +``` + +#### Uncordon the control plane node + +```bash +kubectl uncordon controlplane +# node/controlplane uncordoned +``` + +#### Check the nodes + +```bash +kubectl get nodes +NAME STATUS ROLES AGE VERSION +# controlplane Ready master 15m v1.19.3 +# node01 Ready 14m v1.18.0 +``` + +
+ +### Upgrade worker nodes + +
+ +#### Upgrade kubeadm +```bash +apt update +apt-cache madison kubeadm + +apt-get update && \ +apt-get install -y --allow-change-held-packages kubeadm=1.19.3-00 +# Unpacking kubeadm (1.19.3-00) over (1.18.0-00) ... +# Setting up kubernetes-cni (0.8.7-00) ... +# Setting up kubeadm (1.19.3-00) ... +``` + +#### Upgrade the kubelet configuration + +```bash +sudo kubeadm upgrade node + +# [upgrade] Reading configuration from the cluster... +# [upgrade] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' +# [preflight] Running pre-flight checks +# [preflight] Skipping prepull. Not a control plane node. +# [upgrade] Skipping phase. Not a control plane node. +# [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" +# [upgrade] The configuration for this node was successfully updated! +# [upgrade] Now you should go ahead and upgrade the kubelet package using your package manager. +``` + +#### Drain the node - Execute this on the master/control panel node + +```bash +kubectl drain node01 --ignore-daemonsets + +# node/node01 cordoned +# WARNING: ignoring DaemonSet-managed Pods: kube-system/kube-flannel-ds-amd64-26gz5, kube-system/kube-keepalived-vip-dskqw, kube-system/kube-proxy-jwpgs +# evicting pod kube-system/coredns-f9fd979d6-gjfpn +# evicting pod kube-system/coredns-f9fd979d6-xvh8h +# evicting pod kube-system/katacoda-cloud-provider-5f5fc5786f-565r6 +# pod/katacoda-cloud-provider-5f5fc5786f-565r6 evicted +# pod/coredns-f9fd979d6-gjfpn evicted +# pod/coredns-f9fd979d6-xvh8h evicted +# node/node01 evicted +``` + +#### Upgrade kubelet and kubectl + +```bash +apt-get update && \ +> apt-get install -y --allow-change-held-packages kubelet=1.19.3-00 kubectl=1.19.3-00 + +# .... +# kubectl is already the newest version (1.19.3-00). +# kubelet is already the newest version (1.19.3-00). +# The following packages were automatically installed and are no longer required: +# libc-ares2 libhttp-parser2.7.1 libnetplan0 libuv1 nodejs-doc python3-netifaces +# Use 'apt autoremove' to remove them. +# 0 upgraded, 0 newly installed, 0 to remove and 201 not upgraded. + +sudo systemctl daemon-reload +sudo systemctl restart kubelet +``` + +#### Uncordon the node - Execute this on the master/control panel node + +```bash +kubectl uncordon node01 +# node/node01 uncordoned +``` + +#### Verify nodes are upgraded + +```shell +kubectl get nodes +# NAME STATUS ROLES AGE VERSION +# controlplane Ready master 22m v1.19.3 +# node01 Ready 22m v1.19.3 +``` \ No newline at end of file diff --git a/k8s-certifications/topics/configmaps.md b/k8s-certifications/topics/configmaps.md new file mode 100644 index 0000000..bbcf9b9 --- /dev/null +++ b/k8s-certifications/topics/configmaps.md @@ -0,0 +1,260 @@ +# [ConfigMaps](https://kubernetes.io/docs/concepts/configuration/configmap/) + + - A ConfigMap is an API object used to store non-confidential data in key-value pairs. + - Pods can consume ConfigMaps as environment variables, command-line arguments, or as configuration files in a volume. + - A ConfigMap allows you to decouple environment-specific configuration from your container images, so that your applications are easily portable. + +
+ +### Check the configmaps on the cluster in the default namespace + +
+ +
show

+ +```bash +kubectl get configmaps +# OR +kubectl get cm +``` + +

+ +
+ +### Check the configmaps on the cluster in all the namespaces + +
+ +
show

+ +```bash +kubectl get configmaps --all-namespaces +# OR +kubectl get configmaps -A +``` +

+ +
+ +### Create a new pod `nginx-1` with `nginx` image and add env variable for `DB_HOST=db.example.com`, `DB_USER=development`, `DB_PASSWD=password` + +
+ +
show

+ +```bash +kubectl run nginx-1 --image=nginx --env="DB_HOST=db.example.com" --env="DB_USER=development" --env="DB_PASSWD=password" +``` + +```bash +# verify env variables +kubectl exec nginx-1 -- env | grep DB_ +# DB_HOST=db.example.com +# DB_USER=development +# DB_PASSWD=password +``` + +

+ +
+ +### Create a configmap named `db-config-1` with data `DB_HOST=db.example.com`, `DB_USER=development`, `DB_PASSWD=password` + +
+ +
show

+ +```bash +kubectl create configmap db-config-1 --from-literal=DB_HOST=db.example.com --from-literal=DB_USER=development --from-literal=DB_PASSWD=password +``` + +OR + +```yaml +cat << EOF > db-config-1.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: db-config-1 +data: + DB_HOST: db.example.com + DB_PASSWD: password + DB_USER: development +EOF + +kubectl apply -f db-config-1.yaml +``` + +```bash +# verify +kubectl describe configmap db-config-1 +# Name: db-config-1 +# Namespace: default +# Labels: +# Annotations: + +# Data +# ==== +# DB_USER: +# ---- +# development +# DB_HOST: +# ---- +# db.example.com +# DB_PASSWD: +# ---- +# password +``` + +

+ +
+ +### Create a configmap named `db-config-2` with data from file `db.properties` + +
+ +```bash +cat << EOT > db.properties +DB_HOST=db.example.com +DB_USER=development +DB_PASSWD=password +EOT +``` + +
show

+ +```bash +kubectl create configmap db-config-2 --from-file=db.properties +``` + +```bash +# verify +kubectl describe configmap db-config-2 +# Name: db-config-2 +# Namespace: default +# Labels: +# Annotations: + +# Data +# ==== +# db.properties: +# ---- +# DB_HOST=db.example.com +# DB_USER=development +# DB_PASSWD=password +``` + +
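Note that `--from-file` stores the whole file under a single key (`db.properties`). If individual keys per line are wanted instead, `--from-env-file` can be used; the configmap name below is just an example, not part of the task:

```bash
kubectl create configmap db-config-2-env --from-env-file=db.properties
kubectl describe configmap db-config-2-env   # shows DB_HOST, DB_USER and DB_PASSWD as separate keys
```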

+ +
+ +### Create a new pod `nginx-2` with `nginx` image and add an env variable for `DB_HOST` from the configmap `db-config-1` + +
+ +
show

+ +```yaml +cat << EOF > nginx-2.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-2 +spec: + containers: + - image: nginx + name: nginx-2 + env: + - name: DB_HOST + valueFrom: + configMapKeyRef: + name: db-config-1 + key: DB_HOST +EOF + +kubectl apply -f nginx-2.yaml + +kubectl exec nginx-2 -- env | grep DB_HOST # verify env variables +# DB_HOST=db.example.com +``` + +

+ +
+ +### Create a new pod `nginx-3` with `nginx` image and add all env variables from the configmap `db-config-1` + +
+ +
show

+ +```yaml +cat << EOF > nginx-3.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-3 +spec: + containers: + - image: nginx + name: nginx-3 + envFrom: + - configMapRef: + name: db-config-1 +EOF + +kubectl apply -f nginx-3.yaml + +kubectl exec nginx-3 -- env | grep DB_ # verify env variables +# DB_HOST=db.example.com +# DB_PASSWD=password +# DB_USER=development +``` + +

+ +
+ +### Create a new pod `nginx-4` with `nginx` image and mount the configmap `db-config-1` as a volume named `db-config` and mount path `/config` + +
show

+ +```yaml +cat << EOF > nginx-4.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-4 +spec: + containers: + - image: nginx + name: nginx-4 + volumeMounts: + - name: db-config + mountPath: "/config" + readOnly: true + volumes: + - name: db-config + configMap: + name: db-config-1 +EOF + +kubectl apply -f nginx-4.yaml + +kubectl exec nginx-4 -- cat /config/DB_HOST # verify env variables +# db.example.com +``` + +

+ +
+ +### Clean up + +```bash +kubectl delete pod nginx-1 nginx-2 nginx-3 nginx-4 --force --grace-period=0 +kubectl delete configmap db-config-1 db-config-2 +rm db.properties nginx-2.yaml nginx-3.yaml nginx-4.yaml +``` diff --git a/k8s-certifications/topics/daemonsets.md b/k8s-certifications/topics/daemonsets.md new file mode 100644 index 0000000..eebc959 --- /dev/null +++ b/k8s-certifications/topics/daemonsets.md @@ -0,0 +1,74 @@ +# [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) + +A DaemonSet ensures that all (or some) Nodes run a copy of a Pod. As nodes are added to the cluster, Pods are added to them. As nodes are removed from the cluster, those Pods are garbage collected. Deleting a DaemonSet will clean up the Pods it created. + +
+ +### Get the daemonset in all namespaces + +
show

+ +```bash +kubectl get daemonsets --all-namespaces +# OR +kubectl get ds -A +``` + +

+ +
+ +### Ensure a single instance of pod nginx is running on each node of the Kubernetes cluster, where nginx also represents the image name to be used. Do not override any taints currently in place. + +
show

+ +```bash +kubectl create deploy nginx --image=nginx --dry-run=client -o yaml > nginx-ds.yaml +``` + +#### Edit the deployment to daemonset + +```yaml +cat << EOF > nginx-ds.yaml +apiVersion: apps/v1 +kind: DaemonSet # Update from Deployment to DaemonSet +metadata: + labels: + app: nginx + name: nginx +spec: +# replicas: 1 - remove replicas + selector: + matchLabels: + app: nginx +# strategy: {} - remove strategy + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx + name: nginx + resources: {} +EOF + +kubectl apply -f nginx-ds.yaml + +kk get pods -o wide +# NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +# nginx-5k7dk 1/1 Running 0 6m10s 10.244.1.3 node01 + +kk get daemonset +# NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +# nginx 1 1 1 1 1 6m24s + +kk get ds +# NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +# nginx 1 1 1 1 1 6m30s + +``` + +

+ +
\ No newline at end of file diff --git a/k8s-certifications/topics/debugging.md b/k8s-certifications/topics/debugging.md new file mode 100644 index 0000000..3ee1839 --- /dev/null +++ b/k8s-certifications/topics/debugging.md @@ -0,0 +1,205 @@ +# [Kubernetes Debugging](https://kubernetes.io/docs/tasks/debug/) + +
+ +### The given Deployment definition `nginx-deployment` does not work. Identify and fix the problems by updating the associated resources so that the Deployment works. + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + kind: frontend + name: nginx-deployment +spec: + replicas: 3 + selector: + matchLabels: + kind: frontend + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx + name: nginx +``` + +
+ +
show

+ +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + kind: frontend + name: nginx-deployment +spec: + replicas: 3 + selector: + matchLabels: + app: nginx # Update the selector label to app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx + name: nginx +``` + +

+ +
+ +### The given Deployment definition `nginx-deployment` is exposed using the Service `frontend-svc`. Identify and fix the problems by updating the associated resources so that the Service works. + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + kind: frontend + name: nginx-deployment +spec: + replicas: 3 + selector: + matchLabels: + kind: frontend + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx + name: nginx +--- +apiVersion: v1 +kind: Service +metadata: + labels: + kind: frontend + name: frontend-svc +spec: + ports: + - port: 8080 + protocol: TCP + targetPort: 8080 + selector: + kind: frontend +status: + loadBalancer: {} +``` + +
+ +
show

+ +```yaml +apiVersion: v1 +kind: Service +metadata: + labels: + kind: frontend + name: frontend-svc +spec: + ports: + - port: 8080 # Update the port to 80 + protocol: TCP + targetPort: 8080 # Update the port to 80 + selector: + kind: frontend # Update the selector label to app: nginx +status: + loadBalancer: {} +``` + +
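A quick way to confirm the selector fix took effect is to check that the Service now has endpoints backing it (object names as above):

```bash
kubectl get endpoints frontend-svc   # should now list one <pod IP>:80 entry per running replica
```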

+ +
+ +### A Deployment named `web` is exposed via Ingress `web-ingress`. The Deployment is supposed to be reachable at http://dk8s.local/web-ingress, but requesting this URL is currently returning an error. Identify and fix the problems by updating the associated resources so that the Deployment becomes externally reachable as planned. + +```bash +kubectl create deployment web --image=gcr.io/google-samples/hello-app:1.0 +kubectl expose deployment web --name web-svc --port 80 +``` + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: web-ingress + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$1 +spec: + rules: + - http: + paths: + - backend: + service: + name: web + port: + number: 80 + path: / + pathType: Prefix +status: + loadBalancer: {} +``` + +
+ +
show

+ +```yaml +apiVersion: v1 +kind: Service +metadata: + labels: + app: web + name: web-svc +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 8080 # Update target port to 8080 as exposed by the deployment + selector: + app: web + type: ClusterIP +status: + loadBalancer: {} +``` + +**NOTE**: The ingress might not work if there are ingress controllers deployed on the cluster. + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: web-ingress + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$1 +spec: + rules: + - host: hello-world.info # add host entry + http: + paths: + - backend: + service: + name: web # update to web-svc + port: + number: 80 + path: / + pathType: Prefix +status: + loadBalancer: {} +``` + +

+ +
+ diff --git a/k8s-certifications/topics/deployments.md b/k8s-certifications/topics/deployments.md new file mode 100644 index 0000000..4cc5465 --- /dev/null +++ b/k8s-certifications/topics/deployments.md @@ -0,0 +1,905 @@ +# [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) + +A Deployment provides declarative updates for Pods and ReplicaSets. + + 1. [Basics](#basics) + 2. [Deployment HA & Self Healing](#deployment-self-healing) + 2. [Deployment Scaling](#deployment-scaling) + 3. [Deployment Rollout](#deployment-rollout) + 4. [Deployment Deletion](#deployment-deletion) + 5. [HPA](#hpa) + +
+ +## Basics + +
+ +### Check number of deployments in the default namespace + +
+ +
show

+ +```bash +kubectl get deployments +# OR +kubectl get deploy +``` + +

+ +
+ +### Create deployment named `nginx-deployment` with `nginx:1.20` image with `3` replicas + +
+ +
show

+ +```bash +kubectl create deploy nginx-deployment --image nginx:1.20 && kubectl scale deploy nginx-deployment --replicas 3 +# deployment.apps/nginx-deployment created +# deployment.apps/nginx-deployment scaled + +kubectl get replicaset # check the replica set created +# NAME DESIRED CURRENT READY AGE +# nginx-deployment-bd78d5dc6 3 3 3 37s + +``` + +OR + +```yaml +cat << EOF > nginx-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nginx + name: nginx-deployment +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx:1.20 + name: nginx +EOF + +kubectl apply -f nginx-deployment.yaml + +``` + +

+ +
+ +### View the YAML of `nginx-deployment` deployment + +
+ +
show

+ +```bash +kubectl get deploy nginx-deployment -o yaml +``` + +

+ +
+ +### Create a new deployment for running nginx with the following parameters: +- Name the deployment `frontend` and configure it with 4 replicas +- Configure the pod with a container image of nginx:1.21 +- Set an environment variable of NGINX_PORT=8080 and also expose that port for the container above + +
+ +
show

+ +```bash +kubectl create deployment frontend --replicas=4 --image=nginx:1.21 --dry-run=client -o yaml > frontend.yaml +# --replicas is newly introduced and if it does not work use it without the replicas +kubectl create deployment frontend --image=nginx:1.21 --dry-run=client -o yaml > frontend.yaml +``` + +#### Edit the frontend.yaml for replicas, port and env variable + +```yaml +cat << EOF > frontend.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + app: frontend + name: frontend +spec: + replicas: 4 + selector: + matchLabels: + app: frontend + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + app: frontend + spec: + containers: + - image: nginx:1.21 + name: nginx + ports: + - containerPort: 8080 + env: + - name: NGINX_PORT + value: "8080" + resources: {} +status: {} +EOF + +kubectl apply -f frontend.yaml +``` + +

+ +
+ +### Create a deployment as follows: +- Name: nginx-random using the nginx image +- Exposed via a service nginx-random +- Ensure that the service & pod are accessible via their respective DNS records +- Use the utility nslookup to lookup the DNS records of the service & pod + +
+ +
show

+ +```bash +kubectl create deployment nginx-random --image=nginx +kubectl expose deployment nginx-random --name=nginx-random --port=80 --target-port=80 +``` + +#### Verify the nslookup works. (Busybox latest version had issues with service nslookup, so using dnsutils) + +```yaml +cat << EOF > dnsutils.yaml +apiVersion: v1 +kind: Pod +metadata: + name: dnsutils +spec: + containers: + - name: dnsutils + image: k8s.gcr.io/e2e-test-images/jessie-dnsutils:1.3 + command: + - sleep + - "3600" + imagePullPolicy: IfNotPresent + restartPolicy: Always +EOF + +kubectl apply -f dnsutils.yaml + +kubectl exec dnsutils -- nslookup nginx-random +# Server: 10.96.0.10 +# Address: 10.96.0.10#53 + +# Name: nginx-random.default.svc.cluster.local +# Address: 10.110.119.135 + +kubectl get pods -l app=nginx-random -o wide +# NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +# nginx-random-77fb464776-sbp8v 1/1 Running 0 8m25s 10.50.0.8 controlplane + +kubectl exec dnsutils -- nslookup 10.50.0.8 +# Server: 10.96.0.10 +# Address: 10.96.0.10#53 + +# 8.0.50.10.in-addr.arpa name = 10-50-0-8.nginx-random.default.svc.cluster.local. + +``` + +

+ +
+ +## Deployment Self healing + +
+ +### Create a deployment named nginx-ha using nginx image with 3 replicas to test self-healing properties. Delete all the pods and check the behaviour. + +
+ +
show

+ +#### Create deployment + +```bash +kubectl create deployment nginx-ha --image=nginx --replicas=3 + +kubectl get pods -l app=nginx-ha +# NAME READY STATUS RESTARTS AGE +# nginx-ha-684994c76-2j4w8 1/1 Running 0 57s +# nginx-ha-684994c76-7ssm8 1/1 Running 0 57s +# nginx-ha-684994c76-kdp28 1/1 Running 0 57s +``` + +#### Delete all the pods and check behaviour as new pods are created. + +```bash +kubectl delete pods -l app=nginx-ha --force + +kubectl get pods -l app=nginx-ha -w +# NAME READY STATUS RESTARTS AGE +# nginx-ha-684994c76-m5n28 0/1 ContainerCreating 0 3s +# nginx-ha-684994c76-pqfj4 0/1 ContainerCreating 0 3s +# nginx-ha-684994c76-qxgfl 0/1 ContainerCreating 0 2s +# nginx-ha-684994c76-pqfj4 1/1 Running 0 7s +# nginx-ha-684994c76-m5n28 1/1 Running 0 9s +# nginx-ha-684994c76-qxgfl 1/1 Running 0 8s +``` + +

+ +
+ +## Deployment Scaling + +
+ +### Scale up the `nginx-deployment` from 3 replicas to 5 replicas + +
+ +
show

+ +```bash +kubectl scale deployment nginx-deployment --replicas=5 +``` + +OR + +#### Edit the replica set definition file and use `kubectl apply -f nginx-deployment.yaml` + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nginx + name: nginx-deployment +spec: + replicas: 5 # Update the replicas count + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx:1.20 + name: nginx +``` + +

+ +
+ +### Scale down the `nginx-deployment` from 5 replicas to 3 replicas + +
+ +
show

+ +```bash +kubectl scale deployment nginx-deployment --replicas=3 +``` + +OR + +#### Edit the replica set definition file and use `kubectl apply -f nginx-deployment.yaml` + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nginx + name: nginx-deployment +spec: + replicas: 3 # Update the replicas count + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx:1.20 + name: nginx +``` + +

+ +
+ +### Scale the deployment with below specs for availability, and create a service to expose the deployment within your infrastructure. Start with the deployment named ha-deployment which has already been deployed to the namespace ha . +Edit it to: +- create namespace ha +- Add the func=frontend key/value label to the pod template metadata to identify the pod for the service definition +- Have 4 replicas +- Exposes the service on TCP port 8080 +- is mapped to the pods defined by the specification of ha-deployment +- Is of type NodePort +- Has a name of cherry + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: ha-deployment + name: ha-deployment +spec: + replicas: 1 + selector: + matchLabels: + app: ha-deployment + strategy: {} + template: + metadata: + labels: + app: ha-deployment + spec: + containers: + - image: nginx + name: nginx + resources: {} +status: {} +EOF +``` + +
show

+ +#### Create namespace `ha` + +```bash +kubectl create namespace ha +``` + +#### Edit the deployment specs for 4 replicas and label + +```yaml +cat << EOF > ha-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: ha-deployment + name: ha-deployment +spec: + replicas: 4 # 4 replicas + selector: + matchLabels: + app: ha-deployment + strategy: {} + template: + metadata: + labels: + app: ha-deployment + func: frontend # label added to pod + spec: + containers: + - image: nginx + name: nginx + resources: {} +status: {} +EOF + +kubectl apply -f ha-deployment.yaml -n ha + +kubectl get pods -n ha +# NAME READY STATUS RESTARTS AGE +# ha-deployment-66b7f8d45b-4pndp 1/1 Running 0 22s +# ha-deployment-66b7f8d45b-5r77r 1/1 Running 0 22s +# ha-deployment-66b7f8d45b-7hq7q 1/1 Running 0 22s +# ha-deployment-66b7f8d45b-szklj 1/1 Running 0 22s +``` + +#### Expose the deployment as a service with name cherry + +```bash +kubectl expose deployment ha-deployment --name cherry --type NodePort --port 8080 --target-port 80 --namespace ha + +kubectl get svc -n ha +# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +# cherry NodePort 10.104.241.152 8080:30321/TCP 20s + +# test using any node ip and node port +# curl http://:30321/ +``` + +

+ +
+ +## Deployment Rollout + +
+ +### Check the rollout for `nginx-deployment` deployment + +
+ +
show

+ +```bash +kubectl rollout status deploy nginx-deployment +# deployment "nginx-deployment" successfully rolled out +``` + +

+ +
+ +### Update the `nginx-deployment` deployment image to `nginx:1.20.2` + +
+ +
show

+ +```bash +kubectl set image deploy nginx-deployment nginx=nginx:1.20.2 +# deployment.apps/nginx-deployment image updated +``` + +OR + +#### Update the `nginx-deployment.yaml` file and `kubectl apply -f nginx-deployment.yaml` + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nginx + name: nginx-deployment +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx:1.20.2 # Update the image + name: nginx +``` + +
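Optionally, a change cause can be recorded so that `kubectl rollout history` shows something more useful than `<none>` in the CHANGE-CAUSE column (the annotation text itself is arbitrary):

```bash
kubectl annotate deployment nginx-deployment kubernetes.io/change-cause="image updated to nginx:1.20.2"
```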

+ +
+ +### Check the rollout history for `nginx-deployment` deployment and confirm that the replicas are OK + +
+ +
show

+ +```bash +kubectl rollout history deploy nginx-deployment +# deployment.apps/nginx-deployment +# REVISION CHANGE-CAUSE +# 1 +# 2 + +kubectl get replicaset # check that a new replica set has been created +# NAME DESIRED CURRENT READY AGE +# nginx-deployment-7cd9b6bb76 3 3 3 36s +# nginx-deployment-bd78d5dc6 0 0 0 2m40s +``` + +

+ +
+ +### Undo the latest rollout and verify that new pods have the old image (nginx:1.20) + +
+ +
show

+ +```bash +kubectl rollout undo deploy nginx-deployment # wait a bit +# deployment.apps/nginx-deployment rolled back + +# verify the rollback + +kubectl rollout history deploy nginx-deployment +# deployment.apps/nginx-deployment +# REVISION CHANGE-CAUSE +# 2 +# 3 + +kubectl get replicaset +# NAME DESIRED CURRENT READY AGE +# nginx-deployment-7cd9b6bb76 0 0 0 4m12s +# nginx-deployment-bd78d5dc6 3 3 3 6m16s + +kubectl get pod # select one 'Running' Pod + +kubectl describe pod nginx-deployment-xxx-xxx | grep -i image # should be nginx:1.20 - need to update !! + +``` + +

+ +
+ +### Deliberately update the deployment with a wrong image `nginx:1.202.333` + +
+ +
show

+ +```bash +kubectl set image deploy nginx-deployment nginx=nginx:1.202.333 +``` + +OR + +#### Update the `nginx-deployment.yaml` file and `kubectl apply -f nginx-deployment.yaml` + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nginx + name: nginx-deployment +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx:1.202.333 # Update the image + name: nginx +``` + +

+ +
+ +### Verify that something's wrong with the rollout + +
+ +
show

+ +```bash +kubectl rollout status deploy nginx-deployment # would show 'Waiting for deployment "nginx-deployment" rollout to finish: 1 out of 3 new replicas have been updated...' + +kubectl get pod # would show status as 'ErrImagePull' or 'ImagePullBackOff' +# NAME READY STATUS RESTARTS AGE +# nginx-deployment-68b88f4dcf-8drvq 0/1 ErrImagePull 0 71s +# nginx-deployment-bd78d5dc6-59x4r 1/1 Running 0 7m16s +# nginx-deployment-bd78d5dc6-cxg7l 1/1 Running 0 7m19s +# nginx-deployment-bd78d5dc6-xxkdj 1/1 Running 0 7m14s +``` + +

+ +
+ +### Return the deployment to the second revision (number 2) and verify the image is nginx:1.20.2 + +
+ +
show

+ +```bash +kubectl rollout undo deploy nginx-deployment --to-revision=2 + +# verify +kubectl rollout history deploy nginx-deployment +# deployment.apps/nginx-deployment +# REVISION CHANGE-CAUSE +# 3 +# 4 +# 5 + +kubectl describe deploy nginx-deployment | grep Image: # should show nginx:1.20.2 + +kubectl rollout status deploy nginx-deployment # Everything should be OK + +``` + +

+ +
+ +### Check the details of the fourth revision (number 4) + +
+ +
show

+ +```bash +kubectl rollout history deploy nginx-deployment --revision=4 # check the wrong image displayed here +# deployment.apps/nginx-deployment with revision #4 +# Pod Template: +# Labels: app=nginx-deployment +# pod-template-hash=68b88f4dcf +# Containers: +# nginx: +# Image: nginx:1.202.333 +# Port: +# Host Port: +# Environment: +# Mounts: +# Volumes: +``` + +

+ +
+ +## Deployment Deletion + +
+ +### Delete the `nginx-deployment` deployment + +
+ +
show

+ +```bash +kubectl delete deployment nginx-deployment +# OR +kubectl delete -f nginx-deployment.yaml +``` + +

+ +
+ +### As a Kubernetes application developer you will often find yourself needing to update a running application. Please complete the following using the following specs: +- Update the web1 deployment with a maxSurge of 5% and a maxUnavailable of 2% +- Perform a rolling update of the web1 deployment, changing the nginx image version to 1.21 +- Roll back the web1 deployment to the previous version + +```yaml +cat << EOF > web1.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: web1 + name: web1 +spec: + replicas: 10 + selector: + matchLabels: + app: web1 + template: + metadata: + labels: + app: web1 + spec: + containers: + - image: nginx:1.12-alpine + name: web1 +EOF + +kubectl apply -f web1.yaml +``` + +
show

+ +#### Edit the deployment to update the rolling update strategy for maxSurge & maxUnavailable + +```yaml +cat << EOF > web1.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: web1 + name: web1 +spec: + replicas: 10 + selector: + matchLabels: + app: web1 + strategy: + rollingUpdate: # update the rolling update strategy for maxSurge & maxUnavailable + maxSurge: 5% + maxUnavailable: 2% + type: RollingUpdate + template: + metadata: + labels: + app: web1 + spec: + containers: + - image: nginx:1.20-alpine + name: web1 +EOF + +kubectl apply -f web1.yaml +``` + +#### Update the image to 1.21-alpine + +```bash +kubectl set image deployment web1 web1=nginx:1.21-alpine +``` + +#### Check rollout history and undo rollout + +```bash +kubectl rollout history deployment web1 +# deployment.apps/web1 +# REVISION CHANGE-CAUSE +# 1 +# 2 +# 3 + +kubectl rollout history deploy web1 --revision=3 +# deployment.apps/web1 with revision #3 +# Pod Template: +# Labels: app=web1 +# pod-template-hash=848b67cbfc +# Containers: +# web1: +# Image: nginx:1.21-alpine +# Port: +# Host Port: +# Environment: +# Mounts: +# Volumes: + +kubectl rollout undo deploy web1 --to-revision=2 +# deployment.apps/web1 rolled back + +kubectl rollout history deployment web1 +# deployment.apps/web1 +# REVISION CHANGE-CAUSE +# 1 +# 3 +# 4 +``` + +
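An optional sanity check that the new strategy settings were applied (output formatting varies slightly between kubectl versions):

```bash
kubectl get deployment web1 -o jsonpath='{.spec.strategy.rollingUpdate}{"\n"}'
```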

+ +
+ +### Create a deployment as follows: + - Name: nginx-app + - Using container nginx with version 1.20-alpine + - The deployment should contain 3 replicas + - Next, deploy the application with new version 1.21.4-alpine, by performing a rolling update. + - Finally, roll back that update to the previous version 1.20-alpine. + +
+ +
show

+ +```bash +kubectl create deployment nginx-app --image=nginx:1.20-alpine --replicas=3 +kubectl set image deployment nginx-app nginx=nginx:1.21.4-alpine # use the container name, which is nginx here +# deployment.apps/nginx-app image updated +kubectl rollout history deployment nginx-app +# deployment.apps/nginx-app +# REVISION CHANGE-CAUSE +# 1 +# 2 +kubectl rollout undo deploy nginx-app +# deployment.apps/nginx-app rolled back +kubectl rollout history deployment nginx-app +# deployment.apps/nginx-app +# REVISION CHANGE-CAUSE +# 2 +# 3 +``` + +

+ +
+ +## Troubleshooting + +
+ +### A deployment is failing on the cluster. Identify the issue and fix the problem. + +```yaml +cat << EOF > nginx-fix.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nginx-fix + name: nginx-fix +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nqinx + name: nginx +EOF + +kubectl apply -f nginx-fix.yaml +``` + +
show

+ +#### The image nqinx is invalid. Change the image to nginx. + +```bash +kubectl get deploy nginx-fix +# NAME READY UP-TO-DATE AVAILABLE AGE +# nginx-fix 0/3 3 0 16s + +kubectl get pods -l app=nginx +# NAME READY STATUS RESTARTS AGE +# nginx-fix-7cf9964fc7-9bkln 0/1 ErrImagePull 0 38s +# nginx-fix-7cf9964fc7-jcz6p 0/1 ErrImagePull 0 38s +# nginx-fix-7cf9964fc7-n5dqh 0/1 ErrImagePull 0 38s + +kubectl describe pod nginx-fix-7cf9964fc7-9bkln +# Warning Failed 1s (x4 over 97s) kubelet, node01 Failed to pull image "nqinx": rpc error: code = Unknown desc = Error response from daemon: pull access denied for nqinx, repository does not exist or may require 'docker login': denied: requested access to the resource is denied +# Warning Failed 1s (x4 over 97s) kubelet, node01 Error: ErrImagePull + +# fix the image (the container is named nginx) +kubectl set image deployment.v1.apps/nginx-fix nginx=nginx + +kubectl get pods -l app=nginx +# NAME READY STATUS RESTARTS AGE +# nginx-fix-f89759699-gn8q9 1/1 Running 0 27s +# nginx-fix-f89759699-lmwpc 1/1 Running 0 30s +# nginx-fix-f89759699-vbpln 1/1 Running 0 38s + +``` + +

+ +### Clean up + +
+ +```bash +rm nginx-deployment.yaml web1.yaml dnsutils.yaml +kubectl delete pod dnsutils +kubectl delete deploy nginx-deployment nginx-random web1 nginx-app +``` diff --git a/k8s-certifications/topics/docker.md b/k8s-certifications/topics/docker.md new file mode 100644 index 0000000..8c4fd67 --- /dev/null +++ b/k8s-certifications/topics/docker.md @@ -0,0 +1,59 @@ +# [Docker](https://docs.docker.com/get-started/overview/) + +
+ +### Given the Dockerfile below: +- Build a container image with the name nginxer and tag 3.0. +- Export the built container image in OCI format and store it at `nginxer-3.0.tar` +- Run a container from image `nginxer:3.0` with name `nginxer-go` exposing port `80` + +```bash +FROM nginx:alpine +CMD ["nginx", "-g", "daemon off;"] +``` + +
+ +
show

+ +```bash +docker build . -t nginxer:3.0 +docker save nginxer:3.0 -o nginxer-3.0.tar +docker run --name nginxer-go -p 80:80 nginxer:3.0 +``` + +
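Optional verification from a second shell (the `docker run` above stays in the foreground because `-d` is not used; this assumes port 80 is free on the host):

```bash
docker images nginxer:3.0            # image built and tagged
ls -lh nginxer-3.0.tar               # exported archive exists
docker ps --filter name=nginxer-go   # container is up
curl -s http://localhost:80 | head -n 4
```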

+ +
+ +### Given the docker file which creates an alpine container exposing port 80. Apply 2 best practices to improve security of the image. + +```bash +FROM alpine:3.12 +RUN adduser -D myuser && chown -R myuser /myapp-data +USER root +ENTRYPOINT ["/myapp"] +EXPOSE 80 8080 22 +``` + +
+ +
show

+ +```bash +FROM alpine:3.12 +RUN adduser -D myuser && chown -R myuser /myapp-data +# Avoid unnecessary privileges - run as the dedicated non-root user +USER myuser +ENTRYPOINT ["/myapp"] +# Expose only the necessary port +EXPOSE 80 +``` + +

+ +
+ + + + + + diff --git a/k8s-certifications/topics/etcd.md b/k8s-certifications/topics/etcd.md new file mode 100644 index 0000000..9ab1388 --- /dev/null +++ b/k8s-certifications/topics/etcd.md @@ -0,0 +1,99 @@ +# [ETCD](https://etcd.io/) + +
+ +### Check the version of ETCD + +
+ +```bash +kubectl get pod etcd-controlplane -n kube-system -o yaml | grep image +# image: k8s.gcr.io/etcd:3.4.3-0 +``` + +## Backup and Restore +Refer [Backing up ETCD Cluster](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#backing-up-an-etcd-cluster) & [Restoring ETCD Cluster](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#restoring-an-etcd-cluster) + +
+ +#### Create a snapshot of the etcd instance running at https://127.0.0.1:2379, saving the snapshot to the file path /opt/snapshot-pre-boot.db. Restore the snapshot. The following TLS certificates/key are supplied for connecting to the server with etcdctl: + - CA certificate: /etc/kubernetes/pki/etcd/ca.crt + - Client certificate: /etc/kubernetes/pki/etcd/server.crt + - Client key: /etc/kubernetes/pki/etcd/server.key + +
show

+ +#### Install ETCD Client + +```bash +snap install etcd # version 3.4.5, or +apt install etcd-client +``` + +#### Create deployment before backup for testing + +```bash +kubectl create deploy nginx --image=nginx --replicas=3 +``` + +#### Backup ETCD + +```bash +ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt \ + --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key \ + snapshot save /opt/snapshot-pre-boot.db +# Snapshot saved at /opt/snapshot-pre-boot.db +``` + +#### Delete the deployment + +```bash +kubectl delete deployment nginx +``` + +#### Restore ETCD Snapshot to a new folder + +```bash +ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt \ + --name=master \ + --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key \ + --data-dir /var/lib/etcd-from-backup \ + --initial-cluster=master=https://127.0.0.1:2380 \ + --initial-cluster-token etcd-cluster-1 \ + --initial-advertise-peer-urls=https://127.0.0.1:2380 \ + snapshot restore /opt/snapshot-pre-boot.db +# 2021-12-21 13:56:56.460862 I | mvcc: restore compact to 1288 +# 2021-12-21 13:56:56.716540 I | etcdserver/membership: added member e92d66acd89ecf29 [https://127.0.0.1:2380] to cluster 7581d6eb2d25405b +``` + + #### Modify /etc/kubernetes/manifests/etcd.yaml + +```bash + # Update --data-dir to use new target location + --data-dir=/var/lib/etcd-from-backup + +# Update new initial-cluster-token to specify new cluster + --initial-cluster-token=etcd-cluster-1 + +# Update volumes and volume mounts to point to new path + volumeMounts: + - mountPath: /var/lib/etcd-from-backup + name: etcd-data + - mountPath: /etc/kubernetes/pki/etcd + name: etcd-certs + volumes: + - hostPath: + path: /var/lib/etcd-from-backup + type: DirectoryOrCreate + name: etcd-data +``` + +#### Verify the deployment exists after restoration + +```bash +kubectl get deployment nginx +``` + +
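Optionally, the snapshot can be inspected before (or after) the restore to confirm it is valid; this assumes the same etcdctl v3 client:

```bash
ETCDCTL_API=3 etcdctl snapshot status /opt/snapshot-pre-boot.db --write-out=table
# prints HASH, REVISION, TOTAL KEYS and TOTAL SIZE for the snapshot
```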

+ +
\ No newline at end of file diff --git a/k8s-certifications/topics/falco.md b/k8s-certifications/topics/falco.md new file mode 100644 index 0000000..a31e86f --- /dev/null +++ b/k8s-certifications/topics/falco.md @@ -0,0 +1,31 @@ +# [Falco](https://falco.org/) + +
+ +### Installation + +
+ +```bash +curl -s https://falco.org/repo/falcosecurity-3672BA8F.asc | apt-key add - +echo "deb https://download.falco.org/packages/deb stable main" | tee -a /etc/apt/sources.list.d/falcosecurity.list +apt-get update -y +apt-get -y install linux-headers-$(uname -r) +apt-get install -y falco +systemctl enable falco +systemctl start falco +``` + +### Check logs and events + +
+ +```bash +journalctl -fu falco +``` + +### Clean up + +```bash +apt-get remove falco +``` \ No newline at end of file diff --git a/k8s-certifications/topics/ingress.md b/k8s-certifications/topics/ingress.md new file mode 100644 index 0000000..5b96167 --- /dev/null +++ b/k8s-certifications/topics/ingress.md @@ -0,0 +1,179 @@ +# [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) + +- Ingress manages external access to the services in a cluster, typically HTTP. +- Ingress may provide load balancing, SSL termination and name-based virtual hosting. + +
+ +### Create the following + - Deployment `web` with image `gcr.io/google-samples/hello-app:1.0` with 3 replicas. + - Service `web` to expose the deployment as a NodePort service + - Ingress `web-ingress` to point to the `web` service using host `hello-world.info`. + +
+ +
show

+ +```bash +kubectl create deployment web --image=gcr.io/google-samples/hello-app:1.0 +kubectl expose deployment web --type=NodePort --port=8080 +kubectl get service web +# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +# web NodePort 10.104.218.215 8080:30807/TCP 12s +``` + +#### Create Ingress with the below specs and apply using `kubectl apply -f web-ingress.yaml` + +```bash +kubectl create ingress web-ingress --rule="hello-world.info/=web:8080" +``` + +OR + +```yaml +cat << EOF > web-ingress.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: web-ingress + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$1 +spec: + rules: + - host: hello-world.info + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: web + port: + number: 8080 +EOF + +kubectl apply -f web-ingress.yaml +``` + +OR below for older versions + +```yaml +cat << EOF > web-ingress.yaml +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: web-ingress + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$1 +spec: + rules: + - host: hello-world.info + http: + paths: + - path: / + pathType: Prefix + backend: + serviceName: web + servicePort: 8080 +EOF + +kubectl apply -f web-ingress.yaml +``` + +```bash +# verification +kubectl get nodes -o wide # get node ip +kubectl get deploy web # check status +kubectl get svc web # check node port ip +curl http://10.0.26.3:32104 # use node ip:node port +kubectl get ingress web-ingress # you will get an ip address of the ingress controller if installed +# NAME CLASS HOSTS ADDRESS PORTS AGE +# web-ingress hello-world.info 80 11s +``` + +
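To exercise the host rule itself (rather than the NodePort used above), the request has to go through an ingress controller; a hedged example with placeholder address and port:

```bash
# <ingress-ip> and <ingress-port> depend on how the ingress controller is exposed in your cluster
curl -H "Host: hello-world.info" http://<ingress-ip>:<ingress-port>/
# expected response from hello-app, e.g. "Hello, world!" followed by version and hostname
```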

+ +
+ +## Ingress Security + +
+ +### Create a TLS secret `testsecret-tls` using the certificate `../data/tls.crt` and key `../data/tls.key`. Enable TLS for the ingress below. + +
+ +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tls-example-ingress +spec: + rules: + - host: https-example.foo.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: service1 + port: + number: 80 +``` + +
show

+ +```bash +kubectl create secret tls testsecret-tls --cert=tls.crt --key=tls.key +``` + +```yaml + +cat << EOF > tls-example-ingress.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tls-example-ingress +spec: + tls: # add tls entry + - hosts: + - https-example.foo.com + secretName: testsecret-tls + rules: + - host: https-example.foo.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: service1 + port: + number: 80 +EOF + +kubectl apply -f tls-example-ingress.yaml + +``` + +```bash +# verification +kubectl get secret testsecret-tls +kubectl get ingress tls-example-ingress +``` + +
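+
+Optionally, the TLS handshake can be checked against the controller; `<ingress-address>` below is a placeholder for the address or node port serving the ingress in your environment.
+
+```bash
+openssl s_client -connect <ingress-address>:443 -servername https-example.foo.com </dev/null 2>/dev/null \
+  | openssl x509 -noout -subject   # should print the subject of tls.crt
+```
+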

+ +
+ +### Clean up + +
+ +```bash +kubectl delete secret testsecret-tls +kubectl delete ingress web-ingress tls-example-ingress +kubectl delete svc web +kubectl delete deployment web +``` \ No newline at end of file diff --git a/k8s-certifications/topics/init_containers.md b/k8s-certifications/topics/init_containers.md new file mode 100644 index 0000000..d5825d5 --- /dev/null +++ b/k8s-certifications/topics/init_containers.md @@ -0,0 +1,170 @@ +# [Init Containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) + +### Update the below specs for nginx pod with `/usr/share/nginx/html` directory mounted on volume `workdir`. + - Add an init container named `install` with image `busybox`. + - Mount the workdir to the init container. + - `wget` the `http://info.cern.ch` and save as `index.html` to the `workdir` in the init container. + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: init-demo +spec: + containers: + - name: nginx + image: nginx + ports: + - containerPort: 80 + volumeMounts: + - name: workdir + mountPath: /usr/share/nginx/html + dnsPolicy: Default + volumes: + - name: workdir + emptyDir: {} +``` + +
show

+ +```yaml +cat << EOF > init-demo.yaml +apiVersion: v1 +kind: Pod +metadata: + name: init-demo +spec: + containers: + - name: nginx + image: nginx + ports: + - containerPort: 80 + volumeMounts: + - name: workdir + mountPath: /usr/share/nginx/html + # Add the init container + initContainers: + - name: install + image: busybox + command: + - wget + - "-O" + - "/work-dir/index.html" + - http://info.cern.ch + volumeMounts: + - name: workdir + mountPath: "/work-dir" + dnsPolicy: Default + volumes: + - name: workdir + emptyDir: {} +EOF + +kubectl apply -f init-demo.yaml + +kubectl exec init-demo -- curl http://localhost +# % Total % Received % Xferd Average Speed Time Time Time Current +# Dload Upload Total Spent Left Speed +# 100 646 100 646 0 0 34000 0 --:--:-- --:--:-- --:--:-- 34000 +#

+# (HTML response from info.cern.ch, truncated)
+# http://info.cern.ch - home of the first website
+# From here you can:
+# ...
+```

+ +
+
+### Add an init container `maker` with image `alpine` to the `maker-checker` pod with the spec given below.
+ - The init container should create an empty file named `/workdir/calm.txt`.
+ - If `/workdir/calm.txt` is not detected, the pod should exit.
+ - Once the spec file has been updated with the init container definition, the pod should be created.
+
+ +```yaml +cat << EOF > maker-checker.yaml +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + run: maker-checker + name: maker-checker +spec: + containers: + - image: alpine + name: checker + command: ["/bin/sh", "-c", "if /workdir/calm.txt; then sleep 3600; else exit 1; fi;"] + volumeMounts: + - name: workdir + mountPath: "/work-dir" + dnsPolicy: Default + volumes: + - name: workdir + emptyDir: {} + restartPolicy: Always +status: {} +EOF +``` + +
show

+ +```yaml +cat << EOF > maker-checker.yaml +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + run: maker-checker + name: maker-checker +spec: + containers: + - image: alpine + name: checker + command: ["/bin/sh", "-c", "if [ -f /workdir/calm.txt ]; then sleep 3600; else exit 1; fi;"] + volumeMounts: + - name: workdir + mountPath: "/workdir" + # Add the init container + initContainers: + - name: maker + image: alpine + command: ["/bin/sh", "-c", "touch /workdir/calm.txt"] + volumeMounts: + - name: workdir + mountPath: "/workdir" + dnsPolicy: Default + volumes: + - name: workdir + emptyDir: {} + restartPolicy: Always +status: {} +EOF + +kubectl apply -f maker-checker.yaml +``` + +
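+
+A quick check that the init container did its job (a sketch using the names defined above; output will vary):
+
+```bash
+kubectl get pod maker-checker               # expect READY 1/1 and STATUS Running once 'maker' completes
+kubectl exec maker-checker -- ls /workdir   # calm.txt should be listed on the shared volume
+```
+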

+ +
+ +### Clean up + +
+ +```bash +rm init-demo.yaml maker-checker.yaml +kubectl delete pod init-demo maker-checker --force --grace-period=0 +``` \ No newline at end of file diff --git a/k8s-certifications/topics/jobs.md b/k8s-certifications/topics/jobs.md new file mode 100644 index 0000000..863d684 --- /dev/null +++ b/k8s-certifications/topics/jobs.md @@ -0,0 +1,475 @@ +# Jobs & Cron Jobs +A Job creates one or more Pods and will continue to retry execution of the Pods until a specified number of them successfully terminate. +A CronJob creates Jobs on a repeating schedule. + +
+ +- [Jobs](#jobs) +- [Cron Jobs](#cron-jobs) + +
+ +## Jobs + +
+ +### Create job named `pi` with image `perl` that runs the command with arguments `"perl -Mbignum=bpi -wle 'print bpi(2000)'"` + +
+ +
show

+ +`kubectl create job pi --image=perl -- perl -Mbignum=bpi -wle 'print bpi(2000)'` + +OR + +```bash +cat << EOF > pi.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: pi +spec: + template: + spec: + containers: + - name: pi + image: perl + command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] + restartPolicy: Never +EOF + +kubectl apply -f pi.yaml +``` + +

+ +
+ +### Wait till it's done, get the output + +
+ +
show

+ +```bash +kubectl get jobs -w # wait till 'SUCCESSFUL' is 1 (will take some time, perl image might be big) +# NAME COMPLETIONS DURATION AGE +# pi 1/1 2m18s 2m47s +kubectl get pod # get the pod name +# NAME READY STATUS RESTARTS AGE +# pi-vkj8b 0/1 Completed 0 2m50s +kubectl logs pi-vkj8b # get the pi numbers +# 3.141592653589793238462643383279502884........ +kubectl delete job pi +``` +OR + +```bash +kubectl get jobs -w # wait till 'SUCCESSFUL' is 1 (will take some time, perl image might be big) +kubectl logs job/pi +kubectl delete job pi +``` +OR + +```bash +kubectl wait --for=condition=complete --timeout=300s job pi +kubectl logs job/pi +kubectl delete job pi +``` + +

+ +
+ +### Create a job `busybox` with `busybox` image that would be automatically terminated by kubernetes if it takes more than 30 seconds to execute. + +
+ +
show

+ +```bash +kubectl create job busybox --image=busybox --dry-run=client -o yaml -- /bin/sh -c 'while true; do echo hello; sleep 10;done' > busybox-job.yaml +``` + +#### Edit `busybox-job.yaml` to add `job.spec.activeDeadlineSeconds=30` and apply `kubectl apply -f busybox-job.yaml` + +```yaml +cat << EOF > busybox-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + creationTimestamp: null + name: busybox +spec: + activeDeadlineSeconds: 30 # add this line + template: + metadata: + creationTimestamp: null + spec: + containers: + - command: + - /bin/sh + - -c + - while true; do echo hello; sleep 10;done + image: busybox + name: busybox + resources: {} + restartPolicy: Never +status: {} +EOF + +kubectl apply -f busybox-job.yaml +``` + +#### Describe the job with the statement `Warning DeadlineExceeded xxs job-controller Job was active longer than specified deadline` + +
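+
+One way to surface that event (shown as a sketch; the exact event text can vary by Kubernetes version):
+
+```bash
+kubectl describe job busybox | grep -i deadline
+# Warning  DeadlineExceeded ... Job was active longer than specified deadline
+```
+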

+ +
+ +### Delete the job + +
+ +
show

+ +```bash +kubectl delete job busybox +``` + +

+ +
+ +### Create a job `busybox-completions-job` with `busybox` image that would be run 5 times + +
+ +
show

+ +```bash +kubectl create job busybox-completions-job --image=busybox --dry-run=client -o yaml -- /bin/sh -c 'echo hello;sleep 10;echo world' > busybox-completions-job.yaml +``` + +#### Edit `busybox-completions-job.yaml` to add `job.spec.completions=5` and apply `kubectl apply -f busybox-completions-job.yaml` + +```yaml +cat << EOF > busybox-completions-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + creationTimestamp: null + name: busybox-completions-job +spec: + completions: 5 # add this line + template: + metadata: + creationTimestamp: null + spec: + containers: + - command: + - /bin/sh + - -c + - echo hello;sleep 10;echo world + image: busybox + name: busybox-completions-job + resources: {} + restartPolicy: Never +status: {} +EOF + +kubectl apply -f busybox-completions-job.yaml +``` + +#### Check the job pod status `kk get pods -l job-name=busybox-completions-job` or `kubectl get jobs -w` are in completed status after 2-3 minutes. + +```bash +kubectl get jobs -w +# NAME COMPLETIONS DURATION AGE +# busybox-completions-job 0/5 7s 7s +# busybox-completions-job 1/5 15s 15s +# busybox-completions-job 2/5 28s 28s +# busybox-completions-job 3/5 42s 42s +# busybox-completions-job 4/5 56s 56s +# busybox-completions-job 5/5 70s 70s +``` + +

+ +
+ +### Create a job `busybox-parallelism-job` with `busybox` image that would be run parallely 5 times. + +
+ +
show

+ +```bash +kubectl create job busybox-parallelism-job --image=busybox --dry-run=client -o yaml -- /bin/sh -c 'echo hello;sleep 10;echo world' > busybox-parallelism-job.yaml +``` + +#### Edit `busybox-parallelism-job.yaml` to add `job.spec.parallelism=5` and apply `kubectl apply -f busybox-parallelism-job.yaml` + +```yaml +cat << EOF > busybox-parallelism-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + creationTimestamp: null + name: busybox-parallelism-job +spec: + parallelism: 5 # add this line + template: + metadata: + creationTimestamp: null + spec: + containers: + - command: + - /bin/sh + - -c + - echo hello;sleep 10;echo world + image: busybox + name: busybox-parallelism-job + resources: {} + restartPolicy: Never +status: {} +EOF + +kubectl apply -f busybox-parallelism-job.yaml +``` + +#### Check the job pod status `kk get pods -l job-name=busybox-parallelism-job` or `kubectl get jobs -w` are in completed status after 1 minute, as it would quicker as compared to before. + +```bash +kubectl get jobs -w +# NAME COMPLETIONS DURATION AGE +# busybox-parallelism-job 1/1 of 5 15s 15s +# busybox-parallelism-job 2/1 of 5 16s 16s +# busybox-parallelism-job 3/1 of 5 17s 17s +# busybox-parallelism-job 4/1 of 5 18s 18s +# busybox-parallelism-job 5/1 of 5 19s 19s +``` + +

+ +
+ +## [Cron jobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) + +
+ +### Create a cron job `busybox-cron-job` with image `busybox` that runs every minute on a schedule of `*/1 * * * *` and writes `date; echo Hello from the Kubernetes cluster` to standard output + +
show

+ +```bash +kubectl create cronjob busybox-cron-job --image=busybox --schedule="*/1 * * * *" -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster' +``` + +
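+
+A quick look at the created object (column values will vary):
+
+```bash
+kubectl get cronjob busybox-cron-job
+# NAME               SCHEDULE      SUSPEND   ACTIVE   LAST SCHEDULE   AGE
+# busybox-cron-job   */1 * * * *   False     0        <none>          10s
+```
+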

+ +
+ +### See its logs and delete it + +
+ +
show

+```bash
+kubectl get cj
+kubectl get jobs --watch # Bear in mind that Kubernetes will run a new job/pod for each new cron job
+# NAME COMPLETIONS DURATION AGE
+# busybox-cron-job-1639638720 0/1 0s
+# busybox-cron-job-1639638720 0/1 0s 0s
+# busybox-cron-job-1639638720 1/1 8s 9s
+# busybox-cron-job-1639638780 0/1 0s
+# busybox-cron-job-1639638780 0/1 1s 1s
+# busybox-cron-job-1639638780 1/1 9s 9s
+kubectl get pod --show-labels # observe that the pods have a label that mentions their 'parent' job
+kubectl logs busybox-cron-job-1639638720-xxxxx # use an actual pod name from the previous command
+kubectl delete cj busybox-cron-job
+```
+

+ +
+
+### Create a cron job with image busybox that runs every minute and writes 'date; echo Hello from the Kubernetes cluster' to standard output. The job run should be skipped if it cannot start within 17 seconds of its scheduled time (i.e. the job missed its scheduled time).
+
+ +
show

+ +```bash +kubectl create cronjob time-limited-job --image=busybox --restart=Never --dry-run=client --schedule="* * * * *" -o yaml -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster' > time-limited-job.yaml +vi time-limited-job.yaml +``` + +#### Add `cronjob.spec.startingDeadlineSeconds=17` and apply + +```bash +cat << EOF > time-limited-job.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + creationTimestamp: null + name: time-limited-job +spec: + startingDeadlineSeconds: 17 # add this line + jobTemplate: + metadata: + creationTimestamp: null + name: time-limited-job + spec: + template: + metadata: + creationTimestamp: null + spec: + containers: + - args: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster + image: busybox + name: time-limited-job + resources: {} + restartPolicy: Never + schedule: '* * * * *' +status: {} +EOF + +kubectl apply -f time-limited-job.yaml +``` + +

+ +
+ +### Create a cron job with image busybox that runs every minute and writes 'date; echo Hello from the Kubernetes cluster' to standard output. The cron job should be terminated if it successfully starts but takes more than 12 seconds to complete execution. + +
+ +
show

+ +```bash +kubectl create cronjob time-limited-job --image=busybox --restart=Never --dry-run=client --schedule="* * * * *" -o yaml -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster' > time-limited-job.yaml +vi time-limited-job.yaml +``` + +#### Add cronjob.spec.jobTemplate.spec.activeDeadlineSeconds=12 + +```bash +cat << EOF > time-limited-job.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + creationTimestamp: null + name: time-limited-job +spec: + jobTemplate: + metadata: + creationTimestamp: null + name: time-limited-job + spec: + activeDeadlineSeconds: 12 # add this line + template: + metadata: + creationTimestamp: null + spec: + containers: + - args: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster + image: busybox + name: time-limited-job + resources: {} + restartPolicy: Never + schedule: '* * * * *' +status: {} +EOF + +kubectl apply -f time-limited-job.yaml +``` + +

+ +
+
+### Create a CronJob named `hello` that executes a Pod running the following single container.
+- name: hello
+- image: busybox:1.28
+- command: ["/bin/sh", "-c", "date; echo Hello from the Kubernetes cluster"]
+Configure the CronJob to
+- Execute once every 2 minutes
+- Keep 3 completed Jobs
+- Keep 3 failed Jobs
+- Never restart Pods
+- Terminate Pods after 10 seconds
+Manually create and execute one Job named `hello-test` from the `hello` CronJob for testing purposes.
+
+ +
show

+ +```bash +kubectl create cronjob hello --image=busybox --restart=Never --dry-run=client --schedule="*/2 * * * *" -o yaml -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster' > hello-cronjob.yaml +vi hello-cronjob.yaml +``` + +#### Add the following specs. + +```yaml +cat << EOF > hello-cronjob.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + creationTimestamp: null + name: hello +spec: + jobTemplate: + metadata: + creationTimestamp: null + name: hello + spec: + activeDeadlineSeconds: 10 # Terminate Pods after 10 seconds + template: + metadata: + creationTimestamp: null + spec: + containers: + - command: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster + image: busybox + name: hello + resources: {} + restartPolicy: Never # Never restart Pods + schedule: '*/2 * * * *' # Execute once every 2 minutes + successfulJobsHistoryLimit: 3 # Keep 3 completed Job + failedJobsHistoryLimit: 3 # Keep 3 failed job +status: {} +EOF + +kubectl apply -f hello-cronjob.yaml + +# Trigger the job manually +kubectl create job --from=cronjob/hello hello-test +``` + +

+ +
+ +## Clean up + +
+
+```bash
+rm hello-cronjob.yaml time-limited-job.yaml busybox-parallelism-job.yaml
+kubectl delete job pi busybox-parallelism-job busybox-completions-job hello-test
+kubectl delete cronjob time-limited-job hello
+``` \ No newline at end of file diff --git a/k8s-certifications/topics/jsonpath.md b/k8s-certifications/topics/jsonpath.md new file mode 100644 index 0000000..6704c75 --- /dev/null +++ b/k8s-certifications/topics/jsonpath.md @@ -0,0 +1,94 @@ +# [Kubectl jsonpath](https://kubernetes.io/docs/reference/kubectl/jsonpath/)
+
+ +### Get node details as custom fields with NODE_NAME for nodename, CPU_COUNT for cpu. + +
show

+ +```bash +$ kubectl get nodes -o=custom-columns=NODE_NAME:.metadata.name,CPU_COUNT:.status.capacity.cpu +# NODE_NAME CPU_COUNT +# controlplane 2 +# node01 2 +``` + +

+ +
+ +### Setup few containers and deployments + +```bash +kubectl run nginx-dev --image nginx:1.21.4-alpine +kubectl run nginx-qa --image nginx:1.21 +kubectl run nginx-prod --image nginx:1.21 +``` + +### List all Container images in all namespaces + +
show

+```bash
+kubectl get pods --all-namespaces -o jsonpath='{.items[*].spec.containers[*].image}' | tr " " "\n"
+# nginx:1.21.4-alpine
+# nginx:1.21
+# nginx:1.21
+# k8s.gcr.io/coredns:1.6.7
+# k8s.gcr.io/coredns:1.6.7
+# k8s.gcr.io/etcd:3.4.3-0
+# katacoda/katacoda-cloud-provider:0.0.1
+# k8s.gcr.io/kube-apiserver:v1.18.0
+# k8s.gcr.io/kube-controller-manager:v1.18.0
+# quay.io/coreos/flannel:v0.12.0-amd64
+# quay.io/coreos/flannel:v0.12.0-amd64
+# gcr.io/google_containers/kube-keepalived-vip:0.9
+# k8s.gcr.io/kube-proxy:v1.18.0
+# k8s.gcr.io/kube-proxy:v1.18.0
+# k8s.gcr.io/kube-scheduler:v1.18.0
+```
+

+ +
+ +### List all the pods sorted by name + +
show

+ +```bash +kubectl get pods --sort-by=.metadata.name +# NAME READY STATUS RESTARTS AGE +# nginx-dev 1/1 Running 0 91s +# nginx-prod 1/1 Running 0 91s +# nginx-qa 1/1 Running 0 91s +``` + +

+ +
+ +### Check the Image version of nginx-dev pod using jsonpath + +
show

+ +```bash +kubectl get pod nginx-dev -o jsonpath='{.spec.containers[0].image}' +# nginx:1.21.4-alpine +``` + +
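+
+If the pod had several containers, the same lookup can select one by name with a jsonpath filter expression (a sketch; the container created by `kubectl run nginx-dev` is itself named `nginx-dev`):
+
+```bash
+kubectl get pod nginx-dev -o jsonpath='{.spec.containers[?(@.name=="nginx-dev")].image}'
+# nginx:1.21.4-alpine
+```
+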

+ +
+ +### List the nginx pod with custom columns POD_NAME and POD_STATUS + +
show

+```bash
+kubectl get po -o=custom-columns="POD_NAME:.metadata.name,POD_STATUS:.status.containerStatuses[].state"
+```
+

+ +
\ No newline at end of file diff --git a/k8s-certifications/topics/kube-bench.md b/k8s-certifications/topics/kube-bench.md new file mode 100644 index 0000000..8c95130 --- /dev/null +++ b/k8s-certifications/topics/kube-bench.md @@ -0,0 +1,80 @@ +# [Kube-bench](https://github.com/aquasecurity/kube-bench) + +Aqua Security Kube-bench is a tool that checks whether Kubernetes is deployed securely by running the checks documented in the [CIS Kubernetes Benchmark](https://www.cisecurity.org/benchmark/kubernetes/). + +
+ +### Installation + +```bash +curl -L https://github.com/aquasecurity/kube-bench/releases/download/v0.6.5/kube-bench_0.6.5_linux_amd64.tar.gz -o kube-bench_0.6.5_linux_amd64.tar.gz +tar -xvf kube-bench_0.6.5_linux_amd64.tar.gz +``` + +
+ +### Execute Kubebench on the cluster + +
+ +```bash +./kube-bench --config-dir `pwd`/cfg --config `pwd`/cfg/config.yaml +# ..... +# == Summary total == +# 71 checks PASS +# 11 checks FAIL +# 40 checks WARN +# 0 checks INFO +``` + +
+ +### Check the failed tests on the cluster + +
+ +```bash +./kube-bench --config-dir `pwd`/cfg --config `pwd`/cfg/config.yaml | grep "\[FAIL\] " +# [FAIL] 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) +# [FAIL] 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) +# [FAIL] 1.2.16 Ensure that the admission control plugin PodSecurityPolicy is set (Automated) +# [FAIL] 1.2.21 Ensure that the --profiling argument is set to false (Automated) +# [FAIL] 1.2.22 Ensure that the --audit-log-path argument is set (Automated) +# [FAIL] 1.2.23 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) +# [FAIL] 1.2.24 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) +# [FAIL] 1.2.25 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) +# [FAIL] 1.3.2 Ensure that the --profiling argument is set to false (Automated) +# [FAIL] 1.4.1 Ensure that the --profiling argument is set to false (Automated) +# [FAIL] 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) +``` + +
+ +### Fix the failing test `Fix this failed test 1.4.1: Ensure that the --profiling argument is set to false` + +
+ +#### Check the remediation for 1.4.1 which is as below. Edit `/etc/kubernetes/manifests/kube-scheduler.yaml` to add `--profiling=false` + +``` +1.4.1 Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml file +on the master node and set the below parameter. +--profiling=false +``` + +#### Rerun the kubebench and verify 1.4.1 is remediated. + +```bash +./kube-bench --config-dir `pwd`/cfg --config `pwd`/cfg/config.yaml | grep "\[FAIL\] " +# [FAIL] 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) +# [FAIL] 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) +# [FAIL] 1.2.16 Ensure that the admission control plugin PodSecurityPolicy is set (Automated) +# [FAIL] 1.2.21 Ensure that the --profiling argument is set to false (Automated) +# [FAIL] 1.2.22 Ensure that the --audit-log-path argument is set (Automated) +# [FAIL] 1.2.23 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) +# [FAIL] 1.2.24 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) +# [FAIL] 1.2.25 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) +# [FAIL] 1.3.2 Ensure that the --profiling argument is set to false (Automated) +# [FAIL] 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) +``` + diff --git a/k8s-certifications/topics/kubeconfig.md b/k8s-certifications/topics/kubeconfig.md new file mode 100644 index 0000000..adb1bf4 --- /dev/null +++ b/k8s-certifications/topics/kubeconfig.md @@ -0,0 +1,109 @@ +# Kubeconfig + +
+ +NOTE : use the [kubeconfig.yaml](../data/kubeconfig.yaml) for the exercise + +### View the config file + +
+ +
show

+ +```bash +kubectl config view --kubeconfig kubeconfig.yaml +``` + +

+ +
+ +### Get the clusters from the kubeconfig file + +
+ +
show

+ +```bash +kubectl config get-clusters --kubeconfig kubeconfig.yaml +# NAME +# development +# qa +# production +# kubernetes +# labs +``` + +

+ +
+ +### Get the users from the kubeconfig file + +
+ +
show

+ +```bash +kubectl config get-users --kubeconfig kubeconfig.yaml # will not work for older versions +# NAME +# dev-user +# kubernetes-admin +# labs-user +# prod-user +# qa-user +``` + +

+ +
+ +### Get the contexts from the kubeconfig file + +
+ +
show

+ +```bash +kubectl config get-contexts --kubeconfig kubeconfig.yaml +# CURRENT NAME CLUSTER AUTHINFO NAMESPACE +# development-user@labs development development-user +# * kubernetes-admin@kubernetes kubernetes kubernetes-admin +# labs-user@labs labs labs-user +# prod-user@prod prod prod-user +# qa-user@qa qa qa-user +``` + +

+ +
+ +### Get the current context + +
+ +
show

+ +```bash +kubectl config current-context --kubeconfig kubeconfig.yaml +# kubernetes-admin@kubernetes +``` + +

+ +
+ +### Switch to context `prod-user@prod` as the current context + +
+ +
show

+ +```bash +kubectl config use-context prod-user@prod --kubeconfig kubeconfig.yaml +# Switched to context "prod-user@prod". +kubectl config current-context --kubeconfig kubeconfig.yaml +# prod-user@prod +``` + +
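+
+To avoid repeating `--kubeconfig` on every command, the file can also be exported for the current shell (an optional convenience, not required by the exercise):
+
+```bash
+export KUBECONFIG=$PWD/kubeconfig.yaml
+kubectl config current-context
+# prod-user@prod
+```
+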

\ No newline at end of file diff --git a/k8s-certifications/topics/kubelet_security.md b/k8s-certifications/topics/kubelet_security.md new file mode 100644 index 0000000..b677e74 --- /dev/null +++ b/k8s-certifications/topics/kubelet_security.md @@ -0,0 +1,57 @@ +# [Kubelet Security](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/) + +
+ +### Check the Kubelet Security + +
+ +#### Check Kubelet configuration + +```bash +ps -ef | grep kubelet # check the --config parameter +# root 2600 1 3 05:21 ? 00:00:02 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --config=/var/lib/kubelet/config.yaml --cgroup-driver=systemd --network-plugin=cni --pod-infra-container-image=k8s.gcr.io/pause:3.2 --resolv-conf=/run/systemd/resolve/resolv.conf +``` + +#### Viewing the kubelet configuration file `/var/lib/kubelet/config.yaml` + +```yaml +apiVersion: kubelet.config.k8s.io/v1beta1 +authentication: + anonymous: + enabled: false # anonymous auth should be disabled - It should not be true + webhook: # Authn mechanism set to webhook as certificate based auth instead of AlwaysAllow + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt +authorization: + mode: Webhook # Authz mechanism set to webhook, instead of AlwaysAllow + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s +clusterDNS: +- 10.96.0.10 +clusterDomain: cluster.local +cpuManagerReconcilePeriod: 0s +evictionPressureTransitionPeriod: 0s +# additional lines omitted for brevity +``` + +#### Check the key and certificate in the `kube-apiserver.yaml` file + +```bash +cat kube-apiserver.yaml | grep kubelet-client +# - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt +# - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key +``` + +#### Verify the authentication using the above cert and key + +```bash +curl -sk https://localhost:10250/pods/ +# Unauthorized + +curl -sk https://localhost:10250/pods/ --key /etc/kubernetes/pki/apiserver-kubelet-client.key --cert /etc/kubernetes/pki/apiserver-kubelet-client.crt +# {"kind":"PodList","apiVersion":"v1","metadata":{},"items":[{"metadata":{"name":"etcd-controlplane","namespace": ... +``` \ No newline at end of file diff --git a/k8s-certifications/topics/kubesec.md b/k8s-certifications/topics/kubesec.md new file mode 100644 index 0000000..709b24b --- /dev/null +++ b/k8s-certifications/topics/kubesec.md @@ -0,0 +1,263 @@ +# [Kubesec](https://kubesec.io/) + +Security risk analysis for Kubernetes resources + +
+ +### Installation + +
+ +```bash +wget https://github.com/controlplaneio/kubesec/releases/download/v2.11.0/kubesec_linux_amd64.tar.gz +tar -xvf kubesec_linux_amd64.tar.gz +mv kubesec /usr/bin/ +``` + +
+ +### Scan the following specs, identify the issues, fix and rescan + +
+ +```yaml +cat << EOF > unsecured.yaml +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + run: nginx + name: nginx +spec: + containers: + - image: nginx + name: nginx + resources: {} + securityContext: + privileged: true # security issue + readOnlyRootFilesystem: false # security issue + dnsPolicy: ClusterFirst + restartPolicy: Never +EOF +``` + +
show

+ +```bash +kubesec scan unsecured.yaml + +# [ +# { +# "object": "Pod/nginx.default", +# "valid": true, +# "fileName": "unsecured.yaml", +# "message": "Failed with a score of -30 points", +# "score": -30, +# "scoring": { +# "critical": [ +# { +# "id": "Privileged", +# "selector": "containers[] .securityContext .privileged == true", +# "reason": "Privileged containers can allow almost completely unrestricted host access", +# "points": -30 +# } +# ], +# "advise": [ +# { +# "id": "ApparmorAny", +# "selector": ".metadata .annotations .\"container.apparmor.security.beta.kubernetes.io/nginx\"", +# "reason": "Well defined AppArmor policies may provide greater protection from unknown threats. WARNING: NOT PRODUCTION READY", +# "points": 3 +# }, +# { +# "id": "ServiceAccountName", +# "selector": ".spec .serviceAccountName", +# "reason": "Service accounts restrict Kubernetes API access and should be configured with least privilege", +# "points": 3 +# }, +# { +# "id": "SeccompAny", +# "selector": ".metadata .annotations .\"container.seccomp.security.alpha.kubernetes.io/pod\"", +# "reason": "Seccomp profiles set minimum privilege and secure against unknown threats", +# "points": 1 +# }, +# { +# "id": "LimitsCPU", +# "selector": "containers[] .resources .limits .cpu", +# "reason": "Enforcing CPU limits prevents DOS via resource exhaustion", +# "points": 1 +# }, +# { +# "id": "RequestsMemory", +# "selector": "containers[] .resources .limits .memory", +# "reason": "Enforcing memory limits prevents DOS via resource exhaustion", +# "points": 1 +# }, +# { +# "id": "RequestsCPU", +# "selector": "containers[] .resources .requests .cpu", +# "reason": "Enforcing CPU requests aids a fair balancing of resources across the cluster", +# "points": 1 +# }, +# { +# "id": "RequestsMemory", +# "selector": "containers[] .resources .requests .memory", +# "reason": "Enforcing memory requests aids a fair balancing of resources across the cluster", +# "points": 1 +# }, +# { +# "id": "CapDropAny", +# "selector": "containers[] .securityContext .capabilities .drop", +# "reason": "Reducing kernel capabilities available to a container limits its attack surface", +# "points": 1 +# }, +# { +# "id": "CapDropAll", +# "selector": "containers[] .securityContext .capabilities .drop | index(\"ALL\")", +# "reason": "Drop all capabilities and add only those required to reduce syscall attack surface", +# "points": 1 +# }, +# { +# "id": "ReadOnlyRootFilesystem", +# "selector": "containers[] .securityContext .readOnlyRootFilesystem == true", +# "reason": "An immutable root filesystem can prevent malicious binaries being added to PATH and increase attack cost", +# "points": 1 +# }, +# { +# "id": "RunAsNonRoot", +# "selector": "containers[] .securityContext .runAsNonRoot == true", +# "reason": "Force the running image to run as a non-root user to ensure least privilege", +# "points": 1 +# }, +# { +# "id": "RunAsUser", +# "selector": "containers[] .securityContext .runAsUser -gt 10000", +# "reason": "Run as a high-UID user to avoid conflicts with the host's user table", +# "points": 1 +# } +# ] +# } +# } +# ] + +``` + +#### Edit the specs to remove the below + +```yaml + securityContext: + privileged: true # security issue + readOnlyRootFilesystem: false # security issue +``` + +```yaml +cat << EOF > unsecured.yaml +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + run: nginx + name: nginx +spec: + containers: + - image: nginx + name: nginx + resources: {} + dnsPolicy: ClusterFirst + restartPolicy: Never +EOF +``` + 
+```bash +kubesec scan unsecured.yaml + +# [ +# { +# "object": "Pod/nginx.default", +# "valid": true, +# "fileName": "unsecured.yaml", +# "message": "Passed with a score of 0 points", +# "score": 0, +# "scoring": { +# "advise": [ +# { +# "id": "ApparmorAny", +# "selector": ".metadata .annotations .\"container.apparmor.security.beta.kubernetes.io/nginx\"", +# "reason": "Well defined AppArmor policies may provide greater protection from unknown threats. WARNING: NOT PRODUCTION READY", +# "points": 3 +# }, +# { +# "id": "ServiceAccountName", +# "selector": ".spec .serviceAccountName", +# "reason": "Service accounts restrict Kubernetes API access and should be configured with least privilege", +# "points": 3 +# }, +# { +# "id": "SeccompAny", +# "selector": ".metadata .annotations .\"container.seccomp.security.alpha.kubernetes.io/pod\"", +# "reason": "Seccomp profiles set minimum privilege and secure against unknown threats", +# "points": 1 +# }, +# { +# "id": "LimitsCPU", +# "selector": "containers[] .resources .limits .cpu", +# "reason": "Enforcing CPU limits prevents DOS via resource exhaustion", +# "points": 1 +# }, +# { +# "id": "RequestsMemory", +# "selector": "containers[] .resources .limits .memory", +# "reason": "Enforcing memory limits prevents DOS via resource exhaustion", +# "points": 1 +# }, +# { +# "id": "RequestsCPU", +# "selector": "containers[] .resources .requests .cpu", +# "reason": "Enforcing CPU requests aids a fair balancing of resources across the cluster", +# "points": 1 +# }, +# { +# "id": "RequestsMemory", +# "selector": "containers[] .resources .requests .memory", +# "reason": "Enforcing memory requests aids a fair balancing of resources across the cluster", +# "points": 1 +# }, +# { +# "id": "CapDropAny", +# "selector": "containers[] .securityContext .capabilities .drop", +# "reason": "Reducing kernel capabilities available to a container limits its attack surface", +# "points": 1 +# }, +# { +# "id": "CapDropAll", +# "selector": "containers[] .securityContext .capabilities .drop | index(\"ALL\")", +# "reason": "Drop all capabilities and add only those required to reduce syscall attack surface", +# "points": 1 +# }, +# { +# "id": "ReadOnlyRootFilesystem", +# "selector": "containers[] .securityContext .readOnlyRootFilesystem == true", +# "reason": "An immutable root filesystem can prevent malicious binaries being added to PATH and increase attack cost", +# "points": 1 +# }, +# { +# "id": "RunAsNonRoot", +# "selector": "containers[] .securityContext .runAsNonRoot == true", +# "reason": "Force the running image to run as a non-root user to ensure least privilege", +# "points": 1 +# }, +# { +# "id": "RunAsUser", +# "selector": "containers[] .securityContext .runAsUser -gt 10000", +# "reason": "Run as a high-UID user to avoid conflicts with the host's user table", +# "points": 1 +# } +# ] +# } +# } +# ] +``` + +

\ No newline at end of file diff --git a/k8s-certifications/topics/labels.md b/k8s-certifications/topics/labels.md new file mode 100644 index 0000000..cad743d --- /dev/null +++ b/k8s-certifications/topics/labels.md @@ -0,0 +1,223 @@ +# [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels) + +
+ +## Nodes + +
+ +### Show all labels for the node `node01` + +
+ +
show

+ +```bash +kubectl get nodes node01 --show-labels +# NAME STATUS ROLES AGE VERSION LABELS +# node01 Ready 61m v1.18.0 accelerator=nvidia-tesla-p100,beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node01,kubernetes.io/os=linux +``` + +

+ +
+ +### Label worker node `node01` with label `type=critical` + +
+ +
show

+ +```bash +kubectl label node node01 type=critical +# node/node01 labeled +``` + +

+ +
+ +### Remove label `type=critical` from worker node `node01` + +
+ +
show

+ +```bash +kubectl label node node01 type- +# node/node01 labeled +``` + +

+ +
+ +## Namespaces + +
+ +### Create and label Label namespace `alpha` with label `type:critical` + +
+ +
show

+ +```bash +kubectl create namespace alpha +kubectl label namespace alpha type=critical + +kubectl get namespace alpha --show-labels +# NAME STATUS AGE LABELS +# alpha Active 70s type=critical +``` + +

+ +
+ +## Pods + +
+ +### Create a new pod with name `nginx-labels` and using the nginx image and labels `tier=frontend` + +
+ +
show

+ +```bash +kubectl run nginx-labels --image=nginx --labels=tier=frontend +``` + +```bash +# verification +kubectl get pod nginx-labels --show-labels +# NAME READY STATUS RESTARTS AGE LABELS +# nginx-labels 1/1 Running 0 16s tier=frontend +``` + +

+ +
+ +### Create pod `nginx-labels` with `nginx` image and label `name=nginx`, `tier=frontend`, `env=dev` + +
+ +
show

+ +```bash +kubectl run nginx-labels --image=nginx --labels=name=nginx,tier=frontend,env=dev,version=v1 +``` + +OR + +```yaml +cat << EOF > nginx-labels.yaml +apiVersion: v1 +kind: Pod +metadata: + labels: + env: dev + name: nginx + tier: frontend + version: v1 + name: nginx-labels +spec: + containers: + - image: nginx + name: nginx +EOF + +kubectl apply -f nginx-labels.yaml +``` + +

+ +
+ +### Show all labels of the pod `nginx-labels` + +
+ +
show

+ +```bash +kubectl get pod nginx-labels --show-labels +# NAME READY STATUS RESTARTS AGE LABELS +# nginx-labels 1/1 Running 0 26s env=dev,name=nginx,tier=frontend,version=v1 +``` + +

+ +
+ +### Change the labels of pod 'nginx-labels' to be `version=v2` + +
show

+ +```bash +kubectl label pod nginx-labels version=v2 --overwrite + +kubectl get pod nginx-labels --show-labels +# NAME READY STATUS RESTARTS AGE LABELS +# nginx-labels 1/1 Running 0 110s env=dev,name=nginx,tier=frontend,version=v2 +``` + +

+ +
+ +### Get the label `env` for the pods (show a column with env labels) + +
show

+ +```bash +kubectl get pod -L env +# OR +kubectl get pod --label-columns=env +# NAME READY STATUS RESTARTS AGE ENV +# nginx-labels 1/1 Running 0 25s dev +``` + +

+ +
+ +### Get only the `version=v2` pods + +
show

+```bash
+kubectl get pod -l version=v2
+# OR
+kubectl get pod -l 'version in (v2)'
+# OR
+kubectl get pod --selector=version=v2
+```
+

+ +
+ +### Remove the `name` label from the `nginx-labels` pod + +
show

+```bash
+kubectl label pod nginx-labels name-
+
+kubectl get pod nginx-labels --show-labels
+# NAME           READY   STATUS    RESTARTS   AGE     LABELS
+# nginx-labels   1/1     Running   0          4m49s   env=dev,tier=frontend,version=v2
+```
+

+ + +### Clean up + +```bash +kubectl delete namespace alpha +kubectl delete pod nginx-labels --force --grace-period=0 +``` \ No newline at end of file diff --git a/k8s-certifications/topics/logging.md b/k8s-certifications/topics/logging.md new file mode 100644 index 0000000..90e0d79 --- /dev/null +++ b/k8s-certifications/topics/logging.md @@ -0,0 +1,79 @@ +# [Logging](https://kubernetes.io/docs/concepts/cluster-administration/logging/) + +
+ +### Create a pod with below specs. Check the logs of the pod. Retrieve all currently available application logs from the running pod and store them in the file /tmp/counter.log. + +
+
+```yaml
+cat << 'EOF' > counter.yaml   # 'EOF' is quoted so $i, $(date) and $((i+1)) are not expanded while writing the file
+apiVersion: v1
+kind: Pod
+metadata:
+  name: counter
+spec:
+  containers:
+  - name: count
+    image: busybox
+    args: [/bin/sh, -c, 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done']
+EOF
+
+kubectl apply -f counter.yaml
+```
+
show

+ +```bash +kubectl logs counter +OR +kubectl logs counter -f # for tailing the logs +``` + +#### Copy the logs to the /tmp/counter.log folder. + +```bash +kubectl logs counter > /tmp/counter.log +``` + +

+ +
+ +### Create a multi-container pod with below specs. Check the logs of the counter pod. + +
+
+```yaml
+cat << 'EOF' > nginx-counter.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx-counter
+spec:
+  containers:
+  - name: nginx
+    image: nginx
+  - name: counter
+    image: busybox
+    args: [/bin/sh, -c, 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done']
+EOF
+
+kubectl apply -f nginx-counter.yaml
+```
+
show

+ +`kubectl logs nginx-counter counter` OR `kubectl logs nginx-counter -c counter` + +

+ +
+ +### Cleanup + +```bash +rm /tmp/counter.log +kubectl delete pod nginx-counter +``` diff --git a/k8s-certifications/topics/monitoring.md b/k8s-certifications/topics/monitoring.md new file mode 100644 index 0000000..5778703 --- /dev/null +++ b/k8s-certifications/topics/monitoring.md @@ -0,0 +1,30 @@ +# [Monitoring](https://kubernetes.io/docs/tasks/debug-application-cluster/resource-usage-monitoring/) + +
+ +### Monitor the node consumption + +
+ +```bash +kubectl top nodes +``` + +
+ +### Monitor the pod consumption + +```bash +kubectl top pods +``` + +
+
+### Find the pod with label `name=high-cpu` that is running high CPU workloads
+
+```bash
+kubectl top pods -l name=high-cpu --sort-by=cpu
+```
+
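+
+Note that `kubectl top` relies on the metrics pipeline (typically metrics-server) being installed; a quick way to check, assuming the standard metrics API group, is:
+
+```bash
+kubectl get apiservices | grep metrics.k8s.io   # the v1beta1.metrics.k8s.io APIService should report Available=True
+```
+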
+ diff --git a/k8s-certifications/topics/multi_container_pods.md b/k8s-certifications/topics/multi_container_pods.md new file mode 100644 index 0000000..2db29fe --- /dev/null +++ b/k8s-certifications/topics/multi_container_pods.md @@ -0,0 +1,265 @@ +# Multi-container Pods + +
+ +### Create a multi-container pod `multi-container-pod` with 2 containers + - first container name `nginx` with image `nginx` + - second container name `redis` with image `redis` + +
show

+ +```yaml +cat << EOF > multi-container-pod.yaml +apiVersion: v1 +kind: Pod +metadata: + name: multi-container-pod +spec: + containers: + - image: nginx + name: nginx + - image: redis + name: redis +EOF + +kubectl apply -f multi-container-pod.yaml +``` + +

+ +
+ +### Create a pod named multi-container-nrm with a single app container for each of the following images running inside: nginx + redis + memcached. + +
show

+ +```yaml +cat << EOF > multi-container-nrm.yaml +apiVersion: v1 +kind: Pod +metadata: + name: multi-container-nrm +spec: + containers: + - image: nginx + name: nginx + - image: redis + name: redis + - image: memcached + name: memcached +EOF + +kubectl apply -f multi-container-nrm.yaml +``` + +

+ +
+
+### Create a pod named `sidecar-pod` with a single app container using the following spec.
+```yaml
+cat << EOF > sidecar-pod.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: sidecar-pod
+spec:
+  containers:
+  - name: myapp
+    image: alpine:latest
+    command: ['sh', '-c', 'while true; do echo "logging" >> /opt/logs.txt; sleep 1; done']
+    volumeMounts:
+    - name: data
+      mountPath: /opt
+  volumes:
+  - name: data
+    emptyDir: {}
+EOF
+
+kubectl apply -f sidecar-pod.yaml
+```
+- Add a sidecar container named `sidecar`, using the `busybox` image, to the existing `sidecar-pod`. The new sidecar container has to run the following command: `sh -c "tail -F /opt/logs.txt"`
+- Use a Volume mounted at `/opt` to make the log file logs.txt available to the sidecar container.
+- Don't modify the spec of the existing container other than adding the required volume mount.
+
show

+```yaml
+cat << EOF > sidecar-pod.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: sidecar-pod
+spec:
+  containers:
+  - name: myapp
+    image: alpine:latest
+    command: ['sh', '-c', 'while true; do echo "logging" >> /opt/logs.txt; sleep 1; done']
+    volumeMounts:
+    - name: data
+      mountPath: /opt
+  - name: sidecar                  # add the sidecar container
+    image: busybox
+    command: ['sh', '-c', 'tail -F /opt/logs.txt']
+    volumeMounts:
+    - name: data                   # share the same volume so the sidecar can read logs.txt
+      mountPath: /opt
+  volumes:
+  - name: data
+    emptyDir: {}
+EOF
+
+# containers cannot be added to a running Pod, so delete and recreate it
+kubectl delete pod sidecar-pod --force --grace-period=0
+kubectl apply -f sidecar-pod.yaml
+```
+

+ +
+
+### Create a multi-container pod with fluentd acting as a sidecar container, using the given specs below. Update the pod spec so that it runs both containers and the log files from the first container can be shared/used by the second container. Mount a shared volume /var/log on both containers, which does not persist when the pod is deleted.
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: counter
+spec:
+  containers:
+  - name: count
+    image: busybox
+    args:
+    - /bin/sh
+    - -c
+    - >
+      i=0;
+      while true;
+      do
+        echo "$i: $(date)" >> /var/log/1.log;
+        echo "$(date) INFO $i" >> /var/log/2.log;
+        i=$((i+1));
+        sleep 1;
+      done
+  - name: count-agent
+    image: k8s.gcr.io/fluentd-gcp:1.30
+    env:
+    - name: FLUENTD_ARGS
+      value: -c /etc/fluentd-config/fluentd.conf
+    volumeMounts:
+    - name: config-volume
+      mountPath: /etc/fluentd-config
+  volumes:
+  - name: config-volume
+    configMap:
+      name: fluentd-config
+```
+
+#### Create the `fluentd-config` ConfigMap, as it is needed by fluentd and mounted as config.
+
+```yaml
+cat << EOF > fluentd-sidecar-config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: fluentd-config
+data:
+  fluentd.conf: |
+    <source>
+      type tail
+      format none
+      path /var/log/1.log
+      pos_file /var/log/1.log.pos
+      tag count.format1
+    </source>
+
+    <source>
+      type tail
+      format none
+      path /var/log/2.log
+      pos_file /var/log/2.log.pos
+      tag count.format2
+    </source>
+
+    <match **>
+      type google_cloud
+    </match>
+EOF
+
+kubectl apply -f fluentd-sidecar-config.yaml
+```
+
show

+```yaml
+cat << 'EOF' > two-files-counter-pod-agent-sidecar.yaml   # 'EOF' is quoted so $i and $(date) are not expanded while writing the file
+apiVersion: v1
+kind: Pod
+metadata:
+  name: counter
+spec:
+  containers:
+  - name: count
+    image: busybox
+    args:
+    - /bin/sh
+    - -c
+    - >
+      i=0;
+      while true;
+      do
+        echo "$i: $(date)" >> /var/log/1.log;
+        echo "$(date) INFO $i" >> /var/log/2.log;
+        i=$((i+1));
+        sleep 1;
+      done
+    volumeMounts:
+    - name: varlog              # mount the varlog volume as the /var/log path
+      mountPath: /var/log
+  - name: count-agent
+    image: k8s.gcr.io/fluentd-gcp:1.30
+    env:
+    - name: FLUENTD_ARGS
+      value: -c /etc/fluentd-config/fluentd.conf
+    volumeMounts:
+    - name: varlog              # mount the varlog volume as the /var/log path
+      mountPath: /var/log
+    - name: config-volume
+      mountPath: /etc/fluentd-config
+  volumes:
+  - name: varlog                # define varlog volume as emptyDir, which does not persist when the pod is deleted
+    emptyDir: {}
+  - name: config-volume
+    configMap:
+      name: fluentd-config
+EOF
+
+kubectl apply -f two-files-counter-pod-agent-sidecar.yaml
+```
+
+```bash
+kubectl get pod counter
+# NAME      READY   STATUS    RESTARTS   AGE
+# counter   2/2     Running   0          24s
+
+kubectl exec counter -c count -- cat /var/log/1.log
+# 0: Sat Dec 18 02:34:35 UTC 2021
+# 1: Sat Dec 18 02:34:36 UTC 2021
+
+kubectl exec counter -c count-agent -- cat /var/log/1.log
+# 0: Sat Dec 18 02:34:35 UTC 2021
+# 1: Sat Dec 18 02:34:36 UTC 2021
+```
+

+ +
+
+### Clean up
+
+```bash
+rm multi-container-nrm.yaml two-files-counter-pod-agent-sidecar.yaml fluentd-sidecar-config.yaml multi-container-pod.yaml sidecar-pod.yaml
+kubectl delete configmap fluentd-config
+kubectl delete pod multi-container-nrm counter multi-container-pod sidecar-pod --force
+``` \ No newline at end of file diff --git a/k8s-certifications/topics/namespaces.md b/k8s-certifications/topics/namespaces.md new file mode 100644 index 0000000..05987e8 --- /dev/null +++ b/k8s-certifications/topics/namespaces.md @@ -0,0 +1,95 @@ +# [Namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/)
+
+ - Namespaces provide a mechanism for isolating groups of resources within a single cluster.
+ - Namespace-based scoping is applicable only for namespaced objects (e.g. Deployments, Services, etc) and not for cluster-wide objects (e.g. StorageClass, Nodes, PersistentVolumes, etc).
+ - Names of resources need to be unique within a namespace, but not across namespaces.
+
+ +### Check the namespaces on the cluster + +
+ +
show

+ +```bash +kubectl get namespaces +``` + +

+ +
+ +### Create namespace named `alpha` + +
+ +
show

+ +```bash +kubectl create namespace alpha +``` + +

+ +
+ +### Get pods from `alpha` namespace + +
+ +
show

+ +```bash +kubectl get pods --namespace=alpha +# OR +kubectl get pods -n=alpha +``` + +

+ +
+ +### Get pods from all namespaces + +
+ +
show

+ +```bash +kubectl get pods --all-namespaces +#OR +kubectl get pods -A +``` + +

+ +
+ +### Label namespace `alpha` with label `type:critical` + +
+ +
show

+ +```bash +kubectl label namespace alpha type=critical + +kubectl get namespace alpha --show-labels +# NAME STATUS AGE LABELS +# alpha Active 70s type=critical +``` + +

+ +### Delete namespace `alpha` + +
+ +
show

+ +```bash +kubectl delete namespace alpha +``` + +

\ No newline at end of file diff --git a/k8s-certifications/topics/network_policies.md b/k8s-certifications/topics/network_policies.md new file mode 100644 index 0000000..f67b004 --- /dev/null +++ b/k8s-certifications/topics/network_policies.md @@ -0,0 +1,219 @@ +# [Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) + +
+
+**NOTE** : [Flannel does not support Network Policies](https://github.com/flannel-io/flannel/issues/558) and does not work with the current katacoda cluster. Try the setup on a cluster whose network plugin supports network policies.
+
+### Get network policies in the default namespace
+
+ +
show

+ +```bash +kubectl get networkpolicy +``` + +

+ +
+ +### Create a default `deny-all` Network Policy that denies ingress and egress traffic + +
show

+```bash
+cat << EOF > deny-all.yaml
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  name: deny-all
+spec:
+  podSelector: {}    # selects all pods in the namespace
+  policyTypes:       # no ingress or egress rules are defined, so all ingress and egress traffic is denied
+  - Ingress
+  - Egress
+EOF
+
+kubectl apply -f deny-all.yaml
+```
+
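+
+A describe of the policy should confirm that the selected pods (all pods, via the empty `podSelector`) are isolated in both directions:
+
+```bash
+kubectl describe networkpolicy deny-all
+```
+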

+ +
+
+### Create three pods as per the below specs. Create a NetworkPolicy `limit-consumer` so that `consumer` pods can only be accessed from producer pods and not from web pods.
+1. Pod named `consumer` with image `nginx`. Expose via a ClusterIP service on port 80.
+2. Pod named `producer` with image `nginx`. Expose via a ClusterIP service on port 80.
+3. Pod named `web` with image `nginx`. Expose via a ClusterIP service on port 80.
+
+ +
show

+ +#### Create the deployments and expose as service + +```bash +kubectl run consumer --image=nginx && kubectl expose pod consumer --port=80 +kubectl run producer --image=nginx && kubectl expose pod producer --port=80 +kubectl run web --image=nginx && kubectl expose pod web --port=80 +``` + +#### Verify the communication + +```bash +# verify if web and producer can access consumer +kubectl exec producer -- curl http://consumer:80 # success +kubectl exec web -- curl http://consumer:80 # success +``` + +#### Create and apply the network policy + +```yaml +cat << EOF > limit-consumer.yaml +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: limit-consumer +spec: + podSelector: + matchLabels: + run: consumer # selector for the pods + policyTypes: + - Ingress + ingress: # allow ingress traffic only from producer pods + - from: + - podSelector: # from pods + matchLabels: # with this label + run: producer +EOF + +kubectl apply -f limit-consumer.yaml +``` + +#### Verify the communication + +```bash +# verify if web and producer can access consumer +kubectl exec producer -- curl http://consumer:80 # success +kubectl exec web -- curl http://consumer:80 # failure +``` + +```bash +# Cleanup +kubectl delete pod web producer consumer --force +kubectl delete svc web producer consumer +rm limit-consumer.yaml +``` + +

+ +
+ +### You have rolled out a new pod to your infrastructure and now you need to allow it to communicate with the `backend` and `storage` pods but nothing else. Given the running pod `web` edit it to use a network policy that will allow it to send traffic only to the `backend` and `storage` pods. + +#### Setup + +```bash +kubectl run web --image nginx --labels name=web && kubectl expose pod web --port 80 +kubectl run backend --image nginx --labels name=backend && kubectl expose pod backend --port 80 +kubectl run storage --image nginx --labels name=storage && kubectl expose pod storage --port 80 +kubectl run dummy --image nginx --labels name=dummy && kubectl expose pod dummy --port 80 +``` + +#### Verify the communication + +```bash +# verify if web and producer can access consumer +kubectl exec web -- curl http://backend:80 # success +kubectl exec web -- curl http://storage:80 # success +kubectl exec web -- curl http://dummy:80 # success - but should be failure +``` + +#### Allow dns lookups for all pods + +```yaml +kubectl label namespace kube-system name=kube-system + +cat << EOF > egress-deny-all.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: default-deny-all-egress +spec: + podSelector: {} + egress: + - to: + - namespaceSelector: + matchLabels: + name: kube-system + ports: + - protocol: TCP + port: 53 + - protocol: UDP + port: 53 + policyTypes: + - Egress +EOF + +kubectl apply -f egress-deny-all.yaml +``` + +
show

+ +#### Create and apply the network policy + +```yaml +cat << EOF > limit-web.yaml +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: limit-web +spec: + podSelector: + matchLabels: + name: web # selector for the pods + policyTypes: + - Ingress + - Egress + ingress: + - {} + egress: # allow egress traffic only to backend & storage pods + - to: + - podSelector: # from pods + matchLabels: # with backend label + name: backend + - podSelector: # from pods + matchLabels: # with storage label + name: storage + ports: + - protocol: TCP + port: 80 +EOF + +kubectl apply -f limit-web.yaml +``` + +#### Verify the previous curl work. Create a dummy pod and verify it should not be able to reach the same. + +```bash +# verify if web and producer can access consumer +kubectl exec web -- curl http://backend:80 # success +kubectl exec web -- curl http://storage:80 # success +kubectl exec web -- curl http://dummy:80 # failure +``` + +```bash +# Cleanup +kubectl label namespace kube-system name- +kubectl delete networkpolicy default-deny-all-egress limit-web +kubectl delete pod web backend storage dummy --force +kubectl delete svc web backend storage dummy +rm limit-web.yaml egress-deny-all.yaml +``` + +

\ No newline at end of file diff --git a/k8s-certifications/topics/nodes.md b/k8s-certifications/topics/nodes.md new file mode 100644 index 0000000..20d9b2f --- /dev/null +++ b/k8s-certifications/topics/nodes.md @@ -0,0 +1,90 @@ +# Nodes + +
+ +### Get the nodes of the cluster + +
show

+ +```bash +kubectl get nodes +# NAME STATUS ROLES AGE VERSION +# controlplane Ready master 62m v1.18.0 +# node01 Ready 61m v1.18.0 +``` + +

+ +
+ +### Show all labels for the node `node01` + +
show

+ +```bash +kubectl get nodes node01 --show-labels +# NAME STATUS ROLES AGE VERSION LABELS +# node01 Ready 61m v1.18.0 accelerator=nvidia-tesla-p100,beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node01,kubernetes.io/os=linux +``` + +

+ +
+ +### Label worker node `node01` with label `type=critical` + +
show

+ +```bash +kubectl label node node01 type=critical +# node/node01 labeled +``` + +

+ +
+ +### Remove label `type=critical` from worker node `node01` + +
show

+ +```bash +kubectl label node node01 type- +# node/node01 labeled +``` + +

+ +
+
+### Get usage metrics such as CPU and Memory of the cluster nodes
+
show

+ +```bash +kubectl top nodes +``` + +

+ +
+ +### Set the node named node01 unavailable and reschedule all the pods running on it. + +
show

+ +```bash +kubectl drain node01 --ignore-daemonsets --force # drain will cordon the node as well +# node/node01 cordoned +# Pods: kube-system/kube-flannel-ds-amd64-6mrm2, kube-system/kube-keepalived-vip-zchjw, kube-system/kube-proxy-ms2mf +# WARNING: deleting Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet: default/multi-container-nrm; ignoring DaemonSet-managed Pods: kube-system/kube-flannel-ds-amd64-6mrm2, kube-system/kube-keepalived-vip-zchjw, kube-system/kube-proxy-ms2mf +# evicting pod default/multi-container-nrm +# evicting pod kube-system/katacoda-cloud-provider-6c46f89b5c-jvb7g +# pod/multi-container-nrm evicted +# pod/katacoda-cloud-provider-6c46f89b5c-jvb7g evicted +# node/node01 evicted +``` + +

+ +
\ No newline at end of file diff --git a/k8s-certifications/topics/pod_security_context.md b/k8s-certifications/topics/pod_security_context.md new file mode 100644 index 0000000..e171d60 --- /dev/null +++ b/k8s-certifications/topics/pod_security_context.md @@ -0,0 +1,145 @@ +# [Pod Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) + +A security context defines privilege and access control settings for a Pod or Container. Security context settings include, but are not limited to: + - [`Discretionary Access Control`](#discretionary_access_control): Permission to access an object, like a file, is based on user ID (UID) and group ID (GID). + - [`Security Enhanced Linux (SELinux)`](#selinux): Objects are assigned security labels. + - Running as `privileged` or unprivileged. + - [`Linux Capabilities`](#): Give a process some privileges, but not all the privileges of the root user. + - [`AppArmor`](#apparmor): Use program profiles to restrict the capabilities of individual programs + - [`Seccomp`](#seccomp): Filter a process's system calls. + - [`AllowPrivilegeEscalation`]: Controls whether a process can gain more privileges than its parent process. This bool directly controls whether the no_new_privs flag gets set on the container process. + - [`readOnlyRootFilesystem`](#immutability): Mounts the container's root filesystem as read-only. + +
+ +## Discretionary Access Control + +
+
+### Run a `busybox-user` pod with the following user and group settings
+ - `user`: `1000`
+ - `group`: `3000`
+
show

+ +```yaml +cat << EOF > busybox-user.yaml +apiVersion: v1 +kind: Pod +metadata: + name: busybox-user +spec: + securityContext: # add this + runAsUser: 1000 # add user + runAsGroup: 3000 # add group + containers: + - image: busybox + name: busybox-user + command: ["sh", "-c", "sleep 600"] +EOF + +kubectl apply -f busybox-user.yaml +``` + +```bash +# verify - will have a proper user if the user exists +kk exec busybox-user -- whoami +# whoami: unknown uid 1000 +# command terminated with exit code 1 +``` + +

+ +
+ +## SELinux + +
+ +### Create a nginx pod with `SYS_TIME` & `NET_ADMIN` capabilities. + +
show

+ +```yaml +cat << EOF > nginx.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + containers: + - image: nginx + name: nginx + securityContext: + capabilities: + add: ["SYS_TIME", "NET_ADMIN"] +EOF + +kubectl apply -f nginx.yaml +``` + +

+ +
+ +## App Armor + +
+ +Refer [AppArmor](./apparmor.md) + +
+ +## Seccomp + +Refer [Seccomp - Secure Computing](./seccomp.md) + +
+ +## Immutability + +- Image Immutability: Containerized applications are meant to be immutable, and once built are not expected to change between different environments. + +
+ +### Make the `busybox-immutable` pod immutable using the following settings + - `readOnlyRootFilesystem`: `true` + - `privileged`: `false` + - `command` : `[ "sh", "-c", "sleep 600" ]` + +
show

+ +```yaml +cat << EOF > busybox-immutable.yaml +apiVersion: v1 +kind: Pod +metadata: + name: busybox-immutable +spec: + containers: + - image: busybox + name: busybox-immutable + command: ["sh", "-c", "sleep 600"] + securityContext: # add this + readOnlyRootFilesystem: true # add this to make container immutable + privileged: false # add this to prevent container making any node changes +EOF + +kubectl apply -f busybox-immutable.yaml +``` + +```bash +# verify +kubectl exec busybox-immutable -- touch echo.txt +# touch: echo.txt: Read-only file system +# command terminated with exit code 1 +``` + +

+ +## Clean up + +```bash +rm busybox-immutable.yaml +kubectl delete pod busybox-immutable --force --grace-period=0 +``` \ No newline at end of file diff --git a/k8s-certifications/topics/pod_security_policies.md b/k8s-certifications/topics/pod_security_policies.md new file mode 100644 index 0000000..7625b60 --- /dev/null +++ b/k8s-certifications/topics/pod_security_policies.md @@ -0,0 +1,255 @@ +# [Pod Security Policies - DEPRECATED](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) + + - Pod Security Policies enable fine-grained authorization of pod creation and updates. + - PodSecurityPolicy is deprecated as of Kubernetes v1.21, and will be removed in v1.25. + +
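Since PodSecurityPolicy is enforced through an admission plugin, a quick way to see whether it is already enabled on a kubeadm-style cluster (assuming the default manifest path) before starting the exercises:

```bash
grep enable-admission-plugins /etc/kubernetes/manifests/kube-apiserver.yaml
# - --enable-admission-plugins=NodeRestriction   # PodSecurityPolicy absent means it still needs to be added
```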
+ +### Create the following + - Pod Security Policy `psp-example` to prevent pods with `privileged` as true and + - Enable PodSecurityPolicy in Kubernetes API server + - Try creating the nginx pod with following specs. + +```yaml +cat << EOF > nginx.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + containers: + - image: nginx + name: nginx + securityContext: + privileged: true + restartPolicy: Always +EOF +``` + +
show

+ +#### Create Pod Security Policy + +```yaml +cat << EOF > psp.yaml +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: psp-example +spec: + privileged: false + seLinux: + rule: RunAsAny + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + fsGroup: + rule: RunAsAny +EOF + +kubectl apply -f psp.yaml +``` + +#### Pods need to have access to use Pod Security Policies and the Service Account i.e. default needs to have access to the same. + +```yaml +cat << EOF > role-psp.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: role-psp +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] +EOF + +kubectl apply -f role-psp.yaml + +cat << EOF > role-psp-binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: role-psp-binding +roleRef: + kind: ClusterRole + name: role-psp + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: default + namespace: default +EOF + +kubectl apply -f role-psp-binding.yaml +``` + +#### Update `/etc/kubernetes/manifests/kube-apiserver.yaml` to enable `PodSecurityPolicy` + +```yaml +--enable-admission-plugins=NodeRestriction,PodSecurityPolicy # update the admission plugins +``` + +#### Verify +```bash +kubectl apply -f nginx.yaml +# Error from server (Forbidden): error when creating "nginx.yaml": pods "nginx" is forbidden: PodSecurityPolicy: unable to admit pod: [spec.volumes[0]: Invalid value: "secret": secret volumes are not allowed to be used spec.containers[0].securityContext.privileged: Invalid value: true: Privileged containers are not allowed] +``` + +

+ +
+ +### Update the `psp-example` Pod Security Policy to allow only `configMap` and `secret` volumes. Try creating the nginx pod with following specs. + +```yaml +cat << EOF > nginx.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + containers: + - image: nginx + name: nginx + volumeMounts: + - mountPath: /cache + name: cache-volume + volumes: + - name: cache-volume + emptyDir: {} +EOF +``` + +
show

+ +```yaml +cat << EOF > psp.yaml +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: psp-example +spec: + privileged: false + seLinux: + rule: RunAsAny + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + fsGroup: + rule: RunAsAny + volumes: # add the volumes + - 'configMap' + - 'secret' +EOF + +kubectl apply -f psp.yaml +``` + +#### Verify + +```bash +kubectl apply -f nginx.yaml +# Error from server (Forbidden): error when creating "nginx.yaml": pods "nginx" is forbidden: PodSecurityPolicy: unable to admit pod: [spec.volumes[0]: Invalid value: "emptyDir": emptyDir volumes are not allowed to be used] + +# NOTE : If the pod is created check for other psp which allows the creation and delete the same. +``` + +

+ +
+ +### Update the following Pod Security Policy `psp-example` to allow only `/data` as host paths in `readOnly` mode. Try creating the nginx pod with following specs. + +```yaml +cat << EOF > nginx.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + containers: + - image: nginx + name: nginx + resources: {} + volumeMounts: + - mountPath: /test-pd + name: test-volume + volumes: + - name: test-volume + hostPath: + path: /data + type: Directory +EOF +``` + +
show

+ +```yaml +cat << EOF > psp.yaml +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: psp-example +spec: + privileged: false + seLinux: + rule: RunAsAny + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + fsGroup: + rule: RunAsAny + volumes: + - 'configMap' + - 'secret' + - 'hostPath' + allowedHostPaths: # add the allowed host paths + - pathPrefix: "/data" + readOnly: true +EOF + +kubectl apply -f psp.yaml +``` + +#### Verify + +```bash +kubectl apply -f nginx.yaml +# Error from server (Forbidden): error when creating "nginx.yaml": pods "nginx" is forbidden: PodSecurityPolicy: unable to admit pod: [spec.containers[0].volumeMounts[0].readOnly: Invalid value: false: must be read-only] +``` + +```yaml +cat << EOF > nginx.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + containers: + - image: nginx + name: nginx + resources: {} + volumeMounts: + - mountPath: /test-pd + name: test-volume + readOnly: true # add this + volumes: + - name: test-volume + hostPath: + path: /data + type: Directory +EOF +``` + +#### Verify + +```bash +kubectl apply -f nginx.yaml +# pod/nginx created +``` + +

\ No newline at end of file diff --git a/k8s-certifications/topics/pods.md b/k8s-certifications/topics/pods.md new file mode 100644 index 0000000..d991f90 --- /dev/null +++ b/k8s-certifications/topics/pods.md @@ -0,0 +1,464 @@ +# [Pod](https://kubernetes.io/docs/concepts/workloads/pods/) + + - A Kubernetes pod is a group of containers, and is the smallest unit that Kubernetes administers. + - Pods have a single IP address that is applied to every container within the pod. + - Pods can have single or multiple containers. + - Containers in a pod share the same resources such as memory and storage. + - Remember, you CANNOT edit specifications of an existing POD other than the below. + - spec.containers[*].image + - spec.initContainers[*].image + - spec.activeDeadlineSeconds + - spec.tolerations + - Edit the pod for changes and a tmp file is created. Delete and Recreate the pod using the tmp file. + +
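A minimal sketch of that edit-and-recreate workflow (the temporary file path below is illustrative; kubectl prints the real one when it rejects the edit):

```bash
kubectl edit pod nginx                    # change a field that cannot be updated in place
# the edit is rejected and kubectl saves your change to a temp file, e.g. /tmp/kubectl-edit-xxxx.yaml
kubectl replace --force -f /tmp/kubectl-edit-xxxx.yaml   # deletes and recreates the pod from that file
```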
+ + - [Basics](#basics) + - [Multi-container Pods](#multi-container-pods) + - [Node Selector](#node-selector) + - [Resources - Requests and limits](#resources) + - [Static Pods](#static-pods) + - [Init Containers](#init-containers) + +## Basics + +
+ +### Check number of pods in the default namespace + +
+ +
show

+ +```bash +kubectl get pods +# OR +kubectl get po +``` +

+ +
+ +### Create a new pod with name `nginx` and using the `nginx` image + +
show

+ +```bash +kubectl run nginx --image=nginx +``` + +

+ +
+ + +### Create a pod named `mongo` using image `mongo` in a new Kubernetes namespace `my-website` + +
show

+ +```bash +kubectl create namespace my-website +kubectl run mongo --image=mongo --namespace=my-website +``` + +

+ +
+ + +### Create a new pod with name nginx and using the nginx image in the `alpha` namespace + +
show

+ +```bash +kubectl create namespace alpha +kubectl run nginx --image=nginx --namespace=alpha +``` + +

+ +
+ +### Create a new pod `custom-nginx` using the `nginx` image and expose it on container port 8080. + +
show

+ +```bash +kubectl run custom-nginx --image=nginx --port=8080 +``` + +

+ +
+ +### Check which node the pod is hosted on + +
show

+ +```bash +kubectl get pods -o wide +``` + +

+ +
+ +### Get only the pods name + +
show

+ +```bash +kubectl get pods -o name +``` + +

+ +
+ +### Delete the pod with the name nginx + +
show

+ +```bash +kubectl delete pod nginx +``` + +

+ +
+ +### Delete the pod with the name nginx in the `alpha` namespace + +
show

+ +```bash +kubectl delete pod nginx --namespace=alpha +``` + +

+ +
+ +### Create pod `nginx-labels` with `nginx` image and label `name=nginx`, `tier=frontend`, `env=dev` + +
+ +
show

+ +```bash +kubectl run nginx-labels --image=nginx --labels=name=nginx,tier=frontend,env=dev,version=v1 +``` + +OR + +```yaml +cat << EOF > nginx-labels.yaml +apiVersion: v1 +kind: Pod +metadata: + labels: + env: dev + name: nginx + tier: frontend + version: v1 + name: nginx-labels +spec: + containers: + - image: nginx + name: nginx +EOF + +kubectl apply -f nginx-labels.yaml +``` + +

+ +
+ +### Delete the pod with name `nginx-labels` with force and no grace period + +
show

+ +```bash +kubectl delete pod nginx-labels --force --grace-period=0 +``` + +

+ +
+ +### Create a pod with name `nginx-file` and image nginx using a pod definition file + +
show

+ +```bash +kubectl run nginx-file --image=nginx --dry-run=client -o yaml > nginx-file.yaml +kubectl apply -f nginx-file.yaml +``` +

+ +
+ +### Create a nginx pod with name nginx and copy the pod definition file to a nginx_definition.yaml file + +
show

+ +```bash +kubectl run nginx --image=nginx +kubectl get pod nginx -o yaml > nginx_definition.yaml +``` + +

+ +
+ +### Create a `ubuntu-1` pod with image `ubuntu` with command `sleep 4800` + +
show

+ +```bash +kubectl run ubuntu-1 --image=ubuntu --command -- sleep 4800 +``` + +

+ +
+ +### A web application requires a specific version of redis to be used as a cache. Create a pod with the following characteristics, and leave it running when complete: +- The pod must run in the web namespace. +- The name of the pod should be cache +- Use the redis image with the 3.2 tag +- Expose port 6379 + +
+ +
show

+ +```bash +kubectl create namespace web +kubectl run cache --image redis:3.2 --port 6379 --namespace web +``` + +

+ +
+ +## Multi-container Pods + +
+ +Refer [Multi-container Pods](multi_container_pods.md) + +
+ +## [Node Selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) + +
+ +### Create a pod `nginx-node-selector` that will be deployed to a Node that has the label `accelerator=nvidia-tesla-p100` + +
show

+ +Add the label to a node: + +```bash +kubectl label nodes node01 accelerator=nvidia-tesla-p100 +``` + +We can use the 'nodeSelector' property on the Pod YAML: + +```yaml +cat << EOF > nginx-node-selector.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-node-selector +spec: + containers: + - name: nginx-node-selector + image: nginx + nodeSelector: # add this + accelerator: nvidia-tesla-p100 # the selection label +EOF + +kubectl apply -f nginx-node-selector.yaml +``` + +OR + +Use node affinity (https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/#schedule-a-pod-using-required-node-affinity) + +```yaml +cat << EOF > nginx-node-selector.yaml +apiVersion: v1 +kind: Pod +metadata: + name: affinity-pod +spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: accelerator + operator: In + values: + - nvidia-tesla-p100 + containers: + - name: nginx-node-selector + image: nginx +EOF + +kubectl apply -f nginx-node-selector.yaml + +``` + +

+ +
+ +### Remove the `description` annotations for pod `nginx-annotations` + +
show

+ +```bash +kubectl annotate pod nginx-annotations description- +``` + +

+ +
+ +### Remove the `nginx-annotations` pod to have a clean state in your cluster + +
show

+ +```bash +kubectl delete pod nginx-annotations --force +``` + +

+ +
+ +## [Resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) + +
+ +### Create an nginx pod name `nginx-resources` with `requests` `cpu=100m,memory=256Mi` and `limits` `cpu=200m,memory=512Mi` + +
+ +
show

+ +```bash +kubectl run nginx-resources --image=nginx --restart=Never --requests='cpu=100m,memory=256Mi' --limits='cpu=200m,memory=512Mi' +``` + +OR + +```yaml +cat << EOF > nginx-resources.yaml +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + run: nginx-resources + name: nginx-resources +spec: + containers: + - image: nginx + name: nginx-resources + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + dnsPolicy: ClusterFirst + restartPolicy: Never +status: {} +EOF + +kubectl apply -f nginx-resources.yaml +``` + +

+ +
+ +## [Static Pods](https://kubernetes.io/docs/concepts/workloads/pods/#static-pods) + +
+ +### Configure the kubelet systemd-managed service, on the node labelled with name=node01, to launch a pod containing a single container of Image httpd named webtool automatically. Any spec files required should be placed in the /etc/kubernetes/manifests directory on the node. + +
+ +
show

+ +#### Check the static pod path in the kubelet config file + +```bash +ps -ef | grep kubelet +# root 2794 1 3 07:43 ? 00:01:05 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --config=/var/lib/kubelet/config.yaml --cgroup-driver=systemd --network-plugin=cni --pod-infra-container-image=k8s.gcr.io/pause:3.2 --resolv-conf=/run/systemd/resolve/resolv.conf + +# Check the config file @ /var/lib/kubelet/config.yaml for the staticPodPath property +staticPodPath: /etc/kubernetes/manifests +``` + +#### Execute the below on node01 + +```yaml +mkdir /etc/kubernetes/manifests # create the static pod path, if it does not exist. + +cat << EOF > webtool.yaml +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + run: webtool + name: webtool +spec: + containers: + - image: httpd + name: webtool + resources: {} + dnsPolicy: ClusterFirst + restartPolicy: Always +status: {} +EOF + +systemctl restart kubelet # if required +``` + +#### Check on controlpanel node + +```bash +kubectl get pods +# NAME READY STATUS RESTARTS AGE +# webtool-node01 1/1 Running 0 11s +``` + +

+ +
+ +## Init Containers + +Refer [Init Containers](./init_containers.md) + +### Clean up + +
+ +```bash +rm nginx-labels.yaml nginx-file.yaml nginx_definition.yaml nginx-resources.yaml +kubectl delete pod mongo -n my-website --force --grace-period=0 +kubectl delete pod cache -n web --force --grace-period=0 +kubectl delete pod nginx -n alpha --force --grace-period=0 +kubectl delete namespace alpha web my-website +``` \ No newline at end of file diff --git a/k8s-certifications/topics/probes.md b/k8s-certifications/topics/probes.md new file mode 100644 index 0000000..3a7c4b7 --- /dev/null +++ b/k8s-certifications/topics/probes.md @@ -0,0 +1,181 @@ +# Readiness & Liveness Probes + +- Readiness probes help the kubelet know when a container is ready to start accepting traffic. A Pod is considered ready when all of its containers are ready. + - Liveness probes help the kubelet know when a pod is unhealthy and needs to be restarted. + +
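Besides the `httpGet` and `exec` handlers used in the exercises below, probes can also be plain TCP connection checks; an illustrative sketch with the common tuning fields:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: tcp-probe-demo            # hypothetical pod, not part of the exercises
spec:
  containers:
  - name: nginx
    image: nginx
    readinessProbe:
      tcpSocket:
        port: 80                  # ready once the port accepts connections
      initialDelaySeconds: 5
      periodSeconds: 10
    livenessProbe:
      tcpSocket:
        port: 80
      periodSeconds: 15
      failureThreshold: 3         # restart after 3 consecutive failures
```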
+ + - [Readiness probes](#readiness-probes) + - [Liveness probes](#liveness-probes) + +## Readiness probes + +
+ +### Create a `nginx-readiness` pod with a readiness probe that just runs the http request on `/` with port `80` + +
show

+ +```bash +kubectl run nginx-readiness --image=nginx --restart=Never --dry-run=client -o yaml > nginx-readiness.yaml +``` + +Edit `nginx-readiness.yaml` file to add `readinessProbe` probe as below and apply `kubectl apply -f nginx-readiness.yaml` + +```YAML +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + run: nginx + name: nginx +spec: + containers: + - image: nginx + imagePullPolicy: IfNotPresent + name: nginx + resources: {} + readinessProbe: # declare the readiness probe + httpGet: # add this line + path: / # + port: 80 # + dnsPolicy: ClusterFirst + restartPolicy: Never +status: {} +``` + +

+ +
+ +## Liveness probes + +
+ +### Create a `nginx-liveness` pod with a liveness probe that just runs the command 'ls'. + +
show

+ +```bash +kubectl run nginx-liveness --image=nginx --restart=Never --dry-run=client -o yaml > nginx-liveness.yaml +``` + +Edit `nginx-liveness.yaml` file to add `livenessProbe` probe as below and apply `kubectl apply -f nginx-liveness.yaml` + +```YAML +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + run: nginx + name: nginx +spec: + containers: + - image: nginx + imagePullPolicy: IfNotPresent + name: nginx + resources: {} + livenessProbe: # add liveness probe + exec: # add this line + command: # command definition + - ls # ls command + dnsPolicy: ClusterFirst + restartPolicy: Never +status: {} +``` +

+ +
+ +### Modify `nginx-liveness` pod to add a delay of 30 seconds whereas the interval between probes would be 5 seconds. + +
show

+ +#### Edit `nginx-liveness.yaml` file to update `livenessProbe` probe as below. Delete and recreate pod using `kubectl apply -f nginx-liveness.yaml` + +```YAML +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + run: nginx + name: nginx +spec: + containers: + - image: nginx + imagePullPolicy: IfNotPresent + name: nginx + resources: {} + livenessProbe: + initialDelaySeconds: 30 # add this line + periodSeconds: 5 # add this line as well + exec: + command: + - ls + dnsPolicy: ClusterFirst + restartPolicy: Never +status: {} +``` + +

+ +
+ +## Troubleshooting + +### Create a pod `liveness-exec` with the following specs. Wait for 30 secs and check if the pod restarts. Identify the reason. + +```bash +cat << EOF > exec-liveness.yaml +apiVersion: v1 +kind: Pod +metadata: + labels: + test: liveness + name: liveness-exec +spec: + containers: + - name: liveness + image: k8s.gcr.io/busybox + args: + - /bin/sh + - -c + - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600 + livenessProbe: + exec: + command: + - cat + - /tmp/healthy + initialDelaySeconds: 5 + periodSeconds: 5 +EOF + +kubectl apply -f exec-liveness.yaml +``` + +
show

+ +```bash +kubectl get pod liveness-exec -w # pod restarts due to failed liveness check +# NAME READY STATUS RESTARTS AGE +# liveness-exec 1/1 Running 0 17s +# liveness-exec 1/1 Running 1 76s + +kubectl describe pod liveness-exec + +# Normal Started 69s (x2 over 2m22s) kubelet, node01 Started container liveness +# Warning Unhealthy 25s (x6 over 110s) kubelet, node01 Liveness probe failed: cat: can't open '/tmp/healthy': No such file or directory +# Normal Killing 25s (x2 over 100s) kubelet, node01 Container liveness failed liveness probe, will be restarted +``` + +

+ +### Clean up + +```bash +rm nginx-liveness.yaml nginx-readiness.yaml +kubectl delete pod nginx-readiness nginx-liveness liveness-exec --force +``` \ No newline at end of file diff --git a/k8s-certifications/topics/rbac.md b/k8s-certifications/topics/rbac.md new file mode 100644 index 0000000..133f34c --- /dev/null +++ b/k8s-certifications/topics/rbac.md @@ -0,0 +1,221 @@ +# [RBAC authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) + +- Role and Role bindings are namespace scoped for e.g. pods, deployments, configmaps, etc. +- Cluster Role and Cluster Role bindings are cluster scoped resources and not limited to namespaces for e.g. nodes, pv, etc. + +## Table of Contents +1. [Role and Role Bindings](#role-and-role-bindings) +2. [Cluster Role and Cluster Role Bindings](#cluster-role-and-cluster-role-bindings) + +
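A quick way to see which resources are namespace-scoped (Role/RoleBinding territory) and which are cluster-scoped (ClusterRole territory):

```bash
kubectl api-resources --namespaced=true | head    # pods, deployments, configmaps, ...
kubectl api-resources --namespaced=false | head   # nodes, persistentvolumes, namespaces, ...
```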
+ +### Check the current authorization used by the cluster + +
show

+ +Check the `/etc/kubernetes/manifests/kube-apiserver.yaml` for the `--authorization-mode=Node,RBAC` + +
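On a kubeadm cluster the same check can be scripted (assuming the default manifest location):

```bash
grep authorization-mode /etc/kubernetes/manifests/kube-apiserver.yaml
# - --authorization-mode=Node,RBAC
```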

+ +
+ +## Role and Role Bindings + +
+ +### Create the role `pods-read` to `get, create, list and delete` `pods` in the default namespace. + +
show

+ +```bash +kubectl create role pods-read --verb=get,create,list,delete --resource=pods +``` + +OR + +```yaml +cat << EOF > pods-read.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pods-read +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - create + - list + - delete +EOF + +kubectl apply -f pods-read.yaml +``` + +```bash +# verify +kubectl get role pods-read +# NAME CREATED AT +# pods-read 2021-12-13T01:35:10Z +``` + +

+ +
+ +### Create a service account `sample-sa` + +
show

+ +```bash +kubectl create sa sample-sa +``` + +OR + +```yaml +cat << EOF > sample-sa.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + name: sample-sa +EOF + +kubectl apply -f sample-sa.yaml +``` + +```bash +# verify +kubectl get serviceaccount sample-sa +# NAME SECRETS AGE +# sample-sa 1 14s +``` + +

+ +
+ +### Create a role binding `sample-sa-pods-read-role-binding` binding service account `sample-sa` and role `pods-read` + +
show

+ +```bash +kubectl create rolebinding sample-sa-pods-read-role-binding --serviceaccount=default:sample-sa --role=pods-read +``` + +OR + +```yaml +cat << EOF > sample-sa-pods-read-role-binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + name: sample-sa-pods-read-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pods-read +subjects: +- kind: ServiceAccount + name: sample-sa + namespace: default +EOF + +kubectl apply -f sample-sa-pods-read-role-binding.yaml +``` + +```bash +# verify +kubectl get rolebinding sample-sa-pods-read-role-binding +# NAME ROLE AGE +# sample-sa-pods-read-role-binding Role/pods-read 18s +``` + +

+ +
+ +### Verify service account `sample-sa` can get pods using the `auth can-i` command. + +
show

+ +```bash +# verify +kubectl auth can-i get pods --as system:serviceaccount:default:sample-sa +# yes +``` +

+ +
+ +## Cluster Role and Cluster Role Bindings + +
+ +### Create the following for a user `proxy-admin` (which does not exist) + - Cluster role `proxy-admin-role` with permissions to `nodes` with `get, list,create, update` actions + - Cluster role binding `proxy-admin-role-binding` to bind cluster role `proxy-admin-role` to user `proxy-admin` + +
+ +
show

+ +```bash +kubectl create clusterrole proxy-admin-role --resource=nodes --verb=get,list,create,update +kubectl create clusterrolebinding proxy-admin-role-binding --user=proxy-admin --clusterrole=proxy-admin-role +``` + +```bash +# verify +kubectl auth can-i get nodes --as proxy-admin +# yes +``` + +

+ +
+ +### Create the following + - Create a new role named `deployment-role` which only allows the `create` action on the following resource types in the `finance` namespace. + - Deployment + - StatefulSet + - DaemonSet +- Create a new Service Account named `cicd-token` in the existing namespace `finance` +- Bind the new Role `deployment-role` to the new service account `cicd-token` using Role binding `deployment-role-binding` limited to the namespace `finance` + +
+ +
show

+ +```bash +kubectl create serviceaccount cicd-token -n finance +kubectl create role deployment-role --resource=deployments,statefulsets,daemonsets --verb=create -n finance +kubectl create rolebinding deployment-role-binding --serviceaccount=finance/cicd-token --role=deployment-role -n finance +``` + +```bash +# verify +kubectl auth can-i create deployments -n finance --as system:serviceaccount:finance:cicd-token +# yes +``` + +

+ +
+ +## Clean up + +
+ +```bash +rm sample-sa-pods-read-role-binding.yaml pods-read.yaml +kubectl delete rolebinding sample-sa-pods-read-role-binding +kubectl delete serviceaccount sample-sa +kubectl delete role pods-read +kubectl delete clusterrolebinding proxy-admin-role-binding +kubectl delete clusterrole proxy-admin-role +``` \ No newline at end of file diff --git a/k8s-certifications/topics/replica_set.md b/k8s-certifications/topics/replica_set.md new file mode 100644 index 0000000..537da89 --- /dev/null +++ b/k8s-certifications/topics/replica_set.md @@ -0,0 +1,206 @@ +# [ReplicaSet](https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/) + + - ReplicaSet ensures that a specified number of pod replicas are running at any given time. + - It is recommended to use Deployments instead of directly using ReplicaSets, as they help manage ReplicaSets and provide declarative updates to Pods. + +
+ +### Check number of replica sets in the default namespace + +
+ +
show

+ +```bash +kubectl get replicasets +# OR +kubectl get rs +``` + +

+ +
+ +### Create a replica set named `replica-set-demo` using a pod named `nginx` with the `nginx` image, labeled `tier=frontend`, with a single replica. + +
+ +
show

+ +```yaml +cat << EOF > replica-set-demo.yaml +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: replica-set-demo +spec: + replicas: 1 + selector: + matchLabels: + tier: frontend + template: + metadata: + labels: + tier: frontend + spec: + containers: + - name: nginx + image: nginx +EOF + +kubectl apply -f replica-set-demo.yaml +``` + +

+ +
+ +### Scale up the `replica-set-demo` from 1 replica to 2 replicas + +
+ +
show

+ +```bash +kubectl scale replicaset replica-set-demo --replicas=2 +``` + +OR + +Edit the replica set definition file `replica-set-demo.yaml` and apply `kubectl apply -f replica-set-demo.yaml` + +```yaml +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: replica-set-demo +spec: + replicas: 2 # update this + selector: + matchLabels: + tier: frontend + template: + metadata: + labels: + tier: frontend + spec: + containers: + - name: nginx + image: nginx +EOF +``` + +

+ +
+ +### Scale down the `replica-set-demo` from 2 replicas to 1 replica + +
+ +
show

+ +```bash +kubectl scale replicaset replica-set-demo --replicas=1 +``` + +OR + +#### Edit the replica set definition file `replica-set-demo.yaml` and apply `kubectl apply -f replica-set-demo.yaml` + +```yaml +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: replica-set-demo +spec: + replicas: 1 # update this + selector: + matchLabels: + tier: frontend + template: + metadata: + labels: + tier: frontend + spec: + containers: + - name: nginx + image: nginx +EOF +``` + +

+ +
+ +### Create a replica set using the below definition and fix any issues. + +
+ +```yaml +apiVersion: v1 +kind: ReplicaSet +metadata: + name: replicaset-1 +spec: + replicas: 1 + selector: + matchLabels: + tier: frontend + template: + metadata: + labels: + tier: frontend + spec: + containers: + - name: nginx + image: nginx +``` + +
show

+ +#### Check the apiVersion using `kubectl explain replicasets` which is `apps/v1`. +Update the version and apply again. + +

+ +
+ +### Create a replica set using the below definition and fix any issues. + +
+ +```yaml +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: replicaset-2 +spec: + replicas: 1 + selector: + matchLabels: + tier: frontend + template: + metadata: + labels: + tier: nginx + spec: + containers: + - name: nginx + image: nginx +``` + +
show

+ +The replica set selector field `tier: frontend` does not match the pod labels `tier: nginx`. Correct either of them and reapply. + +

+ +
+ +### Clean up + +```bash +kubectl delete replicaset replica-set-demo replicaset-1 replicaset-2 +rm replica-set-demo.yaml +``` diff --git a/k8s-certifications/topics/runtimes.md b/k8s-certifications/topics/runtimes.md new file mode 100644 index 0000000..44177dd --- /dev/null +++ b/k8s-certifications/topics/runtimes.md @@ -0,0 +1,42 @@ +# [Runtime Class](https://kubernetes.io/docs/concepts/containers/runtime-class/) + +
+ +### Create the following `gvisor` runtime class and create a nginx pod referring the `gvisor` runtime. + +
+ +```yaml +cat << EOF > gvisor.yaml +apiVersion: node.k8s.io/v1 +kind: RuntimeClass +metadata: + name: gvisor +handler: runsc +EOF + +kubectl apply -f gvisor.yaml +``` + +
show

+ +```yaml +cat << EOF > nginx.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + runtimeClassName: gvisor + containers: + - image: nginx + name: nginx + restartPolicy: Always +EOF + +kubectl apply -f nginx.yaml + +# NOTE : Pod may not come up as the runtime does not actually exist +``` +

+ diff --git a/k8s-certifications/topics/seccomp.md b/k8s-certifications/topics/seccomp.md new file mode 100644 index 0000000..1b5d797 --- /dev/null +++ b/k8s-certifications/topics/seccomp.md @@ -0,0 +1,205 @@ +# [Seccomp - Secure Computing](https://kubernetes.io/docs/tutorials/clusters/seccomp/) + + - Seccomp stands for secure computing mode and has been a feature of the Linux kernel. + - It can be used to sandbox the privileges of a process, restricting the calls it is able to make from userspace into the kernel. + - Kubernetes lets you automatically apply seccomp profiles loaded onto a node to your Pods and containers. + + **NOTE** : Seccomp is available in kubernetes 1.19 and above only. + +
+ +### Check the syscalls made by `ls` using the `strace` command + +
+ +```bash +strace -c ls + +# % time seconds usecs/call calls errors syscall +# ------ ----------- ----------- --------- --------- ---------------- +# 18.15 0.000051 4 12 mprotect +# 16.01 0.000045 5 9 openat +# 9.96 0.000028 3 11 close +# 9.96 0.000028 14 2 getdents +# 7.47 0.000021 3 7 read +# 6.41 0.000018 2 10 fstat +# 6.05 0.000017 9 2 2 statfs +# 4.98 0.000014 14 1 munmap +# 4.27 0.000012 6 2 ioctl +# 3.56 0.000010 10 1 write +# 3.20 0.000009 3 3 brk +# 2.14 0.000006 0 17 mmap +# 2.14 0.000006 1 8 8 access +# 1.78 0.000005 3 2 rt_sigaction +# 1.07 0.000003 3 1 set_tid_address +# 0.71 0.000002 2 1 rt_sigprocmask +# 0.71 0.000002 2 1 arch_prctl +# 0.71 0.000002 2 1 set_robust_list +# 0.71 0.000002 2 1 prlimit64 +# 0.00 0.000000 0 1 execve +# ------ ----------- ----------- --------- --------- ---------------- +# 100.00 0.000281 93 10 total +``` + +
+ +### Check if the OS supports Seccomp + +
+ + ```bash +grep -i seccomp /boot/config-$(uname -r) +# CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +# CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP=y + ``` + +
+ +### Check the status of Seccomp on the Kubernetes cluster + +
+ +```bash +kubectl run amicontained --image jess/amicontained -- amicontained +kk logs amicontained +# Container Runtime: docker +# Has Namespaces: +# pid: true +# user: false +# AppArmor Profile: docker-default (enforce) +# Capabilities: +# BOUNDING -> chown dac_override fowner fsetid kill setgid setuid setpcap net_bind_service net_raw sys_chroot mknod audit_write setfcap +# Seccomp: disabled +# Blocked Syscalls (22): +# MSGRCV SYSLOG SETPGID SETSID VHANGUP PIVOT_ROOT ACCT SETTIMEOFDAY UMOUNT2 SWAPON SWAPOFF REBOOT SETHOSTNAME SETDOMAINNAME INIT_MODULE DELETE_MODULE LOOKUP_DCOOKIE KEXEC_LOAD FANOTIFY_INIT OPEN_BY_HANDLE_AT FINIT_MODULE KEXEC_FILE_LOAD +# Looking for Docker.sock + ``` + +
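Another way to check, assuming shell access to a container or node, is the `Seccomp` field in `/proc/<pid>/status` (0 = disabled, 1 = strict, 2 = filter); a sketch using a throwaway busybox pod:

```bash
kubectl run seccomp-check --image=busybox --restart=Never -- sh -c 'grep Seccomp /proc/1/status'
kubectl logs seccomp-check    # e.g. "Seccomp: 0" when no profile is applied
```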
+ +### Enable Seccomp for the `amicontained` using following specs and Seccomp `type: RuntimeDefault`. + +
+ +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: amicontained +spec: + containers: + - args: + - amicontained + image: jess/amicontained + name: amicontained + restartPolicy: Always +``` + +
show

+ +#### Apply Seccomp security context + + ```yaml +cat << EOF > amicontained.yaml +apiVersion: v1 +kind: Pod +metadata: + name: amicontained +spec: + securityContext: # add the security context with seccomp profile + seccompProfile: + type: RuntimeDefault + containers: + - args: + - amicontained + image: jess/amicontained + name: amicontained + restartPolicy: Always +EOF + +kubectl apply -f amicontained.yaml +``` + +#### Verify + +```bash + +kk logs amicontained +# Container Runtime: kube +# Has Namespaces: +# pid: true +# user: false +# AppArmor Profile: docker-default (enforce) +# Capabilities: +# BOUNDING -> chown dac_override fowner fsetid kill setgid setuid setpcap net_bind_service net_raw sys_chroot mknod audit_write setfcap +# Seccomp: filtering +# Blocked Syscalls (62): +# SYSLOG SETPGID SETSID USELIB USTAT SYSFS VHANGUP PIVOT_ROOT _SYSCTL ACCT SETTIMEOFDAY MOUNT UMOUNT2 SWAPON SWAPOFF REBOOT SETHOSTNAME SETDOMAINNAME IOPL IOPERM CREATE_MODULE INIT_MODULE DELETE_MODULE GET_KERNEL_SYMS QUERY_MODULE QUOTACTL NFSSERVCTL GETPMSG PUTPMSG AFS_SYSCALL TUXCALL SECURITY LOOKUP_DCOOKIE CLOCK_SETTIME VSERVER MBIND SET_MEMPOLICY GET_MEMPOLICY KEXEC_LOAD ADD_KEY REQUEST_KEY KEYCTL MIGRATE_PAGES UNSHARE MOVE_PAGES PERF_EVENT_OPEN FANOTIFY_INIT NAME_TO_HANDLE_AT OPEN_BY_HANDLE_AT CLOCK_ADJTIME SETNS PROCESS_VM_READV PROCESS_VM_WRITEV KCMP FINIT_MODULE KEXEC_FILE_LOAD BPF USERFAULTFD MEMBARRIER PKEY_MPROTECT PKEY_ALLOC PKEY_FREE +# Looking for Docker.sock +``` + +

+ +
+ +### Create a nginx pod named `audit-pod` using audit.json seccomp profile. + +
+ +```bash +# file is also available in the [data/seccomp](../data/Seccomp/audit.json) folder +curl -L -o audit.json https://k8s.io/examples/pods/security/seccomp/profiles/audit.json +``` + +
show

+ +#### Copy the audit.json file to the default profiles location `/var/lib/kubelet/seccomp/` + +```bash +mkdir -p /var/lib/kubelet/seccomp/profiles +cp audit.json /var/lib/kubelet/seccomp/profiles +``` + +#### Create nginx pod using the seccomp profile + +```yaml +cat << EOF > audit-pod.yaml +apiVersion: v1 +kind: Pod +metadata: + name: audit-pod + labels: + app: audit-pod +spec: + securityContext: + seccompProfile: + type: Localhost + localhostProfile: profiles/audit.json + containers: + - name: audit-pod + image: nginx +EOF + +kubectl apply -f audit-pod.yaml + +``` + +#### Verify + +````bash +tail -f /var/log/syslog +# Dec 16 02:07:21 vagrant kernel: [ 2253.183862] audit: type=1326 audit(1639620441.516:20): auid=4294967295 uid=0 gid=0 ses=4294967295 pid=20123 comm="runc:[2:INIT]" exe="/" sig=0 arch=c000003e syscall=233 compat=0 ip=0x55e57ef09bc8 code=0x7ffc0000 +# Dec 16 02:07:21 vagrant kernel: [ 2253.183864] audit: type=1326 audit(1639620441.516:21): auid=4294967295 uid=0 gid=0 ses=4294967295 pid=20123 comm="runc:[2:INIT]" exe="/" sig=0 arch=c000003e syscall=138 compat=0 ip=0x55e57ef5e230 code=0x7ffc0000 +```` + +

+ +
+ +### Clean up + +```bash +kubectl delete pod audit-pod amicontained --force +rm audit-pod.yaml amicontained.yaml +``` \ No newline at end of file diff --git a/k8s-certifications/topics/secrets.md b/k8s-certifications/topics/secrets.md new file mode 100644 index 0000000..e3b1e24 --- /dev/null +++ b/k8s-certifications/topics/secrets.md @@ -0,0 +1,323 @@ +# [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) + + - A Secret is an API object used to store confidential data, such as passwords, tokens, or keys, in key-value pairs. + - Pods can consume secrets as environment variables, command-line arguments, or as configuration files in a volume. + - A Secret allows you to decouple sensitive, environment-specific configuration from your container images, so that your applications are easily portable. + +
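Secret values are stored base64-encoded (encoding, not encryption); assuming the `db-secret-1` secret created in the exercises below, a value can be produced and decoded like this:

```bash
echo -n 'db.example.com' | base64        # ZGIuZXhhbXBsZS5jb20=
kubectl get secret db-secret-1 -o jsonpath='{.data.DB_HOST}' | base64 -d
# db.example.com
```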
+ +### Check the secrets on the cluster in the default namespace + +
show

+ +```bash +kubectl get secrets +``` + +

+ +
+ +### Check the secrets on the cluster in all the namespaces + +
show

+ +```bash +kubectl get secrets --all-namespaces +# OR +kubectl get secrets -A +``` + +

+ +
+ +### Create a secret named `db-secret-1` with data `DB_HOST=db.example.com`, `DB_USER=development`, `DB_PASSWD=password` + +
show

+ +```bash +kubectl create secret generic db-secret-1 --from-literal=DB_HOST=db.example.com --from-literal=DB_USER=development --from-literal=DB_PASSWD=password +``` + +OR + +```yaml +cat << EOF > db-secret-1.yaml +apiVersion: v1 +kind: Secret +metadata: + name: db-secret-1 +data: + DB_HOST: ZGIuZXhhbXBsZS5jb20= + DB_PASSWD: cGFzc3dvcmQ= + DB_USER: ZGV2ZWxvcG1lbnQ= +EOF + +kubectl apply -f db-secret-1.yaml +``` + +```bash +kubectl describe secret db-secret-1 # verify +Name: db-secret-1 +Namespace: default +Labels: +Annotations: + +Type: Opaque + +Data +==== +DB_HOST: 14 bytes +DB_PASSWD: 8 bytes +DB_USER: 11 bytes +``` + +

+ +
+ +### Create a secret named `db-secret-2` with data from file `secret.properties` + +```bash +cat << EOT > secret.properties +DB_HOST=db.example.com +DB_USER=development +DB_PASSWD=password +EOT +``` + +
show

+ +```bash +kubectl create secret generic db-secret-2 --from-file=secret.properties +``` + +```bash +kubectl describe secret db-secret-2 # verify +Name: db-secret-2 +Namespace: default +Labels: +Annotations: + +Type: Opaque + +Data +==== +secret.properties: 62 bytes +``` + +

+ +
+ +### Create a new pod `nginx-2` with `nginx` image and add env variable for `DB_HOST` from secret `db-secret-1` + +
show

+ +```yaml +cat << EOF > nginx-2.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-2 +spec: + containers: + - image: nginx + name: nginx-2 + env: + - name: DB_HOST + valueFrom: + secretKeyRef: + name: db-secret-1 + key: DB_HOST +EOF + +kubectl apply -f nginx-2.yaml +``` + +```bash +kubectl exec nginx-2 -- env | grep DB_HOST # verify env variables +# DB_HOST=db.example.com +``` + +

+ +
+ +### You are tasked to create a secret and consume the secret in a pod using environment variables as follow: +- Create a secret named another-secret with a key/value pair; key1/value4 +- Start an nginx pod named nginx-secret using container image nginx, and add an environment variable exposing the value of the secret key key 1, using COOL_VARIABLE as the name for the environment variable inside the pod + +
show

+ +```bash +kubectl create secret generic another-secret --from-literal=key1=value4 +``` + +```yaml +cat << EOF > nginx-secret.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-secret +spec: + containers: + - image: nginx + name: nginx-secret + env: + - name: COOL_VARIABLE + valueFrom: + secretKeyRef: + name: another-secret + key: key1 +EOF + +kubectl apply -f nginx-secret.yaml +``` + +```bash +kubectl exec nginx-secret -- env | grep COOL_VARIABLE # verify env variables +# COOL_VARIABLE=value4 +``` + +

+ +
+ +### Create a new pod `nginx-3` with `nginx` image and add all env variables from secret `db-secret-1` + +
show

+ +```yaml +cat << EOF > nginx-3.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-3 +spec: + containers: + - image: nginx + name: nginx-3 + envFrom: + - secretRef: + name: db-secret-1 +EOF + +kubectl apply -f nginx-3.yaml +``` + +``` +kubectl exec nginx-3 -- env | grep DB_ # verify env variables +# DB_HOST=db.example.com +# DB_PASSWD=password +# DB_USER=development +``` + +

+ +
+ +### Create a new pod `nginx-4` with `nginx` image and mount the secret `db-secret-1` as a volume named `db-secret` and mount path `/secret` + +
show

+ +```yaml +cat << EOF > nginx-4.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-4 +spec: + containers: + - image: nginx + name: nginx-4 + volumeMounts: + - name: db-secret + mountPath: "/secret" + readOnly: true + volumes: + - name: db-secret + secret: + secretName: db-secret-1 +EOF + +kubectl apply -f nginx-4.yaml +``` + +```bash +kubectl exec nginx-4 -- cat /secret/DB_HOST # verify env variables +# db.example.com +``` + +

+ +
+ +### Create a tls secret using tls.crt and tls.key in the data folder. + +
show

+ +```bash +kubectl create secret tls my-tls-secret --cert=../data/tls.crt --key=../data/tls.key +``` + +```bash +kubectl describe secret my-tls-secret #verify +Name: my-tls-secret +Namespace: default +Labels: +Annotations: + +Type: kubernetes.io/tls + +Data +==== +tls.crt: 1932 bytes +tls.key: 3273 bytes +``` + +

+ +
+ +### Create a docker registry secret `regcred` with below details and create new pod `nginx-5` with `nginx` image and use the private docker registry. + - docker-server : example.com + - docker-username : user_name + - docker-password : password + - docker-email : user_name@example.com + +
show

+ +```bash +kubectl create secret docker-registry regcred --docker-server=example.com --docker-username=user_name --docker-password=password --docker-email=user_name@example.com +``` + +```yaml +cat << EOF > nginx-5.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-5 +spec: + containers: + - name: nginx-5 + image: nginx + imagePullSecrets: + - name: regcred +EOF + +kubectl apply -f nginx-5.yaml +``` + +

+ +
+ +### Clean up + +```bash +kubectl delete pod nginx-2 nginx-3 nginx-4 nginx-5 nginx-secret --force --grace-period=0 +kubectl delete secret db-secret-1 db-secret-2 another-secret my-tls-secret regcred +rm secret.properties nginx-2.yaml nginx-3.yaml nginx-4.yaml nginx-5.yaml nginx-secret.yaml +``` diff --git a/k8s-certifications/topics/service_accounts.md b/k8s-certifications/topics/service_accounts.md new file mode 100644 index 0000000..7342756 --- /dev/null +++ b/k8s-certifications/topics/service_accounts.md @@ -0,0 +1,108 @@ +# Service Account + +A service account provides an identity for processes that run in a Pod. +**NOTE**: From k8s 1.24, when a ServiceAccount is created, its token and secret are no longer created automatically. You can create the token and secrets manually. + +
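For example, on a v1.24+ cluster a short-lived token for the `sample-sa` account created below can be requested on demand (a sketch):

```bash
kubectl create token sample-sa                  # prints a time-bound JWT for the service account
kubectl create token sample-sa --duration=1h    # optionally control the validity window
```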
+ +### Create Service Account `sample-sa` + +
show

+ +```bash +kubectl create serviceaccount sample-sa +# OR +kubectl create sa sample-sa +``` + +OR + +```yaml +cat << EOF > sample-sa.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: sample-sa +EOF + +kubectl apply -f sample-sa.yaml +``` + +```bash +kubectl describe serviceaccount sample-sa # Verify, no secret and token are created automatically +Name: sample-sa +Namespace: default +Labels: +Annotations: +Image pull secrets: +Mountable secrets: +Tokens: +Events: + +``` + +

+ +
+ +### Create Service Account `sample-sa-no-auto-mount` with auto mounting disabled + +
+ +
show

+ +```yaml +cat << EOF > sample-sa-no-auto-mount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: sample-sa-no-auto-mount +automountServiceAccountToken: false +EOF + +kubectl apply -f sample-sa-no-auto-mount.yaml +``` + +

+ +
+ +### Create a pod with name `nginx-sa` and with image `nginx` and service account `sample-sa` + +
+ +
show

+ +```bash +kubectl run nginx-sa --image=nginx --serviceaccount=sample-sa +``` + +OR + +```yaml +cat << EOF > nginx-sa.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-sa +spec: + containers: + - image: nginx + name: nginx-sa + serviceAccountName: sample-sa +EOF + +kubectl apply -f nginx-sa.yaml +``` + +

+ +
+ +### Clean up + +```bash +rm nginx-sa.yaml sample-sa-no-auto-mount.yaml sample-sa.yaml +kubectl delete pod nginx-sa --force --grace-period=0 +kubectl delete serviceaccount sample-sa-no-auto-mount sample-sa +``` \ No newline at end of file diff --git a/k8s-certifications/topics/services.md b/k8s-certifications/topics/services.md new file mode 100644 index 0000000..0aa86e6 --- /dev/null +++ b/k8s-certifications/topics/services.md @@ -0,0 +1,115 @@ +# [Services](https://kubernetes.io/docs/concepts/workloads/controllers/services/) + +
+ +### Create a pod `nginx-clusterip` with image `nginx`. Expose it as a ClusterIP service. + +
show

+ +```bash +kubectl run nginx-clusterip --image=nginx --restart=Never --port=80 --expose + +kubectl get service nginx-clusterip # verification +# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +# nginx-clusterip ClusterIP 10.104.163.30 80/TCP 6s +``` + +

+ +
+ +### Create a pod `nginx-nodeport` with image `nginx`. Expose it as a NodePort service `nginx-nodeport-svc` + +
show

+ +```bash +kubectl run nginx-nodeport --image=nginx --restart=Never --port=80 +kubectl expose pod nginx-nodeport --name nginx-nodeport-svc --type NodePort --port 80 --target-port 80 +``` + +OR + +```yaml +cat << EOF > nginx-nodeport.yaml +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + run: nginx-nodeport + name: nginx-nodeport-svc +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + run: nginx-nodeport + type: NodePort +status: + loadBalancer: {} +EOF + +kubectl apply -f nginx-nodeport.yaml +``` + +```bash +# verification - port expose might change +kubectl get svc nginx-nodeport-svc +# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +# nginx-nodeport-svc NodePort 10.106.55.131 80:31287/TCP 12s +``` + +

+ +
+ +### Create a deployment `nginx-deployment` with image `nginx` and 3 replicas. Expose it as a NodePort service `nginx-deployment-svc` on port 30080. + +
show

+ +```bash +kubectl create deploy nginx-deployment --image nginx && kubectl scale deploy nginx-deployment --replicas 3 +kubectl expose deployment nginx-deployment --type NodePort --port 80 --target-port 80 --dry-run=client -o yaml > nginx-deployment-svc.yaml +``` + +Edit `nginx-deployment-svc.yaml` to add `nodePort: 30080` and apply `kubectl apply -f nginx-deployment-svc.yaml` + +```yaml +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + app: nginx-deployment + name: nginx-deployment +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + nodePort: 30080 # add node port + selector: + app: nginx-deployment + type: NodePort +status: + loadBalancer: {} +``` + +```bash +# verification +kubectl get service nginx-deployment +# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +# nginx-deployment NodePort 10.43.166.122 80:30080/TCP 38s +``` +

+ +
+ +### Clean up + +```bash +# clean up +kubectl delete service nginx-deployment nginx-nodeport-svc +kubectl delete deployment nginx-deployment nginx-nodeport +``` \ No newline at end of file diff --git a/k8s-certifications/topics/taints_tolerations.md b/k8s-certifications/topics/taints_tolerations.md new file mode 100644 index 0000000..2b3f436 --- /dev/null +++ b/k8s-certifications/topics/taints_tolerations.md @@ -0,0 +1,18 @@ +# [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) + +
+ +### Create a taint on worker node `node01` with key `app` with value `critical` and effect of `NoSchedule` + +
+ +
show

+ +```bash +kk taint node node01 app=critical:NoSchedule +``` + +

+ +
+ diff --git a/k8s-certifications/topics/trivy.md b/k8s-certifications/topics/trivy.md new file mode 100644 index 0000000..b374ab0 --- /dev/null +++ b/k8s-certifications/topics/trivy.md @@ -0,0 +1,74 @@ +# [Trivy](https://aquasecurity.github.io/trivy) + +A Simple and Comprehensive Vulnerability Scanner for Containers and other Artifacts, Suitable for CI. + +
+ +### Trivy Installation +```bash +apt-get update +apt-get install wget apt-transport-https gnupg lsb-release -y +wget -qO - https://aquasecurity.github.io/trivy-repo/deb/public.key | sudo apt-key add - +echo deb https://aquasecurity.github.io/trivy-repo/deb $(lsb_release -sc) main | sudo tee -a /etc/apt/sources.list.d/trivy.list + +#Update Repo and Install trivy +apt-get update +apt-get install trivy -y + +# OR (if you encounter issues 'Unable to locate package trivy') + +wget https://github.com/aquasecurity/trivy/releases/download/v0.17.0/trivy_0.17.0_Linux-64bit.deb +sudo dpkg -i trivy_0.17.0_Linux-64bit.deb + +``` + +
+ +### Scan the following images with Trivy and check the `CRITICAL` issues. + - nginx:1.21.4-alpine + - amazonlinux:2.0.20211201.0 + - nginx:1.21.4 + +
show

+ +```bash +docker pull nginx:1.21.4 +trivy image --severity CRITICAL nginx:1.21.4 +# nginx:1.21.4 (debian 11.1) +# ========================== +# Total: 7 (CRITICAL: 7) + +docker pull nginx:1.21.4-alpine +trivy image --severity CRITICAL nginx:1.21.4-alpine +# nginx:1.21.4-alpine (alpine 3.14.3) +# =================================== +# Total: 0 (CRITICAL: 0) + +docker pull amazonlinux:2.0.20211201.0 +trivy image --severity CRITICAL amazonlinux:2.0.20211201.0 +# amazonlinux:2.0.20211201.0 (amazon 2 (Karoo)) +# ============================================= +# Total: 0 (CRITICAL: 0) + +``` + +

+ +
+ +### Scan the following images with Trivy and check the `HIGH` issues with output in json format and redirected to `/root/nginx.json` +- nginx:1.21.4 + +
show

+ +```bash +docker pull nginx:1.21.4 +trivy image --severity HIGH --format json --output /root/nginx.json nginx:1.21.4 +# nginx:1.21.4 (debian 11.1) +# ========================== +# Total: 7 (CRITICAL: 7) + +``` + +

+ diff --git a/k8s-certifications/topics/volumes.md b/k8s-certifications/topics/volumes.md new file mode 100644 index 0000000..ae22545 --- /dev/null +++ b/k8s-certifications/topics/volumes.md @@ -0,0 +1,308 @@ +# [Volumes](https://kubernetes.io/docs/concepts/storage/volumes/) + +Kubernetes supports many types of volumes. A Pod can use any number of volume types simultaneously. Ephemeral volume types have a lifetime of a pod, but persistent volumes exist beyond the lifetime of a pod. When a pod ceases to exist, Kubernetes destroys ephemeral volumes; however, Kubernetes does not destroy persistent volumes. For any kind of volume in a given pod, data is preserved across container restarts. + +- [Config Volumes](#config-volumes) +- [Secret Volumes](#secret-volumes) +- [Ephemeral Volumes](#ephemeral-volumes) +- [Persistent Volumes](#persistent-volumes) + +
+ +## Config Volumes + +
+ +### Create a new pod `nginx-3` with `nginx` image and mount the configmap `db-config-1` as a volume named `db-config` and mount path `/config` + +
show

+ +```yaml +cat << EOF > nginx-3.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-3 +spec: + containers: + - image: nginx + name: nginx-3 + volumeMounts: + - name: db-config + mountPath: "/config" + readOnly: true + volumes: + - name: db-config + configMap: + name: db-config-1 +EOF + +kubectl apply -f nginx-3.yaml + +kubectl exec nginx-4 -- cat /config/DB_HOST # verify env variables +# db.example.com +``` + +

+ +
+ +## Secret Volumes + +
+ +### Create a new pod `nginx-4` with `nginx` image and mount the secret `db-secret-1` as a volume named `db-secret` and mount path `/secret` + +
show

+ +```yaml +cat << EOF > nginx-4.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx-4 +spec: + containers: + - image: nginx + name: nginx-4 + volumeMounts: + - name: db-secret + mountPath: "/secret" + readOnly: true + volumes: + - name: db-secret + secret: + secretName: db-secret-1 +EOF + +kubectl apply -f nginx-4.yaml +``` + +```bash +kubectl exec nginx-4 -- cat /secret/DB_HOST # verify env variables +# db.example.com +``` + +

+ +
+ +## [Ephemeral Volumes](https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/) + +
+ +### Create the redis pod with `redis` image with volume `redis-storage` as ephemeral storage mounted at `/data/redis`. + +
show

+ +```yaml +cat << EOF > redis.yaml +apiVersion: v1 +kind: Pod +metadata: + name: redis +spec: + containers: + - name: redis + image: redis + volumeMounts: + - name: redis-storage + mountPath: /data/redis + volumes: + - name: redis-storage + emptyDir: {} # Ephemeral storage +EOF + +kubectl apply -f redis.yaml +``` + +

+ +
+ +### Create a pod as follows: + - Name: non-persistent-redis + - container Image:redis + - Volume with name: cache-control + - Mount path: /data/redis + - The pod should launch in the staging namespace and the volume must not be persistent. + +
show

+ +```yaml +kubectl create namespace staging + +cat << EOF > non-persistent-redis.yaml +apiVersion: v1 +kind: Pod +metadata: + name: non-persistent-redis + namespace: staging +spec: + containers: + - name: redis + image: redis + volumeMounts: + - name: cache-control + mountPath: /data/redis + volumes: + - name: cache-control + emptyDir: {} +EOF + +kubectl apply -f non-persistent-redis.yaml +``` + +

+ +
+ +## [Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) + +
+ +### Create a persistent volume with name `app-data`, of capacity `200Mi` and access mode `ReadWriteMany`. The type of volume is `hostPath` and its location is `/srv/app-data`. + +
show

+ +```yaml +cat << EOF > app-data.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: app-data +spec: + storageClassName: manual + capacity: + storage: 200Mi + accessModes: + - ReadWriteMany + hostPath: + path: "/srv/app-data" +EOF + +kubectl apply -f app-data.yaml + +kubectl get pv +# NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE +# app-data 200Mi RWX Retain Available manual +``` + +

+ +
+ +### Create the following + - PV `task-pv-volume` with storage `10Mi`, Access Mode `ReadWriteOnce` on hostpath `/mnt/data`. + - PVC `task-pv-claim` to use the PV. + - Create a pod `task-pv-pod` with `nginx` image to use the PVC mounted on `/usr/share/nginx/html` + +
show

+ +```yaml +cat << EOF > task-pv-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: task-pv-volume +spec: + storageClassName: manual + capacity: + storage: 10Mi + accessModes: + - ReadWriteOnce + hostPath: + path: "/mnt/data" +EOF + +kubectl apply -f task-pv-volume.yaml + +kubectl get pv +# NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE +# task-pv-volume 10Mi RWO Retain Available manual 6s +``` + +```yaml +cat << EOF > task-pv-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: task-pv-claim +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Mi +EOF + +kubectl apply -f task-pv-claim.yaml + +kubectl get pvc +#NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +#task-pv-claim Bound task-pv-volume 10Mi RWO manual 12s +kubectl get pv # check status bound +#NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE +#task-pv-volume 10Mi RWO Retain Bound default/task-pv-claim manual 64s +``` + +```yaml +cat << EOF > task-pv-pod.yaml +apiVersion: v1 +kind: Pod +metadata: + name: task-pv-pod +spec: + volumes: + - name: task-pv-storage + persistentVolumeClaim: + claimName: task-pv-claim + containers: + - name: task-pv-pod + image: nginx + ports: + - containerPort: 80 + name: "http-server" + volumeMounts: + - mountPath: "/usr/share/nginx/html" + name: task-pv-storage +EOF + +kubectl apply -f task-pv-pod.yaml +``` + +

+ +
+ +### Get the storage classes (Storage class does not belong to namespace) + +
+ +
show

+ +```bash +kubectl get storageclass +# OR +kubectl get sc +``` +

+ +
+ +### Clean up + +
show

+ +```bash +rm nginx-3.yaml nginx-4.yaml redis.yaml +kubectl delete pod task-pv-pod redis nginx-3 nginx-4 --force +kubectl delete pvc task-pv-claim +kubectl delete pv task-pv-volume +``` + +

+ +
+