l2t

Kubernetes CheatSheet

Learn how to troubleshoot Kubernetes with handy commands.

General Info

# get the current version
kubectl version

# get cluster info
kubectl cluster-info

# get the configurations
kubectl config view

# watch the kubelet logs
watch -n 2 cat /var/log/kubelet.log

Namespaces

# list namespaces
kubectl get ns

# display yaml for namespaces
kubectl get ns -o yaml

# describe namespace
kubectl describe ns

# edit a namespace
kubectl edit ns <NAME>

# delete a namespace
kubectl delete ns <NAME>

Deployments

# list all deployments
kubectl get deploy

# describe a deployment
kubectl describe deploy <NAME>

# display wide output of deployments
kubectl get deploy -o wide

# display yaml of deployments
kubectl get deploy -o yaml
kubectl get deploy <NAME> -o yaml

# edit a deployment
kubectl edit deploy <NAME>

# delete a deployment
kubectl delete deploy <NAME>

# scale a deployment
kubectl scale deploy <NAME> --replicas=5

# create a deployment
kubectl create -f <FILE.yaml>

# apply deployment
kubectl apply -f <FILE.yaml>

Services

# Display all services
kubectl get svc

# describe service
kubectl describe svc <NAME>

# display wide output
kubectl get svc -o wide

# display yaml of a service
kubectl get svc <NAME> -o yaml

# show labels of the service
kubectl get svc --show-labels

# delete a service
kubectl delete svc <NAME>

# edit a service
kubectl edit svc <NAME>

Daemon Sets

# display all daemon sets
kubectl get ds

# describe a daemon set from a namespace
kubectl describe ds <NAME> -n <NAMESPACE>

# display yaml of a daemon set
kubectl get ds <NAME> -n <NAMESPACE> -o yaml

# edit a daemon set
kubectl edit ds <NAME> -n <NAMESPACE>

# delete a daemon set
kubectl delete ds <NAME>

Events

# get all events
kubectl get events

# show events from a namespace
kubectl get events -n <NAMESPACE>

Service Account

# get a service account
kubectl get sa

# display yaml of a service account
kubectl get sa -o yaml

# edit a service account
kubectl edit sa <NAME>

# delete a service account
kubectl delete sa <NAME>

Replica Sets

# list all replica sets
kubectl get rs

# describe a replica set
kubectl describe rs

# display wide output
kubectl get rs -o wide

# display yaml of a replica set
kubectl get rs <NAME> -o yaml

# edit a replica set
kubectl edit rs <NAME>

# delete a replica set
kubectl delete rs <NAME>

Roles

# get all roles from all namespaces
kubectl get roles --all-namespaces

# display yaml of a role
kubectl get roles <ROLE> -o yaml

Secrets

# get all secrets
kubectl get secrets

# display secrets from a namespace
kubectl get secrets -n <NAMESPACE>

# display yaml of a secret
kubectl get secrets -o yaml
kubectl get secrets <NAME> -o yaml

ConfigMaps

# get all config maps
kubectl get cm

# get config maps from a namespace
kubectl get cm -n <NAMESPACE>

# edit a config map
kubectl edit cm <NAME>

# delete a config map
kubectl delete cm <NAME>

Ingress

# get ingress
kubectl get ing

# get ingress from a namespace
kubectl get ing -n <NAMESPACE>

Persistent Volume

# get persistent volumes
kubectl get pv

# describe a persistent volume
kubectl describe pv <NAME>

Annotate

kubectl annotate po <POD> <ANNO>
kubectl annotate no <NODE> <ANNO>

Nodes

# display all nodes
kubectl get no

# describe a node
kubectl describe no

# display yaml for a node
kubectl get no <NODE_NAME> -o yaml

# display wide output
kubectl get no -o wide

# get nodes by label selector
kubectl get no --selector=[LABEL_NAME]

# delete a node
kubectl delete no <NAME>

# edit a node
kubectl edit no <NAME>

Pods

# get list of pods
kubectl get po
kubectl get pods

# get pod details
kubectl describe po <name>
kubectl describe pod <name>

# find pod by label
kubectl get po -l app=frontend

# get pod logs
kubectl logs <pod-name>

# find log for specific container in pod
kubectl logs <pod-name> -c <container-name> 
kubectl logs -l name=myLabel -c <container-name> 

# get pod's yaml
kubectl get po <pod-name> -o yaml
kubectl get pod <pod-name> -o yaml

# get a pod's YAML without cluster specific information
# (note: --export is deprecated and was removed in kubectl v1.18+)
kubectl get po <pod-name> -o yaml --export
kubectl get pod <pod-name> -o yaml --export

# list all pods in the namespace, with more details
kubectl get po -o wide
kubectl get pods -o wide

# get the pod log with label name
kubectl logs -l name=myLabel

# attach to running container
kubectl attach my-pod -i

# listen on port 8080 on the local machine 
# and forward to port 8000 on my-pod
kubectl port-forward my-pod 8080:8000

# show metrics for a given pod and its containers
kubectl top pod <pod-name> --containers

# delete a pod
kubectl delete po <NAME>

# edit a pod
kubectl edit po <NAME>

# create a pod on the fly
kubectl run <POD_NAME> --image=<IMAGE_NAME> --restart=Never

Clusters

# get list of all services
kubectl get services

# get list of all pods
kubectl get pods

# watch nodes continuously
kubectl get nodes -w

# get info about single node
kubectl describe node <node>

# get list of services
kubectl get svc

# describe the service name
kubectl describe svc <name>

# get list of replication controllers
kubectl get rc

# describe replication controller
kubectl describe rc <name>

# get replication controllers in namespace
kubectl get rc --namespace="<namespace>"

# get the particular deployment info
kubectl get deployment my-deployment

Context & Configuration

# show kubeconfig settings
kubectl config view

# display the first user
kubectl config view -o jsonpath='{.users[].name}'

# get list of users
kubectl config view -o jsonpath='{.users[*].name}'

# display the context
kubectl config get-contexts

# display current context
kubectl config current-context

# set default context to cluster-name
kubectl config use-context cluster-name

# add a new cluster to your kubeconfig that supports basic auth
kubectl config set-credentials kubeuser/foo.kubernetes.com --username=foo --password=bar

# set a context utilizing a specific username and namespace
kubectl config set-context gcd --user=cluster-user --namespace=foo
kubectl config use-context gcd

# permanently save the namespace for all subsequent kubectl commands in that context
kubectl config set-context --current --namespace=gcp-s2

How to debug a container in a pod?

# first thing is to check what is inside a pod
kubectl describe pods <pod-name>

# Look at the state of the containers in the pod. 
# Are they all Running?
# State: Running/Waiting etc...  & Reason

# You can check node capacities
kubectl get nodes -o yaml | egrep '\sname:|cpu:|memory:'
kubectl get nodes -o json | jq '.items[] | {name: .metadata.name, cap: .status.capacity}'

# debug a container within a pod (container is selected with -c)
kubectl logs <pod-name> -c <container-name>

# if the container died previously, find its logs
kubectl logs --previous <pod-name> -c <container-name>

# execute a command in a container
kubectl exec <pod-name> -c <container-name> -- <cmd> <arg1> <arg2>

# check logs for a pod for last hour
kubectl logs --since=1h <POD>

# show last 20 logs 
kubectl logs --tail=20 <POD>

Thank you for reading, sharing and loving this article.