---
tags: K8S, jsonpath, K8S_output, jq
---
# K8S generic patterns
[toc]
*Note: `k` is used as an alias for `kubectl` throughout these notes.*
## Output formatting, sorting and filtering
### Format using 'custom-columns'
This format uses 'jsonpath' expressions under the hood:
```custom-columns=<header>:<json-path-expr>,<header>:<json-path-expr>```
[Reference](https://thegusmao.tech/extracting-useful-information-from-your-kubernetes-cluster-with-custom-columns-and-jq)
Get node name and maximum pod capacity (nodes are cluster-scoped, so no namespace flag is needed):
```bash!
k get nodes -o=custom-columns=NODE_NAME:.metadata.name,MAX_PODS:.status.capacity.pods
```
Get node status and the disk/RAM/PID pressure flags. Note that this indexes '.status.conditions' by position, which depends on the order the API returns them; a more robust variant filtering by condition type follows the sample output below.
```bash!
k get nodes -o custom-columns="NAME:.metadata.name,IP:.status.addresses[0].address,RAM_PRESSURE:.status.conditions[0].status,DISK_PRESSURE:.status.conditions[1].status,PID_PRESSURE:.status.conditions[2].status,STATE:.status.conditions[4].status"
```
```bash
NAME    IP           RAM_PRESSURE   DISK_PRESSURE   PID_PRESSURE   STATE
lrda0   10.0.0.1     False          False           False          True
lrda1   10.0.0.2     False          False           False          True
lrda2   10.0.0.3     False          False           False          True
lrdw0   10.0.0.4     False          False           False          True
lrdw1   10.0.0.5     False          False           False          True
lrdw2   10.0.0.6     False          False           False          True
lrdw3   10.0.0.7     False          False           False          True
lrdw4   10.0.0.8     False          False           False          True
lrdw5   10.0.0.9     False          False           False          True
lrdw6   10.0.0.10    False          False           False          True
lrdw7   10.0.0.11    False          False           False          True
```
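Indexing '.status.conditions' by position breaks as soon as the API server adds or reorders conditions. A sketch of the same query filtering each condition by its 'type' instead (custom-columns accepts jsonpath filter expressions):
```bash!
k get nodes -o custom-columns='NAME:.metadata.name,IP:.status.addresses[0].address,RAM_PRESSURE:.status.conditions[?(@.type=="MemoryPressure")].status,DISK_PRESSURE:.status.conditions[?(@.type=="DiskPressure")].status,PID_PRESSURE:.status.conditions[?(@.type=="PIDPressure")].status,READY:.status.conditions[?(@.type=="Ready")].status'
```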
### Filter using 'field-selector'
[K8S field-selector documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/)
Get all pods with state 'running' in all namespaces.
```bash!
k get pods -A -o wide --field-selector status.phase="Running"
```
Get all NON running pods in all namespaces.
```bash!
k get pods -A -o wide --field-selector status.phase!="Running"
```
Get all NON running pods AND NOT in 'regionadmin' namespace.
```bash!
k get pods -A --field-selector=status.phase!=Running,metadata.namespace!=regionadmin
```
Get all pods running on a given node:
```bash!
k get pods --all-namespaces -o wide --field-selector spec.nodeName=$NODE_NAME
```
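Field selectors can also be combined with label selectors in a single query. A minimal sketch, reusing the 'app.kubernetes.io/name=velero' label visible in the pod dump below:
```bash!
k get pods -A --field-selector=status.phase=Running -l app.kubernetes.io/name=velero
```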
## JSON format extraction
### Raw json output for a pod
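The dump below was obtained with something like the following (pod name and namespace taken from the sample itself):
```bash!
k get pod restic-4jtzl -n regionadmin -o json
```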
```json=
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"annotations": {
"kubectl.kubernetes.io/restartedAt": "2022-07-06T09:06:23+02:00"
},
"creationTimestamp": "2023-03-24T12:30:07Z",
"generateName": "restic-",
"labels": {
"app.kubernetes.io/instance": "velero",
"app.kubernetes.io/managed-by": "Helm",
"app.kubernetes.io/name": "velero",
"controller-revision-hash": "58f5f7967b",
"helm.sh/chart": "velero-2.27.2",
"name": "restic",
"pod-template-generation": "11"
},
"name": "restic-4jtzl",
"namespace": "regionadmin",
"ownerReferences": [
{
"apiVersion": "apps/v1",
"blockOwnerDeletion": true,
"controller": true,
"kind": "DaemonSet",
"name": "restic",
"uid": "f30e32e1-7421-4004-b932-63c73313baea"
}
],
"resourceVersion": "358137749",
"uid": "12eef855-891a-45ed-b37c-028399248b0c"
},
"spec": {
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchFields": [
{
"key": "metadata.name",
"operator": "In",
"values": [
"lrdw5"
]
}
]
}
]
}
}
},
"containers": [
{
"args": [
"restic",
"server",
"--log-level=info",
"--log-format=text"
],
"command": [
"/velero"
],
"env": [
{
"name": "VELERO_NAMESPACE",
"valueFrom": {
"fieldRef": {
"apiVersion": "v1",
"fieldPath": "metadata.namespace"
}
}
},
{
"name": "NODE_NAME",
"valueFrom": {
"fieldRef": {
"apiVersion": "v1",
"fieldPath": "spec.nodeName"
}
}
},
{
"name": "VELERO_SCRATCH_DIR",
"value": "/scratch"
},
{
"name": "AWS_SHARED_CREDENTIALS_FILE",
"value": "/credentials/cloud"
}
],
"image": "atos/oci-ubi8-velero:1.7.1-4",
"imagePullPolicy": "IfNotPresent",
"name": "restic",
"resources": {
"limits": {
"cpu": "1",
"memory": "512Mi"
},
"requests": {
"cpu": "500m",
"memory": "128Mi"
}
},
"securityContext": {
"privileged": false
},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/credentials",
"name": "cloud-credentials"
},
{
"mountPath": "/host_pods",
"mountPropagation": "HostToContainer",
"name": "host-pods"
},
{
"mountPath": "/scratch",
"name": "scratch"
},
{
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"name": "kube-api-access-xjmqz",
"readOnly": true
}
]
}
],
"dnsPolicy": "ClusterFirst",
"enableServiceLinks": true,
"nodeName": "lrdw5",
"preemptionPolicy": "PreemptLowerPriority",
"priority": 0,
"restartPolicy": "Always",
"schedulerName": "default-scheduler",
"securityContext": {
"runAsUser": 0
},
"serviceAccount": "velero-server",
"serviceAccountName": "velero-server",
"terminationGracePeriodSeconds": 30,
"tolerations": [
{
"effect": "NoExecute",
"key": "node.kubernetes.io/not-ready",
"operator": "Exists"
},
{
"effect": "NoExecute",
"key": "node.kubernetes.io/unreachable",
"operator": "Exists"
},
{
"effect": "NoSchedule",
"key": "node.kubernetes.io/disk-pressure",
"operator": "Exists"
},
{
"effect": "NoSchedule",
"key": "node.kubernetes.io/memory-pressure",
"operator": "Exists"
},
{
"effect": "NoSchedule",
"key": "node.kubernetes.io/pid-pressure",
"operator": "Exists"
},
{
"effect": "NoSchedule",
"key": "node.kubernetes.io/unschedulable",
"operator": "Exists"
}
],
"volumes": [
{
"name": "cloud-credentials",
"secret": {
"defaultMode": 420,
"secretName": "velero-secret"
}
},
{
"hostPath": {
"path": "/var/lib/kubelet/pods",
"type": ""
},
"name": "host-pods"
},
{
"emptyDir": {},
"name": "scratch"
},
{
"name": "kube-api-access-xjmqz",
"projected": {
"defaultMode": 420,
"sources": [
{
"serviceAccountToken": {
"expirationSeconds": 3607,
"path": "token"
}
},
{
"configMap": {
"items": [
{
"key": "ca.crt",
"path": "ca.crt"
}
],
"name": "kube-root-ca.crt"
}
},
{
"downwardAPI": {
"items": [
{
"fieldRef": {
"apiVersion": "v1",
"fieldPath": "metadata.namespace"
},
"path": "namespace"
}
]
}
}
]
}
}
]
},
"status": {
"conditions": [
{
"lastProbeTime": null,
"lastTransitionTime": "2023-03-24T12:30:07Z",
"status": "True",
"type": "Initialized"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2023-10-18T11:31:00Z",
"status": "True",
"type": "Ready"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2023-10-18T11:31:00Z",
"status": "True",
"type": "ContainersReady"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2023-03-24T12:30:07Z",
"status": "True",
"type": "PodScheduled"
}
],
"containerStatuses": [
{
"containerID": "cri-o://97538298e5572532b9dcb94eb77370afb4bd76d35952e4bd132e24c4a9569e3d",
"image": "registry.regionadmin.svc.kube.local:5000/atos/oci-ubi8-velero:1.7.1-4",
"imageID": "registry.regionadmin.svc.kube.local:5000/atos/oci-ubi8-velero@sha256:fa62ff66150463c690bf55426e7259c302fbbe60e0aeab79be21bdc12a77cfe8",
"lastState": {},
"name": "restic",
"ready": true,
"restartCount": 0,
"started": true,
"state": {
"running": {
"startedAt": "2023-10-18T11:30:59Z"
}
}
}
],
"hostIP": "10.0.0.9",
"phase": "Running",
"podIP": "192.168.8.23",
"podIPs": [
{
"ip": "192.168.8.23"
}
],
"qosClass": "Burstable",
"startTime": "2023-03-24T12:30:07Z"
}
}
```
### Format using 'jq' parser
Get all pods with a non-ready container status:
```bash!
k get pods -A -o json | jq -r '.items[] | select(.status.containerStatuses[].ready==false) | .metadata.name'
```
```bash!
conman-logrotate-28012020-zh28n
conman-logrotate-28012050-hn5wt
conman-logrotate-28012080-49x92
se-cloud-library-server-cleaner-28008000-9rv4q
se-cloud-library-server-cleaner-28009440-hbvtm
se-cloud-library-server-cleaner-28010880-w8h6c
se-mongodb-5994f74d55-5dhzt
se-remote-build-cronjobs-purger-28008000-jzbgt
se-remote-build-cronjobs-purger-28009440-6qzdx
se-remote-build-cronjobs-purger-28010880-sftkm
```
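The expression above iterates '.status.containerStatuses' directly and errors out on pods that do not carry that field yet (e.g. 'Pending' pods). A more defensive sketch using jq's 'any', also printing the namespace:
```bash!
k get pods -A -o json | jq -r '.items[] | select(any(.status.containerStatuses[]?; .ready == false)) | "\(.metadata.namespace)/\(.metadata.name)"'
```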
### Format using 'jsonpath'
[Reference](https://kubernetes.io/docs/reference/kubectl/jsonpath/)
```bash!
kubectl get pods -o json
kubectl get pods -o=jsonpath='{@}'
kubectl get pods -o=jsonpath='{.items[0]}'
kubectl get pods -o=jsonpath='{.items[0].metadata.name}'
kubectl get pods -o=jsonpath="{.items[*]['metadata.name', 'status.capacity']}"
kubectl get pods -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.startTime}{"\n"}{end}'
```
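jsonpath also supports filter expressions, which combine with 'range'. A sketch listing all pods scheduled on a given node ('lrdw5' comes from the sample pod above):
```bash!
kubectl get pods -A -o jsonpath='{range .items[?(@.spec.nodeName=="lrdw5")]}{.metadata.namespace}{"/"}{.metadata.name}{"\n"}{end}'
```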
## Real-life examples
### Output deployment limits and requests to 'csv' format using 'jsonpath'
```bash!
echo -e "namespace;deployment;RAM_req;CPU_req;RAM_limit;CPU_limit\n$(k get deployments -A -o jsonpath='{range .items[*]}{.metadata.namespace}{";"}{.metadata.name}{";"}{.spec.template.spec.containers[].resources.requests.memory}{";"}{.spec.template.spec.containers[].resources.requests.cpu}{";"}{.spec.template.spec.containers[].resources.limits.memory}{";"}{.spec.template.spec.containers[].resources.limits.cpu}{"\n"}{end}')"|head
```
```csv!
namespace;deployment;RAM_req;CPU_req;RAM_limit;CPU_limit
kube-system;coredns;70Mi;100m;170Mi;
kube-system;vpa-admission-controller;200Mi;50m;500Mi;200m
kube-system;vpa-recommender;500Mi;50m;1000Mi;200m
kube-system;vpa-updater;500Mi;50m;1000Mi;200m
monitoring;grafana;;;;
monitoring;ipmi-exporter-region1;;;;
monitoring;ipmi-exporter-region10;;;;
monitoring;ipmi-exporter-region11;;;;
monitoring;ipmi-exporter-region12;;;;
```
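With the jsonpath form above, a deployment that has several containers gets all of their values concatenated into a single field. A hedged jq sketch emitting one row per container instead (it adds a container column to the semicolon layout):
```bash!
k get deployments -A -o json | jq -r '.items[] | .metadata.namespace as $ns | .metadata.name as $dep | .spec.template.spec.containers[] | [$ns, $dep, .name, .resources.requests.memory, .resources.requests.cpu, .resources.limits.memory, .resources.limits.cpu] | join(";")'
```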
### Output deployment limits and requests to 'MD' table format using 'jsonpath'
```bash!
echo -e "|namespace|deployment|RAM_req|CPU_req|RAM_limit|CPU_limit|\n| -------- | -------- | -------- | -------- | -------- | -------- |\n$(k get deployments -A -o jsonpath='{range .items[*]}{.metadata.namespace}{"|"}{.metadata.name}{"|"}{.spec.template.spec.containers[].resources.requests.memory}{"|"}{.spec.template.spec.containers[].resources.requests.cpu}{"|"}{.spec.template.spec.containers[].resources.limits.memory}{"|"}{.spec.template.spec.containers[].resources.limits.cpu}{"|\n"}{end}')"|head
```
|namespace|deployment|RAM_req|CPU_req|RAM_limit|CPU_limit|
| -------- | -------- | -------- | -------- | -------- | -------- |
|kube-system|coredns|70Mi|100m|170Mi||
|kube-system|vpa-admission-controller|200Mi|50m|500Mi|200m|
|kube-system|vpa-recommender|500Mi|50m|1000Mi|200m|
|kube-system|vpa-updater|500Mi|50m|1000Mi|200m|
|monitoring|grafana|||||
|monitoring|ipmi-exporter-region1|||||
|monitoring|ipmi-exporter-region10|||||
|monitoring|ipmi-exporter-region11|||||
### Output nodes+pods limits and requests to 'csv' format using 'jq'
```bash!
k get pods -n regionadmin -o json | jq -r '["namespace","node_name","pod_name","container_name","RAM_req","RAM_limit","CPU_req","CPU_limit"], (.items[] | [.metadata.namespace, .spec.nodeName, .metadata.name] + (.spec.containers[] | [.name, .resources.requests.memory, .resources.limits.memory, .resources.requests.cpu, .resources.limits.cpu])) | @csv'
```
```csvpreview
namespace,node_name,pod_name,container_name,RAM_req,RAM_limit,CPU_req,CPU_limit
"regionadmin","lrdw4","ceph-csi-rbd-nodeplugin-6wjgl","csi-rbdplugin",,,,
"regionadmin","lrdw0","virt-handler-wctff","virt-handler","230Mi",,,
"regionadmin","lrdw3","virt-handler-wjg6b","virt-handler","230Mi",,,
"regionadmin","lrdw4","virt-operator-5cf9bcb8db-2n4vv","virt-operator","150Mi",,,
"regionadmin","lrdw0","virt-operator-5cf9bcb8db-nv2v7","virt-operator","150Mi",,,
```
### Get QOS for each pod NOT in 'best effort'
*Note: the 'status.qosClass' filter should ideally live inside the jsonpath expression itself, but I could not combine it with 'range' properly so far; a jq alternative is sketched after the sample output below.*
```bash!
k get pods -A -o jsonpath='{range .items[*]}{.metadata.namespace}{"/"}{.metadata.name}{" ("}{.status.qosClass}{")\n"}{"RAM Req: "}{..resources.requests.memory}{" CPU Req: "}{..resources.requests.cpu}{"\n"}{"RAM Limit: "}{..resources.limits.memory}{" CPU Limit: "}{..resources.limits.cpu}{"\n"}{end}' | grep -E -A2 'Guar|Burst'
```
```bash!
regionadmin/metallb-speaker-vm7tl (Guaranteed)
RAM Req: 100Mi CPU Req: 100m
RAM Limit: 100Mi CPU Limit: 100m
regionadmin/metallb-speaker-wwdtt (Guaranteed)
RAM Req: 100Mi CPU Req: 100m
RAM Limit: 100Mi CPU Limit: 100m
--
regionadmin/quay-psql-cluster-c5c6cd75d-m4xzq (Burstable)
RAM Req: 128Mi CPU Req:
RAM Limit: CPU Limit:
regionadmin/quay-psql-cluster-ezgj-6988b4b746-648nq (Burstable)
RAM Req: 128Mi CPU Req:
RAM Limit: CPU Limit:
...
```
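As noted above, the QoS filter is easier to express in jq. A minimal sketch selecting everything except 'BestEffort' pods:
```bash!
k get pods -A -o json | jq -r '.items[] | select(.status.qosClass != "BestEffort") | "\(.metadata.namespace)/\(.metadata.name) (\(.status.qosClass))"'
```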
### Same for each pod in 'best effort'
```bash!
k get pods -A -o jsonpath='{range .items[*]}{.metadata.namespace}{"/"}{.metadata.name}{" ("}{.status.qosClass}{")\n"}{end}'|grep 'Best'
```
```bash!
userservices/se-remote-build-cronjobs-purger-27999360-d22jv (BestEffort)
userservices/se-remote-build-cronjobs-purger-28000800-mmvhj (BestEffort)
userservices/se-remote-build-cronjobs-purger-28002240-2zjqp (BestEffort)
userservices/se-remote-build-jim-7db9cfc8c-8kwxs (BestEffort)
userservices/se-remote-build-manager-7568498878-szr5c (BestEffort)
userservices/se-remote-build-server-7fd75bd764-bftmj (BestEffort)
userservices/se-token-service-c94c89c64-z9zl7 (BestEffort)
userservices/slurm-controller-6489c98ccb-mck5s (BestEffort)
userservices/slurm-exporter-c8f749947-vvsxb (BestEffort)
userservices/slurm-mariadb-6b6c695c4-8pqj2 (BestEffort)
userservices/slurm-slurmdbd-6c87fdb477-tk7v2 (BestEffort)
...
```
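The grep can be dropped entirely with a jsonpath filter expression (same filter as in the 'Misc' section below):
```bash!
k get pods -A -o jsonpath='{range .items[?(@.status.qosClass=="BestEffort")]}{.metadata.namespace}{"/"}{.metadata.name}{" (BestEffort)\n"}{end}'
```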
### Output pods 'qosClass', 'Requests' and 'Limits' to 'csv' format
```bash!
echo -e "namespace;podname;qosClass;RAM_req;CPU_req;RAM_limit;CPU_limit\n$(k get pods -A -o jsonpath='{range .items[*]}{.metadata.namespace}{";"}{.metadata.name}{";"}{.status.qosClass}{";"}{..resources.requests.memory}{";"}{..resources.requests.cpu}{";"}{..resources.limits.memory}{";"}{..resources.limits.cpu}{"\n"}{end}')"
```
```csv!
namespace;podname;qosClass;RAM_req;CPU_req;RAM_limit;CPU_limit
kube-system;coredns-65c885f8bd-m9h87;Burstable;70Mi;100m;170Mi;
kube-system;coredns-65c885f8bd-ww7p7;Burstable;70Mi;100m;170Mi;
kube-system;etcd-lrda0;Burstable;100Mi;100m;;
kube-system;etcd-lrda1;Burstable;100Mi;100m;;
kube-system;etcd-lrda2;Burstable;100Mi;100m;;
kube-system;kube-apiserver-lrda0;Burstable;;250m;;
kube-system;kube-apiserver-lrda1;Burstable;;250m;;
kube-system;kube-apiserver-lrda2;Burstable;;250m;;
kube-system;kube-controller-manager-lrda0;Burstable;;200m;;
...
```
### Output pods 'qosClass', 'Requests' and 'Limits' to MD table format
```bash!
echo -e "|namespace|podname|qosClass|RAM_req|CPU_req|RAM_limit|CPU_limit\n| -------- | -------- | -------- | -------- | -------- | -------- | -------- |\n$(k get pods -A -o jsonpath='{range .items[*]}{"|"}{.metadata.namespace}{"|"}{.metadata.name}{"|"}{.status.qosClass}{"|"}{..resources.requests.memory}{" |"}{..resources.requests.cpu}{" |"}{..resources.limits.memory}{" |"}{..resources.limits.cpu}{" |\n"}{end}')"
```
|namespace|podname|qosClass|RAM_req|CPU_req|RAM_limit|CPU_limit|
| -------- | -------- | -------- | -------- | -------- | -------- | -------- |
|kube-system|coredns-65c885f8bd-m9h87|Burstable|70Mi |100m |170Mi | |
|kube-system|coredns-65c885f8bd-ww7p7|Burstable|70Mi |100m |170Mi | |
|kube-system|etcd-lrda0|Burstable|100Mi |100m | | |
|kube-system|etcd-lrda1|Burstable|100Mi |100m | | |
|kube-system|etcd-lrda2|Burstable|100Mi |100m | | |
|kube-system|kube-apiserver-lrda0|Burstable| |250m | | |
|kube-system|kube-apiserver-lrda1|Burstable| |250m | | |
|kube-system|kube-apiserver-lrda2|Burstable| |250m | | |
### Output pods metrics to 'csv' format
```bash
k top pod -A --use-protocol-buffers | sed -e 's/[[:space:]]\+/;/g'
```
```bash
NAMESPACE;NAME;CPU(cores);MEMORY(bytes);
kube-system;coredns-65c885f8bd-m9h87;232m;96Mi;
kube-system;coredns-65c885f8bd-ww7p7;133m;89Mi;
kube-system;etcd-lrda0;103m;127Mi;
kube-system;etcd-lrda1;81m;111Mi;
kube-system;etcd-lrda2;93m;126Mi;
kube-system;kube-apiserver-lrda0;208m;1808Mi;
kube-system;kube-apiserver-lrda1;29m;1315Mi;
kube-system;kube-apiserver-lrda2;51m;1549Mi;
kube-system;kube-controller-manager-lrda0;1m;52Mi;
...
```
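The same substitution works for node metrics (a sketch; 'kubectl top node' shares the same column layout):
```bash
k top node --use-protocol-buffers | sed -e 's/[[:space:]]\+/;/g'
```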
### Output top 20 pods CPU metrics
```bash
k top pod -A --sort-by=cpu --use-protocol-buffers|head -n21
```
### Output top 20 pods memory metrics
```bash
k top pod -A --sort-by=memory --use-protocol-buffers|head -n21
```
### Misc (not sorted yet)
```bash!
k get pods -A -o jsonpath='{.items[?(@.status.qosClass=="BestEffort")].metadata.name}'
# NB: regex matching (=~) is NOT supported by kubectl's jsonpath implementation; kept for use with other jsonpath tools
jsonpath='{.items[?(@.metadata.name=~/^test$/)].metadata.name}'
k get pods -A -o jsonpath='{.items[?(@.metadata.name=="se-redis-master-0")].metadata.name}'
```
```bash!
k get pods -A -o jsonpath='{range .items[*]}{.metadata.namespace}{"/"}{.metadata.name}{" ("}{[?(@.status.qosClass=="Guaranteed")]}{")\n"}{"RAM Limit: "}{..resources.limits.memory}{"\n"}{"CPU Limit: "}{..resources.limits.cpu}'
```
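A variant that applies the 'Guaranteed' filter inside the 'range' expression itself (a sketch along the lines of the filters above):
```bash!
k get pods -A -o jsonpath='{range .items[?(@.status.qosClass=="Guaranteed")]}{.metadata.namespace}{"/"}{.metadata.name}{" (Guaranteed)\n"}{"RAM Limit: "}{..resources.limits.memory}{"\n"}{"CPU Limit: "}{..resources.limits.cpu}{"\n"}{end}'
```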