I'm trying to troubleshoot a pod that is stuck in ContainerCreating, but I can't gather enough information to pinpoint the cause. Hoping someone can assist.
[server-001 ~]$ kubectl get pods sandboxed-nginx-98bb68c4d-26ljd
NAME                              READY   STATUS              RESTARTS   AGE
sandboxed-nginx-98bb68c4d-26ljd   0/1     ContainerCreating   0          18m
[server-001 ~]$ kubectl logs sandboxed-nginx-98bb68c4d-26ljd
Error from server (BadRequest): container "nginx-kata" in pod "sandboxed-nginx-98bb68c4d-26ljd" is waiting to start: ContainerCreating
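Since the container has never started, kubectl logs has nothing to return, so any useful detail has to come from the node itself. My next step is to pull the node-side logs on worker-001 (assuming kubelet and CRI-O both run as systemd units):

  sudo journalctl -u kubelet -u crio --since "30 min ago" | grep -iE 'sandbox|kata'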
[server-001 ~]$ kubectl describe pods sandboxed-nginx-98bb68c4d-26ljd
Name:           sandboxed-nginx-98bb68c4d-26ljd
Namespace:      default
Priority:       0
Node:           worker-001/100.100.230.34
Start Time:     Fri, 08 Jul 2022 09:41:08 +0000
Labels:         name=sandboxed-nginx
                pod-template-hash=98bb68c4d
Annotations:    <none>
Status:         Pending
IP:
IPs:            <none>
Controlled By:  ReplicaSet/sandboxed-nginx-98bb68c4d
Containers:
  nginx-kata:
    Container ID:
    Image:          dummy-registry.com/test/nginx:1.17.7
    Image ID:
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Waiting
      Reason:       ContainerCreating
    Ready:          False
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-887n4 (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             False
  ContainersReady   False
  PodScheduled      True
Volumes:
  kube-api-access-887n4:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason                  Age    From               Message
  ----     ------                  ----   ----               -------
  Normal   Scheduled               25m    default-scheduler  Successfully assigned default/sandboxed-nginx-98bb68c4d-26ljd to worker-001
  Warning  FailedCreatePodSandBox  5m19s  kubelet            Failed to create pod sandbox: rpc error: code = DeadlineExceeded desc = context deadline exceeded
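The only event is the sandbox-creation timeout, which points at the kata shim rather than at the image or the scheduler. To check whether the host can run Kata at all, I intend to use Kata's own checker (assuming the kata-runtime CLI is installed alongside the shim; the subcommands are check/env on Kata 2.x, kata-check/kata-env on 1.x):

  kata-runtime check
  kata-runtime env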
[worker-001 ~]$ sudo crictl images
IMAGE                                         TAG        IMAGE ID        SIZE
dummy-registry.com/test/externalip-webhook    v1.0.0-1   e2e778d82e6c3   147MB
dummy-registry.com/test/flannel               v0.14.1    52e470e10ebf9   209MB
dummy-registry.com/test/kube-proxy            v1.22.8    93ab9e5f0c4d6   869MB
dummy-registry.com/test/nginx                 1.17.7     db634ca7e0456   310MB
dummy-registry.com/test/pause                 3.5        dabdc5fea3665   711kB
dummy-registry.com/test/linux                 7-slim     41388a53234b5   140MB
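The nginx:1.17.7 image is already present on the node, so an image pull is not what is timing out. If it helps, I can also dump its metadata:

  sudo crictl inspecti dummy-registry.com/test/nginx:1.17.7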
[worker-001 ~]$ sudo crictl ps
CONTAINER       IMAGE                                                              CREATED          STATE     NAME           ATTEMPT   POD ID
b1c6d1bf2f09a   db634ca7e045638213d3f68661164aa5c7d5b469631bbb79a8a65040666492d5   34 minutes ago   Running   nginx          0         3598c2c4d3e88
caaa14b395eb8   e2e778d82e6c3a8cc82cdf3083e55b084869cd5de2a762877640aff1e88659dd   48 minutes ago   Running   webhook        0         8a9697e2af6a1
4f97ac292753c   52e470e10ebf93ea5d2aa32f5ca2ecfa3a3b2ff8d2015069618429f3bb9cda7a   48 minutes ago   Running   kube-flannel   2         a4e4d0c14cafc
aacb3ed840065   93ab9e5f0c4d64c135c2e4593cd772733b025f53a9adb06e91fe49f500b634ab   48 minutes ago   Running   kube-proxy     2         9e0bc036c2d00
[worker-001 ~]$ sudo crictl pods
POD ID          CREATED          STATE   NAME                                             NAMESPACE                      ATTEMPT   RUNTIME
3598c2c4d3e88   34 minutes ago   Ready   nginx-9xtss                                      default                        0         (default)
8a9697e2af6a1   48 minutes ago   Ready   externalip-validation-webhook-7988bff847-ntv6d   externalip-validation-system   0         (default)
9e0bc036c2d00   48 minutes ago   Ready   kube-proxy-9c7cb                                 kube-system                    0         (default)
a4e4d0c14cafc   48 minutes ago   Ready   kube-flannel-ds-msz7w                            kube-system                    0         (default)
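Worth noting: the runc-backed nginx DaemonSet pod (nginx-9xtss) runs fine on the same node from the exact same image, and no sandbox for sandboxed-nginx ever appears in crictl pods. So the registry, CNI, and runc path all look healthy; only the kata handler fails to bring a sandbox up. I can confirm that nothing at all was created for it with:

  sudo crictl pods --name sandboxed-nginx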
[worker-001 ~]$ cat /etc/crio/crio.conf
[crio]
[crio.image]
pause_image = "dummy-registry.com/test/pause:3.5"
registries = ["docker.io", "dummy-registry.com/test"]
[crio.network]
plugin_dirs = ["/opt/cni/bin"]
[crio.runtime]
cgroup_manager = "systemd"
conmon_cgroup = "system.slice"
conmon = "/usr/libexec/crio/conmon"
manage_network_ns_lifecycle = true
manage_ns_lifecycle = true
selinux = false
[crio.runtime.runtimes]
[crio.runtime.runtimes.kata]
runtime_path = "/usr/bin/containerd-shim-kata-v2"
runtime_type = "vm"
runtime_root = "/run/vc"
[crio.runtime.runtimes.runc]
runtime_path = "/usr/bin/runc"
runtime_type = "oci"
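To rule out CRI-O silently ignoring the kata runtime table, I plan to ask the running daemon what it actually loaded (assuming the crio-status helper that ships with CRI-O is installed) and to confirm the shim binary is where the config says it is:

  sudo crio-status config | grep -A3 'crio.runtime.runtimes.kata'
  ls -l /usr/bin/containerd-shim-kata-v2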
[worker-001 ~]$ egrep -v '^#|^;|^$' /usr/share/defaults/kata-containers/configuration-qemu.toml
[hypervisor.qemu]
initrd = "/usr/share/kata-containers/kata-containers-initrd.img"
path = "/usr/libexec/qemu-kvm"
kernel = "/usr/share/kata-containers/vmlinuz.container"
machine_type = "q35"
enable_annotations = []
valid_hypervisor_paths = ["/usr/libexec/qemu-kvm"]
kernel_params = ""
firmware = ""
firmware_volume = ""
machine_accelerators=""
cpu_features="pmu=off"
default_vcpus = 1
default_maxvcpus = 0
default_bridges = 1
default_memory = 2048
disable_block_device_use = false
shared_fs = "virtio-9p"
virtio_fs_daemon = "/usr/libexec/kata-qemu/virtiofsd"
valid_virtio_fs_daemon_paths = ["/usr/libexec/kata-qemu/virtiofsd"]
virtio_fs_cache_size = 0
virtio_fs_extra_args = ["--thread-pool-size=1", "-o", "announce_submounts"]
virtio_fs_cache = "auto"
block_device_driver = "virtio-scsi"
enable_iothreads = false
enable_vhost_user_store = false
vhost_user_store_path = "/usr/libexec/qemu-kvm"
valid_vhost_user_store_paths = ["/var/run/kata-containers/vhost-user"]
valid_file_mem_backends = [""]
pflashes = []
valid_entropy_sources = ["/dev/urandom","/dev/random",""]
[factory]
[agent.kata]
kernel_modules=[]
[runtime]
internetworking_model="tcfilter"
disable_guest_seccomp=true
disable_selinux=false
sandbox_cgroup_only=true
static_sandbox_resource_mgmt=false
sandbox_bind_mounts=[]
vfio_mode="guest-kernel"
disable_guest_empty_dir=false
experimental=[]
[image]
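Since the default Kata config ships with its debug switches commented out, my next step is to turn them on so the shim logs the actual failure. A minimal override, assuming the standard lookup order in which /etc/kata-containers/configuration.toml takes precedence over the /usr/share/defaults file:

  [hypervisor.qemu]
  enable_debug = true
  [runtime]
  enable_debug = true

With that in place the shim output should land in the journal (syslog identifier kata on Kata 2.x, kata-runtime on 1.x):

  sudo journalctl -t kata --since "10 min ago"

I also want to verify that the hypervisor and virtiofsd binaries named above actually exist:

  ls -l /usr/libexec/qemu-kvm /usr/libexec/kata-qemu/virtiofsd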
[server-001 ~]$ cat nginx.yaml
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1
metadata:
  name: kata-containers
handler: kata
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: sandboxed-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      name: sandboxed-nginx
  template:
    metadata:
      labels:
        name: sandboxed-nginx
    spec:
      runtimeClassName: kata-containers
      containers:
      - name: nginx-kata
        image: dummy-registry.com/test/nginx:1.17.7
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: sandboxed-nginx
spec:
  type: NodePort
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
  selector:
    name: sandboxed-nginx
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx
  labels:
    name: nginx
spec:
  selector:
    matchLabels:
      name: nginx
  template:
    metadata:
      labels:
        name: nginx
    spec:
      tolerations:
      # this toleration is to have the daemonset runnable on master nodes
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: nginx
        image: dummy-registry.com/test/nginx:1.17.7
        ports:
        - containerPort: 80
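As far as I can tell, the wiring is consistent: the RuntimeClass handler kata matches the [crio.runtime.runtimes.kata] table in crio.conf, and the Deployment references runtimeClassName: kata-containers. I can double-check the object on the API side too:

  kubectl get runtimeclass kata-containers -o yaml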
[server-001 ~]$ kubectl apply -f nginx.yaml
runtimeclass.node.k8s.io/kata-containers unchanged
deployment.apps/sandboxed-nginx created
service/sandboxed-nginx created
daemonset.apps/nginx created
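One last thing I plan to verify, since every Kata pod boots a QEMU VM: that worker-001 actually exposes KVM to the runtime:

  ls -l /dev/kvm
  lsmod | grep kvm

Any pointers on what else to collect, or on what usually causes the DeadlineExceeded sandbox error with Kata under CRI-O, would be much appreciated.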