Kubernetes
https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration
service kubelet stop
docker rm -f $(docker ps -a -q --filter "name=k8s")
service containerd stop
systemctl status kubelet
journalctl -xeu kubelet
=== Cluster Info ===
kubectl cluster-info
kubectl get nodes
kubectl get pods --all-namespaces -o wide
kubectl describe pod -n kubernetes-dashboard
=== SWAP OFF ===
Kubernetes does not run with swap enabled by default, because swap interferes with the kubelet's memory accounting and scheduling. It must be disabled:
swapoff /var/swapfile
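Note that swapoff alone does not survive a reboot while the fstab entry added under SWAP ON below is present; a minimal sketch to disable swap persistently:
 swapoff -a
 sed -i '/\/var\/swapfile/ s/^/#/' /etc/fstab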
=== SWAP ON ===
dd if=/dev/zero of=/var/swapfile bs=1M count=4128
mkswap /var/swapfile
chmod 600 /var/swapfile
echo "/var/swapfile none swap sw 0 0" >> /etc/fstab
swapon /var/swapfile
=== Rebuild Cluster ===
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
sudo chmod 644 /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo chmod 644 /etc/apt/sources.list.d/kubernetes.list
/etc/modules-load.d/modules.conf:
overlay
br_netfilter
/etc/sysctl.d/ipv4_forward.conf:
net.ipv4.ip_forward = 1
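These two files only take effect at boot; to load the modules and apply the sysctl setting immediately, something like the following should work:
 sudo modprobe overlay
 sudo modprobe br_netfilter
 sudo sysctl --system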
sudo apt install kubeadm kubelet containerd
/etc/containerd/config.toml:
version = 2
root = "/var/lib/containerd"
state = "/run/containerd"
oom_score = 0
[plugins]
  [plugins."io.containerd.grpc.v1.cri"]
    [plugins."io.containerd.grpc.v1.cri".containerd]
      default_runtime_name = "runc"
      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          privileged_without_host_devices = false
          runtime_type = "io.containerd.runc.v2"
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
            BinaryName = "/usr/sbin/runc"
            SystemdCgroup = true
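If the config is missing or broken, it can be regenerated from the defaults and containerd restarted; note that regenerating overwrites manual edits, so SystemdCgroup and the runc path have to be adjusted again afterwards:
 containerd config default | sudo tee /etc/containerd/config.toml
 sudo systemctl restart containerd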
kubeadm reset
rm -rf /etc/cni/net.d
ip link delete cni0
ip link delete flannel.1
kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=10.0.0.203
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubeadm token create --print-join-command
kubeadm join 10.0.0.230:6443 --token 9uujzu.c1wjk2y65uiasfwf \
    --discovery-token-ca-cert-hash sha256:0ed142167d4edf1c96ec21522d2dda6b564cc139c03513a289016c357d5dc97b
kubectl get nodes
kubectl label node build node-role.kubernetes.io/worker=worker
kubectl describe node | grep -i taint
kubectl taint nodes build node-role.kubernetes.io/control-plane:NoSchedule-
kubectl get pods --all-namespaces -o wide
kubectl get events --namespace=kube-system
kubectl get endpoints --namespace=kube-system
kubectl get endpoints --namespace=kubernetes-dashboard
=== Flannel Network ===
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
=== DNS Troubleshoot ===
https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/
kubectl apply -f https://k8s.io/examples/admin/dns/dnsutils.yaml
kubectl get pods dnsutils
kubectl exec -i -t dnsutils -- nslookup kubernetes.default
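If the lookup fails, the debugging guide linked above goes on to inspect the CoreDNS pods, their logs, and the kube-dns service, roughly:
 kubectl get pods --namespace=kube-system -l k8s-app=kube-dns
 kubectl logs --namespace=kube-system -l k8s-app=kube-dns
 kubectl describe svc kube-dns --namespace=kube-system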
=== Kubernetes Dashboard ===
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --create-namespace --namespace kubernetes-dashboard
kubectl -n kubernetes-dashboard port-forward svc/kubernetes-dashboard-kong-proxy 8443:443
https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md
kubectl apply -f serviceaccount.yaml
kubectl apply -f clusterbinding.yaml
kubectl -n kubernetes-dashboard create token admin-user
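The serviceaccount.yaml and clusterbinding.yaml files applied above are not reproduced on this page; based on the creating-sample-user guide linked above, they look roughly like this:
 apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: admin-user
   namespace: kubernetes-dashboard
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   name: admin-user
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
   name: cluster-admin
 subjects:
 - kind: ServiceAccount
   name: admin-user
   namespace: kubernetes-dashboard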
=== HAPROXY Ingress Helm ===
helm repo add haproxytech https://haproxytech.github.io/helm-charts
helm repo update
helm install haproxy haproxytech/kubernetes-ingress \
    --set controller.kind=DaemonSet --set controller.daemonset.useHostPort=true
=== HAPROXY Ingress Deployment ===
https://www.haproxy.com/blog/announcing-haproxy-kubernetes-ingress-controller-1-5/
https://www.haproxy.com/documentation/kubernetes/latest/configuration/controller/
kubectl apply -f https://raw.githubusercontent.com/haproxytech/kubernetes-ingress/master/deploy/haproxy-ingress.yaml
kubectl describe pod -n haproxy-controller haproxy-ingress-67f7c8b555-lw6cs
kubectl describe pod -n haproxy-controller ingress-default-backend-78f5cc7d4c-2kvqv
=== NGINX ===
kubectl create namespace nginx
kubectl apply -f deployment-nginx.yaml -n nginx
kubectl create service nodeport nginx --tcp=80:80 -n nginx
kubectl apply -f nginx-ingress.yaml -n nginx
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:alpine
        ports:
        - containerPort: 80
EOF
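The nginx-ingress.yaml referenced above is not shown on this page; a minimal sketch that routes through the HAProxy ingress controller (the hostname and ingressClassName are assumptions) could be:
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
   name: nginx
   namespace: nginx
 spec:
   ingressClassName: haproxy
   rules:
   - host: nginx.example.com
     http:
       paths:
       - path: /
         pathType: Prefix
         backend:
           service:
             name: nginx
             port:
               number: 80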
spec:
  clusterIP: 10.105.252.237
  externalTrafficPolicy: Cluster
  greeting: hello
  ports:
ClusterIP: Exposes the Service on a cluster-internal IP. Choosing this value makes the Service only reachable from within the cluster. This is the default ServiceType.
NodePort: Exposes the Service on each Node's IP at a static port (the NodePort). A ClusterIP Service, to which the NodePort Service routes, is automatically created. You'll be able to contact the NodePort Service, from outside the cluster, by requesting <NodeIP>:<NodePort>.
LoadBalancer: Exposes the Service externally using a cloud provider's load balancer. NodePort and ClusterIP Services, to which the external load balancer routes, are automatically created.
ExternalName: Maps the Service to the contents of the externalName field (e.g. foo.bar.example.com), by returning a CNAME record with its value. No proxying of any kind is set up.
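As an illustration of the NodePort case described above, this is roughly the manifest equivalent of the `kubectl create service nodeport nginx` command used in the NGINX section; the nodePort value 30080 is an arbitrary choice from the 30000-32767 range:
 cat <<EOF | kubectl apply -f -
 apiVersion: v1
 kind: Service
 metadata:
   name: nginx
   namespace: nginx
 spec:
   type: NodePort
   selector:
     app: nginx
   ports:
   - port: 80
     targetPort: 80
     nodePort: 30080
 EOF
The service is then reachable from outside the cluster at <NodeIP>:30080.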
=== Helm Mediawiki ===
helm repo add bitnami https://charts.bitnami.com/bitnami
helm install my-release bitnami/mediawiki
helm install 1.35.1 bitnami/mediawiki
redis
mediawiki
asterisk
=== Helm Bitnami ===
helm repo add bitnami https://charts.bitnami.com/bitnami
helm install mediawiki bitnami/mediawiki
NAME: mediawiki
LAST DEPLOYED: Sun Feb 7 09:53:38 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
###############################################################################
### ERROR: You did not provide an external host in your 'helm install' call ###
###############################################################################
This deployment will be incomplete until you configure Mediawiki with a resolvable host. To configure Mediawiki with the URL of your service:
Get the Mediawiki URL by running:
kubectl get svc --namespace default -w mediawiki
export APP_HOST=$(kubectl get svc --namespace default mediawiki --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}")
export APP_PASSWORD=$(kubectl get secret --namespace default mediawiki -o jsonpath="{.data.mediawiki-password}" | base64 --decode)
export MARIADB_ROOT_PASSWORD=$(kubectl get secret --namespace default mediawiki-mariadb -o jsonpath="{.data.mariadb-root-password}" | base64 --decode)
export MARIADB_PASSWORD=$(kubectl get secret --namespace default mediawiki-mariadb -o jsonpath="{.data.mariadb-password}" | base64 --decode)
helm upgrade mediawiki bitnami/mediawiki \
    --set mediawikiHost=$APP_HOST,mediawikiPassword=$APP_PASSWORD,mariadb.auth.rootPassword=$MARIADB_ROOT_PASSWORD,mariadb.auth.password=$MARIADB_PASSWORD
kubectl get secret --namespace default mediawiki -o jsonpath="{.data.mediawiki-password}" | base64 --decode
=== OpenEBS ===
https://github.com/openebs/lvm-localpv/blob/develop/docs/storageclasses.md
helm repo add openebs https://openebs.github.io/openebs
helm repo update
helm install openebs --namespace openebs openebs/openebs --create-namespace
helm ls -n openebs
kubectl apply -f sc.yaml
kubectl get sc
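The sc.yaml applied above is not included on this page; following the lvm-localpv storageclasses.md linked above, a minimal StorageClass sketch (the volume group name vg-data is an assumption) would be:
 apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
   name: openebs-lvmpv
 parameters:
   storage: "lvm"
   volgroup: "vg-data"
 provisioner: local.csi.openebs.io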
=== Gluster ===
https://kubernetes.io/docs/concepts/storage/volumes/
https://github.com/kubernetes/examples/tree/master/volumes/glusterfs
https://github.com/gluster/glusterfs/issues/268
https://github.com/gluster/glusterfs/blob/master/libglusterfs/src/syncop.c
https://github.com/gluster/glusterfs/blob/master/doc/developer-guide/syncop.md
pthread context / getcontext, swapcontext, makecontext
SUSv2, POSIX.1-2001. POSIX.1-2008 removes the specifications of makecontext() and swapcontext(), citing portability issues, and recommending that applications be rewritten to use POSIX threads instead.
brickmux-thread-reduction.md
=== LVM Resize ===
lvreduce -L -24G /dev/vg-data/data
lvcreate -L 24G -n kubernetes vg-data
mkfs.xfs -i size=512 /dev/vg-data/kubernetes
mkdir -p /export/kubernetes/brick
echo "/dev/vg-data/kubernetes /export/kubernetes xfs defaults 0 0" >> /etc/fstab
mount -a
gluster volume create gv0 ubuntu:/export/kubernetes/brick
gluster volume create gv0 replica 2 ubuntu:/export/kubernetes/brick u2:/export/kubernetes/brick
gluster volume info
gluster volume start gv0
mount -t glusterfs acer:gv0 /mnt/glusterfs
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-cluster
subsets:
- addresses:
  - ip: 10.0.0.250
  ports:
  - port: 1
EOF
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: glusterfs
    image: nginx
    volumeMounts:
    - mountPath: "/usr/share/nginx/html/"
      name: glusterfsvol
  volumes:
  - name: glusterfsvol
    glusterfs:
      endpoints: glusterfs-cluster
      path: gv0
      readOnly: true
EOF
kubectl cp index.html nginx:/usr/share/nginx/html/
mount -t glusterfs acer:gv0 /mnt/glusterfs
cp ~/index.html .
apiVersion: v1
kind: PersistentVolume
metadata:
  name: gluster-pv
spec:
  capacity:
    storage: 24Gi
  storageClassName: standard
  accessModes:
  - ReadWriteMany
  glusterfs:
    endpoints: gluster-cluster
    path: /gv0
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nginx-pvc
spec:
  accessModes:
  - ReadOnlyMany
  resources:
    requests:
      storage: 1Gi
  selector:
    matchLabels:
      name: standard
kind: PersistentVolume
apiVersion: v1
metadata:
  name: gluster-pv
  labels:
    name: models-1-0-0
spec:
  capacity:
    storage: 200Gi
  storageClassName: standard
  accessModes:
  - ReadOnlyMany
  gcePersistentDisk:
    pdName: models-1-0-0
    fsType: ext4
    readOnly: true
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nginx-pvc
spec:
  accessModes:
  - ReadOnlyMany
  resources:
    requests:
      storage: 1Gi
  selector:
    matchLabels:
      name: glusterfs
lvreduce -L -24G /dev/vg-data/data
lvcreate -L 24G -n kubernetes vg-data
mkfs.xfs -i size=512 /dev/vg-data/kubernetes
mkdir -p /export/kubernetes/brick
echo "/dev/vg-data/kubernetes /export/kubernetes xfs defaults 0 0" >> /etc/fstab
mount -a
sudo gluster volume info
gluster volume create gv0 ubuntu:/export/kubernetes/brick
gluster volume create gv0 replica 2 ubuntu:/export/kubernetes/brick u2:/export/kubernetes/brick
gluster volume info
gluster volume start gv0
mount -t glusterfs acer:gv0 /mnt/glusterfs
mount -t glusterfs ubuntu:gv0 /mnt/glusterfs
mount -t glusterfs ubuntu:gv1 /mnt/glusterfs
mount -t glusterfs ubuntu:gv2 /mnt/glusterfs
https://kubernetes.io/docs/concepts/storage/volumes/
https://github.com/kubernetes/examples/tree/master/volumes/glusterfs
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolume
metadata:
  name: gfs-pv0
spec:
  capacity:
    storage: 8Gi
  accessModes:
  - ReadWriteMany
  glusterfs:
    endpoints: gfs-cluster
    path: /gv0
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: gfs-pv1
spec:
  capacity:
    storage: 8Gi
  accessModes:
  - ReadWriteMany
  glusterfs:
    endpoints: gfs-cluster
    path: /gv1
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: gfs-pv2
spec:
  capacity:
    storage: 8Gi
  accessModes:
  - ReadWriteMany
  glusterfs:
    endpoints: gfs-cluster
    path: /gv2
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mariadb
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 8Gi
---
apiVersion: v1
kind: Endpoints
metadata:
  name: gfs-cluster
subsets:
- addresses:
  - ip: 10.0.0.230
  ports:
  - port: 1
---
apiVersion: v1
kind: Service
metadata:
  name: gfs-cluster
spec:
  ports:
  - port: 1
EOF
helm uninstall mariadb
helm install mariadb bitnami/mariadb --set primary.persistence.existingClaim=mariadb
kubectl get pods -w --namespace default -l release=mariadb
kubectl get secret --namespace default mariadb -o jsonpath="{.data.mariadb-root-password}" | base64 --decode
kubectl run mariadb-client --rm --tty -i --restart='Never' --image docker.io/bitnami/mariadb:10.5.8-debian-10-r69 --namespace default --command -- bash
mysql -h mariadb.default.svc.cluster.local -uroot -p my_database
ROOT_PASSWORD=$(kubectl get secret --namespace default mariadb -o jsonpath="{.data.mariadb-root-password}" | base64 --decode)
helm upgrade mariadb bitnami/mariadb --set auth.rootPassword=$ROOT_PASSWORD
=== Azure AKS ===
Import-Module Az.Accounts
Connect-AzAccount
Install-AzAksKubectl
Write-Output $Env:Path
Import-AzAksCredential -ResourceGroupName cluster -Name cluster
kubectl version
kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}'
kubectl get nodes
kubectl get namespace
kubectl apply -f https://raw.githubusercontent.com/Azure-Samples/azure-voting-app-redis/master/azure-vote-all-in-one-redis.yaml
kubectl get pods
kubectl get services
kubectl cordon $(kubectl get nodes -o name)
kubectl.exe apply -f .\vote.yaml
https://azure.github.io/application-gateway-kubernetes-ingress/features/private-ip/
https://azure.github.io/application-gateway-kubernetes-ingress/
Remove-AzResourceGroup -Name cluster
kubectl apply -f https://raw.githubusercontent.com/Azure/application-gateway-kubernetes-ingress/master/deploy/azuredeploy.json
wget https://raw.githubusercontent.com/Azure/application-gateway-kubernetes-ingress/master/deploy/azuredeploy.json
resourceGroupName="cluster"
location="westeurope"
deploymentName="ingress-appgw"
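The variables above are presumably inputs for deploying the downloaded azuredeploy.json template into the resource group; a hedged sketch with the Azure CLI (the template's own parameter names are not shown here, so pass them as needed with --parameters):
 az group create --name $resourceGroupName --location $location
 az deployment group create \
     --resource-group $resourceGroupName \
     --name $deploymentName \
     --template-file azuredeploy.json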
=== Google GCP GKE ===
gcloud container clusters get-credentials your-first-cluster-1 --zone europe-north1-c --project third-light
gcloud compute ssh username@gke-your-first-cluster-1-pool-1-34a9d362
ssh -i id_ed25519 username@35.228.24.238
ssh -i .ssh/google_compute_engine username@35.228.239.240 -L 8444:10.3.255.31:8444
curl -vk https://localhost:8444
kube.config
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLBVEUtLS0tLQo=
    server: https://35.228.120.76
  name: gke_third-light-233710_europe-north1-c_your-first-cluster-1
contexts:
- context:
    cluster: gke_third-light-233710_europe-north1-c_your-first-cluster-1
    user: gke_third-light-233710_europe-north1-c_your-first-cluster-1
  name: gke_third-light-233710_europe-north1-c_your-first-cluster-1
current-context: gke_third-light-233710_europe-north1-c_your-first-cluster-1
kind: Config
preferences: {}
users:
- name: gke_third-light-233710_europe-north1-c_your-first-cluster-1
  user:
    auth-provider:
      config:
        access-token: ya29.GlsjB2Z-Sk3dGWQVofpYCb7-06xo2ZLnpNJ7Ml67pkH
        cmd-args: config config-helper --format=json
        cmd-path: /usr/bin/gcloud
        expiry: "2019-06-09T09:39:47Z"
        expiry-key: '{.credential.token_expiry}'
        token-key: '{.credential.access_token}'
      name: gcp
=== Ignition ===
https://www.codetab.org/post/kubernetes-cluster-virtualbox/
storage:
  files:
  - path: /etc/sysctl.d/20-silence-audit.conf
    contents:
      inline: |
        kernel.printk=4
  - path: /etc/hostname
    mode: 420
    contents:
      source: "data:,fcos"
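Assuming the fragment above comes from a Butane config (e.g. config.bu), it has to be transpiled into the config.ign consumed by coreos-installer below; a typical invocation:
 podman run --interactive --rm quay.io/coreos/butane:release --pretty --strict < config.bu > config.ign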
https://docs.okd.io/4.11/architecture/architecture-rhcos.html
sudo podman run --pull=always --privileged --rm \
    -v /dev:/dev -v /run/udev:/run/udev -v .:/data -w /data \
    quay.io/coreos/coreos-installer:release \
    install /dev/vdb -i config.ign
=== Alpine ===
https://wiki.alpinelinux.org/wiki/K8s
=== OpenEBS cStor ===
kubectl apply -f https://openebs.github.io/charts/cstor-operator.yaml