k3s
- k3s - Lightweight Kubernetes
- https://k3s.io/
- https://github.com/rancher/k3s/releases/tag/v1.17.0+k3s.1
- https://rancher.com/docs/k3s/latest/en/
- https://rancher.com/docs/k3s/latest/en/quick-start/

K3s works well on everything from something as small as a Raspberry Pi up to an AWS a1.4xlarge 32GiB server. Download the latest k3s release; x86_64, ARMv7, and ARM64 builds are provided. K3s targets situations where a PhD in k8s clusterology is infeasible.
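The installer below always picks the latest release; if a specific version is wanted (for example the v1.25.3+k3s1 binary used further down this page), the install script can be pinned through the INSTALL_K3S_VERSION environment variable, an option of get.k3s.io not shown in the original notes:

# install a specific k3s release instead of the latest
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION="v1.25.3+k3s1" sh -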
curl -sfL https://get.k3s.io | sh -
sudo curl -sfL https://get.k3s.io | sh -
# [INFO]  Finding latest release
# [INFO]  Using v1.17.0+k3s.1 as release
# [INFO]  Downloading hash https://github.com/rancher/k3s/releases/download/v1.17.0+k3s.1/sha256sum-amd64.txt
# [INFO]  Downloading binary https://github.com/rancher/k3s/releases/download/v1.17.0+k3s.1/k3s
# [INFO]  Verifying binary download
# [INFO]  Installing k3s to /usr/local/bin/k3s
# [INFO]  Creating /usr/local/bin/kubectl symlink to k3s
# [INFO]  Creating /usr/local/bin/crictl symlink to k3s
# [INFO]  Creating /usr/local/bin/ctr symlink to k3s
# [INFO]  Creating killall script /usr/local/bin/k3s-killall.sh
# [INFO]  Creating uninstall script /usr/local/bin/k3s-uninstall.sh
# [INFO]  env: Creating environment file /etc/systemd/system/k3s.service.env
# [INFO]  systemd: Creating service file /etc/systemd/system/k3s.service
# [INFO]  systemd: Enabling k3s unit
# Created symlink /etc/systemd/system/multi-user.target.wants/k3s.service → /etc/systemd/system/k3s.service.
# [INFO]  systemd: Starting k3s
# as root
k3s kubectl cluster-info
kubectl create deployment springboot-test --image=vbodocker/springboot-test:latest
kubectl expose deployment springboot-test --port=8000 --target-port=8080 --type=NodePort
kubectl get services
IP_SPRINGBOOT=$(kubectl get services | grep springboot | awk '{print $3}')
curl http://$IP_SPRINGBOOT:8000/dummy
# list containerd images and containers
k3s crictl images
k3s crictl ps
# connect to a container by id
crictl exec -it 997a2ad8c763a sh
# connect to container/pod
kubectl get pods
kubectl exec -it springboot-test-6bb5fdfc48-phh8k -- sh
cat /etc/os-release # alpine linux in container
# give sudo rights to user
/sbin/usermod -aG sudo user
# scale pods
sudo kubectl scale deployment springboot-test --replicas=3
sudo kubectl get pods -o wide
# add mariadb pod/service
sudo kubectl create deployment mariadb-test --image=mariadb:latest
sudo kubectl get pods -o wide
sudo kubectl delete deployment mariadb-test
# https://kubernetes.io/docs/tasks/run-application/run-single-instance-stateful-application/
sudo kubectl apply -f mariadb-pv.yaml
#persistentvolume/mariadb-pv-volume created
#persistentvolumeclaim/mariadb-pv-claim created
sudo kubectl apply -f mariadb-deployment.yaml
#service/mariadb created
#deployment.apps/mariadb created
sudo kubectl describe deployment mariadb
sudo kubectl get svc -o wide
# connect to mariadb pod
sudo kubectl exec -it mariadb-8578f4dc8c-r4ftv -- /bin/bash
ss -atn # show listening tcp ports
ip address # show ip addresses
mysql -h localhost -p
mysql -u root -h 10.42.0.12 -p

# delete service, persistent volume claim and persistent volume
sudo kubectl delete deployment,svc mariadb
sudo kubectl delete pvc mariadb-pv-claim
sudo kubectl delete pv mariadb-pv-volume
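The curl above targets the service's cluster IP on port 8000. Because the service type is NodePort, it should also be reachable on a high port on the node itself; a small sketch of looking that port up (the jsonpath query is standard kubectl, the node IP lookup via hostname -I is an assumption about the host):

# find the NodePort assigned to springboot-test and call it via the node's IP
NODE_PORT=$(kubectl get service springboot-test -o jsonpath='{.spec.ports[0].nodePort}')
curl http://$(hostname -I | awk '{print $1}'):$NODE_PORT/dummy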
mariadb-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mariadb-pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/mnt/data"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mariadb-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
mariadb-deployment.yaml
apiVersion: v1
kind: Service
metadata:
  name: mariadb
spec:
  ports:
  - port: 3306
  selector:
    app: mariadb
  clusterIP: None
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: mariadb
spec:
  selector:
    matchLabels:
      app: mariadb
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mariadb
    spec:
      containers:
      - image: mariadb:latest
        name: mariadb
        env:
          # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: password
        ports:
        - containerPort: 3306
          name: mariadb
        volumeMounts:
        - name: mariadb-persistent-storage
          mountPath: /var/lib/mariadb
      volumes:
      - name: mariadb-persistent-storage
        persistentVolumeClaim:
          claimName: mariadb-pv-claim
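The manifest above sets MYSQL_ROOT_PASSWORD in plain text; as the comment says, a Secret is the better option in real usage. A rough sketch, where the secret name mariadb-root and key password are only examples:

kubectl create secret generic mariadb-root --from-literal=password='change-me'

and in the container spec the literal value is replaced with a reference:

        env:
        - name: MYSQL_ROOT_PASSWORD
          valueFrom:
            secretKeyRef:
              name: mariadb-root
              key: password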
Init containers
- https://kubernetes.io/docs/concepts/workloads/pods/init-containers/

Init containers can contain utilities or setup scripts not present in an app image.
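A minimal sketch of the idea, assuming the mariadb service from the earlier manifest; the pod name, busybox image and wait loop are only illustrative:

apiVersion: v1
kind: Pod
metadata:
  name: init-demo
spec:
  initContainers:
  - name: wait-for-mariadb
    image: busybox:latest
    # block until the mariadb service name resolves in cluster DNS
    command: ['sh', '-c', 'until nslookup mariadb; do echo waiting for mariadb; sleep 2; done']
  containers:
  - name: app
    image: alpine:latest
    command: ["/bin/sleep", "3650d"]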
systemctl commands

systemctl start k3s
systemctl stop k3s
systemctl status k3s
systemctl disable k3s.service
systemctl enable k3s
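When k3s was installed through the script above it runs as a systemd unit, so its logs can be followed from the journal (assuming a systemd-based distribution):

# follow the k3s service logs
journalctl -u k3s -f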
Ubuntu pod
ubuntu.yaml

apiVersion: v1
kind: Pod
metadata:
  name: ubuntu
  labels:
    app: ubuntu
spec:
  containers:
  - name: ubuntu
    image: ubuntu:latest
    command: ["/bin/sleep", "3650d"]
    imagePullPolicy: IfNotPresent
  restartPolicy: Always

sudo kubectl apply -f ubuntu.yaml
sudo kubectl get pods
sudo kubectl exec -it ubuntu -- bash
sudo kubectl delete pod ubuntu
Alpine pod
alpine.yaml

apiVersion: v1
kind: Pod
metadata:
  name: alpine
  labels:
    app: alpine
spec:
  containers:
  - name: alpine
    image: alpine:latest
    command: ["/bin/sleep", "3650d"]
    imagePullPolicy: IfNotPresent
  restartPolicy: Always

sudo kubectl apply -f alpine.yaml
sudo kubectl exec -it alpine -- sh
Nginx with persistent volume

# Pods use PersistentVolumeClaims to request physical storage.
cd /tmp
mkdir -p /tmp/data
echo 'Hello from Kubernetes storage' > /tmp/data/index.html

pv-volume.yaml

apiVersion: v1
kind: PersistentVolume
metadata:
  name: task-pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 0.2Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/tmp/data"

pv-claim.yaml

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: task-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 0.2Gi
pv-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: task-pv-pod
spec:
  volumes:
    - name: task-pv-storage
      persistentVolumeClaim:
        claimName: task-pv-claim
  containers:
    - name: task-pv-container
      image: nginx
      ports:
        - containerPort: 80
          name: "http-server"
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: task-pv-storage

sudo kubectl apply -f pv-volume.yaml
sudo kubectl apply -f pv-claim.yaml
sudo kubectl apply -f pv-pod.yaml
sudo kubectl get pods -o wide
curl http://10.42.0.28/ # pod IP from the previous command
sudo kubectl exec -it task-pv-pod -- bash
cd /usr/share/nginx/html
echo "Hey from Kubernetes storage" > index.html
cat /etc/os-release # debian buster
kubectl delete pod task-pv-pod
kubectl delete pvc task-pv-claim
kubectl delete pv task-pv-volume
cat /tmp/data/index.html
Generate yaml
sudo kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test --dry-run=client --output=yaml
sudo kubectl expose deployment cherrypy-test --port=8080 --type=NodePort --dry-run=client --output=yaml
sudo kubectl scale deployment cherrypy-test --replicas=3 --dry-run=client --output=yaml
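The generated YAML can be redirected to a file, edited, and applied, which is a handy way to bootstrap manifests; the file name below is arbitrary:

sudo kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test --dry-run=client --output=yaml > cherrypy-test-deployment.yaml
sudo kubectl apply -f cherrypy-test-deployment.yaml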
Alpine persistent volume
alpine-shared.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: alpine-pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 0.2Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/tmp/alpine-data"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: alpine-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 0.2Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: alpine-pod
  labels:
    app: alpine-pod
spec:
  volumes:
    - name: alpine-pv-storage
      persistentVolumeClaim:
        claimName: alpine-pv-claim
  containers:
  - name: alpine
    image: alpine:latest
    command: ["/bin/sleep", "3650d"]
    imagePullPolicy: IfNotPresent
    volumeMounts:
      - mountPath: "/mnt/alpine/data"
        name: alpine-pv-storage
  restartPolicy: Always

mkdir -p /tmp/alpine-data/
sudo kubectl apply -f alpine-shared.yaml
sudo kubectl exec -it alpine-pod -- sh # inside pod
cd /mnt/alpine/data # inside pod
echo "test" > x.txt # inside pod
# on the host
cat /tmp/alpine-data/x.txt # k8s host
MariaDB + NFS
# /etc/exports entry on the NFS server (here the k8s host itself)
/vol *(rw,sync,insecure,fsid=0,no_subtree_check,no_root_squash)
exportfs -rav
# exporting *:/vol
mkdir -p /vol/mariadb-0

kubectl apply -f mariadb-nfs.yaml
kubectl exec -it mariadb-79847f5d97-smbdx -- bash
touch /var/lib/mariadb/b
mount | grep nfs
kubectl delete -f mariadb-nfs.yaml
kubectl get pods
kubectl get pvc
kubectl get pv
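Before wiring the PersistentVolume to the export it can help to confirm the NFS server really offers it; a quick check, assuming showmount from nfs-utils is installed:

# list exports offered by the local NFS server
showmount -e 127.0.0.1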
mariadb-nfs.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mdb-vol-0
  labels:
    volume: mdb-volume
spec:
  storageClassName: manual
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 127.0.0.1
    path: "/vol/mariadb-0"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mdb-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
  name: mariadb
spec:
  ports:
  - port: 3306
  selector:
    app: mariadb
  clusterIP: None
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: mariadb
spec:
  selector:
    matchLabels:
      app: mariadb
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mariadb
    spec:
      containers:
      - image: mariadb:latest
        name: mariadb
        env:
          # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: password
        ports:
        - containerPort: 3306
          name: mariadb
        volumeMounts:
        - name: mdb-persistent-storage
          mountPath: /var/lib/mariadb
      volumes:
      - name: mdb-persistent-storage
        persistentVolumeClaim:
          claimName: mdb-pv-claim
Persistent volumes
- https://kubernetes.io/docs/concepts/storage/persistent-volumes/

A PersistentVolume (PV) is a piece of storage in the cluster that has been provisioned by an administrator or dynamically provisioned using Storage Classes.
A PersistentVolumeClaim (PVC) is a request for storage by a user.
Pods consume node resources and PVCs consume PV resources. Claims can request specific size and access modes (e.g., they can be mounted ReadWriteOnce, ReadOnlyMany or ReadWriteMany; see AccessModes).
Types of Persistent Volumes:
- local - local storage devices mounted on nodes
- nfs - Network File System (NFS) storage
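A claim stays Pending until a matching volume binds to it; status, capacity and access modes can be inspected with plain kubectl (the claim name below is from the MariaDB example above):

kubectl get pv
kubectl get pvc
kubectl describe pvc mariadb-pv-claim # shows why a claim is not yet bound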
 
Ingress controller nginx example
ingress-cherrypy-test.yml

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-cherrypy-test
spec:
  rules:
  - host: cp.info
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: cherrypy-test
            port:
              number: 8000
  ingressClassName: nginx
Steps
# install k3s
curl -sfL https://get.k3s.io | sh -
KUBECONFIG=~/.kube/config
mkdir ~/.kube 2> /dev/null
sudo k3s kubectl config view --raw > "$KUBECONFIG"
chmod 600 "$KUBECONFIG"
nano ~/.bashrc
export KUBECONFIG=~/.kube/config   # also add this line to ~/.bashrc
. ~/.bashrc

sudo nano /etc/systemd/system/k3s.service
# set the ExecStart line to:
# ExecStart=/usr/local/bin/k3s server --write-kubeconfig-mode=644
sudo systemctl daemon-reload
sudo service k3s start
sudo service k3s status
kubectl get pods

k3s kubectl cluster-info

kubectl -n kube-system delete helmcharts.helm.cattle.io traefik
sudo service k3s stop
sudo nano /etc/systemd/system/k3s.service
# ExecStart=/usr/local/bin/k3s server --write-kubeconfig-mode=644 --no-deploy traefik
sudo systemctl daemon-reload
sudo rm /var/lib/rancher/k3s/server/manifests/traefik.yaml
sudo service k3s start
kubectl -n kube-system delete helmcharts.helm.cattle.io traefik
sudo systemctl restart k3s

kubectl get nodes
kubectl delete node localhost
kubectl get pods --all-namespaces
kubectl get services --all-namespaces
kubectl get deployment --all-namespaces

# install nginx ingress controller
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.4.0/deploy/static/provider/cloud/deploy.yaml
kubectl get pods --namespace=ingress-nginx

kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test
kubectl expose deployment cherrypy-test --port=8000 --target-port=8080 --type=ClusterIP # cluster ip port 8000
kubectl get services

kubectl apply -f ingress-cherrypy-test.yml

EXTERNAL_IP=$(ip addr show | grep wlp | grep inet | awk '{print $2}' | sed 's/\// /g' | awk '{print $1}') # wlp* = wireless interface; adjust to your NIC
echo $EXTERNAL_IP
sudo sh -c " echo '$EXTERNAL_IP cp.info' >> /etc/hosts "
kubectl get ingress
curl cp.info

kubectl scale deployment cherrypy-test --replicas=5
curl http://cp.info/ -vvv
sudo apt install apache2-utils
ab -n 10 -c 10 http://cp.info/

# Push image to docker hub
docker build -t vbodocker/cherrypy-test .
docker run -p 8080:8080 vbodocker/cherrypy-test
docker login # login to docker hub
docker push vbodocker/cherrypy-test
docker pull vbodocker/cherrypy-test:latest

# Rollout, deploy new image
kubectl get deployments -o wide # shows image urls
kubectl rollout restart deployment cherrypy-test # redeploy image url for cherrypy-test
kubectl rollout status deployment cherrypy-test
kubectl get deployments -o wide
kubectl get pods -o wide # age should be low for the newly deployed pods
Install k3s static binary in Slack64
- https://github.com/k3s-io/k3s#k3s---lightweight-kubernetes
- Binaries available in https://github.com/k3s-io/k3s#manual-download
- wget https://github.com/k3s-io/k3s/releases/download/v1.25.3%2Bk3s1/k3s

sudo mv ~/Downloads/k3s /usr/bin/
sudo chmod 744 /usr/bin/k3s # use 755 if kubectl will be run as a non-root user
/etc/rc.d/rc.k3s
#!/bin/sh
PATH=$PATH:/usr/sbin

k3s_start() {
  /usr/bin/k3s server --write-kubeconfig-mode=644 \
  --disable traefik > /var/log/k3s.log 2>&1 &
}

k3s_stop() {
  kill $(ps uax | grep "/usr/bin/k3s" | grep -v grep | head -1 | awk '{print $2}')
  ps uax | grep containerd | grep -v grep | awk '{print $2}' | xargs -I{} kill {}
}

k3s_restart() {
  k3s_stop
  k3s_start
}

case "$1" in
'start')
  k3s_start
  ;;
'stop')
  k3s_stop
  ;;
'restart')
  k3s_restart
  ;;
*)
  echo "usage $0 start|stop|restart"
esac
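To start k3s at boot on Slackware, the script can be made executable and hooked into rc.local; a sketch assuming the stock /etc/rc.d/rc.local layout:

sudo chmod +x /etc/rc.d/rc.k3s
# append to /etc/rc.d/rc.local:
#   if [ -x /etc/rc.d/rc.k3s ]; then /etc/rc.d/rc.k3s start; fi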
ingress-cherrypy-test.yml (same manifest as in the ingress controller nginx example above)
Steps
echo "alias kubectl='/usr/bin/k3s kubectl'" >> ~/.bashrc
source ~/.bashrc
sudo sh /etc/rc.d/rc.k3s start
kubectl get nodes
kubectl get deployments --all-namespaces
kubectl get services --all-namespaces
kubectl get pods --all-namespaces
kubectl cluster-info
# install nginx ingress controller
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.4.0/deploy/static/provider/cloud/deploy.yaml
# wait for nginx ingress controller to finish
sleep 120
kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test
kubectl expose deployment cherrypy-test --port=8000 --target-port=8080 --type=ClusterIP
kubectl get pods --all-namespaces
kubectl get services --all-namespaces
kubectl apply -f ingress-cherrypy-test.yml
EXTERNAL_IP=$(/sbin/ip addr show | grep wl | grep inet | awk '{print $2}' | sed 's/\// /g' | awk '{print $1}')
echo $EXTERNAL_IP
sudo sh -c " echo '$EXTERNAL_IP cp.info' >> /etc/hosts "
cat /etc/hosts
kubectl get ingress
curl cp.info
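To tear the experiment down again, the created resources and the hosts entry can be removed; a minimal cleanup sketch (the sed expression assumes the cp.info line added above):

kubectl delete -f ingress-cherrypy-test.yml
kubectl delete service cherrypy-test
kubectl delete deployment cherrypy-test
sudo sed -i '/cp\.info/d' /etc/hosts # drop the hosts entry added earlier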
