K3s cluster setup guide

Mainly a brain dump for my home lab

# Initial HA control-plane install, run from the workstation.
# --tls-san adds the kube-vip VIP (192.168.50.200) to the API server cert;
# traefik/servicelb/local-storage are disabled because we deploy our own below.
# (Reflowed with explicit continuations — the original wrapped mid-quote and
# could not be copy-pasted.)
owner@DESKTOP-0V6SF20:~$ k3sup install --ip=192.168.50.76 --user=root \
    --tls-san=192.168.50.200 --cluster --k3s-channel=stable \
    --k3s-extra-args "--disable=traefik --disable=servicelb --disable=local-storage --node-ip=192.168.50.76" \
    --local-path $HOME/.kube/config --context=k3s-ha --ssh-key /home/owner/.ssh/id_ed25519

# ssh to the master node (as root, matching --user=root above) and deploy the
# kube-vip HA VIP for the control plane.
ssh root@192.168.50.76
kubectl apply -f https://kube-vip.io/manifests/rbac.yaml
# pull the image with k3s's containerd and alias the binary out of the container
ctr image pull docker.io/plndr/kube-vip:v0.5.8
alias kube-vip="ctr run --rm --net-host docker.io/plndr/kube-vip:v0.5.8 vip /kube-vip"
# generate a daemonset manifest (ARP mode, control-plane VIP on eth0) and drop
# it into the k3s auto-deploy directory so k3s applies it on startup
kube-vip manifest daemonset \
    --arp \
    --interface eth0 \
    --address 192.168.50.200 \
    --controlplane \
    --leaderElection \
    --taint \
    --inCluster | tee /var/lib/rancher/k3s/server/manifests/kube-vip.yaml
# back on the workstation: join additional control-plane (HA server) nodes
# through the kube-vip VIP (192.168.50.200), with the same components disabled
owner@DESKTOP-0V6SF20:~$ k3sup join --ip=192.168.50.79 --user=root --k3s-channel stable --server --server-ip 192.168.50.200 --k3s-extra-args "--disable=traefik --disable=servicelb --disable=local-storage --node-ip=192.168.50.79" --ssh-key /home/owner/.ssh/id_ed25519
# then join worker (agent) nodes — same command but without --server
owner@DESKTOP-0V6SF20:~$ k3sup join --ip=192.168.50.84 --user=root --k3s-channel stable --server-ip 192.168.50.200 --k3s-extra-args "--node-ip=192.168.50.84" --ssh-key /home/owner/.ssh/id_ed25519
# system-upgrade-controller for automated k3s upgrades (upgrade Plans are
# deployed separately once the controller is running)
kubectl apply -f https://github.com/rancher/system-upgrade-controller/releases/latest/download/system-upgrade-controller.yaml
-- Tried to use kube-vip for the load balancer too, but the Service got stuck in Pending. My guess is that when I created the HA control plane, the kube-vip manifest command also needed the "--services" flag.
https://kube-vip.io/docs/installation/static/

try guide again? https://devopstales.github.io/kubernetes/k3s-etcd-kube-vip/
# kube-vip cloud provider for Service LoadBalancers; the configmap defines the
# address range it hands out.
# NOTE(review): this is the same 210-220 range given to the MetalLB pool below —
# run either kube-vip-cloud-provider or MetalLB, not both, or they will fight
# over the same addresses.
kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml
kubectl create configmap -n kube-system kubevip --from-literal range-global=192.168.50.210-192.168.50.220
---
# MetalLB as the load balancer (servicelb was disabled at install time)
kubectl create namespace metallb-system
# NOTE(review): the memberlist secret was a requirement of MetalLB <= 0.12;
# the v0.13 native manifests handle this themselves, so this step is probably
# redundant (though harmless) — verify against the MetalLB docs.
kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.7/config/manifests/metallb-native.yaml
# MetalLB address pool for LoadBalancer Services.
# NOTE(review): same 210-220 range as the kubevip configmap above — use only
# one of the two load-balancer providers for this range.
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
  - 192.168.50.210-192.168.50.220
---
# Announce addresses from first-pool on the local network via L2/ARP.
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2advertisement
  namespace: metallb-system
spec:
  ipAddressPools:
  # (sequence indent normalized — was 3 spaces, inconsistent with the
  # flush style used by the IPAddressPool above)
  - first-pool

# deploy storage (NFS or Longhorn). Label the node you want Longhorn deployed on (storage=longhorn)
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.4.0/deploy/longhorn.yaml (better to pull it down locally first to edit the number of replicas)

# registry deploy (update /etc/hosts and /etc/rancher/k3s/registries.yaml on all nodes in advance so HTTPS isn't needed), or import images directly:
# https://cwienczek.com/2020/06/import-images-to-k3s-without-docker-registry/ — also label the target nodes with "node-type: worker"

# Namespace for the private in-cluster image registry.
apiVersion: v1
kind: Namespace
metadata:
  name: docker-registry
  labels:
    name: docker-registry
---
# Persistent storage for registry image data, backed by the Longhorn
# storage class deployed above.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: longhorn-docker-registry-pvc
  namespace: docker-registry
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn
  resources:
    requests:
      storage: 10Gi
---
# Single-replica Docker registry backed by the Longhorn PVC.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: registry
  namespace: docker-registry
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registry
  template:
    metadata:
      labels:
        app: registry
        name: registry
    spec:
      # only schedules on nodes carrying the node-type=worker label — label
      # the target nodes beforehand or the pod will stay Pending
      nodeSelector:
        node-type: worker
      containers:
      - name: registry
        image: registry:2
        env:
        # allow DELETE requests so old image blobs can be garbage-collected
        - name: REGISTRY_STORAGE_DELETE_ENABLED
          value: "true"
        ports:
        - containerPort: 5000
        volumeMounts:
        # registry data lives under the "registry" subdirectory of the PVC
        - name: volv
          mountPath: /var/lib/registry
          subPath: registry
      volumes:
        - name: volv
          persistentVolumeClaim:
            claimName: longhorn-docker-registry-pvc
---
# Expose the registry on a fixed LAN address via the load balancer.
apiVersion: v1
kind: Service
metadata:
  name: registry-service
  namespace: docker-registry
  annotations:
    # MetalLB >= 0.13 prefers this annotation over the deprecated
    # spec.loadBalancerIP field
    metallb.universe.tf/loadBalancerIPs: "192.168.50.210"
spec:
  selector:
    app: registry
  type: LoadBalancer
  ports:
    - name: docker-port
      protocol: TCP
      port: 5000
      targetPort: 5000
  # BUGFIX: was 192.168.50.207, which is OUTSIDE the advertised pool
  # (192.168.50.210-220) — no provider could assign it, leaving the Service
  # stuck in Pending (likely the "pending" symptom noted earlier in this file)
  loadBalancerIP: 192.168.50.210


#/etc/rancher/k3s/registries.yaml
# k3s containerd mirror config: pulls for registry.testbed.lan are redirected
# to the in-cluster registry over plain HTTP (port 5000), avoiding TLS setup.
# NOTE(review): copy this file to every node; k3s appears to read it at
# startup, so restart k3s/k3s-agent after editing — verify against k3s docs.
mirrors:
  "registry.testbed.lan":
    endpoint:
      - "http://registry.testbed.lan:5000"