Installation of Kubernetes - k3s

Master

# Master node parameters: advertised node IP and the shared cluster join token
# (replace "TOKEN" with a real secret; agents must use the same value).
IP4=192.168.20.138
TOKEN="TOKEN"

# Install k3s in server (control-plane) mode.
#   --tls-san             adds the IP to the API server certificate SANs
#   --cluster-cidr/--service-cidr  pod and service network ranges
#   --embedded-registry   enables the embedded distributed registry mirror
#                         (paired with /etc/rancher/k3s/registries.yaml below)
#   --disable servicelb/traefik    replaced later by kube-vip / own ingress
#   --write-kubeconfig-mode=0644   makes k3s.yaml world-readable — convenient
#                         but insecure (source of the Helm warnings below)
curl -sfL https://get.k3s.io | sh -s - server \
--node-ip="${IP4}" \
--node-name="k3s-master" \
--tls-san="${IP4}" \
--token="${TOKEN}" \
--cluster-cidr="10.44.0.0/16" \
--service-cidr="10.45.0.0/16" \
--embedded-registry \
--disable servicelb \
--disable traefik \
--write-kubeconfig-mode="0644"

Worker

# Worker node parameters: this node's IP; TOKEN must match the server's token.
IP4=192.168.20.139
TOKEN="TOKEN"

# Install k3s in agent (worker) mode, joining the master's API server
# (192.168.20.138 is the master IP configured above).
curl -sfL https://get.k3s.io | sh -s - agent \
--token="${TOKEN}" \
--server "https://192.168.20.138:6443" \
--node-ip="${IP4}" \
--node-name="k3s-worker"

Master Node

[rocky@k3s-master ~]$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
k3s-master Ready control-plane,master 15m v1.30.6+k3s1
k3s-worker Ready 10m v1.30.6+k3s1

Taint and label

[rocky@k3s-master ~]$ kubectl taint nodes k3s-master node-role.kubernetes.io/master=true:NoSchedule
node/k3s-master tainted

[rocky@k3s-master ~]$ kubectl label node k3s-worker app=myapp
node/k3s-worker labeled

[rocky@k3s-master ~]$ kubectl create ns ugvcl
namespace/ugvcl created

[rocky@k3s-master ~]$ kubectl get po -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-7b98449c4-92jmf 1/1 Running 0 16m
kube-system local-path-provisioner-595dcfc56f-fcpv7 1/1 Running 0 16m
kube-system metrics-server-cdcc87586-mb8sq 1/1 Running 0 16m

Install helm

# Prerequisites for the Helm install script and later steps.
# (fixed: 'instal' -> 'install'; dnf rejects the misspelled subcommand)
sudo dnf -y install unzip git
# Fetch the official Helm 3 install script, make it executable, then run it.
# (curl -o does not set the execute bit, so chmod is required before ./get_helm.sh)
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
chmod 700 get_helm.sh
./get_helm.sh

To use the inbuilt registries

# /etc/rancher/k3s/registries.yaml — with --embedded-registry enabled, a "*"
# mirror entry routes pulls for every upstream registry through the embedded
# registry. NOTE: "*" must be indented as a key UNDER 'mirrors:'; at the same
# indentation level it would be a second top-level key and the config is invalid.
vi /etc/rancher/k3s/registries.yaml
mirrors:
  "*":

Install AWS CLI to pull images

# Download and install the AWS CLI v2 bundle (Linux x86_64).
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
# -q quiet, -o overwrite existing files without prompting.
unzip -q -o awscliv2.zip
sudo ./aws/install

You can now run: /usr/local/bin/aws --version

[rocky@k3s-master ~]$ aws configure
AWS Access Key ID [None]: AK5 AWS Secret Access Key [None]: N***2
Default region name [None]: ap-south-1
Default output format [None]: json

Pull Images

# Pull the application images — presumably via the AWS CLI configured above
# (see get_images.sh for the actual registry/repositories; not shown here).
sh get_images.sh

Check images

[rocky@k3s-master ~]$ sudo /usr/local/bin/ctr -a /var/run/k3s/containerd/containerd.sock -n k8s.io image check | grep -i local | awk '{print $1}'

Worker Node

Create PV and VG

[root@k3s-worker ~]# pvcreate /dev/nvme0n2p1
Physical volume "/dev/nvme0n2p1" successfully created.

[root@k3s-worker ~]# vgcreate data_vg /dev/nvme0n2p1
Volume group "data_vg" successfully created

[root@k3s-worker ~]# sudo modprobe dm_thin_pool && sudo lsmod | grep dm_thin_pool
dm_thin_pool 118784 0
dm_persistent_data 143360 1 dm_thin_pool
dm_bio_prison 32768 1 dm_thin_pool
dm_mod 249856 14 dm_thin_pool,dm_log,dm_mirror,dm_bufio

Configure OpenEBS

# Register the OpenEBS chart repository and refresh the local chart index.
helm repo add openebs https://openebs.github.io/openebs

helm repo update
[rocky@k3s-master ~]$ helm install openebs --namespace openebs openebs/openebs --create-namespace
WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /etc/rancher/k3s/k3s.yaml
WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /etc/rancher/k3s/k3s.yaml
NAME: openebs
LAST DEPLOYED: Sun Nov 24 06:48:49 2024
NAMESPACE: openebs
STATUS: deployed
REVISION: 1
NOTES:
Successfully installed OpenEBS.

Check the status by running: kubectl get pods -n openebs
The default values will install both Local PV and Replicated PV. However,
the Replicated PV will require additional configuration to be fuctional.
The Local PV offers non-replicated local storage using 3 different storage
backends i.e HostPath, LVM and ZFS, while the Replicated PV provides one replicated highly-available
storage backend i.e Mayastor.

For more information,
- view the online documentation at https://openebs.io/docs
- connect with an active community on our Kubernetes slack channel.
        - Sign up to Kubernetes slack: https://slack.k8s.io
        - #openebs channel: https://kubernetes.slack.com/messages/openebs
[rocky@k3s-master ~]$ helm upgrade openebs --namespace openebs openebs/openebs -f /home/rockylinux/deployment/openebs_helm_value.yaml
WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /etc/rancher/k3s/k3s.yaml
WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /etc/rancher/k3s/k3s.yaml

Release "openebs" has been upgraded. Happy Helming!
NAME: openebs
LAST DEPLOYED: Sun Nov 24 02:56:57 2024
NAMESPACE: openebs
STATUS: deployed
REVISION: 2
TEST SUITE: None
NOTES:
Successfully installed OpenEBS.

Check the status by running: kubectl get pods -n openebs

The default values will install both Local PV and Replicated PV. However,
the Replicated PV will require additional configuration to be fuctional.
The Local PV offers non-replicated local storage using 3 different storage
backends i.e HostPath, LVM and ZFS, while the Replicated PV provides one replicated highly-available
storage backend i.e Mayastor.

For more information,
- view the online documentation at https://openebs.io/docs
- connect with an active community on our Kubernetes slack channel.
        - Sign up to Kubernetes slack: https://slack.k8s.io
        - #openebs channel: https://kubernetes.slack.com/messages/openebs
kubectl apply -f deployment/openebs.yaml
[rocky@k3s-master ~]$ kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-7b98449c4-92jmf 1/1 Running 0 51m
kube-system local-path-provisioner-595dcfc56f-fcpv7 1/1 Running 0 51m
kube-system metrics-server-cdcc87586-mb8sq 1/1 Running 0 51m
openebs openebs-etcd-2 0/1 Terminating 0 6m28s
openebs openebs-localpv-provisioner-6bd66f8598-sqxjb 1/1 Running 0 63s
openebs openebs-lvm-localpv-controller-6bbd64786-5bsdp 5/5 Running 0 6m30s
openebs openebs-lvm-localpv-node-kkx8x 2/2 Running 0 6m29s
openebs openebs-lvm-localpv-node-kq7xt 2/2 Running 0 6m29s

[rocky@k3s-master ~]$ kubectl apply -f deployment/openebs.yaml
storageclass.storage.k8s.io/openebs-database created
storageclass.storage.k8s.io/openebs-app created

[rocky@k3s-master ~]$ kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
local-path (default) rancher.io/local-path Delete WaitForFirstConsumer false 52m
openebs-app local.csi.openebs.io Delete WaitForFirstConsumer true 46s
openebs-database local.csi.openebs.io Delete WaitForFirstConsumer true 46s
openebs-hostpath openebs.io/local Delete WaitForFirstConsumer false 7m38s

Ingress Controller

[rocky@k3s-master ~]$ kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml
serviceaccount/kube-vip-cloud-controller created
clusterrole.rbac.authorization.k8s.io/system:kube-vip-cloud-controller-role created
clusterrolebinding.rbac.authorization.k8s.io/system:kube-vip-cloud-controller-binding created
deployment.apps/kube-vip-cloud-provider created

[rocky@k3s-master ~]$ kubectl apply -f https://kube-vip.io/manifests/rbac.yaml
serviceaccount/kube-vip created
clusterrole.rbac.authorization.k8s.io/system:kube-vip-role created
clusterrolebinding.rbac.authorization.k8s.io/system:kube-vip-binding created

[rocky@k3s-master ~]$ kubectl apply -f deployment/kubevip_controlplane_master.yaml
daemonset.apps/kube-vip-ds created

Deploy kafka

[rocky@k3s-master ~]$ helm install kafka-operator oci://quay.io/strimzi-helm/strimzi-kafka-operator --set watchAnyNamespace=true --version 0.42.0
WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /etc/rancher/k3s/k3s.yaml
WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /etc/rancher/k3s/k3s.yaml
Pulled: quay.io/strimzi-helm/strimzi-kafka-operator:0.42.0
Digest: sha256:3b7881cec579930c731d38c541c2a2d8c6079c633e24ad89c2b30650caed2b71
NAME: kafka-operator
LAST DEPLOYED: Sun Nov 24 03:03:55 2024
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
Thank you for installing strimzi-kafka-operator-0.42.0
To create a Kafka cluster refer to the following documentation.

https://strimzi.io/docs/operators/latest/deploying.html#deploying-cluster-operator-helm-chart-str
[rocky@k3s-master ~]$ kubectl apply -f deployment/kafka.yaml

Deploy DB

[rocky@k3s-master ~]$ helm repo add postgres-operator-charts https://opensource.zalando.com/postgres-operator/charts/postgres-operator
"postgres-operator-charts" has been added to your repositories
[rocky@k3s-master ~]$ helm install postgres-operator postgres-operator-charts/postgres-operator --namespace postgres-operator --version 1.11.0
NAME: postgres-operator
LAST DEPLOYED: Sun Nov 24 04:26:07 2024
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None

NOTES:
To verify that postgres-operator has started, run:
kubectl --namespace=default get pods -l "app.kubernetes.io/name=postgres-operator"

[rocky@k3s-master deployment]$ kubectl apply -f db.yaml
postgresql.acid.zalan.do/postgresql configured


Deploy Minio

[rocky@k3s-master minio]$ helm repo add minio https://operator.min.io/
"minio" has been added to your repositories

[rocky@k3s-master minio]$ helm install minio-operator --namespace ugvcl-minio minio/operator --version v5.0.15
WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /etc/rancher/k3s/k3s.yaml
WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /etc/rancher/k3s/k3s.yaml
NAME: minio-operator
LAST DEPLOYED: Mon Nov 25 01:30:55 2024
NAMESPACE: ugvcl-minio
STATUS: deployed
REVISION: 1
TEST SUITE: None

NOTES:

1. Get the JWT for logging in to the console:

# Create a long-lived ServiceAccount token Secret for the console-sa account,
# then decode it to get the console login JWT.
# NOTE(review): the transcript lost the here-doc redirection — without '<<EOF'
# the manifest is never fed to kubectl and the YAML lines run as shell.
kubectl apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: console-sa-secret
  namespace: ugvcl-minio
  annotations:
    kubernetes.io/service-account.name: console-sa
type: kubernetes.io/service-account-token
EOF
kubectl -n ugvcl-minio get secret console-sa-secret -o jsonpath="{.data.token}" | base64 --decode

2. Get the Operator Console URL by running these commands:

kubectl --namespace ugvcl-minio port-forward svc/console 9090:9090
echo "Visit the Operator Console at http://127.0.0.1:9090"
[rocky@k3s-master minio]$ kubectl get pods -n ugvcl-minio
NAME READY STATUS RESTARTS AGE
console-85fb7bd57d-fv5nb 1/1 Running 0 48s
minio-operator-5b9cc64b4b-f69kr 1/1 Running 0 48s
minio-operator-5b9cc64b4b-lm8v8 1/1 Running 0 47s

[rocky@k3s-master minio]$ kubectl get svc -n ugvcl-minio
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
console ClusterIP 10.45.226.138 9090/TCP,9443/TCP 84s
operator ClusterIP 10.45.155.36 4221/TCP 83s
sts ClusterIP 10.45.151.70 4223/TCP 83s

[rocky@k3s-master minio]$ kubectl apply -f minio-access-create.yaml -f minio-access-create-myapp-bucket.yaml -f minio-instance-create.yaml
secret/storage-configuration created
secret/myapp-minio-user created
tenant.minio.min.io/minio created

[rocky@k3s-master ~]$ kubectl describe svc -n ugvcl-minio console
Name: console
Namespace: ugvcl-minio
Labels: app.kubernetes.io/managed-by=Helm
app.kubernetes.io/version=v5.0.15
helm.sh/chart=operator-5.0.15
Annotations: meta.helm.sh/release-name: minio-operator
meta.helm.sh/release-namespace: ugvcl-minio
Selector: app.kubernetes.io/instance=minio-operator-console,app.kubernetes.io/name=operator
Type: ClusterIP #Change this to NodePort by using kubectl edit svc
IP Family Policy: SingleStack
IP Families: IPv4
IP: 10.45.91.24
IPs: 10.45.91.24
Port: http 9090/TCP
TargetPort: 9090/TCP
Endpoints: 10.44.1.45:9090
Port: https 9443/TCP
TargetPort: 9443/TCP
Endpoints: 10.44.1.45:9443
Session Affinity: None
Events:

Option 2 to change the service from ClusterIP to NodePort

# Switch the console Service to NodePort so it is reachable on the node IP.
kubectl patch svc console -n ugvcl-minio -p '{"spec": {"type": "NodePort"}}'

Get the exposed port

[rocky@k3s-master ~]$ kubectl get svc -n ugvcl-minio

[rocky@k3s-master ~]$ kubectl get svc -n ugvcl-minio
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
console NodePort 10.45.226.138 9090:31268/TCP,9443:31298/TCP 7m17s
operator ClusterIP 10.45.155.36 4221/TCP 7m16s
sts ClusterIP 10.45.151.70 4223/TCP 7m16s

Generate JWT for minio

[rocky@k3s-master ~]$ kubectl -n ugvcl-minio get secret console-sa-secret -o jsonpath="{.data.token}" | base64 --decode
eyJhbGciOiJSUzI1NiIsImtpZCI6IjBDa2xNbzAxckZKZkpyUGpQMVJxY1JYNEd4NVN2T19wTW1mNThzbWVYY0kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJ1Z3ZjbC1taW5pbyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJjb25zb2xlLXNhLXNlY3JldCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJjb25zb2xlLXNhIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiNTBjN2ZiMzMtYzZjZS00YjBhLWFjMTUtNmQ3YmQ3YjAzMTllIiwic3Vi

http://192.168.20.138:31268/login