#!/usr/bin/env bash
# Copyright (c) 2025 masezou. All rights reserved.
#########################################################
# Print a message in bold.
# printf '%b' interprets backslash escapes in the argument, matching the old
# `echo -e` behavior while avoiding echo's non-portable option handling.
bold_msg() {
	printf '\033[1m%b\033[0m\n' "$1"
}
# Print a message in bold green.
# printf '%b' keeps the `echo -e` escape semantics without echo's portability pitfalls.
bold_green_msg() {
	printf '\033[1m\033[32m%b\033[0m\n' "$1"
}
# Print a message in bold green (used for success/OK lines).
# printf '%b' keeps the `echo -e` escape semantics without echo's portability pitfalls.
green_msg() {
	printf '\033[1;32m%b\033[0m\n' "$1"
}

# Print a message in bold red (used for failures).
# Kept on stdout for backward compatibility with existing call sites.
red_msg() {
	printf '\033[1;31m%b\033[0m\n' "$1"
}

# Print a message in bold yellow (used for warnings/notices).
# printf '%b' keeps the `echo -e` escape semantics without echo's portability pitfalls.
yellow_msg() {
	printf '\033[1;33m%b\033[0m\n' "$1"
}

# Check distribution: only RHEL-family distros at major version 8 or 9 are supported.
source /etc/os-release
SUPPORTED_DISTROS=("Red Hat Enterprise Linux" "CentOS Linux" "Rocky Linux" "AlmaLinux")
SUPPORTED_MAJOR_VERSIONS=("8" "9")
is_supported_distro=false
for distro in "${SUPPORTED_DISTROS[@]}"; do
	if [[ "$NAME" == "$distro" ]]; then
		is_supported_distro=true
		break
	fi
done
# VERSION_ID may be e.g. "9.3"; keep only the major component.
major_version=$(echo "$VERSION_ID" | cut -d. -f1)
is_supported_version=false
for version in "${SUPPORTED_MAJOR_VERSIONS[@]}"; do
	if [[ "$major_version" == "$version" ]]; then
		is_supported_version=true
		break
	fi
done
if ! $is_supported_distro || ! $is_supported_version; then
	# Fixed: the previous message ("It seemes small resource.") described the
	# wrong failure; this branch means the distro/version is unsupported.
	red_msg "Unsupported distribution or version: ${NAME} ${VERSION_ID} (supported: 8.x/9.x of ${SUPPORTED_DISTROS[*]})"
	exit 1
fi
bold_green_msg "Supported"

all_pass=true

# Architecture: amd64 (x86_64) or arm64 (aarch64) only.
arch=$(uname -m)
if [[ "$arch" == "x86_64" || "$arch" == "aarch64" ]]; then
    green_msg "[Architecture] Required: amd64(x86_64) or arm64(aarch64) | Current: $arch -> OK"
else
    red_msg "[Architecture] Required: amd64(x86_64) or arm64(aarch64) | Current: $arch -> FAIL"
    all_pass=false
fi
# CPU: at least 4 cores.
cpu_cores=$(nproc)
if (( cpu_cores >= 4 )); then
    green_msg "[CPU] Required: 4 cores or more | Current: $cpu_cores cores -> OK"
else
    red_msg "[CPU] Required: 4 cores or more | Current: $cpu_cores cores -> FAIL"
    all_pass=false
fi
# Memory: at least 8 GB. One awk call replaces the former grep|awk pipeline.
mem_total_kb=$(awk '/^MemTotal:/ {print $2}' /proc/meminfo)
mem_total_gb=$(( mem_total_kb / 1024 / 1024 ))  # KB -> GB conversion
if (( mem_total_gb >= 8 )); then
    green_msg "[Memory] Required: 8GB or more | Current: ${mem_total_gb}GB -> OK"
else
    red_msg "[Memory] Required: 8GB or more | Current: ${mem_total_gb}GB -> FAIL"
    all_pass=false
fi
# Disk: at least 50 GB free on /. df --output=avail reports KB; the former code
# stored the KB value in disk_avail_gb, which was misleading.
disk_avail_kb=$(df --output=avail / | tail -n 1 | awk '{print $1}')
disk_avail_gb=$(( disk_avail_kb / 1024 / 1024 ))  # KB -> GB conversion
if (( disk_avail_gb >= 50 )); then
    green_msg "[Disk Space] Required: 50GB free | Current: ${disk_avail_gb}GB -> OK"
else
    red_msg "[Disk Space] Required: 50GB free | Current: ${disk_avail_gb}GB -> FAIL"
    all_pass=false
fi
if $all_pass; then
    green_msg "Continue..."
else
    red_msg "System does not meet the requirements."
    exit 1
fi

# Check hostname is lower case or not.
# Kubernetes node names must be lower case; normalize the hostname if needed.
current_hostname=$(hostname)
lower_hostname=$(echo "$current_hostname" | tr 'A-Z' 'a-z')
if [[ "$current_hostname" != "$lower_hostname" ]]; then
    # This is an informational notice, not a failure: use yellow, not red.
    yellow_msg "Changing hostname from $current_hostname to $lower_hostname"
    hostnamectl set-hostname "$lower_hostname"
    # NOTE: the old hostname is used as a literal sed pattern; dots in it match
    # any character, which is acceptable for hostname strings in /etc/hosts.
    sed -i "s/$current_hostname/$lower_hostname/g" /etc/hosts
    systemctl restart systemd-hostnamed
    green_msg "Hostname changed successfully."
fi


# Raise inotify limits commonly required by Kubernetes workloads.
tee /etc/sysctl.d/10-k8s.conf <<EOF
fs.inotify.max_user_instances = 1024
fs.inotify.max_user_watches = 1048576
EOF
sysctl -p /etc/sysctl.d/10-k8s.conf
sysctl -a | grep "fs.inotify"
# Kubernetes requires swap to be off.
swapoff -a
# Comment out only active (not already commented) swap entries. The previous
# pattern '/swap/' also matched commented lines, stacking '#' on every rerun.
sed -i '/^[^#].*swap/s/^/#/' /etc/fstab
systemctl disable firewalld --now
systemctl stop firewalld

bold_green_msg "Install local registry"
# Run a plain-HTTP registry:2 container via podman, persisting image data
# under ${REGISTRY} on the host.
REGISTRY=/disk/registry
dnf -y install podman
podman pull registry:2
mkdir -p ${REGISTRY}
# NOTE(review): the container has no name and no --restart policy, so the
# registry will not come back automatically after a reboot — confirm intended.
podman run --privileged -d -p 5000:5000 -v ${REGISTRY}:/var/lib/registry registry:2
podman ps

bold_green_msg "Install k3s"
# Install k3s from the v1.32 release channel with a world-readable kubeconfig.
curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=v1.32 K3S_KUBECONFIG_MODE="644" sh -
# Give the bundled addon pods time to be created before waiting on readiness;
# `kubectl wait` fails immediately if no pod matches the selector yet.
sleep 20
kubectl -n kube-system wait pod -l k8s-app=kube-dns --for condition=Ready --timeout 60s
kubectl -n kube-system wait pod -l k8s-app=metrics-server --for condition=Ready --timeout 60s
kubectl -n kube-system wait pod -l app=local-path-provisioner --for condition=Ready --timeout 60s
kubectl -n kube-system wait pod -l svccontroller.k3s.cattle.io/svcnamespace=kube-system --for condition=Ready --timeout 60s
kubectl -n kube-system wait pod -l app.kubernetes.io/instance=traefik-kube-system --for condition=Ready --timeout 60s

# Give the (single) node a conventional worker role label.
kubectl label node $(hostname) node-role.kubernetes.io/worker=worker

# Detect the primary ethernet device and its IPv4 address, then register the
# local registry as a plain-HTTP mirror for k3s/containerd.
ETHDEV=$(nmcli -t -f DEVICE,TYPE device status | awk -F: '$2 == "ethernet" {print $1; exit}')
# Field 4 of `ip -o addr` is "ADDR/PREFIX"; awk handles the variable-width
# whitespace that broke the previous `cut -d' ' -f7` approach.
LOCALIPADDR=$(ip -4 -o addr show "$ETHDEV" | awk '{print $4}' | cut -d/ -f1 | head -n1)
REGISTRYHOST=${LOCALIPADDR}
REGISTRYPORT=5000
REGISTRY=${REGISTRYHOST}:${REGISTRYPORT}
REGISTRYURL=http://${REGISTRY}

mkdir -p /etc/rancher/k3s
cat <<EOF >/etc/rancher/k3s/registries.yaml
mirrors:
  "${REGISTRY}":
    endpoint:
      - "${REGISTRYURL}"
EOF
# Restart k3s so containerd picks up the registry mirror, then wait for the
# addons to come back.
systemctl restart k3s
kubectl -n kube-system wait pod -l k8s-app=kube-dns --for condition=Ready --timeout 60s
kubectl -n kube-system wait pod -l k8s-app=metrics-server --for condition=Ready --timeout 60s
kubectl -n kube-system wait pod -l app=local-path-provisioner --for condition=Ready --timeout 60s
kubectl -n kube-system wait pod -l svccontroller.k3s.cattle.io/svcnamespace=kube-system --for condition=Ready --timeout 60s
kubectl -n kube-system wait pod -l app.kubernetes.io/instance=traefik-kube-system --for condition=Ready --timeout 60s

# Report only actual ip address from VMware Tools
# Hide CNI/flannel/podman virtual interfaces from guestinfo so vCenter shows
# only the real NIC address.
if [ -d /etc/vmware-tools ]; then
	cat <<EOF >/etc/vmware-tools/tools.conf
[guestinfo]
exclude-nics=cni*,flannel*,podman*,*@if2
EOF
	systemctl stop vmtoolsd
	systemctl start vmtoolsd
fi

# Copy the k3s kubeconfig for root, point it at the node's address instead of
# 127.0.0.1, and install bash completions for the k8s CLI tools.
mkdir -p ~/.kube
cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
# Column 6 of `kubectl get node -o wide` is the node INTERNAL-IP.
PUBLICIP=$(kubectl get node -o wide | grep control | awk '{print $6}')
sed -i -e "s/127.0.0.1/${PUBLICIP}/g" ~/.kube/config
chmod 400 ~/.kube/config
dnf -y install bash-completion.noarch
k3s completion bash >/etc/bash_completion.d/k3s
# NOTE(review): sourcing here only affects this non-interactive script shell;
# interactive logins pick the completions up from /etc/bash_completion.d.
source /etc/bash_completion.d/k3s
kubectl completion bash >/etc/bash_completion.d/kubectl
source /etc/bash_completion.d/kubectl
crictl completion bash >/etc/bash_completion.d/crictl
source /etc/bash_completion.d/crictl
# Fetch the ctr completion script matching the running containerd version.
CONTAINERDVER=$(kubectl get node -o wide | grep -o 'containerd://[0-9.]*' | sed 's/containerd:\/\///' | sort -u | head -n1)
curl --retry 10 --retry-delay 3 --retry-connrefused -sS https://raw.githubusercontent.com/containerd/containerd/refs/tags/v${CONTAINERDVER}/contrib/autocomplete/ctr -o /etc/bash_completion.d/ctr
source /etc/bash_completion.d/ctr

bold_green_msg "Deploy local registry frontend"
# Re-derive the registry endpoint so this section also works standalone.
ETHDEV=$(nmcli -t -f DEVICE,TYPE device status | awk -F: '$2 == "ethernet" {print $1; exit}')
# awk handles the variable-width whitespace that broke `cut -d' ' -f7`.
LOCALIPADDR=$(ip -4 -o addr show "$ETHDEV" | awk '{print $4}' | cut -d/ -f1 | head -n1)
REGISTRYHOST=${LOCALIPADDR}
REGISTRYPORT=5000
REGISTRY=${REGISTRYHOST}:${REGISTRYPORT}
REGISTRYURL=http://${REGISTRY}

# Mirror the registry-frontend image into the local registry, then drop the
# local copies. Fixed: `podman images rm` only lists images; removal is
# `podman rmi` (alias of `podman image rm`).
podman image pull docker.io/ekazakov/docker-registry-frontend:latest
podman tag docker.io/ekazakov/docker-registry-frontend:latest "${REGISTRY}/docker-registry-frontend:latest"
podman push --tls-verify=false "${REGISTRY}/docker-registry-frontend:latest"
podman rmi docker.io/ekazakov/docker-registry-frontend:latest
podman rmi "${REGISTRY}/docker-registry-frontend:latest"

# NOTE(review): "registoryfe" looks like a typo for "registryfe"; kept as-is
# because the name is used consistently below and may be referenced elsewhere.
kubectl create ns registoryfe
# Deployment running the frontend image from the local registry, pointed at
# the registry host/port via environment variables.
cat <<EOF | kubectl apply -n registoryfe -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: registryfe
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registryfe
  template:
    metadata:
      labels:
        app: registryfe
    spec:
      containers:
      - name: registryfe
        image: ${REGISTRY}/docker-registry-frontend:latest
        env:
        - name: ENV_DOCKER_REGISTRY_HOST
          value: "${REGISTRYHOST}"
        - name: ENV_DOCKER_REGISTRY_PORT
          value: "${REGISTRYPORT}"
        ports:
        - containerPort: 80
          name: http
EOF
# Expose the frontend on NodePort 30000.
cat <<EOF | kubectl apply -n registoryfe -f -
apiVersion: v1
kind: Service
metadata:
  name: registryfe
spec:
  selector:
    app: registryfe
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
      nodePort: 30000
  type: NodePort
EOF
kubectl -n registoryfe get all

bold_green_msg "Install External snapshotter"
SNAPSHOTTERVER=8.2.0
# Apply VolumeSnapshot CRDs
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v${SNAPSHOTTERVER}/client/config/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v${SNAPSHOTTERVER}/client/config/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v${SNAPSHOTTERVER}/client/config/crd/snapshot.storage.k8s.io_volumesnapshots.yaml

# Create the Snapshot Controller in kube-system (the upstream manifests target
# the 'default' namespace). Streaming curl -> sed -> kubectl avoids leaving
# temporary files in the working directory, even on failure.
curl --retry 10 --retry-delay 3 --retry-connrefused -sSL https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v${SNAPSHOTTERVER}/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml |
    sed -e "s/namespace: default/namespace: kube-system/g" | kubectl create -f -
curl --retry 10 --retry-delay 3 --retry-connrefused -sSL https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v${SNAPSHOTTERVER}/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml |
    sed -e "s/namespace: default/namespace: kube-system/g" | kubectl create -f -
kubectl -n kube-system wait pod -l app.kubernetes.io/name=snapshot-controller --for condition=Ready --timeout 60s

bold_green_msg "Install helm"
# Install helm via the official installer script unless it is already present.
# `command -v` is the portable existence check; the message now uses the
# script's own green_msg helper instead of raw escape codes.
if command -v helm >/dev/null 2>&1; then
	green_msg "helm OK."
else
	curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
	chmod 700 get_helm.sh
	./get_helm.sh
	rm -f get_helm.sh
fi
helm version
helm completion bash >/etc/bash_completion.d/helm
source /etc/bash_completion.d/helm

bold_green_msg "Install Longhorn"
# Longhorn prerequisites: NFS/iSCSI client tools plus the iscsi_tcp/dm_crypt
# kernel modules, loaded now and on every boot.
dnf -y install nfs-utils jq iscsi-initiator-utils cryptsetup
modprobe iscsi_tcp
modprobe dm_crypt
echo "iscsi_tcp" >/etc/modules-load.d/iscsi_tcp.conf
echo "dm_crypt" >/etc/modules-load.d/dm_crypt.conf
systemctl restart iscsid.service
LONGHORNVER=1.8.1
# Download the longhornctl build matching this machine. The old code always
# fetched the amd64 binary even though the script supports aarch64.
case "$(uname -m)" in
    aarch64) LHCTLARCH=arm64 ;;
    *) LHCTLARCH=amd64 ;;
esac
curl -sSfL -o longhornctl https://github.com/longhorn/cli/releases/download/v${LONGHORNVER}/longhornctl-linux-${LHCTLARCH}
chmod +x ./longhornctl
./longhornctl --kube-config /etc/rancher/k3s/k3s.yaml check preflight
helm repo add longhorn https://charts.longhorn.io
helm repo update
# Build an sslip.io hostname (<host>.<ip-with-dashes>.sslip.io) for the ingress.
PUBLICIP=$(kubectl get nodes -o jsonpath='{.items[*].metadata.annotations.flannel\.alpha\.coreos\.com\/public-ip}')
PUBLICIP_HYPHENATED=${PUBLICIP//./-}
LOWERCASE_HOSTNAME=$(tr 'A-Z' 'a-z' </etc/hostname)
DNSDOMAINNAME="${LOWERCASE_HOSTNAME}.${PUBLICIP_HYPHENATED}.sslip.io"
helm install longhorn longhorn/longhorn --namespace longhorn-system --set persistence.defaultClassReplicaCount=1 --set defaultSettings.defaultReplicaCount=1 --set defaultSettings.replicaZoneSoftAntiAffinity=true --set longhornUI.replicas=1 --set ingress.enabled=true --set ingress.host=longhorn.${DNSDOMAINNAME} --create-namespace --version $LONGHORNVER --wait
sleep 60
kubectl -n longhorn-system wait pod -l app=csi-attacher --for condition=Ready --timeout 300s
kubectl -n longhorn-system wait pod -l app=csi-provisioner --for condition=Ready --timeout 300s
kubectl -n longhorn-system wait pod -l app=csi-resizer --for condition=Ready --timeout 300s
kubectl -n longhorn-system wait pod -l app=csi-snapshotter --for condition=Ready --timeout 300s
kubectl label node $(cat /etc/hostname) topology.kubernetes.io/zone=$(cat /etc/hostname)

# Register a default VolumeSnapshotClass backed by the Longhorn CSI driver so
# VolumeSnapshots work without specifying a class explicitly.
cat <<EOF | kubectl apply -f -
kind: VolumeSnapshotClass
apiVersion: snapshot.storage.k8s.io/v1
metadata:
  annotations:
    snapshot.storage.kubernetes.io/is-default-class: "true"
  name: longhorn-snapshot-vsc
driver: driver.longhorn.io
deletionPolicy: Delete
parameters:
  type: snap
EOF
kubectl get volumesnapshotclasses

# Demote the bundled local-path StorageClass so Longhorn becomes the default.
# NOTE(review): this relies on k3s auto-applying manifests placed in this
# directory, with the custom copy overriding the is-default-class annotation —
# confirm against the k3s version in use.
cp /var/lib/rancher/k3s/server/manifests/local-storage.yaml /var/lib/rancher/k3s/server/manifests/custom-local-storage.yaml
sed -i -e "s/storageclass.kubernetes.io\/is-default-class: \"true\"/storageclass.kubernetes.io\/is-default-class: \"false\"/g" /var/lib/rancher/k3s/server/manifests/custom-local-storage.yaml
#kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
sleep 20
kubectl get sc

# Fixed: "traefilk" typo in the user-facing message.
bold_green_msg "setup traefik dashboard"
# HelmChartConfig customizing the k3s-bundled Traefik chart: enable the
# dashboard ingressRoute and access logs, expose the traefik port (8080), and
# pre-declare host ports for MySQL/PostgreSQL passthrough.
cat <<'EOF' >/var/lib/rancher/k3s/server/manifests/traefik-config.yaml
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
  name: traefik
  namespace: kube-system
spec:
  valuesContent: |-
    ingressRoute:
      dashboard:
        enabled: true
      healthcheck:
        enabled: true
        entryPoints: ["traefik", "web", "websecure"]
    logs:
      access:
        enabled: true
    service:
      spec:
        externalTrafficPolicy: Local
    ports:
      traefik:
        port: 8080
        expose:
          default: true
      mysql:
        expose: false
        #exposedPort: 3306
        hostPort: 3306
        port: 3306
        protocol: TCP
      pgsql:
        expose: false
        #exposedPort: 5432
        hostPort: 5432
        port: 5432
        protocol: TCP
    providers:
      kubernetesCRD:
        allowCrossNamespace: true
EOF

bold_green_msg "Docker CLI"
# Install Docker via the official convenience script, then remove the
# downloaded installer (it was previously left behind in the CWD).
curl -fsSL https://get.docker.com -o get-docker.sh
sh get-docker.sh
rm -f get-docker.sh
# Let the docker daemon talk to the local plain-HTTP registry.
cat <<EOF >/etc/docker/daemon.json
{"insecure-registries": ["${REGISTRY}"]}
EOF
systemctl restart docker

# Final summary: cluster state plus the URLs of the deployed web UIs.
echo
echo
echo "Setup was completed."
kubectl cluster-info
kubectl config get-contexts
kubectl get node -o wide
kubectl get pod -A
kubectl get svc -A
kubectl get sc

bold_msg "Local registry frontend"
echo "http://${LOCALIPADDR}:30000"
echo
bold_msg "Longhorn Dashboard"
echo "http://$(kubectl -n longhorn-system get ingress longhorn-ingress -o jsonpath='{.spec.rules[0].host}')"
echo
# Fixed: "Treafik" typo in the user-facing heading.
bold_msg "Traefik Dashboard"
echo "http://${PUBLICIP}:8080/dashboard/"