Set up the Kubernetes 1.26.3 Dashboard plugin.

Installing Kubernetes Dashboard

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
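
The manifest creates a kubernetes-dashboard namespace containing the dashboard Deployment and Service. To confirm the objects applied cleanly, you can list everything in that namespace:

kubectl get all -n kubernetes-dashboard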

Setting up the Kubernetes Dashboard

kubectl edit service/kubernetes-dashboard -n kubernetes-dashboard
  1. Once the file opens, change the service type from ClusterIP to NodePort and save the file, as shown below. By default the service is only reachable from inside the cluster (ClusterIP); changing it to NodePort exposes it on a port of every node.

NOTE: Setting the service type to NodePort allows clients both inside and outside the cluster to access the service.

In the code snippet below, the Kubernetes dashboard service listens on TCP port 443 and forwards traffic to the dashboard pod's TCP port 8443 (targetPort); NodePort 32170 exposes the service on each node.

apiVersion: v1
kind: Service
metadata:
  creationTimestamp: "2023-05-29T05:39:36Z"
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
  resourceVersion: "29806"
  uid: 86cc0e94-20e3-4d3a-a508-7e36c0e717ea
spec:
  clusterIP: 10.103.62.243
  clusterIPs:
  - 10.103.62.243
  externalTrafficPolicy: Cluster
  internalTrafficPolicy: Cluster
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - nodePort: 32170
    port: 443
    protocol: TCP
    targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  sessionAffinity: None
  type: NodePort
status:
  loadBalancer: {}
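
If you prefer not to use the interactive kubectl edit, the same change can be made non-interactively with kubectl patch; this is a one-liner sketch, and Kubernetes auto-assigns the NodePort unless you specify one:

kubectl patch service kubernetes-dashboard -n kubernetes-dashboard -p '{"spec": {"type": "NodePort"}}'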

  2. List all pods across namespaces. You should see a pod whose name starts with kubernetes-dashboard; note its full name for the next step.

rgsoft@k8s-master-2305:~/Downloads/dashboard$ kubectl get pods --all-namespaces
NAMESPACE              NAME                                        READY   STATUS    RESTARTS        AGE
calico-apiserver       calico-apiserver-69bf8f84b9-hrv6j           1/1     Running   2 (3h46m ago)   4h8m
calico-apiserver       calico-apiserver-69bf8f84b9-vrh5t           1/1     Running   0               4h8m
calico-system          calico-kube-controllers-6bb86c78b4-99p96    1/1     Running   1 (3h46m ago)   4h11m
calico-system          calico-node-qm2g9                           1/1     Running   2 (3h46m ago)   4h11m
calico-system          calico-node-xhtct                           1/1     Running   0               4h11m
calico-system          calico-typha-6fbfb8d5cf-rrxtr               1/1     Running   1 (3h46m ago)   4h11m
calico-system          csi-node-driver-lphhs                       2/2     Running   0               4h11m
calico-system          csi-node-driver-x94wt                       2/2     Running   2 (3h46m ago)   4h11m
kube-system            coredns-5bbd96d687-g5rft                    1/1     Running   0               4h18m
kube-system            coredns-5bbd96d687-jmc6r                    1/1     Running   0               4h18m
kube-system            etcd-k8s-master-2305                        1/1     Running   0               4h18m
kube-system            gpushare-device-plugin-ds-fn2b6             1/1     Running   0               124m
kube-system            gpushare-schd-extender-5f4d95475b-sg9zs     1/1     Running   0               164m
kube-system            gpushare-scheduler-597795bbdc-kj6tg         1/1     Running   0               164m
kube-system            kube-apiserver-k8s-master-2305              1/1     Running   0               4h18m
kube-system            kube-controller-manager-k8s-master-2305     1/1     Running   0               4h18m
kube-system            kube-proxy-5vzc5                            1/1     Running   1               4h15m
kube-system            kube-proxy-68gj7                            1/1     Running   0               4h18m
kube-system            kube-scheduler-k8s-master-2305              1/1     Running   0               4h18m
kubernetes-dashboard   dashboard-metrics-scraper-7bc864c59-wq5qn   1/1     Running   0               15m
kubernetes-dashboard   kubernetes-dashboard-6c7ccbcf87-ctx75       1/1     Running   0               5m12s
tigera-operator        tigera-operator-5d6845b496-9h256            1/1     Running   1 (3h46m ago)   4h12m

  3. Next, delete the Kubernetes dashboard pod with the kubectl delete command, using the pod name found in step 2. For this tutorial, the name of the pod is kubernetes-dashboard-6c7ccbcf87-ctx75.

NOTE: Whenever you modify the service type, delete the pod. Once deleted, the Deployment automatically creates a replacement pod that picks up the updated service configuration.

kubectl delete pod -n kubernetes-dashboard kubernetes-dashboard-6c7ccbcf87-ctx75
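
Alternatively, instead of deleting the pod by name, you can restart the dashboard Deployment and let it recreate the pod for you:

kubectl rollout restart deployment kubernetes-dashboard -n kubernetes-dashboard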
  4. Verify the kubernetes-dashboard service has the correct type by running the kubectl get svc --all-namespaces command. You will now notice that the service type has changed to NodePort, and the service's TCP port 443 is exposed externally on NodePort 32170 (443:32170/TCP).
rgsoft@k8s-master-2305:~/Downloads/dashboard$ kubectl get svc --all-namespaces
NAMESPACE              NAME                              TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                  AGE
calico-apiserver       calico-api                        ClusterIP   10.111.254.247   <none>        443/TCP                  4h11m
calico-system          calico-kube-controllers-metrics   ClusterIP   None             <none>        9094/TCP                 4h11m
calico-system          calico-typha                      ClusterIP   10.102.208.68    <none>        5473/TCP                 4h14m
default                kubernetes                        ClusterIP   10.96.0.1        <none>        443/TCP                  4h21m
kube-system            gpushare-schd-extender            NodePort    10.97.41.111     <none>        12345:32766/TCP          167m
kube-system            kube-dns                          ClusterIP   10.96.0.10       <none>        53/UDP,53/TCP,9153/TCP   4h21m
kubernetes-dashboard   dashboard-metrics-scraper         ClusterIP   10.107.85.238    <none>        8000/TCP                 18m
kubernetes-dashboard   kubernetes-dashboard              NodePort    10.103.62.243    <none>        443:32170/TCP            18m
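
As a quick sanity check, you can hit the NodePort directly from any machine that can reach a node. The node IP 192.168.1.239 below is the one used in the proxy section later; -k skips certificate verification because the dashboard serves a self-signed certificate:

curl -k https://192.168.1.239:32170/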

Create a Service Account to Log In to the Dashboard

Now, create a service account using kubectl create serviceaccount in the default namespace and bind it to the cluster-admin ClusterRole. You'll use this service account's token to authenticate to the dashboard.

kubectl create serviceaccount dashboard -n default
kubectl create clusterrolebinding dashboard-admin -n default --clusterrole=cluster-admin  --serviceaccount=default:dashboard
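
If you prefer declarative manifests, here is a minimal equivalent of the two commands above (note that a ClusterRoleBinding is cluster-scoped, so the -n flag above has no effect on it):

apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin
subjects:
- kind: ServiceAccount
  name: dashboard
  namespace: default
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io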

Create a token for the service account using the command below; --duration=87600h makes it valid for ten years. Adjust the duration as appropriate, for example --duration=8760h for one year.

kubectl -n default create token dashboard --duration=87600h
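
A quick way to confirm the token works is to call the API server with it directly; 192.168.1.239:6443 is the API server address used in the proxy configuration below:

TOKEN=$(kubectl -n default create token dashboard --duration=87600h)
curl -k -H "Authorization: Bearer $TOKEN" https://192.168.1.239:6443/version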

(Optional) Configure an Nginx Reverse Proxy for the API Server and Dashboard

NOTE: When OpenVPN is used as the default router, port forwarding to the API server host does not work; you need another host on the local network to act as a reverse proxy for access.

mkdir -p /home/rgsoft/auth
sudo cp -f /etc/kubernetes/pki/ca.crt /home/rgsoft/auth
sudo chmod 644 /home/rgsoft/auth/ca.crt

The configuration below reverse-proxies the API server on port 80 and the dashboard on port 81.

server {
  listen 80;

  location / {
    proxy_redirect off;
    proxy_pass https://192.168.1.239:6443;
    proxy_ssl_trusted_certificate /home/rgsoft/auth/ca.crt;
    proxy_ssl_verify off;
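    # Long-lived service-account token generated earlier with 'kubectl create token'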
    proxy_set_header Authorization "Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IkkyTWM0X3dxNmtDVEdnUmgtRjlIdXh1cW5CcTY0d0hlT08tTjJsenlrTmMifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjg1MzQ4ODc3LCJpYXQiOjE2ODUzNDUyNzcsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJkZWZhdWx0Iiwic2VydmljZWFjY291bnQiOnsibmFtZSI6ImRhc2hib2FyZCIsInVpZCI6IjM5Nzk1Y2U0LTFjNTUtNGQ1OC05NGFlLTU1YzcxYWM1YjhiMCJ9fSwibmJmIjoxNjg1MzQ1Mjc3LCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6ZGVmYXVsdDpkYXNoYm9hcmQifQ.ctIJu96JdomjtkS9qoQoCFMHDpx57G0JsUYG-LQJVOjfQzSlzJAndw8D0RVMwhk7ei9z_UYz2S0BPyZXXSGBWOU-DLN0tdP4EMzMd_pjzp16TPQhhvnH-ifjCORTQ5f_14v2yA4sqmRZHlPLU5n0-ZitAZN3YgnbckfCM_g6bIdjT7GFy01ES22gddl6yqp90Omi4XiRBXY9F0-6sY7f-1UKRU3wapff73h35og_JoeDhdu3E16xNH2lXFo9j_vacyv1WF178B2ohRH5TxsM7C8kqq9dJH0uR-lUlhxjLdYbGqmQlfOlNlmvBAlyY0J_PilXl0Q7Ld57i2Kf0Hzsvw";
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Host $http_host;
    # Show real IP
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
  }
}

server {
  listen 81;

  location / {
    proxy_redirect off;
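    # Forward to the dashboard's NodePort (32170) on the node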
    proxy_pass https://192.168.1.239:32170;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Host $http_host;
    # Show real IP
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
  }
}

To apply the configuration, restart Nginx:

sudo systemctl restart nginx
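
With Nginx restarted, both proxies can be tested from any machine on the LAN. Replace 192.168.1.100 with your proxy host's actual address (it is a placeholder here):

curl http://192.168.1.100/version   # API server, authenticated by the embedded token
curl http://192.168.1.100:81/       # Kubernetes dashboard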