K8S之Dashboard安装
一、Dashboard安装
# Run on the ops/admin host (the k8s-yaml repository server).
# Pull the public Dashboard image, re-tag it for the private Harbor registry,
# then push it so the Deployment below can reference it.
k8s-yaml]# docker pull k8scn/kubernetes-dashboard-amd64:v1.8.3
k8s-yaml]# docker images|grep dashboard
# FIX: the tag must use the same registry we push to. The original tagged
# harbor.od.com/... but pushed test-harbor.cedarhd.com/..., which would fail
# ("tag does not exist"); the Deployment image also uses test-harbor.cedarhd.com.
k8s-yaml]# docker tag fcac9aa03fd6 test-harbor.cedarhd.com/public/dashboard:v1.8.3
k8s-yaml]# docker push test-harbor.cedarhd.com/public/dashboard:v1.8.3
k8s-yaml]# mkdir dashboard
k8s-yaml]# cd dashboard/
# Create the four YAML manifest files below
dashboard]# vi rbac.yaml
# rbac.yaml — ServiceAccount for the Dashboard, bound to the built-in
# cluster-admin ClusterRole (full cluster access; acceptable for a lab setup,
# but consider a narrower role in production).
# NOTE: indentation restored — the original paste was flush-left, which is
# not valid YAML for these nested mappings.
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard-admin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-admin
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard-admin
  namespace: kube-system
-------------------------------------------------------------------------------------
dashboard]# vi dp.yaml
# dp.yaml — Deployment running the Dashboard container (HTTPS on 8443) with
# auto-generated certificates, resource limits, and a liveness probe.
# NOTE: indentation restored — the original paste was flush-left, which is
# not valid YAML for these nested mappings.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        # marks the pod as critical on older clusters (deprecated annotation;
        # priorityClassName below is the current mechanism)
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: kubernetes-dashboard
        # image pushed to the private Harbor registry in the step above
        image: test-harbor.cedarhd.com/public/dashboard:v1.8.3
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 50m
            memory: 100Mi
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          # PLATFORM-SPECIFIC ARGS HERE
        - --auto-generate-certificates
        volumeMounts:
        # writable scratch space for the generated certificates
        - name: tmp-volume
          mountPath: /tmp
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: tmp-volume
        emptyDir: {}
      # ServiceAccount created by rbac.yaml
      serviceAccountName: kubernetes-dashboard-admin
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
-------------------------------------------------------------------------------------
dashboard]# vi svc.yaml
# svc.yaml — ClusterIP Service exposing the Dashboard pod: cluster port 443
# forwards to the container's HTTPS port 8443.
# NOTE: indentation restored — the original paste was flush-left, which is
# not valid YAML for these nested mappings.
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 443
    targetPort: 8443
-------------------------------------------------------------------------------------
dashboard]# vi ingress.yaml
# ingress.yaml — Traefik ingress routing the Dashboard hostname to the
# kubernetes-dashboard Service on port 443.
# NOTE: indentation restored — the original paste was flush-left, which is
# not valid YAML for these nested mappings.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  # FIX: host was dashboard.od.com, but the 'kubectl get ingress' output for
  # this cluster shows test-dashboard.cedarhd.com — use the cedarhd domain
  # consistently with the rest of this environment.
  - host: test-dashboard.cedarhd.com
    http:
      paths:
      - backend:
          serviceName: kubernetes-dashboard
          servicePort: 443
-------------------------------------------------------------------------------------
# Run on any one of the compute (worker) nodes.
# Apply in this order: RBAC first (the Deployment references its
# ServiceAccount), then Deployment, Service, and finally the Ingress.
~]# kubectl apply -f http://k8s-yaml.cedarhd.com/dashboard/rbac.yaml
~]# kubectl apply -f http://k8s-yaml.cedarhd.com/dashboard/dp.yaml
~]# kubectl apply -f http://k8s-yaml.cedarhd.com/dashboard/svc.yaml
~]# kubectl apply -f http://k8s-yaml.cedarhd.com/dashboard/ingress.yaml
[root@test-nodes1 ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coreDNS-6c69fbcc6c-6vqgr 1/1 Running 0 21h
kubernetes-dashboard-5d6f5f9cdd-plsjq 1/1 Running 0 20m
traefik-ingress-44ptk 1/1 Running 0 4h5m
traefik-ingress-vrvr4 1/1 Running 0 4h5m
[root@test-nodes1 ~]# kubectl get svc -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
coredns ClusterIP 192.168.0.2 53/UDP,53/TCP,9153/TCP 21h
kubernetes-dashboard ClusterIP 192.168.177.146 443/TCP 20m
traefik-ingress-service ClusterIP 192.168.254.186 80/TCP,8080/TCP 4h5m
[root@test-nodes1 ~]# kubectl get ingress -n kube-system
NAME HOSTS ADDRESS PORTS AGE
kubernetes-dashboard test-dashboard.cedarhd.com 80 20m
traefik-web-ui test-traefik.cedarhd.com 80 3h54m
网站建设哪家好,找创新互联公司!专注于网页设计、网站建设、微信开发、成都小程序开发、集团企业网站建设等服务项目。为回馈新老客户创新互联还提供了甘谷免费建站欢迎大家使用!
本文标题:K8S之Dashboard安装
网站网址:http://azwzsj.com/article/gggggc.html