kubesphere

Miracle / 2024-05-09

KubeSphere Deployment

1. VM optimization (run on all nodes)

  • OS version: Rocky Linux 9.3
# 0. NIC naming (eth0): append to the kernel boot options when booting the installer ISO
# net.ifnames=0 biosdevname=0
 
# 1. Stop && disable the firewall
systemctl disable --now firewalld

# 2. Disable SELinux
setenforce 0
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
 
# 3. Replace the default repos with a mirror
sed -e 's|^mirrorlist=|#mirrorlist=|g' \
    -e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirror.nju.edu.cn/rocky|g' \
    -i /etc/yum.repos.d/rocky-extras.repo /etc/yum.repos.d/rocky.repo

# 4. Install the EPEL repo
dnf install -y epel-release
sed -e 's!^metalink=!#metalink=!g' \
    -e 's!^#baseurl=!baseurl=!g' \
    -e 's!https\?://download\.fedoraproject\.org/pub/epel!https://mirror.nju.edu.cn/epel!g' \
    -e 's!https\?://download\.example/pub/epel!https://mirror.nju.edu.cn/epel!g' \
    -i /etc/yum.repos.d/epel{,-testing}.repo

# 5. Set the timezone
timedatectl set-timezone Asia/Shanghai

# 6. Configure time synchronization
dnf install -y chrony
sed -i '3s/.*/server ntp.aliyun.com iburst/' /etc/chrony.conf
systemctl enable --now chronyd
chronyc sources -v

# 7. Configure the network
nmcli con modify eth0 \
    connection.autoconnect yes ipv6.method disabled \
    ipv4.method manual ipv4.gateway 192.168.2.1 \
    ipv4.addresses 192.168.2.100/24 ipv4.dns 223.5.5.5,223.6.6.6
sed -i '/uuid/d' /etc/NetworkManager/system-connections/eth0.nmconnection
nmcli con reload
nmcli con up eth0
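# Optional sanity check of the addressing (a quick sketch; adjust the device name if yours differs):
nmcli -g ipv4.addresses,ipv4.gateway,ipv4.dns con show eth0
ip -4 addr show eth0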

# 8. Disable ipv6 via sysctl
echo 'net.ipv6.conf.all.disable_ipv6 = 1' >> /etc/sysctl.d/60-disable_ipv6.conf
echo 'net.ipv6.conf.default.disable_ipv6 = 1' >> /etc/sysctl.d/60-disable_ipv6.conf
sysctl --system

# Alternative: disable ipv6 entirely via kernel boot arguments
# grubby --update-kernel ALL --args ipv6.disable=1
# grubby --info DEFAULT
# To re-enable ipv6:  grubby --update-kernel ALL --remove-args ipv6.disable

# 9. Install common tools and vmtools
dnf install -y open-vm-tools bash-completion net-tools lrzsz vim
systemctl enable --now vmtoolsd
systemctl status vmtoolsd

# 10. Speed up ssh logins
sed -i "s/#UseDNS yes/UseDNS no/" /etc/ssh/sshd_config
sed -i "s/GSSAPIAuthentication .*/GSSAPIAuthentication no/" /etc/ssh/sshd_config
systemctl restart sshd

2. Create the KubeSphere cluster

1. Configure SSH key-based login

# 192.168.2.20 provides the nfs/minio storage (used later to verify PVC backups and S3-backed PVCs)
# 1. Set environment variables (192.168.2.21 is the install server)
hostip=(192.168.2.{20..27})
hostname=(node{20..27})

# 2. Passwordless login (rootpasswd is the root password on the nodes)
rootpasswd=zaq1@WSX
dnf install -y sshpass
[ ! -f ~/.ssh/id_rsa ] && ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
let iplen=${#hostip[@]}-1
for ((i=0;i<=$iplen;i++)); do
    sshpass -p ${rootpasswd} ssh-copy-id -o StrictHostKeyChecking=no ${hostip[i]}
    #sshpass -p ${rootpasswd} ssh-copy-id -o StrictHostKeyChecking=no ${hostname[i]}
    #scp -r /etc/hosts ${hostip[i]}:/etc/hosts
    ssh -T ${hostip[i]} "hostnamectl set-hostname ${hostname[i]}"
done
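# Optional check that key-based login and the hostnames took effect — a small sketch that
# reuses the hostip array defined above (BatchMode makes ssh fail instead of prompting if
# the key was not installed):
for ip in ${hostip[@]}; do
    ssh -o BatchMode=yes ${ip} hostname
done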

2. Install dependencies

dnf install -y conntrack-tools socat bash-completion ebtables ipset ipvsadm tar lrzsz net-tools wget nfs-utils

3. Install KubeSphere

# Create a directory for the install artifacts
mkdir -p /root/kubesphere-install && cd $_

# Download the installer https://github.com/kubesphere/kubekey
export KKZONE=cn
curl -sfL https://get-kk.kubesphere.io | VERSION=v3.1.1 sh -

# List the supported k8s versions
./kk version --show-supported-k8s

# Create the cluster config (for an existing cluster: ./kk create config --from-cluster)
./kk create config --with-kubesphere v3.4.1 --with-kubernetes v1.28.8
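# Before installing, edit the generated config-sample.yaml so the hosts and roleGroups
# sections match your nodes. A rough sketch only — node names, addresses and the password
# below are illustrative, based on the variables used earlier in this post:
#   spec:
#     hosts:
#     - {name: node21, address: 192.168.2.21, internalAddress: 192.168.2.21, user: root, password: "zaq1@WSX"}
#     - {name: node22, address: 192.168.2.22, internalAddress: 192.168.2.22, user: root, password: "zaq1@WSX"}
#     roleGroups:
#       etcd:
#       - node21
#       control-plane:
#       - node21
#       worker:
#       - node22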

# Install (--with-local-storage installs the default local storage class)
export KKZONE=cn
./kk create cluster -f config-sample.yaml  --with-local-storage -y

# Watch the installation log
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f

# Fix the s2i certificate expiry issue (update the image to v3.4.1-patch.0)
# kubectl -n kubesphere-system get deployments ks-installer --no-headers -o custom-columns=:.spec.template.spec.containers[0].image
helm -n kubesphere-devops-system delete devops
kubectl -n kubesphere-system patch deployments ks-installer --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "kubesphere/ks-installer:v3.4.1-patch.0"}]'
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
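# Once the installer reports success, a couple of sanity checks (a sketch; by default the
# console is exposed on NodePort 30880 and the installer log prints the admin credentials):
kubectl get pods -A | grep -Ev 'Running|Completed'
kubectl -n kubesphere-system get svc ks-console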

3. Install OpenELB

1. Enable strictARP for kube-proxy

# 1. Enable strictARP in the kube-proxy configmap
kubectl get configmap kube-proxy -n kube-system -o yaml | \
        sed -e "s/strictARP: false/strictARP: true/" | \
        kubectl apply -f - -n kube-system

# 2. Verify strictARP is enabled
kubectl get configmap kube-proxy -n kube-system -o jsonpath='{.data.config\.conf}' | grep -oP 'strictARP: \K\w+'
kubectl get configmap -n kube-system kube-proxy -o yaml | grep -Po 'strictARP: \w+'

# 3. Restart kube-proxy so the change takes effect
kubectl -n kube-system rollout restart ds kube-proxy
kubectl -n kube-system get pods -o wide -l k8s-app=kube-proxy

2. Create the workspace and project

# Create the workspace, project and helm repo declaratively via yaml
kubectl apply -f - <<EOF
---
apiVersion: tenant.kubesphere.io/v1alpha1
kind: Workspace
metadata:
  name: hcsystem
  annotations:
    kubesphere.io/description: "hcsystem devops"

---
apiVersion: tenant.kubesphere.io/v1alpha2
kind: WorkspaceTemplate
metadata:
  name: hcsystem
  annotations:
    kubesphere.io/description: "hcsystem devops"
spec:
  placement:
    clusterSelector: {}
  template:
    spec:
      manager: admin
      networkIsolation: false

---
apiVersion: v1
kind: Namespace
metadata:
  name: openelb-system
  labels:
    kubesphere.io/workspace: hcsystem

---
apiVersion: application.kubesphere.io/v1alpha1
kind: HelmRepo
metadata:
  name: kubesphere-stable
  annotations:
    app.kubesphere.io/sync-period: "3600s"
  labels:
    kubesphere.io/workspace: hcsystem
spec:
  credential: {}
  name: kubesphere-stable
  syncPeriod: 3600
  url: https://charts.kubesphere.io/stable
EOF

# Verify
kubectl get ns openelb-system
kubectl get Workspace,HelmRepo

3. Install OpenELB

# 1. Relax the master taint to PreferNoSchedule (so openelb can be scheduled there)
master_node=$(kubectl get node -l node-role.kubernetes.io/control-plane= -o jsonpath='{.items[*].metadata.name}')
kubectl taint node $master_node node-role.kubernetes.io/control-plane-
kubectl taint node $master_node node-role.kubernetes.io/control-plane:PreferNoSchedule

# 2. Label the master nodes (openelb-manager will be scheduled onto them)
master_node=$(kubectl get node -l node-role.kubernetes.io/control-plane= -o jsonpath='{.items[*].metadata.name}')
kubectl label --overwrite nodes $master_node lb.kubesphere.io/v1alpha1=openelb
kubectl get node -l lb.kubesphere.io/v1alpha1=openelb

# 3. Install openelb
# https://github.com/kubesphere/helm-charts/raw/gh-pages/stable/openelb-0.5.0.tgz
helm repo add kubesphere-stable https://charts.kubesphere.io/stable
helm repo update
helm search repo stable/openelb -l
helm install openelb \
    -n openelb-system stable/openelb \
    --set manager.webhookPort=1443 \
    --set manager.nodeSelector.'kubernetes\.io/os'=linux,manager.nodeSelector.'lb\.kubesphere\.io/v1alpha1'=openelb

# 4. Scale openelb-manager replicas for high availability
kubectl -n openelb-system scale deployment --replicas=3 openelb-manager
kubectl -n openelb-system get pods -o wide

# 5. Change the openelb webhook port (skip this if the port was already set via --set during install)
kubectl -n openelb-system patch deployments openelb-manager --type='json' -p='[
      {"op": "replace", "path": "/spec/template/spec/containers/0/args/0", "value": "--webhook-port=1443"},
      {"op": "replace", "path": "/spec/template/spec/containers/0/ports/0/hostPort", "value": 1443},
      {"op": "replace", "path": "/spec/template/spec/containers/0/ports/0/containerPort", "value": 1443}]'

# Restore the default port (likewise optional)
kubectl -n openelb-system patch deployments openelb-manager --type='json' -p='[
      {"op": "replace", "path": "/spec/template/spec/containers/0/args/0", "value": "--webhook-port=443"},
      {"op": "replace", "path": "/spec/template/spec/containers/0/ports/0/hostPort", "value": 443},
      {"op": "replace", "path": "/spec/template/spec/containers/0/ports/0/containerPort", "value": 443}]'

4. Gateway configuration

# 1. Enable the cluster gateway
kubectl apply -f - <<EOF
apiVersion: gateway.kubesphere.io/v1alpha1
kind: Gateway
metadata:
  name: kubesphere-router-kubesphere-system
  namespace: kubesphere-controls-system
spec:
  controller:
    config:
      worker-processes: "4"
    replicas: 1
  deployment:
    annotations:
      servicemesh.kubesphere.io/enabled: "false"
    replicas: 1
  service:
    annotations:
      lb.kubesphere.io/v1alpha1: openelb
      eip.openelb.kubesphere.io/v1alpha2: layer2-eip
      protocol.openelb.kubesphere.io/v1alpha1: layer2
    type: LoadBalancer
EOF

# 2. Create the EIP
kubectl apply -f - <<EOF
apiVersion: network.kubesphere.io/v1alpha2
kind: Eip
metadata:
  name: layer2-eip
  annotations:
    eip.openelb.kubesphere.io/is-default-eip: "true"
spec:
  address: 192.168.2.50
  interface: eth0
  protocol: layer2
EOF

# Verify
kubectl get gateway -A
kubectl get eip
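# If the Eip and gateway are wired together correctly, the gateway Service should pick up
# 192.168.2.50 as its external address (a quick sketch; the LoadBalancer service is the one
# created by the gateway above):
kubectl -n kubesphere-controls-system get svc | grep LoadBalancer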

4. Configure domain-based access

1. Domain access for ks-console / minio

# Ingress for the KubeSphere console and minio
kubectl apply -f - <<EOF
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
  name: kubesphere-system
  namespace: kubesphere-system
spec:
  rules:
    - host: ks.openelb.cn
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: ks-console
                port:
                  number: 80
    - host: minio.openelb.cn
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: minio
                port:
                  number: 9000
EOF

# Verify
kubectl get ingress -A
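# These hostnames are not in public DNS. For a quick test from a client machine, one option
# is to point all of the hosts used in this post at the gateway EIP (192.168.2.50) and curl
# through the gateway:
cat >> /etc/hosts <<EOF
192.168.2.50 ks.openelb.cn minio.openelb.cn jenkins.openelb.cn sonarqube.openelb.cn weave.openelb.cn kube-dashboard.openelb.cn
EOF
curl -I http://ks.openelb.cn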

2. Domain access for Jenkins / SonarQube

kubectl apply -f - <<EOF
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
  name: kubesphere-devops-system
  namespace: kubesphere-devops-system
spec:
  rules:
    - host: jenkins.openelb.cn
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: devops-jenkins
                port:
                  number: 80
    - host: sonarqube.openelb.cn
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: sonarqube-sonarqube
                port:
                  number: 9000
EOF

# Verify
kubectl get ingress -A

3. Domain access for Weave Scope

kubectl apply -f - <<EOF
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
  name: weave
  namespace: weave
  annotations:
    kubesphere.io/creator: admin
spec:
  rules:
    - host: weave.openelb.cn
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: weave-scope-app
                port:
                  number: 80
EOF

# Verify
kubectl get ingress -A

5. Install tools with Helm

1. Install NFS storage (192.168.2.20)

1. Server-side installation and configuration
# 1. Install the package
dnf install -y nfs-utils

# 2. Configure the nfs exports
cat <<EOF > /etc/exports
/data/nfs *(rw,sync,insecure,no_subtree_check,no_root_squash)
/home/kubesphere-nfs *(rw,sync,insecure,no_subtree_check,no_root_squash)
EOF

# 3. Create the data directories
mkdir -p /data/nfs/default
mkdir -p /home/kubesphere-nfs/kubesphere

# 4. Start the service && enable it on boot
systemctl enable --now nfs-server
systemctl status nfs-server
exportfs -r

# 5. Verify
showmount -e
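# Optional: from another node, sanity-check the export before handing it to Kubernetes
# (a throwaway mount; server IP and path are the ones configured above):
mkdir -p /mnt/nfs-test
mount -t nfs 192.168.2.20:/data/nfs/default /mnt/nfs-test
touch /mnt/nfs-test/.write-test && rm -f /mnt/nfs-test/.write-test
umount /mnt/nfs-test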
2. Install nfs-client-provisioner
# 1. Create the namespace && attach it to the workspace
kubectl create namespace nfs-provisioner
kubectl label --overwrite namespace nfs-provisioner kubesphere.io/workspace=hcsystem

# 2. Add the helm repo
helm repo add kubesphere-main https://charts.kubesphere.io/main
helm repo update

# 3. Install nfs-client-provisioner
# https://github.com/kubesphere/helm-charts/raw/gh-pages/main/nfs-client-provisioner-4.0.11.tgz
helm upgrade --install nfs-client-provisioner kubesphere-main/nfs-client-provisioner \
    --namespace nfs-provisioner \
    --set nfs.server=192.168.2.20 \
    --set nfs.path=/data/nfs/default \
    --set storageClass.defaultClass=true

# Unset openebs (local) as the default storage class
kubectl patch storageclass local -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'

# Set nfs-client as the default storage class
kubectl patch storageclass nfs-client -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'

# 4. Check pod and storageclass status
kubectl -n nfs-provisioner get pods,storageclasses -o wide


# 5. Create the managed-nfs-storage storageclass
kubectl apply -f - <<EOF
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"
provisioner: cluster.local/nfs-client-provisioner
parameters:
  archiveOnDelete: "false"
EOF
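# Optional: confirm dynamic provisioning works end to end with a throwaway PVC against the
# default class (a sketch; the PVC name is illustrative):
kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-test-pvc
spec:
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 1Gi
EOF
kubectl get pvc nfs-test-pvc
kubectl delete pvc nfs-test-pvc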

2. Kubernetes Dashboard

1. Install kubernetes-dashboard with helm
# https://github.com/kubernetes/dashboard
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard

# https://github.com/kubernetes/dashboard/releases/download/kubernetes-dashboard-7.3.2/kubernetes-dashboard-7.3.2.tgz
helm install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard \
    --create-namespace --namespace kubernetes-dashboard --set kong.proxy.http.enabled=true
kubectl -n kubernetes-dashboard get all
2. Create an admin user
# https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md
kubectl apply -f - <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard

---
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
EOF
3. Get a login token
# Get a temporary token
kubectl -n kubernetes-dashboard create token admin-user

# Get a long-lived token
kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath='{.data.token}' | base64 -d
4. Create an Ingress for domain access
kubectl apply -f - <<EOF
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
  name: kubernetes-dashboard-ssl
  namespace: kubernetes-dashboard
  annotations:
    ingress.kubernetes.io/ssl-passthrough: "true"
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    nginx.ingress.kubernetes.io/backend-protocol: HTTPS
    nginx.org/ssl-backends: kubernetes-dashboard-kong-proxy
spec:
  tls:
    - hosts:
        - kube-dashboard.openelb.cn
      secretName: kubernetes-dashboard-csrf
  rules:
    - host: kube-dashboard.openelb.cn
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: kubernetes-dashboard-kong-proxy
                port:
                  number: 443
EOF
5. Delete the user
kubectl -n kubernetes-dashboard delete secret admin-user
kubectl -n kubernetes-dashboard delete serviceaccount admin-user
kubectl -n kubernetes-dashboard delete clusterrolebinding admin-user

6. DevOps configuration

1. Integrate SonarQube into the pipeline

1. Install SonarQube
# Add the sonarqube helm repo
helm repo add sonarqube https://SonarSource.github.io/helm-chart-sonarqube
helm repo update

# List available versions
helm search repo sonarqube/sonarqube -l
#helm pull sonarqube/sonarqube --version 10.5.0+2748

# 1. Create the namespace && attach it to the workspace
kubectl create namespace sonarqube
kubectl label --overwrite namespace sonarqube kubesphere.io/workspace=hcsystem

# 2. Install sonarqube/sonarqube-lts
helm upgrade --install -n sonarqube sonarqube sonarqube/sonarqube-lts --version 2.0.0+463 \
    --set service.type=NodePort \
    --set service.nodePort=30020 \
    --set persistence.enabled=true \
    --set ingress.enabled=true \
    --set ingress.hosts[0].name=sonarqube.openelb.cn \
    --set plugins.install[0]=https://mirror.ghproxy.com/https://github.com/xuhuisheng/sonar-l10n-zh/releases/download/sonar-l10n-zh-plugin-8.9/sonar-l10n-zh-plugin-8.9.jar

# Change the nodePort (skip if it was already set during install)
kubectl -n sonarqube patch svc sonarqube-sonarqube-lts --type='json' -p='[{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 30020 }]'
2. Configure domain access
kubectl apply -f - <<EOF
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
  name: sonarqube
  namespace: sonarqube
  annotations:
    kubesphere.io/creator: admin
spec:
  rules:
    - host: sonarqube.openelb.cn
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: sonarqube-sonarqube
                port:
                  number: 9000
EOF
3. Download plugins
# Exec into the container and download the plugin
kubectl -n sonarqube exec -it sonarqube-sonarqube-0 -- /bin/sh
mkdir -p /opt/sonarqube/extensions/plugins
cd /opt/sonarqube/extensions/plugins
wget https://mirror.ghproxy.com/https://github.com/xuhuisheng/sonar-l10n-zh/releases/download/sonar-l10n-zh-plugin-10.5/sonar-l10n-zh-plugin-10.5.jar

# Restart the pod
kubectl -n sonarqube rollout restart statefulset sonarqube-sonarqube
4. Change the Jenkins/SonarQube nodePort
# Change the jenkins nodePort to 30010
kubectl -n kubesphere-devops-system patch svc devops-jenkins --type='json' -p='[{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 30010 }]'
kubectl -n kubesphere-devops-system get svc devops-jenkins

# Change the sonarqube nodePort to 30020
kubectl -n sonarqube patch svc sonarqube-sonarqube-lts --type='json' -p='[{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 30020 }]'
kubectl -n sonarqube get svc sonarqube-sonarqube-lts
5. Configuration details
# Create a SonarQube admin token named "kubesphere"
SonarQube Token:    53f43b1614309c959b0e1eecf11ef12586c47dae
SonarQube login URL: http://192.168.2.91:30020

# Jenkins webhook (the host part is the Jenkins address; the /sonarqube-webhook/ path is fixed)
Jenkins WebHook:  http://192.168.2.91:30010/sonarqube-webhook/

# SonarQube project token
devops-sample: 1a5972069401d7ecace2e74f662653e3749072b7

# Command-line test (requires maven)
mvn sonar:sonar \
  -Dsonar.projectKey=java-demo \
  -Dsonar.host.url=http://192.168.2.91:30020 \
  -Dsonar.login=1a5972069401d7ecace2e74f662653e3749072b7