KubeSphere Installation and Deployment

Miracle
2024-03-01

01-kubesphere-0426

1. KubeSphere Deployment

[Installation reference]

  1. Configure key-based SSH login (run on the first control-plane node)
# Configure environment variables
hostip=(192.168.10.{41..47})
# Node names must match the hosts section of config-sample.yaml below
hostname=(node1 node2 node3 node4 node5 node6 node7)

# Configure /etc/hosts resolution
sed -i '3,$d' /etc/hosts
echo -e "\n# k8s-cluster" >> /etc/hosts
let iplen=${#hostip[@]}-1
for ((i=0;i<=$iplen;i++)); do
    echo "${hostip[i]}  ${hostname[i]}" >> /etc/hosts
done

# Passwordless login (rootpasswd is the system root password)
rootpasswd=redhat
yum install -y sshpass
[ ! -f ~/.ssh/id_rsa ] && ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
let iplen=${#hostip[@]}-1
for ((i=0;i<=$iplen;i++)); do
    sshpass -p ${rootpasswd} ssh-copy-id -o StrictHostKeyChecking=no ${USER}@${hostip[i]}
    #sshpass -p ${rootpasswd} ssh-copy-id -o StrictHostKeyChecking=no ${USER}@${hostname[i]}
    #scp -r /etc/hosts ${USER}@${hostip[i]}:/etc/hosts
    #ssh -T ${USER}@${hostip[i]} "hostnamectl set-hostname ${hostname[i]}"
done
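
A quick check worth adding here (not in the original): with BatchMode=yes, ssh fails instead of prompting, so any host that still wants a password is reported.
# Verify passwordless SSH to every host
for ((i=0;i<=$iplen;i++)); do
    ssh -o BatchMode=yes -o ConnectTimeout=5 ${USER}@${hostip[i]} hostname \
        || echo "SSH to ${hostip[i]} FAILED"
done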

  2. Upgrade the system kernel (run on all nodes)
# Add the ELRepo repository
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm

# Installs the latest mainline kernel by default
yum --enablerepo=elrepo-kernel -y install kernel-ml

# Set the new kernel as the default boot entry
grub2-set-default  0
grub2-mkconfig -o /etc/grub2.cfg

# Confirm the default boot kernel points to the one installed above
grubby --default-kernel

# Reboot
reboot
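
After the node comes back up, it is worth confirming the running kernel is the newly installed one; a minimal check:
# Verify the running kernel after the reboot
uname -r
grubby --default-kernel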
  3. System tuning (run on all nodes)
# 1. Configure time synchronization
yum install -y chrony
systemctl enable chronyd
systemctl start chronyd
timedatectl set-ntp true

# 2. Set the time zone
timedatectl set-timezone Asia/Shanghai
chronyc activity -v

# 3. Stop and disable the firewall
iptables -F
systemctl status firewalld
systemctl stop firewalld
systemctl disable firewalld

# 4. Disable SELinux
setenforce 0 &> /dev/null
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
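
A short sanity pass over the tuning steps above (my sketch; expected outputs in the comments):
# NTP sources should show a synced server
chronyc sources -v | head
# Expect "inactive"
systemctl is-active firewalld
# Expect "Permissive" now, "Disabled" after the next reboot
getenforce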
  4. Install dependencies (run on all nodes)
yum install -y conntrack-tools socat bash-completion tar
  5. Download the installation tool (run on the first control-plane node)
# 1. Official source
# Download URL: https://kubernetes.pek3b.qingstor.com/kubekey/releases/download/v3.0.7/kubekey-v3.0.7-linux-amd64.tar.gz
# GitHub: https://github.com/kubesphere/kubekey
export KKZONE=cn
curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh -
chmod +x kk

# 2. Internal mirror
export KKZONE=cn
curl -o kk http://oss.hc-yun.com/script/kk
chmod +x kk
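
Whichever source you use, confirm the binary runs before proceeding:
./kk version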
  6. Create the cluster configuration (run on the first control-plane node)
# Show the supported Kubernetes versions
./kk version --show-supported-k8s

# 1. Use kk to generate a config template, pinning KubeSphere v3.3.2 and Kubernetes v1.24.9
./kk create config --with-kubesphere v3.3.2 --with-kubernetes v1.24.9
# Full example config: https://github.com/kubesphere/kubekey/blob/master/docs/config-example.md

# 2. Example configuration
cat <<'EOF' > config-sample.yaml
apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: node1, address: 192.168.10.41, internalAddress: 192.168.10.41, user: root, password: ""}
  - {name: node2, address: 192.168.10.42, internalAddress: 192.168.10.42, user: root, password: ""}
  - {name: node3, address: 192.168.10.43, internalAddress: 192.168.10.43, user: root, password: ""}
  - {name: node4, address: 192.168.10.44, internalAddress: 192.168.10.44, user: root, password: ""}
  - {name: node5, address: 192.168.10.45, internalAddress: 192.168.10.45, user: root, password: ""}
  - {name: node6, address: 192.168.10.46, internalAddress: 192.168.10.46, user: root, password: ""}
  - {name: node7, address: 192.168.10.47, internalAddress: 192.168.10.47, user: root, password: ""}
  roleGroups:
    etcd:
    - node1
    - node2
    - node3
    control-plane: 
    - node1
    - node2
    - node3
    worker:
    - node1
    - node2
    - node3
    - node4
    - node5
    - node6
    - node7
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers 
    internalLoadbalancer: haproxy

    domain: lb.kubesphere.local
    address: ""
    port: 6443
  kubernetes:
    version: v1.24.9
    clusterName: cluster.local
    autoRenewCerts: true
    containerManager: containerd
  etcd:
    type: kubekey
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    privateRegistry: ""
    namespaceOverride: ""
    registryMirrors: ["https://docker.nju.edu.cn", "https://docker.mirrors.ustc.edu.cn"]
    insecureRegistries: []
  addons: []

---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.3.2
spec:
  persistence:
    storageClass: ""
  authentication:
    jwtSecret: ""
  zone: ""
  local_registry: ""
  namespace_override: ""
  # dev_tag: ""
  etcd:
    monitoring: false
    endpointIps: localhost
    port: 2379
    tlsEnable: true
  common:
    core:
      console:
        enableMultiLogin: true
        port: 30880
        type: NodePort
    # apiserver:
    #  resources: {}
    # controllerManager:
    #  resources: {}
    redis:
      enabled: false
      volumeSize: 2Gi
    openldap:
      enabled: false
      volumeSize: 2Gi
    minio:
      volumeSize: 20Gi
    monitoring:
      # type: external
      endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
      GPUMonitoring:
        enabled: false
    gpu:
      kinds:
      - resourceName: "nvidia.com/gpu"
        resourceType: "GPU"
        default: true
    es:
      # master:
      #   volumeSize: 4Gi
      #   replicas: 1
      #   resources: {}
      # data:
      #   volumeSize: 20Gi
      #   replicas: 1
      #   resources: {}
      logMaxAge: 7
      elkPrefix: logstash
      basicAuth:
        enabled: false
        username: ""
        password: ""
      externalElasticsearchHost: ""
      externalElasticsearchPort: ""
  alerting:
    enabled: true
    # thanosruler:
    #   replicas: 1
    #   resources: {}
  auditing:
    enabled: true
    # operator:
    #   resources: {}
    # webhook:
    #   resources: {}
  devops:
    enabled: true
    # resources: {}
    jenkinsMemoryLim: 8Gi
    jenkinsMemoryReq: 4Gi
    jenkinsVolumeSize: 8Gi
  events:
    enabled: true
    # operator:
    #   resources: {}
    # exporter:
    #   resources: {}
    # ruler:
    #   enabled: true
    #   replicas: 2
    #   resources: {}
  logging:
    enabled: true
    logsidecar:
      enabled: true
      replicas: 2
      # resources: {}
  metrics_server:
    enabled: true
  monitoring:
    storageClass: ""
    node_exporter:
      port: 9100
      # resources: {}
    # kube_rbac_proxy:
    #   resources: {}
    # kube_state_metrics:
    #   resources: {}
    # prometheus:
    #   replicas: 1
    #   volumeSize: 20Gi
    #   resources: {}
    #   operator:
    #     resources: {}
    # alertmanager:
    #   replicas: 1
    #   resources: {}
    # notification_manager:
    #   resources: {}
    #   operator:
    #     resources: {}
    #   proxy:
    #     resources: {}
    gpu:
      nvidia_dcgm_exporter:
        enabled: false
        # resources: {}
  multicluster:
    clusterRole: none
  network:
    networkpolicy:
      enabled: true
    ippool:
      type: calico
    topology:
      type: weave-scope
  openpitrix:
    store:
      enabled: true
  servicemesh:
    enabled: true
    istio:
      components:
        ingressGateways:
        - name: istio-ingressgateway
          enabled: false
        cni:
          enabled: false
  edgeruntime:
    enabled: false
    kubeedge:
      enabled: false
      cloudCore:
        cloudHub:
          advertiseAddress:
            - ""
        service:
          cloudhubNodePort: "30000"
          cloudhubQuicNodePort: "30001"
          cloudhubHttpsNodePort: "30002"
          cloudstreamNodePort: "30003"
          tunnelNodePort: "30004"
        # resources: {}
        # hostNetWork: false
      iptables-manager:
        enabled: true
        mode: "external"
        # resources: {}
      # edgeService:
      #   resources: {}
  terminal:
    timeout: 600
EOF
  7. Install (run on the first control-plane node)
# Install
export KKZONE=cn
./kk create cluster -f config-sample.yaml -y

# Uninstall
./kk delete cluster -f config-sample.yaml
  8. Verify the deployment (follow the installer log)
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
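
Once the installer log reports success ("Welcome to KubeSphere"), a cluster-wide health check is a sensible follow-up (my sketch, not in the original):
kubectl get nodes -o wide
# Any pod not yet Running shows up here
kubectl get pods -A --field-selector=status.phase!=Running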
  9. Configure shell completion (all nodes)
# 1. Configure command completion
helm completion bash > /etc/bash_completion.d/helm
crictl completion bash > /etc/bash_completion.d/crictl
kubectl completion bash > /etc/bash_completion.d/kubectl
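
# (Addition, a sketch) load the completions into the current shell; otherwise log out and back in
for f in helm crictl kubectl; do source /etc/bash_completion.d/$f; done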

# Platform access information
Console: http://192.168.10.41:30880
Account: admin
Password: P@88w0rd

2. KubeSphere Platform Management

# Working with KubeSphere (a KubeSphere "project" is equivalent to a Kubernetes namespace)
# https://kubesphere.io/zh/docs/v3.3/quick-start/create-workspace-and-project/
# Create a user:           Platform Management → Access Control → Users → Create    platform role: platform-regular (regular platform user)
# Create a workspace:      Platform Management → Access Control → Workspaces → Create    name: hcsystem, workspace admin: admin
# Grant workspace access:  Platform Management → Access Control → open the new workspace → Workspace Settings → Workspace Members → Invite    user role: hcsystem-self-provisioner

# Create a project and grant access
# As the workspace administrator:
# Create a project:  Workbench → Workspace → Projects → Create, then open the new project's detail page
# Grant access:      Workbench → Workspace → Projects → Project Settings → Project Members → Invite    give developers the operator role (project maintainer: manages all project resources except users and roles)

# Create a user and grant access
# Create a user:       Platform Management → Access Control → Users → Create    ligth / ligth    platform role: platform-regular (regular platform user)
# Create a workspace:  Platform Management → Access Control → Workspaces    ligth    ligth (admin)
## Grant workspace access:  Platform Management → Access Control → open the workspace → Workspace Settings → Workspace Members → Invite    user role: hcsystem-self-provisioner

# Log in as the new user ligth and configure a private registry (a CLI equivalent follows this block)
Workbench → Projects → Create    ligth-uat
Workbench → Projects → ligth-uat (project) → Configuration → Secrets    registry-secret, type: image registry information

# Deploy a service
Workbench → Projects → ligth-uat (project) → Application Workloads → Apps → Composed App
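
The registry secret can also be created from the command line; a sketch, assuming a hypothetical registry address (registry.example.com) and the project, user, and secret names used above:
# registry.example.com is a placeholder; substitute your registry
kubectl -n ligth-uat create secret docker-registry registry-secret \
    --docker-server=registry.example.com \
    --docker-username=ligth \
    --docker-password='<password>'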

3. OpenELB

https://zhuanlan.zhihu.com/p/519440135

https://openelb.io/docs/getting-started/usage/use-openelb-in-layer-2-mode/

https://openelb.io/docs/getting-started/installation/install-openelb-on-kubesphere/

  1. Deploy OpenELB

[Official documentation]

# 1. Add an app repository
Workbench → Workspace (hcsystem) → App Management → App Repositories → Add    (name: kubesphere-test, URL: charts.kubesphere.io/test)

# 2. Create a project
Workbench → Workspace (hcsystem) → Projects → Create (openelb-system)

# 3. Deploy the OpenELB app (a Helm CLI alternative follows this block)
Workbench → Workspace (hcsystem) → Projects → openelb-system → Application Workloads → Apps → Create (From App Template)    (kubesphere-test, openelb), then install
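
If you prefer the CLI, the same chart can be installed with Helm; a sketch assuming the chart is named openelb in the repository added above:
helm repo add kubesphere-test https://charts.kubesphere.io/test
helm repo update
helm install openelb kubesphere-test/openelb -n openelb-system --create-namespace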
  2. Configure multiple OpenELB replicas

[Official documentation]

  • In Layer 2 mode (nodes under the same router), OpenELB uses the Kubernetes leader-election feature to guarantee that only one replica responds to ARP/NDP requests.
# List the pods
kubectl -n openelb-system get pods -o wide

# Label the nodes (OpenELB pods run only on hosts with this label)
kubectl label --overwrite nodes node1 node2 node3 lb.kubesphere.io/v1alpha1=openelb

# Verify
kubectl get node -l lb.kubesphere.io/v1alpha1=openelb

# Scale the openelb-manager Pods down to 0
kubectl -n openelb-system scale deployment openelb-manager --replicas=0

# Get the deployment name
DS_NAME=$(kubectl -n openelb-system get deployments -l app.kubernetes.io/name=openelb -o jsonpath='{.items[0].metadata.name}')

# Patch the openelb-manager deployment to add a nodeSelector under spec.template.spec
kubectl -n openelb-system patch deployments $DS_NAME -p '{"spec":{"template":{"spec":{"nodeSelector":{"lb.kubesphere.io/v1alpha1":"openelb"}}}}}'

# Verify
kubectl -n openelb-system get deployments $DS_NAME -ojson | jq  .spec.template.spec.nodeSelector

# Scale openelb-manager back up to the desired replica count
kubectl -n openelb-system scale deployment $DS_NAME --replicas=3
kubectl -n openelb-system get pods -o wide
  3. Configure OpenELB

[Official documentation]

  • In Layer 2 mode, strictARP must be enabled for kube-proxy so that all NICs in the Kubernetes cluster stop answering ARP requests from other NICs, and OpenELB handles ARP requests instead.
# 1. Enable strictARP for kube-proxy
kubectl get configmap kube-proxy -n kube-system -o yaml | sed -e "s/strictARP: false/strictARP: true/" | kubectl apply -f - -n kube-system

# 2. Verify
kubectl get configmap kube-proxy -n kube-system -o jsonpath='{.data.config\.conf}' | grep -oP 'strictARP: \K\w+'
kubectl get configmap -n kube-system kube-proxy -o yaml | grep strictARP

# 3. Restart kube-proxy so the change takes effect
kubectl -n kube-system rollout restart ds kube-proxy
kubectl -n kube-system get pods -o wide -l k8s-app=kube-proxy


# 4. Create the Eip object (address: unused IPs in the same subnet; the pool must not overlap addresses already in use)
cat <<EOF  > layer2-eip.yaml
apiVersion: network.kubesphere.io/v1alpha2
kind: Eip
metadata:
  name: layer2-eip
spec:
  address: 192.168.10.41-192.168.10.59
  interface: eth0
  protocol: layer2
EOF

kubectl apply -f layer2-eip.yaml
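
Eip is a CRD, so kubectl can confirm the pool exists and show how much of it is in use:
kubectl get eip layer2-eip
kubectl get eip layer2-eip -o yaml | grep -A8 'status:'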
  4. Configure OpenELB in KubeSphere
# 1. Enable the gateway
Workbench → Workspace (hcsystem) → Projects → devops → Project Settings → Gateway Settings → Enable Gateway
  1. Access mode: LoadBalancer
  2. Load balancer provider: OpenELB
  3. Annotations (see the Service sketch after this list):
     lb.kubesphere.io/v1alpha1: openelb
     eip.openelb.kubesphere.io/v1alpha2: layer2-eip
     protocol.openelb.kubesphere.io/v1alpha1: layer2
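
Outside the console, the same three annotations go onto any Service of type LoadBalancer; a minimal sketch with a hypothetical app label (demo):
# Hypothetical Service; the annotations and Eip name come from the steps above
cat <<EOF > demo-lb-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: demo-svc
  annotations:
    lb.kubesphere.io/v1alpha1: openelb
    eip.openelb.kubesphere.io/v1alpha2: layer2-eip
    protocol.openelb.kubesphere.io/v1alpha1: layer2
spec:
  type: LoadBalancer
  selector:
    app: demo
  ports:
  - name: http
    port: 80
    targetPort: 8080
EOF
kubectl apply -f demo-lb-svc.yaml
# EXTERNAL-IP should be assigned from the layer2-eip pool
kubectl get svc demo-svc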