Deploying dind (Docker in Docker)

We will now deploy a dind service on k8s to provide the image-building backend for the whole CI (continuous integration) pipeline.

Looking at the output of docker version: Docker uses a client/server (C/S) architecture. By default the Docker daemon does not listen on any network port; it creates a Unix socket file (/var/run/docker.sock) for local inter-process communication. The client and daemon talk to each other over a REST API, so we can also make the Docker daemon listen on a TCP port. That is what makes it feasible for a docker client to call a remote docker daemon and have it build images for us.
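
For example, once a daemon is listening on TCP, any docker client can use it by pointing DOCKER_HOST at it (a minimal sketch; the address below is only an illustration and must match your actual daemon):

export DOCKER_HOST="tcp://10.0.1.201:2375"
docker version    # the Server section now reports the remote daemon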

Level 15 of the k8s architect course: GitLab-based CI/CD automation, part 6

docker in docker

# if pip install inside dind is killed with exit code 137 (128+9, i.e. SIGKILL), the pod's resource limits (cpu/memory) probably need to be raised

# a host that only has the docker client can use this dind service as its daemon:
#dindSvc=$(kubectl -n kube-system get svc dind |awk 'NR==2{print $3}')
#export DOCKER_HOST="tcp://${dindSvc}:2375/"
#export DOCKER_DRIVER=overlay2
#export DOCKER_TLS_CERTDIR=""


---
# SVC
kind: Service
apiVersion: v1
metadata:
  name: dind
  namespace: kube-system
spec:
  selector:
    app: dind
  ports:
    - name: tcp-port
      port: 2375
      protocol: TCP
      targetPort: 2375

---
# Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dind
  namespace: kube-system
  labels:
    app: dind
spec:
  replicas: 1
  selector:
    matchLabels:
      app: dind
  template:
    metadata:
      labels:
        app: dind
    spec:
      hostNetwork: true
      containers:
      - name: dind
        #image: docker:19-dind
        image: harbor.boge.com/library/docker:19-dind
        lifecycle:
          postStart:
            exec:
              command: ["/bin/sh", "-c", "docker login harbor.boge.com -u 'admin' -p 'boge666'"]
          # when this pod is deleted, sleep briefly so kube-proxy has time to flush its rules before the endpoint goes away
          preStop:
            exec:
              command: ["/bin/sh", "-c", "sleep 5"]
        ports:
        - containerPort: 2375
#        resources:
#          requests:
#            cpu: 200m
#            memory: 256Mi
#          limits:
#            cpu: 0.5
#            memory: 1Gi
        readinessProbe:
          tcpSocket:
            port: 2375
          initialDelaySeconds: 10
          periodSeconds: 30
        livenessProbe:
          tcpSocket:
            port: 2375
          initialDelaySeconds: 10
          periodSeconds: 30
        securityContext:
          privileged: true
        env: 
          - name: DOCKER_HOST 
            value: tcp://localhost:2375
          - name: DOCKER_DRIVER 
            value: overlay2
          - name: DOCKER_TLS_CERTDIR 
            value: ''
        volumeMounts: 
          - name: docker-graph-storage
            mountPath: /var/lib/docker
          - name: tz-config
            mountPath: /etc/localtime
           # kubectl -n kube-system create secret generic harbor-ca --from-file=harbor-ca=/data/harbor/ssl/tls.cert
          - name: harbor-ca
            mountPath: /etc/docker/certs.d/harbor.boge.com/ca.crt
            subPath: harbor-ca
       # kubectl create secret docker-registry boge-secret --docker-server=harbor.boge.com --docker-username=admin --docker-password=boge666 --docker-email=admin@boge.com
      hostAliases:
      - hostnames:
        - harbor.boge.com
        ip: 10.0.1.204
      imagePullSecrets:
      - name: bogeharbor
      volumes:
#      - emptyDir:
#          medium: ""
#          sizeLimit: 10Gi
      - hostPath:
          path: /var/lib/container/docker
        name: docker-graph-storage
      - hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
        name: tz-config
      - name: harbor-ca
        secret:
          secretName: harbor-ca
          defaultMode: 0600
#
#        kubectl taint node 10.0.1.201 Ingress=:NoExecute
#        kubectl describe node 10.0.1.201 |grep -i taint
#        kubectl taint node 10.0.1.201 Ingress:NoExecute-
      nodeSelector:
        kubernetes.io/hostname: "10.0.1.201"
      tolerations:
      - operator: Exists
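
Assuming the manifest above is saved as dind.yaml, rolling it out and smoke-testing the daemon looks roughly like this (a sketch):

kubectl apply -f dind.yaml
kubectl -n kube-system get deploy/dind svc/dind
dindSvc=$(kubectl -n kube-system get svc dind -o jsonpath='{.spec.clusterIP}')
DOCKER_HOST="tcp://${dindSvc}:2375" docker info    # should print info from the dind daemon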

Alternative to docker in docker

This docker-in-docker setup no longer works well after Kubernetes 1.24, because Docker (dockershim) was replaced by containerd as the cluster's container runtime. The workaround is to set up a separate virtual machine running Docker purely for building images; Docker on that machine needs its TCP endpoint enabled:

ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock -H 0.0.0.0:2375

The complete unit file:

[root@k8s-gitlab ~]# cat  /lib/systemd/system/docker.service | grep -Ev '^#|^$'
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
[Service]
Type=notify
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock -H 0.0.0.0:2375
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
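
After editing the unit file, reload systemd and restart Docker, then confirm the TCP endpoint is up (a sketch; note that 2375 is plain, unauthenticated TCP, so keep it reachable only from trusted networks):

systemctl daemon-reload
systemctl restart docker
ss -lntp | grep 2375                      # dockerd should now listen on 0.0.0.0:2375
docker -H tcp://127.0.0.1:2375 version    # the Server section should come back over TCP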

Get this machine's IP address:

[root@k8s-gitlab ~]# hostname -i
10.0.0.1
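
This is the address the CI jobs below point DOCKER_HOST at. A quick connectivity check from the GitLab runner host (a sketch, assuming the runner can reach 10.0.0.1):

docker -H tcp://10.0.0.1:2375 info
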
The project's .gitlab-ci.yml:
stages:
  - build
  - deploy
  - rollback

# required tag name format, e.g.: 20.11.21.01
variables:
  namecb: "hyperf"
  svcport: "9501"
  replicanum: "2"
  CanarylIngressNum: "20"

  ingress: "interface.mcake.com"
  certname: "mytls"

  testIngress: "tinterface.mcake.com"
  testCertname: "mytls-test"

.deploy_k8s: &deploy_k8s |
  if [ $CANARY_CB -eq 1 ];then cp -arf .project-name-canary.yaml ${namecb}-${CI_COMMIT_TAG}.yaml; sed -ri "s+CanarylIngressNum+${CanarylIngressNum}+g" ${namecb}-${CI_COMMIT_TAG}.yaml; sed -ri "s+NomalIngressNum+$(expr 100 - ${CanarylIngressNum})+g" ${namecb}-${CI_COMMIT_TAG}.yaml ;else cp -arf .project-name.yaml ${namecb}-${CI_COMMIT_TAG}.yaml;fi
  if [ $TEST_ENV -eq 1 ];then sed -ri "s+projectnamecb.boge.com+${testIngress}+g" ${namecb}-${CI_COMMIT_TAG}.yaml; sed -ri "s+mytls+${testCertname}+g" ${namecb}-${CI_COMMIT_TAG}.yaml;else    sed -ri "s+projectnamecb.boge.com+${ingress}+g" ${namecb}-${CI_COMMIT_TAG}.yaml;sed -ri "s+mytls+${certname}+g" ${namecb}-${CI_COMMIT_TAG}.yaml; fi

  sed -ri "s+projectnamecb+${namecb}+g" ${namecb}-${CI_COMMIT_TAG}.yaml
  sed -ri "s+5000+${svcport}+g" ${namecb}-${CI_COMMIT_TAG}.yaml
  sed -ri "s+replicanum+${replicanum}+g" ${namecb}-${CI_COMMIT_TAG}.yaml
  sed -ri "s+mytagcb+${CI_COMMIT_TAG}+g" ${namecb}-${CI_COMMIT_TAG}.yaml
  sed -ri "s+harbor.boge.com/library+${IMG_URL}+g" ${namecb}-${CI_COMMIT_TAG}.yaml
  cat ${namecb}-${CI_COMMIT_TAG}.yaml
  [ -d ~/.kube ] || mkdir ~/.kube
  echo "$KUBE_CONFIG" > ~/.kube/config
  if [ $NORMAL_CB -eq 1 ];then if kubectl get deployments.|grep -w ${namecb}-canary &>/dev/null;then kubectl delete deployments.,svc ${namecb}-canary ;fi;fi
  kubectl apply -f ${namecb}-${CI_COMMIT_TAG}.yaml --record
  echo
  echo
  echo "============================================================="
  echo "                    Rollback Indx List"
  echo "============================================================="
  kubectl rollout history deployment ${namecb}|tail -5|awk -F"[ =]+" '{print $1"\t"$5}'|sed '$d'|sed '$d'|sort -r|awk '{print $NF}'|awk '$0=""NR".   "$0'

.rollback_k8s: &rollback_k8s |
  [ -d ~/.kube ] || mkdir ~/.kube
  echo "$KUBE_CONFIG" > ~/.kube/config
  last_version_command=$( kubectl rollout history deployment ${namecb}|tail -5|awk -F"[ =]+" '{print $1"\t"$5}'|sed '$d'|sed '$d'|tail -${ROLL_NUM}|head -1 )
  last_version_num=$( echo ${last_version_command}|awk '{print $1}' )
  last_version_name=$( echo ${last_version_command}|awk '{print $2}' )
  kubectl rollout undo deployment ${namecb} --to-revision=$last_version_num
  echo $last_version_num
  echo $last_version_name
  kubectl rollout history deployment ${namecb}


build:
  stage: build
  retry: 2
  variables:
    # use dind.yaml to deploy the dind service on k8s
    # DOCKER_HOST: tcp://dind.kube-system.svc:2375/
    # DOCKER_HOST: tcp://docker.host.com:2375/   # or call a standalone docker host
    DOCKER_HOST: tcp://10.0.0.1:2375/
    DOCKER_DRIVER: overlay2
    DOCKER_TLS_CERTDIR: ""
  ##services:
    ##- docker:dind
  before_script:
    - docker login ${REGISTRY_URL} -u "$DOCKER_USER" -p "$DOCKER_PASS"
  script:
    - docker pull ${REGISTRY_URL}/${REGISTRY_NS}/${namecb}:latest || true
    - docker build --network host --cache-from ${REGISTRY_URL}/${REGISTRY_NS}/${namecb}:latest --tag ${REGISTRY_URL}/${REGISTRY_NS}/${namecb}:$CI_COMMIT_TAG --tag ${REGISTRY_URL}/${REGISTRY_NS}/${namecb}:latest .
    - docker push ${REGISTRY_URL}/${REGISTRY_NS}/${namecb}:$CI_COMMIT_TAG
    - docker push ${REGISTRY_URL}/${REGISTRY_NS}/${namecb}:latest
  after_script:
    - docker logout ${REGISTRY_URL}
  tags:
    - "docker"
  only:
    - tags






#--------------------------K8S DEPLOY--------------------------------------------------
#---------------------------------------------prod

PROD-deploy:
  stage: deploy
  image: harbor.mcake.com/gitlab/kubectl:v1.20.4-aliyun.1
  variables:
    KUBE_CONFIG: "$KUBE_CONFIG_MCAKE"
    IMG_URL: "${REGISTRY_URL}/${REGISTRY_NS}"
    NORMAL_CB: 1
  script:
    - *deploy_k8s
  when: manual
  only:
    - tags


# canary start
PROD-canary-deploy:
  stage: deploy
  image: harbor.mcake.com/gitlab/kubectl:v1.20.4-aliyun.1
  variables:
    KUBE_CONFIG: "$KUBE_CONFIG_MCAKE"
    IMG_URL: "${REGISTRY_URL}/${REGISTRY_NS}"
    CANARY_CB: 1
  script:
    - *deploy_k8s
  when: manual
  only:
    - tags
# canary end


PROD-rollback-1:
  stage: rollback
  image: harbor.mcake.com/gitlab/kubectl:v1.20.4-aliyun.1
  variables:
    KUBE_CONFIG: "$KUBE_CONFIG_MCAKE"
    ROLL_NUM: 1
  script:
    - *rollback_k8s
  when: manual
  only:
    - tags


PROD-rollback-2:
  stage: rollback
  image: harbor.mcake.com/gitlab/kubectl:v1.20.4-aliyun.1
  variables:
    KUBE_CONFIG: "$KUBE_CONFIG_MCAKE"
    ROLL_NUM: 2
  script:
    - *rollback_k8s
  when: manual
  only:
    - tags


PROD-rollback-3:
  stage: rollback
  image: harbor.mcake.com/gitlab/kubectl:v1.20.4-aliyun.1
  variables:
    KUBE_CONFIG: "$KUBE_CONFIG_MCAKE"
    ROLL_NUM: 3
  script:
    - *rollback_k8s
  when: manual
  only:
    - tags


#---------------------------------------------test
TEST-deploy:
  stage: deploy
  image: harbor.mcake.com/gitlab/kubectl:v1.20.4-aliyun.1
  variables:
    KUBE_CONFIG: "$KUBE_CONFIG_TEST_MCAKE"
    IMG_URL: "${REGISTRY_URL}/${REGISTRY_NS}"
    NORMAL_CB: 1
    TEST_ENV: 1
  script:
    - *deploy_k8s
  when: manual
  only:
    - tags


# canary start
TEST-canary-deploy:
  stage: deploy
  image: harbor.mcake.com/gitlab/kubectl:v1.20.4-aliyun.1
  variables:
    KUBE_CONFIG: "$KUBE_CONFIG_TEST_MCAKE"
    IMG_URL: "${REGISTRY_URL}/${REGISTRY_NS}"
    CANARY_CB: 1
    TEST_ENV: 1
  script:
    - *deploy_k8s
  when: manual
  only:
    - tags
# canary end


TEST-rollback-1:
  stage: rollback
  image: harbor.mcake.com/gitlab/kubectl:v1.20.4-aliyun.1
  variables:
    KUBE_CONFIG: "$KUBE_CONFIG_TEST_MCAKE"
    ROLL_NUM: 1
  script:
    - *rollback_k8s
  when: manual
  only:
    - tags


TEST-rollback-2:
  stage: rollback
  image: harbor.mcake.com/gitlab/kubectl:v1.20.4-aliyun.1
  variables:
    KUBE_CONFIG: "$KUBE_CONFIG_TEST_MCAKE"
    ROLL_NUM: 2
  script:
    - *rollback_k8s
  when: manual
  only:
    - tags


TEST-rollback-3:
  stage: rollback
  image: harbor.mcake.com/gitlab/kubectl:v1.20.4-aliyun.1
  variables:
    KUBE_CONFIG: "$KUBE_CONFIG_TEST_MCAKE"
    ROLL_NUM: 3
  script:
    - *rollback_k8s
  when: manual
  only:
    - tags
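
Every job above is restricted to tag pipelines (only: tags), so the whole flow is driven by pushing a tag in the agreed format, for example (illustrative tag name):

git tag 24.09.11.01
git push origin 24.09.11.01
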
Document last updated: 2024-09-11 15:27   Author: 李延召