Adapting the hume project to Kubernetes
I. Modify the build directory structure
1. Add a build-work folder under the project root
The directory structure is as follows:
[root@k8s-worker-01 build-work]# tree .
.
├── Dockerfile
├── hume
│   └── start.sh
└── Jenkinsfile
2. The contents of each file are as follows.
Dockerfile
FROM ccrs.tencentyun/xxxx/php_supervisor:kafka
USER root
ENV ZBE_PATH /biz-code
ADD hume /biz-code/hume
WORKDIR /biz-code/hume
CMD ./start.sh
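Before wiring the image into the pipeline, the Dockerfile can be sanity-checked locally. A minimal sketch, assuming Docker is available on the workstation, that the application code has already been copied into build-work/hume (the pipeline's Building stage does this with rsync), and using a throwaway tag hume-local that the pipeline itself never uses:
cd build-work
# Build the image the same way the pipeline's docker stage would
docker build -t hume-local:test .
# Run it once to confirm that start.sh brings up supervisord
docker run --rm -it hume-local:test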
Jenkinsfile
pipeline {
  agent {
    kubernetes {
      cloud 'kubernetes'
      cloud 'kubernetes-test'
      slaveConnectTimeout 1200
      workspaceVolume hostPathWorkspaceVolume(hostPath: "/opt/workspace", readOnly: false)
      yaml '''
apiVersion: v1
kind: Pod
spec:
  containers:
  - args: [\'$(JENKINS_SECRET)\', \'$(JENKINS_NAME)\']
    image: 'jenkins/jnlp-slave:latest-jdk11'
    name: jnlp
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - mountPath: "/etc/localtime"
      name: "localtime"
      readOnly: false
  - command:
    - "cat"
    env:
    - name: "LANGUAGE"
      value: "en_US:en"
    - name: "LC_ALL"
      value: "en_US.UTF-8"
    - name: "LANG"
      value: "en_US.UTF-8"
    image: "ccrs.tencentyun/xxxx/php_supervisor:kafka-k8s"
    imagePullPolicy: "IfNotPresent"
    name: "build"
    tty: true
    volumeMounts:
    - mountPath: "/etc/localtime"
      name: "localtime"
    - mountPath: "/biz-code/hume/vendor"
      name: "phpdir"
      readOnly: false
  - command:
    - "cat"
    env:
    - name: "LANGUAGE"
      value: "en_US:en"
    - name: "LC_ALL"
      value: "en_US.UTF-8"
    - name: "LANG"
      value: "en_US.UTF-8"
    image: "registry-beijing.aliyuncs/citools/kubectl:self-1.17"
    imagePullPolicy: "IfNotPresent"
    name: "kubectl"
    tty: true
    volumeMounts:
    - mountPath: "/etc/localtime"
      name: "localtime"
      readOnly: false
  - command:
    - "cat"
    env:
    - name: "LANGUAGE"
      value: "en_US:en"
    - name: "LC_ALL"
      value: "en_US.UTF-8"
    - name: "LANG"
      value: "en_US.UTF-8"
    image: "registry-beijing.aliyuncs/citools/docker:19.03.9-git"
    imagePullPolicy: "IfNotPresent"
    name: "docker"
    tty: true
    volumeMounts:
    - mountPath: "/etc/localtime"
      name: "localtime"
      readOnly: false
    - mountPath: "/var/run/docker.sock"
      name: "dockersock"
      readOnly: false
  restartPolicy: "Never"
  imagePullSecrets:
  - name: qcloudregistrykey
  nodeSelector:
    build: "true"
  securityContext: {}
  volumes:
  - hostPath:
      path: "/var/run/docker.sock"
    name: "dockersock"
  - hostPath:
      path: "/usr/share/zoneinfo/Asia/Shanghai"
    name: "localtime"
  - name: "cachedir"
    hostPath:
      path: "/opt/gopkg"
  - name: "phpdir"
    hostPath:
      path: "/opt/phppkg"
'''
    }
}
  stages {
    stage('Pulling Code') {
      parallel {
        stage('Pulling Code by Jenkins') {
          when {
            expression {
              env.giteeBranch == null
            }
          }
          steps {
            git(changelog: true, poll: true, url: '.git', branch: "${BRANCH}", credentialsId: 'gitee-mima')
            script {
              COMMIT_ID = sh(returnStdout: true, script: "git log -n 1 --pretty=format:'%h'").trim()
              TAG = BUILD_TAG + '-' + COMMIT_ID
              println "Current branch is ${BRANCH}, Commit ID is ${COMMIT_ID}, Image TAG is ${TAG}"
            }
          }
        }
        stage('Pulling Code by trigger') {
          when {
            expression {
              env.giteeBranch != null
            }
          }
          steps {
            git(url: '.git', branch: env.giteeBranch, changelog: true, poll: true, credentialsId: 'gitee-mima')
            script {
              COMMIT_ID = sh(returnStdout: true, script: "git log -n 1 --pretty=format:'%h'").trim()
              TAG = BUILD_TAG + '-' + COMMIT_ID
              println "Current branch is ${env.giteeBranch}, Commit ID is ${COMMIT_ID}, Image TAG is ${TAG}"
            }
          }
        }
      }
    }
    stage('Building') {
      steps {
        container(name: 'build') {
          sh """
            pwd
            whoami
            cp Scripts/init.sh.dev init.sh
            chmod +x init.sh
            ./init.sh
            composer config -g --unset repos.packagist
            composer config repo.packagist composer
            composer update
            sudo chmod -R 777 ./*
            sudo rsync -avz --exclude build-work ./* build-work/hume/
          """
        }
      }
    }
    stage('Docker build for creating image') {
      environment {
        HARBOR_USER = credentials('registry-secret')
      }
      steps {
        container(name: 'docker') {
          sh """
            cd build-work
            echo ${HARBOR_USER_USR} ${HARBOR_USER_PSW} ${TAG}
            docker login -u ${HARBOR_USER_USR} -p ${HARBOR_USER_PSW} ${HARBOR_ADDRESS}
            docker build -t ${HARBOR_ADDRESS}/${REGISTRY_DIR}/${IMAGE_NAME}:${TAG} .
            docker push ${HARBOR_ADDRESS}/${REGISTRY_DIR}/${IMAGE_NAME}:${TAG}
          """
        }
      }
    }
    stage('Deploying to K8s') {
      environment {
        MY_KUBECONFIG = credentials('k8s-config')
      }
      steps {
        container(name: 'kubectl') {
          sh """
            /usr/local/bin/kubectl --kubeconfig $MY_KUBECONFIG set image deploy -l app=${IMAGE_NAME} ${IMAGE_NAME}=${HARBOR_ADDRESS}/${REGISTRY_DIR}/${IMAGE_NAME}:${TAG} -n $NAMESPACE
          """
        }
      }
    }
  }
  environment {
    COMMIT_ID = ""
    HARBOR_ADDRESS = "ccrs.tencentyun"
    REGISTRY_DIR = "xxxx"
    IMAGE_NAME = "hume"
    NAMESPACE = "dev"
    TAG = ""
  }
  parameters {
    gitParameter(branch: '', branchFilter: 'origin/(.*)', defaultValue: 'dev', description: 'Branch for build and deploy', name: 'BRANCH', quickFilterEnabled: false, selectedValue: 'NONE', sortMode: 'NONE', tagFilter: '*', type: 'PT_BRANCH')
  }
}
hume/start.sh
#!/bin/sh
# Create the supervisor log directory if it does not exist yet
mkdir -p /biz-code/hume/data/logs/supervisor
# Start supervisord with the project config, then restart all managed programs
supervisord -c /biz-code/hume/Config/supervisor.conf
supervisorctl restart all
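Once a pod built from this image is running, a quick way to confirm that supervisord started its programs is to query supervisorctl inside the business container. A sketch, assuming the Deployment from Part III (namespace dev, pod label app=hume, container name hume) and that supervisorctl is on the PATH in the image:
# Pick one hume pod in the dev namespace
POD=$(kubectl get pod -n dev -l app=hume -o jsonpath='{.items[0].metadata.name}')
# Ask supervisord for the status of all managed programs
kubectl exec -n dev "$POD" -c hume -- supervisorctl -c /biz-code/hume/Config/supervisor.conf status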
II. Add a pipeline job in Jenkins and get the image build flow working
1. Add a pipeline job in the Jenkins UI. Make sure the following plugins are installed in advance (a CLI install sketch follows the list):
Git
Git Parameter
Git Pipeline for Blue Ocean
GitLab
Credentials
Credentials Binding
Blue Ocean
Blue Ocean Pipeline Editor
Blue Ocean Core JS
Pipeline SCM API for Blue Ocean
Dashboard for Blue Ocean
Build With Parameters
Dynamic Extended Choice Parameter Plug-In
Dynamic Parameter Plug-in
Extended Choice Parameter
List Git Branches Parameter
Pipeline
Pipeline: Declarative
Kubernetes
Kubernetes CLI
Kubernetes Credentials
Image Tag Parameter
Active Choices
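If the Jenkins controller runs from the official jenkins/jenkins image, the plugins can also be installed non-interactively with jenkins-plugin-cli instead of through the UI. A sketch covering a subset of the list above; the plugin IDs are my mapping of the display names and may need checking against the update center:
jenkins-plugin-cli --plugins \
  git git-parameter gitlab-plugin \
  credentials credentials-binding \
  blueocean workflow-aggregator pipeline-model-definition \
  kubernetes kubernetes-cli kubernetes-credentials \
  extended-choice-parameter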
Note: an overview of the flow covered by Parts I and II.
First, a build is started in Jenkins and the target branch is selected (when the build is triggered automatically, the pushed branch is picked up instead; see the Jenkinsfile for details). Jenkins then calls Kubernetes through the Kubernetes plugin and starts an agent pod from the pod template defined in the Jenkinsfile. Inside that pod the code is pulled first, followed by a series of initialization steps (running init.sh, pulling dependency packages, and copying everything into build-work/hume so the image can be built from it). The image is then built and pushed to the registry, and finally kubectl is used to update the image version of the running deployment, which rolls out the release.
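The final deploy step of that flow can also be run (or re-run) by hand with kubectl, which helps when debugging a failed release. A sketch using the values from the Jenkinsfile environment block (namespace dev, image name hume) and, as an example tag, the one visible in the deployment manifest below:
# Update the image on every deployment labelled app=hume, exactly as the pipeline does
kubectl -n dev set image deploy -l app=hume hume=ccrs.tencentyun/xxxx/hume:jenkins-hume-dev-71-0f5bbb9a
# Watch the rolling update until it completes
kubectl -n dev rollout status deploy/hume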
III. Deployment and Service for the business workload
1. The business pod runs in a sidecar pattern: one business container plus one log-collection container. The logs that the business writes inside its container are collected through a shared volume (a quick check of the shared directory follows the manifest below).
hume-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hume
  labels:
    app: hume
spec:
  selector:
    matchLabels:
      app: hume
  replicas: 1
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 0
      maxSurge: 1
  # minReadySeconds: 30
  template:
    metadata:
      labels:
        app: hume
    spec:
      containers:
      - name: filebeat
        image: registry-beijing.aliyuncs/dotbalo/filebeat:7.10.2
        resources:
          requests:
            memory: "100Mi"
            cpu: "10m"
          limits:
            cpu: "200m"
            memory: "300Mi"
        imagePullPolicy: IfNotPresent
        env:
        - name: podIp
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: status.podIP
        - name: podName
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.name
        - name: podNamespace
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: podDeployName
          value: hume
        - name: TZ
          value: "Asia/Shanghai"
        securityContext:
          runAsUser: 0
        volumeMounts:
        - name: logpath
          mountPath: /data/log/
        - name: filebeatconf
          mountPath: /usr/share/filebeat/filebeat.yml
          subPath: usr/share/filebeat/filebeat.yml
      - name: hume
        image: ccrs.tencentyun/xxxx/hume:jenkins-hume-dev-71-0f5bbb9a
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: logpath
          mountPath: /biz-code/hume/data/logs/
        env:
        - name: TZ
          value: "Asia/Shanghai"
        - name: LANG
          value: C.UTF-8
        - name: LC_ALL
          value: C.UTF-8
        livenessProbe:
          failureThreshold: 2
          initialDelaySeconds: 30
          periodSeconds: 10
          successThreshold: 1
          tcpSocket:
            port: 7777
          timeoutSeconds: 2
        ports:
        - containerPort: 7777
          name: web
          protocol: TCP
        readinessProbe:
          failureThreshold: 2
          initialDelaySeconds: 30
          periodSeconds: 10
          successThreshold: 1
          tcpSocket:
            port: 7777
          timeoutSeconds: 2
        resources:
          limits:
            cpu: 994m
            memory: 1170Mi
          requests:
            cpu: 300m
            memory: 300Mi
      dnsPolicy: ClusterFirst
      imagePullSecrets:
      - name: qcloudregistrykey
      restartPolicy: Always
      securityContext: {}
      serviceAccountName: default
      volumes:
      - name: logpath
        emptyDir: {}
      - name: filebeatconf
        configMap:
          name: filebeatconf
          items:
          - key: filebeat.yml
            path: usr/share/filebeat/filebeat.yml
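As mentioned above, both containers mount the logpath emptyDir, so whatever the business writes under /biz-code/hume/data/logs/ should be visible under /data/log/ in the filebeat container. A quick check (a sketch, assuming the label and container names from the manifest):
POD=$(kubectl get pod -n dev -l app=hume -o jsonpath='{.items[0].metadata.name}')
# The log files as seen from the business container...
kubectl exec -n dev "$POD" -c hume -- ls -l /biz-code/hume/data/logs/
# ...should match what the filebeat sidecar sees
kubectl exec -n dev "$POD" -c filebeat -- ls -l /data/log/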
hume-service.yaml
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: hume
  name: hume-service
  namespace: dev
spec:
  ports:
  - name: hume
    port: 7777
    protocol: TCP
    targetPort: 7777
  selector:
    app: hume
  sessionAffinity: None
  type: NodePort
status:
  loadBalancer: {}
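Because the Service is of type NodePort, port 7777 is also exposed on every node once the manifest is applied. A minimal reachability check; NODE_IP below is a placeholder for any worker node address:
kubectl get svc hume-service -n dev
NODE_PORT=$(kubectl get svc hume-service -n dev -o jsonpath='{.spec.ports[0].nodePort}')
# Replace NODE_IP with a real worker node address before running
curl -v "http://NODE_IP:${NODE_PORT}/"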
IV. Log collection
The log pipeline uses Filebeat + Kafka + Logstash + Elasticsearch + Kibana.
The Filebeat container is attached to the business container as a sidecar and pushes the collected logs to Kafka. A topic is created in Kafka; Logstash consumes the business logs from that topic and forwards them to Elasticsearch, and Kibana displays them.
The files needed below can be found at the following link:
.10.2/filebeat
1. Install Helm
wget .1.2-linux-amd64.tar.gz
tar xf helm-v3.1.2-linux-amd64.tar.gz
mv linux-amd64/ helm
cd helm/
cp -r helm /usr/local/bin/
helm version
Add the two repositories:
helm repo add bitnami
helm repo add ali-stable
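(The repository URLs are omitted above.) After adding the repositories it is worth refreshing the index and confirming that the available chart versions match the tarballs pulled in the next step; a short sketch:
helm repo update
helm search repo bitnami/zookeeper
helm search repo bitnami/kafka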
2. Install Kafka and ZooKeeper
helm pull bitnami/zookeeper
tar xf zookeeper-11.1.2.tgz
cd zookeeper/
helm install zookeeper -n logging --set auth.enabled=false --set allowAnonymousLogin=true --set persistence.enabled=false .
cd ../
helm pull bitnami/kafka
tar xf kafka-20.0.6.tgz
cd kafka/
helm install kafka -n logging --set zookeeper.enabled=false --set replicaCount=1 --set externalZookeeper.servers=zookeeper --set persistence.enabled=false .
kubectl get pod -n logging
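To verify that Filebeat messages actually reach the filebeat-sidecar topic, a console consumer can be run against the in-cluster broker. A sketch, assuming the broker pod created by the chart above is named kafka-0 and that the listener allows plain, unauthenticated access (no auth options were set during the install):
kubectl exec -it -n logging kafka-0 -- kafka-console-consumer.sh \
  --bootstrap-server kafka.logging.svc.cluster.local:9092 \
  --topic filebeat-sidecar --from-beginning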
3. Install Logstash (the Filebeat ConfigMap referenced by the Deployment in Part III is also listed here)
filebeat-cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeatconf
data:
  filebeat.yml: |-
    filebeat.inputs:
    - input_type: log
      paths:
        - /data/log/*/*.log
      tail_files: true
      fields:
        pod_name: '${podName}'
        pod_ip: '${podIp}'
        pod_deploy_name: '${podDeployName}'
        pod_namespace: '${podNamespace}'
    output.kafka:
      hosts: ["kafka.logging:9092"]
      topic: "filebeat-sidecar"
      codec.json:
        pretty: false
      keep_alive: 30s
logstash-cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-configmap
data:
  logstash.yml: |
    http.host: "0.0.0.0"
    path.config: /usr/share/logstash/pipeline
  logstash.conf: |
    # all input will come from filebeat, no local logs
    input {
      kafka {
        enable_auto_commit => true
        auto_commit_interval_ms => "1000"
        bootstrap_servers => "kafka:9092"
        topics => ["filebeat-sidecar"]
        type => ["filebeat-sidecar"]
        codec => multiline {
          pattern => "^\d{4}-"
          negate => true
          what => "previous"
        }
      }
    }
    output {
      stdout { codec => rubydebug }
      if [type] == "filebeat-sidecar" {
        elasticsearch {
          hosts => ["172.16.64.12:6123"]
          index => "filebeat-%{+YYYY.MM.dd}"
        }
      } else {
        elasticsearch {
          hosts => ["172.16.64.12:6123"]
          index => "other-input-%{+YYYY.MM.dd}"
        }
      }
    }
logstash-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash-deployment
spec:
  selector:
    matchLabels:
      app: logstash
  replicas: 1
  template:
    metadata:
      labels:
        app: logstash
    spec:
      containers:
      - name: logstash
        image: registry-beijing.aliyuncs/dotbalo/logstash:7.10.1
        ports:
        - containerPort: 5044
        volumeMounts:
        - name: config-volume
          mountPath: /usr/share/logstash/config
        - name: logstash-pipeline-volume
          mountPath: /usr/share/logstash/pipeline
      volumes:
      - name: config-volume
        configMap:
          name: logstash-configmap
          items:
          - key: logstash.yml
            path: logstash.yml
      - name: logstash-pipeline-volume
        configMap:
          name: logstash-configmap
          items:
          - key: logstash.conf
            path: logstash.conf
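Once the ConfigMap and Deployment are applied, the rubydebug stdout output configured above makes it easy to see whether events are flowing through Logstash. A sketch, assuming everything is deployed into the logging namespace:
kubectl apply -n logging -f logstash-cm.yaml -f logstash-deploy.yaml
# Follow the Logstash log; events consumed from Kafka are printed by the rubydebug codec
kubectl logs -n logging -f deploy/logstash-deployment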
logstash-service.yaml
kind: Service
apiVersion: v1
metadata:
  name: logstash-service
spec:
  selector:
    app: logstash
  ports:
  - protocol: TCP
    port: 5044
    targetPort: 5044
  type: ClusterIP
Elasticsearch and Kibana were installed earlier with Docker, so their setup is not covered here; just make sure the Logstash config above points at the correct Elasticsearch address.
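A simple end-to-end check is to ask Elasticsearch whether the daily filebeat index is being created, using the address from the Logstash config above (assuming it is reachable from wherever curl runs):
# Look for filebeat-YYYY.MM.dd indices fed by Logstash
curl -s "http://172.16.64.12:6123/_cat/indices?v" | grep filebeat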
That is basically the whole flow. Monitoring and other pieces will be added later.