Configuration manifests

- **es.yaml**

```yaml
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch-logging
  namespace: kube-system
  labels:
    k8s-app: elasticsearch-logging
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Elasticsearch"
spec:
  ports:
  - port: 9200
    protocol: TCP
    targetPort: db
  selector:
    k8s-app: elasticsearch-logging

---
# RBAC authn and authz
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elasticsearch-logging
  namespace: kube-system
  labels:
    k8s-app: elasticsearch-logging
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: elasticsearch-logging
  labels:
    k8s-app: elasticsearch-logging
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - "services"
  - "namespaces"
  - "endpoints"
  verbs:
  - "get"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: kube-system
  name: elasticsearch-logging
  labels:
    k8s-app: elasticsearch-logging
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
  name: elasticsearch-logging
  namespace: kube-system
  apiGroup: ""
roleRef:
  kind: ClusterRole
  name: elasticsearch-logging
  apiGroup: ""

---
# Elasticsearch deployment itself
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch-logging
  namespace: kube-system
  labels:
    k8s-app: elasticsearch-logging
    version: v6.2.5
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  serviceName: elasticsearch-logging
  replicas: 1
  selector:
    matchLabels:
      k8s-app: elasticsearch-logging
      version: v6.2.5
  template:
    metadata:
      labels:
        k8s-app: elasticsearch-logging
        version: v6.2.5
        kubernetes.io/cluster-service: "true"
    spec:
      serviceAccountName: elasticsearch-logging
      containers:
      - image: harbor.querycap.com/rk-infra/elasticsearch:6.8.8-v0.4
        name: elasticsearch-logging
        resources:
          # need more cpu upon initialization, therefore burstable class
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        ports:
        - containerPort: 9200
          name: db
          protocol: TCP
        - containerPort: 9300
          name: transport
          protocol: TCP
        volumeMounts:
        - name: elasticsearch-logging
          mountPath: /usr/share/elasticsearch/data/
        - mountPath: /usr/share/elasticsearch/config/elastic-certificates.p12
          name: keystore
          readOnly: true
          subPath: elastic-certificates.p12
        env:
        - name: "NAMESPACE"
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: "xpack.security.enabled"
          value: "true"
        - name: "xpack.security.transport.ssl.enabled"
          value: "true"
        - name: xpack.security.transport.ssl.verification_mode
          value: "certificate"
        - name: xpack.security.transport.ssl.keystore.path
          value: "/usr/share/elasticsearch/config/elastic-certificates.p12"
        - name: xpack.security.transport.ssl.truststore.path
          value: "/usr/share/elasticsearch/config/elastic-certificates.p12"
        - name: "xpack.ml.enabled"
          value: "false"
        - name: "xpack.monitoring.collection.enabled"
          value: "true"
        - name: "xpack.license.self_generated.type"
          value: "basic"
      volumes:
      - name: elasticsearch-logging
        hostPath:
          path: /data/es/
      - name: keystore
        secret:
          secretName: es-keystore
          defaultMode: 0444
      nodeSelector:
        es: data
      tolerations:
      - key: "dedicated"
        operator: "Equal"
        value: "es"
        effect: "NoExecute"
      # Elasticsearch requires vm.max_map_count to be at least 262144.
      # If your OS already sets up this number to a higher value, feel free
      # to remove this init container.
      initContainers:
      - name: elasticsearch-logging-init
        image: alpine:3.6
        command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
        securityContext:
          privileged: true
      - name: elasticsearch-volume-init
        image: alpine:3.6
        command:
        - chmod
        - -R
        - "777"
        - /usr/share/elasticsearch/data/
        volumeMounts:
        - name: elasticsearch-logging
          mountPath: /usr/share/elasticsearch/data/
```
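The first init container raises vm.max_map_count on the host, which Elasticsearch requires to be at least 262144. Before relying on it (or before removing it, as the comment suggests), it is easy to check what a node already has; a minimal sketch:

```sh
# Current value on the node; Elasticsearch wants at least 262144
sysctl vm.max_map_count

# What the privileged init container effectively runs if the value is too low
sysctl -w vm.max_map_count=262144
```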
- **es-secret.yaml**

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: es-keystore
  namespace: kube-system
data:
  elastic-certificates.p12: MIINdwIBAzCCDTAGCSqGSIb3DQEHAaCCDSEEgg0dMIINGTCCBW0GCSqGSIb3DQEHAaCCBV4EggVaMIIFVjCCBVIGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBQs3hsugdvGL7FWl+IJ6pxClg251AIDAMNQBIIEyF81+gPdcBuo0evFTyLXRtMD+YbLyJb7o3xzzS5BwZb+kvSCSw+lGkGybsL5iglxcBogmjTUbaSfjUbzCQ0xxisKVf1yR9DeP1e6Dm6lzF1lhOKYnxEtR813TXABy85ZEtczIDZkM5C+VKUoz/k4vuSLacaRaoEw4VKvXNo9mX+quzKajWKauZwDV9iP5Kf+4UZw6cQCPT4Xv2t7rab/l6e7peHL/r8hKDMmZAfRNgzjTMV1CrhIRHLPc9b5eOTUfk0uwIdjDhEE9ElB3dhbZFzbMItFPA2L2cCFi42eY1rQKATcoDpifF9NMjWJ8GtSnRmvc0irsJku8yoVZWDyghNgXAQT8JxGy9daV3WAMMbUIsqaV0r09WyH8kAikj/C+Cz9HwyctQs3vJc3fBMq5Dh7cxqaVctvVkxzcTPTv/OnCRg8dXto07xdsnTE37/qvrzw1cwyv5oZ2eBsem+490ulyWCIMrYN+owX1RScnPnI5RjOun2fRwN9G18LJrS6iJuSZVd4cQcWvMO/zmrsKga0KW3VFtJvTE+Pxad0wqN5h0Tx0DWhTJ8iqhMwPL+uHnhAQQUFvFPFSJEWwAYZgbGxYYRXkiZSML3KMWC57+2UYPKgSQHAVDc/RI+AJlMYfRw6QD174OZBG7PiBWJEiRtLqvH2dhcC9mGJEtOgrJyn/CDtSKgBnQm/beHG47BD4esX6A+RUgbvwsn4urYVMPy2qOlefAnSoru58bueQlt+j5CcsiQtaRlw3rzS+5V6U+VSFgVMZxsUzl8n3uV7uMfDSOx6abFGBMw8QNSMQhfaC9qqyU45QkyDeEvpyXY4ThDMV8Cw75g99+LpLfz1BHS2p/iG/6056Qmt7MC/3N6VVrvh5LCWQ6uFjGebqH9HKLDNHq31V7k50W0uZZdQLR0hFfnAX02zb4Rsen1vXvWH5kpu90tNe43IIzvFCD/dHM6L7tNzdNd3ke2bSmqsqp2bDAQew3Wpj3aWdI+HzjTmwONY7S5V88MQoQTeWXcjB8snSQkXYH4fh/vGfL+MpV+pSVtylIauDnMrj2JFOETGmQGHUuk9A1ofjzGdqcMIOGalzDrOb14eC5BVgfnKPOtR4bXar+UMmIIJwAaMQRqVO5D7mNhSPBwGPEW8SQQBbSfELaYHMi9q/Ii6B6D4knxt9eoWlDj7o4/M9omTuX53/HtWB1/4FbDinBNltr55dyazxPjRgzwAIr8mqUxxQoitWVys0oXos7yhizwdKn5/EaLJMo9ii9yIW058qvwpJ+juO36NCW1YK645X/fvu8+1UVC9VVHTxJSGei+RmHXJwRCVqSVG4ckHWPdlhKYPj3auMwkbWNqDOVipT13fwpTws2aGsxld4K6iWT9urbtxxhx8Hg0q35NMEILUODdjF3BbgsXZW40Hj3FN6ALvgFHolU7Homgna+Euvs/TJM62jrr02qZsRnZun5TvYRS9W5FVMU5Vjicfnp+ieCFM2Rl8iasd+h0t3DPBgv9m/xvYqpgD5aUyO+xyL0ikC8se0Nm+IxVKKV1ZATcLaXzZE3EkKu1Fk7T66ODEpkYmzkWzyP+XexqnDUEiQ35iR1yRyoKViArHgxmVx3u3TDo+Frt7c+wP2c/VkTFEMB8GCSqGSIb3DQEJFDESHhAAaQBuAHMAdABhAG4AYwBlMCEGCSqGSIb3DQEJFTEUBBJUaW1lIDE1ODkyNTQzODI2MjkwggekBgkqhkiG9w0BBwagggeVMIIHkQIBADCCB4oGCSqGSIb3DQEHATApBgoqhkiG9w0BDAEGMBsEFLzuguhJknOppoNt0cNAu/Yg9N7IAgMAw1CAggdQwSkHPuMWczUu39ME4ABWIYkp4Wk9MJ4HfM3ovhE+gskOjX6DG+o7H4sZ0okF0kcBoLAOo6Sgr/s9ZuVgYHTi5xtkGQaqbQUSMK03cpWUfue5f1ruRu93KVll6mMAz9z/s6/hG27hhanQUFDcZ4md9dFQHLv5ncBeMwrseeP2sUy/my1qYetlTIdskjfeNh2Dc2uZPTmDv6B/dj+nBHPd4B6zlqHB73JCvE8Qq6zWjjb6/d0geYMNKbdpbQjKbKxNgjzatand27Bh0Bpuk+OeqXF4+TrbwCyaiTnYYQPKbve/zsM7xVzvb5p9sdHmhOdI54+SuImIMuEQSikPmQk4MdPQaS27r7JAvsabdNm9jT1nzIMzINAj3b6LBTckbd94gP2ST2vizvq0maH2AtpZT5twX7AnfTnC2xuuKlZ1sJHsfl2Uw9JyvzNdMtZUaDO+0obesE1dBjH+nI5z75DzFLLbDl65HCJxL1qtvhSCXF676cQ2N4Mq2Sqs+nhzyVy+/NgRcfN/YtagWyYAzxdQmaW0lIQfdDcbYgW+1w+8RUht5imx5G/A0foKCcW1mGW2ocXzUkMxAqspHu3Tb3lazUpSS8BNWELtC+d3H8qx0qsPMenxe0sUxB/pUW5OFqcZCAC2hlUThtUD4E51joKJha0+DraiqmwaaUNFy2CcgqCd52WrwnpRV5gyS+Bk24d/4Dm3ezEZnMQxGiBwB55nHKWlgwAzTqnJWXmM/wYEkEVLsnN9bYBU2e8yM8Q9Qv6Vszg+8GqfIeXs4hjCAIJ8YWCDGtL/rM9+L0lsMfh73jo9Hj0UnvqLgIEaV6pqpk6O7NgdhiWEFDyIbbEWPEpFiLsGclpnELxZbxlWdDlcAM3dpU7hAMZ5AxSv6or7U7WkSfj/K8EzDjpQ66/klbsZiob/EEehdeBRv0ZFTR1uOLY+h5kJub0b/tflw36F0BA2wqxr6pCG8/zdKpET7CWu7TWKtX1Uw7/z16s3uJ6zYIyJJNXwB4c1bbdHiAjAfwh2SgVqGuRjQpN2/vKcwCRC/xWSa9ME/rAA6HhvtNgn0s5dTyKbbTKBkEvsWwfwEFAE3mlRO7t9waAmtWabNdJKSO2z70Qc3Irr+wJw+zDDD7+hfKcRxTFHZ65tlR75QgmWXRMMFMQW9nuKN9hY1k0XRv7qppD2jQRV19sR87Aq+5TbihzmbitYmeqBu87oi82HOu6/gReFGRgz1cGaS6y7+v00xoUiPU5Pc6k4bS0E2/1C/CUARN0K4ELCx+MqlWz01tDMnSzAOvDtB+EmgBIrCPCmVqwvoH2DS5hOoslYRx8LtQtQtb+DeFvxCRHCNzEi4+CwWC0GzWOW2Lz7feCyhwoDGkubRyESKbJ7VKWB4A5s5iyLp2Wc/ewFbBwaywpQ2AFezS8Ka2DiYPFLlf0shfycWjlQih6KYkxVHzd1i2E4mau0FWMNMirqRWkQ2rl5j7FoyNOqtfcy/K2N+FlzT6Y1Sjkw3imeqdxa+CWJAyC2W1dahDgnbR7JbLoKY5P8a++iJ6mbiHTRs8NcWtadcF0bbG1ycdcz/7jNzU7tDQODyfWH43Nf4GzcsPzv+WbDUmuUBuv8PrSeKIhMPc6XjmYV7Qgd8BEwwTpU3DTobvnO60HyoGBRJFsLfd7w0iXPUNqLWK4u1RSCyMjf1m+MfBHPunccqYaY4zgzj4cFkUMpAnpa6IziRDdUE0Vc3ePJSBAQ9WtA66sIIXm7lmwPzENBcUNcFLxX3H1PzWZzKuwBzTVsj7D+ECFL2jnjbq5HmvBKrC05YkT9bTonCWKYDwKwxheEZYSzexfJrHos98PZ28YiLA5F8ifQ/9Lc+T1prWuJ26hA8fUoicQHIaUS/rx3BuibnlfVroZUqsqC4zpYn3hKZDmO+jLrH3kxRTwHcS2Yo/qcseJy4OGseocvH5NDN5LnjHpEw+mqW7Qz7S9ZxmNxU9PwsYBq2Q39TKj0KjnGtIQjGYI4j+Ql6icC0Z3WbSCZseB4rEkG18LLpvdD/U0p6AwZ3MXPFYZOJn3k9TSs30Wbg4+bLUM9OT9LE8XLOFSEQCxgQyVAFpuQyfL3YnAThUoVJoWDr5DktNQ9pwtNhAo8xbQGdkAWUtKBkESRc4tekqSrxI0Zt0iD8VqluaTbt4RMJbtu2q+4vNLuJV6f3PeS0hzEsALwN3ZcNDHNRlVKhctDr0W2K1rVl+p+uGyDTvX7Yx6jEwghs0xoRhU3yU7f/FGhMhQZlZc/xLL/AJLXOAaTXIKO6PsySurWZrzHrboSk+65WmymdBOlEzNf+R/oriqeTNV/RoNVTglLyDdDmycYonbaexz03gHC/g40ypo7jECE3V130kApgnDvt6QRC/yd6NiNh1V4E8gVbm4J7k+gE++4tVE7KFGOSvWwPjkSbXe5PDejENuVX2v4UPuaMoUXAPUZSNNFao4bM4eS50U/g67pnl4PYbdtG/yrjCNnXCaRZRapDVD/MD4wITAJBgUrDgMCGgUABBR9QitEdRrOhirFpeeXYxRCiDXobgQUgdxm0RxqdfUMrM7V9NYJh5DmvWsCAwGGoA==
```
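The data value is simply the base64-encoded PKCS#12 keystore (how to generate the file itself is described at the end of this list). Assuming the certificate already exists locally, the value can be reproduced with:

```sh
# Single-line base64, suitable for pasting into the Secret's data field
base64 -w0 elastic-certificates.p12
```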
- **filebeat.yaml**

```yaml
---
# Source: filebeat/templates/filebeat-configmap.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: kube-system
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    max_procs: 1
    queue.mem.events: 16
    queue.mem.flush.min_events: 8
    filebeat.config:
      inputs:
        # Mounted `filebeat-inputs` configmap:
        path: ${path.config}/inputs.d/*.yml
        # Reload inputs configs as they change:
        reload.enabled: true
        enabled: true
      modules:
        path: ${path.config}/modules.d/*.yml
        # Reload module configs as they change:
        reload.enabled: true
    #output.kafka:
    #  hosts: ["kafka-log-01:9092", "kafka-log-02:9092", "kafka-log-03:9092"]
    #  topic: 'topic-test-log'
    #  version: 2.0.0
    output.logstash:
      hosts: ["logstash:5044"]
      enabled: true
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-inputs
  namespace: kube-system
  labels:
    k8s-app: filebeat
data:
  kubernetes.yml: |-
    - type: docker
      #harvester_buffer_size: 4096000
      close_inactive: 15m
      scan_frequency: 30s
      containers.ids:
      - "*"
      enabled: true
      processors:
        - add_kubernetes_metadata:
            in_cluster: true
---
# Source: filebeat/templates/filebeat-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
---
# Source: filebeat/templates/filebeat-role.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  verbs:
  - get
  - watch
  - list
---
# Source: filebeat/templates/filebeat-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
# Source: filebeat/templates/filebeat-daemonset.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
spec:
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      nodeSelector:
        beta.kubernetes.io/filebeat-ds-ready: "true"
      containers:
      - name: filebeat
        image: "harbor.querycap.com/rk-infra/filebeat:6.6.2"
        args: [
          "-c", "/etc/filebeat.yml",
          "-e", "-httpprof", "0.0.0.0:6060"
        ]
        #ports:
        #  - containerPort: 6060
        #    hostPort: 6068
        securityContext:
          runAsUser: 0
          # If using Red Hat OpenShift uncomment this:
          #privileged: true
        resources:
          limits:
            memory: 1000Mi
            cpu: 1000m
          requests:
            memory: 100Mi
            cpu: 100m
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: inputs
          mountPath: /usr/share/filebeat/inputs.d
          readOnly: true
        - name: data
          mountPath: /usr/share/filebeat/data
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
        - name: timezone
          mountPath: /etc/localtime
      imagePullSecrets:
      - name: qingcloud-registry
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /data/var/lib/docker/containers
      - name: inputs
        configMap:
          defaultMode: 0600
          name: filebeat-inputs
      # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
      - name: data
        hostPath:
          path: /data/filebeat-data
          type: DirectoryOrCreate
      - name: timezone
        hostPath:
          path: /etc/localtime
      tolerations:
      - effect: NoExecute
        key: dedicated
        operator: Equal
        value: gpu
      - effect: NoSchedule
        operator: Exists
```
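The DaemonSet only schedules onto nodes labelled beta.kubernetes.io/filebeat-ds-ready=true (and it tolerates the dedicated taint), so nodes that should ship logs have to be labelled first; for example:

```sh
# Mark a node as eligible for the Filebeat DaemonSet
kubectl label node <node> beta.kubernetes.io/filebeat-ds-ready=true

# Check where the Filebeat pods actually landed
kubectl -n kube-system get pods -l k8s-app=filebeat -o wide
```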
- **kibana.yaml**

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kibana
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
  - host: a.kibana.rockontrol.com
    http:
      paths:
      - backend:
          serviceName: kibana
          servicePort: 5601
---
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: kube-system
  labels:
    k8s-app: kibana
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Kibana"
spec:
  type: ClusterIP
  ports:
  - port: 5601
    protocol: TCP
    targetPort: ui
  selector:
    k8s-app: kibana
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: kube-system
  labels:
    k8s-app: kibana
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: kibana
  template:
    metadata:
      labels:
        k8s-app: kibana
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      imagePullSecrets:
      - name: qingcloud-registry
      containers:
      - name: kibana
        image: harbor.querycap.com/rk-infra/kibana:6.8.8
        resources:
          # need more cpu upon initialization, therefore burstable class
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        env:
        - name: ELASTICSEARCH_URL
          value: http://elasticsearch-logging:9200
        - name: XPACK_SECURITY_ENABLED
          value: "true"
        - name: ELASTICSEARCH_PASSWORD
          value: M50zLpCI0EeDcOh7
        - name: ELASTICSEARCH_USERNAME
          value: kibana
        - name: XPACK_ML_ENABLED
          value: "false"
        ports:
        - containerPort: 5601
          name: ui
          protocol: TCP
```
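The Ingress publishes Kibana at a.kibana.rockontrol.com through the nginx ingress class. Before DNS points there, a rough sanity check (a sketch, not part of the manifests) is to port-forward the Service and hit the UI port directly:

```sh
# Forward local port 5601 to the kibana Service
kubectl -n kube-system port-forward svc/kibana 5601:5601

# In another shell: Kibana should respond (it will ask for a login once x-pack security is on)
curl -I http://127.0.0.1:5601/
```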
- [**logstash.yaml**](https://git.querycap.com/infra/k8s-elk/-/blob/master/logstash.yaml)

```yaml
apiVersion: v1
kind: Service
metadata:
  name: logstash
  namespace: kube-system
spec:
  ports:
  - port: 5044
    targetPort: beats
  selector:
    type: logstash
  clusterIP: None
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: logstash
  namespace: kube-system
spec:
  template:
    metadata:
      labels:
        type: logstash
    spec:
      containers:
      - image: harbor.querycap.com/rk-infra/logstash:v6.6.2
        name: logstash
        ports:
        - containerPort: 5044
          name: beats
        command:
        - logstash
        - '-f'
        - '/etc/logstash_c/logstash.conf'
        env:
        - name: "XPACK_MONITORING_ELASTICSEARCH_HOSTS"
          value: "http://elasticsearch-logging:9200"
        volumeMounts:
        - name: config-volume
          mountPath: /etc/logstash_c/
        - name: config-yml-volume
          mountPath: /usr/share/logstash/config/
        - name: timezone
          mountPath: /etc/localtime
        resources:
          limits:
            cpu: 1000m
            memory: 2048Mi
          # requests:
          #   cpu: 512m
          #   memory: 512Mi
      imagePullSecrets:
      - name: qingcloud-registry
      volumes:
      - name: config-volume
        configMap:
          name: logstash-conf
          items:
          - key: logstash.conf
            path: logstash.conf
      - name: timezone
        hostPath:
          path: /etc/localtime
      - name: config-yml-volume
        configMap:
          name: logstash-yml
          items:
          - key: logstash.yml
            path: logstash.yml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-conf
  namespace: kube-system
  labels:
    type: logstash
data:
  logstash.conf: |-
    input {
      beats {
        port => 5044
      }
    }
    filter {
      if [kubernetes][container][name] == "nginx-ingress-controller" {
        json {
          source => "message"
          target => "ingress_log"
        }
        if [ingress_log][requesttime] {
          mutate {
            convert => ["[ingress_log][requesttime]", "float"]
          }
        }
        if [ingress_log][upstremtime] {
          mutate {
            convert => ["[ingress_log][upstremtime]", "float"]
          }
        }
        if [ingress_log][status] {
          mutate {
            convert => ["[ingress_log][status]", "float"]
          }
        }
        if [ingress_log][httphost] and [ingress_log][uri] {
          mutate {
            add_field => {"[ingress_log][entry]" => "%{[ingress_log][httphost]}%{[ingress_log][uri]}"}
          }
          mutate {
            split => ["[ingress_log][entry]", "/"]
          }
          if [ingress_log][entry][1] {
            mutate {
              add_field => {"[ingress_log][entrypoint]" => "%{[ingress_log][entry][0]}/%{[ingress_log][entry][1]}"}
              remove_field => "[ingress_log][entry]"
            }
          }
          else {
            mutate {
              add_field => {"[ingress_log][entrypoint]" => "%{[ingress_log][entry][0]}/"}
              remove_field => "[ingress_log][entry]"
            }
          }
        }
      }
      if [kubernetes][container][name] =~ /^srv*/ {
        json {
          source => "message"
          target => "tmp"
        }
        if [kubernetes][namespace] == "kube-system" {
          drop {}
        }
        if [tmp][level] {
          mutate {
            add_field => {"[applog][level]" => "%{[tmp][level]}"}
          }
          if [applog][level] == "debug" {
            drop {}
          }
        }
        if [tmp][msg] {
          mutate {
            add_field => {"[applog][msg]" => "%{[tmp][msg]}"}
          }
        }
        if [tmp][func] {
          mutate {
            add_field => {"[applog][func]" => "%{[tmp][func]}"}
          }
        }
        if [tmp][cost] {
          if "ms" in [tmp][cost] {
            mutate {
              split => ["[tmp][cost]", "m"]
              add_field => {"[applog][cost]" => "%{[tmp][cost][0]}"}
              convert => ["[applog][cost]", "float"]
            }
          }
          else {
            mutate {
              add_field => {"[applog][cost]" => "%{[tmp][cost]}"}
            }
          }
        }
        if [tmp][method] {
          mutate {
            add_field => {"[applog][method]" => "%{[tmp][method]}"}
          }
        }
        if [tmp][request_url] {
          mutate {
            add_field => {"[applog][request_url]" => "%{[tmp][request_url]}"}
          }
        }
        if [tmp][meta._id] {
          mutate {
            add_field => {"[applog][traceId]" => "%{[tmp][meta._id]}"}
          }
        }
        if [tmp][project] {
          mutate {
            add_field => {"[applog][project]" => "%{[tmp][project]}"}
          }
        }
        if [tmp][time] {
          mutate {
            add_field => {"[applog][time]" => "%{[tmp][time]}"}
          }
        }
        if [tmp][status] {
          mutate {
            add_field => {"[applog][status]" => "%{[tmp][status]}"}
            convert => ["[applog][status]", "float"]
          }
        }
      }
      mutate {
        rename => ["kubernetes", "k8s"]
        remove_field => "beat"
        remove_field => "tmp"
        remove_field => "[k8s][labels][app]"
      }
    }
    output {
      elasticsearch {
        hosts => ["http://elasticsearch-logging:9200"]
        codec => json
        index => "logstash-%{+YYYY.MM.dd}"
        user => 'elastic'
        password => 'P9IKmhwuYEEej52U'
      }
    }
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-yml
  namespace: kube-system
  labels:
    type: logstash
data:
  logstash.yml: |-
    http.host: "0.0.0.0"
    xpack.monitoring.enabled: true
    xpack.monitoring.elasticsearch.username: logstash_system
    xpack.monitoring.elasticsearch.password: hOsocygbtFEs1qty
    xpack.monitoring.elasticsearch.url: http://elasticsearch-logging:9200
```
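Once Filebeat is shipping to Logstash, the output block above writes daily logstash-YYYY.MM.dd indices. A quick way to confirm they are being created, using the elastic superuser set up below (this assumes curl is available inside the Elasticsearch image):

```sh
# List the daily indices produced by the logstash output
kubectl -n kube-system exec elasticsearch-logging-0 -- \
  curl -s -u elastic:P9IKmhwuYEEej52U 'http://localhost:9200/_cat/indices/logstash-*?v'
```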
- There is also an **elastic-certificates.p12**, which you can generate yourself with the Elasticsearch binaries:

```sh
/usr/share/elasticsearch/bin/elasticsearch-certutil ca
/usr/share/elasticsearch/bin/elasticsearch-certutil cert --ca elastic-certificates.p12
# Just press Enter through the prompts for both commands; there is no need to set an extra password on the keys.
# Once created, the certificates land in the ES data directory by default; here they are all kept under etc:
ls elastic-*
elastic-certificates.p12  elastic-certificates-ca.p12
```
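With the .p12 file generated, the es-keystore Secret from es-secret.yaml can also be created (or recreated) straight from the file instead of hand-editing the base64 value; a sketch:

```sh
# Creates the Secret that the StatefulSet mounts as the "keystore" volume
kubectl -n kube-system create secret generic es-keystore \
  --from-file=elastic-certificates.p12
```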

Deployment steps

Label the nodes that will run Elasticsearch:

```sh
kubectl label node <node> es=data
```

Deploy everything:

```sh
kubectl apply -f .
```
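A quick check that everything came up (the Elasticsearch pod has to get through both init containers first):

```sh
kubectl -n kube-system get pods -l k8s-app=elasticsearch-logging
kubectl -n kube-system get pods -l k8s-app=kibana
kubectl -n kube-system get pods -l k8s-app=filebeat
kubectl -n kube-system get pods -l type=logstash
```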

Create the users:

```sh
kubectl -n kube-system exec -it elasticsearch-logging-0 ./init-user.sh
```

The init-user.sh script was packed into our own image ahead of time; the official image is not used.

```sh
cat init-user.sh
#!/usr/bin/expect
set timeout -1
spawn bin/elasticsearch-setup-passwords interactive
expect "\[y/N\]"
send "y\r"
expect "\[elastic\]"
send "P9IKmhwuYEEej52U\r"
expect "\[elastic\]"
send "P9IKmhwuYEEej52U\r"
expect "\[apm_system\]"
send "1UmdUStBiKYqR0IJ\r"
expect "\[apm_system\]"
send "1UmdUStBiKYqR0IJ\r"
expect "\[kibana\]"
send "M50zLpCI0EeDcOh7\r"
expect "\[kibana\]"
send "M50zLpCI0EeDcOh7\r"
expect "\[logstash_system\]"
send "hOsocygbtFEs1qty\r"
expect "\[logstash_system\]"
send "hOsocygbtFEs1qty\r"
expect "\[beats_system\]"
send "xy28upiAPDNGfmgw\r"
expect "\[beats_system\]"
send "xy28upiAPDNGfmgw\r"
expect "\[remote_monitoring_user\]"
send "jooKJoYHlMiZwTXX\r"
expect "\[remote_monitoring_user\]"
send "jooKJoYHlMiZwTXX\r"
```
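After the passwords are set, it is worth confirming that authentication actually works; a minimal check with the elastic user (again assuming curl exists inside the image):

```sh
# Should return cluster health rather than a 401 once the passwords are in place
kubectl -n kube-system exec elasticsearch-logging-0 -- \
  curl -s -u elastic:P9IKmhwuYEEej52U 'http://localhost:9200/_cluster/health?pretty'
```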

Users

```
# regular user
username: kibana
password: M50zLpCI0EeDcOh7
# read-only user
username: viewer
password: WBQ3p4ZZ7WappBfEb
```

Permission configuration for the viewer user

1. Create a viewer role with the permissions shown below.

(screenshot: viewer role permissions)

2. Create a viewer account and assign it the viewer role.
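The role and user can also be created without the Kibana UI, through the 6.8 cluster's security API. The exact index privileges need to match the screenshot above; the logstash-* pattern and privilege names below are only an illustrative assumption:

```sh
# Hypothetical viewer role limited to reading the logstash-* indices
kubectl -n kube-system exec elasticsearch-logging-0 -- curl -s -u elastic:P9IKmhwuYEEej52U \
  -H 'Content-Type: application/json' -X PUT 'http://localhost:9200/_xpack/security/role/viewer' \
  -d '{"indices":[{"names":["logstash-*"],"privileges":["read","view_index_metadata"]}]}'

# Create the viewer user and grant it that role
kubectl -n kube-system exec elasticsearch-logging-0 -- curl -s -u elastic:P9IKmhwuYEEej52U \
  -H 'Content-Type: application/json' -X PUT 'http://localhost:9200/_xpack/security/user/viewer' \
  -d '{"password":"WBQ3p4ZZ7WappBfEb","roles":["viewer"]}'
```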