nfs-storageclass:

  1. nfs-provisioner.yaml

    1. apiVersion: apps/v1
    2. kind: Deployment
    3. metadata:
    4. name: nfs-client-provisioner
    5. spec:
    6. replicas: 1
    7. selector:
    8. matchLabels:
    9. app: nfs-client-provisioner
    10. strategy:
    11. type: Recreate
    12. template:
    13. metadata:
    14. labels:
    15. app: nfs-client-provisioner
    16. spec:
    17. serviceAccountName: nfs-client-provisioner
    18. containers:
    19. - name: nfs-client-provisioner
    20. image: quay.io/external_storage/nfs-client-provisioner:latest
    21. volumeMounts:
    22. - name: nfs-client-root
    23. mountPath: /persistentvolumes
    24. env:
    25. - name: PROVISIONER_NAME
    26. value: efk-data-storage
    27. - name: NFS_SERVER
    28. value: NFS_SERVER_IP
    29. - name: NFS_PATH
    30. value: /yunwei/efk
    31. volumes:
    32. - name: nfs-client-root
    33. nfs:
    34. server: NFS_SERVER_IP
    35. path: /yunwei/efk
  2. nfs-rbac.yaml ```yaml apiVersion: v1 kind: ServiceAccount metadata: name: nfs-client-provisioner


apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: nfs-client-provisioner-runner rules:

  • apiGroups: [""] resources: ["persistentvolumes"] verbs: ["get","list","watch","create","delete"]
  • apiGroups: [""] resources: ["persistentvolumeclaims"] verbs: ["get","list","watch","update"]
  • apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] verbs: ["get","list","watch"]
  • apiGroups: [""] resources: ["events"] verbs: ["list","watch","create","update","patch"]
  • apiGroups: [""] resources: ["endpoints"] verbs: ["create","delete","get","list","watch","patch","update"]

apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: run-nfs-client-provisioner subjects:

  • kind: ServiceAccount name: nfs-client-provisioner namespace: default roleRef: kind: ClusterRole name: nfs-client-provisioner-runner apiGroup: rbac.authorization.k8s.io ```
  1. nfs-storageclass.yaml
    1. apiVersion: storage.k8s.io/v1
    2. kind: StorageClass
    3. metadata:
    4. name: efk-data
    5. provisioner: efk-data-storage
    6. parameters:
    7. archiveOnDelete: "true"

    01-namespace.yaml

    1. apiVersion: v1
    2. kind: Namespace
    3. metadata:
    4. name: efk

    02-es.yaml

    ```yaml apiVersion: v1 kind: Namespace metadata: name: efk

[root@master efk]# cat 02-es.yaml apiVersion: apps/v1 kind: StatefulSet metadata: name: es namespace: efk spec: serviceName: elasticsearch replicas: 3 selector: matchLabels: app: elasticsearch template: metadata: labels: app: elasticsearch spec: nodeSelector: isEs: "true" initContainers:

  1. - name: increase-vm-max-map
  2. image: busybox
  3. imagePullPolicy: IfNotPresent
  4. command: ["sysctl", "-w", "vm.max_map_count=262144"] # 添加mmap计数限制
  5. securityContext: # 仅应用到指定的容器上,并且不会影响volume
  6. privileged: true
  7. - name: increase-fd-ulimit
  8. image: busybox
  9. imagePullPolicy: IfNotPresent
  10. command: ["sh", "-c", "ulimit -n 65535"] # 修改文件描述符最大数量
  11. securityContext:
  12. privileged: true
  13. containers:
  14. - name: elasticsearch
  15. image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
  16. imagePullPolicy: IfNotPresent
  17. ports:
  18. - name: rest
  19. containerPort: 9200 # Restapi
  20. - name: inter
  21. containerPort: 9300 # 节点通信
  22. resources:
  23. limits:
  24. cpu: 1000m
  25. requests:
  26. cpu: 1000m
  27. volumeMounts:
  28. - name: data
  29. mountPath: /usr/share/elasticsearch/data
  30. env:
  31. - name: cluster.name # es集群的名称
  32. value: k8s-logs
  33. - name: node.name
  34. valueFrom: # 通过匹配上面metadata.name来当节点的名称
  35. fieldRef:
  36. fieldPath: metadata.name
  37. - name: cluster.initial_master_nodes # 初始化集群引导,需要是Pod的名称
  38. value: "es-0,es-1,es-2"
  39. - name: discovery.zen.minimum_master_nodes # 节点数量,高可用集群至少3个主节点,其中2个至少不仅投票节点
  40. value: "2"
  41. - name: discovery.seed_hosts # 用于es集群中节点互相连接发现
  42. value: "elasticsearch"
  43. - name: ES_JAVA_OPTS # 设置java的内存参数
  44. value: "-Xms512m -Xmx512m"
  45. - name: network.host
  46. value: "0.0.0.0"

volumeClaimTemplates: # 定义持久化模版

  • metadata: name: data labels:
    1. app: elasticsearch
    spec: accessModes: ["ReadWriteOnce"] # 访问模式 storageClassName: efk-data resources:
    1. requests:
    2. storage: 20Gi
    1. <a name="cGW5g"></a>
    2. #### 03-es-svc.yaml
    3. ```yaml
    4. apiVersion: v1
    5. kind: Service
    6. metadata:
    7. name: elasticsearch
    8. namespace: efk
    9. labels:
    10. app: elasticsearch
    11. spec:
    12. selector:
    13. app: elasticsearch
    14. clusterIP: None
    15. ports:
    16. - port: 9200
    17. name: rest
    18. - port: 9300
    19. name: inter-node

    04-kibana.yaml

    ```yaml apiVersion: v1 kind: Service metadata: name: kibana namespace: efk labels: app: kibana spec: selector: app: kibana ports:
  • port: 5601

apiVersion: apps/v1 kind: Deployment metadata: name: kibana namespace: efk labels: app: kibana kibana: "true" spec: selector: matchLabels: app: kibana template: metadata: labels: app: kibana kibana: "true" spec: containers:

  - name: kibana
    image: docker.elastic.co/kibana/kibana:7.6.2
    resources:
      limits:
        cpu: 200m
      requests:
        cpu: 200m
    env:
    - name: ELASTICSEARCH_HOSTS
      value: http://elasticsearch:9200
    ports:
    - containerPort: 5601
<a name="nVID5"></a>
#### 05-kibana-ingress.yaml
```yaml
# Ingress exposing the Kibana Service (port 5601, namespace efk) at
# kibana-k8s-test.test.com through the nginx ingress controller.
# NOTE(review): the extensions/v1beta1 Ingress API was removed in
# Kubernetes 1.22. On newer clusters this must be rewritten as
# networking.k8s.io/v1 (which requires pathType and a
# backend.service: {name, port} structure) — confirm target cluster version.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kibana
  namespace: efk
  annotations:
    # Route via the nginx ingress controller.
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
  - host: kibana-k8s-test.test.com
    http:
      paths:
      - backend:
          serviceName: kibana
          servicePort: 5601

06-fluentd-cm.yaml

# Fluentd configuration for the EFK stack. Each data key is a fluentd
# config fragment mounted into the DaemonSet at /etc/fluent/config.d.
# The block-scalar contents below are fluentd syntax, not YAML; their
# inline comments (in Chinese) are part of the stored data.
kind: ConfigMap
apiVersion: v1
metadata:
  name: fluentd-config
  namespace: efk
data:
  # Global fluentd settings: where buffer/state files live.
  system.conf: |-
    <system>
      root_dir /tmp/fluentd-buffers/
    </system>
  # Container-log pipeline: tail container logs, parse JSON/CRI lines,
  # collapse exception stack traces, concat multi-line records, attach
  # Kubernetes metadata, then drop noisy keys and filtered pods.
  containers.input.conf: |-
    <source>
      @id fluentd-containers.log
      @type tail                              # Fluentd 内置的输入方式,其原理是不停地从源文件中获取新的日志。
      path /var/log/containers/*.log          # 挂载的服务器Docker容器日志地址
      pos_file /var/log/es-containers.log.pos
      tag raw.kubernetes.*                    # 设置日志标签
      read_from_head true
      <parse>                                 # 多行格式化成JSON
        @type multi_format                    # 使用 multi-format-parser 解析器插件
        <pattern>
          format json                         # JSON解析器
          time_key time                       # 指定事件时间的时间字段
          time_format %Y-%m-%dT%H:%M:%S.%NZ   # 时间格式
        </pattern>
        <pattern>
          format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
          time_format %Y-%m-%dT%H:%M:%S.%N%:z
        </pattern>
      </parse>
    </source>
    # 在日志输出中检测异常,并将其作为一条日志转发
    # https://github.com/GoogleCloudPlatform/fluent-plugin-detect-exceptions
     <match raw.kubernetes.**>          # 匹配tag为raw.kubernetes.**日志信息
      @id raw.kubernetes
      @type detect_exceptions           # 使用detect-exceptions插件处理异常栈信息
      remove_tag_prefix raw             # 移除 raw 前缀
      message log
      stream stream                     #stdout输出
      multiline_flush_interval 5
      max_bytes 500000                   #异常信息最大字节数
      max_lines 1000                     #异常信息最大行数
     </match>

      <filter **># 拼接日志
      @id filter_concat
      @type concat                # Fluentd Filter 插件,用于连接多个事件中分隔的多行日志。
      key message
      multiline_end_regexp /\n$/  # 以换行符“\n”拼接
      separator ""
     </filter>

    # 添加 Kubernetes metadata 数据
    <filter kubernetes.**>
      @id filter_kubernetes_metadata      #将获取到的日志转化,添加pod信息、命名空间,labels标签等
      @type kubernetes_metadata
    </filter>

    # JSON格式转换,如果原来是json格式,将把json格式内容从新进行转化
    # 插件地址:https://github.com/repeatedly/fluent-plugin-multi-format-parser
    <filter kubernetes.**>
      @id filter_parser
      @type parser                # multi-format-parser多格式解析器插件
      key_name log                # 在要解析的记录中指定字段名称。
      reserve_data true           # 在解析结果中保留原始键值对。
      remove_key_name_field true  # key_name 解析成功后删除字段。
      <parse>
        @type multi_format
        <pattern>
          format json
        </pattern>
        <pattern>
          format none
        </pattern>
       </parse>
    </filter>



    # 删除一些多余的属性
    <filter kubernetes.**>
      @type record_transformer
      remove_keys $.docker.container_id,$.kubernetes.container_image_id,$.kubernetes.pod_id,$.kubernetes.namespace_id,$.kubernetes.master_url,$.kubernetes.labels.pod-template-hash
    </filter>

    # 只保留具有logging=true标签的Pod日志,如果添加只会收集带有logging=true标签的pod
    #<filter kubernetes.**>
    #  @id filter_log
    #  @type grep
    #  <regexp>
    #    key $.kubernetes.labels.logging
    #    pattern ^true$
    #  </regexp>
    #</filter>

    # 过滤带某些标签的服务日志,貌似没成功,来个大神指点下
    <filter kubernetes.**>
      @id filter_log
      @type grep
      <exclude>
        #key $.kubernetes.labels.kibana
        #pattern ^true$
        key $.kubernetes.labels.pod-template-hash
        pattern ^75dfcb6b67$
      </exclude>
    </filter>

  ###### Listener config: normally used for log aggregation ######
  forward.input.conf: |-
    # 监听通过TCP发送的消息
    <source>
      @id forward
      @type forward
    </source>

  # Output: ship everything to the headless "elasticsearch" Service on
  # 9200 with logstash-style daily indices prefixed "k8s", using a
  # file-backed buffer with exponential-backoff retries.
  output.conf: |-
    <match **>
      @id elasticsearch
      @type elasticsearch
      @log_level info
      include_tag_key true
      host elasticsearch
      port 9200
      logstash_format true
      logstash_prefix k8s  # 设置 index 前缀为 k8s
      request_timeout    30s
      <buffer>
        @type file
        path /var/log/fluentd-buffers/kubernetes.system.buffer
        flush_mode interval
        retry_type exponential_backoff
        flush_thread_count 2
        flush_interval 5s
        retry_forever
        retry_max_interval 30
        chunk_limit_size 2M
        queue_limit_length 8
        overflow_action block
      </buffer>
    </match>

07-fluentd-ds.yaml

# 07-fluentd-ds.yaml: ServiceAccount + RBAC + DaemonSet running fluentd
# on every node to collect container logs and ship them to Elasticsearch.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd-es
  namespace: efk
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
# Read-only access to namespaces and pods, needed by the
# kubernetes_metadata filter to enrich log records.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd-es
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - "namespaces"
  - "pods"
  verbs:
  - "get"
  - "watch"
  - "list"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd-es
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
  name: fluentd-es
  namespace: efk
  apiGroup: ""
roleRef:
  kind: ClusterRole
  name: fluentd-es
  # NOTE(review): roleRef.apiGroup is normally rbac.authorization.k8s.io;
  # the empty string mirrors the upstream addon manifest — verify it is
  # accepted by the target cluster version.
  apiGroup: ""
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-es
  namespace: efk
  labels:
    k8s-app: fluentd-es
spec:
  selector:
    matchLabels:
      k8s-app: fluentd-es
  template:
    metadata:
      labels:
        k8s-app: fluentd-es
        kubernetes.io/cluster-service: "true"
      # Marks fluentd as critical so it is not evicted when the node is
      # under pressure (annotation-based pod-priority scheme).
      # NOTE(review): this annotation is deprecated — on newer clusters
      # use priorityClassName instead.
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: fluentd-es
      containers:
      - name: fluentd-es
        image: quay.io/fluentd_elasticsearch/fluentd:v3.0.1
        env:
        - name: FLUENTD_ARGS
          value: --no-supervisor -q
        resources:
          limits:
            memory: 500Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /data/sys/var/docker/containers      # host directory holding docker container logs — confirm docker data-root on the nodes
          readOnly: true
        - name: config-volume
          mountPath: /etc/fluent/config.d
      #nodeSelector:    # optionally restrict collection to labelled nodes; all three nodes are collected here, so it stays commented out
      #  beta.kubernetes.io/fluentd-ds-ready: "true"
      # Tolerate every taint so the DaemonSet also runs on master nodes.
      tolerations:
      - operator: Exists
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /data/sys/var/docker/containers
      - name: config-volume
        configMap:
          # Mounts the fluentd-config ConfigMap defined in 06-fluentd-cm.yaml.
          name: fluentd-config

image.png
参考链接:https://i4t.com/4951.html