### nfs-storageclass
#### nfs-provisioner.yaml
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: efk-data-storage
            - name: NFS_SERVER
              value: NFS_SERVER_IP
            - name: NFS_PATH
              value: /yunwei/efk
      volumes:
        - name: nfs-client-root
          nfs:
            server: NFS_SERVER_IP
            path: /yunwei/efk
```
#### nfs-rbac.yaml
```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
```
#### nfs-storageclass.yaml
```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: efk-data
provisioner: efk-data-storage
parameters:
  archiveOnDelete: "true"
```
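Before applying these manifests, replace the `NFS_SERVER_IP` placeholder with the address of your NFS server. Below is a minimal rollout sketch; the IP used in the `sed` command is only an example, and the file names are the ones from the headings above.

```bash
# Substitute the real NFS server address for the placeholder (192.168.1.100 is just an example)
sed -i 's/NFS_SERVER_IP/192.168.1.100/g' nfs-provisioner.yaml

kubectl apply -f nfs-rbac.yaml
kubectl apply -f nfs-provisioner.yaml
kubectl apply -f nfs-storageclass.yaml

# The provisioner pod must be Running before any PVC bound to efk-data can be provisioned
kubectl get pods -l app=nfs-client-provisioner
kubectl get storageclass efk-data
```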
#### 01-namespace.yaml
```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: efk
```
#### 02-es.yaml
```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es
  namespace: efk
spec:
  serviceName: elasticsearch
  replicas: 3
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      nodeSelector:
        isEs: "true"
      initContainers:
        - name: increase-vm-max-map
          image: busybox
          imagePullPolicy: IfNotPresent
          command: ["sysctl", "-w", "vm.max_map_count=262144"]  # raise the mmap count limit
          securityContext:      # applies only to this container and does not affect volumes
            privileged: true
        - name: increase-fd-ulimit
          image: busybox
          imagePullPolicy: IfNotPresent
          command: ["sh", "-c", "ulimit -n 65535"]  # raise the maximum number of file descriptors
          securityContext:
            privileged: true
      containers:
        - name: elasticsearch
          image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
          imagePullPolicy: IfNotPresent
          ports:
            - name: rest
              containerPort: 9200   # REST API
            - name: inter
              containerPort: 9300   # inter-node communication
          resources:
            limits:
              cpu: 1000m
            requests:
              cpu: 1000m
          volumeMounts:
            - name: data
              mountPath: /usr/share/elasticsearch/data
          env:
            - name: cluster.name            # name of the ES cluster
              value: k8s-logs
            - name: node.name
              valueFrom:                    # use the Pod name (metadata.name) as the node name
                fieldRef:
                  fieldPath: metadata.name
            - name: cluster.initial_master_nodes   # initial cluster bootstrap; these must be the Pod names
              value: "es-0,es-1,es-2"
            - name: discovery.zen.minimum_master_nodes   # an HA cluster needs at least 3 master-eligible nodes, so require a quorum of 2
              value: "2"
            - name: discovery.seed_hosts    # lets the cluster nodes discover each other
              value: "elasticsearch"
            - name: ES_JAVA_OPTS            # JVM memory settings
              value: "-Xms512m -Xmx512m"
            - name: network.host
              value: "0.0.0.0"
  volumeClaimTemplates:                     # persistent volume claim template
    - metadata:
        name: data
        labels:
          app: elasticsearch
      spec:
        accessModes: ["ReadWriteOnce"]      # access mode
        storageClassName: efk-data
        resources:
          requests:
            storage: 20Gi
```
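The StatefulSet only schedules onto nodes labelled `isEs=true` (see the `nodeSelector` above), so label the Elasticsearch nodes before applying it. A minimal sketch, where `node1 node2 node3` are placeholders for your own node names:

```bash
# Label the three nodes that should host Elasticsearch
kubectl label nodes node1 node2 node3 isEs=true

kubectl apply -f 01-namespace.yaml
kubectl apply -f 02-es.yaml

# Wait until es-0, es-1 and es-2 are all Ready
kubectl get pods -n efk -l app=elasticsearch -w
```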
#### 03-es-svc.yaml
```yaml
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  namespace: efk
  labels:
    app: elasticsearch
spec:
  selector:
    app: elasticsearch
  clusterIP: None
  ports:
    - port: 9200
      name: rest
    - port: 9300
      name: inter-node
```
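With the headless service in place, a quick way to confirm the three pods formed a single cluster is to port-forward one of them and query the cluster health API; a sketch:

```bash
kubectl apply -f 03-es-svc.yaml

# Expect "number_of_nodes" : 3 and "status" : "green" in the response
kubectl port-forward -n efk es-0 9200:9200 &
curl http://localhost:9200/_cluster/health?pretty
```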
#### 04-kibana.yaml
```yaml
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: efk
  labels:
    app: kibana
spec:
  selector:
    app: kibana
  ports:
    - port: 5601
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: efk
  labels:
    app: kibana
    kibana: "true"
spec:
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
        kibana: "true"
    spec:
      containers:
        - name: kibana
          image: docker.elastic.co/kibana/kibana:7.6.2
          resources:
            limits:
              cpu: 200m
            requests:
              cpu: 200m
          env:
            - name: ELASTICSEARCH_HOSTS
              value: http://elasticsearch:9200
          ports:
            - containerPort: 5601
```
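Before wiring up the ingress, you can check that Kibana is reachable by port-forwarding its service; a sketch:

```bash
kubectl apply -f 04-kibana.yaml

# Wait for the pod, then forward the service port and open http://localhost:5601 in a browser
kubectl get pods -n efk -l app=kibana
kubectl port-forward -n efk svc/kibana 5601:5601
```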
#### 05-kibana-ingress.yaml
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kibana
  namespace: efk
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
    - host: kibana-k8s-test.test.com
      http:
        paths:
          - backend:
              serviceName: kibana
              servicePort: 5601
```
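Kibana is then published through the ingress host `kibana-k8s-test.test.com`. If that name is not in DNS, point it at a node running your ingress controller; `INGRESS_NODE_IP` below is a placeholder, not a value from the manifests.

```bash
kubectl apply -f 05-kibana-ingress.yaml

# Map the host locally (replace INGRESS_NODE_IP with the real ingress controller address)
echo "INGRESS_NODE_IP kibana-k8s-test.test.com" >> /etc/hosts

# Or test without touching DNS by sending the Host header directly
curl -I -H "Host: kibana-k8s-test.test.com" http://INGRESS_NODE_IP/
```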
#### 06-fluentd-cm.yaml
```yaml
kind: ConfigMap
apiVersion: v1
metadata:
  name: fluentd-config
  namespace: efk
data:
  system.conf: |-
    <system>
      root_dir /tmp/fluentd-buffers/
    </system>
  containers.input.conf: |-
    <source>
      @id fluentd-containers.log
      @type tail                          # Fluentd built-in input plugin; continuously tails new log lines from the source files
      path /var/log/containers/*.log      # container log path mounted from the host
      pos_file /var/log/es-containers.log.pos
      tag raw.kubernetes.*                # tag applied to these logs
      read_from_head true
      <parse>                             # parse multi-line records into JSON
        @type multi_format                # use the multi-format-parser plugin
        <pattern>
          format json                     # JSON parser
          time_key time                   # field that holds the event time
          time_format %Y-%m-%dT%H:%M:%S.%NZ   # time format
        </pattern>
        <pattern>
          format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
          time_format %Y-%m-%dT%H:%M:%S.%N%:z
        </pattern>
      </parse>
    </source>
    # Detect exceptions in the log output and forward each one as a single record
    # https://github.com/GoogleCloudPlatform/fluent-plugin-detect-exceptions
    <match raw.kubernetes.**>             # match records tagged raw.kubernetes.**
      @id raw.kubernetes
      @type detect_exceptions             # use the detect-exceptions plugin to handle exception stack traces
      remove_tag_prefix raw               # strip the raw prefix
      message log
      stream stream                       # stdout stream
      multiline_flush_interval 5
      max_bytes 500000                    # maximum size of an exception record in bytes
      max_lines 1000                      # maximum number of lines in an exception record
    </match>
    # Concatenate multi-line logs
    <filter **>
      @id filter_concat
      @type concat                        # Fluentd filter plugin that joins multi-line logs split across events
      key message
      multiline_end_regexp /\n$/          # join on the trailing newline "\n"
      separator ""
    </filter>
    # Enrich records with Kubernetes metadata
    <filter kubernetes.**>
      @id filter_kubernetes_metadata      # adds pod name, namespace, labels, etc. to each record
      @type kubernetes_metadata
    </filter>
    # JSON conversion: if the original record is JSON, re-parse its content
    # Plugin: https://github.com/repeatedly/fluent-plugin-multi-format-parser
    <filter kubernetes.**>
      @id filter_parser
      @type parser                        # multi-format-parser plugin
      key_name log                        # field of the record to parse
      reserve_data true                   # keep the original key/value pairs in the parsed result
      remove_key_name_field true          # drop the key_name field after a successful parse
      <parse>
        @type multi_format
        <pattern>
          format json
        </pattern>
        <pattern>
          format none
        </pattern>
      </parse>
    </filter>
    # Remove some redundant attributes
    <filter kubernetes.**>
      @type record_transformer
      remove_keys $.docker.container_id,$.kubernetes.container_image_id,$.kubernetes.pod_id,$.kubernetes.namespace_id,$.kubernetes.master_url,$.kubernetes.labels.pod-template-hash
    </filter>
    # Keep only logs from Pods labelled logging=true; if enabled, only Pods carrying that label are collected
    #<filter kubernetes.**>
    #  @id filter_log
    #  @type grep
    #  <regexp>
    #    key $.kubernetes.labels.logging
    #    pattern ^true$
    #  </regexp>
    #</filter>
    # Exclude logs from services with certain labels (this did not seem to work for me; corrections welcome)
    <filter kubernetes.**>
      @id filter_log
      @type grep
      <exclude>
        #key $.kubernetes.labels.kibana
        #pattern ^true$
        key $.kubernetes.labels.pod-template-hash
        pattern ^75dfcb6b67$
      </exclude>
    </filter>
  ###### Listener configuration, normally used for log aggregation ######
  forward.input.conf: |-
    # Listen for messages sent over TCP
    <source>
      @id forward
      @type forward
    </source>
  output.conf: |-
    <match **>
      @id elasticsearch
      @type elasticsearch
      @log_level info
      include_tag_key true
      host elasticsearch
      port 9200
      logstash_format true
      logstash_prefix k8s                 # set the index prefix to k8s
      request_timeout 30s
      <buffer>
        @type file
        path /var/log/fluentd-buffers/kubernetes.system.buffer
        flush_mode interval
        retry_type exponential_backoff
        flush_thread_count 2
        flush_interval 5s
        retry_forever
        retry_max_interval 30
        chunk_limit_size 2M
        queue_limit_length 8
        overflow_action block
      </buffer>
    </match>
```
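Because `logstash_format` is enabled with `logstash_prefix k8s`, the Elasticsearch output writes daily indices named `k8s-<date>`. Once the DaemonSet from the next section is shipping logs, you can confirm they appear; a sketch:

```bash
kubectl apply -f 06-fluentd-cm.yaml

# After fluentd starts, indices prefixed with k8s- should show up
kubectl port-forward -n efk es-0 9200:9200 &
curl 'http://localhost:9200/_cat/indices/k8s-*?v'
```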
#### 07-fluentd-ds.yaml
```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd-es
  namespace: efk
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd-es
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups:
      - ""
    resources:
      - "namespaces"
      - "pods"
    verbs:
      - "get"
      - "watch"
      - "list"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd-es
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
  - kind: ServiceAccount
    name: fluentd-es
    namespace: efk
    apiGroup: ""
roleRef:
  kind: ClusterRole
  name: fluentd-es
  apiGroup: ""
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-es
  namespace: efk
  labels:
    k8s-app: fluentd-es
spec:
  selector:
    matchLabels:
      k8s-app: fluentd-es
  template:
    metadata:
      labels:
        k8s-app: fluentd-es
        kubernetes.io/cluster-service: "true"
      # This annotation ensures that fluentd is not evicted when the node is under pressure,
      # supporting the critical pod annotation-based priority scheme.
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: fluentd-es
      containers:
        - name: fluentd-es
          image: quay.io/fluentd_elasticsearch/fluentd:v3.0.1
          env:
            - name: FLUENTD_ARGS
              value: --no-supervisor -q
          resources:
            limits:
              memory: 500Mi
            requests:
              cpu: 100m
              memory: 200Mi
          volumeMounts:
            - name: varlog
              mountPath: /var/log
            - name: varlibdockercontainers
              mountPath: /data/sys/var/docker/containers   # path where Docker stores container logs on the host
              readOnly: true
            - name: config-volume
              mountPath: /etc/fluent/config.d
      #nodeSelector:                        # collection can be restricted to labelled nodes; all 3 of our nodes need collection, so this stays commented out
      #  beta.kubernetes.io/fluentd-ds-ready: "true"
      tolerations:
        - operator: Exists
      terminationGracePeriodSeconds: 30
      volumes:
        - name: varlog
          hostPath:
            path: /var/log
        - name: varlibdockercontainers
          hostPath:
            path: /data/sys/var/docker/containers
        - name: config-volume
          configMap:
            name: fluentd-config
```
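Finally, roll out the DaemonSet and check that every node is running a collector; the index pattern to create in Kibana afterwards is `k8s-*`, matching the prefix configured above. A sketch:

```bash
kubectl apply -f 07-fluentd-ds.yaml

# DESIRED/READY should equal the number of nodes (one fluentd-es pod per node)
kubectl get daemonset fluentd-es -n efk
kubectl get pods -n efk -l k8s-app=fluentd-es -o wide

# Then in Kibana: Management -> Index Patterns -> create the pattern "k8s-*"
```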