deployment.yaml
Export a Deployment manifest
kubectl create deployment java-demo --help
kubectl create deployment java-demo --image=lizhenliang/java-demo --dry-run -o yaml > deployment-java.yaml
# -o sets the output format; --dry-run performs a trial run without creating the object
This produces deployment-java.yaml; apply it with:
kubectl apply -f deployment-java.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: java-demo
  name: java-demo
spec:
  replicas: 1 ## replicas
  selector:
    matchLabels:
      app: java-demo
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: java-demo
    spec:
      containers:
      - image: lizhenliang/java-demo ## pulled from Docker Hub by default
        name: java-demo
        resources: {}
status: {}
The Deployment controller
Controllers are also known as workloads.
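Since a Deployment manages ReplicaSets and Pods on your behalf, day-to-day operations go through it. A sketch using standard kubectl commands (the v2 image tag is made up for illustration):
kubectl scale deployment java-demo --replicas=3                              # scale out/in
kubectl set image deployment/java-demo java-demo=lizhenliang/java-demo:v2   # rolling update (hypothetical tag)
kubectl rollout status deployment/java-demo                                  # watch the rollout
kubectl rollout undo deployment/java-demo                                    # roll back to the previous revision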
[root@master ~]# kubectl create deployment web --image=nginx --dry-run -o yaml > web_nging.yml
W0226 19:28:14.262671 156463 helpers.go:553] --dry-run is deprecated and can be replaced with --dry-run=client.
[root@master ~]# ls
dashboard.yaml kube-flannel.yml kubernetes-dashboard.yaml pod.yaml recommended.yaml svc.yaml test-java.yaml web_nging.yml
[root@master ~]# cat web_nging.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: web
  name: web
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: web
    spec:
      containers:
      - image: nginx
        name: nginx
        resources: {}
status: {}
Tweak it a little:
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: web
  name: web
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web
  strategy: {}
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
      - image: lizhenliang/java-demo
        name: nginx-container
        resources: {}
status: {}
[root@master ~]# kubectl apply -f web_nging.yml
deployment.apps/web created
[root@master ~]# kubectl get svc,pod -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/java-demo NodePort 10.107.162.9 <none> 80:32331/TCP 130m app=java-demo
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 5h34m <none>
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/web-57fc9cf4d4-2nj77 1/1 Running 0 38s 10.244.1.7 node1 <none> <none>
pod/web-57fc9cf4d4-8n8hf 1/1 Running 0 38s 10.244.1.9 node1 <none> <none>
pod/web-57fc9cf4d4-dfmmb 1/1 Running 0 38s 10.244.1.8 node1 <none> <none>
service.yaml
kubectl expose deployment java-demo --port=80 --name=<your_service_name> --target-port=8080 --type=NodePort --dry-run -o yaml > svc.yaml
--target-port is the port the application listens on inside the image. --port is the Service port inside the cluster, used between pods via <ClusterIP>:<port>. --type=NodePort exposes the Service outside the cluster as well; if no node port is specified, a random one is allocated.
--name defaults to the Deployment's name when omitted.
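As a sketch, the three access paths map onto values that show up in the transcripts below (ClusterIP 10.107.162.9, nodePort 32331, node IP 192.168.116.137):
# inside the cluster: <ClusterIP>:<port>
curl http://10.107.162.9:80/
# inside a Pod, via cluster DNS (service name + namespace)
curl http://java-demo.default.svc.cluster.local/
# from outside the cluster: <NodeIP>:<nodePort>
curl http://192.168.116.137:32331/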
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: java-demo
  name: java-demo
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    app: java-demo
  type: NodePort
status:
  loadBalancer: {}
Apply it with kubectl apply -f svc.yaml:
[root@master ~]# kubectl apply -f svc.yaml
service/java-demo created
[root@master ~]# kubectl get pods,svc
NAME READY STATUS RESTARTS AGE
pod/java-demo-56d54df448-5f5p5 1/1 Running 0 93s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/java-demo NodePort 10.107.162.9 <none> 80:32331/TCP 2s
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 3h23m
pod
namespace: resource isolation.
How do you isolate multiple projects within one team?
pod:
A Pod is a set of one or more containers.
Containers in the same Pod share namespaces and storage.
Pods are ephemeral.
Affinity use cases:
- File exchange between applications (file sharing / shared storage) via data volumes; volumes can follow Pods as they move between nodes; emptyDir works as a common volume mounted by several containers
- Network/socket communication over 127.0.0.1 (shared network)
- Mutual invocation
Docker's design philosophy is one app per container: each run starts a single process.
It relies mainly on Linux namespaces for isolation and cgroups for resource limits. That is why every Pod gets an infrastructure container (pause) holding the shared network namespace; a sketch of such a multi-container Pod follows after the transcript below.
[root@master ~]# docker ps | grep pause
3887328ddc36 registry.aliyuncs.com/google_containers/pause:3.2 "/pause" About an hour ago Up About an hour k8s_POD_kubernetes-dashboard-7b4bdcb8b8-9dvnq_kube-system_20bae962-668b-42b1-87d7-9391f48ed57e_0
571cf7cbf548 registry.aliyuncs.com/google_containers/pause:3.2 "/pause" 2 hours ago Up 2 hours k8s_POD_dashboard-metrics-scraper-7b59f7d4df-vvs9f_kubernetes-dashboard_39008cef-f79d-439b-8019-541764c6f377_0
ff257da7e78c registry.aliyuncs.com/google_containers/pause:3.2 "/pause" 2 hours ago Up 2 hours k8s_POD_kubernetes-dashboard-74d688b6bc-mbqm7_kubernetes-dashboard_09d0c811-c783-4a06-a16c-079fb7e764d4_0
7ed2829521b6 registry.aliyuncs.com/google_containers/pause:3.2 "/pause" 2 hours ago Up 2 hours k8s_POD_dashboard-metrics-scraper-head-58979977f8-txf76_kubernetes-dashboard-head_ca275c6b-2082-4fd7-a41c-e5a2e9782593_0
63bdce172dac registry.aliyuncs.com/google_containers/pause:3.2 "/pause" 2 hours ago Up 2 hours k8s_POD_kubernetes-dashboard-head-679f448bd7-qf7xl_kubernetes-dashboard-head_75d66e1a-5be5-4354-b816-1ebe88cf020e_0
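A minimal sketch of the affinity scenarios above: two containers in one Pod sharing an emptyDir volume and the pause container's network namespace (all names here are made up for illustration):
apiVersion: v1
kind: Pod
metadata:
  name: two-container-demo        # hypothetical name
spec:
  volumes:
  - name: shared-data
    emptyDir: {}                  # shared scratch volume, lives as long as the Pod
  containers:
  - name: writer                  # hypothetical sidecar writing a file every 5s
    image: busybox
    command: ["sh", "-c", "while true; do date > /data/index.html; sleep 5; done"]
    volumeMounts:
    - name: shared-data
      mountPath: /data
  - name: web
    image: nginx
    volumeMounts:
    - name: shared-data
      mountPath: /usr/share/nginx/html
    # both containers share the network namespace held by the pause container,
    # so "writer" could also reach nginx at 127.0.0.1:80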
get pod.yaml
env: passes environment variables
resources: resource requests and limits
readinessProbe: readiness check; traffic is forwarded only to Pods that pass it
livenessProbe: liveness probe; a failing container is restarted (rebuilt)
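A hedged sketch of those four fields in a container spec; the values are illustrative, only the image and its port 8080 come from these notes:
apiVersion: v1
kind: Pod
metadata:
  name: java-demo-probes          # hypothetical name
spec:
  containers:
  - name: java-demo
    image: lizhenliang/java-demo
    env:                          # env: pass environment variables
    - name: JAVA_OPTS             # hypothetical variable
      value: "-Xmx256m"
    resources:                    # resources: requests and limits
      requests:
        cpu: 100m
        memory: 128Mi
      limits:
        cpu: 500m
        memory: 512Mi
    readinessProbe:               # gate traffic: only ready Pods receive Service traffic
      httpGet:
        path: /
        port: 8080
      initialDelaySeconds: 10
    livenessProbe:                # restart the container when this fails
      httpGet:
        path: /
        port: 8080
      initialDelaySeconds: 30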
[root@master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
java-demo-56d54df448-5f5p5 1/1 Running 0 38m
[root@master ~]# kubectl get pods java-demo-56d54df448-5f5p5 -o yaml > pod.yaml
[root@master ~]# cat pod.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: "2021-02-26T09:23:48Z"
  generateName: java-demo-56d54df448-
  labels:
    app: java-demo
    pod-template-hash: 56d54df448
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:generateName: {}
        f:labels:
          .: {}
          f:app: {}
          f:pod-template-hash: {}
        f:ownerReferences:
          .: {}
          k:{"uid":"2701f0fd-988a-44d2-bc51-4a3639098f04"}:
            .: {}
            f:apiVersion: {}
            f:blockOwnerDeletion: {}
            f:controller: {}
            f:kind: {}
            f:name: {}
            f:uid: {}
      f:spec:
        f:containers:
          k:{"name":"java-demo"}:
            .: {}
            f:image: {}
            f:imagePullPolicy: {}
            f:name: {}
            f:resources: {}
            f:terminationMessagePath: {}
            f:terminationMessagePolicy: {}
        f:dnsPolicy: {}
        f:enableServiceLinks: {}
        f:restartPolicy: {}
        f:schedulerName: {}
        f:securityContext: {}
        f:terminationGracePeriodSeconds: {}
    manager: kube-controller-manager
    operation: Update
    time: "2021-02-26T09:23:48Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:status:
        f:conditions:
          k:{"type":"ContainersReady"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"Initialized"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"Ready"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
        f:containerStatuses: {}
        f:hostIP: {}
        f:phase: {}
        f:podIP: {}
        f:podIPs:
          .: {}
          k:{"ip":"10.244.1.6"}:
            .: {}
            f:ip: {}
        f:startTime: {}
    manager: kubelet
    operation: Update
    time: "2021-02-26T09:24:20Z"
  name: java-demo-56d54df448-5f5p5
  namespace: default
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: java-demo-56d54df448
    uid: 2701f0fd-988a-44d2-bc51-4a3639098f04
  resourceVersion: "18158"
  uid: 61a5c4e3-c021-403b-95a6-b3814b26c320
spec:
  containers:
  - image: lizhenliang/java-demo
    imagePullPolicy: Always
    name: java-demo
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: default-token-z8hjj
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: node1
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - name: default-token-z8hjj
    secret:
      defaultMode: 420
      secretName: default-token-z8hjj
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2021-02-26T09:23:48Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2021-02-26T09:24:20Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2021-02-26T09:24:20Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2021-02-26T09:23:48Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: docker://7f86bece84c38a8b8539b272634ad5868a7c0047d4b07099cea50ed0c89b746d
    image: lizhenliang/java-demo:latest
    imageID: docker-pullable://lizhenliang/java-demo@sha256:4e43b2bcd81adf6d00b46a5c7efd384fc9f5b059c75255c8c89404ed4818bae3
    lastState: {}
    name: java-demo
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2021-02-26T09:24:19Z"
  hostIP: 192.168.116.137
  phase: Running
  podIP: 10.244.1.6
  podIPs:
  - ip: 10.244.1.6
  qosClass: BestEffort
  startTime: "2021-02-26T09:23:48Z"
expose
There are generally two ways to expose an application: Service and Ingress.
expose — service
- Purpose:
- Keeps Pods from getting lost (via label selector)
- Defines the access policy for Pods (load balancing; layer-4 TCP/UDP)
service - expose
Service supports four types: ClusterIP, NodePort, LoadBalancer, and ExternalName.
ClusterIP: a virtual IP visible only inside the cluster; it cannot serve external users directly, something extra has to sit in front of it.
NodePort: opens the same port on every node, reachable from outside at <NodeIP>:<nodePort>.
LoadBalancer: sits one layer above the nodes, typically a cloud load balancer.
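The label selector is what binds a Service to its Pods; the Endpoints object shows the resulting Pod IPs. A sketch using standard kubectl:
kubectl get endpoints java-demo   # Pod IP:port pairs the Service forwards to
kubectl describe svc java-demo    # selector, type, nodePort, endpoints in one view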
[root@master ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
java-demo NodePort 10.107.162.9 <none> 80:32331/TCP 175m
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 6h19m
[root@master ~]# kubectl get svc -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 6h20m
kubernetes-dashboard NodePort 10.104.129.96 <none> 80:30000/TCP 4h58m
kubectl expose deployment java-demo --port=80 --name=your_service_name --target-port=8080 --type=NodePort
[root@master ~]# kubectl apply -f svc.yaml
service/java-demo-svc-test created
[root@master ~]# cat svc.yaml
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: java-demo
  name: java-demo-svc-test
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 30033
  selector:
    app: java-demo
  type: NodePort
status:
  loadBalancer: {}
[root@master ~]# kubectl get svc,pod -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/java-demo-svc-test NodePort 10.109.80.185 <none> 80:30033/TCP 25s app=java-demo
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 6h32m <none>
Load balancing: implemented with iptables (netfilter) by default.
LVS is a load balancer built for very high concurrency (11.11, 6.18, 12.12 shopping peaks and the like).
IPVS works much like LVS.
kube-proxy is the component that implements Service behavior.
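To see the rules kube-proxy writes, something like the following can be run on a node (chain names vary by version, so treat it as a sketch):
iptables-save | grep java-demo    # NAT rules generated for the Service (iptables mode)
ipvsadm -Ln                       # virtual server table, if kube-proxy runs in IPVS mode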
Problems with a NodePort Service
[root@master ~]# kubectl get svc,deployment -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/java-demo-svc-test NodePort 10.109.80.185 <none> 80:30033/TCP 11m app=web
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 6h43m <none>
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
deployment.apps/web 3/3 3 3 69m nginx-container lizhenliang/java-demo app=web
NodePort: http://192.168.116.137:30033/
NodePort: http://192.168.116.136:30033/
NodePort: http://192.168.116.130:30033/
Once deployed, the application answers on all three node IPs above,
because NodePort is just a forwarding rule installed on every node.
The ingress controller is deployed on the nodes.
It forwards traffic by domain name, built on top of nginx (upstream blocks).
Deploy it from ingress-controller.yaml.
[root@master ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-7f89b7bc75-dgllx 1/1 Running 0 7h14m
coredns-7f89b7bc75-rlq5s 1/1 Running 0 7h14m
etcd-master 1/1 Running 0 7h15m
kube-apiserver-master 1/1 Running 0 7h15m
kube-controller-manager-master 1/1 Running 0 7h15m
kube-flannel-ds-hpwk9 1/1 Running 0 6h53m
kube-flannel-ds-ncwv6 1/1 Running 0 7h14m
kube-flannel-ds-tmnnt 1/1 Running 0 7h7m
kube-proxy-6c2cj 1/1 Running 0 6h53m
kube-proxy-8fg9h 1/1 Running 0 7h7m
kube-proxy-g65th 1/1 Running 0 7h14m
kube-scheduler-master 1/1 Running 0 7h14m
kubernetes-dashboard-7b4bdcb8b8-9dvnq 1/1 Running 0 4h39m
After installing ingress, use it like this:
create a YAML file containing the Ingress rules (see the sketch after these steps),
apply that YAML,
and make sure the domain name (your own, even a local one) resolves to a node.
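A minimal Ingress rule for the java-demo Service might look like this (the host name is made up; the older v1beta1 API matches the cluster version used in these notes):
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: java-demo-ingress               # hypothetical name
spec:
  rules:
  - host: java-demo.example.com         # must resolve to a node IP (a hosts entry works)
    http:
      paths:
      - path: /
        backend:
          serviceName: java-demo        # the Service created earlier
          servicePort: 80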
namespace
- Isolates resources (e.g. a test environment vs. a development environment)
- Caps how many resources a namespace can consume
kubectl get ns                             # list all namespaces
kubectl get ns <namespace_name>            # show one namespace
kubectl get pods -n <namespace_name>       # get pods in that namespace
kubectl describe ns                        # describe all namespaces
kubectl describe ns <namespace_name>       # describe one namespace
kubectl describe pods -n <namespace_name>  # describe pods in that namespace
kubectl create ns <namespace_name>         # create a namespace
kubectl delete ns <namespace_name>         # delete a namespace
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: daiyi-test-namespace
spec: {}
status: {}
[root@master k8s]# kubectl get ns
NAME STATUS AGE
default Active 20h ## the default namespace
kube-node-lease Active 20h ## node heartbeat leases (health checks)
kube-public Active 20h ## readable by everyone, even unauthenticated users
kube-system Active 20h ## objects created by Kubernetes itself
kubernetes-dashboard Active 18h
kubernetes-dashboard-head Active 19h
[root@master k8s]# kubectl get ns default
NAME STATUS AGE
default Active 20h
[root@master k8s]# kubectl get ns kubernetes-dashboard
NAME STATUS AGE
kubernetes-dashboard Active 18h
[root@master k8s]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-7f89b7bc75-dgllx 1/1 Running 0 20h
coredns-7f89b7bc75-rlq5s 1/1 Running 0 20h
etcd-master 1/1 Running 0 20h
kube-apiserver-master 1/1 Running 0 20h
kube-controller-manager-master 1/1 Running 0 20h
kube-flannel-ds-hpwk9 1/1 Running 0 20h
kube-flannel-ds-ncwv6 1/1 Running 0 20h
kube-flannel-ds-tmnnt 1/1 Running 0 20h
kube-proxy-6c2cj 1/1 Running 0 20h
kube-proxy-8fg9h 1/1 Running 0 20h
kube-proxy-g65th 1/1 Running 0 20h
kube-scheduler-master 1/1 Running 0 20h
kubernetes-dashboard-7b4bdcb8b8-9dvnq 1/1 Running 0 17h
[root@master k8s]# kubectl get pods -n default
NAME READY STATUS RESTARTS AGE
web-57fc9cf4d4-2nj77 1/1 Running 0 15h
web-57fc9cf4d4-8n8hf 1/1 Running 0 15h
web-57fc9cf4d4-dfmmb 1/1 Running 0 15h
[root@master k8s]# kubectl get pods -n kubernetes-dashboard
NAME READY STATUS RESTARTS AGE
dashboard-metrics-scraper-7b59f7d4df-vvs9f 1/1 Running 0 18h
kubernetes-dashboard-74d688b6bc-mbqm7 1/1 Running 0 18h
[root@master k8s]# kubectl describe ns default
Name: default
Labels: <none>
Annotations: <none>
Status: Active
No resource quota. ## a ResourceQuota would cap resources for the whole namespace
No LimitRange resource. ## a LimitRange would constrain each object in the namespace
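Hedged sketches of the two objects those comments refer to (names and numbers are made up):
apiVersion: v1
kind: ResourceQuota
metadata:
  name: quota-demo                # hypothetical name
  namespace: default
spec:
  hard:
    pods: "10"                    # cap for the namespace as a whole
    requests.cpu: "2"
    requests.memory: 2Gi
---
apiVersion: v1
kind: LimitRange
metadata:
  name: limits-demo               # hypothetical name
  namespace: default
spec:
  limits:
  - type: Container               # defaults applied to each container
    default:                      # default limits
      cpu: 200m
      memory: 256Mi
    defaultRequest:               # default requests
      cpu: 100m
      memory: 128Mi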
[root@master k8s]# kubectl create ns daiyi-test-namespace
namespace/daiyi-test-namespace created
[root@master k8s]# kubectl get ns daiyi-test-namespace
NAME STATUS AGE
daiyi-test-namespace Active 3s
[root@master k8s]# kubectl delete ns daiyi-test-namespace
namespace "daiyi-test-namespace" deleted
[root@master k8s]# kubectl create ns daiyi-test-namespace --dry-run -o yaml > namesapce_test.yaml
W0227 10:53:17.607938 232816 helpers.go:553] --dry-run is deprecated and can be replaced with --dry-run=client.
[root@master k8s]# cat namesapce_test.yaml
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: daiyi-test-namespace
spec: {}
status: {}
[root@master k8s]# kubectl apply -f namesapce_test.yaml
namespace/daiyi-test-namespace created
[root@master k8s]# kubectl get ns daiyi-test-namespace
NAME STATUS AGE
daiyi-test-namespace Active 18s
[root@master k8s]# kubectl delete ns daiyi-test-namespace
namespace "daiyi-test-namespace" deleted
[root@master k8s]# kubectl get ns daiyi-test-namespace
Error from server (NotFound): namespaces "daiyi-test-namespace" not found
[root@master k8s]#
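To put workloads into a namespace, either pass -n on the command line or set metadata.namespace in the YAML; a sketch (assuming the namespace still exists):
kubectl create deployment demo --image=nginx -n daiyi-test-namespace
kubectl get pods -n daiyi-test-namespace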
ingress
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.30.0/deploy/static/mandatory.yml
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.30.0/deploy/static/provider/baremetal/service-nodeport.yml
Make two substitutions. Swap the image:
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.20.0
for the mirror:
image: quay-mirror.qiniu.com/kubernetes-ingress-controller/nginx-ingress-controller:0.20.0
and change the controller Deployment's apiVersion from:
apiVersion: extensions/v1beta1
to:
apiVersion: apps/v1
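Both substitutions can be scripted with sed; a sketch against the downloaded file (double-check that only the controller Deployment's apiVersion is affected, since this replaces every occurrence):
sed -i 's#quay.io/kubernetes-ingress-controller#quay-mirror.qiniu.com/kubernetes-ingress-controller#' mandatory.yml
sed -i 's#extensions/v1beta1#apps/v1#' mandatory.yml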
kubectl apply -f kube-flannel.yml
[root@master ingress]# kubectl apply -f ingress_deploy.yaml
configmap/ingress-nginx-controller configured
clusterrole.rbac.authorization.k8s.io/ingress-nginx unchanged
clusterrole.rbac.authorization.k8s.io/ingress-nginx-admission unchanged
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx unchanged
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx-admission unchanged
deployment.apps/ingress-nginx-controller configured
namespace/ingress-nginx unchanged
serviceaccount/ingress-nginx unchanged
serviceaccount/ingress-nginx-admission unchanged
service/ingress-nginx-controller-admission unchanged
service/ingress-nginx-controller unchanged
role.rbac.authorization.k8s.io/ingress-nginx unchanged
role.rbac.authorization.k8s.io/ingress-nginx-admission unchanged
rolebinding.rbac.authorization.k8s.io/ingress-nginx unchanged
rolebinding.rbac.authorization.k8s.io/ingress-nginx-admission unchanged
validatingwebhookconfiguration.admissionregistration.k8s.io/ingress-nginx-admission configured
job.batch/ingress-nginx-admission-create unchanged
job.batch/ingress-nginx-admission-patch unchanged
[root@master ingress]# kubectl get pod -n ingress-nginx
NAME READY STATUS RESTARTS AGE
ingress-nginx-admission-create-97bm7 0/1 Completed 0 33m
ingress-nginx-admission-patch-7zhx8 0/1 Completed 0 33m
ingress-nginx-controller-67897c9494-8spfl 0/1 ImagePullBackOff 0 33m
[root@master ingress]# kubectl get svc -n ingress-nginx
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
ingress-nginx-controller NodePort 10.96.10.240 <none> 80:30470/TCP,443:32096/TCP 34m
ingress-nginx-controller-admission ClusterIP 10.104.22.242 <none> 443/TCP 34m