[k8s commands]

    kubectl get service
    kubectl describe service nginx
    kubectl get namespace
    kubectl get deployment --namespace=pro
    kubectl get service --namespace=pro
    kubectl get pod --namespace=pro
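To narrow these down to the nginx resources used below, something like the following should work (the label selector and the -o wide / --tail flags are assumptions, not from the original notes):

    # list the nginx Deployment, Service and Pods in the pro namespace
    kubectl get deployment,service,pod -l app=nginx --namespace=pro -o wide
    # tail the nginx container logs by label
    kubectl logs -l app=nginx --namespace=pro --tail=50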

nginx.conf

# node1 1319 -> /etc/nginx/nginx.conf

#user  nobody;
worker_processes  1;

#error_log  logs/error.log;
#error_log  logs/error.log  notice;
#error_log  logs/error.log  info;

#pid        logs/nginx.pid;

events {
    worker_connections  1024;
}

http {
    include       mime.types;
    default_type  application/octet-stream;

    #log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
    #                  '$status $body_bytes_sent "$http_referer" '
    #                  '"$http_user_agent" "$http_x_forwarded_for"';

    #access_log  logs/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    #keepalive_timeout  0;
    keepalive_timeout  65;

    #gzip  on;

    server {
        listen       8086;
        server_name  localhost;

        location / {
            root   html;
            index  index.html;
        }
    }

    server {
        listen       8084;
        server_name  localhost;

        #charset koi8-r;
        #access_log  logs/host.access.log  main;

        location / {
            proxy_pass http://221.178.251.182:80;
        }

        #error_page  404              /404.html;

        # redirect server error pages to the static page /50x.html
        #
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }

        # proxy the PHP scripts to Apache listening on 127.0.0.1:80
        #
        #location ~ \.php$ {
        #    proxy_pass   http://127.0.0.1;
        #}

        # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
        #
        #location ~ \.php$ {
        #    root           html;
        #    fastcgi_pass   127.0.0.1:9000;
        #    fastcgi_index  index.php;
        #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
        #    include        fastcgi_params;
        #}

        # deny access to .htaccess files, if Apache's document root
        # concurs with nginx's one
        #
        #location ~ /\.ht {
        #    deny  all;
        #}
    }

    # another virtual host using mix of IP-, name-, and port-based configuration
    #
    #server {
    #    listen       8000;
    #    listen       somename:8080;
    #    server_name  somename  alias  another.alias;
    #    location / {
    #        root   html;
    #        index  index.html index.htm;
    #    }
    #}

    # HTTPS server
    #
    #server {
    #    listen       443 ssl;
    #    server_name  localhost;
    #    ssl_certificate      cert.pem;
    #    ssl_certificate_key  cert.key;
    #    ssl_session_cache    shared:SSL:1m;
    #    ssl_session_timeout  5m;
    #    ssl_ciphers  HIGH:!aNULL:!MD5;
    #    ssl_prefer_server_ciphers  on;
    #    location / {
    #        root   html;
    #        index  index.html index.htm;
    #    }
    #}

    #server {
    #    listen       443;
    #    server_name  openapi.sinoxx.com;
    #    ssl on;
    #    ssl_certificate      cert/2193315__sinoxx.com.pem;
    #    ssl_certificate_key  cert/2193315__sinoxx.com.key;
    #    ssl_session_timeout  5m;
    #    ssl_ciphers  ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4;
    #    ssl_protocols  TLSv1 TLSv1.1 TLSv1.2;
    #    ssl_prefer_server_ciphers  on;
    #    location / {
    #        proxy_http_version 1.1;
    #        proxy_set_header Upgrade $http_upgrade;
    #        proxy_set_header Connection upgrade;
    #        proxy_pass http://127.0.0.1:10001/;
    #    }
    #}

    #server {
    #    listen       80;
    #    server_name  openapi.sinoxx.com;
    #    location / {
    #        proxy_pass http://127.0.0.1:10001/;
    #        proxy_set_header REMOTE-HOST $remote_addr;
    #        proxy_set_header Host $host;
    #        proxy_set_header X-Real-IP $remote_addr;
    #    }
    #}
}
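After editing /etc/nginx/nginx.conf on node1, a quick sanity check and reload (standard nginx CLI options, assuming nginx runs directly on that host):

    nginx -t          # test the configuration for syntax errors
    nginx -s reload   # reload the running nginx with the new configuration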

nginx container

apiVersion: v1
kind: Service
metadata:
  name: nginx-service
  namespace: pro
spec:
  ports:
  - name: '80'
    nodePort: 30001
    port: 80
    protocol: TCP
    targetPort: 80
  - name: '81'
    nodePort: 30002
    port: 81
    protocol: TCP
    targetPort: 81
  - name: '443'
    nodePort: 30003
    port: 443
    protocol: TCP
    targetPort: 443
  selector:
    app: nginx
  type: NodePort
---
# External firewall: map domain xx.com:80 -> host node 109:30001
# Port forwarding: host 30001 -> container 80, host 30002 -> container 81, host 30003 -> container 443
# Host 192.168.0.109 (node1) is mapped into the nginx container
# nginx container config: each port / domain is mapped to the matching container address inside k8s
#
# http://218.3.146.105:81 -> :30002 admin page -> container nginx config
#   xx:81 -> node3:30002 -> con-nginx:81
#   external forwarding: 218.3.146.105:81 -> 192.168.0.109:30002
#   container port open:  192.168.0.109:30002 -> container internal :81
# 218.3.146.105:80 -> 109:30001
#   -> nginx container, configured as proxy_host | domain -> service (container name) mapping and forwarding
# domain:80 (firewall) -> internal host:30001 (NodePort opened when the pod starts) -> container nginx [config] -> container IP:80
# ------------------------------------------------------------
# taskadmin.sinoxx.com
# ------------------------------------------------------------
server {
    set $forward_scheme http;
    set $server "xxl-job-admin-service.pro.svc.cluster.local";
    set $port 80;

    listen 80;
    server_name taskadmin.sinoxx.com;

    access_log /data/logs/proxy_host-14.log proxy;

    location / {
        # Proxy!
        include conf.d/include/proxy.conf;
    }
}
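A rough way to verify the whole chain described above (domain -> firewall -> NodePort 30001 -> container nginx -> xxl-job-admin-service); the node IP and Host header come from the notes, the curl invocation itself is just one possible test:

    # hit the NodePort directly and present the proxied domain as Host header;
    # the proxy_host block should forward to xxl-job-admin-service.pro.svc.cluster.local:80
    curl -I -H "Host: taskadmin.sinoxx.com" http://192.168.0.109:30001/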
{
  "kind": "Service",
  "apiVersion": "v1",
  "metadata": {
    "name": "nginx-service",
    "namespace": "pro",
    "selfLink": "/api/v1/namespaces/pro/services/nginx-service",
    "uid": "06e0f3e8-8c15-11e9-a87a-0894ef381eca",
    "resourceVersion": "1138992",
    "creationTimestamp": "2019-06-11T06:49:14Z",
    "annotations": {
      "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"name\":\"nginx-service\",\"namespace\":\"pro\"},\"spec\":{\"ports\":[{\"name\":\"80\",\"nodePort\":30001,\"port\":80,\"protocol\":\"TCP\",\"targetPort\":80},{\"name\":\"81\",\"nodePort\":30002,\"port\":81,\"protocol\":\"TCP\",\"targetPort\":81},{\"name\":\"443\",\"nodePort\":30003,\"port\":443,\"protocol\":\"TCP\",\"targetPort\":443}],\"selector\":{\"app\":\"nginx\"},\"type\":\"NodePort\"}}\n"
    }
  },
  "spec": {
    "ports": [
      {
        "name": "80",
        "protocol": "TCP",
        "port": 80,
        "targetPort": 80,
        "nodePort": 30001
      },
      {
        "name": "81",
        "protocol": "TCP",
        "port": 81,
        "targetPort": 81,
        "nodePort": 30002
      },
      {
        "name": "443",
        "protocol": "TCP",
        "port": 443,
        "targetPort": 443,
        "nodePort": 30003
      }
    ],
    "selector": {
      "app": "nginx"
    },
    "clusterIP": "10.96.32.66",
    "type": "NodePort",
    "sessionAffinity": "None",
    "externalTrafficPolicy": "Cluster"
  },
  "status": {
    "loadBalancer": {}
  }
}
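The JSON above is the live Service object; it can be re-fetched at any time with the standard -o json output (names match the manifest above):

    kubectl get service nginx-service --namespace=pro -o json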
{
  "kind": "Deployment",
  "apiVersion": "extensions/v1beta1",
  "metadata": {
    "name": "nginx",
    "namespace": "pro",
    "selfLink": "/apis/extensions/v1beta1/namespaces/pro/deployments/nginx",
    "uid": "f002b402-8690-11e9-93ab-0894ef381eca",
    "resourceVersion": "14643869",
    "generation": 3,
    "creationTimestamp": "2019-06-04T06:21:07Z",
    "labels": {
      "app": "nginx"
    },
    "annotations": {
      "deployment.kubernetes.io/revision": "1"
    }
  },
  "spec": {
    "replicas": 1,
    "selector": {
      "matchLabels": {
        "app": "nginx"
      }
    },
    "template": {
      "metadata": {
        "creationTimestamp": null,
        "labels": {
          "app": "nginx"
        }
      },
      "spec": {
        "volumes": [
          {
            "name": "data",
            "nfs": {
              "server": "node1",
              "path": "/home/nfs/pro"
            }
          },
          {
            "name": "letsencrypt",
            "nfs": {
              "server": "node1",
              "path": "/home/nfs/pro"
            }
          }
        ],
        "containers": [
          {
            "name": "nginx",
            "image": "registry.cn-hangzhou.aliyuncs.com/youdt/nginx:v2",
            "ports": [
              {
                "containerPort": 80,
                "protocol": "TCP"
              },
              {
                "containerPort": 81,
                "protocol": "TCP"
              },
              {
                "containerPort": 443,
                "protocol": "TCP"
              }
            ],
            "env": [
              {
                "name": "DB_HOST",
                "value": "rm-bp10h4rjh8q877420lo.mysql.rds.aliyuncs.com"
              },
              {
                "name": "DB_PASS",
                "value": "nginx"
              },
              {
                "name": "DB_USER",
                "value": "nginx"
              },
              {
                "name": "DB_PORT",
                "value": "3306"
              },
              {
                "name": "DB",
                "value": "nginx"
              },
              {
                "name": "NODE_ENV",
                "value": "production"
              }
            ],
            "resources": {},
            "volumeMounts": [
              {
                "name": "data",
                "mountPath": "/data",
                "subPath": "nginx/data"
              },
              {
                "name": "letsencrypt",
                "mountPath": "/etc/letsencrypt",
                "subPath": "nginx/encrypt"
              }
            ],
            "terminationMessagePath": "/dev/termination-log",
            "terminationMessagePolicy": "File",
            "imagePullPolicy": "IfNotPresent"
          }
        ],
        "restartPolicy": "Always",
        "terminationGracePeriodSeconds": 30,
        "dnsPolicy": "ClusterFirst",
        "securityContext": {},
        "schedulerName": "default-scheduler"
      }
    },
    "strategy": {
      "type": "RollingUpdate",
      "rollingUpdate": {
        "maxUnavailable": "25%",
        "maxSurge": "25%"
      }
    },
    "revisionHistoryLimit": 10,
    "progressDeadlineSeconds": 600
  },
  "status": {
    "observedGeneration": 3,
    "replicas": 1,
    "updatedReplicas": 1,
    "readyReplicas": 1,
    "availableReplicas": 1,
    "conditions": [
      {
        "type": "Progressing",
        "status": "True",
        "lastUpdateTime": "2019-06-04T06:22:00Z",
        "lastTransitionTime": "2019-06-04T06:21:07Z",
        "reason": "NewReplicaSetAvailable",
        "message": "ReplicaSet \"nginx-5d7bdf6c6f\" has successfully progressed."
      },
      {
        "type": "Available",
        "status": "True",
        "lastUpdateTime": "2019-09-25T06:22:38Z",
        "lastTransitionTime": "2019-09-25T06:22:38Z",
        "reason": "MinimumReplicasAvailable",
        "message": "Deployment has minimum availability."
      }
    ]
  }
}
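To confirm the NFS-backed volumeMounts from this Deployment actually show up inside the running container, roughly (exec by deploy/NAME depends on the kubectl version; otherwise look up the pod name first and exec into it directly):

    kubectl get pod --namespace=pro -l app=nginx
    # list the two mount points declared above
    kubectl exec --namespace=pro deploy/nginx -- ls /data /etc/letsencrypt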

Other

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      name: nginx
  template:
    metadata:
      labels:
        name: nginx
    spec:
      containers:
      - name: nginx
        image: harbor.xxx.cn/official_hub/nginx:1.13-alpine
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service-nodeport
spec:
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
  type: NodePort
  selector:
    name: nginx
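No nodePort is pinned in this Service, so Kubernetes assigns one; a rough check (the manifest file name and node IP are placeholders, not from the original notes):

    kubectl apply -f nginx-nodeport.yaml          # placeholder file name for the manifest above
    kubectl get service nginx-service-nodeport    # read the auto-assigned NodePort
    # curl http://<any-node-ip>:<assigned-nodeport>/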
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: proxy-nginx
  namespace: kube-system
data:
  default.conf: |-
    upstream prometheus {
        server prometheus:9090;
    }
    upstream grafana {
        server monitoring-grafana:80;
    }
    upstream dashboard {
        server 1.2.3.4:32766;
    }
    server {
        listen       80;
        server_name  localhost;

        location / {
            root   /usr/share/nginx/html;
            index  index.html index.htm;
        }
        location /check {
            default_type text/plain;
            return 200 "serving is ok!";
        }
        location /status {
            stub_status on;
            access_log  off;
        }
        location /prometheus {
            proxy_pass http://prometheus;
            proxy_set_header Host $host;
        }
        location /grafana {
            proxy_pass http://grafana;
            rewrite ^/grafana/(.*) /$1 break;
            proxy_set_header Host $host;
        }
        location /dashboard {
            auth_basic "Password please";
            auth_basic_user_file /etc/nginx/conf.d/nginx_passwd;
            proxy_pass http://dashboard;
            rewrite ^/dashboard/(.*) /$1 break;
            proxy_set_header Host $host;
        }

        # redirect server error pages to the static page /50x.html
        # chengang from k8s config map file
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   /usr/share/nginx/html;
        }
    }
  nginx_passwd: |-
    admin:xxxxxxxxxxxxxxxxxxxxxxxxx
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: proxy-nginx
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: proxy-nginx
    spec:
      containers:
      - name: nginx
        image: harbor.xxx.cn/official_hub/nginx:1.13-alpine
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
          protocol: TCP
        volumeMounts:
        - name: nginx-conf
          mountPath: /etc/nginx/conf.d
      volumes:
      - name: nginx-conf
        configMap:
          name: proxy-nginx
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
      - key: "node-role.kubernetes.io/master"
        effect: "NoSchedule"
---
apiVersion: v1
kind: Service
metadata:
  name: proxy-nginx
  namespace: kube-system
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
    nodePort: 32767
  selector:
    k8s-app: proxy-nginx
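The ConfigMap exposes /check and /status probes behind NodePort 32767 (scheduled onto master nodes per the nodeSelector); a rough smoke test, with the master IP left as a placeholder:

    curl http://<master-node-ip>:32767/check    # plain-text "serving is ok!"
    curl http://<master-node-ip>:32767/status   # nginx stub_status output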