| Hostname | Public IP | eth0 internal IP | Service |
| --- | --- | --- | --- |
| k8s-master | 180.76.97.148 | 192.168.48.9 | master |
| k8s-node1 | 180.76.147.43 | 192.168.48.10 | node1 |
| k8s-node2 | 180.76.159.209 | 192.168.48.11 | node2 |
| k8s-etcd | 180.76.116.214 | 192.168.48.12 | etcd, harbor |
| ljsd-test-14 | | 60.1.1.14 | docker client |
# Log in to 60.1.1.14
# Account: root xxxxxxx
# Clone the gm project code from git
[root@ljsd-test-14 ~]# git clone http://60.1.1.230/backend/gm_tw_admin.git
[root@ljsd-test-14 ~]# cd gm_tw_admin/
# Build and package with mvn; the resulting jar is ./target/gm_tw_admin-1.0-SNAPSHOT.jar
[root@ljsd-test-14 gm_tw_admin]# mvn clean install
[INFO] Scanning for projects...
[INFO]
[INFO] ------------------------------------------------------------------------
[INFO] Building gm_tw_admin 1.0-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO]
[INFO] --- maven-clean-plugin:2.6.1:clean (default-clean) @ gm_tw_admin ---
[INFO] Deleting /root/gm_tw_admin/target
[INFO]
[INFO] --- maven-resources-plugin:2.6:resources (default-resources) @ gm_tw_admin ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] Copying 1 resource
[INFO] Copying 1115 resources
[INFO]
[INFO] --- maven-compiler-plugin:3.1:compile (default-compile) @ gm_tw_admin ---
[INFO] Changes detected - recompiling the module!
[INFO] Compiling 86 source files to /root/gm_tw_admin/target/classes
[WARNING] /root/gm_tw_admin/src/main/java/com/jmfy/controller/RecoverItemController.java: Some input files use unchecked or unsafe operations.
[WARNING] /root/gm_tw_admin/src/main/java/com/jmfy/controller/RecoverItemController.java: Recompile with -Xlint:unchecked for details.
[INFO]
[INFO] --- maven-resources-plugin:2.6:testResources (default-testResources) @ gm_tw_admin ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] skip non existing resourceDirectory /root/gm_tw_admin/src/test/resources
[INFO]
[INFO] --- maven-compiler-plugin:3.1:testCompile (default-testCompile) @ gm_tw_admin ---
[INFO] No sources to compile
[INFO]
[INFO] --- maven-surefire-plugin:2.18.1:test (default-test) @ gm_tw_admin ---
[INFO] No tests to run.
[INFO]
[INFO] --- maven-jar-plugin:2.6:jar (default-jar) @ gm_tw_admin ---
[INFO] Building jar: /root/gm_tw_admin/target/gm_tw_admin-1.0-SNAPSHOT.jar
[INFO]
[INFO] --- spring-boot-maven-plugin:1.4.3.RELEASE:repackage (default) @ gm_tw_admin ---
[INFO]
[INFO] --- maven-install-plugin:2.5.2:install (default-install) @ gm_tw_admin ---
[INFO] Installing /root/gm_tw_admin/target/gm_tw_admin-1.0-SNAPSHOT.jar to /root/.m2/repository/com/deshen/gm_tw_admin/1.0-SNAPSHOT/gm_tw_admin-1.0-SNAPSHOT.jar
[INFO] Installing /root/gm_tw_admin/pom.xml to /root/.m2/repository/com/deshen/gm_tw_admin/1.0-SNAPSHOT/gm_tw_admin-1.0-SNAPSHOT.pom
[INFO] ------------------------------------------------------------------------
[INFO] BUILD SUCCESS
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 3.828 s
[INFO] Finished at: 2019-05-06T17:52:19+08:00
[INFO] Final Memory: 44M/437M
[INFO] ------------------------------------------------------------------------
# Write the Dockerfile
[root@ljsd-test-14 gm_tw_admin]# cat Dockerfile
FROM primetoninc/jdk:1.8
ADD ./target/gm_tw_admin-1.0-SNAPSHOT.jar /data/gm/gm_tw_admin-1.0-SNAPSHOT.jar
ADD ./conf/ /data/gm/conf/
EXPOSE 8888
WORKDIR /data/gm/
CMD ["java","-Dspring.config.location=./conf/application.properties","-jar","./gm_tw_admin-1.0-SNAPSHOT.jar"]
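Optionally, a .dockerignore next to the Dockerfile keeps the build context small, since the ADD instructions only need ./target and ./conf; the entries below are just one reasonable choice:
[root@ljsd-test-14 gm_tw_admin]# cat .dockerignore
.git
src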
# Build the image from the Dockerfile
[root@ljsd-test-14 gm_tw_admin]# docker build -t gm:latest .
Sending build context to Docker daemon 103.9 MB
Step 1/6 : FROM primetoninc/jdk:1.8
---> f4b4fccc65bb
Step 2/6 : ADD ./target/gm_tw_admin-1.0-SNAPSHOT.jar /data/gm/gm_tw_admin-1.0-SNAPSHOT.jar
---> 6eae187655db
Removing intermediate container 8c5365b86b7c
Step 3/6 : ADD ./conf/ /data/gm/conf/
---> 0c386e0e4204
Removing intermediate container 9b37d0bfaa2a
Step 4/6 : EXPOSE 8888
---> Running in 2e219c84522c
---> a4ef2a9c419c
Removing intermediate container 2e219c84522c
Step 5/6 : WORKDIR /data/gm/
---> 75829ad402f6
Removing intermediate container 36cc1c3be630
Step 6/6 : CMD java -Dspring.config.location=./conf/application.properties -jar ./gm_tw_admin-1.0-SNAPSHOT.jar
---> Running in b6489177e685
---> 436802adbf94
Removing intermediate container b6489177e685
Successfully built 436802adbf94
# Check the newly built image
[root@ljsd-test-14 gm_tw_admin]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
gm latest 436802adbf94 29 seconds ago 708 MB
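Optionally, the image can be smoke-tested locally before being pushed (the container name gm-test and host port 8888 below are arbitrary choices):
[root@ljsd-test-14 gm_tw_admin]# docker run -d --name gm-test -p 8888:8888 gm:latest
[root@ljsd-test-14 gm_tw_admin]# curl -I http://127.0.0.1:8888/login
[root@ljsd-test-14 gm_tw_admin]# docker rm -f gm-test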
# Tag the image for the Harbor registry
[root@ljsd-test-14 gm_tw_admin]# docker tag gm:latest reg.kt007.com/ljsd/gm:latest
# Push the tagged image to the Harbor registry
[root@ljsd-test-14 gm_tw_admin]# docker push reg.kt007.com/ljsd/gm:latest
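The push only succeeds if this Docker client is already logged in to the Harbor registry; if it fails with an authentication error, log in first with the real Harbor account:
[root@ljsd-test-14 gm_tw_admin]# docker login reg.kt007.com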
# Log in to 180.76.97.148 (k8s-master)
[root@k8s-master ~]# cd gm/
[root@k8s-master gm]# ls
deployment_gm.yaml ingress-gm.yaml services_gm.yaml
# Write the yaml files
[root@k8s-master gm]# cat deployment_gm.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ysj-gm
  labels:
    app: ysj-tw-gm
spec:
  replicas: 3
  selector:
    matchLabels:
      app: ysj-tw-gm
  template:
    metadata:
      labels:
        app: ysj-tw-gm
    spec:
      imagePullSecrets:
      - name: registry-pull-secret
      containers:
      - name: tomcat
        image: reg.kt007.com/ljsd/gm:latest
        imagePullPolicy: Always
        ports:
        - containerPort: 8888
[root@k8s-master gm]#
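The deployment pulls from the private Harbor registry through the imagePullSecret named registry-pull-secret. If that secret does not exist yet in the default namespace, it can be created roughly like this (username and password are placeholders for the real Harbor account):
[root@k8s-master gm]# kubectl create secret docker-registry registry-pull-secret \
    --docker-server=reg.kt007.com \
    --docker-username=<harbor-user> \
    --docker-password=<harbor-password> \
    -n default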
[root@k8s-master gm]# cat services_gm.yaml
apiVersion: v1
kind: Service
metadata:
  name: ysj-gm
  namespace: default
spec:
  selector:
    app: ysj-tw-gm
  ports:
  - name: http
    port: 8888
    targetPort: 8888
[root@k8s-master gm]#
[root@k8s-master gm]# cat ingress-gm.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: ingress-gm
  namespace: default
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
  - host: gm.ljsd.com
    http:
      paths:
      - path:
        backend:
          serviceName: ysj-gm
          servicePort: 8888
[root@k8s-master gm]#
# Create the gm deployment
[root@k8s-master gm]# kubectl create -f deployment_gm.yaml
# Create the gm service
[root@k8s-master gm]# kubectl create -f services_gm.yaml
# Expose it through the Ingress
[root@k8s-master gm]# kubectl create -f ingress-gm.yaml
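A quick sanity check that the pods are running and the service has endpoints (label and object names as defined in the yaml files above):
[root@k8s-master gm]# kubectl get pods -l app=ysj-tw-gm
[root@k8s-master gm]# kubectl get endpoints ysj-gm
[root@k8s-master gm]# kubectl get ingress ingress-gm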
# Check the ingress-nginx NodePort: HTTP is 32080
[root@k8s-master gm]# kubectl get svc -n ingress-nginx
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
ingress-nginx NodePort 10.244.6.129 <none> 80:32080/TCP,443:32443/TCP 6d
On the client machine, edit C:\Windows\System32\drivers\etc\hosts
and add the line:
180.76.159.209 gm.ljsd.com
# Then open http://gm.ljsd.com:32080/login in a browser and log in with
account: admin
password: 123
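The route can also be verified from any shell without editing the hosts file, by sending the Host header straight to a node's NodePort (node IP and port 32080 as shown above):
[root@k8s-master gm]# curl -I -H "Host: gm.ljsd.com" http://180.76.159.209:32080/login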
# One problem remains: after logging in, the page immediately asks for a login again. The developers confirmed that the login flow keeps a server-side session. Kubernetes' default load-balancing policy is round-robin, so the first request lands on one pod and the next on another, and the session is lost.
Solutions:
1. kube-proxy uses iptables by default, which balances round-robin; switch kube-proxy to IPVS and use a source-hash scheduler (the ip_hash equivalent), as sketched below.
2. Set replicas to 1 in the deployment yaml.
3. Deploy with a StatefulSet instead of a Deployment.
   A StatefulSet is designed for stateful services (Deployments and ReplicaSets are designed for stateless services).
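For option 1, a rough sketch on a kubeadm-style cluster (the ConfigMap name, the config.conf key and the k8s-app=kube-proxy label are kubeadm defaults; the ipvs kernel modules must already be loaded on every node):
[root@k8s-master ~]# kubectl -n kube-system edit configmap kube-proxy
# in config.conf set:
#   mode: "ipvs"
#   ipvs:
#     scheduler: "sh"    # "sh" = source hashing, the IPVS equivalent of ip_hash
# then recreate the kube-proxy pods so the DaemonSet picks up the new config
[root@k8s-master ~]# kubectl -n kube-system delete pod -l k8s-app=kube-proxy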