yum install perl perl-devel libaio libaio-devel perl-Time-HiRes perl-DBD-MySQL libev -y
yum localinstall percona-xtrabackup-24-2.4.4-1.el7.x86_64.rpm
vim /etc/my.cnf
[client]
socket=<socket path>
Make sure a client can connect to the database through this socket; xtrabackup relies on it.
Full backup command:
innobackupex --user=root --password=123 /data/xbk/
Full backup into a directory named with a custom timestamp (instead of the auto-generated one):
innobackupex --user=root --password=123 --no-timestamp /data/xbk/full_`date +%F`
Restoring a full backup:
(Scenario: the database is shut down and its data directory has been deleted.)
First run the backup preparation step:
prepare (pre-processing)
How prepare works: roll forward with redo and roll back with undo, mimicking the crash-safe recovery (CSR) process.
innobackupex --apply-log /data/xbk/<backup directory>/
Restore the data and start the database:
cp -a /data/xbk/<backup directory>/* /data/<database data directory>/
Change the ownership of the copied files to mysql.
Start the database.
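A minimal restore sketch, assuming the backup lives in /data/xbk/full_2023-09-21, the data directory is /data/mysqldata, and mysqld is managed by systemd (all placeholder assumptions):
# prepare the full backup (redo roll-forward + undo rollback)
innobackupex --apply-log /data/xbk/full_2023-09-21/
# the database must be stopped and the data directory empty before the copy-back
systemctl stop mysqld
cp -a /data/xbk/full_2023-09-21/* /data/mysqldata/
# ownership must be mysql so mysqld can read the restored files
chown -R mysql:mysql /data/mysqldata
systemctl start mysqld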
xtrabackup incremental backups:
Incremental backup logic:
Prerequisite: every incremental depends on a full backup (assume the full backup is taken on day one).
Each subsequent incremental is taken against the previous backup (the full backup, or the previous incremental).
It mainly compares against the LSN recorded by the previous backup:
data pages that changed after that LSN are copied, along with any new redo generated while the backup runs.
An incremental therefore contains only a small amount of data.
At restore time:
all incremental (inc) backups must be merged into the full backup in order before restoring,
and every backup needs a prepare (pre-processing) pass.
Step-by-step:
1. Create a database and table and insert some data.
2. Simulate the full backup:
innobackupex --user=root --password=123 --no-timestamp /data/backup/full
3. Simulate data changes:
create another table and insert data.
4. Simulate an incremental backup (inc, compared by LSN):
innobackupex --user=root --password=123 --no-timestamp --incremental --incremental-basedir=/data/backup/full /data/backup/inc
--incremental enables incremental mode
--incremental-basedir points at the previous backup this incremental is based on.
5. Simulate the next incremental backup:
add another table.
innobackupex --user=root --password=123 --no-timestamp --incremental --incremental-basedir=/data/backup/inc /data/backup/inc1
6. Simulate one more round of data changes and an incremental backup.
7. Break things: kill the mysqld process and delete the data.
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Recovery based on xbk full + inc + binlog:
Confirm the backups succeeded:
check the backup directories, the xtrabackup output log, or the xtrabackup_checkpoints files.
The last_lsn of one backup is normally 9 higher than the from_lsn of the next backup; if the LSN chain lines up like this, the backups are consistent.
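A quick way to eyeball the LSN chain, assuming the backup directories from the steps above:
cat /data/backup/full/xtrabackup_checkpoints   # note to_lsn / last_lsn
cat /data/backup/inc/xtrabackup_checkpoints    # its from_lsn should match the full backup's to_lsn
cat /data/backup/inc1/xtrabackup_checkpoints   # its from_lsn should match inc's to_lsn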
Recovery:
1. Merge and prepare all incremental backups into the full backup.
Prepare the base full backup first:
innobackupex --apply-log --redo-only /data/backup/full
--redo-only applies only the redo log and skips the undo (rollback) phase, so the LSNs stay consistent for the next merge.
It forces the rollback step to be skipped.
See --help for details.
Every backup except the last incremental needs the --redo-only flag.
Merge and prepare inc into full:
innobackupex --apply-log --redo-only --incremental-dir=/data/backup/inc /data/backup/full
Merge and prepare inc1 (the last one, so no --redo-only) into full:
innobackupex --apply-log --incremental-dir=/data/backup/inc1 /data/backup/full
Run one final prepare on the merged full backup:
innobackupex --apply-log /data/backup/full
Restore the data and restart the database (either cp the files back, or point datadir= in my.cnf at the merged backup directory).
Extract the binlog:
Start point: the position recorded in xtrabackup_binlog_info of the last backup.
End point: since the failure was an rm of the data directory, simply use the end of the last binlog.
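A sketch of the binlog extraction, assuming the last backup recorded mysql-bin.000011 / 517 in xtrabackup_binlog_info and the binlogs live in /data/binlog (file, position and paths are placeholders):
cat /data/backup/inc1/xtrabackup_binlog_info          # e.g. mysql-bin.000011  517
mysqlbinlog --start-position=517 /data/binlog/mysql-bin.000011 > /data/backup/binlog_tail.sql
# after the xbk restore, replay the tail
mysql -uroot -p123 < /data/backup/binlog_tail.sql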
Setting up replication:
Prepare two or more servers.
Check the key prerequisites:
make sure every server has a different server_id;
check that the binlog is enabled on the master.
Create the replication user on the master:
grant replication slave on *.* to repl@"%" identified by "123";
Back up the master and restore it onto the slave:
mysqldump the master's data, then restore the dump on the slave.
Tell the slave how to replicate:
run the following on the slave:
help change master to;
CHANGE MASTER TO
MASTER_HOST='192.168.75.33',
MASTER_USER='repl',
MASTER_PASSWORD='123',
MASTER_LOG_FILE='mysql-bin.000011',
MASTER_LOG_POS=517,
MASTER_CONNECT_RETRY=10;
grep "\-- CHANGE MASTER TO" <full backup dump file>    finds the MASTER_LOG_FILE and MASTER_LOG_POS values recorded in the dump
Start the dedicated replication threads.
Run on the slave: start slave;
CHANGE MASTER TO
MASTER_HOST='192.168.75.33',
MASTER_USER='repl',
MASTER_PASSWORD='123',
MASTER_PORT=3306,
MASTER_LOG_FILE='mysql-bin.000011',
MASTER_LOG_POS=517,
MASTER_CONNECT_RETRY=10;
start slave;
Verification:
Check the replication thread status:
show slave status\G
Look at the two Running fields.
If the setup failed and you need to start over,
clear the slave configuration:
run on the slave:
stop slave; reset slave all;
Replication fault analysis and handling:
Thread status and error fields on the slave:
Slave_IO_Running: Yes
Slave_SQL_Running: Yes
Last_IO_Errno:
Last_IO_Error:
Last_SQL_Errno:
Last_SQL_Error:
IO thread:
Normal state: Slave_IO_Running: Yes
Abnormal states:
Slave_IO_Running: No
Slave_IO_Running: Connecting
Failure causes when connecting to the master:
1. Network, firewall or port problems.
2. Wrong user or password; the user must have the REPLICATION SLAVE privilege.
3. The master has reached its connection limit.
4. Mismatched versions between master and slave.
Managing the replication threads:
Fault simulation:
start slave     starts both threads
stop slave      stops both threads
Start a single thread:
start slave sql_thread;
start slave io_thread;
Remove the slave role entirely:
reset slave all;
show slave status\G shows the slave's state.
Example: configure a wrong password,
then start the slave:
error shown:
Slave_IO_Running: Connecting
Handling the error:
The general approach is to try connecting to the master manually with the replication user (see the sketch below).
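A minimal connectivity check, assuming the replication user repl/123 and the master at 192.168.75.33 from the setup above:
mysql -urepl -p123 -h 192.168.75.33 -P 3306 -e "select 1;"
# if this fails, the problem is network/firewall, credentials or max connections -- not replication itself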
Requesting and receiving the binlog:
The master's binary logs are incomplete: damaged, not contiguous, and so on.
Solution:
regenerate the binlogs.
Other causes: the slave requests a bad start position, or master and slave have conflicting server_id (server_uuid) values,
for example CHANGE MASTER TO was run on the master pointing at itself.
Solution:
change the server_id.
Relay log problems (rare).
Fault simulation:
someone runs reset master on the master.
Doing this during busy hours may hang the database,
and to restore replication afterwards the master/slave relationship has to be rebuilt.
If reset master really has to be run in production:
1. Pick a quiet period and ask for a few minutes of downtime.
2. Wait for the slave to replay all of the master's logs.
3. Run reset master on the master.
4. Re-point the slave at the master:
stop slave
reset slave all
re-run CHANGE MASTER TO
start slave
Homework for 9-21:
Write an xbk backup script for this scenario: full backup on Sunday, incremental backups on the other days.
Simulation:
full backup on Sunday, incrementals Monday through Thursday, the database is dropped on Friday.
Then simulate recovery using xbk plus the binlog.
Ordinary replication is good at surviving physical damage;
logical operations such as DROP are replicated to the slave as well, so they need to be delayed before reaching the slave.
Delayed slave:
can handle logical failures.
Configuration:
add the delay parameter on the slave:
stop slave;
CHANGE MASTER TO MASTER_DELAY = 300;    # delay in seconds, set according to your needs
start slave;
show slave status\G
SQL_Delay: 300
SQL_Remaining_Delay: NULL
Fault simulation and recovery:
Simulate data:
create a database --> create tables in it and insert data --> simulate dropping the database.
Recovery approach:
1. Stop the application first and put up a maintenance page.
2. Stop the SQL thread on the slave:
stop slave sql_thread;
Watch the relay log position (relay-log.info) to make sure nothing is left half-applied, then:
stop slave;
3. Apply the missing tail of the relay log on the slave by hand, mimicking what the SQL thread would have done.
Log location: the relay-log files.
Range: from the position recorded in relay-log.info up to the event just before the DROP.
4. Bring the service back:
   option 1: export the database from the slave and restore it onto the master;
   option 2 (recommended): promote the slave to be the new master.
Recovery procedure:
1. On the slave: stop slave sql_thread;
2. Extract from the relay log:
   start point: cat relay-log.info
   end point:
   find the relay-bin file referenced in relay-log.info,
   show relaylog events in 'relay-bin.xxxxx';
   only look at the first position column; the later one is the corresponding position in the master's binlog.
   Find the position just before the DROP statement.
mysqlbinlog --start-position=xx --stop-position=xxx relay-bin.xxxxx \
> /data/backup/xxx.sql
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
[mysqld]
user=mysql
basedir=/usr/local/mysql
datadir=/data/mysqldata
server_id=6
log-error=/var/log/mysql/error.log
pid-file=/tmp/mysql.pid
port=3306
socket=/tmp/mysql.sock
log_bin=/data/binlog/mysql-bin
sync_binlog=1
binlog_format=row
gtid-mode=on
enforce-gtid-consistency=true
secure-file-priv=/tmp
log-slave-updates=1
autocommit=0
slow_query_log=1
slow_query_log_file=/var/log/mysql/slow.log
long_query_time=1
log_queries_not_using_indexes=1
[mysql]
socket=/tmp/mysql.sock
prompt=[\\d]>
[client]
socket=/tmp/mysql.sock
GTID-based replication:
Building the master/slave pair:
Master: create the replication user: grant replication slave on *.* to repl@'%' identified by '123';
Slave:
CHANGE MASTER TO
MASTER_HOST='192.168.75.33',
MASTER_USER='repl',
MASTER_PASSWORD='123',
MASTER_AUTO_POSITION=1;
start slave;
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
MHA high-availability architecture:
1. Prepare a GTID-based one-master, two-slave setup.
2. Install the node package on all three machines; in production the manager should run on its own host, but here it can go on one of the slaves.
Required symlinks:
ln -s /usr/local/mysql/bin/mysqlbinlog /usr/bin/mysqlbinlog
ln -s /usr/local/mysql/bin/mysql /usr/bin/mysql
3. Set up passwordless SSH between all nodes (a loop version is sketched below):
ssh-copy-id -i ~/.ssh/id_rsa.pub root@<node ip>, enter the node's root password and press Enter.
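A loop sketch for distributing the key, assuming three nodes at placeholder IPs 192.168.75.33-35 and that ssh-keygen has already been run on each node:
for ip in 192.168.75.33 192.168.75.34 192.168.75.35; do
    ssh-copy-id -i ~/.ssh/id_rsa.pub root@$ip      # prompts once per node for the root password
done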
4. Install the node package dependencies on every node:
yum install perl-DBD-MySQL -y
rpm -ivh mha4mysql-node-0.58-0.el7.centos.noarch.rpm
5. Grant the MHA user on the master node (the slaves replicate the grant):
grant all privileges on *.* to mha@'%' identified by 'mha';
6. Install the manager (on the chosen slave):
yum install -y perl-Config-Tiny epel-release perl-Log-Dispatch perl-Parallel-ForkManager perl-Time-HiRes    (run it twice: the first pass installs epel-release so the extra packages become available, the second pass installs the remaining packages)
rpm -ivh mha4mysql-manager-0.58-0.el7.centos.noarch.rpm
7. Create the manager configuration:
Create the config directory:
mkdir /etc/mha
Create the log directory:
mkdir -p /var/log/mha/app1
Create the MHA config file:
vim /etc/mha/app1.cnf
[server default]
manager_log=/var/log/mha/app1/manager
manager_workdir=/var/log/mha/app1
master_binlog_dir=/data/binlog
user=mha
password=mha
ping_interval=2
repl_password=123
repl_user=repl
ssh_user=root
[server1]
hostname=<master ip>
port=3306
[server2]
hostname=<slave 1 ip>
port=3306
[server3]
hostname=<slave 2 ip>
port=3306
Configuration checks:
masterha_check_ssh --conf=/etc/mha/app1.cnf
masterha_check_repl --conf=/etc/mha/app1.cnf
Start MHA on the node where the manager is installed:
nohup masterha_manager --conf=/etc/mha/app1.cnf --remove_dead_master_conf --ignore_last_failover < /dev/null >/var/log/mha/app1/manager.log 2>&1 &
Check the cluster status:
masterha_check_status --conf=/etc/mha/app1.cnf
Making the VIP transparent to applications:
MHA has a built-in VIP feature, but it cannot cross machine rooms or networks,
so a script is used instead:
Add the manager parameter:
master_ip_failover_script=/usr/local/bin/master_ip_failover
Edit the script:
vim /usr/local/bin/master_ip_failover
my $vip = '192.168.75.55/24';   # a free IP
my $key = '1';
my $ssh_start_vip = "/sbin/ifconfig ens33:$key $vip";
my $ssh_stop_vip = "/sbin/ifconfig ens33:$key down";
(all nodes must use the same NIC name)
Fix the line endings and any stray wide characters in the script:
dos2unix /usr/local/bin/master_ip_failover
Make it executable:
chmod a+x /usr/local/bin/master_ip_failover
Bind the VIP on the master:
ifconfig ens33:1 192.168.75.55/24
Restart MHA:
masterha_stop --conf=/etc/mha/app1.cnf
then start it again.
(The MHA source package ships a sample VIP failover script.)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Homework for 9-22:
1. A one-click script that installs a GTID-based MySQL master/slave pair.
2. A one-click script that installs MHA on top of an existing MySQL master/slave setup.
3. A recovery script that quickly re-joins a node evicted by MHA and rebuilds the architecture.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
MHA binlog server:
It keeps a real-time copy of the master's binlogs in its own location.
It should run on a dedicated machine; here it is placed temporarily on slave 3.
vim /etc/mha/app1.cnf
[binlog1]
no_master=1                             (the manager will never promote this host to master)
hostname=<binlog server ip>
master_binlog_dir=/data/mysql/binlog    (where the pulled copies of the master's binlogs are stored)
mkdir -p /data/mysql/binlog
chown mysql:mysql -R /data/*
Pull the master's binlogs:
mysqlbinlog -R --host=<master ip> --user=mha --password=mha --raw --stop-never mysql-bin.000001 &
Notes:
1. cd into the directory created above before running the command.
2. Start pulling from the master's current binlog (see the sketch below).
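A sketch of finding the current binlog before starting the pull (IP, credentials and binlog name follow the values used above and are placeholders):
cd /data/mysql/binlog
# ask the master which binlog it is currently writing
mysql -h 192.168.75.33 -umha -pmha -e "show master status;"
# then pull from that file onward, e.g. mysql-bin.000011
mysqlbinlog -R --host=192.168.75.33 --user=mha --password=mha --raw --stop-never mysql-bin.000011 &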
Restart MHA.
Test MHA's failover:
take the master down and watch the MHA log.
Troubleshooting approach:
1. Check the process state first: is the manager still running?
2. Check the config file; after a successful failover MHA removes the dead master's section, which confirms the switch happened.
3. Read the relevant logs.
Repair procedure:
1. Fix the failed database instance.
2. Repair replication: manually add the fixed instance back into the existing topology as a new slave.
3. Restore the MHA config file to its original contents.
4. Check SSH trust between nodes and the repl user status.
5. Repair the binlog server: clear the old binlogs in its directory and start a fresh pull.
6. Check the VIP on the current master; re-bind it manually if it is missing.
7. Finally start MHA and check its status.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Recovery procedure when all three nodes go down:
1. Start all nodes.
2. Identify the current master:
check the MHA config file and confirm inside the databases.
3. Rebuild the one-master, two-slave replication:
CHANGE MASTER TO
MASTER_HOST='192.168.75.34',
MASTER_USER='repl',
MASTER_PASSWORD='123',
MASTER_AUTO_POSITION=1;
i.e. rebuild the replication topology.
4. Repair the MHA config file.
5. Repair the binlog server.
6. Repair the VIP on the master.
7. Check SSH trust and replication.
8. Start MHA and check its status.
(These steps apply to most recovery situations.)
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Atlas read/write splitting:
It makes fuller use of the hardware resources.
In production it should run on its own machine; here it is placed temporarily on the slave that runs the MHA manager.
rpm -ivh Atlas-2.2.1.el6.x86_64.rpm
(the package targets EL6 but is compatible)
(Atlas is usually used together with MHA)
Configuration:
cd /usr/local/mysql-proxy/conf
mv test.cnf test.cnf.bak
vi test.cnf
[mysql-proxy]
admin-username = user
admin-password = pwd
proxy-backend-addresses = <vip>:3306          (the write node, i.e. wherever the master/VIP is)
proxy-read-only-backend-addresses = <read node 1>:3306,<read node 2>:3306
pwds = repl:3yb5jEku5h4=,mha:O2jBXONX098=
daemon = true
keepalive = true
event-threads = 8
log-level = message
log-path = /usr/local/mysql-proxy/log
sql-log=ON
proxy-address = 0.0.0.0:33060
admin-address = 0.0.0.0:2345
charset=utf8
Start Atlas:
/usr/local/mysql-proxy/bin/mysql-proxyd test start     (test is the config file prefix)
ps -ef |grep proxy
Test the read/write splitting:
Read test:
mysql -umha -pmha -h <ip of the slave running Atlas/manager> -P 33060
mysql> select @@server_id;
Repeated reads are balanced round-robin across the read nodes.
Write test:
mysql> begin; select @@server_id; commit;
Wrapping the statement in a transaction forces it to the write node, which shows where writes land.
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Atlas administration
[root@db03 conf]# mysql -uuser -ppwd -h 10.0.0.53 -P2345
db03 [(none)]>select * from help;
4.1 List all backend nodes
db03 [(none)]>SELECT * FROM backends;
+-------------+----------------+-------+------+
| backend_ndx | address | state | type |
+-------------+----------------+-------+------+
| 1 | 10.0.0.55:3306 | up | rw |
| 2 | 10.0.0.52:3306 | up | ro |
| 3 | 10.0.0.53:3306 | up | ro |
+-------------+----------------+-------+------+
3 rows in set (0.00 sec)
4.2 Taking nodes offline and online
db03 [(none)]>SET OFFLINE 1;
+-------------+----------------+---------+------+
| backend_ndx | address | state | type |
+-------------+----------------+---------+------+
| 1 | 10.0.0.55:3306 | offline | rw |
+-------------+----------------+---------+------+
1 row in set (0.01 sec)
db03 [(none)]>SELECT * FROM backends;
+-------------+----------------+---------+------+
| backend_ndx | address | state | type |
+-------------+----------------+---------+------+
| 1 | 10.0.0.55:3306 | offline | rw |
| 2 | 10.0.0.52:3306 | up | ro |
| 3 | 10.0.0.53:3306 | up | ro |
+-------------+----------------+---------+------+
db03 [(none)]>SET ONLINE 1;
+-------------+----------------+---------+------+
| backend_ndx | address | state | type |
+-------------+----------------+---------+------+
| 1 | 10.0.0.55:3306 | unknown | rw |
+-------------+----------------+---------+------+
4.3 Removing and adding nodes
db03 [(none)]>REMOVE BACKEND 3;
db03 [(none)]>ADD SLAVE 10.0.0.53:3306;
ADD SLAVE is the one used most often. (All of the operations above are temporary until the config is saved.)
4.4 User management
On the master: grant all on *.* to oldliu@'%' identified by '123';
On the Atlas admin interface: SELECT * FROM pwds;      -- list the users Atlas knows about
On the Atlas admin interface: add pwd oldliu:123;      -- register the user granted on the master with Atlas
Password encryption:
/usr/local/mysql-proxy/bin/encrypt <password>
add enpwd oldliu:<encrypted password>
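A sketch of the encrypted-password flow, reusing the oldliu/123 example above (the encrypted string shown is the one the document already lists for password 123):
/usr/local/mysql-proxy/bin/encrypt 123          # prints the encrypted form, e.g. 3yb5jEku5h4=
# then, in the Atlas admin interface (port 2345):
# add enpwd oldliu:3yb5jEku5h4=
# save config;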
4.5 Persist the configuration
On the Atlas admin interface: save config;
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
========db01==============
cat >/data/3307/my.cnf<<EOF
[mysqld]
basedir=/usr/local/mysql
datadir=/data/3307/data
socket=/data/3307/mysql.sock
port=3307
log-error=/data/3307/mysql.log
log_bin=/data/3307/mysql-bin
binlog_format=row
skip-name-resolve
server-id=7
gtid-mode=on
enforce-gtid-consistency=true
log-slave-updates=1
EOF
cat >/data/3308/my.cnf<<EOF
[mysqld]
basedir=/usr/local/mysql
datadir=/data/3308/data
port=3308
socket=/data/3308/mysql.sock
log-error=/data/3308/mysql.log
log_bin=/data/3308/mysql-bin
binlog_format=row
skip-name-resolve
server-id=8
gtid-mode=on
enforce-gtid-consistency=true
log-slave-updates=1
EOF
cat >/data/3309/my.cnf<<EOF
[mysqld]
basedir=/usr/local/mysql
datadir=/data/3309/data
socket=/data/3309/mysql.sock
port=3309
log-error=/data/3309/mysql.log
log_bin=/data/3309/mysql-bin
binlog_format=row
skip-name-resolve
server-id=9
gtid-mode=on
enforce-gtid-consistency=true
log-slave-updates=1
EOF
cat >/data/3310/my.cnf<<EOF
[mysqld]
basedir=/usr/local/mysql
datadir=/data/3310/data
socket=/data/3310/mysql.sock
port=3310
log-error=/data/3310/mysql.log
log_bin=/data/3310/mysql-bin
binlog_format=row
skip-name-resolve
server-id=10
gtid-mode=on
enforce-gtid-consistency=true
log-slave-updates=1
EOF
cat >/etc/systemd/system/mysqld3307.service<<EOF
[Unit]
Description=MySQL Server
Documentation=man:mysqld(8)
Documentation=http://dev.mysql.com/doc/refman/en/using-systemd.html
After=network.target
After=syslog.target
[Install]
WantedBy=multi-user.target
[Service]
User=mysql
Group=mysql
ExecStart=/usr/local/mysql/bin/mysqld --defaults-file=/data/3307/my.cnf
LimitNOFILE = 5000
EOF
cat >/etc/systemd/system/mysqld3308.service<<EOF
[Unit]
Description=MySQL Server
Documentation=man:mysqld(8)
Documentation=http://dev.mysql.com/doc/refman/en/using-systemd.html
After=network.target
After=syslog.target
[Install]
WantedBy=multi-user.target
[Service]
User=mysql
Group=mysql
ExecStart=/usr/local/mysql/bin/mysqld --defaults-file=/data/3308/my.cnf
LimitNOFILE = 5000
EOF
cat >/etc/systemd/system/mysqld3309.service<<EOF
[Unit]
Description=MySQL Server
Documentation=man:mysqld(8)
Documentation=http://dev.mysql.com/doc/refman/en/using-systemd.html
After=network.target
After=syslog.target
[Install]
WantedBy=multi-user.target
[Service]
User=mysql
Group=mysql
ExecStart=/usr/local/mysql/bin/mysqld --defaults-file=/data/3309/my.cnf
LimitNOFILE = 5000
EOF
cat >/etc/systemd/system/mysqld3310.service<<EOF
[Unit]
Description=MySQL Server
Documentation=man:mysqld(8)
Documentation=http://dev.mysql.com/doc/refman/en/using-systemd.html
After=network.target
After=syslog.target
[Install]
WantedBy=multi-user.target
[Service]
User=mysql
Group=mysql
ExecStart=/usr/local/mysql/bin/mysqld --defaults-file=/data/3310/my.cnf
LimitNOFILE = 5000
EOF
========db02===============
cat >/data/3307/my.cnf<<EOF
[mysqld]
basedir=/usr/local/mysql
datadir=/data/3307/data
socket=/data/3307/mysql.sock
port=3307
log-error=/data/3307/mysql.log
log_bin=/data/3307/mysql-bin
binlog_format=row
skip-name-resolve
server-id=17
gtid-mode=on
enforce-gtid-consistency=true
log-slave-updates=1
EOF
cat >/data/3308/my.cnf<<EOF
[mysqld]
basedir=/usr/local/mysql
datadir=/data/3308/data
port=3308
socket=/data/3308/mysql.sock
log-error=/data/3308/mysql.log
log_bin=/data/3308/mysql-bin
binlog_format=row
skip-name-resolve
server-id=18
gtid-mode=on
enforce-gtid-consistency=true
log-slave-updates=1
EOF
cat >/data/3309/my.cnf<<EOF
[mysqld]
basedir=/usr/local/mysql
datadir=/data/3309/data
socket=/data/3309/mysql.sock
port=3309
log-error=/data/3309/mysql.log
log_bin=/data/3309/mysql-bin
binlog_format=row
skip-name-resolve
server-id=19
gtid-mode=on
enforce-gtid-consistency=true
log-slave-updates=1
EOF
cat >/data/3310/my.cnf<<EOF
[mysqld]
basedir=/usr/local/mysql
datadir=/data/3310/data
socket=/data/3310/mysql.sock
port=3310
log-error=/data/3310/mysql.log
log_bin=/data/3310/mysql-bin
binlog_format=row
skip-name-resolve
server-id=20
gtid-mode=on
enforce-gtid-consistency=true
log-slave-updates=1
EOF
2.8 Configure the replication topology
# shard1
## 10.0.0.51:3307 <-----> 10.0.0.52:3307
# db02
mysql -S /data/3307/mysql.sock -e "grant replication slave on *.* to repl@'10.0.0.%' identified by '123';"
mysql -S /data/3307/mysql.sock -e "grant all on *.* to root@'10.0.0.%' identified by '123' with grant option;"
# db01
mysql -S /data/3307/mysql.sock -e "CHANGE MASTER TO MASTER_HOST='10.0.0.52', MASTER_PORT=3307, MASTER_AUTO_POSITION=1, MASTER_USER='repl', MASTER_PASSWORD='123';"
mysql -S /data/3307/mysql.sock -e "start slave;"
mysql -S /data/3307/mysql.sock -e "show slave status\G"
# db02
mysql -S /data/3307/mysql.sock -e "CHANGE MASTER TO MASTER_HOST='10.0.0.51', MASTER_PORT=3307, MASTER_AUTO_POSITION=1, MASTER_USER='repl', MASTER_PASSWORD='123';"
mysql -S /data/3307/mysql.sock -e "start slave;"
mysql -S /data/3307/mysql.sock -e "show slave status\G"
## 10.0.0.51:3309 ------> 10.0.0.51:3307
# db01
mysql -S /data/3309/mysql.sock -e "CHANGE MASTER TO MASTER_HOST='10.0.0.51', MASTER_PORT=3307, MASTER_AUTO_POSITION=1, MASTER_USER='repl', MASTER_PASSWORD='123';"
mysql -S /data/3309/mysql.sock -e "start slave;"
mysql -S /data/3309/mysql.sock -e "show slave status\G"
## 10.0.0.52:3309 ------> 10.0.0.52:3307
# db02
mysql -S /data/3309/mysql.sock -e "CHANGE MASTER TO MASTER_HOST='10.0.0.52', MASTER_PORT=3307, MASTER_AUTO_POSITION=1, MASTER_USER='repl', MASTER_PASSWORD='123';"
mysql -S /data/3309/mysql.sock -e "start slave;"
mysql -S /data/3309/mysql.sock -e "show slave status\G"
====================================================================
# shard2
## 10.0.0.52:3308 <-----> 10.0.0.51:3308
# db01
mysql -S /data/3308/mysql.sock -e "grant replication slave on *.* to repl@'10.0.0.%' identified by '123';"
mysql -S /data/3308/mysql.sock -e "grant all on *.* to root@'10.0.0.%' identified by '123' with grant option;"
# db02
mysql -S /data/3308/mysql.sock -e "CHANGE MASTER TO MASTER_HOST='10.0.0.51', MASTER_PORT=3308, MASTER_AUTO_POSITION=1, MASTER_USER='repl', MASTER_PASSWORD='123';"
mysql -S /data/3308/mysql.sock -e "start slave;"
mysql -S /data/3308/mysql.sock -e "show slave status\G"
# db01
mysql -S /data/3308/mysql.sock -e "CHANGE MASTER TO MASTER_HOST='10.0.0.52', MASTER_PORT=3308, MASTER_AUTO_POSITION=1, MASTER_USER='repl', MASTER_PASSWORD='123';"
mysql -S /data/3308/mysql.sock -e "start slave;"
mysql -S /data/3308/mysql.sock -e "show slave status\G"
## 10.0.0.52:3310 -----> 10.0.0.52:3308
# db02
mysql -S /data/3310/mysql.sock -e "CHANGE MASTER TO MASTER_HOST='10.0.0.52', MASTER_PORT=3308, MASTER_AUTO_POSITION=1, MASTER_USER='repl', MASTER_PASSWORD='123';"
mysql -S /data/3310/mysql.sock -e "start slave;"
mysql -S /data/3310/mysql.sock -e "show slave status\G"
##10.0.0.51:3310 -----> 10.0.0.51:3308
# db01
mysql -S /data/3310/mysql.sock -e "CHANGE MASTER TO MASTER_HOST='10.0.0.51', MASTER_PORT=3308, MASTER_AUTO_POSITION=1, MASTER_USER='repl', MASTER_PASSWORD='123';"
mysql -S /data/3310/mysql.sock -e "start slave;"
mysql -S /data/3310/mysql.sock -e "show slave status\G"
2.9 Check replication status
mysql -S /data/3307/mysql.sock -e "show slave status\G"|grep Running
mysql -S /data/3308/mysql.sock -e "show slave status\G"|grep Running
mysql -S /data/3309/mysql.sock -e "show slave status\G"|grep Running
mysql -S /data/3310/mysql.sock -e "show slave status\G"|grep Running
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Note: if an error occurs anywhere along the way, run the following on every instance and redo everything from step 2.8:
mysql -S /data/3307/mysql.sock -e "stop slave; reset slave all;"
mysql -S /data/3308/mysql.sock -e "stop slave; reset slave all;"
mysql -S /data/3309/mysql.sock -e "stop slave; reset slave all;"
mysql -S /data/3310/mysql.sock -e "stop slave; reset slave all;"
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Clean up any existing Java installation:
rpm -qa | grep java
rpm -qa | grep jdk
If anything is found, yum remove the packages.
Install the Java environment:
rpm -ivh jdk-8u151-linux-x64.rpm
Set the Java environment variables:
vim /etc/profile
Append at the end:
export JAVA_HOME=/usr/java/jdk1.8.0_151
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$JAVA_HOME/bin:$PATH
Save and exit.
Then run:
source /etc/profile
Verify:
java -version
Install Mycat:
Unpack the Mycat tarball:
tar xf Mycat-server-*
Move the unpacked mycat directory to a fixed location, here under /usr/local:
/usr/local/mycat
Set the Mycat environment variable:
vim /etc/profile
export PATH=/usr/local/mycat/bin:$PATH
Reload:
source /etc/profile
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
<?xml version="1.0"?>
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">
<mycat:schema xmlns:mycat="http://io.mycat/">
<schema name="TESTDB" checkSQLschema="false" sqlMaxLimit="100" dataNode="dn1">
</schema>
<dataNode name="dn1" dataHost="localhost1" database="world" />
<dataHost name="localhost1" maxCon="1000" minCon="10" balance="1"
writeType="0" dbType="mysql" dbDriver="native" switchType="1" >
<heartbeat>select user()</heartbeat>
<writeHost host="db1" url="192.168.75.33:3307" user="root"
password="123">
<readHost host="db2" url="192.168.75.33:3309" user="root"
password="123"/>
</writeHost>
</dataHost>
</mycat:schema>
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Configuring highly available read/write splitting:
<?xml version="1.0"?>
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">
<mycat:schema xmlns:mycat="http://io.mycat/">
<schema name="TESTDB" checkSQLschema="false" sqlMaxLimit="100" dataNode="dn1">
</schema>
<dataNode name="dn1" dataHost="localhost1" database="world" />
<dataHost name="localhost1" maxCon="1000" minCon="10" balance="1"
writeType="0" dbType="mysql" dbDriver="native" switchType="1" >
<heartbeat>select user()</heartbeat>
<writeHost host="db1" url="192.168.75.33:3307" user="root"
password="123">
<readHost host="db2" url="192.168.75.33:3309" user="root"
password="123"/>
</writeHost>
<writeHost host="db3" url="192.168.75.34:3307" user="root"
password="123">
<readHost host="db4" url="192.168.75.34:3309" user="root"
password="123"/>
</writeHost>
</dataHost>
</mycat:schema>
Note: for highly available read/write splitting, add writeHost/readHost pairs as shown; multiple pairs can be configured.
Caution: if a writeHost goes down, mycat removes that writeHost together with its configured readHosts from the cluster.
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
<?xml version="1.0"?>
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">
<mycat:schema xmlns:mycat="http://io.mycat/">
<schema name="TESTDB" checkSQLschema="false" sqlMaxLimit="100" dataNode="dn1">
<table name="city" dataNode="dn1" />
</schema>
<dataNode name="dn1" dataHost="localhost1" database="dl" />
<dataHost name="localhost1" maxCon="1000" minCon="10" balance="1"
writeType="0" dbType="mysql" dbDriver="native" switchType="1" >
<heartbeat>select user()</heartbeat>
<writeHost host="db1" url="192.168.75.33:3307" user="root"
password="123">
<readHost host="db2" url="192.168.75.33:3309" user="root"
password="123"/>
</writeHost>
<writeHost host="db3" url="192.168.75.34:3307" user="root"
password="123">
<readHost host="db4" url="192.168.75.34:3309" user="root"
password="123"/>
</writeHost>
</dataHost>
</mycat:schema>
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
<?xml version="1.0"?>
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">
<mycat:schema xmlns:mycat="http://io.mycat/">
<schema name="TESTDB" checkSQLschema="false" sqlMaxLimit="100" dataNode="dn1">
<table name="user" dataNode="dn1" />
<table name="order_t" dataNode="dn2"/>
</schema>
<dataNode name="dn1" dataHost="localhost1" database="taobao" />
<dataNode name="dn2" dataHost="localhost2" database="taobao" />
<dataHost name="localhost1" maxCon="1000" minCon="10" balance="1"
writeType="0" dbType="mysql" dbDriver="native" switchType="1" >
<heartbeat>select user()</heartbeat>
<writeHost host="db1" url="192.168.75.33:3307" user="root"
password="123">
<readHost host="db2" url="192.168.75.33:3309" user="root"
password="123"/>
</writeHost>
<writeHost host="db3" url="192.168.75.34:3307" user="root"
password="123">
<readHost host="db4" url="192.168.75.34:3309" user="root"
password="123"/>
</writeHost>
</dataHost>
<dataHost name="localhost2" maxCon="1000" minCon="10" balance="1"
writeType="0" dbType="mysql" dbDriver="native" switchType="1" >
<heartbeat>select user()</heartbeat>
<writeHost host="db1" url="192.168.75.33:3308" user="root"
password="123">
<readHost host="db2" url="192.168.75.33:3310" user="root"
password="123"/>
</writeHost>
<writeHost host="db3" url="192.168.75.34:3308" user="root"
password="123">
<readHost host="db4" url="192.168.75.34:3310" user="root"
password="123"/>
</writeHost>
</dataHost>
</mycat:schema>
Create the test databases and tables:
mysql -S /data/3307/mysql.sock -e "create database taobao charset utf8;"
mysql -S /data/3308/mysql.sock -e "create database taobao charset utf8;"
mysql -S /data/3307/mysql.sock -e "use taobao;create table user(id int,name varchar(20))";
mysql -S /data/3308/mysql.sock -e "use taobao;create table order_t(id int,name varchar(20))"
# Restart mycat
mycat restart
Log in to mycat and verify.
# Insert data into user and order_t through mycat
mysql -uroot -p123456 -h 10.0.0.51 -P 8066
insert into user values(1,'a');
insert into user values(2,'b');
insert into user values(3,'c');
commit;
insert into order_t values(1,'x'),(2,'y');
commit;
[root@db01 conf]# mysql -S /data/3307/mysql.sock -e "show tables from taobao"
+------------------+
| Tables_in_taobao |
+------------------+
| user |
+------------------+
[root@db01 conf]# mysql -S /data/3308/mysql.sock -e "show tables from taobao"
+------------------+
| Tables_in_taobao |
+------------------+
| order_t |
+------------------+
[root@db01 conf]# mysql -S /data/3307/mysql.sock -e "select * from taobao.user"
+------+------+
| id | name |
+------+------+
| 1 | a |
| 2 | b |
| 3 | c |
+------+------+
[root@db01 conf]# mysql -S /data/3308/mysql.sock -e "select * from taobao.order_t"
+------+------+
| id | name |
+------+------+
| 1 | x |
| 2 | y |
+------+------+
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
vim schema.xml
<schema name="TESTDB" checkSQLschema="false" sqlMaxLimit="100" dataNode="sh1">
        <!-- add only this table line; leave the existing vertical-sharding configuration unchanged -->
        <table name="t3" dataNode="sh1,sh2" rule="auto-sharding-long" />
</schema>
<dataNode name="sh1" dataHost="oldlao1" database= "taobao" />
<dataNode name="sh2" dataHost="oldlao2" database= "taobao" />
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
(shown here for reference only) vim rule.xml
<tableRule name="auto-sharding-long">
<rule>
<columns>id</columns>
<algorithm>rang-long</algorithm>
</rule>
<function name="rang-long"
class="io.mycat.route.function.AutoPartitionByLong">
<property name="mapFile">autopartition-long.txt</property>
</function>
===================================
(this file needs editing) vim autopartition-long.txt
0-10=0
11-20=1
Create the test tables:
mysql -S /data/3307/mysql.sock -e "use taobao;create table t3 (id int not null primary key auto_increment,name varchar(20) not null);"
mysql -S /data/3308/mysql.sock -e "use taobao;create table t3 (id int not null primary key auto_increment,name varchar(20) not null);"
Test:
Restart mycat:
mycat restart
mysql -uroot -p123456 -h 127.0.0.1 -P 8066
insert into t3(id,name) values(1,'a');
insert into t3(id,name) values(2,'b');
insert into t3(id,name) values(3,'c');
insert into t3(id,name) values(4,'d');
insert into t3(id,name) values(11,'aa');
insert into t3(id,name) values(12,'bb');
insert into t3(id,name) values(13,'cc');
insert into t3(id,name) values(14,'dd');
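A quick check of where the rows landed; per the 0-10/11-20 ranges above, ids 1-4 should sit on sh1 (the 3307 instance) and ids 11-14 on sh2 (the 3308 instance), assuming the local-socket layout used earlier:
mysql -S /data/3307/mysql.sock -e "select * from taobao.t3;"   # expect ids 1-4
mysql -S /data/3308/mysql.sock -e "select * from taobao.t3;"   # expect ids 11-14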
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Modulo sharding (mod-long):
The sharding key (a single column) is taken modulo the number of nodes; the remainder decides which node the row is written to.
vim schema.xml
add this line: <table name="t4" dataNode="sh1,sh2" rule="mod-long" />
vim rule.xml
change this property: <property name="count">2</property>
Prepare the test environment.
Create the test tables:
mysql -S /data/3307/mysql.sock -e "use taobao;create table t4 (id int not null primary key auto_increment,name varchar(20) not null);"
mysql -S /data/3308/mysql.sock -e "use taobao;create table t4 (id int not null primary key auto_increment,name varchar(20) not null);"
Restart mycat:
mycat restart
Test:
mysql -uroot -p123456 -h10.0.0.52 -P8066
use TESTDB
insert into t4(id,name) values(1,'a');
insert into t4(id,name) values(2,'b');
insert into t4(id,name) values(3,'c');
insert into t4(id,name) values(4,'d');
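With two shards, id % 2 decides the target: remainder 0 rows (ids 2 and 4) should land on the first dataNode and remainder 1 rows (ids 1 and 3) on the second. A quick check, reusing the local sockets from above:
mysql -S /data/3307/mysql.sock -e "select * from taobao.t4;"
mysql -S /data/3308/mysql.sock -e "select * from taobao.t4;"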
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Install the dependencies:
yum -y install gcc automake autoconf libtool make
tar -xf redis-5.0.8.tar.gz
mkdir -p /usr/local/redis
cd redis-5.0.8/
make
make PREFIX=/usr/local/redis install
cp src/redis-trib.rb /usr/local/redis/bin/
(copy all of the executables produced by the build into the installation directory)
Alternatively, unpack whichever version you are using, for example:
tar xzf redis-3.2.12.tar.gz
mv redis-3.2.12 redis
Build:
yum -y install gcc automake autoconf libtool make
cd redis
make
Environment variable:
vim /etc/profile
export PATH=/data/redis/src:$PATH
source /etc/profile
Start:
redis-server &
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Configuration file (the parameters worth paying attention to):
vim redis.conf
daemonize yes                 # run in the background
port 6379                     # port
bind <ip>                     # bind to a specific ip
logfile /xxxx/redis.log       # log file path and name
dir /data/6379                # working directory of the instance
dbfilename dump.rdb           # default RDB persistence file name, located relative to dir; an absolute path also works
requirepass 123456            # enable password authentication; use a strong password in production
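A minimal sketch of starting an instance with such a config and connecting with the password (paths and password follow the example above):
mkdir -p /data/6379
redis-server /data/6379/redis.conf          # assuming the file above was saved there
redis-cli -p 6379 -a 123456 ping            # should answer PONG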
+++++++++++++++++++++++++++++++++++++++++++++++++++++
Strings
Application scenarios:
session sharing;
simple counters: number of posts, followers, subscriptions, gifts.
key:value
----------
(1)
set name zhangsan
(2)
MSET id 101 name zhangsan age 20 gender m
is equivalent to:
SET id 101
set name zhangsan
set age 20
set gender m
(3) counters
Every time someone follows you, run:
127.0.0.1:6379> incr num
Show the number of followers:
127.0.0.1:6379> get num
Cheating behind the scenes:
127.0.0.1:6379> INCRBY num 10000
(integer) 10006
127.0.0.1:6379> get num
"10006"
127.0.0.1:6379> DECRBY num 10000
(integer) 6
127.0.0.1:6379> get num
"6"
More examples: ------------------------------------
Create
set mykey "test"                set a new value for the key, overwriting any existing value
getset mycounter 0              set a new value and return the old one in a single step
setex mykey 10 "hello"          set the key with a 10-second TTL; the value can be read while it is still alive
setnx mykey "hello"             set the value only if the key does not exist yet
mset key3 "zyx" key4 "xyz"      set several keys at once
Delete
del mykey                       delete an existing key
Update
append mykey "hello"            if the key does not exist it is created; returns the length of the value after the append
incr mykey                      increment the value by 1; if the key does not exist it is created as 0 and becomes 1
decrby mykey 5                  decrement the value by 5
setrange mykey 20 dd            overwrite bytes 21 and 22 with dd; if the offset is past the end, the value is zero-padded
Read
exists mykey                    1 if the key exists, 0 otherwise
get mykey                       get the value of the key
strlen mykey                    length of the string value
ttl mykey                       remaining time to live of the key, in seconds
getrange mykey 1 20             bytes 2 through 21; if 20 is past the end, returns from byte 2 to the end
mget key3 key4                  get several keys at once
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Hash type (dictionary)
Application scenarios:
storing objects whose fields change partially, such as user profiles.
It is the Redis type closest to a MySQL table row,
so it is mainly used for database caching.
Store data:
hmset stu id 101 name zhangsan age 20 gender m
hmset stu1 id 102 name zhangsan1 age 21 gender f
Read data:
HMGET stu id name age gender
HMGET stu1 id name age gender
select concat("hmset city_",id," id ",id," name ",name," countrycode ",countrycode," district ",district," population ",population) from city limit 10 into outfile '/tmp/hmset.txt';
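One common way to load the generated file into Redis as a cache, assuming /tmp/hmset.txt was produced by the SELECT ... INTO OUTFILE above and redis runs on the same host without a password:
cat /tmp/hmset.txt | redis-cli          # each line is a complete HMSET command executed in turn
redis-cli HGETALL city_1                # spot-check one cached row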
---------------------更多的例子
Create
hset myhash field1 "s"
        if field1 does not exist, create the hash (and the key if needed) with field1 = s; if it exists, overwrite the value
hsetnx myhash field1 s
        same as above, but does nothing if field1 already exists
hmset myhash field1 "hello" field2 "world"      set several fields at once
Delete
hdel myhash field1              delete field field1 from the myhash key
del myhash                      delete the whole key
Update
hincrby myhash field 1          increment the value of field by 1
Read
hget myhash field1              get the value of field1 in myhash
hlen myhash                     number of fields in myhash
hexists myhash field1           does field1 exist in myhash?
hmget myhash field1 field2 field3       get several fields at once
hgetall myhash                  return all fields and values of myhash
hkeys myhash                    all field names of myhash
hvals myhash                    all field values of myhash
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
LIST (lists)
Application scenarios:
Message queue systems,
for example Sina Weibo:
the latest post IDs are kept in a permanently cached Redis list that is constantly updated
but capped at 5000 IDs, so the function that fetches IDs always asks Redis first
and only goes to the database when the start/count parameters fall outside that range.
The system never "refreshes" the cache the way traditional setups do; the data in the Redis instance is always consistent.
The SQL database (or any other on-disk store) is only hit when the user asks for "old" data,
so the home page and the first comment pages never touch the disk database.
WeChat Moments example:
127.0.0.1:6379> LPUSH wechat "today is nice day !"
127.0.0.1:6379> LPUSH wechat "today is bad day !"
127.0.0.1:6379> LPUSH wechat "today is good day !"
127.0.0.1:6379> LPUSH wechat "today is rainy day !"
127.0.0.1:6379> LPUSH wechat "today is friday !"
After the pushes the list stores the items newest-first:
indexes  0 1 2 3 4
hold     [5,4,3,2,1]   (or, with letters, [e,d,c,b,a])
127.0.0.1:6379> lrange wechat 0 0
1) "today is friday !"
127.0.0.1:6379> lrange wechat 0 1
1) "today is friday !"
2) "today is rainy day !"
127.0.0.1:6379> lrange wechat 0 2
1) "today is friday !"
2) "today is rainy day !"
3) "today is good day !"
127.0.0.1:6379> lrange wechat 0 3
127.0.0.1:6379> lrange wechat -2 -1
1) "today is bad day !"
2) "today is nice day !"
-----------------
Create
lpush mykey a b         if the key does not exist, create the list and push a then b onto the head; if it exists, push onto it
lpushx mykey2 e         push onto the head only if the key already exists
linsert mykey before a a1       insert a1 immediately before element a
linsert mykey after e e2        insert e2 immediately after element e
rpush mykey a b         append a then b to the tail of the list
rpushx mykey e          append e to the tail only if the key exists
rpoplpush mykey mykey2  pop the tail element of mykey and push it onto the head of mykey2 (atomic)
Delete
del mykey               delete the key
lrem mykey 2 a          scanning from the head, remove up to 2 elements equal to a; a 3rd occurrence is kept
ltrim mykey 0 2         keep only the elements at indexes 0,1,2 and drop the rest
Update
lset mykey 1 e          set the element at index 1 to e; returns an error if the index is out of range
rpoplpush mykey mykey   rotate: move the tail element of mykey to its own head
Read
lrange mykey 0 -1       all elements; 0 is the first element, -1 the last
lrange mykey 0 2        elements at indexes 0, 1, 2 from the head
lrange mykey 0 0        just the first element
lpop mykey              pop and return the head element
lindex mykey 6          element at index 6 from the head; nil if the index is out of range
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
SET (sets; think join/union)
Application scenarios:
Example: in a microblogging application, keep each user's followees in one set and their followers in another.
Redis provides intersection, union and difference operations on sets, which makes features such as "people you both follow",
shared interests and second-degree friends easy to implement; every operation can either return the result to the client or store it into a new set.
127.0.0.1:6379> sadd lxl pg1 jnl baoqiang gsy alexsb
(integer) 5
127.0.0.1:6379> sadd jnl baoqiang ms bbh yf wxg
(integer) 5
127.0.0.1:6379> SUNION lxl jnl
1) "gsy"
2) "yf"
3) "alexsb"
4) "bbh"
5) "jnl"
6) "pg1"
7) "baoqiang"
8) "ms"
9) "wxg"
127.0.0.1:6379> SINTER lxl jnl
1) "baoqiang"
127.0.0.1:6379> SDIFF jnl lxl
1) "wxg"
2) "yf"
3) "bbh"
4) "ms"
127.0.0.1:6379> SDIFF lxl jnl
1) "jnl"
2) "pg1"
3) "gsy"
4) "alexsb"
Create
sadd myset a b c
        if the key does not exist, create the set and insert a, b, c; if it exists, only the members not already present are added
Delete
spop myset              pop and return a random member (not necessarily the first or last one inserted)
srem myset a d f        remove the listed members; missing members (f here) are ignored; returns the number removed (2)
Update
smove myset myset2 a    move a from myset to myset2
Read
sismember myset a       is a a member? returns 1 if yes
smembers myset          list the members of the set
scard myset             number of members in the set
srandmember myset       return a random member
sdiff myset1 myset2 myset3      members of myset1 that are in none of the other sets
sdiffstore diffkey myset myset2 myset3          same, but store the result into the set diffkey
sinter myset myset2 myset3      members present in all three sets
sinterstore interkey myset myset2 myset3        same, but store the intersection into interkey
sunion myset myset2 myset3      union of the three sets
sunionstore unionkey myset myset2 myset3        same, but store the union into unionkey
SortedSet (sorted sets)
Application scenarios:
leaderboards and TOP-N queries.
Unlike the list example above, where time is the implicit weight, here the weight is an explicit condition such as the number of upvotes.
Put the value you want to rank by into the sorted set's score and the actual payload into the member;
every update then needs only a single ZADD (or ZINCRBY) command.
127.0.0.1:6379> zadd topN 0 smlt 0 fskl 0 fshkl 0 lzlsfs 0 wdhbx 0 wxg
(integer) 6
127.0.0.1:6379> ZINCRBY topN 100000 smlt
"100000"
127.0.0.1:6379> ZINCRBY topN 10000 fskl
"10000"
127.0.0.1:6379> ZINCRBY topN 1000000 fshkl
"1000000"
127.0.0.1:6379> ZINCRBY topN 100 lzlsfs
"100"
127.0.0.1:6379> ZINCRBY topN 10 wdhbx
"10"
127.0.0.1:6379> ZINCRBY topN 100000000 wxg
"100000000"
127.0.0.1:6379> ZREVRANGE topN 0 2
1) "wxg"
2) "fshkl"
3) "smlt"
127.0.0.1:6379> ZREVRANGE topN 0 2 withscores
1) "wxg"
2) "100000000"
3) "fshkl"
4) "1000000"
5) "smlt"
6) "100000"
127.0.0.1:6379>
Create
zadd myzset 2 "two" 3 "three"           add two members with scores 2 and 3
Delete
zrem myzset one two                     remove several members, returns the number removed
Update
zincrby myzset 2 one                    add 2 to member one's score and return the new score
Read
zrange myzset 0 -1 WITHSCORES           all members with their scores; without WITHSCORES only the members are returned
zrank myzset one                        rank (position index) of member one; 0 means the first position
zcard myzset                            number of members in myzset
zcount myzset 1 2                       number of members with 1 <= score <= 2
zscore myzset three                     score of member three
zrangebyscore myzset 1 2                members with 1 <= score <= 2
        # -inf means the lowest possible score, +inf the highest
        # limit <offset> <count> works like in SQL: skip <offset> members, return the next <count>
zrangebyscore myzset -inf +inf limit 2 3        skip 2 members, return the next 3
zremrangebyscore myzset 1 2             remove members with 1 <= score <= 2, returns the number removed
zremrangebyrank myzset 0 1              remove members with rank 0 <= rank <= 1
zrevrange myzset 0 -1 WITHSCORES        all members and scores, from highest rank to lowest
        # if the ascending order is: one (index 0), two (index 1),
        # the reversed output is: two, then one
zrevrange myzset 1 3                    members at reversed positions 1, 2 and 3
zrevrangebyscore myzset 3 0             members with 0 <= score <= 3, output in descending score order
zrevrangebyscore myzset 4 0 limit 1 2   descending between scores 4 and 0, skip 1 member and return the next 2
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Publish/Subscribe
PUBLISH channel msg
        send message msg to the given channel
SUBSCRIBE channel [channel ...]
        subscribe to one or more channels
UNSUBSCRIBE [channel ...]
        unsubscribe from the given channels, or from all channels if none are given
PSUBSCRIBE pattern [pattern ...]
        subscribe to channels matching the given patterns; * is the wildcard, e.g. it* matches every channel starting with it (it.news, it.blog, it.tweets, ...), news.* matches everything under news. (news.it, news.global.today, ...), and so on
PUNSUBSCRIBE [pattern [pattern ...]]
        unsubscribe from the given patterns, or from all patterns if none are given
PUBSUB subcommand [argument [argument ...]]
        inspect the state of the pub/sub system
Note: a message queue built on pub/sub only delivers messages published after the client subscribed; earlier messages are not buffered, so producer and consumer must be online at the same time.
Pub/sub example:
window 1:
127.0.0.1:6379> SUBSCRIBE baodi
window 2:
127.0.0.1:6379> PUBLISH baodi "jin tian zhen kaixin!"
Subscribing to multiple channels by pattern:
window 1:
127.0.0.1:6379> PSUBSCRIBE wang*
window 2:
127.0.0.1:6379> PUBLISH wangbaoqiang "jintian zhennanshou "
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Redis transactions
Redis transactions are implemented with a command queue;
MySQL transactions are implemented with transaction logs plus locking.
Redis uses an optimistic locking model.
Starting a transaction (multi):
multi
command1
command2
command3
command4
exec
discard
The four commands form one group; they are not executed immediately but queued together.
Running discard at that point simply throws away every command in the queue -- it is not a rollback.
exec
When exec is run, all the queued commands are executed together: they are either all applied or all discarded.
127.0.0.1:6379> set a b
OK
127.0.0.1:6379> MULTI
OK
127.0.0.1:6379> set a b
QUEUED
127.0.0.1:6379> set c d
QUEUED
127.0.0.1:6379> exec
1) OK
2) OK
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Optimistic locking in Redis (simulating ticket sales)
Put one ticket on sale:
set ticket 1
window 1:
watch ticket
multi
set ticket 0        (1 ----> 0)
window 2:
multi
set ticket 0
exec
window 1:
exec                (fails and returns nil, because the watched key was changed by window 2 after WATCH)
10. Server management commands
info                    server information
client list             list client connections
client kill ip:port     kill a client connection
config get *            show all configuration parameters
CONFIG RESETSTAT        reset the statistics
CONFIG GET/SET          read / change configuration at runtime
dbsize                  number of keys in the current database
FLUSHALL                wipe all databases
select 1                switch to database 1
FLUSHDB                 wipe the current database
MONITOR                 watch commands in real time
SHUTDOWN                shut the server down
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Keeping master and slave data consistent:
min-slaves-to-write 1
min-slaves-max-lag 3
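These two parameters can also be applied at runtime without a restart; a sketch assuming a local instance with password 123:
redis-cli -a 123 config set min-slaves-to-write 1
redis-cli -a 123 config set min-slaves-max-lag 3
redis-cli -a 123 config get min-slaves-*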
11.3 Should the master have persistence enabled?
If it does not, a restart of the master may wipe the data on all of the slaves as well!
12. Setting up master/slave replication
1. Environment:
prepare two or more redis instances
mkdir /data/638{0..2}
Example configuration files:
cat >> /data/6380/redis.conf <<EOF
port 6380
daemonize yes
pidfile /data/6380/redis.pid
loglevel notice
logfile "/data/6380/redis.log"
dbfilename dump.rdb
dir /data/6380
requirepass 123
masterauth 123
EOF
cat >> /data/6381/redis.conf <<EOF
port 6381
daemonize yes
pidfile /data/6381/redis.pid
loglevel notice
logfile "/data/6381/redis.log"
dbfilename dump.rdb
dir /data/6381
requirepass 123
masterauth 123
EOF
cat >> /data/6382/redis.conf <<EOF
port 6382
daemonize yes
pidfile /data/6382/redis.pid
loglevel notice
logfile "/data/6382/redis.log"
dbfilename dump.rdb
dir /data/6382
requirepass 123
masterauth 123
EOF
Start them:
redis-server /data/6380/redis.conf
redis-server /data/6381/redis.conf
redis-server /data/6382/redis.conf
Master node: 6380
Slave nodes: 6381, 6382
2. Enable replication:
on 6381 and 6382:
redis-cli -p 6381 -a 123 SLAVEOF 127.0.0.1 6380
redis-cli -p 6382 -a 123 SLAVEOF 127.0.0.1 6380
To detach a slave: redis-cli -p 6381 -a 123 SLAVEOF no one
3. Check the replication status:
redis-cli -p 6380 -a 123 info replication
redis-cli -p 6381 -a 123 info replication
redis-cli -p 6382 -a 123 info replication
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Setting up sentinel
mkdir /data/26380
cd /data/26380
vim sentinel.conf
port 26380
dir "/data/26380"
sentinel monitor mymaster 127.0.0.1 6380 1
sentinel down-after-milliseconds mymaster 5000
sentinel auth-pass mymaster 123
Start it:
[root@db01 26380]# redis-sentinel /data/26380/sentinel.conf &>/tmp/sentinel.log &
==============================
If something goes wrong:
1. rebuild the 1-master, 2-slave environment
2. kill the sentinel process
3. delete everything in the sentinel directory
4. set up sentinel again
======================================
Failover test (stop the master):
[root@db01 ~]# redis-cli -p 6380 shutdown
[root@db01 ~]# redis-cli -p 6381
info replication
Start the old master (6380) again and watch its state.
Sentinel management commands:
redis-cli -p 26380
PING : returns PONG.
SENTINEL masters : list all monitored masters
SENTINEL slaves <master name> : list the slaves of the given master
SENTINEL get-master-addr-by-name <master name> : return the IP address and port of the given master
SENTINEL reset <pattern> : reset all masters whose names match the pattern
SENTINEL failover <master name> : force a failover without asking the other sentinels for agreement
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Planning and build process:
6 redis instances, normally spread over 3 physical servers.
Note: in production the two members of one shard must go on different physical machines, so that losing one host does not lose the whole shard.
Ports: 7000-7005
Install the cluster tooling:
ruby support from the EPEL repository:
yum install ruby rubygems -y
Use a domestic gem mirror:
gem sources -l
gem sources -a http://mirrors.aliyun.com/rubygems/
gem sources --remove https://rubygems.org/
gem sources -l
gem install redis -v 3.3.3
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Prepare the cluster nodes
mkdir /data/700{0..5}
cat > /data/7000/redis.conf <<EOF
port 7000
daemonize yes
pidfile /data/7000/redis.pid
loglevel notice
logfile "/data/7000/redis.log"
dbfilename dump.rdb
dir /data/7000
protected-mode no
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
EOF
cat >> /data/7001/redis.conf <<EOF
port 7001
daemonize yes
pidfile /data/7001/redis.pid
loglevel notice
logfile "/data/7001/redis.log"
dbfilename dump.rdb
dir /data/7001
protected-mode no
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
EOF
cat >> /data/7002/redis.conf <<EOF
port 7002
daemonize yes
pidfile /data/7002/redis.pid
loglevel notice
logfile "/data/7002/redis.log"
dbfilename dump.rdb
dir /data/7002
protected-mode no
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
EOF
cat >> /data/7003/redis.conf <<EOF
port 7003
daemonize yes
pidfile /data/7003/redis.pid
loglevel notice
logfile "/data/7003/redis.log"
dbfilename dump.rdb
dir /data/7003
protected-mode no
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
EOF
cat >> /data/7004/redis.conf <<EOF
port 7004
daemonize yes
pidfile /data/7004/redis.pid
loglevel notice
logfile "/data/7004/redis.log"
dbfilename dump.rdb
dir /data/7004
protected-mode no
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
EOF
cat >> /data/7005/redis.conf <<EOF
port 7005
daemonize yes
pidfile /data/7005/redis.pid
loglevel notice
logfile "/data/7005/redis.log"
dbfilename dump.rdb
dir /data/7005
protected-mode no
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
EOF
Start the nodes:
redis-server /data/7000/redis.conf
redis-server /data/7001/redis.conf
redis-server /data/7002/redis.conf
redis-server /data/7003/redis.conf
redis-server /data/7004/redis.conf
redis-server /data/7005/redis.conf
[root@db01 ~]# ps -ef |grep redis
root 8854 1 0 03:56 ? 00:00:00 redis-server *:7000 [cluster]
root 8858 1 0 03:56 ? 00:00:00 redis-server *:7001 [cluster]
root 8860 1 0 03:56 ? 00:00:00 redis-server *:7002 [cluster]
root 8864 1 0 03:56 ? 00:00:00 redis-server *:7003 [cluster]
root 8866 1 0 03:56 ? 00:00:00 redis-server *:7004 [cluster]
root 8874 1 0 03:56 ? 00:00:00 redis-server *:7005 [cluster]
Join the nodes into a cluster:
redis-trib.rb create --replicas 1 127.0.0.1:7000 127.0.0.1:7001 \
127.0.0.1:7002 127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005
Check the cluster status:
Cluster master nodes:
redis-cli -p 7000 cluster nodes | grep master
Cluster slave nodes:
redis-cli -p 7000 cluster nodes | grep slave
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
14.3 Cluster node management
Adding new nodes:
mkdir /data/7006
mkdir /data/7007
cat > /data/7006/redis.conf <<EOF
port 7006
daemonize yes
pidfile /data/7006/redis.pid
loglevel notice
logfile "/data/7006/redis.log"
dbfilename dump.rdb
dir /data/7006
protected-mode no
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
EOF
cat > /data/7007/redis.conf <<EOF
port 7007
daemonize yes
pidfile /data/7007/redis.pid
loglevel notice
logfile "/data/7007/redis.log"
dbfilename dump.rdb
dir /data/7007
protected-mode no
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
EOF
redis-server /data/7006/redis.conf
redis-server /data/7007/redis.conf
Add the new master node:
redis-trib.rb add-node 127.0.0.1:7006 127.0.0.1:7000
Move slots to it (reshard):
redis-trib.rb reshard 127.0.0.1:7000
Add a slave for the new master:
redis-trib.rb add-node --slave --master-id 8ff9ef5b78e6da62bd7b362e1fe190cba19ef5ae 127.0.0.1:7007 127.0.0.1:7000
14.4 Removing nodes
Move the slots off the node to be removed first:
redis-trib.rb reshard 127.0.0.1:7000
49257f251824dd815bc7f31e1118b670365e861a
127.0.0.1:7006
0-1364 5461-6826 10923-12287
1365 1366 1365
Remove the node:
a master can only be deleted after reshard has moved away all of its slots; then:
redis-trib.rb del-node 127.0.0.1:7006 8ff9ef5b78e6da62bd7b362e1fe190cba19ef5ae
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Redis cluster build for version 5.0 and above (versions below 5.0 need ruby). Sentinel mode is not used here.
1. Prepare the redis nodes and install redis. (The cluster is decentralised, so at least 6 nodes are needed, 3 masters and 3 slaves, to keep the election quorum. Here a single VM hosts a pseudo-cluster.)
Redis installation:
1. Install the dependencies:
yum -y install gcc automake autoconf libtool make
tar -xf redis-5.0.8.tar.gz
mkdir -p /usr/local/redis
cd redis-5.0.8/
make
make PREFIX=/usr/local/redis install
cp src/redis-trib.rb /usr/local/redis/bin/
Create the cluster config and data directories:
mkdir /data/redis_cluster
mkdir /data/redis_cluster/700{0..5}
Generate each node's configuration under the cluster directory (note: this is a pseudo-cluster, so one host holds every node's config; with real nodes, configure each node on its own host):
mkdir 7000 7001 7002 7003 7004 7005        (the config directories for the six pseudo-cluster nodes)
Copy the redis.conf shipped in the source tree into each of those directories:
cp redis.conf ../redis_cluster/7000/
cp redis.conf ../redis_cluster/7001/
cp redis.conf ../redis_cluster/7002/
cp redis.conf ../redis_cluster/7003/
cp redis.conf ../redis_cluster/7004/
cp redis.conf ../redis_cluster/7005/
(Alternatively, edit one file and generate the rest from it by changing the port.)
Adjust each config for its directory; the template looks like:
port 7000                       // 7000, 7001, 7002, ...
bind <local ip>                 // defaults to 127.0.0.1; change it to an IP the other nodes can reach, otherwise cluster creation cannot connect to the port
daemonize yes                   // run redis in the background
pidfile /var/run/redis_7000.pid // pidfile per port: 7000, 7001, 7002, ...
cluster-enabled yes             // enable cluster mode (uncomment)
cluster-config-file nodes_7000.conf     // cluster state file, generated automatically on first start; per port (uncomment)
cluster-node-timeout 15000      // node timeout, default 15 seconds, adjustable (uncomment)
appendonly yes                  // enable AOF if needed; every write is then logged
Change the ports in the copies:
sed -i 's/7000/7001/g' ../7001/redis.conf
sed -i 's/7000/7002/g' ../7002/redis.conf
sed -i 's/7000/7003/g' ../7003/redis.conf
sed -i 's/7000/7004/g' ../7004/redis.conf
sed -i 's/7000/7005/g' ../7005/redis.conf
Start the services:
/usr/local/redis/bin/redis-server /data/redis_cluster/7000/redis.conf
/usr/local/redis/bin/redis-server /data/redis_cluster/7001/redis.conf
/usr/local/redis/bin/redis-server /data/redis_cluster/7002/redis.conf
/usr/local/redis/bin/redis-server /data/redis_cluster/7003/redis.conf
/usr/local/redis/bin/redis-server /data/redis_cluster/7004/redis.conf
/usr/local/redis/bin/redis-server /data/redis_cluster/7005/redis.conf
Check the services:
ps -ef | grep redis
netstat -anptl | grep redis
Create the cluster:
the old redis-trib.rb tool is deprecated; use redis-cli instead (versions before 5.0 still use redis-trib.rb):
/usr/local/redis/bin/redis-cli --cluster create --cluster-replicas 1 192.168.75.33:7000 192.168.75.33:7001 192.168.75.33:7002 192.168.75.33:7003 192.168.75.33:7004 192.168.75.33:7005
Type yes to confirm and wait for the cluster to be created.
The following output means the cluster was created successfully:
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered
Verify the cluster:
/usr/local/redis/bin/redis-cli -c -h 192.168.75.33 -p 7000        (note the -c flag, which enables cluster-aware redirects)
cluster info
cluster nodes
If these print the cluster information, the cluster is working.
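A quick sanity test of slot redirection through the cluster, assuming the cluster above and the -c flag (key name is a placeholder):
/usr/local/redis/bin/redis-cli -c -h 192.168.75.33 -p 7000 set k1 v1     # may be redirected to the node owning the slot
/usr/local/redis/bin/redis-cli -c -h 192.168.75.33 -p 7001 get k1        # -c follows the redirect and returns "v1"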
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1. Configure the MongoDB yum repository
Create the repo file:
vim /etc/yum.repos.d/mongodb-org-3.4.repo
with the following content:
[mongodb-org-3.4]
name=MongoDB Repository
baseurl=https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/3.4/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-3.4.asc
gpgcheck=0 can be used instead to skip GPG verification.
Optionally update all packages first: yum update
2. Install MongoDB
Install command:
yum -y install mongodb-org
++++++++++++++++++++++++++++++++++++++++++++++
############################################################################
Create the required user and group:
useradd mongod
passwd mongod
Create the mongodb directory structure:
mkdir -p /mongodb/conf
mkdir -p /mongodb/log
mkdir -p /mongodb/data
Upload and unpack the software into place:
[root@db01 data]# cd /data
[root@db01 data]# tar xf mongodb-linux-x86_64-rhel70-3.6.12.tgz
[root@db01 data]# cp -r /data/mongodb-linux-x86_64-rhel70-3.6.12/bin/ /mongodb
Set directory permissions:
chown -R mongod:mongod /mongodb
Set the user's environment variables:
su - mongod
vi .bash_profile
export PATH=/mongodb/bin:$PATH
source .bash_profile
Start mongodb:
mongod --dbpath=/mongodb/data --logpath=/mongodb/log/mongodb.log --port=27017 --logappend --fork
Log in to mongodb:
[mongod@server2 ~]$ mongo
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Common MongoDB operations
Databases that exist by default:
test: the default database you land in at login
System databases used to manage MongoDB:
admin: reserved system database, MongoDB's administration database
local: reserved local database, stores critical logs
config: stores MongoDB configuration information
show databases / show dbs
show tables / show collections
use admin
db / select database()
Command families:
db -- database object commands:
db.[TAB][TAB]
db.help()
db.oldliu.[TAB][TAB]
db.oldliu.help()
rs -- replica set commands (replication set):
rs.[TAB][TAB]
rs.help()
sh -- sharded cluster commands (sharding cluster):
sh.[TAB][TAB]
sh.help()
MongoDB objects vs MySQL:
mongo                   mysql
database    ----->      database
collection  ----->      table
document    ----->      row
Database operations:
> use test
> db.dropDatabase()
{ "dropped" : "test", "ok" : 1 }
Collection operations:
app> db.createCollection('a')
{ "ok" : 1 }
app> db.createCollection('b')
Creating a database:
method 1: simply use <database name>
method 2: the database and collection are created automatically when the first document is inserted.
use oldliu
db.test.insert({name:"zhangsan"})
db.stu.insert({id:101,name:"zhangsan",age:20,gender:"m"})
show tables;
db.stu.insert({id:102,name:"lisi"})
db.stu.insert({a:"b",c:"d"})
db.stu.insert({a:1,c:2})
Document operations:
Insert test data:
for(i=0;i<10000;i++){db.log.insert({"uid":i,"name":"mongodb","age":6,"date":new Date()})}
Count the documents:
> db.log.count()
Full collection scan:
> db.log.find()
Show 50 records per page:
> DBQuery.shellBatchSize=50;
Query with a condition:
> db.log.find({uid:999})
Pretty-print the result as JSON:
> db.log.find({uid:999}).pretty()
{
        "_id" : ObjectId("5cc516e60d13144c89dead33"),
        "uid" : 999,
        "name" : "mongodb",
        "age" : 6,
        "date" : ISODate("2019-04-28T02:58:46.109Z")
}
Remove all documents from a collection:
app> db.log.remove({})
Check collection storage size:
app> db.log.totalSize()         // size of indexes + compressed data for the collection
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Users and privileges
Notes:
Authentication database: the database you `use` when creating a user; it must be specified when that user logs in.
Administrative users must be created under admin.
1. The database you are using when you create a user becomes that user's authentication database.
2. At login, the authentication database must be given explicitly.
3. Normally, administrators authenticate against admin, while ordinary users authenticate against the database they manage.
4. Connecting without using any database authenticates against test by default, which is not recommended for production.
5. From version 3.6, remote login is disabled unless bindIp is set; only local administrator login is allowed.
User creation syntax:
use admin
db.createUser(
{
    user: "<name>",
    pwd: "<cleartext password>",
    roles: [
        { role: "<role>",
          db: "<database>" } | "<role>",
        ...
    ]
}
)
Syntax notes:
user: user name
pwd: password
roles:
    role: role name
    db: the database the role applies to
role values: root, readWrite, read
Logging in against the authentication database:
mongo -u oldliu -p 123 10.0.0.53/oldliu
User management example
Create the superuser that manages all databases (must be created after use admin):
$ mongo
use admin
db.createUser(
{
    user: "root",
    pwd: "root123",
    roles: [ { role: "root", db: "admin" } ]
}
)
Verify the user:
db.auth('root','root123')
Add the following to the configuration file:
security:
  authorization: enabled
Restart mongodb:
mongod -f /mongodb/conf/mongo.conf --shutdown
mongod -f /mongodb/conf/mongo.conf
Log in with authentication:
mongo -uroot -proot123 admin
mongo -uroot -proot123 10.0.0.53/admin
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Configuration file format:
A YAML example:
cat > /mongodb/conf/mongo.conf <<EOF
systemLog:
destination: file
path: "/mongodb/log/mongodb.log"
logAppend: true
storage:
journal:
enabled: true
dbPath: "/mongodb/data/"
processManagement:
fork: true
net:
port: 27017
bindIp: 10.0.0.51,127.0.0.1
EOF
mongod -f /mongodb/conf/mongo.conf --shutdown
mongod -f /mongodb/conf/mongo.conf
How to shut down mongodb:
mongod -f mongo.conf --shutdown
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
List users:
use admin
db.system.users.find().pretty()
Create an application user:
use oldliu
db.createUser(
{
    user: "app01",
    pwd: "app01",
    roles: [ { role: "readWrite" , db: "oldliu" } ]
}
)
mongo -uapp01 -papp01 oldliu
Query the user information stored in mongodb:
mongo -uroot -proot123 10.0.0.53/admin
db.system.users.find().pretty()
Delete a user (log in as root and use the user's authentication database):
first create a throwaway user to delete:
db.createUser({user: "app02",pwd: "app02",roles: [ { role: "readWrite" , db: "oldliu1" } ]})
mongo -uroot -proot123 10.0.0.53/admin
use oldliu1
db.dropUser("app02")
Notes on user management:
1. Every user has an authentication database: admin for administrators, the managed database for ordinary users.
2. Remember to specify the authentication database when logging in:
mongo -uapp01 -papp01 10.0.0.51:27017/oldliu
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
MongoDB replica sets (RS, ReplicationSet)
Basic principle:
The basic layout is 1 primary plus 2 secondaries, with built-in mutual monitoring and a voting mechanism (MongoDB uses Raft; MySQL MGR uses a Paxos variant).
If the primary fails, the replica set votes internally and elects a new primary to take over. The replica set also notifies
the client drivers that a switchover happened, so applications connect to the new primary automatically.
Replica set configuration walk-through
Planning:
three or more mongodb nodes (or instances)
Environment preparation:
Ports:
28017, 28018, 28019, 28020
Directory trees:
su - mongod
mkdir -p /mongodb/28017/conf /mongodb/28017/data /mongodb/28017/log
mkdir -p /mongodb/28018/conf /mongodb/28018/data /mongodb/28018/log
mkdir -p /mongodb/28019/conf /mongodb/28019/data /mongodb/28019/log
mkdir -p /mongodb/28020/conf /mongodb/28020/data /mongodb/28020/log
Configuration files:
/mongodb/28017/conf/mongod.conf
/mongodb/28018/conf/mongod.conf
/mongodb/28019/conf/mongod.conf
/mongodb/28020/conf/mongod.conf
Configuration file content:
cat > /mongodb/28017/conf/mongod.conf <<EOF
systemLog:
destination: file
path: /mongodb/28017/log/mongodb.log
logAppend: true
storage:
journal:
enabled: true
dbPath: /mongodb/28017/data
directoryPerDB: true
#engine: wiredTiger
wiredTiger:
engineConfig:
cacheSizeGB: 1
directoryForIndexes: true
collectionConfig:
blockCompressor: zlib
indexConfig:
prefixCompression: true
processManagement:
fork: true
net:
bindIp: 192.168.75.33,127.0.0.1
port: 28017
replication:
oplogSizeMB: 2048
replSetName: my_repl
EOF
\cp /mongodb/28017/conf/mongod.conf /mongodb/28018/conf/
\cp /mongodb/28017/conf/mongod.conf /mongodb/28019/conf/
\cp /mongodb/28017/conf/mongod.conf /mongodb/28020/conf/
sed 's#28017#28018#g' /mongodb/28018/conf/mongod.conf -i
sed 's#28017#28019#g' /mongodb/28019/conf/mongod.conf -i
sed 's#28017#28020#g' /mongodb/28020/conf/mongod.conf -i
Start all the instances:
/bin/mongod -f /mongodb/28017/conf/mongod.conf
/bin/mongod -f /mongodb/28018/conf/mongod.conf
/bin/mongod -f /mongodb/28019/conf/mongod.conf
/bin/mongod -f /mongodb/28020/conf/mongod.conf
netstat -lnp|grep 280
Configure an ordinary replica set:
1 primary and 2 ordinary secondaries:
mongo --port 28017 admin
config = {_id: 'my_repl', members: [
{_id: 0, host: '192.168.75.33:28017'},
{_id: 1, host: '192.168.75.33:28018'},
{_id: 2, host: '192.168.75.33:28019'}]
}
rs.initiate(config)
Check the replica set status:
rs.status();
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 primary, 1 secondary and 1 arbiter:
mongo -port 28017 admin
config = {_id: 'my_repl', members: [
{_id: 0, host: '192.168.75.33:28017'},
{_id: 1, host: '192.168.75.33:28018'},
{_id: 2, host: '192.168.75.33:28019',"arbiterOnly":true}]
}
rs.initiate(config)
Replica set management operations
Check replica set status:
rs.status();            // overall replica set status
rs.isMaster();          // is the current node the primary?
rs.conf();              // replica set configuration
Adding and removing nodes:
rs.remove("ip:port");   // remove a node
rs.add("ip:port");      // add a secondary node
rs.addArb("ip:port");   // add an arbiter node
Example:
adding an arbiter node:
1. connect to the primary
[mongod@db03 ~]$ mongo --port 28018 admin
2. add the arbiter
my_repl:PRIMARY> rs.addArb("192.168.75.33:28020")
3. check the member list
my_repl:PRIMARY> rs.isMaster()
{
"hosts" : [
"192.168.75.33:28017",
"192.168.75.33:28018",
"192.168.75.33:28019"
],
"arbiters" : [
"192.168.75.33:28020"
]
}
rs.remove("ip:port");   // remove a node
Example:
my_repl:PRIMARY> rs.remove("10.0.0.53:28019");
{ "ok" : 1 }
my_repl:PRIMARY> rs.isMaster()
rs.add("ip:port");      // add a secondary node
Example:
my_repl:PRIMARY> rs.add("10.0.0.53:28019")
{ "ok" : 1 }
my_repl:PRIMARY> rs.isMaster()
Special secondary nodes
Overview:
arbiter node: only votes during elections; it stores no data and serves no queries
hidden node: hidden from clients, cannot become primary and serves no client traffic
delay node: its data intentionally lags behind the primary by a set amount of time; because the data is stale it should not serve traffic or become primary, so it is usually combined with hidden
In practice delay and hidden are configured together.
++++++++++++++++++++++++++++++++++++++++++++++++++
MongoDB Sharding Cluster
Planning
10 instances, ports 38017-38026:
(1) config server: 38018-38020
    a 3-member replica set (1 primary, 2 secondaries; arbiters are not supported), replica set name configReplSet
(2) shard nodes:
    sh1: 38021-38023 (1 primary, 2 secondaries, one of which is an arbiter; replica set name sh1)
    sh2: 38024-38026 (1 primary, 2 secondaries, one of which is an arbiter; replica set name sh2)
(3) mongos:
    38017
Shard node configuration
Create the directories:
mkdir -p /mongodb/38021/conf /mongodb/38021/log /mongodb/38021/data
mkdir -p /mongodb/38022/conf /mongodb/38022/log /mongodb/38022/data
mkdir -p /mongodb/38023/conf /mongodb/38023/log /mongodb/38023/data
mkdir -p /mongodb/38024/conf /mongodb/38024/log /mongodb/38024/data
mkdir -p /mongodb/38025/conf /mongodb/38025/log /mongodb/38025/data
mkdir -p /mongodb/38026/conf /mongodb/38026/log /mongodb/38026/data
Create the configuration files:
First shard replica set, ports 38021-38023 (1 primary, 1 secondary, 1 arbiter):
cat > /mongodb/38021/conf/mongodb.conf <<EOF
systemLog:
destination: file
path: /mongodb/38021/log/mongodb.log
logAppend: true
storage:
journal:
enabled: true
dbPath: /mongodb/38021/data
directoryPerDB: true
#engine: wiredTiger
wiredTiger:
engineConfig:
cacheSizeGB: 1
directoryForIndexes: true
collectionConfig:
blockCompressor: zlib
indexConfig:
prefixCompression: true
net:
bindIp: 192.168.75.33,127.0.0.1
port: 38021
replication:
oplogSizeMB: 2048
replSetName: sh1
sharding:
clusterRole: shardsvr
processManagement:
fork: true
EOF
\cp /mongodb/38021/conf/mongodb.conf /mongodb/38022/conf/
\cp /mongodb/38021/conf/mongodb.conf /mongodb/38023/conf/
sed 's#38021#38022#g' /mongodb/38022/conf/mongodb.conf -i
sed 's#38021#38023#g' /mongodb/38023/conf/mongodb.conf -i
Second shard replica set, ports 38024-38026 (1 primary, 1 secondary, 1 arbiter):
cat > /mongodb/38024/conf/mongodb.conf <<EOF
systemLog:
destination: file
path: /mongodb/38024/log/mongodb.log
logAppend: true
storage:
journal:
enabled: true
dbPath: /mongodb/38024/data
directoryPerDB: true
wiredTiger:
engineConfig:
cacheSizeGB: 1
directoryForIndexes: true
collectionConfig:
blockCompressor: zlib
indexConfig:
prefixCompression: true
net:
bindIp: 192.168.75.33,127.0.0.1
port: 38024
replication:
oplogSizeMB: 2048
replSetName: sh2
sharding:
clusterRole: shardsvr
processManagement:
fork: true
EOF
\cp /mongodb/38024/conf/mongodb.conf /mongodb/38025/conf/
\cp /mongodb/38024/conf/mongodb.conf /mongodb/38026/conf/
sed 's#38024#38025#g' /mongodb/38025/conf/mongodb.conf -i
sed 's#38024#38026#g' /mongodb/38026/conf/mongodb.conf -i
启动所有节点,并搭建复制集
/bin/mongod -f /mongodb/38021/conf/mongodb.conf
/bin/mongod -f /mongodb/38022/conf/mongodb.conf
/bin/mongod -f /mongodb/38023/conf/mongodb.conf
/bin/mongod -f /mongodb/38024/conf/mongodb.conf
/bin/mongod -f /mongodb/38025/conf/mongodb.conf
/bin/mongod -f /mongodb/38026/conf/mongodb.conf
ps -ef |grep mongod
mongo --port 38021
use admin
config = {_id: 'sh1', members: [
{_id: 0, host: '192.168.75.33:38021'},
{_id: 1, host: '192.168.75.33:38022'},
{_id: 2, host: '192.168.75.33:38023',"arbiterOnly":true}]
}
rs.initiate(config)
mongo --port 38024
use admin
config = {_id: 'sh2', members: [
{_id: 0, host: '192.168.75.33:38024'},
{_id: 1, host: '192.168.75.33:38025'},
{_id: 2, host: '192.168.75.33:38026',"arbiterOnly":true}]
}
rs.initiate(config)
config节点配置
目录创建
mkdir -p /mongodb/38018/conf /mongodb/38018/log /mongodb/38018/data
mkdir -p /mongodb/38019/conf /mongodb/38019/log /mongodb/38019/data
mkdir -p /mongodb/38020/conf /mongodb/38020/log /mongodb/38020/data
修改配置文件:
cat > /mongodb/38018/conf/mongodb.conf <<EOF
systemLog:
destination: file
path: /mongodb/38018/log/mongodb.log
logAppend: true
storage:
journal:
enabled: true
dbPath: /mongodb/38018/data
directoryPerDB: true
#engine: wiredTiger
wiredTiger:
engineConfig:
cacheSizeGB: 1
directoryForIndexes: true
collectionConfig:
blockCompressor: zlib
indexConfig:
prefixCompression: true
net:
bindIp: 192.168.75.33,127.0.0.1
port: 38018
replication:
oplogSizeMB: 2048
replSetName: configReplSet
sharding:
clusterRole: configsvr
processManagement:
fork: true
EOF
\cp /mongodb/38018/conf/mongodb.conf /mongodb/38019/conf/
\cp /mongodb/38018/conf/mongodb.conf /mongodb/38020/conf/
sed 's#38018#38019#g' /mongodb/38019/conf/mongodb.conf -i
sed 's#38018#38020#g' /mongodb/38020/conf/mongodb.conf -i
启动节点,并配置复制集
/bin/mongod -f /mongodb/38018/conf/mongodb.conf
/bin/mongod -f /mongodb/38019/conf/mongodb.conf
/bin/mongod -f /mongodb/38020/conf/mongodb.conf
mongo --port 38018
use admin
config = {_id: 'configReplSet', members: [
{_id: 0, host: '192.168.75.33:38018'},
{_id: 1, host: '192.168.75.33:38019'},
{_id: 2, host: '192.168.75.33:38020'}]
}
rs.initiate(config)
Note: a config server can technically be a single node, but a replica set is recommended; since MongoDB 3.4 the config server must be a replica set, and it cannot contain an arbiter.
mongos节点配置:
创建目录:
mkdir -p /mongodb/38017/conf /mongodb/38017/log
配置文件:
cat > /mongodb/38017/conf/mongos.conf <<EOF
systemLog:
destination: file
path: /mongodb/38017/log/mongos.log
logAppend: true
net:
bindIp: 192.168.75.33,127.0.0.1
port: 38017
sharding:
configDB: configReplSet/192.168.75.33:38018,192.168.75.33:38019,192.168.75.33:38020
processManagement:
fork: true
EOF
启动mongos
/bin/mongos -f /mongodb/38017/conf/mongos.conf
分片集群添加节点
Connect to one of the mongos instances (192.168.75.33 in this setup) and do the following configuration
(1) connect to the admin database on mongos
# su - mongod
$ mongo 192.168.75.33:38017/admin
(2)添加分片
db.runCommand( { addshard : "sh1/192.168.75.33:38021,192.168.75.33:38022,192.168.75.33:38023",name:"shard1"} )
db.runCommand( { addshard : "sh2/192.168.75.33:38024,192.168.75.33:38025,192.168.75.33:38026",name:"shard2"} )
(3)列出分片
mongos> db.runCommand( { listshards : 1 } )
(4)整体状态查看
mongos> sh.status();
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
使用分片集群
RANGE分片配置及测试
1、激活数据库分片功能
mongo --port 38017 admin
admin> db.runCommand( { enablesharding : "<database name>" } )
eg:
admin> db.runCommand( { enablesharding : "test" } )
2、指定分片键对集合分片
### 创建索引
use test
> db.vast.ensureIndex( { id: 1 } )
### 开启分片
use admin
> db.runCommand( { shardcollection : "test.vast",key : {id: 1} } )
3、集合分片验证
admin> use test
test> for(i=1;i<2000000;i++){ db.vast.insert({"id":i,"name":"shenzheng","age":70,"date":new Date()}); }
test> db.vast.stats()
4、分片结果测试
shard1:
mongo --port 38021
db.vast.count();
shard2:
mongo --port 38024
db.vast.count();
Hash分片例子:
对oldliu库下的vast大表进行hash
创建哈希索引
(1)对于oldliu开启分片功能
mongo --port 38017 admin
use admin
admin> db.runCommand( { enablesharding : "oldliu" } )
(2)对于oldliu库下的vast表建立hash索引
use oldliu
oldliu> db.vast.ensureIndex( { id: "hashed" } )
(3)开启分片
use admin
admin > sh.shardCollection( "oldliu.vast", { id: "hashed" } )
(4)录入10w行数据测试
use oldliu
for(i=1;i<100000;i++){ db.vast.insert({"id":i,"name":"shenzheng","age":70,"date":new Date()}); }
(5)hash分片结果测试
mongo --port 38021
use oldliu
db.vast.count();
mongo --port 38024
use oldliu
db.vast.count();
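To see how the documents were spread across sh1 and sh2, a quick check can also be run from mongos against the sharded collection (exact output varies by version):
mongo --port 38017
use oldliu
db.vast.getShardDistribution()
sh.status()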
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1.安装epel源。
vim /etc/yum.repos.d/epel.repo
[epel]
name=Extra Packages for Enterprise Linux 7 - $basearch
baseurl=http://mirrors.aliyun.com/epel/7/$basearch
failovermethod=priority
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
[epel-debuginfo]
name=Extra Packages for Enterprise Linux 7 - $basearch - Debug
baseurl=http://mirrors.aliyun.com/epel/7/$basearch/debug
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=0
[epel-source]
name=Extra Packages for Enterprise Linux 7 - $basearch - Source
baseurl=http://mirrors.aliyun.com/epel/7/SRPMS
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=0
也可以到阿里云官方仓库下载。
yum install autoconf gcc libxml2-devel openssl-devel curl-devel libjpeg-devel libpng-devel libXpm-devel freetype-devel libmcrypt-devel make ImageMagick-devel libssh2-devel gcc-c++ cyrus-sasl-devel -y
编译参数:
./configure \
--prefix=/usr/local/php \
--with-config-file-path=/usr/local/php/etc \
--with-config-file-scan-dir=/usr/local/php/etc/php.d \
--disable-ipv6 \
--enable-bcmath \
--enable-calendar \
--enable-exif \
--enable-fpm \
--with-fpm-user=www \
--with-fpm-group=www \
--enable-ftp \
--enable-gd-jis-conv \
--enable-gd-native-ttf \
--enable-inline-optimization \
--enable-mbregex \
--enable-mbstring \
--enable-mysqlnd \
--enable-opcache \
--enable-pcntl \
--enable-shmop \
--enable-soap \
--enable-sockets \
--enable-static \
--enable-sysvsem \
--enable-wddx \
--enable-xml \
--with-curl \
--with-gd \
--with-jpeg-dir \
--with-freetype-dir \
--with-xpm-dir \
--with-png-dir \
--with-gettext \
--with-iconv \
--with-libxml-dir \
--with-mcrypt \
--with-mhash \
--with-mysqli \
--with-pdo-mysql \
--with-pear \
--with-openssl \
--with-xmlrpc \
--with-zlib \
--disable-debug \
--disable-phpdbg
If configure ends with "Thank you for using PHP." and no errors, it succeeded;
make && make install
+++++++++++++++++++++++++++++++++
cp /data/php-7.0.31/sapi/fpm/init.d.php-fpm /etc/init.d/php-fpm
chmod a+x /etc/init.d/php-fpm
cp /data/php-7.0.31/php.ini-development /usr/local/php/etc/php.ini
cp /usr/local/php/etc/php-fpm.conf.default /usr/local/php/etc/php-fpm.conf
cp /usr/local/php/etc/php-fpm.d/www.conf.default /usr/local/php/etc/php-fpm.d/www.conf
启动:
/etc/init.d/php-fpm start
出现done字样,表示php启动成功。
server {
listen 80;
server_name www.gz.com;
access_log /data/web-logs/gz.com.access.log weblog;
charset utf-8;
error_page 404 403 /erro/404.html;
error_page 500 502 503 504 /erro/50x.html;
root /data/web-gz;
index index.php index.html;
location ~ [^/]\.php(/|$) {
#fastcgi_pass 127.0.0.1:9000;
fastcgi_pass unix:/dev/shm/php-cgi.sock;
index index.php;
fastcgi_index index.php;
fastcgi_param MB_APPLICATION production;
include fastcgi.conf;
}
location /sz {
rewrite .* http://www.baidu.com last;
}
}
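To check that this server block actually hands .php requests to php-fpm (assumes www.conf was changed to listen = /dev/shm/php-cgi.sock; otherwise switch back to the commented 127.0.0.1:9000 line):
echo '<?php phpinfo(); ?>' > /data/web-gz/info.php
ls -l /dev/shm/php-cgi.sock                        # the fpm socket must exist
curl -s -H 'Host: www.gz.com' http://127.0.0.1/info.php | head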
+++++++++++++++++++++++++++++++++++++++++++++++++++
第三方模块安装步骤:
1,下载相关模块包:
https://pecl.php.net/package(在此页面搜索包名)
2,解压相关包,进入解压完的目录
执行:
/usr/local/php/bin/phpize(根据安装路径不同找到phpize命令)
3,进行./configure
./configure --with-php-config=/usr/local/php/bin/php-config (根据安装路径不同找到php-config命令)
4,无报错则执行:
make && make install
5,在php.ini里面增加相应的模块配置,并重启php
增加示例:
vim /usr/local/php/etc/php.ini
最后一行:(假设安装的是redis模块)
extension=redis.so
必要情况下指定.so的路径,路径显示在编译安装完后最后一行:
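After adding extension=redis.so, a quick check that the module is really loaded (paths follow the install prefix used above):
/usr/local/php/bin/php -m | grep -i redis
/etc/init.d/php-fpm restart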
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
mkdir /data/
yum repolist
cd /data/
yum install -y zlib zlib-devel bzip2 bzip2-devel ncurses ncurses-devel readline readline-devel openssl openssl-devel openssl-static xz lzma xz-devel sqlite sqlite-devel gdbm gdbm-devel tk tk-devel gcc
wget https://www.python.org/ftp/python/3.6.2/Python-3.6.2.tar.xz
mkdir -p /usr/local/python3
tar -xf /data/Python-3.6.2.tar.xz
cd /data/Python-3.6.2
./configure --prefix=/usr/local/python3 --enable-optimizations
make && make install
ln -s /usr/local/python3/bin/python3 /usr/bin/python3
ln -s /usr/local/python3/bin/pip3 /usr/bin/pip3
pip3 install --upgrade pip
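A quick verification of the Python build (the venv path is just an example):
python3 -V
pip3 -V
python3 -m venv /opt/venv-demo && source /opt/venv-demo/bin/activate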
+++++++++++++++++++++++++++++++++++++++++++++++++++++++
nginx静态文件配置:[root@nginx conf.d]# vim local.conf
server {
listen 80; ##监听端口
server_name ip;
access_log /opt/nginx_log/local.log main;
location / {
index index.html;
root /var/www/html; ##主目录
}
location ~ \.(gif|jpg|jpeg|png|bmp|swf)$ { ##以括号内结尾的请求,都请求nginx
root /var/www/html; ##主目录
}
location ~ \.(jsp|do)$ { ##以.jsp和.do结尾的,都请求tomcat
proxy_pass http://ip; ##tomcat的IP地址
expires 1h; ##缓存一小时
}
}
编写静态文件:
vim /var/www/html/index.html
nginx
编写动态文件:
vim /data/webapps/test1.jsp
<%@ page contentType="text/html; charset=utf-8" language="java" import="java.sql.*" errorPage="" %>
<html>
<head></head>
<body>
<script type="text/javascript">
function display(clock){
var now=new Date(); //创建Date对象
var year=now.getFullYear(); //获取年份
var month=now.getMonth(); //获取月份
var date=now.getDate(); //获取日期
var day=now.getDay(); //获取星期
var hour=now.getHours(); //获取小时
var minu=now.getMinutes(); //获取分钟
var sec=now.getSeconds(); //获取秒钟
month=month+1;
var arr_week=new Array("星期日","星期一","星期二","星期三","星期四","星期五","星期六");
var week=arr_week[day]; //获取中文的星期
var time=year+"年"+month+"月"+date+"日 "+week+" "+hour+":"+minu+":"+sec; //组合系统时间
clock.innerHTML="当前时间:"+time; //显示系统时间
}
window.onload=function(){
window.setInterval("display(clock)", 1000);
}
</script>
<div id="clock" ></div>
</body>
</html>
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
(1) MASTER 节点配置文件(192.168.50.133)
# vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
## keepalived 自带的邮件提醒需要开启 sendmail 服务。 建议用独立的监控或第三方 SMTP
router_id liuyazhuang133 ## 标识本节点的字符串，通常为 hostname
}
## keepalived 会定时执行脚本并对脚本执行的结果进行分析,动态调整 vrrp_instance 的优先级。如果脚本执行结果为 0,并且 weight 配置的值大于 0,则优先级相应的增加。如果脚本执行结果非 0,并且 weight配置的值小于 0,则优先级相应的减少。其他情况,维持原本配置的优先级,即配置文件中 priority 对应的值。
vrrp_script chk_nginx {
script "/etc/keepalived/nginx_check.sh" ## 检测 nginx 状态的脚本路径
interval 2 ## 检测时间间隔
weight -20 ## 如果条件成立,权重-20
}
## 定义虚拟路由, VI_1 为虚拟路由的标示符,自己定义名称
vrrp_instance VI_1 {
state MASTER ## 主节点为 MASTER, 对应的备份节点为 BACKUP
interface eth0 ## 绑定虚拟 IP 的网络接口,与本机 IP 地址所在的网络接口相同, 我的是 eth0
virtual_router_id 33 ## 虚拟路由的 ID 号, 两个节点设置必须一样, 可选 IP 最后一段使用, 相同的 VRID 为一个组,他将决定多播的 MAC 地址
mcast_src_ip 192.168.50.133 ## 本机 IP 地址
priority 100 ## 节点优先级, 值范围 0-254, MASTER 要比 BACKUP 高
nopreempt ## 优先级高的设置 nopreempt 解决异常恢复后再次抢占的问题
advert_int 1 ## 组播信息发送间隔,两个节点设置必须一样, 默认 1s
## 设置验证信息,两个节点必须一致
authentication {
auth_type PASS
auth_pass 1111 ## 真实生产环境中按需求改为实际密码
}
## 将 track_script 块加入 instance 配置块
track_script {
chk_nginx ## 执行 Nginx 监控的服务
} #
# 虚拟 IP 池, 两个节点设置必须一样
virtual_ipaddress {
192.168.50.130 ## 虚拟 ip,可以定义多个
}
}
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
BACKUP 节点配置文件(192.168.50.134)
# vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id liuyazhuang134
}
vrrp_script chk_nginx {
script "/etc/keepalived/nginx_check.sh"
interval 2
weight -20
}
vrrp_instance VI_1 {
state BACKUP
interface eth1
virtual_router_id 33
mcast_src_ip 192.168.50.134
priority 90
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script {
chk_nginx
}
virtual_ipaddress {
192.168.50.130
}
}
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# vi /etc/keepalived/nginx_check.sh
#!/bin/bash
# 统计当前运行的nginx进程数
A=`ps -C nginx --no-header |wc -l`
if [ $A -eq 0 ];then
    # nginx挂了，先尝试拉起
    /usr/local/nginx/sbin/nginx
    sleep 2
    # 2秒后仍未拉起，则杀掉keepalived，让VIP漂移到备节点
    if [ `ps -C nginx --no-header |wc -l` -eq 0 ];then
        pkill keepalived
    fi
fi
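After saving the script on both nodes, a quick way to verify the failover setup (interface and VIP as in the keepalived.conf above):
chmod +x /etc/keepalived/nginx_check.sh
systemctl restart keepalived
ip addr show eth0 | grep 192.168.50.130   # the VIP should be bound on the MASTER node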
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
出现es报错,无法锁住内存
在启动脚本里增加:
[Service]
LimitMEMLOCK=infinity
重启服务
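A hedged sketch of where the override usually goes with systemd (assumes the unit name is elasticsearch.service and ES listens on 9200):
mkdir -p /etc/systemd/system/elasticsearch.service.d
cat > /etc/systemd/system/elasticsearch.service.d/override.conf <<EOF
[Service]
LimitMEMLOCK=infinity
EOF
systemctl daemon-reload
systemctl restart elasticsearch
curl -s 'localhost:9200/_nodes?filter_path=**.mlockall'   # should report "mlockall": true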
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
docker安装
yum install -y yum-utils device-mapper-persistent-data lvm2 安装相关依赖
增加docker仓库
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
安装docker
yum install docker-ce-18.06.3.ce -y
启动,开机自启,查看版本
systemctl start docker
systemctl enable docker
docker version
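A quick sanity check after installation (pulls a tiny test image from Docker Hub):
docker run --rm hello-world
docker info | grep -i 'server version'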
+++++++++++++++++++++++++++++++++++++++++++++++++++++++
log_format main '{"@timestamp": "$time_iso8601",'
'"host": "$server_addr",'
'"clientip": "$remote_addr",'
'"size": $body_bytes_sent,'
'"responsetime": $request_time,'
'"upstreamtime": "$upstream_response_time",'
'"upstreamhost": "$upstream_addr",'
'"http_host": "$host",'
'"url": "$uri",'
'"domain": "$host",'
'"xff": "$http_x_forwarded_for",'
'"referer": "$http_referer",'
'"status": "$status"'
' }';
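To actually use this format, reference its name in an access_log directive (the format here is named main, which replaces the default main format; the log path below is an example):
access_log /var/log/nginx/access_json.log main;
# after a reload, each request is written as one JSON object per line:
nginx -s reload
curl -s http://127.0.0.1/ >/dev/null
tail -1 /var/log/nginx/access_json.log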
+++++++++++++++++++++++++++++++++++++++++++++++++
Tomcat access-log pattern (the inner double quotes must be escaped as &quot; inside server.xml):
pattern="{&quot;client&quot;:&quot;%h&quot;, &quot;client user&quot;:&quot;%l&quot;, &quot;authenticated&quot;:&quot;%u&quot;, &quot;access time&quot;:&quot;%t&quot;, &quot;method&quot;:&quot;%r&quot;, &quot;status&quot;:&quot;%s&quot;, &quot;send bytes&quot;:&quot;%b&quot;, &quot;Query?string&quot;:&quot;%q&quot;, &quot;partner&quot;:&quot;%{Referer}i&quot;, &quot;Agent version&quot;:&quot;%{User-Agent}i&quot;}"/>
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
sed -i 's#https://updates.jenkins.io/download#https://mirrors.tuna.tsinghua.edu.cn/jenkins#g' default.json && sed -i 's#http://www.google.com#https://www.baidu.com#g' default.json
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
yum install -y curl policycoreutils-python openssh-server
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
创建dev分支
git branch dev
切换到dev分支
git checkout dev
查看所在分支
git branch
增加文件然后commit操作:
git add .
git commit -m "xxx"
推送代码报错:
git push
按照提示设置如下:
git config --global push.default simple
再次
git push
提示:上游无dev分支
按照提示,创建并提交
git push --set-upstream origin dev
提示输入账号密码,输入完成则提交成功。
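Once the work on dev is done, merging it back is the usual follow-up (assuming the main branch is named master):
git checkout master
git pull
git merge dev
git push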
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
[zabbix]
name=Zabbix Official Repository - $basearch
baseurl=https://mirrors.aliyun.com/zabbix/zabbix/4.0/rhel/7/$basearch/
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-ZABBIX-A14FE591
[zabbix-debuginfo]
name=Zabbix Official Repository debuginfo - $basearch
baseurl=https://mirrors.aliyun.com/zabbix/zabbix/4.0/rhel/7/$basearch/debuginfo/
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-ZABBIX-A14FE591
gpgcheck=0
[zabbix-non-supported]
name=Zabbix Official Repository non-supported - $basearch
baseurl=https://mirrors.aliyun.com/zabbix/non-supported/rhel/7/$basearch/
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-ZABBIX
gpgcheck=0
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
kvm虚拟化管理软件的安装
yum install libvirt virt-install qemu-kvm -y
systemctl start libvirtd.service
systemctl status libvirtd.service
建议虚拟机内存不要低于1024M,否则安装系统特别慢!
virt-install --virt-type kvm --os-type=linux --os-variant rhel7 --name centos7 --memory 1024 --vcpus 1 --disk /opt/centos2.raw,format=raw,size=10 --cdrom /opt/CentOS-7-x86_64-DVD-1708.iso --network network=default --graphics vnc,listen=0.0.0.0 --noautoconsole
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
kvm虚拟机的virsh日常管理和配置
列表list(--all)
开机start
关机shutdown
拔电源关机destroy
导出配置dumpxml 例子:virsh dumpxml centos7 >centos7-off.xml
删除undefine 推荐：先destroy，再undefine
导入配置define
修改配置edit(自带语法检查)
centos7的kvm虚拟机:
grubby --update-kernel=ALL --args="console=ttyS0,115200n8"
reboot
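With the serial console enabled by the grubby command above, the VM can be reached directly from the host:
virsh console centos7     # exit the console with Ctrl+]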
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
10、我们web站点的架构环境为LNMP,如果有大量用户反馈访问我们web站点速度比较慢,请你从多方面谈谈问题排查思路,并给出解决方案。 (25分)
安全:是否有ddos攻击。是否有cc攻击
缓存:cdn缓存,redis数据缓存
Nginx调优:nginx的静态资源缓存,开启资源压缩,最大连接数,epoll开启,自适应进程开启。连接timeout优化
Mysql调优:语句调优,查找慢查询,增加索引,规范sql用法,避免select*之类语句。增加相应的主键,配置上,增加inndb缓存,增大连接数。开启独立表空间,开启独立索引空间。
代码:检查代码慢日志。看是否有死循环之类的或者高消耗。
网络:网络有无掉包,带宽是否足够,dns解析情况
Php调优:增加子进程,增加php缓存,开启异步模式。
扩容：增加服务器数量，加内存、加CPU、加带宽、升级SSD。
服务器调优:最大文件打开数调大。相应的内核转发参数打开,开启内核快速回收机制。
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
本地安装：上传打包好的rpm包（tar包），解压缩，然后配置本地yum源
[openstack]
name=openstack
baseurl=file:///opt/repo
gpgcheck=0
更改节点名字:
vim /etc/hosts
192.168.75.18 controller
192.168.75.19 compute
hostnamectl set-hostname controller
hostnamectl set-hostname compute
安装基础服务:
所有节点安装:
yum install chrony -y
控制节点
(选择性更改ntp服务器上游,此处不做更改)
vim /etc/chrony.conf
26行 allow 192.168.0.0/16
systemctl restart chronyd
计算节点:
vim /etc/chrony.conf
修改3行
server 192.168.75.18 iburst
剩下server注释掉
所有节点同时安装
yum install python-openstackclient openstack-selinux -y
controller节点安装:
安装mariadb:
yum install mariadb mariadb-server python2-PyMySQL -y
echo '[mysqld]
bind-address = 192.168.75.15
default-storage-engine = innodb
innodb_file_per_table #设置每个表的独立表空间文件
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8' >/etc/my.cnf.d/openstack.cnf
systemctl start mariadb
systemctl enable mariadb
mysql_secure_installation #数据库安全初始化,不做会同步有问题
回车
n #不设置数据库密码
y
y
y
y
创建组件所需数据库以及使用用户及密码:
keystone相关：
create database keystone;
grant all on keystone.* to 'keystone'@'localhost' identified by 'KEYSTONE_DBPASS';
grant all on keystone.* to 'keystone'@'%' identified by 'KEYSTONE_DBPASS';
glance相关:
create database glance;
grant all on glance.* to 'glance'@'localhost' identified by 'GLANCE_DBPASS';
grant all on glance.* to 'glance'@'%' identified by 'GLANCE_DBPASS';
nova相关:
create database nova;
grant all on nova.* to 'nova'@'localhost' identified by 'NOVA_DBPASS';
grant all on nova.* to 'nova'@'%' identified by 'NOVA_DBPASS';
nova api相关:
create database nova_api;
grant all on nova_api.* to 'nova'@'localhost' identified by 'NOVA_DBPASS';
grant all on nova_api.* to 'nova'@'%' identified by 'NOVA_DBPASS';
neutron相关:
create database neutron;
grant all on neutron.* to 'neutron'@'localhost' identified by 'NEUTRON_DBPASS';
grant all on neutron.* to 'neutron'@'%' identified by 'NEUTRON_DBPASS';
查看创建用户
select user,host from mysql.user;
安装消息队列
yum install rabbitmq-server -y
systemctl start rabbitmq-server.service
systemctl enable rabbitmq-server.service
rabbitmqctl add_user openstack RABBIT_PASS (增加OpenStack用户设置密码为RABBIT_PASS)
rabbitmqctl set_permissions openstack ".*" ".*" ".*" (可读可写可配置)
rabbitmq-plugins enable rabbitmq_management (启用rabbitmq管理插件)默认登录账号guest 密码guest
http://192.168.75.21:15672/ guest guest
安装memcached
yum install memcached python-memcached -y
sed -i "s#127.0.0.1#0.0.0.0#g" /etc/sysconfig/memcached (或者改成本机ip)
systemctl start memcached
systemctl enable memcached
以下为OpenStack服务:
keystone认证服务:
通过阿帕奇启动:
认证管理,授权管理,服务目录
认证:账号密码
授权:授权管理
服务目录: 记录作用记录每个服务的相关信息
yum install openstack-utils -y (自动改配置文件工具)
yum install openstack-keystone httpd mod_wsgi -y
cp /etc/keystone/keystone.conf /etc/keystone/keystone.conf.bak
去掉空行
grep -Ev '^$|#' /etc/keystone/keystone.conf
grep -Ev '^$|#' /etc/keystone/keystone.conf.bak > /etc/keystone/keystone.conf
配置修改以下几行:
[DEFAULT]
admin_token = ADMIN_TOKEN
[database]
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
[token]
provider = fernet (令牌的提供者)
可以命令生成:
openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_token ADMIN_TOKEN
openstack-config --set /etc/keystone/keystone.conf database connection mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
openstack-config --set /etc/keystone/keystone.conf token provider fernet
同步数据库:
su -s /bin/sh -c "keystone-manage db_sync" keystone
初始化fernet:
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
配置httpd:
echo "ServerName controller" >> /etc/httpd/conf/httpd.conf (优化阿帕奇)
生成keystone httpd的配置:
echo 'Listen 5000
Listen 35357
<VirtualHost *:5000>
WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-public
WSGIScriptAlias / /usr/bin/keystone-wsgi-public
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/httpd/keystone-error.log
CustomLog /var/log/httpd/keystone-access.log combined
<Directory /usr/bin>
Require all granted
</Directory>
</VirtualHost>
<VirtualHost *:35357>
WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-admin
WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/httpd/keystone-error.log
CustomLog /var/log/httpd/keystone-access.log combined
<Directory /usr/bin>
Require all granted
</Directory>
</VirtualHost>' >/etc/httpd/conf.d/wsgi-keystone.conf
systemctl start httpd.service
systemctl enable httpd.service
创建注册账户,注册keystone自己api:
声明安装注册参数:
export OS_TOKEN=ADMIN_TOKEN
export OS_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
env | grep OS
openstack service create --name keystone --description "OpenStack Identity" identity
openstack endpoint create --region RegionOne identity public http://controller:5000/v3
openstack endpoint create --region RegionOne identity internal http://controller:5000/v3
openstack endpoint create --region RegionOne identity admin http://controller:35357/v3
创建域(地区)、项目(租户)、用户、角色
openstack domain create --description "Default Domain" default
openstack project create --domain default --description "Admin Project" admin
openstack user create --domain default --password ADMIN_PASS admin
openstack role create admin
关联项目、用户、角色
openstack role add --project admin --user admin admin
创建service项目（用于存放各系统服务的用户）
openstack project create --domain default --description "Service Project" service
测试keystone服务:
已有系统变量(OS_TOKEN/OS_URL)的情况下执行 openstack token issue 会报错
取消环境变量 unset OS_TOKEN OS_URL
重新设置变量,但是退出终端变量会消失。
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
测试命令:
openstack user list
openstack token issue
家目录下创建环境变量脚本
echo 'export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2' >/root/admin-openrc
source ~/admin-openrc
也可以直接把 source /root/admin-openrc 写到 .bashrc 中
source admin-openrc
glance相关:
在keystone上创建glance相关关联角色:
openstack user create --domain default --password GLANCE_PASS glance
openstack role add --project service --user glance admin
验证:openstack role assignment list
openstack user list (可以与上面的表对应)
openstack project list (与上面一样)
在keystone上创建服务和api
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
安装glance服务
yum install openstack-glance -y
修改glance配置文件:
#用openstack配置命令生成(glanceapi的文件)
openstack-config --set /etc/glance/glance-api.conf database connection mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
openstack-config --set /etc/glance/glance-api.conf glance_store stores file,http
openstack-config --set /etc/glance/glance-api.conf glance_store default_store file
openstack-config --set /etc/glance/glance-api.conf glance_store filesystem_store_datadir /var/lib/glance/images/
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_url http://controller:35357
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_type password
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken project_name service
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken username glance
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken password GLANCE_PASS
openstack-config --set /etc/glance/glance-api.conf paste_deploy flavor keystone
#用openstack配置命令生成(glanceregistry文件)
openstack-config --set /etc/glance/glance-registry.conf database connection mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_url http://controller:35357
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_type password
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken project_name service
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken username glance
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken password GLANCE_PASS
openstack-config --set /etc/glance/glance-registry.conf paste_deploy flavor keystone
同步数据库:
su -s /bin/sh -c "glance-manage db_sync" glance
启动服务:
systemctl start openstack-glance-api.service openstack-glance-registry.service
systemctl enable openstack-glance-api.service openstack-glance-registry.service
上传镜像测试:
上传命令(--disk-format 指定镜像格式，--container-format bare 表示这是openstack可用的裸格式镜像，--public 指定为公共镜像):
openstack image create "cirros" --file cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare --public
查看相关镜像:
openstack image list
nova的计算服务(核心服务)
nova-api:接受请求反馈请求
nova-compute: 真正管理虚拟机(控制节点不安装(多个)调用libvirt管理虚拟机)
nova-conductor:帮助nova-compute代理修改数据库中的虚拟机状态
nova-consoleauth: 负责web版vnc控制台访问的令牌认证
nova-network: 早期openstack网络管理(已弃用,改用neutron)
nova-novncproxy:web版vnc客户端
nova-scheduler:nova调度器（挑出最合适的nova-compute来创建虚拟机）
nova-api-metadata:接受虚拟机发送的元数据请求(配合neutron-metadata-anget来定制虚拟机)
在keystone创建系统用户:
openstack user create --domain default --password NOVA_PASS nova
openstack role add --project service --user nova admin
在keystone上注册服务和api
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1/%\(tenant_id\)s
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1/%\(tenant_id\)s
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1/%\(tenant_id\)s
安装nova服务:
yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler -y
#生成配置文件:
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
openstack-config --set /etc/nova/nova.conf DEFAULT rpc_backend rabbit
openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.75.15
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:NOVA_DBPASS@controller/nova
openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:35357
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken password NOVA_PASS
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_host controller
openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_userid openstack
openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_password RABBIT_PASS
openstack-config --set /etc/nova/nova.conf vnc enabled True
openstack-config --set /etc/nova/nova.conf vnc vncserver_listen '0.0.0.0'
openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address '$my_ip'
openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://controller:6080/vnc_auto.html
同步数据库:
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage db sync" nova
启用服务:
systemctl start openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service \
openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service \
openstack-nova-conductor.service openstack-nova-novncproxy.service
计算节点安装:
yum install openstack-nova-compute -y
yum install openstack-utils -y
#配置:
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
openstack-config --set /etc/nova/nova.conf DEFAULT rpc_backend rabbit
openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.75.16
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:35357
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken password NOVA_PASS
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_host controller
openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_userid openstack
openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_password RABBIT_PASS
openstack-config --set /etc/nova/nova.conf vnc enabled True
openstack-config --set /etc/nova/nova.conf vnc vncserver_listen 0.0.0.0
openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address '$my_ip'
openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://controller:6080/vnc_auto.html
#openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu
openstack-config --set /etc/nova/nova.conf libvirt virt_type kvm
openstack-config --set /etc/nova/nova.conf libvirt cpu_mode none
启动服务:
systemctl start libvirtd
systemctl enable libvirtd
systemctl start openstack-nova-compute
systemctl enable openstack-nova-compute
网络服务neutron(在控制节点做)
neutron-server 相当于api
neutron-linuxbridge-agent 创建桥接网卡
neutron-dhcp-agent 分配ip
neutron-metadata-agent 虚拟机定制化操作
neutron-l3-agent 实现vxlan三层网络
创建用户关联角色
openstack user create --domain default --password NEUTRON_PASS neutron
openstack role add --project service --user neutron admin
创建服务注册api
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
安装相关包:
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables ipset -y
修改相应配置:
openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins
openstack-config --set /etc/neutron/neutron.conf DEFAULT rpc_backend rabbit
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_status_changes True
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_data_changes True
openstack-config --set /etc/neutron/neutron.conf database connection mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://controller:35357
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password NEUTRON_PASS
openstack-config --set /etc/neutron/neutron.conf nova auth_url http://controller:35357
openstack-config --set /etc/neutron/neutron.conf nova auth_type password
openstack-config --set /etc/neutron/neutron.conf nova project_domain_name default
openstack-config --set /etc/neutron/neutron.conf nova user_domain_name default
openstack-config --set /etc/neutron/neutron.conf nova region_name RegionOne
openstack-config --set /etc/neutron/neutron.conf nova project_name service
openstack-config --set /etc/neutron/neutron.conf nova username nova
openstack-config --set /etc/neutron/neutron.conf nova password NOVA_PASS
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_host controller
openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_userid openstack
openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_password RABBIT_PASS
#cat ml2_conf.ini >/etc/neutron/plugins/ml2/ml2_conf.ini
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers linuxbridge
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks provider
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_ipset True
#cat linuxbridge_agent.ini >/etc/neutron/plugins/ml2/linuxbridge_agent.ini
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:ens33
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group True
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan False
#cat dhcp_agent.ini >/etc/neutron/dhcp_agent.ini
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver neutron.agent.linux.dhcp.Dnsmasq
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata true
#cat metadata_agent.ini >/etc/neutron/metadata_agent.ini
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT nova_metadata_ip controller
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT metadata_proxy_shared_secret METADATA_SECRET
做软连接找到插件
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
数据库同步:
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file \
/etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
启动服务:
systemctl start neutron-server.service neutron-linuxbridge-agent.service \
neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl enable neutron-server.service neutron-linuxbridge-agent.service \
neutron-dhcp-agent.service neutron-metadata-agent.service
检测方法:neutron agent-list
openstack-config --set /etc/nova/nova.conf neutron url http://controller:9696
openstack-config --set /etc/nova/nova.conf neutron auth_url http://controller:35357
openstack-config --set /etc/nova/nova.conf neutron auth_type password
openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
openstack-config --set /etc/nova/nova.conf neutron project_name service
openstack-config --set /etc/nova/nova.conf neutron username neutron
openstack-config --set /etc/nova/nova.conf neutron password NEUTRON_PASS
openstack-config --set /etc/nova/nova.conf neutron service_metadata_proxy True
openstack-config --set /etc/nova/nova.conf neutron metadata_proxy_shared_secret METADATA_SECRET
计算节点安装neutron:
安装:
yum install openstack-neutron-linuxbridge ebtables ipset -y
配置:
openstack-config --set /etc/neutron/neutron.conf DEFAULT rpc_backend rabbit
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://controller:35357
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password NEUTRON_PASS
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_host controller
openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_userid openstack
openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_password RABBIT_PASS
#cat linuxbridge_agent.ini >/etc/neutron/plugins/ml2/linuxbridge_agent.ini
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:ens33
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group True
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan False
nova配置:
openstack-config --set /etc/nova/nova.conf neutron url http://controller:9696
openstack-config --set /etc/nova/nova.conf neutron auth_url http://controller:35357
openstack-config --set /etc/nova/nova.conf neutron auth_type password
openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
openstack-config --set /etc/nova/nova.conf neutron project_name service
openstack-config --set /etc/nova/nova.conf neutron username neutron
openstack-config --set /etc/nova/nova.conf neutron password NEUTRON_PASS
启动服务:
systemctl restart openstack-nova-compute.service
systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service
验证:
neutron agent-list
web页面:
直接装在控制节点:
安装:
yum install openstack-dashboard -y
配置:
cat local_settings >/etc/openstack-dashboard/local_settings (记录好模板)
sed -i '3a WSGIApplicationGroup %{GLOBAL}' /etc/httpd/conf.d/openstack-dashboard.conf(修改不能访问的bug)
systemctl restart httpd.service memcached
http://ip/dashboard
域 default 账户:admin 密码:ADMIN_PASS
启动实例步骤:
创建网络:
neutron net-create --shared --provider:physical_network provider --provider:network_type flat WAN
neutron subnet-create --name subnet-wan --allocation-pool \
start=192.168.75.100,end=192.168.75.200 --dns-nameserver 223.5.5.5 \
--gateway 192.168.75.2 WAN 192.168.75.0/24
硬件配置:
openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
生成sshkey并且生成安全组
ssh-keygen -q -N "" -f ~/.ssh/id_rsa
openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
openstack security group rule create --proto icmp default
openstack security group rule create --proto tcp --dst-port 22 default (创建默认安全组)
查看网络id:
neutron net-list
启动实例:
openstack server create --flavor m1.nano --image cirros \
--nic net-id=85be22ac-6f15-43f9-bb78-722c461d9df4 --security-group default \
--key-name mykey laogou
(replace net-id with the id returned by neutron net-list above; "laogou" is the instance name)
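After the create command returns, a few checks (laogou is the example instance name used above):
openstack server list                      # status should go from BUILD to ACTIVE
nova list                                  # the same information via the nova CLI
openstack console url show laogou          # noVNC URL for the web console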
glance镜像服务迁移
1,停掉控制节点的glance服务
停掉glance-api glance-registry
2,在新节点上安装数据库和python2-PyMySQL
启动数据库并且安全初始化
3,恢复glance数据库数据,mysqldump -B glance > glance.sql
把生成的sql文件放到新的节点
在新的计算节点:
mysql导入：mysql < glance.sql
创建glance用户以及密码
4,安装配置glance服务:
yum install openstack-glance -y
配置glance
拉取旧配置然后更改：
拉取glance两个文件,glance-api,glance-registry配置
更改链接数据库的信息
更改数据库controller为本机地址
启动服务
5,迁移glance镜像
/var/lib/glance/images/*
注意镜像权限
6,更改keystone的注册信息(更改endpoint的信息) 注意备份(在控制节点操作)
mysqldump keystone endpoint > endpoint.sql
cp endpoint.sql /data/bak
vim endpoint.sql
%s#http://controller:9292#http://新glance节点ip:9292#gc (c参数表示每次替换前交互确认)
检测:
openstack endpoint list | grep image
openstack image list
7,此时启动实例报错,更改nova配置(包括控制节点,计算节点)
sed -i 's#http://controller:9292#http://新glance节点ip:9292#g' /etc/nova/nova.conf (可用ansible批量更改)
重启控制节点:
systemctl restart openstack-nova-api (控制节点)
systemctl restart openstack-nova-compute (计算节点)
验证以上所有修改：
再上传一个镜像验证（也可以直接在UI界面上传镜像并创建实例测试）。
cinder块存储服务
cinder-api: 接收和响应外部有关块存储请求
cinder-volume: 提供存储空间
cinder-scheduler:调度器,决定将要分配的空间由哪一个cinder-volume提供
cinder-backup: 备份存储
1:数据库创库授权
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
IDENTIFIED BY 'CINDER_DBPASS';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
IDENTIFIED BY 'CINDER_DBPASS';
2:在keystone创建系统用户(glance,nova,neutron,cinder)关联角色
openstack user create --domain default --password CINDER_PASS cinder
openstack role add --project service --user cinder admin
3:在keystone上创建服务和注册api
openstack service create --name cinder \
--description "OpenStack Block Storage" volume
openstack service create --name cinderv2 \
--description "OpenStack Block Storage" volumev2
openstack endpoint create --region RegionOne \
volume public http://controller:8776/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne \
volume internal http://controller:8776/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne \
volume admin http://controller:8776/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne \
volumev2 public http://controller:8776/v2/%\(tenant_id\)s
openstack endpoint create --region RegionOne \
volumev2 internal http://controller:8776/v2/%\(tenant_id\)s
openstack endpoint create --region RegionOne \
volumev2 admin http://controller:8776/v2/%\(tenant_id\)s
4:安装服务相应软件包
yum install openstack-cinder
5:修改相应服务的配置文件
cp /etc/cinder/cinder.conf{,.bak}
grep -Ev '^$|#' /etc/cinder/cinder.conf.bak >/etc/cinder/cinder.conf
openstack-config --set /etc/cinder/cinder.conf DEFAULT rpc_backend rabbit
openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/cinder/cinder.conf DEFAULT my_ip 192.168.75.15
openstack-config --set /etc/cinder/cinder.conf database connection mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_url http://controller:35357
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_type password
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_name service
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken username cinder
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken password CINDER_PASS
openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp
openstack-config --set /etc/cinder/cinder.conf oslo_messaging_rabbit rabbit_host controller
openstack-config --set /etc/cinder/cinder.conf oslo_messaging_rabbit rabbit_userid openstack
openstack-config --set /etc/cinder/cinder.conf oslo_messaging_rabbit rabbit_password RABBIT_PASS
注意在[DEFAULT]下增加glance_api_servers地址，否则会报错
6:同步数据库
su -s /bin/sh -c "cinder-manage db sync" cinder
7:启动服务
openstack-config --set /etc/nova/nova.conf cinder os_region_name RegionOne
systemctl restart openstack-nova-api.service
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
在计算节点上:
先决条件
yum install lvm2 -y
systemctl enable lvm2-lvmetad.service
systemctl start lvm2-lvmetad.service
###增加两块硬盘
echo '- - -' >/sys/class/scsi_host/host0/scan (扫描识别硬盘)
fdisk -l
pvcreate /dev/sdb
pvcreate /dev/sdc
vgcreate cinder-ssd /dev/sdb
vgcreate cinder-sata /dev/sdc
###修改/etc/lvm/lvm.conf
在130下面插入一行:
filter = [ "a/sdb/", "a/sdc/","r/.*/"]
安装
yum install openstack-cinder targetcli python-keystone -y
配置
[root@compute1 ~]# cat /etc/cinder/cinder.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 192.168.75.15
glance_api_servers = http://192.168.75.17:9292
enabled_backends = ssd,sata
[BACKEND]
[BRCD_FABRIC_EXAMPLE]
[CISCO_FABRIC_EXAMPLE]
[COORDINATION]
[FC-ZONE-MANAGER]
[KEYMGR]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = CINDER_PASS
[matchmaker_redis]
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[oslo_versionedobjects]
[ssl]
[ssd]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-ssd
iscsi_protocol = iscsi
iscsi_helper = lioadm
volume_backend_name = ssd
[sata]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-sata
iscsi_protocol = iscsi
iscsi_helper = lioadm
volume_backend_name = sata
启动
systemctl enable openstack-cinder-volume.service target.service
systemctl start openstack-cinder-volume.service target.service
检测:cinder service-list
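With the two LVM backends above (volume_backend_name ssd / sata), one way to expose them as selectable volume types and test the setup, sketched with example names:
openstack volume type create ssd
openstack volume type set --property volume_backend_name=ssd ssd
openstack volume type create sata
openstack volume type set --property volume_backend_name=sata sata
openstack volume create --type ssd --size 1 vol-ssd-test
openstack volume list        # the volume should reach the available state
lvs                          # on the storage node, a new LV appears in VG cinder-ssd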
增加一个flat网段:
准备工作:
1,增加一块网卡。生成配置文件,更改名字和ip 不能重启网卡,
用命令启动: ifup 网卡名字 启动网卡 (同样在计算节点也做)
2,vim /etc/neutron/plugins/ml2/ml2_conf.ini (controller-node)
flat_networks = provider,net172_16
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
physical_interface_mappings = provider:ens33,net172_16:新网卡名字
重启服务:
systemctl restart neutron-server.service neutron-linuxbridge-agent.service
计算节点:
仅仅更改linuxbridge_agent.ini配置
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
physical_interface_mappings = provider:ens33,net172_16:新网卡名字
重启服务:
systemctl restart neutron-linuxbridge-agent.service
创建网络:
neutron net-create --shared --provider:physical_network net172_16 --provider:network_type flat WAN172
neutron subnet-create --name subnet-172wan --allocation-pool \
start=172.16.0.100,end=172.16.0.200 --dns-nameserver 223.5.5.5 \
--gateway 172.16.0.2 WAN172 172.16.0.0/24
块存储对接nfs(在存储节点)
安装nfs(略)
vim /etc/cinder/cinder.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 192.168.75.15
glance_api_servers = http://192.168.75.17:9292
enabled_backends = ssd,sata,nfs
......
[nfs]
volume_driver = cinder.volume.drivers.nfs.NfsDriver
nfs_shares_config = /etc/cinder/nfs_shares
volume_backend_name = nfs
vim /etc/cinder/nfs_shares
ip:/data
重启cinder-volume服务。
把控制节点兼职计算节点
yum install openstack-nova-compute -y
vi /etc/nova/nova.conf
配置对比compute节点,更改为compute节点重启libvirtd和nova-compute
实例冷迁移:
1,nova节点之间免密钥互信
usermod -s /bin/bash nova
su 进nova用户
生成密钥 ssh-keygen -t rsa -q -N ''
免密登录自己:
在.ssh目录下
cp -fa id_rsa.pub authorized_keys
然后用nova用户ssh登录下本机
然后两台compute节点就可以用nova用户互相免密登录
将公钥发送到其他计算节点的/var/lib/nova/.ssh 注意权限和权限组(nova)
修改控制节点nova.conf
vi /etc/nova/nova.conf
[DEFAULT]
scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
重启openstack-nova-scheduler
systemctl restart openstack-nova-scheduler.service
修改所有计算节点
vi /etc/nova/nova.conf
[DEFAULT]
allow_resize_to_same_host = True
重启openstack-nova-compute
systemctl restart openstack-nova-compute.service
然后即可在UI界面上执行迁移。注意：如果这些配置是后加的，加配置之前创建的实例不能被迁移。
openstack vxlan三层网络
1,删除所有平面网络的实例
2,修改/etc/neutron/neutron.conf的[DEFAULT]区域
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
3,修改/etc/neutron/plugins/ml2/ml2_conf.ini文件
[ml2]区域修改如下
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
在[ml2_type_vxlan]区域增加一行
vni_ranges = 1:1000
最终的配置文件如下
[root@controller ~]# cat /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = True
4,修改/etc/neutron/plugins/ml2/linuxbridge_agent.ini文件
在[vxlan]区域下
修改为
enable_vxlan = True
local_ip = 172.16.0.11
l2_population = True
#172.16.0.11这个IP还没有，马上就配，是主机规划的第二块网卡
最终的配置文件如下
[root@controller ~]# cat /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:eth0
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
enable_vxlan = True
local_ip = 172.16.0.11
l2_population = True
千万不要重启网卡!!!
我们使用ifconfig命令来添加网卡
ifconfig eth1 172.16.0.11 netmask 255.255.255.0
5,修改/etc/neutron/l3_agent.ini文件
在[DEFAULT]区域下,增加下面两行
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
external_network_bridge =
systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl start neutron-l3-agent.service
开机启动
systemctl enable neutron-l3-agent.service
计算节点:
配置
修改/etc/neutron/plugins/ml2/linuxbridge_agent.ini文件
在[vxlan]区域下
enable_vxlan = True
local_ip = 172.16.0.31
l2_population = True
最终的配置文件如下:
[root@compute1 ~]# cat /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:eth0
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
enable_vxlan = True
local_ip = 172.16.0.31
l2_population = True
#这个ip暂时没有,所以也需要配置
千万不要重启网卡!!!
我们使用ifconfig命令来添加网卡
ifconfig eth1 172.16.0.31 netmask 255.255.255.0
启动
重启agent服务
systemctl restart neutron-linuxbridge-agent.service
回到控制节点
vi /etc/openstack-dashboard/local_settings
将263行的
'enable_router': False,
修改为
'enable_router': True,
systemctl restart httpd.service memcached.service
在dashboard上开启三层路由
只有修改了/etc/openstack-dashboard/local_settings,才能开启三层路由器
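A sketch of creating a self-service (vxlan) network and a router once the l3-agent is running; it assumes the flat network WAN created earlier is first marked as external (network/subnet/router names below are examples):
neutron net-update WAN --router:external=True
neutron net-create private
neutron subnet-create --name subnet-private --dns-nameserver 223.5.5.5 private 10.0.1.0/24
neutron router-create r1
neutron router-interface-add r1 subnet-private
neutron router-gateway-set r1 WAN
neutron router-port-list r1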
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
docker容器
1:什么是容器?
容器就是在隔离的环境运行的一个进程,如果进程停止,容器就会销毁。隔离的环境拥有自己的系统文件,ip地址,主机名等
kvm虚拟机,linux,系统文件
程序:代码,命令
进程:正在运行的程序
2:容器和虚拟化的区别
linux容器技术,容器虚拟化和kvm虚拟化的区别
kvm虚拟化: 需要硬件的支持,需要模拟硬件,可以运行不同的操作系统,启动时间分钟级(开机启动流程)
linux开机启动流程:
bios开机硬件自检
根据bios设置的优先启动项boot 网卡 硬盘 u盘 光驱
读取mbr引导 UEFI(gpt分区) mbr硬盘分区信息,内核加载路径
加载内核
启动第一个进程init systemd
系统初始化完成
运行服务
。。。
容器:共用宿主机内核,容器的第一个进程直接运行服务,损耗少,启动快,性能高
容器虚拟化:不需要硬件的支持。不需要模拟硬件,共用宿主机的内核,启动时间秒级(没有开机启动流程)
总结:
(1)与宿主机使用同一个内核,性能损耗小;
(2)不需要指令级模拟;
(3)容器可以在CPU核心的本地运行指令,不需要任何专门的解释机制;
(4)避免了准虚拟化和系统调用替换中的复杂性;
(5)轻量级隔离,在隔离的同时还提供共享机制,以实现容器与宿主机的资源共享。
3:容器技术的发展过程:
1):chroot技术,新建一个子系统(拥有自己完整的系统文件)
参考资料:https://www.ibm.com/developerworks/cn/linux/l-cn-chroot/
chroot = change root（切换根目录）
作业1:使用chroot监狱限制SSH用户访问指定目录和使用指定命令
https://linux.cn/article-8313-1.html
2):linux容器(lxc) linux container(namespaces 命名空间 隔离环境 及cgroups 资源限制)
cgroups 限制一个进程能够使用的资源。cpu,内存,硬盘io
kvm虚拟机:资源限制(1c 1G 20G)
##需要使用epel源
#安装epel源
yum install epel-release -y
#编译epel源配置文件
vi /etc/yum.repos.d/epel.repo
[epel]
name=Extra Packages for Enterprise Linux 7 - $basearch
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/7/$basearch
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
[epel-debuginfo]
name=Extra Packages for Enterprise Linux 7 - $basearch - Debug
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/7/$basearch/debug
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-7&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=1
[epel-source]
name=Extra Packages for Enterprise Linux 7 - $basearch - Source
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/7/SRPMS
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-7&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=1
##安装lxc
yum install lxc-* -y
yum install libcgroup* -y
yum install bridge-utils.x86_64 -y
##桥接网卡
[root@controller ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
echo 'TYPE=Ethernet
BOOTPROTO=none
NAME=eth0
DEVICE=eth0
ONBOOT=yes
BRIDGE=virbr0' >/etc/sysconfig/network-scripts/ifcfg-eth0
[root@controller ~]# cat /etc/sysconfig/network-scripts/ifcfg-virbr0
echo 'TYPE=Bridge
BOOTPROTO=static
NAME=virbr0
DEVICE=virbr0
ONBOOT=yes
IPADDR=192.168.75.25
NETMASK=255.255.255.0
GATEWAY=192.168.75.2
DNS1=223.5.5.5' >/etc/sysconfig/network-scripts/ifcfg-virbr0
##启动cgroup
systemctl start cgconfig.service
##启动lxc
systemctl start lxc.service
##创建lxc容器
方法1:
lxc-create -t download -n centos6 -- --server mirrors.tuna.tsinghua.edu.cn/lxc-images -d centos -r 6 -a amd64
方法2:
lxc-create -t centos -n test
#####为lxc容器设置root密码:
[root@controller ~]# chroot /var/lib/lxc/test/rootfs passwd
Changing password for user root.
New password:
BAD PASSWORD: it is too simplistic/systematic
BAD PASSWORD: is too simple
Retype new password:
passwd: all authentication tokens updated successfully.
##为容器指定ip和网关（按实际创建的容器名修改路径，此处以名为centos7的容器为例）
vi /var/lib/lxc/centos7/config
lxc.network.name = eth0
lxc.network.ipv4 = 10.0.0.111/24
lxc.network.ipv4.gateway = 10.0.0.254
##启动容器
lxc-start -n centos7
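To check that the container came up and to get a shell in it:
lxc-ls -f                 # list containers with state and IP
lxc-info -n centos7       # detailed state of one container
lxc-attach -n centos7     # open a shell inside the running container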
3): docker containers
centos7.4 2G 10.0.0.11 docker01 hosts entry
centos7.4 2G 10.0.0.12 docker02 hosts entry
Docker uses process-level virtualization (namespaces plus cgroups for cpu, memory, disk io, etc.) to provide resource isolation and security for containers. Because Docker isolates at the operating-system layer, a running Docker container does not need the extra operating-system overhead of a virtual machine (VM), which improves resource utilization.
namespace: resource isolation
cgroups: resource limits for a process
kvm: virtual disk files, isolation
kvm: resource limits (--cpus --memory)
Early docker was a reworking of lxc; later it moved to libcontainer
top
htop
Docker's main goal is "Build, Ship and Run Any App, Anywhere": build, ship, run anywhere
Deploying services always runs into environment problems
Build once, run anywhere
docker is a software packaging technology
Build: make a docker image
Ship: docker pull
Run: start a container
Every container has its own system files (rootfs).
kvm removes the dependency between hardware and the operating system
kvm: independent virtual disk plus an xml config file
docker removes the dependency between software and the operating-system environment, so a standalone service or application gets the same result in different environments.
A docker image has its own file system.
A docker container is a lightweight, portable, self-contained software packaging technology that lets an application run the same way almost anywhere. A container that a developer builds and tests on a laptop can run unchanged on a production VM, physical server or public cloud host.
4: Installing docker
10.0.0.11: set the hostname and add the hosts entries
rm -fr /etc/yum.repos.d/local.repo
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo
sed -i 's#download.docker.com#mirrors.tuna.tsinghua.edu.cn/docker-ce#g' /etc/yum.repos.d/docker-ce.repo
yum install docker-ce -y
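After installing, it helps to start the daemon and enable it at boot before doing anything else; a minimal sketch:
systemctl start docker      # start the docker daemon
systemctl enable docker     # start it automatically at boot
docker version              # confirms the client can reach the server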
5: The main components of docker
docker uses the traditional client/server (CS) architecture, split into the docker client and the docker server, just like mysql
Command: docker version
[root@controller ~]# docker version
Client:
Version: 17.12.0-ce
API version: 1.35
Go version: go1.9.2
Git commit: c97c6d6
Built: Wed Dec 27 20:10:14 2017
OS/Arch: linux/amd64
Server:
Engine:
Version: 17.12.0-ce
API version: 1.35 (minimum version 1.12)
Go version: go1.9.2
Git commit: c97c6d6
Built: Wed Dec 27 20:12:46 2017
OS/Arch: linux/amd64
Experimental: false
docker info (useful when you want to monitor docker)
The main docker components are: images, containers, registries, plus networking and storage
Starting a container requires an image; a registry only stores images
container --- image --- registry
A first taste of docker:
Traditional Nginx installation steps, for comparison:
wget the Nginx source package from the official site
tar (unpack it)
Create the Nginx user
Compile and install
./configure ...
Edit the config file
Start it
6: Starting the first container
## Configure a docker registry mirror
vi /etc/docker/daemon.json
{
"registry-mirrors": ["https://registry.docker-cn.com"]
}
docker run -d -p 80:80 nginx
run (create and run a container)
-d  run in the background
-p  port mapping
nginx  the name of the docker image
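A quick check that the container really came up, run on the docker host itself; note that the daemon.json mirror above only takes effect after a systemctl restart docker, so restart the daemon before pulling:
docker ps              # the nginx container should show 0.0.0.0:80->80/tcp
curl -I 127.0.0.1      # nginx should answer with HTTP/1.1 200 OK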
7: docker image management
Search for images
docker search
Tips for choosing an image:
1. Prefer official images
2. Prefer images with many stars
Pull images
docker pull (push)
Registry mirrors: Aliyun mirror, daocloud mirror, USTC mirror, Docker's official China mirror: https://registry.docker-cn.com
Official pull: docker pull centos:6.8 (if no tag is given, the latest version is pulled by default)
Private registry pull: docker pull daocloud.io/huangzhichong/alpine-cn:latest
## Configure a docker registry mirror
vi /etc/docker/daemon.json
{
"registry-mirrors": ["https://registry.docker-cn.com"]
}
List images
docker images / docker image ls
Delete an image
docker rmi    example: docker image rm centos:latest
Export an image
docker save   example: docker image save centos > docker-centos7.4.tar.gz
Import an image
docker load   example: docker image load -i docker-centos7.4.tar.gz
8: docker container management
docker run -d -p 80:80 nginx:latest
run (create and run a container)
-d  run in the background
-p  port mapping
-v  source path (host):target path (container)
nginx  the name of the docker image
docker run -it --name centos6 centos:6.9 /bin/bash
-it  allocate an interactive terminal
--name  set the container name
/bin/bash overrides the container's initial command
Start a container ***
docker run image_name
docker run -it image_name CMD
docker run ==== docker create + docker start
Stop a container
docker stop CONTAINER_ID
Kill a container
docker kill container_name
List containers
docker ps
docker ps -a
Enter a container (for debugging and troubleshooting)
*** docker exec (allocates a new tty)
docker exec [OPTIONS] CONTAINER COMMAND [ARG...]
docker exec -it <container id or name> /bin/bash (or /bin/sh)
docker attach (attaches to the container's own terminal)
docker attach [OPTIONS] CONTAINER
nsenter (install with yum install -y util-linux; deprecated here)
Delete a container
docker rm
Delete containers in bulk
docker rm -f `docker ps -a -q`
Summary: the first process in a docker container (the initial command) must stay running in the foreground (it must block); otherwise the container goes into the Exited state! See the sketch below.
Running a service in a container: keep the initial command in the foreground and have it start the service
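A minimal sketch of the difference, using the stock centos:6.9 and nginx images:
docker run -d centos:6.9 /bin/bash             # bash has no tty, exits immediately -> container goes to Exited
docker run -d centos:6.9 tail -f /dev/null     # tail blocks in the foreground -> container stays Up
docker run -d nginx                            # the image's initial command keeps nginx in the foreground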
9: Network access to docker containers (port mapping)
docker0: 172.17.0.1  jumpserver: 172.17.0.2  nginx: 172.17.0.3
Explicit mapping (docker automatically adds an iptables rule to implement each port mapping)
-p hostPort:containerPort
-p ip:hostPort:containerPort   useful when several containers all want host port 80
-p ip::containerPort (random host port)
-p hostPort:containerPort/udp
-p 81:80 -p 443:443   multiple -p options can be given
Random mapping
docker run -P (random host ports)
The port mapping is implemented through iptables (see the sketch below)
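The rules docker adds can be inspected directly in the nat table; a minimal sketch (the container IP will differ):
iptables -t nat -vnL DOCKER
# a -p 80:80 mapping appears as a DNAT rule similar to:
# DNAT tcp dpt:80 to:172.17.0.2:80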
10: docker volume management
/usr/share/nginx/html
Persistence
Data volumes (a file or a directory)
-v volume_name:/data
-v src (host directory):dst (container directory)
Data volume containers
--volumes-from (mount the same volumes as an existing container)
Exercise: start one nginx container listening on 80 and 81; port 80 shows the default nginx welcome page, port 81 shows a second site (the fishing-game demo page). See the sketch below.
-p 80:80 -p 81:81 -v xxx:xxx -v xxx:xxxx
Multiple sites on multiple ports from a single nginx container.
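A minimal sketch of one way to do the exercise; the host paths /opt/html81 and /opt/81.conf are assumptions made up for the example:
mkdir -p /opt/html81
echo 'second site on 81' >/opt/html81/index.html
echo 'server { listen 81; root /usr/share/nginx/html81; index index.html; }' >/opt/81.conf
docker run -d -p 80:80 -p 81:81 \
  -v /opt/html81:/usr/share/nginx/html81 \
  -v /opt/81.conf:/etc/nginx/conf.d/81.conf \
  nginx
curl 127.0.0.1        # default nginx welcome page
curl 127.0.0.1:81     # contents of /opt/html81/index.html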
11: Manually saving a container as an image
docker commit <container id or name> <new image name>[:tag, optional]
1): Build an image from a container
docker run -it centos:6.9
######
yum install httpd
yum install openssh-server
/etc/init.d/sshd start
vi /init.sh
#!/bin/bash
/etc/init.d/httpd start
/usr/sbin/sshd -D
chmod +x /init.sh
2) Commit the container as an image
docker commit oldboy centos6-ssh-httpd:v1
3) Test that the image works (see the sketch below)
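A minimal sketch of such a test; the host ports 1022 and 1080 are arbitrary choices for the example:
docker run -d -p 1022:22 -p 1080:80 centos6-ssh-httpd:v1 /init.sh
curl -I 127.0.0.1:1080          # httpd inside the container should answer
ssh root@127.0.0.1 -p 1022      # sshd inside the container should answer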
Hand-built images are large and take a long time to transfer
Image initial command
Exercise: build a kodexplorer web-drive docker image: nginx + php-fpm (or httpd + php)
12: Building docker images automatically with a dockerfile
Like an ansible playbook: only a few KB in size
A hand-built image: hundreds of MB or more
A dockerfile lets you define the container's initial command
Main parts of a dockerfile:
Base image info                  FROM centos:6.9
Image build instructions         RUN yum install openssh-server -y
Command run at container start   CMD ["/bin/bash"]
Common dockerfile instructions:
FROM        specify the base image
MAINTAINER  maintainer info (optional)
LABEL       description, labels
RUN         a command to run while building the image (just prefix the command with RUN)
ADD         copy files into the image (tar archives are unpacked automatically); used to build docker base system images
WORKDIR     set the current working directory (like cd)
VOLUME      define a volume (mount point for a host directory)
EXPOSE      the port the container exposes (used by -P for random host ports)
CMD         what the container runs after it starts (easily overridden)
Other dockerfile instructions:
COPY        copy files (does not unpack), e.g. rootfs.tar.gz
ENV         environment variables
ENTRYPOINT  the command run after the container starts (cannot be overridden; a command given at docker run is passed to it as arguments)
Refer to other dockerfiles for examples:
official dockerfiles or the Tenxcloud image marketplace
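As a reference point, a minimal sketch of a dockerfile that rebuilds the centos6-ssh-httpd image from section 11 automatically; the /opt/dockerfile build directory is an assumption made up for the example:
mkdir -p /opt/dockerfile && cd /opt/dockerfile
cat >init.sh <<'EOF'
#!/bin/bash
/etc/init.d/httpd start
/usr/sbin/sshd -D
EOF
cat >Dockerfile <<'EOF'
FROM centos:6.9
# starting sshd once at build time also generates the ssh host keys
RUN yum install -y httpd openssh-server && /etc/init.d/sshd start
ADD init.sh /init.sh
RUN chmod +x /init.sh
EXPOSE 22 80
CMD ["/init.sh"]
EOF
docker build -t centos6-ssh-httpd:v2 .
docker run -d -p 1023:22 -p 1081:80 centos6-ssh-httpd:v2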
13: docker image layers (like kvm linked clones: copy-on-write)
Benefits of image layering: reuse, saving disk space; identical content only has to be loaded into memory once.
After a dockerfile change, rebuilding is fast because unchanged layers are cached
14: Linking containers (--link is one-directional!!!)
docker run -d -p 80:80 nginx
docker run -it --link quirky_brown:web01 qstack/centos-ssh /bin/bash
ping web01
lb ---> nginx 172.17.0.4 --> db01 172.17.0.3
--> nfs01 172.17.0.2
Running zabbix-server with docker
docker run --name mysql-server -t \
-e MYSQL_DATABASE="zabbix" \
-e MYSQL_USER="zabbix" \
-e MYSQL_PASSWORD="zabbix_pwd" \
-e MYSQL_ROOT_PASSWORD="root_pwd" \
-d mysql:5.7 \
--character-set-server=utf8 --collation-server=utf8_bin
docker run --name zabbix-java-gateway -t \
-d zabbix/zabbix-java-gateway:latest
docker run --name zabbix-server-mysql -t \
-e DB_SERVER_HOST="mysql-server" \
-e MYSQL_DATABASE="zabbix" \
-e MYSQL_USER="zabbix" \
-e MYSQL_PASSWORD="zabbix_pwd" \
-e MYSQL_ROOT_PASSWORD="root_pwd" \
-e ZBX_JAVAGATEWAY="zabbix-java-gateway" \
--link mysql-server:mysql \
--link zabbix-java-gateway:zabbix-java-gateway \
-p 10051:10051 \
-d zabbix/zabbix-server-mysql:latest
docker run --name zabbix-web-nginx-mysql -t \
-e DB_SERVER_HOST="mysql-server" \
-e MYSQL_DATABASE="zabbix" \
-e MYSQL_USER="zabbix" \
-e MYSQL_PASSWORD="zabbix_pwd" \
-e MYSQL_ROOT_PASSWORD="root_pwd" \
--link mysql-server:mysql \
--link zabbix-server-mysql:zabbix-server \
-p 80:80 \
-d zabbix/zabbix-web-nginx-mysql:latest
Monitoring alerts: WeChat alerts, alpine image
Installing zabbix with yum also works fine
17: docker-compose (a single-host container orchestration tool)
Works like an ansible playbook
yum install -y python2-pip   (requires the epel repo)
pip install docker-compose   (the default pypi source is outside China)
## pip acceleration
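A minimal sketch of pointing pip at a domestic mirror (the tsinghua URL is just one common choice):
pip install docker-compose -i https://pypi.tuna.tsinghua.edu.cn/simple/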
## Detailed directive reference:
http://www.jianshu.com/p/2217cfed29d7
cd my_wordpress/
vi docker-compose.yml
version: '3'
services:
db:
image: mysql:5.7
volumes:
- db_data:/var/lib/mysql
restart: always
environment:
MYSQL_ROOT_PASSWORD: somewordpress
MYSQL_DATABASE: wordpress
MYSQL_USER: wordpress
MYSQL_PASSWORD: wordpress
wordpress:
depends_on:
- db
image: wordpress:latest
volumes:
- web_data:/var/www/html
ports:
- "80:80"
restart: always
environment:
WORDPRESS_DB_HOST: db:3306
WORDPRESS_DB_USER: wordpress
WORDPRESS_DB_PASSWORD: wordpress
volumes:
db_data:
web_data:
# Start in the foreground
docker-compose up
# Start in the background
docker-compose up -d
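A few everyday docker-compose commands, run from the directory holding docker-compose.yml; a minimal sketch:
docker-compose ps           # list the containers of this project
docker-compose logs -f db   # follow the logs of the db service
docker-compose stop         # stop the containers without removing them
docker-compose down         # stop and remove the containers and the default network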
18: Preventing all containers from exiting when the docker service restarts
Method 1: docker run --restart=always
Method 2: "live-restore": true
Reference /etc/docker/daemon.json for the docker server:
{
"registry-mirrors": ["http://b7a9017d.m.daocloud.io"],
"insecure-registries":["10.0.0.11:5000"],
"live-restore": true
}
Docker network types
None: no networking is configured for the container, --net=none
Container: share the Network Namespace of another running container, --net=container:containerID (used by K8S)
Host: share the Network Namespace with the host, --net=host
Bridge: Docker's default NAT network model
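A minimal sketch of trying the host and none types with the images already used above:
docker run -d --net=host nginx              # nginx binds straight onto the host's network stack, no -p needed
docker run -it --net=none busybox ip a      # only the lo interface exists inside the container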
21: Cross-host container communication with macvlan
By default one physical NIC has a single MAC address; macvlan virtualizes multiple MAC addresses on top of it
## Create the macvlan network
docker network create --driver macvlan --subnet 10.0.0.0/24 --gateway 10.0.0.254 -o parent=eth0 macvlan_1
## Put the eth0 NIC into promiscuous mode
ip link set eth0 promisc on
## Create a container that uses the macvlan network
docker run -it --network macvlan_1 --ip=10.0.0.200 busybox
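To actually see cross-host traffic, the same macvlan network has to be created on the second docker host with a container on a different IP; a minimal sketch (the IPs are arbitrary):
# on docker02
docker network create --driver macvlan --subnet 10.0.0.0/24 --gateway 10.0.0.254 -o parent=eth0 macvlan_1
docker run -it --network macvlan_1 --ip=10.0.0.201 busybox
ping 10.0.0.200               # should reach the container created on docker01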
docker registry (private registry)
## A plain registry
docker run -d -p 5000:5000 --restart=always --name registry -v /opt/myregistry:/var/lib/registry registry
Push an image to the private registry:
a: tag the image
docker tag centos6-sshd:v3 10.0.0.11:5000/centos6-sshd:v3
b: push the image
docker push 10.0.0.11:5000/centos6-sshd:v3
docker run -d 10.0.0.11:5000/centos6-sshd:v3
If you hit this error:
The push refers to repository [10.0.0.11:5000/centos6.9_ssh]
Get https://10.0.0.11:5000/v2/: http: server gave HTTP response to HTTPS client
Solution:
vim /etc/docker/daemon.json
{
"insecure-registries": ["10.0.0.11:5000"]
}
systemctl restart docker
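A quick check from another docker host whose daemon.json also lists 10.0.0.11:5000 under insecure-registries; a minimal sketch:
docker pull 10.0.0.11:5000/centos6-sshd:v3      # pull from the private registry
curl 10.0.0.11:5000/v2/_catalog                 # list the repositories stored in the registry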
22: Cross-host container communication with an overlay network
http://www.cnblogs.com/CloudMan6/p/7270551.html
1) Preparation
On docker01:
docker run -d -p 8500:8500 -h consul --name consul progrium/consul -server -bootstrap
-h sets the container's hostname
consul: a key/value store (key:value)
On docker01 and docker02:
vim /etc/docker/daemon.json
{
"hosts":["tcp://0.0.0.0:2376","unix:///var/run/docker.sock"],
"cluster-store": "consul://10.0.0.13:8500",
"cluster-advertise": "10.0.0.11:2376"
}
vim /usr/lib/systemd/system/docker.service
systemctl daemon-reload
systemctl restart docker
2) Create the overlay network
docker network create -d overlay --subnet 172.16.1.0/24 --gateway 172.16.1.254 ol1
3) Start a container to test
docker run -it --network ol1 --name oldboy01 busybox /bin/sh
Each container gets two NICs: eth0 carries container-to-container traffic, eth1 gives the container access to the external network
23: harbor, an enterprise-grade docker image registry (from the vmware China team)
Step 1: install docker and docker-compose
Step 2: download harbor-offline-installer-v1.3.0.tgz
Step 3: upload it to /opt and unpack it
Step 4: edit the harbor.cfg config file
hostname = 10.0.0.11
harbor_admin_password = 123456
Step 5: run install.sh
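Pushing to harbor then works like pushing to any other registry, except that a docker login is needed and the image path must include a harbor project (library is harbor's default public project); a minimal sketch:
docker login 10.0.0.11 -u admin -p 123456
docker tag nginx 10.0.0.11/library/nginx:v1
docker push 10.0.0.11/library/nginx:v1
Since harbor is served over plain http here, 10.0.0.11 also has to be added to insecure-registries in /etc/docker/daemon.json first.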
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
echo "1" >/proc/sys/net/bridge/bridge-nf-call-iptables
cd /etc/yum.repos.d/
cat>>kubernetes.repo<<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install kubectl-1.14.3 kubelet-1.14.3 kubeadm-1.14.3 -y
systemctl start kubelet && systemctl enable kubelet
minikube start --kubernetes-version v1.14.3 --vm-driver=none
If anything goes wrong, reset the cluster and then reinstall
Delete command:
minikube delete
kubectl run --image=nginx:alpine nginx-app --port=80     start an nginx pod
Bind a svc (cluster-internal)
kubectl expose deploy/nginx-app --port 80
Verify from inside the cluster
curl <the svc cluster IP>
Once verified, delete that svc
kubectl delete svc nginx-app
Then create a svc reachable from outside on a random NodePort
kubectl expose deploy/nginx-app --type=NodePort --port 80
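A minimal sketch of finding the allocated NodePort and hitting it from outside the cluster:
kubectl get svc nginx-app        # the PORT(S) column shows something like 80:3XXXX/TCP
curl http://<node ip>:<that 3XXXX NodePort>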
describe commands
kubectl describe node
kubectl describe pod
kubectl describe svc
logs command
kubectl logs
scale command
kubectl scale --replicas=3 deploy/nginx-app
delete command
kubectl delete deploy test
Online yaml validators:
http://www.bejson.com/validators/yaml_editor/
http://www.yamllint.com/
++++++++++++++++++++++++++++++++++++++++++
Retag the locally loaded images to the k8s.gcr.io names that kubeadm expects:
docker tag d4e7de4ee6a8 k8s.gcr.io/kube-apiserver:v1.18.12
docker tag 37efdbf07b2a k8s.gcr.io/kube-controller-manager:v1.18.12
docker tag fb649979593e k8s.gcr.io/kube-scheduler:v1.18.12
docker tag 06f1dd86004c k8s.gcr.io/kube-proxy:v1.18.12
docker tag 80d28bedfe5d k8s.gcr.io/pause:3.2
docker tag 303ce5db0e90 k8s.gcr.io/etcd:3.4.3-0
docker tag 67da37a9a360 k8s.gcr.io/coredns:1.6.7
kubectl label no node-01 node-role.kubernetes.io/bus=true
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1.1、 Exec
apiVersion: v1
kind: Pod
metadata:
name: probe-exec
  namespace: default
spec:
containers:
- name: nginx
image: nginx
livenessProbe:
exec:
command:
- cat
- /tmp/health
initialDelaySeconds: 5
timeoutSeconds: 1
+++++++++++++++++++++++++++++++++++++++++++++
1.2、 TCPSocket
apiVersion: v1
kind: Pod
metadata:
name: probe-tcp
namespace: default
spec:
containers:
- name: nginx
image: nginx
livenessProbe:
initialDelaySeconds: 5
timeoutSeconds: 1
tcpSocket:
port: 80
++++++++++++++++++++++++++++
1.3、 HTTPGet
apiVersion: v1
kind: Pod
metadata:
name: probe-http
namespace: default
spec:
containers:
- name: nginx
image: nginx
livenessProbe:
httpGet:
path: /
port: 80
host: 127.0.0.1
scheme: HTTP
initialDelaySeconds: 5
timeoutSeconds: 1
+++++++++++++++++++++++++++++++++++++++++++++++++++++
1.4、 Parameter details
failureThreshold: the minimum number of consecutive failed probes before the probe is considered failed
initialDelaySeconds: how many seconds after the container starts before liveness probing begins; if omitted, probing starts immediately
periodSeconds: how often the probe runs, in seconds; default 10, minimum 1
successThreshold: after a failure, the minimum number of consecutive successful probes before the probe is considered successful (for a liveness probe this must be 1; minimum 1)
timeoutSeconds: the timeout for each probe; default 1 second, minimum 1 second
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
kubectl create clusterrolebinding system:anonymous --clusterrole=cluster-admin --user=system:anonymous
++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Adding nodes to a k8s cluster:
1. Prepare the node environment (install docker, disable selinux, stop the firewall, flush iptables, disable swap, add hosts entries, set the hostname, set up passwordless ssh from the master to the node, install kubelet, install kubeadm, load the images)
2. Generate the join command with a token, on the master node:
kubeadm token create --print-join-command
3. Join the node to the cluster:
Copy the join command generated above; it looks like this:
kubeadm join 192.168.75.30:6443 --token vh73m3.pvqogjca8yld3554 --discovery-token-ca-cert-hash sha256:223bdaf56880d1d34b7d629819555a16828aba89038cfb139d9cd4e2009890cb
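Back on the master, a minimal check that the new node registered:
kubectl get nodes      # the new node appears, NotReady at first, then Ready once the network plugin is running on it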
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
https://kubernetes.io/zh/docs/concepts/scheduling-eviction/assign-pod-node/
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
3.2、 PV access modes (accessModes)
ReadWriteOnce (RWO): read-write, but can only be mounted by a single node.
ReadOnlyMany (ROX): read-only, can be mounted by multiple nodes.
ReadWriteMany (RWX): read-write by multiple nodes; the storage can be shared read-write across nodes. Not every storage backend supports all three modes; shared modes in particular are supported by relatively few backends, NFS being the most common. When a PVC is bound to a PV, two conditions are normally used for matching: the storage size and the access mode.
3.3、 PV reclaim policies (persistentVolumeReclaimPolicy)
Retain: do not clean up, keep the Volume (manual cleanup required)
Recycle: delete the data, i.e. rm -rf /thevolume/* (only NFS and HostPath support this)
Delete: delete the underlying storage resource, e.g. delete the AWS EBS volume (only AWS EBS, GCE PD, Azure Disk and Cinder support this)
3.4、 PV states
Available: free, not yet bound.
Bound: already bound to a PVC.
Released: the PVC has been deleted but the reclaim policy has not yet run.
Failed: an error occurred.
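To make the binding conditions concrete, a minimal sketch of an NFS-backed PV plus a PVC that matches it on size and access mode; the NFS server 10.0.0.11 and the export /data/nfs are assumptions made up for the example:
cat >pv-pvc.yaml <<'EOF'
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-nfs
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 10.0.0.11
    path: /data/nfs
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-nfs
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
EOF
kubectl apply -f pv-pvc.yaml
kubectl get pv,pvc        # the PV should move from Available to Bound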
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Using a configmap: normally a configmap is consumed by mounting it into the pod.
[root@kubernetes-master-01 configmap]# cat test.yaml
kind: ConfigMap
apiVersion: v1
metadata:
name: configmap-yaml
labels:
app: configmap
data:
key: value
nginx_config: |-
upstream tomcatserver1 {
server 192.168.72.49:8081;
}
server {
listen 80;
server_name 8081.max.com;
location / {
proxy_pass http://tomcatserver1;
index index.html index.htm;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}
---
kind: Pod
apiVersion: v1
metadata:
name: configmap-pod
labels:
app: configmap-pod
spec:
containers:
- name: nginx
image: nginx
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
volumeMounts:
- mountPath: /usr/share/nginx/demo
name: conf
volumes:
- name: conf
configMap:
name: configmap-yaml
items:
- key: nginx_config
path: nginx_config
- key: key
path: key
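A minimal sketch of applying it and checking that the configmap data shows up as files inside the pod:
kubectl apply -f test.yaml
kubectl get configmap configmap-yaml -o yaml
kubectl exec -it configmap-pod -- ls /usr/share/nginx/demo           # should list key and nginx_config
kubectl exec -it configmap-pod -- cat /usr/share/nginx/demo/nginx_config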
++++++++++++++++++++++++++++++++++++++
-javaagent:E:\Software\pycharm\PyCharm 2020.1\lib\jetbrains-agent.jar
https://www.cnblogs.com/xuexianqi/p/12767075.html