1. System initialization
init.sh
#!/usr/bin/bash
echo "关闭防火墙:firewalld"
systemctl disable firewalld && systemctl stop firewalld
echo "关闭selinux"
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
echo "设置主机名"
read -p "Enter hostname(`hostname`):" hostname
if [[ $hostname ]];then
hostnamectl set-hostname $hostname
fi
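#Quick post-run check (a sketch, not in the original script)
getenforce                      # expect Permissive now, Disabled after a reboot
systemctl is-enabled firewalld  # expect disabled
hostnamectl status              # confirm the new hostname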
2. Pre-installation preparation
2.1. Configure the yum repositories in advance
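For example, a minimal Train repo file might look like the sketch below; the baseurl is an assumption, so point it at your own mirror or local repository:
cat > /etc/yum.repos.d/openstack-train.repo <<'EOF'
[openstack-train]
name=OpenStack Train
baseurl=http://mirrors.aliyun.com/centos/7/cloud/x86_64/openstack-train/
gpgcheck=0
enabled=1
EOF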
2.2. Install the time synchronization service
2.2.1. Controller node
chrony-install-controller.sh
#!/usr/bin/bash
echo "安装chrony"
yum install chrony -y
#注释掉其他的时间同步服务器,以自己为服务器进行时间同步
echo "修改配置文件/etc/chronyc.conf"
sed -ri 's/(^server.*)/#\1/' /etc/chrony.conf
#匹配最后一行,在后面加入server controller iburst
endrow=`sed -rn '/^#server.*/=' /etc/chrony.conf |sed -n "$"p`
sed -ri "$endrow"a"server controller iburst" /etc/chrony.conf
#设置允许同步的服务器 (客户端不需要这行)
sed -ri /^#allow.*/a"allow 192.100.0.0/16" /etc/chrony.conf
#(客户端不需要这行)
sed -ri /^#local.*/a"local stratum 10" /etc/chrony.conf
#启动
systemctl enable chronyd.service && systemctl restart chronyd.service
#手动同步
#ntpdate controller
#验证
echo "验证"
chronyc sources
#配置openstack源
#yum install centos-release-openstack-train -y && yum upgrade -y
yum install openstack-selinux -y
2.2.2. Other nodes
chrony-install-other.sh
#!/usr/bin/bash
echo "安装chrony"
yum install chrony -y
# Comment out the default time servers and sync from the controller instead
sed -ri 's/(^server.*)/#\1/' /etc/chrony.conf
# Find the last commented-out server line and append "server controller iburst" after it
endrow=`sed -rn '/^#server.*/=' /etc/chrony.conf | sed -n '$p'`
sed -ri "${endrow}a server controller iburst" /etc/chrony.conf
# Start the service
systemctl enable chronyd.service && systemctl restart chronyd.service
# Verify
echo "Verifying"
chronyc sources
# Configure the OpenStack repository
#yum install centos-release-openstack-train -y && yum upgrade -y
yum install openstack-selinux -y
3. Install base components: mariadb, rabbitmq, memcached
mariadb-rabbitmq-memcached-install.sh
#!/bin/bash
echo "================安装SQL数据库:mariadb=================="
yum install mariadb mariadb-server python2-PyMySQL -y
cp /etc/my.cnf /etc/my.cnf.bak
sed -ri '/^\[mysqld\]/'a"bind-address = 0.0.0.0\n\
default-storage-engine = innodb\n\
innodb_file_per_table = on\n\
max_connections = 4096\n\
collation-server = utf8_general_ci\n\
character-set-server = utf8" /etc/my.cnf
echo "================启动mariadb服务=================="
systemctl enable mariadb.service && systemctl restart mariadb.service && systemctl status mariadb.service
echo "================rabbitma安装=================="
yum install rabbitmq-server -y
systemctl enable rabbitmq-server.service && systemctl start rabbitmq-server.service && systemctl status rabbitmq-server.service
rabbitmqctl add_user openstack openstack
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
rabbitmqctl status
echo "================ Installing memcached ================"
yum install memcached python-memcached -y
sed -ri 's/-l 127.0.0.1,::1/-l 127.0.0.1,::1,controller/g' /etc/sysconfig/memcached
systemctl enable memcached.service && systemctl start memcached.service
systemctl status memcached
echo "================mysql_secure_installation 进行数据库设置=================="
安装完需要mysql_secure_installation 进行数据库设置
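If you prefer to script that step rather than answer the prompts, a non-interactive sketch (assumes the root password 123456 used by the later scripts):
mysql -uroot <<'SQL'
UPDATE mysql.user SET Password=PASSWORD('123456') WHERE User='root';
DELETE FROM mysql.user WHERE User='';
DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost','127.0.0.1','::1');
DROP DATABASE IF EXISTS test;
FLUSH PRIVILEGES;
SQL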
4. Install glance
glance-install-controller.sh
#!/bin/bash
# Create the glance database
mysql -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS glance;GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'glance';GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance';"
mysql -uroot -p123456 -e "show databases;"
# Create the user, service, and endpoints
# Load admin credentials
source /root/admin-openrc.sh
echo "创建glance用户"
openstack user create --domain default --password glance glance
echo "给glance用户添加角色权限"
openstack role add --project service --user glance admin
echo "创建glance image服务"
openstack service create --name glance --description "OpenStack Image" image
echo "添加public、internal和admin endpoint"
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
# Install
yum install openstack-glance -y
cp /etc/glance/glance-api.conf /etc/glance/glance-api.conf.bak
#sed -ri '/^[ \t]*(#|$)/d' /etc/glance/glance-api.conf
tee /etc/glance/glance-api.conf <<-EOF
[database]
# ...
connection = mysql+pymysql://glance:glance@controller/glance
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = glance
[paste_deploy]
# ...
flavor = keystone
[glance_store]
# ...
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
EOF
# Sync the database
su -s /bin/sh -c "glance-manage db_sync" glance
# Start the service
systemctl enable openstack-glance-api.service && systemctl restart openstack-glance-api.service
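#Optional verification (not in the original script): upload a test image and
#list it; the CirrOS version and URL are assumptions, any small image works
wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
openstack image create "cirros" --file cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --public
openstack image list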
5. Install placement
placement-install-controller.sh
#!/bin/bash
# Create the placement database
mysql -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS placement;GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'placement';GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'placement';"
mysql -uroot -p123456 -e "show databases;"
# Create the user, service, and endpoints
# Load admin credentials
source /root/admin-openrc.sh
echo "Creating the placement user"
openstack user create --domain default --password placement placement
echo "Adding the admin role"
openstack role add --project service --user placement admin
echo "Creating the placement service"
openstack service create --name placement --description "Placement API" placement
echo "Creating endpoints"
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
# Install
echo "Installing openstack-placement-api"
yum install openstack-placement-api -y
cp /etc/placement/placement.conf /etc/placement/placement.conf.bak
echo "修改配置文件"
tee /etc/placement/placement.conf <<-EOF
[placement_database]
# ...
connection = mysql+pymysql://placement:placement@controller/placement
[api]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = placement
EOF
# Sync the database
echo "Syncing the database"
su -s /bin/sh -c "placement-manage db sync" placement
# Restart the httpd service
echo "Restarting httpd"
systemctl restart httpd
echo "Verifying"
placement-status upgrade check
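#Optional deeper check, as in the Train install guide: install the
#osc-placement CLI plugin and list the resource classes
yum install python2-osc-placement -y
openstack --os-placement-api-version 1.2 resource class list --sort-column name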
6. Install nova
6.1. Controller node
nova-install-controller.sh
Note: **set my_ip in /etc/nova/nova.conf to this node's management IP**
#!/bin/bash
echo "创建数据库"
mysql -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS nova_api;CREATE DATABASE IF NOT EXISTS nova;CREATE DATABASE IF NOT EXISTS nova_cell0;GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova';GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova';GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova';"
mysql -uroot -p123456 -e "show databases;"
echo "模拟登录"
source /root/admin-openrc.sh
echo "创建用户"
openstack user create --domain default --password nova nova
echo "添加role"
openstack role add --project service --user nova admin
echo "创建compute服务"
openstack service create --name nova --description "OpenStack Compute" compute
echo "创建endpoint"
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
echo "安装nova-api、nova-conductor、nova-novncproxy、nova-scheduler"
yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler -y
cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
tee /etc/nova/nova.conf <<-EOF
[DEFAULT]
# ...
enabled_apis = osapi_compute,metadata
my_ip = 192.100.5.137
transport_url = rabbit://openstack:openstack@controller:5672/
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
# ...
connection = mysql+pymysql://nova:nova@controller/nova_api
[database]
# ...
connection = mysql+pymysql://nova:nova@controller/nova
[api]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[vnc]
enabled = true
# ...
server_listen = \$my_ip
server_proxyclient_address = \$my_ip
[glance]
# ...
api_servers = http://controller:9292
[oslo_concurrency]
# ...
lock_path = /var/lib/nova/tmp
[placement]
# ...
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
EOF
echo "同步数据库"
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
echo "验证"
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
echo "启动"
systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl status openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
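#A quick sanity check (not in the original script): nova-conductor and
#nova-scheduler should report state "up"
echo "Verifying compute services"
openstack compute service list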
6.2. Compute node
nova-install-computer.sh
#!/bin/bash
echo "安装"
yum install openstack-nova-compute -y
if [[ ! -f /etc/nova/nova.conf.bak ]];then
cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
fi
tee /etc/nova/nova.conf <<-EOF
[DEFAULT]
# ...
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:openstack@controller
my_ip = 192.100.5.138
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[vnc]
# ...
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = \$my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[glance]
# ...
api_servers = http://controller:9292
[oslo_concurrency]
# ...
lock_path = /var/lib/nova/tmp
[placement]
# ...
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
EOF
echo "判断是否支持硬件加速"
core=`egrep -c '(vmx|svm)' /proc/cpuinfo`
if [[ $core == 0 ]]; then
tee -a /etc/nova/nova.conf <<-EOF
[libvirt]
virt_type = qemu
EOF
fi
echo "启动"
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl restart libvirtd.service openstack-nova-compute.service
systemctl status libvirtd.service openstack-nova-compute.service
# Fix for the 403 error from nova-status upgrade check (add to /etc/httpd/conf.d/00-placement-api.conf on the controller)
#tee -a /etc/httpd/conf.d/00-placement-api.conf <<-EOF
# <Directory /usr/bin>
# <IfVersion >= 2.4>
# Require all granted
# </IfVersion>
# <IfVersion < 2.4>
# Order allow,deny
# Allow from all
# </IfVersion>
# </Directory>
#EOF
echo "控制器节点上运行:openstack compute service list --service nova-compute"
echo "控制器节点上运行:su -s /bin/sh -c 'nova-manage cell_v2 discover_hosts --verbose' nova"
echo "验证:openstack compute service list"
echo "验证:nova-status upgrade check"
7. Install neutron
7.1. Controller node
neutron-install-controller.sh
#!/bin/bash
echo "创建数据库"
mysql -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS neutron;GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron';GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';show databases;"
echo "创建用户"
openstack user create --domain default --password neutron neutron
echo "添加角色"
openstack role add --project service --user neutron admin
echo "创建network服务"
openstack service create --name neutron --description "OpenStack Networking" network
echo "创建endpoint"
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
echo "选择:自助网络配置"
echo "安装"
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y
if [[ ! -f /etc/neutron/neutron.conf.bak ]];then
cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
fi
tee /etc/neutron/neutron.conf <<-EOF
[database]
# ...
connection = mysql+pymysql://neutron:neutron@controller/neutron
[DEFAULT]
# ...
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:openstack@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[nova]
# ...
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova
[oslo_concurrency]
# ...
lock_path = /var/lib/neutron/tmp
EOF
echo "模块化第 2 层 (ML2) 插件"
if [[ ! -f /etc/neutron/plugins/ml2/ml2_conf.ini.bak ]];then
cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bak
fi
tee /etc/neutron/plugins/ml2/ml2_conf.ini <<-EOF
[ml2]
# ...
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
[ml2_type_flat]
# ...
flat_networks = provider
[ml2_type_vxlan]
# ...
vni_ranges = 1:1000
[securitygroup]
# ...
enable_ipset = true
EOF
echo "Linux 网桥代理"
if [[ ! -f /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak ]];then
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
fi
tee /etc/neutron/plugins/ml2/linuxbridge_agent.ini <<-EOF
[linux_bridge]
physical_interface_mappings = provider:ens32
[vxlan]
enable_vxlan = true
# Replace MANAGEMENT_INTERFACE_IP with this node's management interface IP
local_ip = MANAGEMENT_INTERFACE_IP
l2_population = true
[securitygroup]
# ...
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
EOF
echo "启用网络网桥支持"
modprobe br_netfilter
echo "查看net.bridge.bridge-nf-call-iptables和net.bridge.bridge-nf-call-ip6tables的值是否为1"
sysctl net.bridge.bridge-nf-call-iptables=1
sysctl net.bridge.bridge-nf-call-ip6tables=1
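#Optional (an assumption, not in the original guide): persist the module load
#and sysctl settings across reboots
#echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
#printf 'net.bridge.bridge-nf-call-iptables = 1\nnet.bridge.bridge-nf-call-ip6tables = 1\n' > /etc/sysctl.d/99-bridge-nf.conf
#sysctl --system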
echo "配置第3层代理"
if [[ ! -f /etc/neutron/l3_agent.ini.bak ]];then
cp /etc/neutron/l3_agent.ini /etc/neutron/l3_agent.ini.bak
fi
tee /etc/neutron/l3_agent.ini <<-EOF
[DEFAULT]
# ...
interface_driver = linuxbridge
EOF
echo "配置 DHCP 代理"
if [[ ! -f /etc/neutron/dhcp_agent.ini.bak ]];then
cp /etc/neutron/dhcp_agent.ini /etc/neutron/dhcp_agent.ini.bak
fi
tee /etc/neutron/dhcp_agent.ini <<-EOF
[DEFAULT]
# ...
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = True
EOF
echo "配置元数据代理"
if [[ ! -f /etc/neutron/metadata_agent.ini.bak ]];then
cp /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini.bak
fi
tee /etc/neutron/metadata_agent.ini <<-EOF
[DEFAULT]
# ...
nova_metadata_host = controller
metadata_proxy_shared_secret = METADATA_SECRET
EOF
echo "将计算服务配置为使用网络服务"
tee -a /etc/nova/nova.conf <<-EOF
[neutron]
# ...
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = METADATA_SECRET
EOF
echo "数据库同步"
if [[ ! -f /etc/neutron/plugin.ini ]];then
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
fi
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
echo "启动"
systemctl restart openstack-nova-api.service && systemctl status openstack-nova-api.service
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl status neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
echo "网络选项 2,还启用和启动第 3 层服务"
systemctl enable neutron-l3-agent.service
systemctl restart neutron-l3-agent.service
systemctl status neutron-l3-agent.service
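#A final check (not in the original script): the linuxbridge, DHCP, metadata
#and L3 agents should all show as alive
echo "Verifying network agents"
openstack network agent list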
7.2. Compute node
neutron-install-computer.sh
#!/bin/bash
echo "安装"
yum install openstack-neutron-linuxbridge ebtables ipset -y
if [[ ! -f /etc/neutron/neutron.conf.bak ]];then
cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
fi
tee /etc/neutron/neutron.conf <<-EOF
[DEFAULT]
# ...
transport_url = rabbit://openstack:openstack@controller
auth_strategy = keystone
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[oslo_concurrency]
# ...
lock_path = /var/lib/neutron/tmp
EOF
echo "选择:自助网络"
if [[ ! -f /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak ]];then
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
fi
tee /etc/neutron/plugins/ml2/linuxbridge_agent.ini <<-EOF
[linux_bridge]
physical_interface_mappings = provider:ens33
[vxlan]
enable_vxlan = true
# Replace MANAGEMENT_INTERFACE_IP with this node's management interface IP
local_ip = MANAGEMENT_INTERFACE_IP
l2_population = true
[securitygroup]
# ...
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
EOF
echo "启用网桥支持"
modprobe br_netfilter
echo "查看net.bridge.bridge-nf-call-iptables和net.bridge.bridge-nf-call-ip6tables的值是否为1"
sysctl net.bridge.bridge-nf-call-iptables=1
sysctl net.bridge.bridge-nf-call-ip6tables=1
echo "将计算服务配置为使用网络服务"
tee -a /etc/nova/nova.conf <<-EOF
[neutron]
# ...
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
EOF
echo "启动"
systemctl restart openstack-nova-compute.service && systemctl status openstack-nova-compute.service
systemctl enable neutron-linuxbridge-agent.service && systemctl restart neutron-linuxbridge-agent.service && systemctl status neutron-linuxbridge-agent.service
echo "验证:openstack extension list --network"
echo "验证提供商网络:openstack network agent list"
8. Install cinder
8.1. Controller node
cinder-install-controller.sh
#!/bin/bash
echo "数据库配置"
mysql -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS cinder;GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'cinder';GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinder';"
mysql -uroot -p123456 -e "show databases;"
echo "模拟登录"
source /root/admin-openrc.sh
echo "创建用户"
openstack user create --domain default --password cinder cinder
echo "添加role"
openstack role add --project service --user cinder admin
echo "创建cinderv2服务"
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
echo "创建cinderv3服务"
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
echo "创建endpoint"
openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(project_id\)s
echo "安装"
yum install openstack-cinder -y
if [[ ! -f /etc/cinder/cinder.conf.bak ]];then
cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
fi
tee /etc/cinder/cinder.conf <<-EOF
[database]
# ...
connection = mysql+pymysql://cinder:cinder@controller/cinder
[DEFAULT]
# ...
transport_url = rabbit://openstack:openstack@controller
auth_strategy = keystone
my_ip = 192.100.5.137
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder
[oslo_concurrency]
# ...
lock_path = /var/lib/cinder/tmp
EOF
echo "同步数据库"
su -s /bin/sh -c "cinder-manage db sync" cinder
echo "将计算配置为使用块存储"
tee -a /etc/nova/nova.conf <<-EOF
[cinder]
os_region_name = RegionOne
EOF
echo "启动"
systemctl restart openstack-nova-api.service
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service && systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl status openstack-cinder-api.service openstack-cinder-scheduler.service
8.2. Storage node
A spare disk must be prepared and the cinder-volumes volume group created in advance; a sketch follows.
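A minimal sketch of that preparation, assuming the spare disk is /dev/sdb (adjust the device name; add an lvm.conf filter if other LVM volumes share the node):
yum install lvm2 device-mapper-persistent-data -y
systemctl enable lvm2-lvmetad.service && systemctl start lvm2-lvmetad.service
pvcreate /dev/sdb
vgcreate cinder-volumes /dev/sdb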
cinder-install-block.sh
#!/bin/bash
echo "安装cinder"
yum install openstack-cinder targetcli python-keystone -y
if [[ ! -f /etc/cinder/cinder.conf.bak ]];then
cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
fi
tee /etc/cinder/cinder.conf <<-EOF
[database]
# ...
connection = mysql+pymysql://cinder:cinder@controller/cinder
[DEFAULT]
# ...
transport_url = rabbit://openstack:openstack@controller
auth_strategy = keystone
my_ip = 192.168.189.153
enabled_backends = lvm
glance_api_servers = http://controller:9292
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = lioadm
[oslo_concurrency]
# ...
lock_path = /var/lib/cinder/tmp
EOF
echo "启动"
systemctl enable openstack-cinder-volume.service target.service && systemctl restart openstack-cinder-volume.service target.service
systemctl status openstack-cinder-volume.service target.service
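Finally, verify from the controller that both block storage services registered:
source /root/admin-openrc.sh
openstack volume service list    # cinder-scheduler and cinder-volume should report state "up"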