1、系统初始化

init.sh

  1. #!/usr/bin/bash
  2. echo "关闭防火墙:firewalld"
  3. systemctl disable firewalld && systemctl stop firewalld
  4. echo "关闭selinux"
  5. setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
  6. echo "设置主机名"
  7. read -p "Enter hostname(`hostname`):" hostname
  8. if [[ $hostname ]];then
  9. hostnamectl set-hostname $hostname
  10. fi

(需要手动配置域名映射)

2、安装前准备

2.1、提前配置好yum源

2.2、安装时间同步服务

2.2.1、控制节点

chrony-install-controller.sh

  1. #!/usr/bin/bash
  2. echo "安装chrony"
  3. yum install chrony -y
  4. #注释掉其他的时间同步服务器,以自己为服务器进行时间同步
  5. echo "修改配置文件/etc/chronyc.conf"
  6. sed -ri 's/(^server.*)/#\1/' /etc/chrony.conf
  7. #匹配最后一行,在后面加入server controller iburst
  8. endrow=`sed -rn '/^#server.*/=' /etc/chrony.conf |sed -n "$"p`
  9. sed -ri "$endrow"a"server controller iburst" /etc/chrony.conf
  10. #设置允许同步的服务器 (客户端不需要这行)
  11. sed -ri /^#allow.*/a"allow 192.100.0.0/16" /etc/chrony.conf
  12. #(客户端不需要这行)
  13. sed -ri /^#local.*/a"local stratum 10" /etc/chrony.conf
  14. #启动
  15. systemctl enable chronyd.service && systemctl restart chronyd.service
  16. #手动同步
  17. #ntpdate controller
  18. #验证
  19. echo "验证"
  20. chronyc sources
  21. #配置openstack源
  22. #yum install centos-release-openstack-train -y && yum upgrade -y
  23. yum install openstack-selinux -y

2.2.2、其他节点

chrony-install-other.sh

  1. #!/usr/bin/bash
  2. echo "安装chrony"
  3. yum install chrony -y
  4. #注释掉其他的时间同步服务器,以自己为服务器进行时间同步
  5. sed -ri 's/(^server.*)/#\1/' /etc/chrony.conf
  6. #匹配最后一行,在后面加入server controller iburst
  7. endrow=`sed -rn '/^#server.*/=' /etc/chrony.conf |sed -n "$"p`
  8. sed -ri "$endrow"a"server controller iburst" /etc/chrony.conf
  9. #启动
  10. systemctl enable chronyd.service && systemctl restart chronyd.service
  11. #验证
  12. echo "验证"
  13. chronyc sources
  14. #配置openstack源
  15. #yum install centos-release-openstack-train -y && yum upgrade -y
  16. yum install openstack-selinux -y

3、安装基础组件:mariadb、rabbitmq、memcached

mariadb-rabbitmq-memcached-install.sh

  1. #!/bin/bash
  2. echo "================安装SQL数据库:mariadb=================="
  3. yum install mariadb mariadb-server python2-PyMySQL -y
  4. cp /etc/my.cnf /etc/my.cnf.bak
  5. sed -ri '/^\[mysqld\]/'a"bind-address = 0.0.0.0\n\
  6. default-storage-engine = innodb\n\
  7. innodb_file_per_table = on\n\
  8. max_connections = 4096\n\
  9. collation-server = utf8_general_ci\n\
  10. character-set-server = utf8" /etc/my.cnf
  11. echo "================启动mariadb服务=================="
  12. systemctl enable mariadb.service && systemctl restart mariadb.service && systemctl status mariadb.service
  13. echo "================rabbitma安装=================="
  14. yum install rabbitmq-server -y
  15. systemctl enable rabbitmq-server.service && systemctl start rabbitmq-server.service && systemctl status rabbitmq-server.service
  16. rabbitmqctl add_user openstack openstack
  17. rabbitmqctl set_permissions openstack ".*" ".*" ".*"
  18. rabbitmqctl status
  19. yum install memcached python-memcached -y
  20. sed -ri 's/-l 127.0.0.1,::1/-l 127.0.0.1,::1,controller/g' /etc/sysconfig/memcached
  21. systemctl enable memcached.service && systemctl start memcached.service
  22. systemctl status memcached
  23. echo "================mysql_secure_installation 进行数据库设置=================="

安装完需要mysql_secure_installation 进行数据库设置

4、glance安装

glance-install-controller.sh

  1. #!/bin/bash
  2. #创建glance数据库
  3. mysql -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS glance;GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'glance';GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance';"
  4. mysql -uroot -p123456 -e "show databases;"
  5. #创建用户、服务、端点"
  6. #模拟登录
  7. source /root/admin-openrc.sh
  8. echo "创建glance用户"
  9. openstack user create --domain default --password glance glance
  10. echo "给glance用户添加角色权限"
  11. openstack role add --project service --user glance admin
  12. echo "创建glance image服务"
  13. openstack service create --name glance --description "OpenStack Image" image
  14. echo "添加public、internal和admin endpoint"
  15. openstack endpoint create --region RegionOne image public http://controller:9292
  16. openstack endpoint create --region RegionOne image internal http://controller:9292
  17. openstack endpoint create --region RegionOne image admin http://controller:9292
  18. #安装
  19. yum install openstack-glance -y
  20. cp /etc/glance/glance-api.conf /etc/glance/glance-api.conf.bak
  21. #sed -ri '/^[ \t]*(#|$)/d' /etc/glance/glance-api.conf
  22. tee /etc/glance/glance-api.conf <<-EOF
  23. [database]
  24. # ...
  25. connection = mysql+pymysql://glance:glance@controller/glance
  26. [keystone_authtoken]
  27. # ...
  28. www_authenticate_uri = http://controller:5000
  29. auth_url = http://controller:5000
  30. memcached_servers = controller:11211
  31. auth_type = password
  32. project_domain_name = Default
  33. user_domain_name = Default
  34. project_name = service
  35. username = glance
  36. password = glance
  37. [paste_deploy]
  38. # ...
  39. flavor = keystone
  40. [glance_store]
  41. # ...
  42. stores = file,http
  43. default_store = file
  44. filesystem_store_datadir = /var/lib/glance/images/
  45. EOF
  46. #同步数据库
  47. su -s /bin/sh -c "glance-manage db_sync" glance
  48. #启动
  49. systemctl enable openstack-glance-api.service && systemctl restart openstack-glance-api.service

5、placement安装

placement-install-controller.sh

  1. #!/bin/bash
  2. #创建placement数据库
  3. mysql -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS placement;GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'placement';GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'placement';"
  4. mysql -uroot -p123456 -e "show databases;"
  5. #创建用户、服务、端点"
  6. #模拟登录
  7. source /root/admin-openrc.sh
  8. echo "创建用户"
  9. openstack user create --domain default --password placement placement
  10. echo "添加role"
  11. openstack role add --project service --user placement admin
  12. echo "创建placement服务"
  13. openstack service create --name placement --description "Placement API" placement
  14. echo "创建endpoint"
  15. openstack endpoint create --region RegionOne placement public http://controller:8778
  16. openstack endpoint create --region RegionOne placement internal http://controller:8778
  17. openstack endpoint create --region RegionOne placement admin http://controller:8778
  18. #安装
  19. echo "开始openstack-placement-api"
  20. yum install openstack-placement-api -y
  21. cp /etc/placement/placement.conf /etc/placement/placement.conf.bak
  22. echo "修改配置文件"
  23. tee /etc/placement/placement.conf <<-EOF
  24. [placement_database]
  25. # ...
  26. connection = mysql+pymysql://placement:placement@controller/placement
  27. [api]
  28. # ...
  29. auth_strategy = keystone
  30. [keystone_authtoken]
  31. # ...
  32. auth_url = http://controller:5000/v3
  33. memcached_servers = controller:11211
  34. auth_type = password
  35. project_domain_name = Default
  36. user_domain_name = Default
  37. project_name = service
  38. username = placement
  39. password = placement
  40. EOF
  41. #同步数据库
  42. echo "同步数据库"
  43. su -s /bin/sh -c "placement-manage db sync" placement
  44. #重启http服务
  45. echo "重启http服务"
  46. systemctl restart httpd
  47. echo "验证"
  48. placement-status upgrade check

6、nova安装

6.1、控制节点

nova-install-controller.sh
注意:**/etc/nova/nova.conf 中需修改 my_ip 为本机管理网 IP**

  1. #!/bin/bash
  2. echo "创建数据库"
  3. mysql -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS nova_api;CREATE DATABASE IF NOT EXISTS nova;CREATE DATABASE IF NOT EXISTS nova_cell0;GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova';GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova';GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova';"
  4. mysql -uroot -p123456 -e "show databases;"
  5. echo "模拟登录"
  6. source /root/admin-openrc.sh
  7. echo "创建用户"
  8. openstack user create --domain default --password nova nova
  9. echo "添加role"
  10. openstack role add --project service --user nova admin
  11. echo "创建compute服务"
  12. openstack service create --name nova --description "OpenStack Compute" compute
  13. echo "创建endpoint"
  14. openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
  15. openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
  16. openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
  17. echo "安装nova-api、nova-conductor、nova-novncproxy、nova-scheduler"
  18. yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler -y
  19. cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
  20. tee /etc/nova/nova.conf <<-EOF
  21. [DEFAULT]
  22. # ...
  23. enabled_apis = osapi_compute,metadata
  24. my_ip = 192.100.5.137
  25. transport_url = rabbit://openstack:openstack@controller:5672/
  26. use_neutron = true
  27. firewall_driver = nova.virt.firewall.NoopFirewallDriver
  28. [api_database]
  29. # ...
  30. connection = mysql+pymysql://nova:nova@controller/nova_api
  31. [database]
  32. # ...
  33. connection = mysql+pymysql://nova:nova@controller/nova
  34. [api]
  35. # ...
  36. auth_strategy = keystone
  37. [keystone_authtoken]
  38. # ...
  39. www_authenticate_uri = http://controller:5000/
  40. auth_url = http://controller:5000/
  41. memcached_servers = controller:11211
  42. auth_type = password
  43. project_domain_name = Default
  44. user_domain_name = Default
  45. project_name = service
  46. username = nova
  47. password = nova
  48. [vnc]
  49. enabled = true
  50. # ...
  51. server_listen = \$my_ip
  52. server_proxyclient_address = \$my_ip
  53. [glance]
  54. # ...
  55. api_servers = http://controller:9292
  56. [oslo_concurrency]
  57. # ...
  58. lock_path = /var/lib/nova/tmp
  59. [placement]
  60. # ...
  61. region_name = RegionOne
  62. project_domain_name = Default
  63. project_name = service
  64. auth_type = password
  65. user_domain_name = Default
  66. auth_url = http://controller:5000/v3
  67. username = placement
  68. password = placement
  69. EOF
  70. echo "同步数据库"
  71. su -s /bin/sh -c "nova-manage api_db sync" nova
  72. su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
  73. su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
  74. su -s /bin/sh -c "nova-manage db sync" nova
  75. echo "验证"
  76. su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
  77. echo "启动"
  78. systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
  79. systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
  80. systemctl status openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

6.2、计算节点

nova-install-computer.sh

  1. #!/bin/bash
  2. echo "安装"
  3. yum install openstack-nova-compute -y
  4. if [[ ! -f /etc/nova/nova.conf.bak ]];then
  5. cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
  6. fi
  7. tee /etc/nova/nova.conf <<-EOF
  8. [DEFAULT]
  9. # ...
  10. enabled_apis = osapi_compute,metadata
  11. transport_url = rabbit://openstack:openstack@controller
  12. my_ip = 192.100.5.138
  13. use_neutron = true
  14. firewall_driver = nova.virt.firewall.NoopFirewallDriver
  15. [api]
  16. # ...
  17. auth_strategy = keystone
  18. [keystone_authtoken]
  19. # ...
  20. www_authenticate_uri = http://controller:5000/
  21. auth_url = http://controller:5000/
  22. memcached_servers = controller:11211
  23. auth_type = password
  24. project_domain_name = Default
  25. user_domain_name = Default
  26. project_name = service
  27. username = nova
  28. password = nova
  29. [vnc]
  30. # ...
  31. enabled = true
  32. server_listen = 0.0.0.0
  33. server_proxyclient_address = \$my_ip
  34. novncproxy_base_url = http://controller:6080/vnc_auto.html
  35. [glance]
  36. # ...
  37. api_servers = http://controller:9292
  38. [oslo_concurrency]
  39. # ...
  40. lock_path = /var/lib/nova/tmp
  41. [placement]
  42. # ...
  43. region_name = RegionOne
  44. project_domain_name = Default
  45. project_name = service
  46. auth_type = password
  47. user_domain_name = Default
  48. auth_url = http://controller:5000/v3
  49. username = placement
  50. password = placement
  51. EOF
  52. echo "判断是否支持硬件加速"
  53. core=`egrep -c '(vmx|svm)' /proc/cpuinfo`
  54. if [[ $core == 0 ]]; then
  55. tee -a /etc/nova/nova.conf <<-EOF
  56. [libvirt]
  57. virt_type = qemu
  58. EOF
  59. fi
  60. echo "启动"
  61. systemctl enable libvirtd.service openstack-nova-compute.service
  62. systemctl restart libvirtd.service openstack-nova-compute.service
  63. systemctl status libvirtd.service openstack-nova-compute.service
  64. #解决 nova-status upgrade check 403问题
  65. #tee -a /etc/httpd/conf.d/00-placement-api.conf <<-EOF
  66. # <Directory /usr/bin>
  67. # <IfVersion >= 2.4>
  68. # Require all granted
  69. # </IfVersion>
  70. # <IfVersion < 2.4>
  71. # Order allow,deny
  72. # Allow from all
  73. # </IfVersion>
  74. # </Directory>
  75. #EOF
  76. echo "控制器节点上运行:openstack compute service list --service nova-compute"
  77. echo "控制器节点上运行:su -s /bin/sh -c 'nova-manage cell_v2 discover_hosts --verbose' nova"
  78. echo "验证:openstack compute service list"
  79. echo "验证:nova-status upgrade check"

7、neutron安装

7.1、控制节点

neutron-install-controller.sh

  1. #!/bin/bash
  2. echo "创建数据库"
  3. mysql -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS neutron;GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron';GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';show databases;"
  4. echo "创建用户"
  5. openstack user create --domain default --password neutron neutron
  6. echo "添加角色"
  7. openstack role add --project service --user neutron admin
  8. echo "创建network服务"
  9. openstack service create --name neutron --description "OpenStack Networking" network
  10. echo "创建endpoint"
  11. openstack endpoint create --region RegionOne network public http://controller:9696
  12. openstack endpoint create --region RegionOne network internal http://controller:9696
  13. openstack endpoint create --region RegionOne network admin http://controller:9696
  14. echo "选择:自助网络配置"
  15. echo "安装"
  16. yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y
  17. if [[ ! -f /etc/neutron/neutron.conf.bak ]];then
  18. cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
  19. fi
  20. tee /etc/neutron/neutron.conf <<-EOF
  21. [database]
  22. # ...
  23. connection = mysql+pymysql://neutron:neutron@controller/neutron
  24. [DEFAULT]
  25. # ...
  26. core_plugin = ml2
  27. service_plugins = router
  28. allow_overlapping_ips = true
  29. transport_url = rabbit://openstack:openstack@controller
  30. auth_strategy = keystone
  31. notify_nova_on_port_status_changes = true
  32. notify_nova_on_port_data_changes = true
  33. [keystone_authtoken]
  34. # ...
  35. www_authenticate_uri = http://controller:5000
  36. auth_url = http://controller:5000
  37. memcached_servers = controller:11211
  38. auth_type = password
  39. project_domain_name = default
  40. user_domain_name = default
  41. project_name = service
  42. username = neutron
  43. password = neutron
  44. [nova]
  45. # ...
  46. auth_url = http://controller:5000
  47. auth_type = password
  48. project_domain_name = default
  49. user_domain_name = default
  50. region_name = RegionOne
  51. project_name = service
  52. username = nova
  53. password = nova
  54. [oslo_concurrency]
  55. # ...
  56. lock_path = /var/lib/neutron/tmp
  57. EOF
  58. echo "模块化第 2 层 (ML2) 插件"
  59. if [[ ! -f /etc/neutron/plugins/ml2/ml2_conf.ini.bak ]];then
  60. cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bak
  61. fi
  62. tee /etc/neutron/plugins/ml2/ml2_conf.ini <<-EOF
  63. [ml2]
  64. # ...
  65. type_drivers = flat,vlan,vxlan
  66. tenant_network_types = vxlan
  67. mechanism_drivers = linuxbridge,l2population
  68. extension_drivers = port_security
  69. [ml2_type_flat]
  70. # ...
  71. flat_networks = provider
  72. [ml2_type_vxlan]
  73. # ...
  74. vni_ranges = 1:1000
  75. [securitygroup]
  76. # ...
  77. enable_ipset = true
  78. EOF
  79. echo "Linux 网桥代理"
  80. if [[ ! -f /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak ]];then
  81. cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
  82. fi
  83. tee /etc/neutron/plugins/ml2/linuxbridge_agent.ini <<-EOF
  84. [linux_bridge]
  85. physical_interface_mappings = provider:ens32
  86. [vxlan]
  87. enable_vxlan = true
  88. local_ip = 管理接口ip
  89. l2_population = true
  90. [securitygroup]
  91. # ...
  92. enable_security_group = true
  93. firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
  94. EOF
  95. echo "启用网络网桥支持"
  96. modprobe br_netfilter
  97. echo "查看net.bridge.bridge-nf-call-iptables和net.bridge.bridge-nf-call-ip6tables的值是否为1"
  98. sysctl net.bridge.bridge-nf-call-iptables=1
  99. sysctl net.bridge.bridge-nf-call-ip6tables=1
  100. echo "配置第3层代理"
  101. if [[ ! -f /etc/neutron/l3_agent.ini.bak ]];then
  102. cp /etc/neutron/l3_agent.ini /etc/neutron/l3_agent.ini.bak
  103. fi
  104. tee /etc/neutron/l3_agent.ini <<-EOF
  105. [DEFAULT]
  106. # ...
  107. interface_driver = linuxbridge
  108. EOF
  109. echo "配置 DHCP 代理"
  110. if [[ ! -f /etc/neutron/dhcp_agent.ini.bak ]];then
  111. cp /etc/neutron/dhcp_agent.ini /etc/neutron/dhcp_agent.ini.bak
  112. fi
  113. tee /etc/neutron/dhcp_agent.ini <<-EOF
  114. [DEFAULT]
  115. # ...
  116. interface_driver = linuxbridge
  117. dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
  118. enable_isolated_metadata = True
  119. EOF
  120. echo "配置元数据代理"
  121. if [[ ! -f /etc/neutron/metadata_agent.ini.bak ]];then
  122. cp /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini.bak
  123. fi
  124. tee /etc/neutron/metadata_agent.ini <<-EOF
  125. [DEFAULT]
  126. # ...
  127. nova_metadata_host = controller
  128. metadata_proxy_shared_secret = METADATA_SECRET
  129. EOF
  130. echo "将计算服务配置为使用网络服务"
  131. tee -a /etc/nova/nova.conf <<-EOF
  132. [neutron]
  133. # ...
  134. auth_url = http://controller:5000
  135. auth_type = password
  136. project_domain_name = default
  137. user_domain_name = default
  138. region_name = RegionOne
  139. project_name = service
  140. username = neutron
  141. password = neutron
  142. service_metadata_proxy = true
  143. metadata_proxy_shared_secret = METADATA_SECRET
  144. EOF
  145. echo "数据库同步"
  146. if [[ ! -f /etc/neutron/plugin.ini ]];then
  147. ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
  148. fi
  149. su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
  150. echo "启动"
  151. systemctl restart openstack-nova-api.service && systemctl status openstack-nova-api.service
  152. systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
  153. systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
  154. systemctl status neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
  155. echo "网络选项 2,还启用和启动第 3 层服务"
  156. systemctl enable neutron-l3-agent.service
  157. systemctl restart neutron-l3-agent.service
  158. systemctl status neutron-l3-agent.service

7.2、计算节点

neutron-install-computer.sh

  1. #!/bin/bash
  2. echo "安装"
  3. yum install openstack-neutron-linuxbridge ebtables ipset -y
  4. if [[ ! -f /etc/neutron/neutron.conf.bak ]];then
  5. cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
  6. fi
  7. tee /etc/neutron/neutron.conf <<-EOF
  8. [DEFAULT]
  9. # ...
  10. transport_url = rabbit://openstack:openstack@controller
  11. auth_strategy = keystone
  12. [keystone_authtoken]
  13. # ...
  14. www_authenticate_uri = http://controller:5000
  15. auth_url = http://controller:5000
  16. memcached_servers = controller:11211
  17. auth_type = password
  18. project_domain_name = default
  19. user_domain_name = default
  20. project_name = service
  21. username = neutron
  22. password = neutron
  23. [oslo_concurrency]
  24. # ...
  25. lock_path = /var/lib/neutron/tmp
  26. EOF
  27. echo "选择:自助网络"
  28. if [[ ! -f /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak ]];then
  29. cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
  30. fi
  31. tee /etc/neutron/plugins/ml2/linuxbridge_agent.ini <<-EOF
  32. [linux_bridge]
  33. physical_interface_mappings = provider:ens33
  34. [vxlan]
  35. enable_vxlan = true
  36. local_ip = 管理接口ip
  37. l2_population = true
  38. [securitygroup]
  39. # ...
  40. enable_security_group = true
  41. firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
  42. EOF
  43. echo "启用网桥支持"
  44. modprobe br_netfilter
  45. echo "查看net.bridge.bridge-nf-call-iptables和net.bridge.bridge-nf-call-ip6tables的值是否为1"
  46. sysctl net.bridge.bridge-nf-call-iptables=1
  47. sysctl net.bridge.bridge-nf-call-ip6tables=1
  48. echo "将计算服务配置为使用网络服务"
  49. tee -a /etc/nova/nova.conf <<-EOF
  50. [neutron]
  51. # ...
  52. auth_url = http://controller:5000
  53. auth_type = password
  54. project_domain_name = default
  55. user_domain_name = default
  56. region_name = RegionOne
  57. project_name = service
  58. username = neutron
  59. password = neutron
  60. EOF
  61. echo "启动"
  62. systemctl restart openstack-nova-compute.service && systemctl status openstack-nova-compute.service
  63. systemctl enable neutron-linuxbridge-agent.service && systemctl restart neutron-linuxbridge-agent.service && systemctl status neutron-linuxbridge-agent.service
  64. echo "验证:openstack extension list --network"
  65. echo "验证提供商网络:openstack network agent list"

8、cinder安装

8.1、控制节点

cinder-install-controller.sh

  1. #!/bin/bash
  2. echo "数据库配置"
  3. mysql -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS cinder;GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'cinder';GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinder';"
  4. mysql -uroot -p123456 -e "show databases;"
  5. echo "模拟登录"
  6. source /root/admin-openrc.sh
  7. echo "创建用户"
  8. openstack user create --domain default --password cinder cinder
  9. echo "添加role"
  10. openstack role add --project service --user cinder admin
  11. echo "创建cinderv2服务"
  12. openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
  13. echo "创建cinderv3服务"
  14. openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
  15. echo "创建endpoint"
  16. openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(project_id\)s
  17. openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(project_id\)s
  18. openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(project_id\)s
  19. openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(project_id\)s
  20. openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(project_id\)s
  21. openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(project_id\)s
  22. echo "安装"
  23. yum install openstack-cinder -y
  24. if [[ ! -f /etc/cinder/cinder.conf.bak ]];then
  25. cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
  26. fi
  27. tee /etc/cinder/cinder.conf <<-EOF
  28. [database]
  29. # ...
  30. connection = mysql+pymysql://cinder:cinder@controller/cinder
  31. [DEFAULT]
  32. # ...
  33. transport_url = rabbit://openstack:openstack@controller
  34. auth_strategy = keystone
  35. my_ip = 192.100.5.137
  36. [keystone_authtoken]
  37. # ...
  38. www_authenticate_uri = http://controller:5000
  39. auth_url = http://controller:5000
  40. memcached_servers = controller:11211
  41. auth_type = password
  42. project_domain_name = default
  43. user_domain_name = default
  44. project_name = service
  45. username = cinder
  46. password = cinder
  47. [oslo_concurrency]
  48. # ...
  49. lock_path = /var/lib/cinder/tmp
  50. EOF
  51. echo "同步数据库"
  52. su -s /bin/sh -c "cinder-manage db sync" cinder
  53. echo "将计算配置为使用块存储"
  54. tee -a /etc/nova/nova.conf <<-EOF
  55. [cinder]
  56. os_region_name = RegionOne
  57. EOF
  58. echo "启动"
  59. systemctl restart openstack-nova-api.service
  60. systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service && systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service
  61. systemctl status openstack-cinder-api.service openstack-cinder-scheduler.service

8.2、卷存储节点

需要准备好磁盘并创建好卷组
cinder-install-block.sh

  1. #!/bin/bash
  2. echo "安装cinder"
  3. yum install openstack-cinder targetcli python-keystone -y
  4. if [[ ! -f /etc/cinder/cinder.conf.bak ]];then
  5. cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
  6. fi
  7. tee /etc/cinder/cinder.conf <<-EOF
  8. [database]
  9. # ...
  10. connection = mysql+pymysql://cinder:cinder@controller/cinder
  11. [DEFAULT]
  12. # ...
  13. transport_url = rabbit://openstack:openstack@controller
  14. auth_strategy = keystone
  15. my_ip = 192.168.189.153
  16. enabled_backends = lvm
  17. glance_api_servers = http://controller:9292
  18. [keystone_authtoken]
  19. # ...
  20. www_authenticate_uri = http://controller:5000
  21. auth_url = http://controller:5000
  22. memcached_servers = controller:11211
  23. auth_type = password
  24. project_domain_name = default
  25. user_domain_name = default
  26. project_name = service
  27. username = cinder
  28. password = cinder
  29. [lvm]
  30. volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
  31. volume_group = cinder-volumes
  32. target_protocol = iscsi
  33. target_helper = lioadm
  34. [oslo_concurrency]
  35. # ...
  36. lock_path = /var/lib/cinder/tmp
  37. EOF
  38. echo "启动"
  39. systemctl enable openstack-cinder-volume.service target.service && systemctl restart openstack-cinder-volume.service target.service
  40. systemctl status openstack-cinder-volume.service target.service