CentOS 8 Single-Node OpenStack Installation

OpenStack release list: https://releases.openstack.org/

Official installation guide: https://docs.openstack.org/install-guide/

References: https://blog.51cto.com/liuleis/2094190 (focus: installing OpenStack Queens)
https://blog.csdn.net/qq_38773184/article/details/82391073 (focus: installing OpenStack Rocky)

1 Deploying the OpenStack Base Services

1.1 Initialize the Environment

# Step 1: disable the firewall and SELinux
systemctl restart network
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i 's/=enforcing/=disabled/' /etc/selinux/config
# Step 2: update packages
yum upgrade -y
# Step 3: reboot after the update completes
reboot
# Step 4: set the hostname (all later configuration references kubesphere-master)
hostnamectl set-hostname kubesphere-master
# Step 5: add a hosts entry
cat >> /etc/hosts <<-'EOF'
# OpenStack Host BEGIN
9.136.142.148 kubesphere-master
# OpenStack Host END
EOF
# Step 6: configure time sync on the controller node
yum install -y chrony
# Edit /etc/chrony.conf
vim /etc/chrony.conf
server kubesphere-master iburst
allow 9.136.142.0/23
# Start and enable the service
systemctl start chronyd ; systemctl enable chronyd
# Step 7: configure time sync on all other nodes
yum install -y chrony
# Edit /etc/chrony.conf
vim /etc/chrony.conf
server kubesphere-master iburst
# Start and enable the service
systemctl start chronyd ; systemctl enable chronyd
# Step 8: load kernel modules
modprobe bridge
modprobe br_netfilter
cat > /etc/sysconfig/modules/neutron.modules <<EOF
#!/bin/bash
modprobe -- bridge
modprobe -- br_netfilter
EOF
chmod 755 /etc/sysconfig/modules/neutron.modules && bash /etc/sysconfig/modules/neutron.modules
# Set kernel parameters
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
echo "net.ipv4.ip_forward=1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-iptables=1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables=1" >> /etc/sysctl.conf
sysctl -p
# WORKDIR
mkdir -pv /app/openstack
workdir=/app/openstack
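
A quick sanity check after the reboot (a minimal sketch; it only reads state configured above):

getenforce
chronyc sources -v
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables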

1.2 Configure the OpenStack yum Repository

The official guide installs the centos-release-openstack-* release package via yum, which pulls from mirrors outside China and is slow, so use the Aliyun mirror instead. (Note: although the title mentions Rocky, the CentOS 8 Cloud SIG only ships Train and later, so this walkthrough actually uses Ussuri.)

# Configure the Aliyun mirror for OpenStack
tee > /etc/yum.repos.d/CentOS-OpenStack.repo <<-'EOF'
# [openstack-victoria]
# name=openstack-victoria
# baseurl=https://mirrors.aliyun.com/centos/8/cloud/x86_64/openstack-victoria/
# enabled=1
# gpgcheck=0

# [openstack-train]
# name=openstack-train
# baseurl=https://mirrors.aliyun.com/centos/8/cloud/x86_64/openstack-train
# enabled=1
# gpgcheck=0

[openstack-ussuri]
name=openstack-ussuri
baseurl=https://mirrors.aliyun.com/centos/8/cloud/x86_64/openstack-ussuri/
enabled=1
gpgcheck=0

[qemu-kvm]
name=qemu-kvm
baseurl=https://mirrors.aliyun.com/centos/7/virt/x86_64/kvm-common/
enabled=1
gpgcheck=0
EOF
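
A minimal check that the new repositories are active:

yum clean all
yum repolist enabled | grep -Ei 'openstack|qemu'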

1.3 Install Base Components

# Install the OpenStack release package
yum install -y centos-release-openstack-ussuri
yum config-manager --set-enabled PowerTools

# The release package drops extra SIG repo files; since the Aliyun repos above are used instead, back them up
cd /etc/yum.repos.d/
mv CentOS-Ceph-Nautilus.repo CentOS-Ceph-Nautilus.repo.bak
mv advanced-virtualization.repo advanced-virtualization.repo.bak
mv CentOS-Messaging-rabbitmq.repo CentOS-Messaging-rabbitmq.repo.bak
mv CentOS-OpenStack-ussuri.repo CentOS-OpenStack-ussuri.repo.bak
mv CentOS-Storage-common.repo CentOS-Storage-common.repo.bak
# Update packages
yum upgrade -y
yum install -y python3-openstackclient

1.4 Install MariaDB / MySQL

# Step 1: install MariaDB (CentOS 8 ships the Python 3 bindings)
yum install -y mariadb mariadb-server python3-PyMySQL

# Step 2: add the openstack.cnf file
tee > /etc/my.cnf.d/openstack.cnf <<-'EOF'
[mysqld]
bind-address = 9.136.142.148
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
EOF

# Step 3: enable and start MariaDB
systemctl enable mariadb.service && systemctl start mariadb.service
# Step 4: set the MariaDB root password (scripted answers for mysql_secure_installation)
echo -e "\nY\nAdmin@h3c\nAdmin@h3c\nY\nn\nY\nY\n" | mysql_secure_installation
# Verify the database
mysql -uroot -pAdmin@h3c
> show databases;

1.5 Install the RabbitMQ Message Queue

1.5.1 Download and Install RabbitMQ

# Step 1: install Erlang (RPM from GitHub releases)
wget -P /app/openstack https://github.com/rabbitmq/erlang-rpm/releases/download/v23.0/erlang-23.0-1.el7.x86_64.rpm 
yum install -y /app/openstack/erlang-23.0-1.el7.x86_64.rpm
yum install -y socat

# Step 2: install the RabbitMQ server RPM
wget -P /app/openstack https://github.com/rabbitmq/rabbitmq-server/releases/download/v3.8.9/rabbitmq-server-3.8.9-1.el7.noarch.rpm
yum install -y /app/openstack/rabbitmq-server-3.8.9-1.el7.noarch.rpm

1.5.2 Start RabbitMQ

# Step 1: enable and start the queue service
systemctl enable rabbitmq-server.service && systemctl start rabbitmq-server.service

# Step 2: create the queue user
rabbitmqctl add_user openstack Aa123456

# Step 3: grant the openstack user full permissions
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
# List users
rabbitmqctl list_users 

# Step 4: enable the RabbitMQ management UI
rabbitmq-plugins enable rabbitmq_management
# List plugins
rabbitmq-plugins list

# Management UI address
http://9.136.142.148:15672
### PS: the default admin account and password are guest / guest

### Note: if the login page reports the following error
# User can only log in via localhost
# Step 1: fix
cat > /etc/rabbitmq/rabbitmq.config <<EOF
[{rabbit, [{loopback_users, []}]}].
EOF
# Step 2: restart the service
systemctl restart rabbitmq-server.service
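
A minimal liveness check for the broker (guest only works from localhost; querying the management API as the openstack user would additionally require a management tag on that user):

rabbitmqctl status
curl -u guest:guest http://localhost:15672/api/overview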

1.6 Install Memcached

# Step 1: install memcached
yum install -y memcached python3-memcached

# Step 2: edit the memcached configuration to also listen on the controller name
vim /etc/sysconfig/memcached 
OPTIONS="-l 127.0.0.1,::1,kubesphere-master"

# Step 3: enable and start the cache service
systemctl enable memcached.service && systemctl start memcached.service
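
A quick check that memcached answers (assumes nc from nmap-ncat is available; kubesphere-master resolves via /etc/hosts):

echo -e "stats\nquit" | nc kubesphere-master 11211 | head -5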

1.7 Install etcd

# Step 1: install
yum install etcd -y

# Step 2: configure etcd
cat > /etc/etcd/etcd.conf << EOF
#[Member]
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://9.136.142.148:2380"
ETCD_LISTEN_CLIENT_URLS="http://9.136.142.148:2379"
ETCD_NAME="controller"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://9.136.142.148:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://9.136.142.148:2379"
ETCD_INITIAL_CLUSTER="controller=http://9.136.142.148:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

# Step 3: enable and start etcd
systemctl enable etcd && systemctl start etcd
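
Verify the member is healthy (a minimal check against the client URL configured above):

ETCDCTL_API=3 etcdctl --endpoints=http://9.136.142.148:2379 endpoint health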

2 Deploying the OpenStack Platform

2.1 Keystone Component

Identity Service (Keystone): the authentication service; the login entry point for users and the authentication backbone for the other OpenStack components

2.1.1 MySQL Database Setup

mysql -u root -p'Admin@h3c'

> CREATE DATABASE keystone;
> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'MysqlKeyst0ne';
> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'MysqlKeyst0ne';

2.1.2 Install the Keystone Service

yum install openstack-keystone httpd python3-mod_wsgi -y

# Edit the Keystone configuration
vim /etc/keystone/keystone.conf
# Change the following
[database]
...
connection = mysql+pymysql://keystone:MysqlKeyst0ne@kubesphere-master/keystone
...
[token]
...
provider = fernet
...

# (Optional) enable verbose logging to aid troubleshooting
[DEFAULT]
...
verbose = True

2.1.3 Sync the Keystone Schema to the Database

su -s /bin/sh -c "keystone-manage db_sync" keystone

# Verify
mysql -ukeystone -p'MysqlKeyst0ne'
> use keystone;
> show tables;

2.1.4 Initialize Keystone (Fernet Keys and Bootstrap)

# Step 1: initialize the Fernet key repositories
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone

# Step 2: bootstrap Keystone and register the Keystone API
keystone-manage bootstrap --bootstrap-password Keyst0nePwd \
  --bootstrap-admin-url http://kubesphere-master:5000/v3/ \
  --bootstrap-internal-url http://kubesphere-master:5000/v3/ \
  --bootstrap-public-url http://kubesphere-master:5000/v3/ \
  --bootstrap-region-id RegionOne

# Account: admin
# Password: Keyst0nePwd

2.1.5 Start the Service

# Step 1: configure Apache
vim /etc/httpd/conf/httpd.conf
# Change as follows
ServerName kubesphere-master

ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

# Step 2: enable and start httpd
systemctl enable httpd.service && systemctl start httpd.service

2.1.6 Create the admin Environment Script

# Step 1: create the admin environment script
cat > admin-openrc << 'EOF'
# Set the shell prompt
export PS1="(Keystone-admin) [\u@\h \W]\# "

# Keystone environment variables
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=Keyst0nePwd
export OS_AUTH_URL=http://kubesphere-master:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF

# Step 2: load the variables
source admin-openrc
# or
. admin-openrc

The values shown here are the defaults created by keystone-manage bootstrap; replace ADMIN_PASS with the password chosen during keystone-manage bootstrap (Keyst0nePwd above).
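
A minimal end-to-end check that the admin credentials work:

. admin-openrc
openstack token issue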

2.1.7 Create Projects and Services

# Create the service project in the default domain
openstack project create --domain default --description "Service Project" service
## Workflow for creating a custom project
# Step 1: create a new domain
openstack domain create --description "An Example Domain" mydomain

# Step 2: create a demo project named myproject (in mydomain, so it matches the environment script below)
openstack project create --domain mydomain --description "Demo Project" myproject

# Step 3: create a user in the mydomain domain
openstack user create --domain mydomain --password-prompt myuser
User Password: DEMO_PASS
Repeat User Password: DEMO_PASS

# Step 4: create the myrole role
openstack role create myrole

# Step 5: bind project, user and role: add the myrole role for myuser on the myproject project
openstack role add --project myproject --user myuser myrole

# Step 6: client environment script
cat > myproject-openrc << EOF
# Set the shell prompt
export PS1="(Keystone-myproject) [\u@\h \W]\$ "

export OS_PROJECT_DOMAIN_NAME=mydomain
export OS_USER_DOMAIN_NAME=mydomain
export OS_PROJECT_NAME=myproject
export OS_USERNAME=myuser
export OS_PASSWORD=DEMO_PASS
export OS_AUTH_URL=http://kubesphere-master:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF

# Step 7: load the environment
. myproject-openrc
# DEMO_PASS is the password set when creating myuser with openstack user create

2.1.8 Common Keystone Commands

# Change a user's password
openstack user set --password newpassword user

2.1.9 Verify the Keystone Deployment

# Run the following as the privileged admin user
## Common inspection commands
openstack project list
openstack service list
openstack user list

## Request an authentication token for the admin user
openstack --os-auth-url http://kubesphere-master:5000/v3 \
  --os-project-domain-name Default --os-user-domain-name Default \
  --os-project-name admin --os-username admin token issue

## Request an authentication token for the myuser user
openstack --os-auth-url http://kubesphere-master:5000/v3 \
  --os-project-domain-name mydomain --os-user-domain-name mydomain \
  --os-project-name myproject --os-username myuser token issue

2.2 Glance Component

Image Service (Glance): the image service; supplies operating-system images to OpenStack

2.2.1 MySQL Database Setup

mysql -u root -p'Admin@h3c'

> CREATE DATABASE glance;
> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'MysqlG1ance';
> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'MysqlG1ance';

2.2.2 Register Glance with Keystone

# Step 1: load the admin environment
. admin-openrc

# Step 2: create the glance user
openstack user create --domain default --password-prompt glance
User Password: G1ancePwd
Repeat User Password: G1ancePwd
# Password: G1ancePwd

# Step 3: grant the glance user the admin role on the service project
openstack role add --project service --user glance admin

# Step 4: create the glance service
openstack service create --name glance --description "OpenStack Image" image

# Step 5: create the image service API endpoints
openstack endpoint create --region RegionOne image public http://kubesphere-master:9292
openstack endpoint create --region RegionOne image internal http://kubesphere-master:9292
openstack endpoint create --region RegionOne image admin http://kubesphere-master:9292

# Step 6: verify
openstack service list
openstack user list
openstack endpoint list

2.2.3 Install the Glance Service

# Install
yum install openstack-glance -y

# Configure
vim /etc/glance/glance-api.conf
# Change as follows
[database]
connection = mysql+pymysql://glance:MysqlG1ance@kubesphere-master/glance

[keystone_authtoken]
www_authenticate_uri  = http://kubesphere-master:5000
auth_url = http://kubesphere-master:5000
memcached_servers = kubesphere-master:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = G1ancePwd

[paste_deploy]
flavor = keystone

# Local image store location
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/

# (Optional) enable verbose logging to aid troubleshooting
[DEFAULT]
...
verbose = True

2.2.4 Sync the Glance Schema to the Database

su -s /bin/sh -c "glance-manage db_sync" glance

mysql -uroot -pAdmin@h3c
> use glance;
> show tables;

2.2.5 Start the Service

systemctl enable openstack-glance-api.service && systemctl start openstack-glance-api.service

2.2.6 Verify the Deployment

. admin-openrc

wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img

glance image-create --name "cirros" \
  --file cirros-0.4.0-x86_64-disk.img \
  --disk-format qcow2 --container-format bare \
  --visibility=public

glance image-list

2.3 Placement Component

A resource provider can be a compute node, a shared storage pool, or an IP allocation pool; the Placement service tracks each provider's inventory and usage.

Role: the resource-tracking part of the compute service, split out of Nova.

Placement collects the available resources of each node and writes them to MySQL; it is queried by the nova-scheduler service. Because all state lives in MySQL, the service itself is stateless and can be deployed on multiple nodes.

2.3.1 MySQL Database Setup

mysql -uroot -p'Admin@h3c'

> CREATE DATABASE placement;
> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'MysqlP1acement';
> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'MysqlP1acement';

2.3.2 Register Placement with Keystone

# Load the admin environment
. admin-openrc

# Create the placement user
openstack user create --domain default --password-prompt placement
User Password: P1acementPwd
Repeat User Password: P1acementPwd
# Password: P1acementPwd

# Grant the placement user the admin role on the service project
openstack role add --project service --user placement admin

# Create the Placement API service
openstack service create --name placement --description "Placement API" placement

# Create the Placement API endpoints
openstack endpoint create --region RegionOne placement public http://kubesphere-master:8778
openstack endpoint create --region RegionOne placement internal http://kubesphere-master:8778
openstack endpoint create --region RegionOne placement admin http://kubesphere-master:8778

# Verify
openstack service list
openstack user list
openstack endpoint list

2.3.3 Install the Placement Service

# Install
yum install openstack-placement-api -y

# Configure
vim /etc/placement/placement.conf
[placement_database]
connection = mysql+pymysql://placement:MysqlP1acement@kubesphere-master/placement

[api]
auth_strategy = keystone

[keystone_authtoken]
auth_url = http://kubesphere-master:5000/v3
memcached_servers = kubesphere-master:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = P1acementPwd
Notes: replace PLACEMENT_PASS with the password chosen for the placement user in the Identity service (P1acementPwd above). The username, password, project_domain_name and user_domain_name values must match what was configured for placement in Keystone.

2.3.4 Sync the Placement Schema to the Database

su -s /bin/sh -c "placement-manage db sync" placement

mysql -uroot -pAdmin@h3c
> use placement;
> show tables;

2.3.5 Start the Service

# Restart httpd to load the Placement service
systemctl restart httpd

2.3.6 Verify the Deployment

. admin-openrc

placement-status upgrade check
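
Two more quick checks: the endpoint should return a JSON version document, and resource providers can be listed once the osc-placement client plugin is installed (an assumption here; on CentOS 8 it is packaged as python3-osc-placement):

curl http://kubesphere-master:8778
openstack resource provider list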

2.4 Nova Component

2.4.1 Controller Node

Compute (Nova): the compute service, the core of OpenStack (client CLI -> Nova -> libvirt -> KVM); the component that talks to the hypervisor

2.4.1.1 MySQL Database Setup
mysql -u root -p'Admin@h3c'

# Create the Nova databases
> CREATE DATABASE nova_api;
> CREATE DATABASE nova;
> CREATE DATABASE nova_cell0;

# Create the database user and grant privileges
> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'MysqlN0vaPwd';
> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'MysqlN0vaPwd';

> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'MysqlN0vaPwd';
> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'MysqlN0vaPwd';

> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'MysqlN0vaPwd';
> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'MysqlN0vaPwd';

2.4.1.2 Register Nova with Keystone
# Load the Keystone environment
. admin-openrc

# Create the nova user
openstack user create --domain default --password-prompt nova
User Password: N0vaPwd
Repeat User Password: N0vaPwd
# Password: N0vaPwd

# Grant the nova user the admin role on the service project
openstack role add --project service --user nova admin

# Create the nova service
openstack service create --name nova --description "OpenStack Compute" compute

# Create the compute API endpoints
openstack endpoint create --region RegionOne compute public http://kubesphere-master:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://kubesphere-master:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://kubesphere-master:8774/v2.1

2.4.1.3 Install the Nova Services
# Install
yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler -y

# Configure
vim /etc/nova/nova.conf
# Change as follows
[DEFAULT]
my_ip = 9.136.142.148
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:Aa123456@kubesphere-master:5672/

[api_database]
connection = mysql+pymysql://nova:MysqlN0vaPwd@kubesphere-master/nova_api

[database]
connection = mysql+pymysql://nova:MysqlN0vaPwd@kubesphere-master/nova

[api]
auth_strategy = keystone

[keystone_authtoken]
# Keystone settings
www_authenticate_uri = http://kubesphere-master:5000/
auth_url = http://kubesphere-master:5000/
memcached_servers = kubesphere-master:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = N0vaPwd

[vnc]
enabled = true
# VNC settings
server_listen = $my_ip
server_proxyclient_address = $my_ip

[glance]
# Glance integration
api_servers = http://kubesphere-master:9292

[placement]
# Placement integration
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://kubesphere-master:5000/v3
username = placement
password = P1acementPwd

[oslo_concurrency]
# Local lock path
lock_path = /var/lib/nova/tmp

2.4.1.4 Sync the Nova Schema to the Database
# Load the environment
. admin-openrc

# nova-api
su -s /bin/sh -c "nova-manage api_db sync" nova

# Register the cell0 database
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova

# Create the cell1 cell (warnings can be ignored)
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova

# nova
su -s /bin/sh -c "nova-manage db sync" nova

# Verify
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova

2.4.1.5 Start the Services
systemctl enable \
    openstack-nova-api.service \
    openstack-nova-scheduler.service \
    openstack-nova-conductor.service \
    openstack-nova-novncproxy.service

systemctl start \
    openstack-nova-api.service \
    openstack-nova-scheduler.service \
    openstack-nova-conductor.service \
    openstack-nova-novncproxy.service

2.4.1.6 Verify the Deployment
openstack project list
openstack service list
openstack user list

2.4.2 Compute Node

2.4.2.1 Install the Nova Compute Service
# Install
yum install openstack-nova-compute -y

# Configure
vim /etc/nova/nova.conf
[DEFAULT]
# Enable only the compute and metadata APIs
enabled_apis = osapi_compute,metadata
# Message queue
transport_url = rabbit://openstack:Aa123456@kubesphere-master
# my_ip = the node's own IP
my_ip = 9.136.142.148

# Keystone settings
[api]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://kubesphere-master:5000/
auth_url = http://kubesphere-master:5000/
memcached_servers = kubesphere-master:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = N0vaPwd

[vnc]
enabled = true
server_listen = 0.0.0.0
# server_proxyclient_address = $my_ip
server_proxyclient_address = 9.136.142.148
novncproxy_base_url = http://9.136.142.148:6080/vnc_auto.html

# Glance settings
[glance]
api_servers = http://kubesphere-master:9292

# Local lock path
[oslo_concurrency]
lock_path = /var/lib/nova/tmp

# Placement settings
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://kubesphere-master:5000/v3
username = placement
password = P1acementPwd

# Drive the hypervisor through libvirt: choose kvm if the hardware supports virtualization, otherwise qemu; check with: egrep -c '(vmx|svm)' /proc/cpuinfo (0 means no hardware support)
[libvirt]
virt_type = kvm
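
A small sketch that picks virt_type automatically from the CPU-flag check above (assumes the [libvirt] section already contains a virt_type line, as shown):

if [ "$(egrep -c '(vmx|svm)' /proc/cpuinfo)" -eq 0 ]; then
    # No VT-x/AMD-V: fall back to full emulation
    sed -i 's/^virt_type = .*/virt_type = qemu/' /etc/nova/nova.conf
fi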

2.4.2.2 Start the Services
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl restart libvirtd.service openstack-nova-compute.service
# If the service fails to start and the log shows a resource-provider request failure like the following
# ERROR nova.compute.manager nova.exception.ResourceProviderRetrievalFailed: Failed to get resource provider with UUID 7f58ca50-e98e-41b1-aa8b-9d227ff75aa2

# Fix:
# On the controller node, configure httpd
vim /etc/httpd/conf.d/00-placement-api.conf
# Below the placement-api alias
Alias /placement-api /usr/bin/placement-api
<Location /placement-api>
SetHandler wsgi-script
Options +ExecCGI
WSGIProcessGroup placement-api
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
</Location>
# Add
<Directory /usr/bin>
<IfVersion >= 2.4>
   Require all granted
</IfVersion>
<IfVersion < 2.4>
   Order allow,deny
   Allow from all
</IfVersion>
</Directory>

# Restart the service
systemctl restart httpd

2.4.3 Add the Compute Node to the Cell Database

  • Controller node
  • Newly registered nodes must be discovered before they come online; discovery can also be automated on the controller (see the end of this section)
# Load the environment
. admin-openrc

# List nova compute services
openstack compute service list --service nova-compute

# Discover compute hosts
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova

# Or enable periodic automatic discovery on the controller:
vim /etc/nova/nova.conf

[scheduler]
discover_hosts_in_cells_interval = 300

2.4.4 Verify the Deployment

  • Controller node
. admin-openrc

openstack compute service list
openstack catalog list
openstack image list
# Check whether anything reports an error
nova-status upgrade check

2.5 Neutron Component

Networking (Neutron): the networking service, a component driven by vendors such as Huawei and Cisco

2.5.1 Controller Node

2.5.1.1 MySQL Database Setup
mysql -u root -p'Admin@h3c'

> CREATE DATABASE neutron;
> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'MysqlNeuto0n';
> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'MysqlNeuto0n';

2.5.1.2 Register Neutron with Keystone
# Load the admin environment
. admin-openrc
# Create the neutron user
openstack user create --domain default --password-prompt neutron
User Password: Neutr0nPwd
Repeat User Password: Neutr0nPwd
# Password: Neutr0nPwd

# Grant the neutron user the admin role on the service project
openstack role add --project service --user neutron admin

# Create the neutron service
openstack service create --name neutron --description "OpenStack Networking" network

## network is the service type
# Create the network API endpoints
openstack endpoint create --region RegionOne network public http://kubesphere-master:9696
openstack endpoint create --region RegionOne network internal http://kubesphere-master:9696
openstack endpoint create --region RegionOne network admin http://kubesphere-master:9696

2.5.1.3 Install the Neutron Services
  • Controller node
# Step 1: install
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y

# Step 2: configure
vim /etc/neutron/neutron.conf
[database]
connection = mysql+pymysql://neutron:MysqlNeuto0n@kubesphere-master/neutron

[DEFAULT]
# Enable the ML2 plug-in, the router service, and overlapping IPs
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true

# Message queue
transport_url = rabbit://openstack:Aa123456@kubesphere-master

# Keystone
auth_strategy = keystone

# Notify Nova of network topology changes
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true

[keystone_authtoken]
www_authenticate_uri = http://kubesphere-master:5000
auth_url = http://kubesphere-master:5000
memcached_servers = kubesphere-master:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = Neutr0nPwd


[nova]
auth_url = http://kubesphere-master:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = N0vaPwd


[oslo_concurrency]
# Local lock path
lock_path = /var/lib/neutron/tmp

# Step 3: configure the ML2 plug-in
vim /etc/neutron/plugins/ml2/ml2_conf.ini 
[ml2]
# Enable the flat, VLAN and VXLAN type drivers
type_drivers = flat,vlan,vxlan

# Enable VXLAN self-service networks
tenant_network_types = vxlan

# Enable the Linux bridge and L2 population mechanism drivers
mechanism_drivers = linuxbridge,l2population

# Enable the port security extension driver
extension_drivers = port_security

[ml2_type_flat]
# Map the provider physical network as a flat network
flat_networks = provider

[ml2_type_vxlan]
# VXLAN network identifier range for self-service networks
vni_ranges = 1:1000

[securitygroup]
# Enable ipset for more efficient security-group rules
enable_ipset = true
# Warning: after the ML2 plug-in is configured, removing values from the type_drivers option can lead to database inconsistency

2.5.1.4 Configure the Bridge Agents
  • Linux bridge agent
  • Layer-3 agent
  • DHCP agent
  • Metadata agent
# Configure the Linux bridge agent
vim  /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:eth1

[vxlan]
enable_vxlan = true
local_ip = 9.136.142.148
l2_population = true

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

# Replace PROVIDER_INTERFACE_NAME (eth1 above) with the name of the local provider NIC
# Replace OVERLAY_INTERFACE_IP_ADDRESS with the IP of the underlying physical interface that carries the overlay traffic; the example architecture tunnels over the management interface, so use the controller's management IP
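
If unsure which interface name or IP applies, a quick way to list candidates (names such as eth1 vary per host):

ip -br addr show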

# Kernel parameters on the controller node
cat >> /etc/sysctl.conf <<-'EOF'

# Linux bridge agent BEGIN
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
# Linux bridge agent END
EOF

# Load the module for the current boot
modprobe br_netfilter

# Load the module persistently
cat > /etc/modules-load.d/neutron-bridge.conf <<EOF 
br_netfilter
EOF
## Enable at boot
systemctl restart systemd-modules-load
systemctl enable systemd-modules-load

sysctl -p

# Configure the layer-3 agent
vim /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = linuxbridge

# Configure the DHCP agent
vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true

# Configure the metadata agent (controller node)
vim /etc/neutron/metadata_agent.ini

[DEFAULT]
nova_metadata_host = kubesphere-master
metadata_proxy_shared_secret = MetaAa123456Pwd

# Configure Nova to use Neutron
vim /etc/nova/nova.conf

[neutron]
auth_url = http://kubesphere-master:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = Neutr0nPwd
service_metadata_proxy = true
metadata_proxy_shared_secret = MetaAa123456Pwd

# Replace METADATA_SECRET with the secret you chose for the metadata proxy.

2.5.1.5 Sync the Neutron Schema to the Database
# The network service init scripts expect /etc/neutron/plugin.ini to be a symlink to the ML2 plug-in configuration
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

2.5.1.6 Start the Services
systemctl restart openstack-nova-api.service

systemctl enable neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service

systemctl start neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service

systemctl enable neutron-l3-agent.service && systemctl start neutron-l3-agent.service
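
Verify the agents registered (the metadata, DHCP, L3 and Linux bridge agents should all show as alive):

. admin-openrc
openstack network agent list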

2.5.2 Compute Node

2.5.2.1 Install the Neutron Agent
# Step 1: install
yum install openstack-neutron-linuxbridge ebtables ipset -y

# Step 2: configure
vim /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:Aa123456@kubesphere-master
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://kubesphere-master:5000
auth_url = http://kubesphere-master:5000
memcached_servers = kubesphere-master:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = Neutr0nPwd

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

# Step 3: configure the Linux bridge agent
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:eth1

[vxlan]
enable_vxlan = true
local_ip = OVERLAY_INTERFACE_IP_ADDRESS
l2_population = true

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

# Step 4: kernel parameters
vim /etc/sysctl.conf
# Linux bridge agent BEGIN
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
# Linux bridge agent END

sysctl -p
# The br_netfilter module must be loaded; persist it the same way as on the controller node

# Step 5: point the compute node's Nova at Neutron
vim /etc/nova/nova.conf
[neutron]
auth_url = http://kubesphere-master:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = Neutr0nPwd

2.5.2.2 Start the Services
# Step 1: restart Nova
systemctl restart openstack-nova-compute.service

# Step 2: enable and start the bridge agent
systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service

2.6 Cinder Component

Block Storage (Cinder): block storage, backed by iSCSI

2.6.1 Controller Node

2.6.1.1 MySQL Database Setup
mysql -uroot -pAdmin@h3c

> CREATE DATABASE cinder;
> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'MysqlC1nder';
> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'MysqlC1nder';

2.6.1.2 Register Cinder with Keystone
# Step 1: load the environment
. admin-openrc

# Step 2: create the cinder user
openstack user create --domain default --password-prompt cinder
User Password: C1nderPwd
Repeat User Password: C1nderPwd
# Password: C1nderPwd

# Step 3: grant the cinder user the admin role on the service project
openstack role add --project service --user cinder admin

# Step 4: create the cinderv2 and cinderv3 services
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3

# Step 5: create the volume API endpoints
openstack endpoint create --region RegionOne volumev2 public http://kubesphere-master:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://kubesphere-master:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://kubesphere-master:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://kubesphere-master:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://kubesphere-master:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://kubesphere-master:8776/v3/%\(project_id\)s

2.6.1.3 Install the Cinder Services
# Step 1: install
yum install openstack-cinder -y

# Step 2: configure
vim /etc/cinder/cinder.conf
[database]
connection = mysql+pymysql://cinder:MysqlC1nder@kubesphere-master/cinder

[DEFAULT]
my_ip = 9.136.142.148
transport_url = rabbit://openstack:Aa123456@kubesphere-master
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://kubesphere-master:5000
auth_url = http://kubesphere-master:5000
memcached_servers = kubesphere-master:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = C1nderPwd

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

2.6.1.4 Sync the Cinder Schema to the Database
su -s /bin/sh -c "cinder-manage db sync" cinder

2.6.1.5 Start the Services
# Step 1: configure Nova
vim /etc/nova/nova.conf 
[cinder]
os_region_name = RegionOne

# Step 2: restart the Nova API
systemctl restart openstack-nova-api.service

# Step 3: enable and start the Cinder services
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service

2.6.2 Storage Node (Optional)

2.6.2.1 Install the LVM Tools
yum install lvm2 device-mapper-persistent-data -y

2.6.2.2 Create the LVM Volume Group
# A data disk has been attached as drive /dev/sdb
pvcreate /dev/sdb
vgcreate cinder-volumes /dev/sdb

vim /etc/lvm/lvm.conf
# Configure as follows (accept sdb, reject everything else)
devices {
        filter = [ "a/sdb/", "r/.*/" ]
}

# If there are also sdc, sdd, ..., list them comma-separated and keep r/.*/ last, as in the example below
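
For example, with an additional (hypothetical) data disk sdc:

filter = [ "a/sdb/", "a/sdc/", "r/.*/" ]

Afterwards, vgs cinder-volumes should show the new volume group.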

2.6.2.3 Install Cinder
yum install openstack-cinder targetcli python3-keystone -y

2.6.2.4 Configure Cinder
vim /etc/cinder/cinder.conf
[database]
connection = mysql+pymysql://cinder:MysqlC1nder@kubesphere-master/cinder

[DEFAULT]
my_ip = <node IP>
transport_url = rabbit://openstack:Aa123456@kubesphere-master
auth_strategy = keystone
glance_api_servers = http://kubesphere-master:9292
enabled_backends = lvm

[keystone_authtoken]
www_authenticate_uri = http://kubesphere-master:5000
auth_url = http://kubesphere-master:5000
memcached_servers = kubesphere-master:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = C1nderPwd

[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = lioadm

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

2.6.2.5 Start Cinder
systemctl enable openstack-cinder-volume.service target.service
systemctl start openstack-cinder-volume.service target.service
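
Back on the controller, a quick check that the new backend registered (the cinder-volume service for the lvm backend should show State up):

. admin-openrc
openstack volume service list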

2.6.3 Backup Node (Optional)

2.6.3.1 Install Cinder
yum install -y openstack-cinder

2.6.3.2 Configure Cinder
vim /etc/cinder/cinder.conf
[DEFAULT]
backup_driver = cinder.backup.drivers.swift.SwiftBackupDriver
backup_swift_url = SWIFT_URL

# SWIFT_URL is the URL of the Object Storage service; look it up from the controller with:
openstack catalog show object-store

2.6.3.3 Start the Service
systemctl enable openstack-cinder-backup.service
systemctl start openstack-cinder-backup.service

2.7 Horizon Component

Dashboard (Horizon): the web dashboard; the browser-based UI for OpenStack

  • Controller node

2.7.1 Install Horizon

yum install openstack-dashboard -y

2.7.2 Configure Horizon

vim /etc/openstack-dashboard/local_settings

OPENSTACK_HOST = "kubesphere-master"

ALLOWED_HOSTS = ['*']

SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

CACHES = {
    'default': {
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
         'LOCATION': 'kubesphere-master:11211',
    }
}

OPENSTACK_KEYSTONE_URL = "http://%s/identity/v3" % OPENSTACK_HOST
# If Keystone only listens on :5000 as configured earlier, use instead:
# OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST

OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True

OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 3,
}

OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"

OPENSTACK_KEYSTONE_DEFAULT_ROLE = "admin"

OPENSTACK_NEUTRON_NETWORK = {
    'enable_router': False,
    'enable_quotas': False,
    'enable_distributed_router': False,
    'enable_ha_router': False,
    'enable_lb': False,
    'enable_firewall': False,
    'enable_vpn': False,
    'enable_fip_topology_check': False,
}

TIME_ZONE = "Asia/Shanghai"

Bug workaround: add WSGIApplicationGroup %{GLOBAL} to the httpd configuration

# Edit the configuration file
vim /etc/httpd/conf.d/openstack-dashboard.conf
# Add
WSGIApplicationGroup %{GLOBAL}

# Restart the services
systemctl restart httpd.service memcached.service

2.7.3 Verify Horizon

http://kubesphere-master/dashboard
http://9.136.142.148/dashboard/

2.7.4 Troubleshooting

  • The default httpd dashboard configuration cannot serve the Horizon home page; regenerate the httpd dashboard configuration
# Symlink the dashboard configuration
ln -s /etc/openstack-dashboard /usr/share/openstack-dashboard/openstack_dashboard/conf

# Regenerate the Apache configuration
cd /usr/share/openstack-dashboard
python3 manage.py make_web_conf --apache > /etc/httpd/conf.d/openstack-dashboard.conf

# Restart the services
systemctl restart httpd.service memcached.service
vim /etc/openstack-dashboard/local_settings
# Append at the bottom
WEBROOT = '/dashboard/'

vim /etc/httpd/conf.d/openstack-dashboard.conf
# Change as follows
WSGIScriptAlias /dashboard /usr/share/openstack-dashboard/openstack_dashboard/wsgi.py
Alias /dashboard/static /usr/share/openstack-dashboard/static

# Restart the services
systemctl restart httpd.service memcached.service

3 Common Operations

3.1 CentOS Image Download

https://cloud.centos.org/centos/
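
A sketch of importing a downloaded cloud image into Glance; <IMAGE>.qcow2 is a placeholder, pick an actual GenericCloud qcow2 from the URL above:

wget https://cloud.centos.org/centos/8/x86_64/images/<IMAGE>.qcow2
openstack image create --disk-format qcow2 --container-format bare \
  --public --file <IMAGE>.qcow2 centos8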

3.2 Instance Password Injection

  • compute node

    vim /etc/nova/nova.conf
    ...
    inject_password=true
    inject_partition = -1

  • horizon node

    vim /etc/openstack-dashboard/local_settings

    OPENSTACK_HYPERVISOR_FEATURES = {
        'can_set_password': True,
    }

  • Procedure

    Create instance --> Configuration --> add the following script (Aa123456 is the default password) and tick Configuration Drive

    #!/bin/bash
    echo Aa123456 | passwd --stdin root
    sed -i 's#^PasswordAuthentication.*$#PasswordAuthentication\ yes#g' /etc/ssh/sshd_config
    systemctl restart sshd

3.3 Network Configuration Workflow

Example
Test environment; the management subnet temporarily stands in for the external network.
Network topology:
WAN network           -- 172.16.20.0/24
Tenant LAN network    -- 10.10.0.0/16

Configuration workflow:
As the admin administrator

- Create the WAN network
  Log in to OpenStack --> Admin --> Networks --> Create Network:
  Name: your choice, e.g. wan
  Project: the owning project, here admin
  Type: flat
  Physical network: provider (matches flat_networks in the Neutron configuration)
  Remaining checkboxes: Enable Admin State, Shared, External Network

- Configure the tenant-facing WAN subnet
  Log in to OpenStack --> Project --> Networks --> in the wan network's Actions choose Create Subnet:
  Subnet name: your choice, e.g. wan_subnet
  Network address: 172.16.20.0/24
  IP version: IPv4
  Gateway: 172.16.20.1

  Next, subnet details
  DHCP pool: tick Enable
  Allocation pool: 172.16.20.150,172.16.20.157            // simulated public IPs
  DNS: your choice
  Create

As the tenant operator

- Create the tenant network

  - Create the LAN network
    Log in to OpenStack --> Project --> Networks --> Create Network:
    Name: your choice, e.g. project_lan
    Checkboxes: Enable Admin State, Create Subnet

    Next, create the subnet
    Name: your choice, e.g. project_lan_subnet
    Network address: 10.10.0.0/16
    IP version: IPv4
    Gateway IP: 10.10.0.1

    Next, DHCP settings
    Enable DHCP: tick
    Allocation pool: 10.10.0.2,10.10.0.254
    DNS servers: your choice
    Create

  - Create the tenant LAN router
    Log in to OpenStack --> Project --> Network --> Network Topology -->

    - Create the router
      Router name: project_route
      Enable Admin State: tick
      External network: select the WAN network defined earlier
      Create

    - Configure the router
      Hover over the newly created router:
      choose Add Interface
      Subnet: select the custom subnet
      IP address: enter the gateway address of the custom subnet
      Submit

# At this point the simplest internal/external network setup is complete
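
The same flow can be scripted with the OpenStack CLI; a hedged equivalent of the dashboard steps above (network, subnet and router names match the example):

# WAN (external, flat on physical network "provider") -- run as admin
. admin-openrc
openstack network create --share --external \
  --provider-physical-network provider --provider-network-type flat wan
openstack subnet create --network wan --subnet-range 172.16.20.0/24 \
  --gateway 172.16.20.1 \
  --allocation-pool start=172.16.20.150,end=172.16.20.157 wan_subnet

# Tenant LAN plus router
openstack network create project_lan
openstack subnet create --network project_lan --subnet-range 10.10.0.0/16 \
  --gateway 10.10.0.1 project_lan_subnet
openstack router create project_route
openstack router set project_route --external-gateway wan
openstack router add subnet project_route project_lan_subnet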