1. Deployment Environment
- Controller node: 172.20.10.120 controller
- Compute node: 172.20.10.121 compute01
- Compute node: 172.20.10.122 compute02
- ODL node: 172.20.10.131 ODL
- OS: ubuntu-18.04.2
- CPU: 4 cores
- Memory: 32 GB
- Disk: 100 GB
2. Preparation [all nodes]
(1) Install ubuntu-18.04.2
(2) Configure the network interfaces
vim /etc/netplan/50-cloud-init.yaml
network:
  ethernets:
    ens160:
      addresses:
        - 172.20.10.120/16
      gateway4: 172.20.0.1
      nameservers:
        addresses: [114.114.114.114, 8.8.8.8]
    ens192:
      addresses:
        - 172.16.10.120/24
      nameservers: {}
    ens224:
      dhcp4: false
  version: 2
#### Apply the configuration
netplan apply
#### Check the current network configuration
ip addr
(3) Set the hostname
#### Open the cloud-init configuration file
vim /etc/cloud/cloud.cfg
#### Set preserve_hostname to true
preserve_hostname: true
#### Save and quit (:wq), then open the hostname file
vim /etc/hostname
#### Apply the new hostname immediately
hostnamectl set-hostname <hostname>
#### Open the hosts file
vim /etc/hosts
#### Add the cluster hosts
172.20.10.120 controller
172.20.10.121 compute01
172.20.10.122 compute02
(4) Set the time zone
tzselect
sudo ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
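Optionally, a quick check (not in the original steps) that the time zone change took effect:
#### Confirm the system time zone
timedatectl | grep "Time zone"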
(5) Switch to the Aliyun apt mirror
#### Back up the original sources list
mv /etc/apt/sources.list /etc/apt/sources.list.bak
#### Open the apt sources file
vim /etc/apt/sources.list
#### Replace the mirror entries
deb http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse
3. OpenStack Installation [all nodes]
#### Add the Rocky cloud archive repository
add-apt-repository cloud-archive:rocky
#### Update the package lists and upgrade installed packages
apt update && apt dist-upgrade
#### Install the OpenStack client
apt install python-openstackclient
4. Base Services Installation [controller node]
4.1 Install MySQL
(1) Install
apt install mariadb-server python-pymysql
(2) Configure the MySQL listen address
##### Open the configuration file
vim /etc/mysql/mariadb.conf.d/99-openstack.cnf
##### Add the following options
[mysqld]
bind-address = 172.20.10.120
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
(3) Restart the service
service mysql restart
(4) Set the database root password
mysql_secure_installation
4.2 Install RabbitMQ
#### Install the package
apt install rabbitmq-server
#### Add the openstack user and set its password
rabbitmqctl add_user openstack openstack
#### Grant the user full permissions
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
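Optionally, verify the user and its permissions (a quick check, not part of the original steps):
#### List RabbitMQ users and the permissions granted on the default vhost
rabbitmqctl list_users
rabbitmqctl list_permissions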
4.3 Install Memcached
#### Install the packages
apt install memcached python-memcache
#### Configure the listen address: vim /etc/memcached.conf
-l 172.20.10.120
#### Restart the service
service memcached restart
4.4 Install Etcd
#### Install the package
apt install etcd
#### Configure etcd: vim /etc/default/etcd
#### Adjust the IP addresses to match your environment
ETCD_NAME="controller"
ETCD_DATA_DIR="/var/lib/etcd"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER="controller=http://172.20.10.120:2380"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://172.20.10.120:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://172.20.10.120:2379"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://172.20.10.120:2379"
#### Enable the service at boot and start it
systemctl enable etcd
systemctl start etcd
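As a quick sanity check (not in the original steps; the etcdctl v2 syntax below assumes the stock Ubuntu 18.04 etcd package), confirm etcd is running and healthy:
#### Check the service and the cluster health
systemctl status etcd
etcdctl --endpoints http://172.20.10.120:2379 cluster-health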
5. Cluster Services Installation
5.1 Keystone Installation [controller node]
(1) Create the Keystone database
#### Enter the database
mysql -u root
#### Create the database and user, and grant privileges
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'controller' IDENTIFIED BY 'keystone';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone';
#### Exit with Ctrl+D
(2) Install and configure apache2
#### Install the packages
apt install apache2 libapache2-mod-wsgi
#### Configure apache: vim /etc/apache2/apache2.conf
ServerName controller
#### Restart the apache service
service apache2 restart
(3) Install and configure keystone
#### Install the packages
apt install keystone apache2 libapache2-mod-wsgi
#### Configure with crudini (or edit the file manually)
crudini --set /etc/keystone/keystone.conf database connection mysql+pymysql://keystone:keystone@controller/keystone
crudini --set /etc/keystone/keystone.conf token provider fernet
------------------------------------------------------------------------
#### Check the configuration file: cat /etc/keystone/keystone.conf | grep ^[\[a-z]
------------------------------------------------------------------------
[DEFAULT]
log_dir = /var/log/keystone
[application_credential]
[assignment]
[auth]
[cache]
[catalog]
[cors]
[credential]
[database]
connection = mysql+pymysql://keystone:keystone@controller/keystone
[domain_config]
[endpoint_filter]
[endpoint_policy]
[eventlet_server]
[extra_headers]
[federation]
[fernet_tokens]
[healthcheck]
[identity]
[identity_mapping]
[ldap]
[matchmaker_redis]
[memcache]
[oauth1]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[policy]
[profiler]
[resource]
[revoke]
[role]
[saml]
[security_compliance]
[shadow_users]
[signing]
[token]
provider = fernet
[tokenless_auth]
[trust]
[unified_limit]
[wsgi]
(4) Sync the keystone database
su -s /bin/sh -c "keystone-manage db_sync" keystone
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
keystone-manage bootstrap --bootstrap-password admin --bootstrap-admin-url http://controller:5000/v3/ --bootstrap-internal-url http://controller:5000/v3/ --bootstrap-public-url http://controller:5000/v3/ --bootstrap-region-id RegionOne
(5) Create a project and role from the command line
#### Set temporary admin credentials in the environment
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
#### Create the service project
openstack project create --domain default --description "Service Project" service
#### Create the user role
openstack role create user
#### Verify by requesting an admin authentication token
openstack --os-auth-url http://controller:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name admin --os-username admin token issue
#### Enter the password
Password: admin
#### Output:
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires | 2019-05-19T08:46:38+0000 |
| id | gAAAAABc4QneH4P5pjtrZcej_vEzHKo1J1h9WZYz2Zx0skfd70EGwSKhrnmVm9h0LY-rlJau6Br11nv1P1G4lxpavY_5ear5hQRuvFKDveN7o_xr6vQ1mw8FNfqxc0g9fR69b1shd5YIEJWg-IerhFh1y4OanBmtESkOv3B_mT-5D-g-eNRp1kU |
| project_id | fe13643127904142b74c0bfa2ea34794 |
| user_id | 28022c0955b04ffb884a90ef97142419 |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
(6) Create the user environment script
#### Create the admin environment script: vim admin-openrc
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
#### Load the environment variables
source admin-openrc    # or: . admin-openrc
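As a quick check (not part of the original steps), confirm the credentials work after sourcing the script:
#### This should print a token table without prompting for a password
. admin-openrc
openstack token issue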
5.2 Glance Installation [controller node]
(1) Create the Glance database
#### Enter the database
mysql -u root
#### Create the database and user, and grant privileges
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'controller' IDENTIFIED BY 'glance';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance';
#### Exit with Ctrl+D
(2) Create the glance user
##### Source the environment variables first if you have not already
. admin-openrc
#### Create the user and grant it the admin role
openstack user create --domain default --password glance glance
openstack role add --project service --user glance admin
#### Create the image service
openstack service create --name glance --description "OpenStack Image" image
#### Create the service endpoints for each interface
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
(3) Install and configure glance
#### Install the package
apt install glance
#### Modify the configuration file with crudini
crudini --set /etc/glance/glance-api.conf database connection mysql+pymysql://glance:glance@controller/glance
crudini --set /etc/glance/glance-api.conf keystone_authtoken www_authenticate_uri http://controller:5000
crudini --set /etc/glance/glance-api.conf keystone_authtoken auth_url http://controller:5000
crudini --set /etc/glance/glance-api.conf keystone_authtoken memcached_servers controller:11211
crudini --set /etc/glance/glance-api.conf keystone_authtoken auth_type password
crudini --set /etc/glance/glance-api.conf keystone_authtoken project_domain_name Default
crudini --set /etc/glance/glance-api.conf keystone_authtoken user_domain_name Default
crudini --set /etc/glance/glance-api.conf keystone_authtoken project_name service
crudini --set /etc/glance/glance-api.conf keystone_authtoken username glance
crudini --set /etc/glance/glance-api.conf keystone_authtoken password glance
crudini --set /etc/glance/glance-api.conf paste_deploy flavor keystone
crudini --set /etc/glance/glance-api.conf glance_store stores file,http
crudini --set /etc/glance/glance-api.conf glance_store default_store file
crudini --set /etc/glance/glance-api.conf glance_store filesystem_store_datadir /var/lib/glance/images/
------------------------------------------------------------------------
#### Check the configuration file: cat /etc/glance/glance-api.conf | grep ^[\[a-z]
------------------------------------------------------------------------
[DEFAULT]
[cors]
[database]
connection = mysql+pymysql://glance:glance@controller/glance
backend = sqlalchemy
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
[image_format]
disk_formats = ami,ari,aki,vhd,vhdx,vmdk,raw,qcow2,vdi,iso,ploop.root-tar
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = glance
[matchmaker_redis]
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
(4) Modify the glance-registry configuration file
------------------------------------------------------------------------
#### Edit the configuration file: vim /etc/glance/glance-registry.conf
------------------------------------------------------------------------
[database]
# connection = sqlite:////var/lib/glance/glance.sqlite
connection = mysql+pymysql://glance:glance@controller/glance
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = glance
[paste_deploy]
flavor = keystone
------------------------------------------------------------------------
#### Check the configuration file: cat /etc/glance/glance-registry.conf | grep ^[\[a-z]
------------------------------------------------------------------------
[DEFAULT]
[database]
connection = mysql+pymysql://glance:glance@controller/glance
backend = sqlalchemy
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = glance
[matchmaker_redis]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
(5) Sync the database
#### Sync
su -s /bin/sh -c "glance-manage db_sync" glance
#### Output:
2019-05-19 17:04:36.657 30932 INFO alembic.runtime.migration [-] Context impl MySQLImpl.
2019-05-19 17:04:36.658 30932 INFO alembic.runtime.migration [-] Will assume non-transactional DDL.
2019-05-19 17:04:36.668 30932 INFO alembic.runtime.migration [-] Context impl MySQLImpl.
2019-05-19 17:04:36.668 30932 INFO alembic.runtime.migration [-] Will assume non-transactional DDL.
......
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
Database is synced successfully.
(6) Restart the services
service glance-registry restart
service glance-api restart
(7) Download and add a test image
#### Download the CirrOS image
wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
#### Upload the image to the image service
openstack image create "cirros" --file cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --public
#### Verify that the image was added
openstack image list
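Optionally (not in the original text), the uploaded image should also appear as a file under the configured filesystem store, named by its UUID:
#### List the files under filesystem_store_datadir
ls -lh /var/lib/glance/images/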
5.3 Nova Installation [controller node]
(1) Create the Nova databases
#### Enter the database
mysql -u root
#### Create the databases and users, and grant privileges
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
CREATE DATABASE placement;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'controller' IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'controller' IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'controller' IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'controller' IDENTIFIED BY 'placement';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'placement';
#### Exit with Ctrl+D
(2) Create the nova user
#### Create the user and grant it the admin role
openstack user create --domain default --password nova nova
openstack role add --project service --user nova admin
#### Create the compute service
openstack service create --name nova --description "OpenStack Compute" compute
#### Create the service endpoints for each interface
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
(3) Create the placement user
#### Create the user and grant it the admin role
openstack user create --domain default --password placement placement
openstack role add --project service --user placement admin
#### Create the service
openstack service create --name placement --description "Placement API" placement
#### Create the service endpoints for each interface
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
(4) Install and configure nova and placement
apt install nova-api nova-conductor nova-consoleauth nova-novncproxy nova-scheduler nova-placement-api
------------------------------------------------------------------------
#### Edit the configuration file: vim /etc/nova/nova.conf
------------------------------------------------------------------------
[api_database]
# connection = sqlite:////var/lib/nova/nova_api.sqlite
connection = mysql+pymysql://nova:nova@controller/nova_api
[database]
# connection = sqlite:////var/lib/nova/nova.sqlite
connection = mysql+pymysql://nova:nova@controller/nova
[placement_database]
connection = mysql+pymysql://placement:placement@controller/placement
[DEFAULT]
# log_dir = /var/log/nova
transport_url = rabbit://openstack:openstack@controller
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[DEFAULT]
my_ip = 172.20.10.120
[DEFAULT]
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
# os_region_name = openstack
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
------------------------------------------------------------------------
#### Check the configuration file: cat /etc/nova/nova.conf | grep ^[\[a-z]
------------------------------------------------------------------------
[DEFAULT]
lock_path = /var/lock/nova
state_path = /var/lib/nova
transport_url = rabbit://openstack:openstack@controller
my_ip = 172.20.10.120
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:nova@controller/nova_api
[barbican]
[cache]
[cells]
enable = False
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[database]
connection = mysql+pymysql://nova:nova@controller/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
[placement_database]
connection = mysql+pymysql://placement:placement@controller/placement
[powervm]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
[workarounds]
[wsgi]
[xenserver]
[xvp]
[zvm]
(5) Sync the databases
#### Sync
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
#### List the cells
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
#### Output (if there is no output, check the configuration or the logs):
+-------+--------------------------------------+------------------------------------+-------------------------------------------------+----------+
| Name | UUID | Transport URL | Database Connection | Disabled |
+-------+--------------------------------------+------------------------------------+-------------------------------------------------+----------+
| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@controller/nova_cell0 | False |
| cell1 | 460f9ad2-6b89-4467-b1a0-fcf44d1553fe | rabbit://openstack:****@controller | mysql+pymysql://nova:****@controller/nova | False |
+-------+--------------------------------------+------------------------------------+-------------------------------------------------+----------+
(6) Restart the services
service nova-api restart
service nova-consoleauth restart
service nova-scheduler restart
service nova-conductor restart
service nova-novncproxy restart
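A quick status check (a sketch, not in the original steps) to confirm the control-plane services came back up:
#### Each service should report "active"
for s in nova-api nova-consoleauth nova-scheduler nova-conductor nova-novncproxy; do systemctl is-active $s; done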
5.4 Nova Installation [compute nodes]
(1) Install nova-compute
apt install nova-compute
(2) Modify the nova configuration file
------------------------------------------------------------------------
#### Edit the configuration file: vim /etc/nova/nova.conf
------------------------------------------------------------------------
[DEFAULT]
# log_dir = /var/log/nova
transport_url = rabbit://openstack:openstack@controller
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[DEFAULT]
my_ip = 172.20.10.121
[DEFAULT]
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://172.20.10.120:6080/vnc_auto.html
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
# os_region_name = openstack
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
------------------------------------------------------------------------
#### Check the configuration file: cat /etc/nova/nova.conf | grep ^[\[a-z]
------------------------------------------------------------------------
[DEFAULT]
lock_path = /var/lock/nova
state_path = /var/lib/nova
transport_url = rabbit://openstack:openstack@controller
my_ip = 172.20.10.121
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
connection = sqlite:////var/lib/nova/nova_api.sqlite
[barbican]
[cache]
[cells]
enable = False
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[database]
connection = sqlite:////var/lib/nova/nova.sqlite
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
[placement_database]
[powervm]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
[zvm]
(3) Modify the nova-compute configuration file
------------------------------------------------------------------------
#### Check for hardware virtualization support: egrep -c '(vmx|svm)' /proc/cpuinfo
------------------------------------------------------------------------
0
------------------------------------------------------------------------
#### A result of 0 above means no hardware acceleration, so use qemu instead of kvm. Edit the configuration file: vim /etc/nova/nova-compute.conf
------------------------------------------------------------------------
[libvirt]
# virt_type=kvm
virt_type = qemu
------------------------------------------------------------------------
#### Check the configuration file: cat /etc/nova/nova-compute.conf | grep ^[\[a-z]
------------------------------------------------------------------------
[DEFAULT]
compute_driver=libvirt.LibvirtDriver
[libvirt]
virt_type = qemu
(4) Restart the service
service nova-compute restart
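If the compute service fails to start, an optional troubleshooting step (not in the original text) is to check its status and log:
#### Check the service status and recent log entries
systemctl status nova-compute
tail -n 50 /var/log/nova/nova-compute.log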
5.5 Add Compute Nodes [controller node]
(1) Check the services
#### List the compute services
openstack compute service list --service nova-compute
#### Output:
+----+--------------+-----------+------+---------+-------+----------------------------+
| ID | Binary | Host | Zone | Status | State | Updated At |
+----+--------------+-----------+------+---------+-------+----------------------------+
| 8 | nova-compute | compute01 | nova | enabled | up | 2019-05-19T11:53:09.000000 |
| 9 | nova-compute | compute02 | nova | enabled | up | 2019-05-19T11:53:10.000000 |
+----+--------------+-----------+------+---------+-------+----------------------------+
(2) Discover hosts
#### Discover
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
#### Output:
Found 2 cell mappings.
Skipping cell0 since it does not contain hosts.
Getting computes from cell 'cell1': 8b8d1b89-0ecb-4c1e-b9b0-95bd7af15309
Found 0 unmapped computes in cell: 8b8d1b89-0ecb-4c1e-b9b0-95bd7af15309
(3) Check the services again
#### List
openstack compute service list
#### Output:
+----+------------------+------------+----------+---------+-------+----------------------------+
| ID | Binary | Host | Zone | Status | State | Updated At |
+----+------------------+------------+----------+---------+-------+----------------------------+
| 1 | nova-scheduler | controller | internal | enabled | up | 2019-05-19T11:49:14.000000 |
| 5 | nova-consoleauth | controller | internal | enabled | up | 2019-05-19T11:49:09.000000 |
| 6 | nova-conductor | controller | internal | enabled | up | 2019-05-19T11:49:10.000000 |
| 8 | nova-compute | compute01 | nova | enabled | up | 2019-05-19T11:49:09.000000 |
| 9 | nova-compute | compute02 | nova | enabled | up | 2019-05-19T11:49:10.000000 |
+----+------------------+------------+----------+---------+-------+----------------------------+
(4) List all service endpoints
#### List
openstack catalog list
#### Output:
+-----------+-----------+-----------------------------------------+
| Name | Type | Endpoints |
+-----------+-----------+-----------------------------------------+
| keystone | identity | RegionOne |
| | | admin: http://controller:5000/v3/ |
| | | RegionOne |
| | | internal: http://controller:5000/v3/ |
| | | RegionOne |
| | | public: http://controller:5000/v3/ |
| | | |
| nova | compute | RegionOne |
| | | public: http://controller:8774/v2.1 |
| | | RegionOne |
| | | internal: http://controller:8774/v2.1 |
| | | RegionOne |
| | | admin: http://controller:8774/v2.1 |
| | | |
| placement | placement | RegionOne |
| | | admin: http://controller:8778 |
| | | RegionOne |
| | | public: http://controller:8778 |
| | | RegionOne |
| | | internal: http://controller:8778 |
| | | |
| glance | image | RegionOne |
| | | internal: http://controller:9292 |
| | | RegionOne |
| | | admin: http://controller:9292 |
| | | RegionOne |
| | | public: http://controller:9292 |
| | | |
+-----------+-----------+-----------------------------------------+
#### Troubleshooting
#### 1. If duplicate empty services appear in the list above, delete them as follows
#### 1.1 openstack service list
#### 1.2 Compare the two tables above and find the id of the service to delete
#### 1.3 openstack service delete <service-id>
#### 2. If duplicate endpoints appear in the table above, delete them in the database (a CLI alternative is sketched after this list)
#### 2.1 Endpoints are stored in the endpoint table of the keystone database; find the id of the endpoint to delete
#### 2.2 Enter the database: mysql -u root
#### 2.3 List the tables in the keystone database: use keystone; show tables;
#### 2.4 View the rows in the endpoint table: select * from endpoint;
#### 2.5 Delete the duplicate row by id: DELETE FROM endpoint WHERE id='<endpoint-id>';
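For duplicate endpoints, an alternative to raw SQL (a sketch, not from the original text) is the OpenStack client:
#### Find the duplicate endpoint id and delete it
openstack endpoint list
openstack endpoint delete <endpoint-id>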
(5) Nova status check
#### Check
nova-status upgrade check
#### Output:
+--------------------------------+
| Upgrade Check Results |
+--------------------------------+
| Check: Cells v2 |
| Result: Success |
| Details: None |
+--------------------------------+
| Check: Placement API |
| Result: Success |
| Details: None |
+--------------------------------+
| Check: Resource Providers |
| Result: Success |
| Details: None |
+--------------------------------+
| Check: Ironic Flavor Migration |
| Result: Success |
| Details: None |
+--------------------------------+
| Check: API Service Version |
| Result: Success |
| Details: None |
+--------------------------------+
| Check: Request Spec Migration |
| Result: Success |
| Details: None |
+--------------------------------+
| Check: Console Auths |
| Result: Success |
| Details: None |
+--------------------------------+
5.6 Neutron Installation [controller node]
(1) Create the Neutron database
#### Enter the database
mysql -u root
#### Create the database and user, and grant privileges
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'controller' IDENTIFIED BY 'neutron';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';
#### Exit with Ctrl+D
(2) Create the neutron user
#### Create the user (enter neutron as the password, to match the configuration files below) and grant it the admin role
openstack user create --domain default --password-prompt neutron
openstack role add --project service --user neutron admin
#### Create the service
openstack service create --name neutron --description "OpenStack Networking" network
#### Create the service endpoints for each interface
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
(3) Install
#### Install the packages
apt install neutron-server neutron-plugin-ml2 neutron-openvswitch-agent neutron-l3-agent neutron-dhcp-agent neutron-metadata-agent
#### Create the external bridge with OVS
ovs-vsctl add-br br-ext
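Optionally (not part of the original steps), verify that the bridge exists. Attaching a provider NIC to br-ext is only needed for external connectivity, and the interface name ens224 below is an assumption:
#### Confirm br-ext was created
ovs-vsctl show
#### Only if external connectivity through br-ext is required (ens224 is assumed)
# ovs-vsctl add-port br-ext ens224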
(4) Modify the neutron configuration file
------------------------------------------------------------------------
#### Edit the configuration file: vim /etc/neutron/neutron.conf
------------------------------------------------------------------------
[database]
# connection = sqlite:////var/lib/neutron/neutron.sqlite
connection = mysql+pymysql://neutron:neutron@controller/neutron
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
[DEFAULT]
transport_url = rabbit://openstack:openstack@controller
[DEFAULT]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[DEFAULT]
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[nova]
# ...
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
------------------------------------------------------------------------
#### Check the configuration file: cat /etc/neutron/neutron.conf | grep ^[\[a-z]
------------------------------------------------------------------------
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:openstack@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[agent]
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
[cors]
[database]
connection = mysql+pymysql://neutron:neutron@controller/neutron
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[matchmaker_redis]
[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[quotas]
[ssl]
(5) Modify the ml2 configuration file
------------------------------------------------------------------------
#### Edit the configuration file: vim /etc/neutron/plugins/ml2/ml2_conf.ini
------------------------------------------------------------------------
[ml2]
type_drivers = flat,vlan,vxlan
[ml2]
tenant_network_types = vxlan
[ml2]
mechanism_drivers = openvswitch,l2population
[ml2]
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_vxlan]
vni_ranges = 10001:20000
[securitygroup]
enable_ipset = true
------------------------------------------------------------------------
#### Check the configuration file: cat /etc/neutron/plugins/ml2/ml2_conf.ini | grep ^[\[a-z]
------------------------------------------------------------------------
[DEFAULT]
[l2pop]
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = openvswitch,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
[ml2_type_vxlan]
vni_ranges = 10001:20000
[securitygroup]
enable_ipset = true
(6) Modify the openvswitch_agent configuration file
------------------------------------------------------------------------
#### Edit the configuration file: vim /etc/neutron/plugins/ml2/openvswitch_agent.ini
------------------------------------------------------------------------
[ovs]
bridge_mappings = provider:br-ext
local_ip = 172.16.10.120
[agent]
tunnel_types = vxlan
l2_population = True
[securitygroup]
firewall_driver = iptables_hybrid
------------------------------------------------------------------------
#### Check the configuration file: cat /etc/neutron/plugins/ml2/openvswitch_agent.ini | grep ^[\[a-z]
------------------------------------------------------------------------
[DEFAULT]
[agent]
tunnel_types = vxlan
l2_population = True
[network_log]
[ovs]
bridge_mappings = provider:br-ext
local_ip = 172.16.10.120
[securitygroup]
firewall_driver = iptables_hybrid
[xenapi]
(7) Modify the l3_agent configuration file
------------------------------------------------------------------------
#### Edit the configuration file: vim /etc/neutron/l3_agent.ini
------------------------------------------------------------------------
[DEFAULT]
interface_driver = openvswitch
external_network_bridge =
------------------------------------------------------------------------
#### Check the configuration file: cat /etc/neutron/l3_agent.ini | grep ^[\[a-z]
------------------------------------------------------------------------
[DEFAULT]
interface_driver = openvswitch
external_network_bridge =
[agent]
[ovs]
(8) Modify the dhcp_agent configuration file
------------------------------------------------------------------------
#### Edit the configuration file: vim /etc/neutron/dhcp_agent.ini
------------------------------------------------------------------------
[DEFAULT]
interface_driver = openvswitch
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
------------------------------------------------------------------------
#### Check the configuration file: cat /etc/neutron/dhcp_agent.ini | grep ^[\[a-z]
------------------------------------------------------------------------
[DEFAULT]
interface_driver = openvswitch
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
[agent]
[ovs]
(9) Modify the metadata_agent configuration file
------------------------------------------------------------------------
#### Edit the configuration file: vim /etc/neutron/metadata_agent.ini
------------------------------------------------------------------------
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = metadata
------------------------------------------------------------------------
#### Check the configuration file: cat /etc/neutron/metadata_agent.ini | grep ^[\[a-z]
------------------------------------------------------------------------
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = metadata
[agent]
[cache]
(10) Modify the nova configuration file again
------------------------------------------------------------------------
#### Edit the configuration file: vim /etc/nova/nova.conf
#### Only modify the [neutron] section
------------------------------------------------------------------------
[neutron]
url = http://controller:9696
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = metadata
(11) Sync the database
#### Sync
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
#### Output:
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
Running upgrade for neutron ...
......
INFO [alembic.runtime.migration] Running upgrade 458aa42b14b -> f83a0b2964d0, rename tenant to project
INFO [alembic.runtime.migration] Running upgrade f83a0b2964d0 -> fd38cd995cc0, change shared attribute for firewall resource
OK
(12) Restart the services
service nova-api restart
service neutron-server restart
service neutron-openvswitch-agent restart
service neutron-dhcp-agent restart
service neutron-metadata-agent restart
service neutron-l3-agent restart
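An optional sanity check (not in the original steps) that the neutron server and its agents are responding:
#### The network extension list and the controller-side agents should be returned without errors
openstack extension list --network
openstack network agent list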
5.7 Neutron Installation [compute nodes]
(1) Install the OVS agent
apt install neutron-openvswitch-agent
(2) Modify the neutron configuration file
------------------------------------------------------------------------
#### Edit the configuration file: vim /etc/neutron/neutron.conf
------------------------------------------------------------------------
[database]
# connection = sqlite:////var/lib/neutron/neutron.sqlite
[DEFAULT]
transport_url = rabbit://openstack:openstack@controller
[DEFAULT]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
------------------------------------------------------------------------
#### Check the configuration file: cat /etc/neutron/neutron.conf | grep ^[\[a-z]
------------------------------------------------------------------------
[DEFAULT]
core_plugin = ml2
transport_url = rabbit://openstack:openstack@controller
auth_strategy = keystone
[agent]
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
[cors]
[database]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[quotas]
[ssl]
(3) Modify the openvswitch_agent configuration file
------------------------------------------------------------------------
#### Edit the configuration file: vim /etc/neutron/plugins/ml2/openvswitch_agent.ini
------------------------------------------------------------------------
[ovs]
# bridge_mappings = provider:br-ext
local_ip = 172.16.10.121
[agent]
tunnel_types = vxlan
l2_population = True
------------------------------------------------------------------------
#### Check the configuration file: cat /etc/neutron/plugins/ml2/openvswitch_agent.ini | grep ^[\[a-z]
------------------------------------------------------------------------
[DEFAULT]
[agent]
tunnel_types = vxlan
l2_population = True
[network_log]
[ovs]
local_ip = 172.16.10.121
[securitygroup]
[xenapi]
(4) Modify the nova configuration file again
Note: if the instance console in the dashboard later fails to display and reports that it cannot connect to the server, change controller in novncproxy_base_url = http://controller:6080/vnc_auto.html in this file to the controller node's IP address, 172.20.10.120.
------------------------------------------------------------------------
#### Edit the configuration file: vim /etc/nova/nova.conf
#### Only modify the [neutron] section
------------------------------------------------------------------------
[neutron]
url = http://controller:9696
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
(5) Restart the services
service nova-compute restart
service neutron-openvswitch-agent restart
(6) Back on the controller node, check the network agents
#### Check
openstack network agent list
#### Output:
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| 40f103b1-115b-40cd-a960-fa2111d68c40 | Open vSwitch agent | controller | None | :-) | UP | neutron-openvswitch-agent |
| 49671c5f-307d-4bb2-b5db-e4040e4b05dc | L3 agent | controller | nova | :-) | UP | neutron-l3-agent |
| 741916c3-c2ec-4a56-a24c-5f557ff42f9a | Metadata agent | controller | None | :-) | UP | neutron-metadata-agent |
| 8389c912-36c2-410d-b31e-2d7c56045c61 | Open vSwitch agent | compute01 | None | :-) | UP | neutron-openvswitch-agent |
| a4457089-9cc4-4ae4-9421-1f4af667b965 | Open vSwitch agent | compute02 | None | :-) | UP | neutron-openvswitch-agent |
| ad0f2e55-ae59-48b8-a83f-92dbb31d62e7 | DHCP agent | controller | nova | :-) | UP | neutron-dhcp-agent |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
5.8 Horizon Installation [controller node]
(1) Install
#### Install the package
apt install openstack-dashboard
(2) Modify the local_settings.py file
Note: this is a Python file, so keep the indentation consistent.
Open the file: vim /etc/openstack-dashboard/local_settings.py
# OPENSTACK_HOST = "127.0.0.1"
OPENSTACK_HOST = "controller"
ALLOWED_HOSTS = ['*']
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
#CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': '127.0.0.1:11211',
# },
#}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'controller:11211',
}
}
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 2,
}
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
# OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
# TIME_ZONE = "UTC"
TIME_ZONE = "Asia/Shanghai"
(3) Modify the dashboard Apache configuration
#### Open the configuration file
vim /etc/apache2/conf-available/openstack-dashboard.conf
#### Set the WSGIApplicationGroup directive
WSGIApplicationGroup %{GLOBAL}
(4) Reload the Apache service
service apache2 reload
(5) Check the service
Open http://172.20.10.120/horizon to verify that the OpenStack dashboard is up, then log in and create some test resources:
- Domain: default
- User Name: admin
- Password: admin
- Admin->Compute->Flavors: create flavors
- Admin->Compute->Images: create images
- Project->Network->Networks: create two networks
- Project->Network->Routers: create a router and add two interfaces (Add Interface)
- Project->Compute->Instances: create three instances (spread across the two networks, to verify that L3 routing works)
Note: once the instances are created, open their consoles to check L2/L3 connectivity; if the three instances can ping each other, the deployment is working (a CLI sketch of the same steps follows below).
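For reference, roughly the same resources can also be created from the command line. This is a hedged sketch: the flavor, network, subnet, router, and instance names (m1.tiny, net1/net2, subnet1/subnet2, router1, vm1-vm3) and the subnet ranges are illustrative assumptions, not from the original text.
. admin-openrc
openstack flavor create --vcpus 1 --ram 512 --disk 1 m1.tiny
openstack network create net1
openstack subnet create --network net1 --subnet-range 192.168.1.0/24 subnet1
openstack network create net2
openstack subnet create --network net2 --subnet-range 192.168.2.0/24 subnet2
openstack router create router1
openstack router add subnet router1 subnet1
openstack router add subnet router1 subnet2
openstack server create --flavor m1.tiny --image cirros --network net1 vm1
openstack server create --flavor m1.tiny --image cirros --network net1 vm2
openstack server create --flavor m1.tiny --image cirros --network net2 vm3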
A side note with a link:
- Some notes on OpenStack integration issues
- This is a "brand new" version of the ICE setup and ICE/cloud-platform integration notes
- It can be ignored during the deployment
- The content at this link can be edited freely
http://note.youdao.com/noteshare?id=31d7bbe5d2d6d2e91b4bb6772cb99a2e
6. Integrating ICE
ICE node
Extract the archives
tar zxf jdk-8u212-linux-x64.tar.gz -C /root
tar zxf karaf-0.8.4.tar.gz -C /root
Start ODL
cd /root/karaf-0.8.4
./bin/start
./bin/client
Install the features:
opendaylight-user@root> feature:install odl-netvirt-openstack odl-dlux-core odl-mdsal-apidocs
Check that the kernel version is >= 4.3
uname -r
4.15.0-45-generic
Check the conntrack kernel modules
lsmod | grep conntrack
nf_conntrack_ipv6 20480 1
nf_conntrack_ipv4 16384 1
nf_defrag_ipv4 16384 1 nf_conntrack_ipv4
nf_defrag_ipv6 36864 2 nf_conntrack_ipv6,openvswitch
nf_conntrack 131072 6 nf_conntrack_ipv6,nf_conntrack_ipv4,nf_nat,nf_nat_ipv6,nf_nat_ipv4,openvswitch
libcrc32c 16384 4 nf_conntrack,nf_nat,openvswitch,raid456
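If any of these modules are missing (an assumption; on this system they are already loaded), they can usually be loaded with modprobe:
#### Load the conntrack modules if lsmod shows nothing
modprobe nf_conntrack
modprobe nf_conntrack_ipv4
modprobe nf_conntrack_ipv6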
Controller and compute nodes
git clone https://github.com/openstack/networking-odl.git
cd networking-odl/
git checkout stable/rocky
python ./setup.py install
Check /var/log/neutron/neutron-server.log; if it reports websocket-related errors, run the following three commands to install websocket
apt install python-pip
pip install websocket
pip install websocket-client==0.47.0
Compute nodes
systemctl stop neutron-openvswitch-agent
systemctl disable neutron-openvswitch-agent
Controller node
This part deletes the instances, networks, and routers that were created in the OpenStack dashboard. It can be skipped, but make sure everything has been completely deleted from the dashboard first.
nova list
nova delete
neutron subnet-list
neutron router-list
neutron router-port-list
neutron router-interface-delete
neutron subnet-delete
neutron net-list
neutron net-delete
neutron router-delete
neutron port-list
systemctl stop neutron-server
systemctl stop neutron-l3-agent
systemctl disable neutron-l3-agent
systemctl stop neutron-openvswitch-agent
systemctl disable neutron-openvswitch-agent
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight_v2
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security
crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins odl-router_v2
crudini --set /etc/neutron/dhcp_agent.ini DEFAULT force_metadata True
crudini --set /etc/neutron/dhcp_agent.ini ovs ovsdb_interface vsctl
crudini --set /etc/neutron/dhcp_agent.ini DEFAULT ovs_integration_bridge br-int
------------------------------------------------------------------------
#### Edit the configuration file: vim /etc/neutron/plugins/ml2/ml2_conf.ini
#### Add the following section
------------------------------------------------------------------------
[ml2_odl]
url = http://172.20.10.131:8181/controller/nb/v2/neutron
password = admin
username = admin
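Equivalently, the same [ml2_odl] section can be added with crudini, consistent with the style used earlier in this guide (a sketch, not from the original text):
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_odl url http://172.20.10.131:8181/controller/nb/v2/neutron
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_odl username admin
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_odl password admin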
Recreate the neutron database
mysql -e "DROP DATABASE IF EXISTS neutron;"
mysql -e "CREATE DATABASE neutron CHARACTER SET utf8;"
/usr/bin/neutron-db-manage \
--config-file /etc/neutron/neutron.conf \
--config-file /etc/neutron/plugins/ml2/ml2_conf.ini \
upgrade head
systemctl start neutron-server
Note: only the neutron-server service is started here; the other services were stopped earlier
systemctl stop openvswitch-switch
rm -rf /var/log/openvswitch/*
rm -rf /etc/openvswitch/conf.db
systemctl start openvswitch-switch
ovs-vsctl set-manager tcp:172.20.10.131:6640
ovs-vsctl set Open_vSwitch . other_config:local_ip=172.20.10.120
Note: the next line configures connectivity to the external (provider) network; if external access is not needed, this command can be skipped
ovs-vsctl set Open_vSwitch . other_config:provider_mappings=provider:ens192
neutron-odl-ovs-hostconfig --datapath_type=system
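Optionally verify the OVS settings just applied (not part of the original steps):
#### The manager target and the other_config keys should match what was configured above
ovs-vsctl get-manager
ovs-vsctl get Open_vSwitch . other_config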
Compute node
systemctl stop openvswitch-switch
rm -rf /var/log/openvswitch/*
rm -rf /etc/openvswitch/conf.db
systemctl start openvswitch-switch
ovs-vsctl set-manager tcp:172.20.10.131:6640
ovs-vsctl set Open_vSwitch . other_config:local_ip=172.20.10.121
ovs-vsctl set Open_vSwitch . other_config:provider_mappings=provider:ens192
neutron-odl-ovs-hostconfig --datapath_type=system
All nodes
#### Check the OVS configuration
ovs-vsctl show
#### Output:
356aeefd-1fc7-4f2d-b5b0-69150ae00b94
Manager "tcp:172.20.10.131:6640"
is_connected: true
Bridge br-int
Controller "tcp:172.20.10.131:6653"
is_connected: true
fail_mode: secure
Port "tun97119295314"
Interface "tun97119295314"
type: vxlan
options: {key=flow, local_ip="172.20.10.120", remote_ip="172.20.10.122"}
bfd_status: {diagnostic="No Diagnostic", flap_count="1", forwarding="true", remote_diagnostic="No Diagnostic", remote_state=up, state=up}
Port br-int
Interface br-int
type: internal
Port "ens224"
Interface "ens224"
Port "tun249b7250379"
Interface "tun249b7250379"
type: vxlan
options: {key=flow, local_ip="172.20.10.120", remote_ip="172.20.10.121"}
bfd_status: {diagnostic="No Diagnostic", flap_count="1", forwarding="true", remote_diagnostic="No Diagnostic", remote_state=up, state=up}
ovs_version: "2.10.0"
Controller node
#### Check the network agents
openstack network agent list
#### Output:
+--------------------------------------+----------------+------------+-------------------+-------+-------+------------------------------+
| ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
+--------------------------------------+----------------+------------+-------------------+-------+-------+------------------------------+
| 24319b30-3928-48a4-8f0c-a448732dae05 | ODL L2 | compute02 | None | :-) | UP | neutron-odlagent-portbinding |
| 36a5de48-98c1-4ccf-8aa1-717000924f06 | Metadata agent | controller | None | :-) | UP | neutron-metadata-agent |
| 58d89f7d-df6c-4b80-8e58-3bdb4b2d73fe | DHCP agent | controller | nova | :-) | UP | neutron-dhcp-agent |
| d3fafd3d-9564-48af-958b-2f05cb21718e | ODL L2 | compute01 | None | :-) | UP | neutron-odlagent-portbinding |
| e671e52e-f7f5-48d0-a75c-0598f67f50c8 | ODL L2 | controller | None | :-) | UP | neutron-odlagent-portbinding |
+--------------------------------------+----------------+------------+-------------------+-------+-------+------------------------------+
Note: at this point the integration is nearly complete. Open the OpenStack dashboard again and create networks, routers, and instances as before; if they can communicate normally, the platform deployment and integration have succeeded.
Re-integrating ODL
Where should re-integration start so that ODL can be re-integrated quickly without restoring snapshots? The answer is the following three-step procedure:
- Step 1:
Go to the OpenStack cloud platform and delete every instance, network, and router in it.
- Step 2:
(1) Stop ODL: change to ICE's bin directory and run ./stop
(2) Clear the ODL database: in the ICE directory, delete the following three folders: rm -rf data/ snapshots/ journal
Note: ICE is now stopped, but it must be running while ODL is being integrated, so start ICE again and restore its UI to a normal state (the SW soft-integration UI can be used).
- Step 3:
(Re-integrate OVS)
(1) Controller node
systemctl stop openvswitch-switch
rm -rf /var/log/openvswitch/*
rm -rf /etc/openvswitch/conf.db
systemctl start openvswitch-switch
ovs-vsctl set-manager tcp:172.20.10.131:6640
ovs-vsctl set Open_vSwitch . other_config:local_ip=172.20.10.120
ovs-vsctl set Open_vSwitch . other_config:provider_mappings=provider:ens224
neutron-odl-ovs-hostconfig --datapath_type=system
(2) Compute node 1
systemctl stop openvswitch-switch
rm -rf /var/log/openvswitch/*
rm -rf /etc/openvswitch/conf.db
systemctl start openvswitch-switch
ovs-vsctl set-manager tcp:172.20.10.131:6640
ovs-vsctl set Open_vSwitch . other_config:local_ip=172.20.10.121
ovs-vsctl set Open_vSwitch . other_config:provider_mappings=provider:ens224
neutron-odl-ovs-hostconfig --datapath_type=system
(3) Compute node 2
systemctl stop openvswitch-switch
rm -rf /var/log/openvswitch/*
rm -rf /etc/openvswitch/conf.db
systemctl start openvswitch-switch
ovs-vsctl set-manager tcp:172.20.10.131:6640
ovs-vsctl set Open_vSwitch . other_config:local_ip=172.20.10.122
ovs-vsctl set Open_vSwitch . other_config:provider_mappings=provider:ens224
neutron-odl-ovs-hostconfig --datapath_type=system
If you got distracted by a phone call and accidentally pasted the controller node's IP onto a compute node, don't worry: rerun the first four commands of that node's block and then continue. If in doubt, run ovs-vsctl show first to check the current configuration.
Finally, to round off the integration section, a translation of the official description of OVS:
The OVS service provides the underlying virtual networking framework for instances. The integration bridge br-int handles the internal network traffic of instances within OVS, and the external bridge br-ex handles the external network traffic of instances within OVS.
Troubleshooting steps:
- 1. When ODL has problems, first check whether the services on the controller node are running normally.
Use openstack network agent list to check each agent; if the Alive column shows XXX, restart the corresponding service. If it is the ODL agents that are down, start ICE.
Controller node: check the status of neutron-server and its log
(1) systemctl status neutron-server
(2) vim /var/log/neutron/neutron-server.log
Compute nodes: check the status of nova-compute and its log
(1) systemctl status nova-compute.service
(2) vim /var/log/nova/nova-compute.log
- 2. After the integration, creating an instance failed with: "ERROR: Failed to perform requested operation on instance 'user', the instance has an error status: please try again later [Error: No valid host was found. There are not enough hosts available]". Other causes can produce this error; in my case the problem was in the [ml2_odl] settings: I copied the block with the IP from earlier in the document, did not remove the old one, and ran the step again, which left two [ml2_odl] sections in the file.
- 3. When instances cannot get an IP address and cannot be pinged, so far the only conclusion is that it is related to ICE. Sometimes restarting ICE solves the problem, but more often the fix is to re-integrate. The experts say it is related to the flow tables; if you do not understand flow tables, just re-integrate.
Conclusion
Finishing the environment setup does not mean the work is over; it marks the beginning of the testing work. During the deployment, beyond mastering common Linux commands, keep thinking about what each OpenStack component does and how the data flows. Being able to locate and solve problems correctly from the logs is the direction that is really worth exploring!