Installing OpenStack Ussuri on CentOS 8

1. Pre-installation preparation on the controller node

1. Topology

(topology diagram omitted)

2. IP plan

openstack-controller:192.168.102.12/24
openstack-compute1:192.168.102.11/24
openstack-compute2:192.168.102.10/24
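
The rest of this guide assumes each node's hostname matches this plan; if they are not set yet, a quick sketch (run the matching line on each node):

hostnamectl set-hostname controller    # on 192.168.102.12
hostnamectl set-hostname compute1      # on 192.168.102.11
hostnamectl set-hostname compute2      # on 192.168.102.10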

3. Configure the package repositories

[root@controller ~]# rm -rf /etc/yum.repos.d/*
[root@client ~]# rm -rf /etc/yum.repos.d/*

# On CentOS 8 the message queue needs its own RabbitMQ repository, configured separately below
cat > /etc/yum.repos.d/a.repo << END
[extras]
name=extras
baseurl=https://mirrors.163.com/centos/8/extras/x86_64/os/
enabled=1
gpgcheck=0
[AppStream]
name=AppStream
baseurl=https://mirrors.163.com/centos/8-stream/AppStream/x86_64/os/
enabled=1
gpgcheck=0
[BaseOS]
name=BaseOS
baseurl=https://mirrors.163.com/centos/8-stream/BaseOS/x86_64/os/
enabled=1
gpgcheck=0
[PowerTools]
name=PowerTools
baseurl=https://mirrors.163.com/centos/8-stream/PowerTools/x86_64/os/
enabled=1
gpgcheck=0
[openstack-ussuri]
name=openstack-ussuri
baseurl=https://mirrors.163.com/centos/8-stream/cloud/x86_64/openstack-ussuri/
enabled=1
gpgcheck=0
[epel]
name=epel
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/8/Everything/x86_64/
enabled=1
gpgcheck=0
[rabbitmq-38]
name=rabbitmq-38
baseurl=https://mirrors.163.com/centos/8-stream/messaging/x86_64/rabbitmq-38/
enabled=1
gpgcheck=0
END

yum update -y

yum install centos-release-openstack-ussuri -y
yum config-manager --set-enabled powertools

cat > /etc/hosts << END
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.102.12 controller
192.168.102.11 compute1
192.168.102.10 compute2
END
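
Before moving on, it is worth confirming that the repositories load and the names resolve, for example:

yum repolist
ping -c 1 compute1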

4. Security configuration

systemctl stop firewalld
systemctl mask firewalld
setenforce 0
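
Note that setenforce 0 only lasts until the next reboot; to keep SELinux permissive persistently you can also edit its config file, e.g.:

sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config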

5. Install NTP (chrony)

yum install chrony -y

# Point chrony at the controller (on the controller itself you may instead keep the
# default public pool and add: allow 192.168.102.0/24)
sed -i 's/^pool.*/pool controller iburst/' /etc/chrony.conf

systemctl enable chronyd --now
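
Verify that chrony is synchronizing:

chronyc sources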

6. Install the OpenStack client

yum install python3-openstackclient -y
yum install openstack-selinux -y

7. Install the MySQL (MariaDB) database

yum install mariadb mariadb-server python3-PyMySQL -y

cat >/etc/my.cnf.d/openstack.cnf << END
[mysqld]
# Set this to the controller node's IP: the address the database listens on
bind-address = 192.168.102.12

default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
END

systemctl enable mariadb.service --now

# Secure the installation (the later commands in this guide assume the root password is set to 1)
mysql_secure_installation
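
A quick check that the root credentials work, assuming the password 1 used throughout this guide:

mysql -u root -p1 -e 'SELECT VERSION();'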

8. Install the message queue

yum install rabbitmq-server -y

systemctl enable rabbitmq-server.service --now

# Create the user and grant it permissions
rabbitmqctl add_user openstack RABBIT_PASS
rabbitmqctl set_permissions openstack ".*" ".*" ".*"


# List the RabbitMQ users and permissions
[root@controller ~]# rabbitmqctl list_users
Listing users ...
user tags
openstack []
guest [administrator]

[root@controller ~]# rabbitmqctl list_permissions
Listing permissions for vhost "/" ...
user configure write read
guest .* .* .*
openstack .* .* .*

9. Install Memcached

[root@controller ~]# yum install memcached python3-memcached -y

# Configure name resolution (redundant if you wrote /etc/hosts above; the controller is 192.168.102.12)
echo '192.168.102.12 controller' >> /etc/hosts

# Edit the configuration file
[root@controller ~]# sed -i 's/^OPTIONS.*/OPTIONS="-l 127.0.0.1,::1,controller"/g' /etc/sysconfig/memcached

# Start the service
systemctl enable memcached.service --now

# Check the memcached ports
[root@controller ~]# netstat -tunpl | grep 11211
tcp 0 0 192.168.102.12:11211 0.0.0.0:* LISTEN 29964/memcached
tcp 0 0 127.0.0.1:11211 0.0.0.0:* LISTEN 29964/memcached
tcp6 0 0 ::1:11211 :::* LISTEN 29964/memcached
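
Since python3-memcached is installed, you can also confirm the daemon answers, e.g.:

python3 -c "import memcache; print(memcache.Client(['controller:11211']).get_stats())"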

10. Install etcd

yum install etcd -y

# Edit the configuration file (the controller's IP is 192.168.102.12 throughout)
cat > /etc/etcd/etcd.conf << END
#[Member]
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.102.12:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.102.12:2379"
ETCD_NAME="controller"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.102.12:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.102.12:2379"
ETCD_INITIAL_CLUSTER="controller=http://192.168.102.12:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"
END

systemctl enable etcd --now

# Check the listening ports
[root@controller ~]# netstat -tunpl | grep etcd
tcp 0 0 192.168.102.12:2379 0.0.0.0:* LISTEN 31227/etcd
tcp 0 0 192.168.102.12:2380 0.0.0.0:* LISTEN 31227/etcd
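
You can also query etcd itself over the v3 API, e.g.:

ETCDCTL_API=3 etcdctl --endpoints=http://192.168.102.12:2379 endpoint health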

2. Installing Keystone

1. Set up the Keystone database

#Create the keystone database
mysql -u root -p1 -e 'CREATE DATABASE keystone;'
mysql -u root -p1 -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
IDENTIFIED BY 'KEYSTONE_DBPASS';"
mysql -u root -p1 -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
IDENTIFIED BY 'KEYSTONE_DBPASS';"
mysql -u root -p1 -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'controller' \
IDENTIFIED BY 'KEYSTONE_DBPASS';"
#Verify the database connections work
mysql -ukeystone -pKEYSTONE_DBPASS -hlocalhost
mysql -ukeystone -pKEYSTONE_DBPASS -hcontroller

2. Install the Keystone packages

#Keystone runs as an httpd component; a pitfall in the official guide: install python3-mod_wsgi, not mod_wsgi
yum install openstack-keystone httpd python3-mod_wsgi -y

3. Edit the configuration file

#Create a small filter script that strips comments and blank lines from a config file in place
cat > grep.sh << 'END'
#!/bin/bash
grep -v '^#' "$1" | grep -v '^$' > /tmp/filtered.$$
cat /tmp/filtered.$$ > "$1"
rm -f /tmp/filtered.$$
END
chmod u+x grep.sh

#Filter the configuration file down to its active settings
./grep.sh /etc/keystone/keystone.conf

#Edit the configuration file
cat > /etc/keystone/keystone.conf << END
[DEFAULT]
[application_credential]
[assignment]
[auth]
[cache]
[catalog]
[cors]
[credential]
[database]
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
[domain_config]
[endpoint_filter]
[endpoint_policy]
[eventlet_server]
[federation]
[fernet_receipts]
[fernet_tokens]
[healthcheck]
[identity]
[identity_mapping]
[jwt_tokens]
[ldap]
[memcache]
[oauth1]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[policy]
[profiler]
[receipt]
[resource]
[revoke]
[role]
[saml]
[security_compliance]
[shadow_users]
[token]
provider = fernet
[tokenless_auth]
[totp]
[trust]
[unified_limit]
[wsgi]
END

4. Populate the database

su -s /bin/sh -c "keystone-manage db_sync" keystone

#Verify it succeeded
mysql -uroot -p1 -e "use keystone;" -e "show tables;"

#Initialize the Fernet keys
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone

#Check the keys were created
[root@controller ~]# ls /etc/keystone/fernet-keys/
0 1

5. Bootstrap the Identity service

#Create the service entity and endpoints
keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
--bootstrap-admin-url http://controller:5000/v3/ \
--bootstrap-internal-url http://controller:5000/v3/ \
--bootstrap-public-url http://controller:5000/v3/ \
--bootstrap-region-id RegionOne

6. Edit the httpd configuration file

sed -i 's/^#ServerName.*/ServerName controller/g' /etc/httpd/conf/httpd.conf

ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

#Enable and start the web service
systemctl enable httpd.service --now

#Verify it is listening
[root@controller ~]# netstat -tunpl | grep :5000
tcp6 0 0 :::5000 :::* LISTEN 35973/httpd

7. Create the credentials file

cat > openstackrc << END
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
END

source openstackrc

#Test that authentication works
[root@controller ~]# openstack endpoint list
+----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+
| ID | Region | Service Name | Service Type | Enabled | Interface | URL |
+----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+
| 4b63eee4103d4b54a374154c098c5b70 | RegionOne | keystone | identity | True | internal | http://controller:5000/v3/ |
| ef76bfe2a6574e72a1c36e60768aaebb | RegionOne | keystone | identity | True | public | http://controller:5000/v3/ |
| fbd95351e8264fdabce7e05d4c0e5c1e | RegionOne | keystone | identity | True | admin | http://controller:5000/v3/ |
+----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+

8. Create a service project

openstack project create --domain default --description "Service Project" service

[root@controller ~]# openstack project list
+----------------------------------+---------+
| ID | Name |
+----------------------------------+---------+
| 0e95c8bb532045d1803b03bb2c952dc6 | service |
| a07e5116b906444290425a6be2efc82c | admin |
+----------------------------------+---------+

9. Install the OpenStack client on the client node

[root@client ~]# yum install python3-openstackclient -y
#Copy openstackrc over from the controller first
[root@client ~]# source openstackrc

#Test
[root@client ~]# openstack endpoint list
+----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+
| ID | Region | Service Name | Service Type | Enabled | Interface | URL |
+----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+
| 4b63eee4103d4b54a374154c098c5b70 | RegionOne | keystone | identity | True | internal | http://controller:5000/v3/ |
| ef76bfe2a6574e72a1c36e60768aaebb | RegionOne | keystone | identity | True | public | http://controller:5000/v3/ |
| fbd95351e8264fdabce7e05d4c0e5c1e | RegionOne | keystone | identity | True | admin | http://controller:5000/v3/ |
+----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+
[root@client ~]# openstack service list
+----------------------------------+----------+----------+
| ID | Name | Type |
+----------------------------------+----------+----------+
| 0271f90d25d34a82b5b378e3ebfa4fcc | keystone | identity |
+----------------------------------+----------+----------+
[root@client ~]# openstack project list
+----------------------------------+---------+
| ID | Name |
+----------------------------------+---------+
| 0e95c8bb532045d1803b03bb2c952dc6 | service |
| a07e5116b906444290425a6be2efc82c | admin |
+----------------------------------+---------+

3. Installing Glance (usually on the controller node)

1. Create the database

mysql -u root -p1 -e "CREATE DATABASE glance;"

mysql -u root -p1 -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
IDENTIFIED BY 'GLANCE_DBPASS';"

mysql -u root -p1 -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
IDENTIFIED BY 'GLANCE_DBPASS';"
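
As with Keystone, you can verify the grants before continuing:

mysql -uglance -pGLANCE_DBPASS -hcontroller -e 'SHOW DATABASES;'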

2. Create the Glance endpoints

openstack user create --domain default --password glance glance

openstack role add --project service --user glance admin

openstack service create --name glance \
--description "OpenStack Image" image

openstack endpoint create --region RegionOne \
image public http://controller:9292

openstack endpoint create --region RegionOne \
image internal http://controller:9292

openstack endpoint create --region RegionOne \
image admin http://controller:9292
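
Check the result with the client:

openstack endpoint list --service image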

3. Install Glance and edit the configuration file

yum install openstack-glance -y

cat > /etc/glance/glance-api.conf << END
[DEFAULT]
[cinder]
[cors]
[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
[file]
[glance.store.http.store]
[glance.store.rbd.store]
[glance.store.s3.store]
[glance.store.swift.store]
[glance.store.vmware_datastore.store]
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
[image_format]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = glance
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
END


#Sync the database
su -s /bin/sh -c "glance-manage db_sync" glance

#Verify the sync succeeded
[root@controller ~]# mysql -uroot -p1 -e "use glance;" -e "show tables;"
+----------------------------------+
| Tables_in_glance |
+----------------------------------+
| alembic_version |
| image_locations |
| image_members |
| image_properties |
| image_tags |
| images |
| metadef_namespace_resource_types |
| metadef_namespaces |
| metadef_objects |
| metadef_properties |
| metadef_resource_types |
| metadef_tags |
| migrate_version |
| task_info |
| tasks |
+----------------------------------+

4. Start the Glance service

systemctl enable openstack-glance-api.service --now

#Verify
[root@controller ~]# netstat -tunpl | grep 9292
tcp 0 0 0.0.0.0:9292 0.0.0.0:* LISTEN 95588/python3

5. Import a test image

wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img

source openstackrc

glance image-create --name "cirros" \
--file cirros-0.4.0-x86_64-disk.img \
--disk-format qcow2 --container-format bare \
--visibility=public
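
Equivalently, the same upload can be done with the unified client:

openstack image create "cirros" \
--file cirros-0.4.0-x86_64-disk.img \
--disk-format qcow2 --container-format bare \
--public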

#List images
[root@controller ~]# openstack image list
+--------------------------------------+--------+--------+
| ID | Name | Status |
+--------------------------------------+--------+--------+
| 89be4361-fc6f-4941-bfa2-9c2cbe9ffeb3 | cirros | active |
+--------------------------------------+--------+--------+

4. Installing Nova

1. Topology

(topology diagram omitted)

2. Install Placement on the controller node

mysql -u root -p1 -e "CREATE DATABASE placement;"

mysql -u root -p1 -e "GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' \
IDENTIFIED BY 'PLACEMENT_DBPASS';"

mysql -u root -p1 -e "GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' \
IDENTIFIED BY 'PLACEMENT_DBPASS';"

openstack user create --domain default --password placement placement

openstack role add --project service --user placement admin

openstack service create --name placement \
--description "Placement API" placement

openstack endpoint create --region RegionOne \
placement public http://controller:8778

openstack endpoint create --region RegionOne \
placement internal http://controller:8778

openstack endpoint create --region RegionOne \
placement admin http://controller:8778

#Install
yum install openstack-placement-api -y

#Edit the configuration file
cat > /etc/placement/placement.conf << END
[DEFAULT]
[api]
auth_strategy = keystone
[cors]
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = placement
[oslo_policy]
[placement]
[placement_database]
connection = mysql+pymysql://placement:PLACEMENT_DBPASS@controller/placement
[profiler]
END

cat > /etc/httpd/conf.d/00-placement-api.conf << END
Listen 8778

<VirtualHost *:8778>
  WSGIProcessGroup placement-api
  WSGIApplicationGroup %{GLOBAL}
  WSGIPassAuthorization On
  WSGIDaemonProcess placement-api processes=3 threads=1 user=placement group=placement
  WSGIScriptAlias / /usr/bin/placement-api
  <IfVersion >= 2.4>
    ErrorLogFormat "%M"
  </IfVersion>
  ErrorLog /var/log/placement/placement-api.log
  #SSLEngine On
  #SSLCertificateFile ...
  #SSLCertificateKeyFile ...
</VirtualHost>

Alias /placement-api /usr/bin/placement-api
<Location /placement-api>
  SetHandler wsgi-script
  Options +ExecCGI
  WSGIProcessGroup placement-api
  WSGIApplicationGroup %{GLOBAL}
  WSGIPassAuthorization On
</Location>
<Directory /usr/bin>
  <IfVersion >= 2.4>
    Require all granted
  </IfVersion>
  <IfVersion < 2.4>
    Order allow,deny
    Allow from all
  </IfVersion>
</Directory>
END

#Sync the placement database
su -s /bin/sh -c "placement-manage db sync" placement

#Restart the service
systemctl restart httpd

#Verify
[root@controller ~]# netstat -tunpl | grep :8778
tcp6 0 0 :::8778 :::* LISTEN 104318/httpd
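
The API root should also answer over plain HTTP with a JSON version document, e.g.:

curl http://controller:8778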

3. Install Nova on the controller node

mysql -u root -p1 -e "CREATE DATABASE nova_api;"
mysql -u root -p1 -e "CREATE DATABASE nova;"
mysql -u root -p1 -e "CREATE DATABASE nova_cell0;"
mysql -u root -p1 -e "GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
IDENTIFIED BY 'NOVA_DBPASS';"
mysql -u root -p1 -e "GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
IDENTIFIED BY 'NOVA_DBPASS';"
mysql -u root -p1 -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
IDENTIFIED BY 'NOVA_DBPASS';"
mysql -u root -p1 -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
IDENTIFIED BY 'NOVA_DBPASS';"
mysql -u root -p1 -e "GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \
IDENTIFIED BY 'NOVA_DBPASS';"
mysql -u root -p1 -e "GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \
IDENTIFIED BY 'NOVA_DBPASS';"

openstack user create --domain default --password nova nova

openstack role add --project service --user nova admin

openstack service create --name nova \
--description "OpenStack Compute" compute

openstack endpoint create --region RegionOne \
compute public http://controller:8774/v2.1

openstack endpoint create --region RegionOne \
compute internal http://controller:8774/v2.1

openstack endpoint create --region RegionOne \
compute admin http://controller:8774/v2.1


yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler -y

4. Edit the configuration file

# Quote the delimiter so $my_ip below is written literally, not expanded by the shell
cat > /etc/nova/nova.conf << 'END'
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:RABBIT_PASS@controller:5672/
my_ip = 192.168.102.12
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
[barbican]
[cache]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[cyborg]
[database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[image_cache]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[libvirt]
[metrics]
[mks]
[neutron]
[notifications]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
[powervm]
[privsep]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
[workarounds]
[wsgi]
[xenserver]
[zvm]
END

5. Populate the databases

su -s /bin/sh -c "nova-manage api_db sync" nova

su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova

su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova

su -s /bin/sh -c "nova-manage db sync" nova

[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
+-------+--------------------------------------+------------------------------------------+-------------------------------------------------+----------+
| Name | UUID | Transport URL | Database Connection | Disabled |
+-------+--------------------------------------+------------------------------------------+-------------------------------------------------+----------+
| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@controller/nova_cell0 | False |
| cell1 | 5d9e10d7-5cb3-4abc-b14b-425ab6029992 | rabbit://openstack:****@controller:5672/ | mysql+pymysql://nova:****@controller/nova | False |
+-------+--------------------------------------+------------------------------------------+-------------------------------------------------+----------+

6. Start the Nova services on the controller node

systemctl enable \
openstack-nova-api.service \
openstack-nova-scheduler.service \
openstack-nova-conductor.service \
openstack-nova-novncproxy.service --now

systemctl status \
openstack-nova-api.service \
openstack-nova-scheduler.service \
openstack-nova-conductor.service \
openstack-nova-novncproxy.service

5. Installing Nova on the compute nodes

1. Install packages

yum install centos-release-openstack-ussuri -y

yum config-manager --set-enabled powertools

systemctl stop firewalld
systemctl mask firewalld
setenforce 0

yum install openstack-nova-compute -y
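
Before setting virt_type = kvm in the next step, check that the CPU exposes hardware virtualization extensions; if this prints 0, use virt_type = qemu instead:

egrep -c '(vmx|svm)' /proc/cpuinfo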

2. Edit the configuration file

# Quote the delimiter so $my_ip stays literal
cat > /etc/nova/nova.conf << 'END'
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:RABBIT_PASS@controller
# The compute node's own IP
my_ip = 192.168.102.11
[api]
auth_strategy = keystone
[api_database]
[barbican]
[cache]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[cyborg]
[database]
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[image_cache]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[libvirt]
virt_type = kvm
[metrics]
[mks]
[neutron]
[notifications]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
[powervm]
[privsep]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[zvm]
END

3. Start the services

systemctl enable libvirtd.service openstack-nova-compute.service --now

#Verify
[root@controller ~]# openstack compute service list
+----+----------------+------------+----------+---------+-------+----------------------------+
| ID | Binary | Host | Zone | Status | State | Updated At |
+----+----------------+------------+----------+---------+-------+----------------------------+
| 3 | nova-conductor | controller | internal | enabled | up | 2022-03-13T12:19:11.000000 |
| 5 | nova-scheduler | controller | internal | enabled | up | 2022-03-13T12:19:03.000000 |
| 8 | nova-compute | compute1 | nova | enabled | up | 2022-03-13T12:19:12.000000 |
| 9 | nova-compute | compute2 | nova | enabled | up | 2022-03-13T12:19:12.000000 |
+----+----------------+------------+----------+---------+-------+----------------------------+

4. Cell operations on the controller node

[root@controller ~]# nova-manage cell_v2 list_hosts
+-----------+-----------+----------+
| Cell Name | Cell UUID | Hostname |
+-----------+-----------+----------+
+-----------+-----------+----------+

#Use Nova cell auto-discovery; if you do not use automatic mapping, you must map the hosts manually:

# nova-manage cell_v2 map_cell_and_hosts .....

[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
Found 2 cell mappings.
Skipping cell0 since it does not contain hosts.
Getting computes from cell 'cell1': 5d9e10d7-5cb3-4abc-b14b-425ab6029992
Checking host mapping for compute host 'compute1': 8f068264-8a21-44d9-8db6-ef1abd7b3190
Creating host mapping for compute host 'compute1': 8f068264-8a21-44d9-8db6-ef1abd7b3190
Checking host mapping for compute host 'compute2': 662ac363-9b85-4753-8025-cab77f577ceb
Creating host mapping for compute host 'compute2': 662ac363-9b85-4753-8025-cab77f577ceb
Found 2 unmapped computes in cell: 5d9e10d7-5cb3-4abc-b14b-425ab6029992

[root@controller ~]# nova-manage cell_v2 list_hosts
+-----------+--------------------------------------+----------+
| Cell Name | Cell UUID | Hostname |
+-----------+--------------------------------------+----------+
| cell1 | 5d9e10d7-5cb3-4abc-b14b-425ab6029992 | compute1 |
| cell1 | 5d9e10d7-5cb3-4abc-b14b-425ab6029992 | compute2 |
+-----------+--------------------------------------+----------+

#Verify that Nova installed successfully
[root@controller ~]# nova-status upgrade check
+------------------------------------+
| Upgrade Check Results |
+------------------------------------+
| Check: Cells v2 |
| Result: Success |
| Details: None |
+------------------------------------+
| Check: Placement API |
| Result: Success |
| Details: None |
+------------------------------------+
| Check: Ironic Flavor Migration |
| Result: Success |
| Details: None |
+------------------------------------+
| Check: Cinder API |
| Result: Success |
| Details: None |
+------------------------------------+
| Check: Policy Scope-based Defaults |
| Result: Success |
| Details: None |
+------------------------------------+
| Check: Older than N-1 computes |
| Result: Success |
| Details: None |
+------------------------------------+

6. Installing the network node

1. Create the Neutron database on the controller node

mysql -u root -p1 -e "CREATE DATABASE neutron;"

mysql -u root -p1 -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
IDENTIFIED BY 'NEUTRON_DBPASS';"

mysql -u root -p1 -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
IDENTIFIED BY 'NEUTRON_DBPASS';"

openstack user create --domain default --password neutron neutron

openstack role add --project service --user neutron admin

openstack service create --name neutron \
--description "OpenStack Networking" network

openstack endpoint create --region RegionOne \
network public http://network:9696

openstack endpoint create --region RegionOne \
network internal http://network:9696

openstack endpoint create --region RegionOne \
network admin http://network:9696
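
These endpoints reference a dedicated network node named network, which is not in the IP plan above; every node (and the client) must be able to resolve that name, e.g. via a hosts entry (substitute the real IP):

echo '<network-node-ip> network' >> /etc/hosts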

2. Install the network node packages

yum install centos-release-openstack-ussuri -y
yum config-manager --set-enabled powertools

yum install openstack-neutron openstack-neutron-ml2 \
openstack-neutron-linuxbridge ebtables -y

3. Edit the configuration file

cat > /etc/neutron/neutron.conf << END
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[cors]
[database]
connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[privsep]
[ssl]
[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova
END

4. Edit the ML2 configuration file

cat  > /etc/neutron/plugins/ml2/ml2_conf.ini << END
[DEFAULT]
[ml2]
type_drivers = local,flat,vlan,vxlan
tenant_network_types = local
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
[securitygroup]
enable_ipset = true
END

5. Edit the L2 agent configuration file

cat   > /etc/neutron/plugins/ml2/linuxbridge_agent.ini << END
[DEFAULT]
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
enable_vxlan = false
END

modprobe br_netfilter

# Append (do not overwrite) the bridge filtering settings
cat >> /etc/sysctl.conf << END
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
END

sysctl -p
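
Both settings depend on the br_netfilter module, which modprobe loads only for the current boot; to persist it via the standard systemd mechanism and verify:

echo br_netfilter > /etc/modules-load.d/br_netfilter.conf

lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables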

6. Edit the L3 agent configuration file

cat   > /etc/neutron/l3_agent.ini << END
[DEFAULT]
interface_driver = linuxbridge
[cache]
END

7. Edit the DHCP agent configuration file

cat  > /etc/neutron/dhcp_agent.ini << END
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
END

8. Edit the metadata agent configuration file

cat  > /etc/neutron/metadata_agent.ini << END
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = METADATA_SECRET
[cache]
END

ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

9. Populate the Neutron database

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
--config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

10. Start the services

systemctl enable neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service --now
systemctl enable neutron-l3-agent.service --now

7. Configure Nova to use Neutron

1. Edit the Nova configuration file to connect to Neutron

#Run on the controller node (quote the delimiter so $my_ip stays literal)
cat > /etc/nova/nova.conf << 'END'
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:RABBIT_PASS@controller:5672/
my_ip = 192.168.102.12
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
[barbican]
[cache]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[cyborg]
[database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[image_cache]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[libvirt]
[metrics]
[mks]
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = METADATA_SECRET
[notifications]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
[powervm]
[privsep]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
[workarounds]
[wsgi]
[xenserver]
[zvm]
END

2. Restart the Nova service on the controller node

systemctl restart openstack-nova-api.service

8. Configure Neutron on the compute nodes

1. Install packages

yum install openstack-neutron-linuxbridge ebtables ipset -y

2. Edit the Neutron configuration file

[root@compute2 ~]# cat  > /etc/neutron/neutron.conf << END
[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
[cors]
[database]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[privsep]
[ssl]
END

3. Edit the L2 agent configuration file

cat   > /etc/neutron/plugins/ml2/linuxbridge_agent.ini << END
[DEFAULT]
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
enable_vxlan = false
END


modprobe br_netfilter

# Append (do not overwrite), as on the network node
cat >> /etc/sysctl.conf << END
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
END

sysctl -p

4. Start the L2 agent

systemctl enable neutron-linuxbridge-agent.service --now

5. Verify the compute nodes' L2 agents

[root@controller ~]# openstack network agent list 
+--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
| ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
+--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
| 4ea7c3d1-1b6d-4544-a14e-2eeae6d51c5e | Linux bridge agent | network | None | :-) | UP | neutron-linuxbridge-agent |
| 59623f8d-75a5-4530-9aa3-5a60ea057e4c | Linux bridge agent | compute2 | None | :-) | UP | neutron-linuxbridge-agent |
| 70d7a363-6a9c-47fd-a369-cf2bc3423939 | Metadata agent | network | None | :-) | UP | neutron-metadata-agent |
| 7e196df5-eb15-49d3-a1e6-3a50e1b35ac0 | DHCP agent | network | nova | :-) | UP | neutron-dhcp-agent |
| 9cd46dad-f63a-4aab-8881-7a9b6c5154d4 | Linux bridge agent | compute1 | None | :-) | UP | neutron-linuxbridge-agent |
| fb453795-c5d5-4739-9273-12e466cb046c | L3 agent | network | nova | :-) | UP | neutron-l3-agent |
+--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+

6. Configure Nova on the compute node to access Neutron

# Quote the delimiter so $my_ip stays literal
cat > /etc/nova/nova.conf << 'END'
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:RABBIT_PASS@controller
my_ip = 192.168.102.10
[api]
auth_strategy = keystone
[api_database]
[barbican]
[cache]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[cyborg]
[database]
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[image_cache]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[libvirt]
virt_type = kvm
[metrics]
[mks]
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
[notifications]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
[powervm]
[privsep]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[zvm]
END


systemctl restart openstack-nova-compute.service

9. Installing Horizon on the controller node

1. Install the package

yum install openstack-dashboard -y

2. Edit the configuration file

sed -i 's/^OPENSTACK_HOST.*/OPENSTACK_HOST = "controller"/' /etc/openstack-dashboard/local_settings

# Double-quote the sed script so the single-quoted '*' survives shell quoting
sed -i "s/^ALLOWED_HOSTS.*/ALLOWED_HOSTS = ['*']/" /etc/openstack-dashboard/local_settings

echo "SESSION_ENGINE = 'django.contrib.sessions.backends.cache'" >> /etc/openstack-dashboard/local_settings

echo "CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'controller:11211',
}
}" >> /etc/openstack-dashboard/local_settings

echo "OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True" >> /etc/openstack-dashboard/local_settings

# Single-quote the block so the inner double quotes reach the file intact
echo 'OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 3,
}' >> /etc/openstack-dashboard/local_settings

echo 'OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"' >> /etc/openstack-dashboard/local_settings

echo 'OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"' >> /etc/openstack-dashboard/local_settings

# Replace Asia/Shanghai with your own timezone identifier if different
sed -i 's|^TIME_ZONE.*|TIME_ZONE = "Asia/Shanghai"|' /etc/openstack-dashboard/local_settings

# Append the WEBROOT settings
cat >> /etc/openstack-dashboard/local_settings << 'END'
WEBROOT = '/dashboard/'
LOGIN_URL = WEBROOT + 'auth/login/'
LOGOUT_URL = WEBROOT + 'auth/logout/'
END

echo "WSGIApplicationGroup %{GLOBAL}" >> /etc/httpd/conf.d/openstack-dashboard.conf

3. Restart the services

systemctl restart httpd.service memcached.service
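
The dashboard should now be reachable at http://controller/dashboard; a quick check from any host that resolves controller (expect an HTTP 200 after redirects):

curl -sL -o /dev/null -w '%{http_code}\n' http://controller/dashboard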