在centos7上安装openstack Rocky版本

在centos7上安装openstack Rocky版本

1
2
3
4
5
6
7
8
环境准备 #在所有版本都要准备这一环境
1、用户名和密码
2、主机网络(物理网络准备)
3、ntp
4、准备软件仓库
5、数据库
6、memcache
7、etcd #一般用不到,但是最好安装一下

1、更换yum仓库

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
#更换yum仓库
[root@controller ~]# cat /etc/yum.repos.d/centos.repo
[os]
name = os
baseurl = https://mirrors.163.com/centos/7/os/x86_64/
enabled = 1
gpgcheck = 0

[update]
name = update
baseurl = https://mirrors.163.com/centos/7/updates/x86_64/
enabled = 1
gpgcheck = 0

[extras]
name = extras
baseurl = https://mirrors.163.com/centos/7/extras/x86_64/
enabled = 1
gpgcheck = 0

[KVM]
name = KVM
baseurl = https://mirrors.163.com/centos/7/virt/x86_64/kvm-common/
enabled = 1
gpgcheck = 0

[rocky]
name = rocky
baseurl = https://mirrors.163.com/centos/7/cloud/x86_64/openstack-rocky/
enabled = 1
gpgcheck = 0

[epel]
name = epel
baseurl = https://mirrors.tuna.tsinghua.edu.cn/epel/7/x86_64/
enabled = 1
gpgcheck = 0

2、配置网卡的静态ip

1
2
3
4
5
[root@controller ~]# nmcli con modify eth0 ipv4.address 10.163.1.100/24 ipv4.method manual ipv4.gateway 10.163.1.200 ipv4.dns 114.114.114.114 

[root@compute1 ~]# nmcli con modify eth0 ipv4.address 10.163.1.103/24 ipv4.method manual ipv4.gateway 10.163.1.200 ipv4.dns 114.114.114.114

[root@compute2 ~]# nmcli con modify eth0 ipv4.address 10.163.1.102/24 ipv4.method manual ipv4.gateway 10.163.1.200 ipv4.dns 114.114.114.114

3、在controller节点配置ntp

1
[root@controller ~]# yum install chrony -y

4、编辑ntp配置文件

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
[root@controller ~]# cat /etc/chrony.conf 
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server ntp.aliyun.com iburst

# Record the rate at which the system clock gains/losses time.
driftfile /var/lib/chrony/drift

# Allow the system clock to be stepped in the first three updates
# if its offset is larger than 1 second.
makestep 1.0 3

# Enable kernel synchronization of the real-time clock (RTC).
rtcsync

# Enable hardware timestamping on all interfaces that support it.
#hwtimestamp *

# Increase the minimum number of selectable sources required to adjust
# the system clock.
#minsources 2

# Allow NTP client access from local network.
#allow 192.168.0.0/16
allow 0.0.0.0/0

# Serve time even if not synchronized to a time source.
#local stratum 10

# Specify file containing keys for NTP authentication.
#keyfile /etc/chrony.keys

# Specify directory for log files.
logdir /var/log/chrony

# Select which information is logged.
#log measurements statistics tracking

[root@controller ~]# systemctl enable chronyd --now

#关闭防火墙
[root@controller ~]# systemctl stop firewalld
[root@controller ~]# systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.

5、计算节点安装ntp,并修改配置文件

1
2
3
4
5
6
7
8
9
[root@compute1 ~]# yum install chrony -y
[root@compute1 ~]# cat /etc/chrony.conf | grep -v ^# | grep -v ^$
server controller iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony

[root@compute1 ~]# systemctl enable chronyd --now

6、在控制节点上安装配置SQL

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
[root@controller ~]# yum install mariadb mariadb-server python2-PyMySQL -y

#创建并编辑配置文件
[root@controller ~]# cat /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = 10.163.1.100

default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8

[root@controller ~]# systemctl enable mariadb.service --now

#在所有节点安装openstack客户端和openstack-selinux
yum install python-openstackclient -y #在linux 7上
yum install openstack-selinux -y

7、在controller上安装rabbitmq-server

1
2
3
4
5
6
7
8
9
10
[root@controller ~]# yum install rabbitmq-server -y

[root@controller ~]# systemctl enable rabbitmq-server.service --now

#添加openstack用户
[root@controller ~]# rabbitmqctl add_user openstack RABBIT_PASS

#允许openstack用户配置、写入和读取访问权限
[root@controller ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
Setting permissions for user "openstack" in vhost "/"

8、在控制节点上安装Memcache

1
2
3
4
5
6
7
8
9
10
11
[root@controller ~]# yum install memcached python-memcached -y

#编辑配置文件
[root@controller ~]# cat /etc/sysconfig/memcached
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="64"
OPTIONS="-l 127.0.0.1,::1,controller"

[root@controller ~]# systemctl enable memcached.service --now

9、安装etcd

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
[root@controller ~]# yum install etcd -y

#编辑配置文件
[root@controller ~]# cat /etc/etcd/etcd.conf | grep -v ^# | grep -v ^$
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://10.163.1.100:2380"
ETCD_LISTEN_CLIENT_URLS="http://10.163.1.100:2379"
ETCD_NAME="controller"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.163.1.100:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://10.163.1.100:2379"
ETCD_INITIAL_CLUSTER="controller=http://10.163.1.100:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"

[root@controller ~]# systemctl enable etcd --now

10、控制节点安装keystone

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
[root@controller ~]# mysql
MariaDB [(none)]> CREATE DATABASE keystone;
Query OK, 1 row affected (0.00 sec)

#授予权限
MariaDB [(none)]>GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'controller' IDENTIFIED BY 'KEYSTONE_DBPASS';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
IDENTIFIED BY 'KEYSTONE_DBPASS';
Query OK, 0 rows affected (0.00 sec)

#安装和配置keystone组件,依赖的web组件http和动态web模块mod_wsgi
[root@controller ~]# yum install openstack-keystone httpd mod_wsgi -y

#过滤openstack配置文件
[root@controller ~]# cat grep.sh
#!/bin/bash
# grep.sh -- strip comment lines (starting with '#') and blank lines
# from a config file, rewriting the file in place.
# Usage: ./grep.sh /etc/keystone/keystone.conf

strip_comments() {
  local file=$1
  local tmp
  # Use a safe unique temp file instead of a literal file named "1"
  # in the current directory (which could clobber an existing file).
  tmp=$(mktemp) || return 1
  grep -v '^#' -- "$file" | grep -v '^$' > "$tmp"
  # Overwrite via cat so the target keeps its inode, owner and mode.
  cat -- "$tmp" > "$file"
  rm -f -- "$tmp"
}

# Only act when a filename argument was given.
if [ $# -ge 1 ]; then
  strip_comments "$1"
fi

[root@controller ~]# ./grep.sh /etc/keystone/keystone.conf

[root@controller ~]# cat /etc/keystone/keystone.conf
[DEFAULT]
[application_credential]
[assignment]
[auth]
[cache]
[catalog]
[cors]
[credential]
[database]
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
[domain_config]
[endpoint_filter]
[endpoint_policy]
[eventlet_server]
[federation]
[fernet_tokens]
[healthcheck]
[identity]
[identity_mapping]
[ldap]
[matchmaker_redis]
[memcache]
[oauth1]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[policy]
[profiler]
[resource]
[revoke]
[role]
[saml]
[security_compliance]
[shadow_users]
[signing]
[token]
provider = fernet #默认为UUID的方式
[tokenless_auth]
[trust]
[unified_limit]
[wsgi]

#数据库中查询root用户的权限
select * from mysql.user where user='root'\G;

#让keystone同步mysql数据库(此时keystone库中还没有任何表,同步后才会创建)
[root@controller ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone

#初始化Fernet密钥库
[root@controller ~]# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone

[root@controller ~]# keystone-manage credential_setup --keystone-user keystone --keystone-group keystone

#初始化身份认证服务(数据库表项在这会被创建出来)
[root@controller ~]# keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
> --bootstrap-admin-url http://controller:5000/v3/ \
> --bootstrap-internal-url http://controller:5000/v3/ \
> --bootstrap-public-url http://controller:5000/v3/ \
> --bootstrap-region-id RegionOne

#进行web服务器配置
[root@controller ~]# cat /etc/httpd/conf/httpd.conf | grep ^ServerName
ServerName controller

#创建指向 /usr/share/keystone/wsgi-keystone.conf 文件的链接
[root@controller ~]# ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

[root@controller ~]# systemctl enable httpd --now

#生成凭证文件(给openstack客户端使用)
[root@controller ~]# cat openrc
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3

[root@controller ~]# source openrc

11、控制节点安装glance

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
#创建一个新的数据库
MariaDB [(none)]> CREATE DATABASE glance;

#在数据库里赋予权限
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'controller' IDENTIFIED BY 'GLANCE_DBPASS';
Query OK, 0 rows affected (0.00 sec)

#加载凭据
[root@controller ~]# source openrc

#创建openstack用户glance
[root@controller ~]# openstack user create --domain default --password-prompt glance
User Password:glance
Repeat User Password:glance
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | ee04eaf7ed1a47bd82a9fb8c9b2e4821 |
| name | glance |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+

#创建service project
[root@controller ~]# openstack project create --domain default --description "Service Project" service
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Service Project |
| domain_id | default |
| enabled | True |
| id | d6b1c07a6f3e4dd89b2b9dcfc83962f9 |
| is_domain | False |
| name | service |
| parent_id | default |
| tags | [] |
+-------------+----------------------------------+

#赋予glance用户管理员权限
[root@controller ~]# openstack role add --project service --user glance admin

#创建一个glance的service,也可以理解成没有endpoint的catalog文件
[root@controller ~]# openstack service create --name glance --description "OpenStack Image" image
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Image |
| enabled | True |
| id | efe5335aa41a41dd9de475d52cb87487 |
| name | glance |
| type | image |
+-------------+----------------------------------+

#在service加入endpoint
[root@controller ~]# openstack endpoint create --region RegionOne image public http://controller:9292
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 4fafefa86fe94c4e81be149a1ffa3a4e |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | efe5335aa41a41dd9de475d52cb87487 |
| service_name | glance |
| service_type | image |
| url | http://controller:9292 |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne image internal http://controller:9292
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 760f947fa9d245789e55b9d4bfa5ef7e |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | efe5335aa41a41dd9de475d52cb87487 |
| service_name | glance |
| service_type | image |
| url | http://controller:9292 |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne image admin http://controller:9292
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 7e07491e6c8c4e01bea913b58ed44d8f |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | efe5335aa41a41dd9de475d52cb87487 |
| service_name | glance |
| service_type | image |
| url | http://controller:9292 |
+--------------+----------------------------------+

#安装glance软件包
[root@controller ~]# yum install openstack-glance -y

#编辑配置文件
[root@controller ~]# cat /etc/glance/glance-api.conf
[DEFAULT]
[cors]
[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
[image_format]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = glance
[matchmaker_redis]
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]

[root@controller ~]# cat /etc/glance/glance-registry.conf
[DEFAULT]
[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = glance
[matchmaker_redis]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]

#启动glance服务
[root@controller ~]# systemctl enable openstack-glance-api.service --now
[root@controller ~]# systemctl enable openstack-glance-registry.service --now

#同步数据库(注意:按官方文档应先执行db_sync再启动glance服务,上面先启动了服务,同步后需重启服务生效)
su -s /bin/sh -c "glance-manage db_sync" glance

#测试glance的镜像
[root@controller openstall-install]# openstack image create --file cirros-0.4.0-x86_64-disk.img cirros

[root@controller ~]# openstack image list

12、在控制节点上安装nova

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
#前提条件
1、创建数据库并授权
2、创建openstack用户
3、对openstack用户进行基于role的授权,授权的是在service项目中的权限
4、创建nova openstack service
5、创建endpoint

#创建数据库
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
CREATE DATABASE placement;

#对数据库进行授权
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'controller' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'controller' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';

GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'controller' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';

GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'PLACEMENT_DBPASS';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'controller' IDENTIFIED BY 'PLACEMENT_DBPASS';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'PLACEMENT_DBPASS';

#创建nova用户
[root@controller ~]# openstack user create --domain default --password-prompt nova
User Password: nova
Repeat User Password: nova
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | 7d2b31aa06d7457598661834b7f07951 |
| name | nova |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+

[root@controller ~]# openstack role add --project service --user nova admin

[root@controller ~]# openstack service create --name nova --description "OpenStack Compute" compute
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Compute |
| enabled | True |
| id | 4accdf48ffd54f49afed14fefdc12b52 |
| name | nova |
| type | compute |
+-------------+----------------------------------+

#创建placement用户
[root@controller ~]# openstack user create --domain default --password-prompt placement
User Password: placement
Repeat User Password: placement
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | d3c8ccc354754629a5f2eec812c3aef5 |
| name | placement |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+

[root@controller ~]# openstack role add --project service --user placement admin

[root@controller ~]# openstack service create --name placement --description "Placement API" placement
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Placement API |
| enabled | True |
| id | 850d593b366249af8b963e2790906b58 |
| name | placement |
| type | placement |
+-------------+----------------------------------+

#创建endpoint
[root@controller ~]# openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 03347612807f4fe2a4101c8794729985 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 4accdf48ffd54f49afed14fefdc12b52 |
| service_name | nova |
| service_type | compute |
| url | http://controller:8774/v2.1 |
+--------------+----------------------------------+

[root@controller ~]# openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 9631461265b04576abeb76f1a81ff55c |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 4accdf48ffd54f49afed14fefdc12b52 |
| service_name | nova |
| service_type | compute |
| url | http://controller:8774/v2.1 |
+--------------+----------------------------------+

[root@controller ~]# openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | c20cfa7b1b4240b699373b86c584e1a3 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 4accdf48ffd54f49afed14fefdc12b52 |
| service_name | nova |
| service_type | compute |
| url | http://controller:8774/v2.1 |
+--------------+----------------------------------+

[root@controller ~]# openstack endpoint create --region RegionOne placement public http://controller:8778
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | e04804b590854175b6a8b9983ae66772 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 850d593b366249af8b963e2790906b58 |
| service_name | placement |
| service_type | placement |
| url | http://controller:8778 |
+--------------+----------------------------------+

[root@controller ~]# openstack endpoint create --region RegionOne placement internal http://controller:8778
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | dd984422efe84bdb80c6c2f5bdfc604e |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 850d593b366249af8b963e2790906b58 |
| service_name | placement |
| service_type | placement |
| url | http://controller:8778 |
+--------------+----------------------------------+

[root@controller ~]# openstack endpoint create --region RegionOne placement admin http://controller:8778
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 05f11cb20b5745d081d59bad5c9f4b98 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 850d593b366249af8b963e2790906b58 |
| service_name | placement |
| service_type | placement |
| url | http://controller:8778 |
+--------------+----------------------------------+

#安装nova软件包
yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api -y

#编辑配置文件
[root@controller ~]# cat /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:RABBIT_PASS@controller
my_ip = 10.163.1.100
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
[barbican]
[cache]
[cells]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
[placement_database]
connection = mysql+pymysql://placement:PLACEMENT_DBPASS@controller/placement
[powervm]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
[workarounds]
[wsgi]
[xenserver]
[xvp]
[zvm]

#解决和placement相关的bug,编辑配置文件/etc/httpd/conf.d/00-nova-placement-api.conf
[root@controller ~]# cat /etc/httpd/conf.d/00-nova-placement-api.conf
Listen 8778

<VirtualHost *:8778>
WSGIProcessGroup nova-placement-api
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
WSGIDaemonProcess nova-placement-api processes=3 threads=1 user=nova group=nova
WSGIScriptAlias / /usr/bin/nova-placement-api
<IfVersion >= 2.4>
ErrorLogFormat "%M"
</IfVersion>
ErrorLog /var/log/nova/nova-placement-api.log
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
#SSLEngine On
#SSLCertificateFile ...
#SSLCertificateKeyFile ...
</VirtualHost>

Alias /nova-placement-api /usr/bin/nova-placement-api
<Location /nova-placement-api>
SetHandler wsgi-script
Options +ExecCGI
WSGIProcessGroup nova-placement-api
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
</Location>

systemctl restart httpd

#同步数据库,每当cell操作的时候都得同步一下数据库
su -s /bin/sh -c "nova-manage api_db sync" nova

su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova

su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova

su -s /bin/sh -c "nova-manage db sync" nova

#启动服务
systemctl enable openstack-nova-api.service openstack-nova-consoleauth openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

systemctl start openstack-nova-api.service openstack-nova-consoleauth openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

13、在计算节点安装nova

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
[root@compute1 ~]# yum install openstack-nova-compute -y

#编辑配置文件
[root@compute1 ~]# ./grep.sh /etc/nova/nova.conf
[root@compute1 ~]# cat /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:RABBIT_PASS@controller
my_ip = 10.163.1.103
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
[barbican]
[cache]
[cells]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[database]
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[libvirt]
virt_type = qemu
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
[placement_database]
[powervm]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
[zvm]

#启动服务
systemctl enable libvirtd.service openstack-nova-compute.service --now

#测试
[root@compute1 ~]# source openrc
[root@compute1 ~]# openstack compute service list --service nova-compute
+----+--------------+----------+------+---------+-------+----------------------------+
| ID | Binary | Host | Zone | Status | State | Updated At |
+----+--------------+----------+------+---------+-------+----------------------------+
| 9 | nova-compute | compute1 | nova | enabled | up | 2021-07-08T13:24:10.000000 |
+----+--------------+----------+------+---------+-------+----------------------------+
[root@compute1 ~]# openstack compute service list --service nova-compute
+----+--------------+----------+------+---------+-------+----------------------------+
| ID | Binary | Host | Zone | Status | State | Updated At |
+----+--------------+----------+------+---------+-------+----------------------------+
| 9 | nova-compute | compute1 | nova | enabled | up | 2021-07-08T13:25:30.000000 |
| 10 | nova-compute | compute2 | nova | enabled | up | 2021-07-08T13:25:30.000000 |
+----+--------------+----------+------+---------+-------+----------------------------+

#在控制节点上发现计算节点并将其加入cell
[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
Found 2 cell mappings.
Skipping cell0 since it does not contain hosts.
Getting computes from cell 'cell1': 6a7e75b3-a752-4076-9785-f1e54d64cbe1
Checking host mapping for compute host 'compute1': f6c4f92c-2ccc-494b-afc3-a25e5eee12d4
Creating host mapping for compute host 'compute1': f6c4f92c-2ccc-494b-afc3-a25e5eee12d4
Checking host mapping for compute host 'compute2': 98bff046-551f-4d62-8f72-7ddc3ce45f69
Creating host mapping for compute host 'compute2': 98bff046-551f-4d62-8f72-7ddc3ce45f69
Found 2 unmapped computes in cell: 6a7e75b3-a752-4076-9785-f1e54d64cbe1

#让nova-cell发现nova-compute服务将计算节点自动的加入cell中
/etc/nova/nova.conf

[scheduler]
discover_hosts_in_cells_interval = 300

14、openstack安装nova后测试

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
[root@controller ~]# openstack flavor create --vcpus 1 --ram 64 --disk 1 small
+----------------------------+--------------------------------------+
| Field | Value |
+----------------------------+--------------------------------------+
| OS-FLV-DISABLED:disabled | False |
| OS-FLV-EXT-DATA:ephemeral | 0 |
| disk | 1 |
| id | b88a2210-d72d-46f8-baa1-1ae9d0bb2495 |
| name | small |
| os-flavor-access:is_public | True |
| properties | |
| ram | 64 |
| rxtx_factor | 1.0 |
| swap | |
| vcpus | 1 |
+----------------------------+--------------------------------------+

[root@controller ~]# openstack flavor list
+--------------------------------------+-------+-----+------+-----------+-------+-----------+
| ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public |
+--------------------------------------+-------+-----+------+-----------+-------+-----------+
| b88a2210-d72d-46f8-baa1-1ae9d0bb2495 | small | 64 | 1 | 0 | 1 | True |
+--------------------------------------+-------+-----+------+-----------+-------+-----------+

15、openstack安装neutron前准备

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
#在控制节点上安装neutron(实际上neutron是要安装在网络节点上的,这里是将控制节点也当成网络节点)
1、创建数据库并授权
MariaDB [(none)]> CREATE DATABASE neutron;
2、创建用户并分配相应的role
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'NEUTRON_DBPASS';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'controller' IDENTIFIED BY 'NEUTRON_DBPASS';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'NEUTRON_DBPASS';
3、创建空的catalog
[root@controller ~]# openstack user create --domain default --password-prompt neutron
User Password: neutron
Repeat User Password: neutron
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | 3dac15bb7ace4f8584030d210721d847 |
| name | neutron |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+

[root@controller ~]# openstack role add --project service --user neutron admin

[root@controller ~]# openstack service create --name neutron --description "OpenStack Networking" network
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Networking |
| enabled | True |
| id | 1fb5f34378df4aafa413d2898c5b6912 |
| name | neutron |
| type | network |
+-------------+----------------------------------+
4、创建endpoint
[root@controller ~]# openstack endpoint create --region RegionOne network public http://controller:9696
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | d75fa491f5894d769a4d120f62f07722 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 1fb5f34378df4aafa413d2898c5b6912 |
| service_name | neutron |
| service_type | network |
| url | http://controller:9696 |
+--------------+----------------------------------+

[root@controller ~]# openstack endpoint create --region RegionOne \
> network internal http://controller:9696
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 90eaf78d239d4bdabb3ab14b759c2604 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 1fb5f34378df4aafa413d2898c5b6912 |
| service_name | neutron |
| service_type | network |
| url | http://controller:9696 |
+--------------+----------------------------------+

[root@controller ~]# openstack endpoint create --region RegionOne network admin http://controller:9696
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 4b5e94267ed745758c49bb9005f92c59 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 1fb5f34378df4aafa413d2898c5b6912 |
| service_name | neutron |
| service_type | network |
| url | http://controller:9696 |
+--------------+----------------------------------+

16、openstack安装网络节点

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
#在控制节点上安装软件包
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y

#配置neutron-server配置文件
[root@controller ~]# cat /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[agent]
[cors]
[database]
connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[matchmaker_redis]
[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[quotas]
[ssl]

[root@controller ~]# cat /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[l2pop]
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = true

[root@controller ~]# cat /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:eth1
[network_log]
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
enable_vxlan = true
local_ip = 172.16.100.11
l2_population = true

#加载一下内核模块
[root@controller ~]# modprobe br_netfilter

[root@controller ~]# cat /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = linuxbridge
[agent]
[ovs]

[root@controller ~]# cat /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
[agent]
[ovs]

[root@controller ~]# cat /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = METADATA_SECRET
[agent]
[cache]

#创建core-plugin配置文件的软链接,这样才能让neutron-server进程使用它
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

#同步数据库
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

#将eth1接口的IP地址配置为配置文件中local_ip指定的地址
[root@controller ~]# nmcli connection modify eth1 ipv4.method manual ipv4.addresses 172.16.100.11/24
[root@controller ~]# nmcli con up eth1

#启动服务
systemctl enable neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service \
neutron-l3-agent.service --now

systemctl status neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service \
neutron-l3-agent.service

17、openstack计算节点安装neutron

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
#安装软件包
[root@compute1 ~]# yum install openstack-neutron-linuxbridge ebtables ipset -y

#修改配置文件
[root@compute1 ~]# cat /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
[agent]
[cors]
[database]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[quotas]
[ssl]

[root@compute1 ~]# cat /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:eth1
[network_log]
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
enable_vxlan = true
local_ip = 172.16.100.12
l2_population = true

#加载模块
[root@compute1 ~]# modprobe br_netfilter

#配置eth1网卡的IP地址
[root@compute1 ~]# nmcli connection modify eth1 ipv4.method manual ipv4.addresses 172.16.100.12/24
[root@compute1 ~]# nmcli con up eth1


#启动服务
[root@compute1 ~]# systemctl enable neutron-linuxbridge-agent --now

#配置nova使用neutron
#控制节点上
[root@controller ~]# grep "\[neutron\]" /etc/nova/nova.conf -A 11
[neutron]
url = http://controller:9696
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = METADATA_SECRET

#在计算节点上
[root@compute1 ~]# grep "\[neutron\]" /etc/nova/nova.conf -A 9
[neutron]
url = http://controller:9696
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron

#在控制节点上重启openstack-nova-api服务
systemctl restart openstack-nova-api.service

#在计算节点上重启openstack-nova-compute服务
systemctl restart openstack-nova-compute.service


#安装后的测试
[root@control ~]# openstack network create zy-test
+---------------------------+--------------------------------------+
| Field | Value |
+---------------------------+--------------------------------------+
| admin_state_up | UP |
| availability_zone_hints | |
| availability_zones | |
| created_at | 2022-03-10T13:07:46Z |
| description | |
| dns_domain | None |
| id | f973d2d0-e2c2-480d-aef8-df1cbee6a653 |
| ipv4_address_scope | None |
| ipv6_address_scope | None |
| is_default | False |
| is_vlan_transparent | None |
| mtu | 1450 |
| name | zy-test |
| port_security_enabled | True |
| project_id | 829b08f58a77457499fcb3fc3a10d47e |
| provider:network_type | vxlan |
| provider:physical_network | None |
| provider:segmentation_id | 43 |
| qos_policy_id | None |
| revision_number | 1 |
| router:external | Internal |
| segments | None |
| shared | False |
| status | ACTIVE |
| subnets | |
| tags | |
| updated_at | 2022-03-10T13:07:47Z |
+---------------------------+--------------------------------------+


[root@control ~]# openstack subnet create --network zy-test --subnet-range 1.1.1.0/24 zy-test
+-------------------+--------------------------------------+
| Field | Value |
+-------------------+--------------------------------------+
| allocation_pools | 1.1.1.2-1.1.1.254 |
| cidr | 1.1.1.0/24 |
| created_at | 2022-03-10T13:08:11Z |
| description | |
| dns_nameservers | |
| enable_dhcp | True |
| gateway_ip | 1.1.1.1 |
| host_routes | |
| id | a4a2e2e7-9f02-48ef-8e2c-82c3df437299 |
| ip_version | 4 |
| ipv6_address_mode | None |
| ipv6_ra_mode | None |
| name | zy-test |
| network_id | f973d2d0-e2c2-480d-aef8-df1cbee6a653 |
| project_id | 829b08f58a77457499fcb3fc3a10d47e |
| revision_number | 0 |
| segment_id | None |
| service_types | |
| subnetpool_id | None |
| tags | |
| updated_at | 2022-03-10T13:08:11Z |
+-------------------+--------------------------------------+

[root@control ~]# openstack network list
+--------------------------------------+---------+--------------------------------------+
| ID | Name | Subnets |
+--------------------------------------+---------+--------------------------------------+
| f973d2d0-e2c2-480d-aef8-df1cbee6a653 | zy-test | a4a2e2e7-9f02-48ef-8e2c-82c3df437299 |
+--------------------------------------+---------+--------------------------------------+

[root@control ~]# openstack subnet list
+--------------------------------------+---------+--------------------------------------+------------+
| ID | Name | Network | Subnet |
+--------------------------------------+---------+--------------------------------------+------------+
| a4a2e2e7-9f02-48ef-8e2c-82c3df437299 | zy-test | f973d2d0-e2c2-480d-aef8-df1cbee6a653 | 1.1.1.0/24 |
+--------------------------------------+---------+--------------------------------------+------------+


[root@control ~]# openstack flavor list
+--------------------------------------+-------+-----+------+-----------+-------+-----------+
| ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public |
+--------------------------------------+-------+-----+------+-----------+-------+-----------+
| b65820c7-8929-4d30-9ad1-126ddad24a66 | small | 64 | 1 | 0 | 1 | True |
+--------------------------------------+-------+-----+------+-----------+-------+-----------+
[root@control ~]# openstack image list
+--------------------------------------+--------+--------+
| ID | Name | Status |
+--------------------------------------+--------+--------+
| de6dddc5-02a0-4704-b461-0917f8d116e4 | cirros | active |
+--------------------------------------+--------+--------+

[root@control ~]# openstack server create --flavor small --image cirros --network zy-test zy-server
+-------------------------------------+-----------------------------------------------+
| Field | Value |
+-------------------------------------+-----------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | |
| OS-EXT-SRV-ATTR:host | None |
| OS-EXT-SRV-ATTR:hypervisor_hostname | None |
| OS-EXT-SRV-ATTR:instance_name | |
| OS-EXT-STS:power_state | NOSTATE |
| OS-EXT-STS:task_state | scheduling |
| OS-EXT-STS:vm_state | building |
| OS-SRV-USG:launched_at | None |
| OS-SRV-USG:terminated_at | None |
| accessIPv4 | |
| accessIPv6 | |
| addresses | |
| adminPass | Uah8XXvVyS5E |
| config_drive | |
| created | 2022-03-10T13:12:15Z |
| flavor | small (b65820c7-8929-4d30-9ad1-126ddad24a66) |
| hostId | |
| id | d822e18a-9049-4827-8b46-ae47fc475227 |
| image | cirros (de6dddc5-02a0-4704-b461-0917f8d116e4) |
| key_name | None |
| name | zy-server |
| progress | 0 |
| project_id | 829b08f58a77457499fcb3fc3a10d47e |
| properties | |
| security_groups | name='default' |
| status | BUILD |
| updated | 2022-03-10T13:12:15Z |
| user_id | 09ce85b53a7a459aaf45165d08faa0e5 |
| volumes_attached | |
+-------------------------------------+-----------------------------------------------+

[root@control ~]# openstack server list
+--------------------------------------+-----------+--------+-----------------+--------+--------+
| ID | Name | Status | Networks | Image | Flavor |
+--------------------------------------+-----------+--------+-----------------+--------+--------+
| d822e18a-9049-4827-8b46-ae47fc475227 | zy-server | ACTIVE | zy-test=1.1.1.3 | cirros | small |
+--------------------------------------+-----------+--------+-----------------+--------+--------+