# Full list of options available at: http://wiki.openstack.org/NovaConfigOptions
[DEFAULT]

# LOG/STATE
verbose=True
logdir=/var/log/nova

### nova.availability_zones ###
###############################
# availability_zone to show internal services under (string value)
#internal_service_availability_zone=internal

# default compute node availability_zone (string value)
#default_availability_zone=nova

### nova.crypto ###
###################
# Filename of root CA (string value)
#ca_file=cacert.pem

# Filename of private key (string value)
#key_file=private/cakey.pem

# Filename of root Certificate Revocation List (string value)
#crl_file=crl.pem

# Where we keep our keys (string value)
#keys_path=$state_path/keys

# Where we keep our root CA (string value)
#ca_path=$state_path/CA

# Should we use a CA for each project? (boolean value)
#use_project_ca=false

# Subject for certificate for users, %s for project, user,
# timestamp (string value)
#user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s

# Subject for certificate for projects, %s for project,
# timestamp (string value)
#project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s

### nova.exception ###
# make exception message format errors fatal (boolean value)
#fatal_exception_format_errors=false

### nova.manager ###
# Some periodic tasks can be run in a separate process. Should
# we run them here? (boolean value)
#run_external_periodic_tasks=true

#############################
# Mandatory general options #
#############################
# ip address of this host (string value)
my_ip=##NOVA_HOST##
#use_ipv6=false


########
# APIs #
########
# Selects the APIs you want to activate.
# Each API will bind on a specific port.
# Compute nodes should run only the metadata API;
# a nova API endpoint node should run osapi_compute.
# If you want to use nova-volume you can also enable
# osapi_volume, but if you want to run cinder, do not
# activate it.
# The list of APIs is: ec2,osapi_compute,metadata,osapi_volume
enabled_apis=ec2,osapi_compute,metadata
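# Illustrative sketch (not part of the original template): a compute-only node
# would typically enable just the metadata API, e.g.:
#enabled_apis=metadata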

# NOVA API #
# # # # #  #
#osapi_compute_listen="0.0.0.0"
#osapi_compute_listen_port=8774

#api_paste_config=api-paste.ini

# Allows use of instance password during server creation
#enable_instance_password=true


# Name of this node.  This can be an opaque identifier.  It is
# not necessarily a hostname, FQDN, or IP address. However,
# the node name must be valid within an AMQP key, and if using
# ZeroMQ, a valid hostname, FQDN, or IP address (string value)
#host="firefly-2.local"

#######################
# Nova API extensions #
#######################
# osapi compute extension to load (multi valued)
osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions

# Specify list of extensions to load when using
# osapi_compute_extension option with
# nova.api.openstack.compute.contrib.select_extensions (list value)
#osapi_compute_ext_list=""

# Permit instance snapshot operations. (boolean value)
#allow_instance_snapshots=true

# S3 #
# #  #
s3_host=$my_ip
#s3_port=3333

# EC2 API #
# # # # # #
#ec2_host="$my_ip"
ec2_dmz_host="$my_ip"
#ec2_private_dns_show_ip=True
#ec2_path="/services/Cloud"
#ec2_port=8773
# the protocol to use when connecting to the ec2 api server (http, https) (string value)
#ec2_scheme=http

# port and IP for ec2 api to listen
#ec2_listen="0.0.0.0"
#ec2_listen_port=8773

# Metadata API #
# # # # # # #  #
#metadata_host=$my_ip
#metadata_port=8775
#metadata_listen=0.0.0.0

########
# MISC #
########
#resume_guests_state_on_host_boot=false
#instance_name_template="instance-%08x"
# Inject the admin password at boot time, without an agent.
#libvirt_inject_password=false

########
# LOGS #
########
#log-date-format="%Y-%m-%d %H:%M:%S"
#debug=false

##########
# SYSTEM #
##########
state_path=/var/lib/nova
lock_path=/var/lock/nova
rootwrap_config=/etc/nova/rootwrap.conf
#memcached_servers=<None>

##################
# AUTHENTICATION #
##################
auth_strategy=keystone
# Seconds for auth tokens to linger

#############
# SCHEDULER #
#############
compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
#scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
scheduler_default_filters=AggregateInstanceExtraSpecsFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter
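# Illustrative sketch (assumption): further filters can be appended to the
# list, e.g. adding CoreFilter to also check available vCPUs:
#scheduler_default_filters=AggregateInstanceExtraSpecsFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,CoreFilter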

####################
# VOLUMES / CINDER #
####################
# The full class name of the volume API class to use (string value)
#volume_api_class=nova.volume.cinder.API

# Allow insecure SSL requests to cinder (boolean value)
#cinder_api_insecure=false

# Allow attach between instance and volume in different
# availability zones. (boolean value)
#cinder_cross_az_attach=true

# Libvirt handlers for remote volumes. (list value)
#libvirt_volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver,aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver,glusterfs=nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver,fibre_channel=nova.virt.libvirt.volume.LibvirtFibreChannelVolumeDriver,scality=nova.virt.libvirt.volume.LibvirtScalityVolumeDriver

############
# RABBITMQ #
############
rabbit_host = ##RABBITMQ_HOST##
#fake_rabbit=false
#rabbit_virtual_host=/
rabbit_userid = ##RABBITMQ_USER##
rabbit_password = ##RABBITMQ_PASSWORD##
rabbit_port = ##RABBITMQ_PORT##
rabbit_use_ssl=false
#rabbit_retry_interval=1
# The messaging module to use, defaults to kombu (works for rabbit).
# You can also use qpid: nova.rpc.impl_qpid
rpc_backend = nova.openstack.common.rpc.impl_kombu

##########
# GLANCE #
##########
host=##GLANCE_HOST##
port=9292
protocol=http

# A list of the glance api servers available to nova. Prefix
# with https:// for ssl-based glance api servers.
# ([hostname|ip]:port) (list value)
api_servers=$glance_host:$glance_port
#api_servers=localhost:9292
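# Illustrative sketch (assumption): several glance servers can be listed,
# optionally with an https:// prefix, e.g.:
#api_servers=10.0.0.11:9292,https://glance2.example.com:9292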

# Allow insecure SSL (https) requests to glance (boolean value)
#api_insecure=false

# Cache glance images locally
#cache_images=true
# Number of retries when downloading an image from glance (integer value)
#num_retries=0

#image_service=nova.image.glance.GlanceImageService

###############################
# Type of network APIs to use #
###############################
# The full class name of the network API class to use (string value)
# Possible values are:
#    nova.network.api.API (if you wish to use nova-network)
#    nova.network.neutronv2.api.API (if you want to use Neutron)
network_api_class=nova.network.neutronv2.api.API

# Type of security group API. Possible values are:
#      nova (if you are using nova-network)
#      neutron (if you use neutron)
security_group_api = neutron

# Driver used to create ethernet devices. (string value)
# When using linux net, use: nova.network.linux_net.LinuxBridgeInterfaceDriver
# for Neutron, use: nova.network.linux_net.LinuxOVSInterfaceDriver
linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver

# Firewall type to use. (defaults to hypervisor specific iptables driver) (string value)
# For linux net, use: nova.virt.libvirt.firewall.IptablesFirewallDriver
# For Neutron and OVS, use: nova.virt.firewall.NoopFirewallDriver (since this is handled by Neutron)
###firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
firewall_driver=nova.virt.firewall.NoopFirewallDriver

#######################
# NETWORK (linux net) #
#######################
#network_manager=nova.network.manager.VlanManager
network_manager=nova.network.manager.FlatDHCPManager
#force_dhcp_release=false
force_dhcp_release=True
#dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf
dhcpbridge_flagfile=/etc/nova/nova.conf
#dhcpbridge=$bindir/nova-dhcpbridge
#dhcp_lease_time=120
# Firewall driver (defaults to hypervisor specific iptables driver) (string value)
# Commented out: leaving this active would override the NoopFirewallDriver
# set above for Neutron, since the last occurrence of an option wins.
#firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
# Interface for public IP addresses (default: eth0) (string value)
#public_interface=br-ext
public_interface=eth0
# VLANs will bridge into this interface if set (default: <None>) (string value)
#vlan_interface=eth1
vlan_interface=eth0
# Bridge for simple network instances (default: <None>) (string value)
flat_network_bridge=br100
# FlatDhcp will bridge into this interface if set (default: <None>) (string value)
flat_interface=eth0

# A list of DMZ ranges that should be accepted (list value).
# Set it to the /32 of your metadata server if you have just one;
# it is a CIDR list in case there are multiple services that you want
# to keep reaching via their internal private IPs.
#dmz_cidr=169.254.169.254/32
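# Illustrative sketch (assumption): several ranges can be listed, e.g.:
#dmz_cidr=169.254.169.254/32,10.0.0.0/24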
# Name of Open vSwitch bridge used with linuxnet (string value)
#linuxnet_ovs_integration_bridge="br-int"
#routing_source_ip="$my_ip"
# Only the first NIC of a VM will get a default gateway from the DHCP server
#use_single_default_gateway=false

###########
# Neutron #
###########
# This is the URL of your neutron server:
neutron_url=##NEUTRON_PUBLIC_URL##
neutron_auth_strategy=keystone
neutron_admin_tenant_name=service
neutron_admin_username=##NEUTRON_SERVICE_USER##
neutron_admin_password=##NEUTRON_SERVICE_PASSWORD##
# This is the URL of your Keystone server
neutron_admin_auth_url=##KEYSTONE_ADMIN_URL##

# What's below is only needed for nova-compute.

# Set flag to indicate Neutron will proxy metadata requests
# and resolve instance ids. This is needed to use neutron-metadata-agent
# (instead of the metadata server of nova-api,
# which doesn't work with neutron) (boolean value)
service_neutron_metadata_proxy=True

# Shared secret used to validate proxied Neutron metadata requests.
# This password should match what is in /etc/neutron/metadata_agent.ini
# (string value)
neutron_metadata_proxy_shared_secret= ##METADATA_PROXY_SHARED_SECRET##
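# Illustrative sketch (assumption): the matching entry in
# /etc/neutron/metadata_agent.ini would look like:
#   metadata_proxy_shared_secret = <same value as above>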

#################
# NOVNC CONSOLE #
#################
# With the Debian package, the spicehtml5 console is the default. To enable the
# NoVNC mode, enable the switch below, disable SPICE in this nova.conf file as
# well (see far below), then edit /etc/default/nova-consoleproxy to switch to
# NoVNC, shut down SPICE with /etc/init.d/nova-spicehtml5proxy stop, and
# finally start nova-novncproxy.
# Do not forget to restart the Nova daemons and restart your VMs if you want to
# use NoVNC from now on (a VM's video card needs to be attached to a console
# type, and VMs can accept only one video card at a time).
vnc_enabled=True
novncproxy_base_url=##NOVA_NOVNCPROXY_BASE_URL##
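# Illustrative sketch (assumption): the base URL usually takes the form
#   http://<controller-address>:6080/vnc_auto.html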
# Change vncserver_proxyclient_address and vncserver_listen to match each compute host
vncserver_proxyclient_address=##NOVA_HOST##
vncserver_listen=##NOVA_HOST##
vnc_keymap="en-us"

######################################
# nova-xenvncproxy (eg: xvpvncproxy) #
######################################
# See NoVNC comments above for switching away from SPICE to XVP
#xvpvncproxy_host="0.0.0.0"
#xvpvncproxy_port=6081

#########
# QUOTA #
#########
# number of instances allowed per project (integer value)
#quota_instances=10
# number of instance cores allowed per project (integer value)
#quota_cores=20
# megabytes of instance ram allowed per project (integer value)
#quota_ram=51200
# number of floating ips allowed per project (integer value)
#quota_floating_ips=10
# number of metadata items allowed per instance (integer value)
#quota_metadata_items=128
# number of injected files allowed (integer value)
#quota_injected_files=5
# number of bytes allowed per injected file (integer value)
#quota_injected_file_content_bytes=10240
# number of bytes allowed per injected file path (integer value)
#quota_injected_file_path_bytes=255
# number of security groups per project (integer value)
#quota_security_groups=10
# number of security rules per security group (integer value)
#quota_security_group_rules=20
# number of key pairs per user (integer value)
#quota_key_pairs=100
# number of seconds until a reservation expires (integer value)
#reservation_expire=86400
# count of reservations until usage is refreshed (integer value)
#until_refresh=0
# number of seconds between subsequent usage refreshes (integer value)
#max_age=0
# default driver to use for quota checks (string value)
#quota_driver=nova.quota.DbQuotaDriver

############
# DATABASE #
############
[database]
connection=postgresql://##NOVA_DB_USER##:##NOVA_DB_PASSWORD##@127.0.0.1/nova
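# Illustrative sketch (assumption): any SQLAlchemy connection URL works here,
# e.g. a MySQL backend on a separate database host:
#connection=mysql://nova:secret@db.example.com/nova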

#############
# CONDUCTOR #
#############
[conductor]
# Perform nova-conductor operations locally (boolean value)
# Enable use_local on a single-node setup; for multi-node deployments this should be disabled.
use_local=true
# the topic conductor nodes listen on (string value)
#topic=conductor
# full class name for the Manager for conductor (string value)
#manager=nova.conductor.manager.ConductorManager

#########
# CELLS #
#########
[cells]
# Cells communication driver to use (string value)
#driver=nova.cells.rpc_driver.CellsRPCDriver

# Number of seconds after an instance was updated or deleted
# to continue to update cells (integer value)
#instance_updated_at_threshold=3600

# Number of instances to update per periodic task run (integer
# value)
#instance_update_num_instances=1

# Maximum number of hops for cells routing. (integer value)
#max_hop_count=10

# Cells scheduler to use (string value)
#scheduler=nova.cells.scheduler.CellsScheduler

# Enable cell functionality (boolean value)
#enable=false

# the topic cells nodes listen on (string value)
#topic=cells

# Manager for cells (string value)
#manager=nova.cells.manager.CellsManager

# name of this cell (string value)
#name=nova

# Key/Multi-value list with the capabilities of the cell (list
# value)
#capabilities=hypervisor=xenserver;kvm,os=linux;windows

# Seconds to wait for response from a call to a cell. (integer
# value)
#call_timeout=60

# Percentage of cell capacity to hold in reserve. Affects both
# memory and disk utilization (floating point value)
#reserve_percent=10.0

# Type of cell: api or compute (string value)
#cell_type=<None>

# Base queue name to use when communicating between cells.
# Various topics by message type will be appended to this.
# (string value)
#rpc_driver_queue_base=cells.intercell

# Filter classes the cells scheduler should use.  An entry of
# "nova.cells.filters.all_filters"maps to all cells filters
# included with nova. (list value)
#scheduler_filter_classes=nova.cells.filters.all_filters

# Weigher classes the cells scheduler should use.  An entry of
# "nova.cells.weights.all_weighers"maps to all cell weighers
# included with nova. (list value)
#scheduler_weight_classes=nova.cells.weights.all_weighers

# How many retries when no cells are available. (integer
# value)
#scheduler_retries=10

# How often to retry in seconds when no cells are available.
# (integer value)
#scheduler_retry_delay=2

# Seconds between getting fresh cell info from db. (integer
# value)
#db_check_interval=60

# Multiplier used to weigh mute children.  (The value should
# be negative.) (floating point value)
#mute_weight_multiplier=-10.0

# Weight value assigned to mute children.  (The value should
# be positive.) (floating point value)
#mute_weight_value=1000.0

# Number of seconds after which a lack of capability and
# capacity updates signals the child cell is to be treated as
# a mute. (integer value)
#mute_child_interval=300

# Multiplier used for weighing ram.  Negative numbers mean to
# stack vs spread. (floating point value)
#ram_weight_multiplier=10.0

#############
# BAREMETAL #
#############
[baremetal]
# The backend to use for bare-metal database (string value)
#db_backend=sqlalchemy

# The SQLAlchemy connection string used to connect to the
# bare-metal database (string value)
#sql_connection=sqlite:///$state_path/baremetal_$sqlite_db

# Whether baremetal compute injects password or not (boolean value)
#inject_password=true

# Template file for injected network (string value)
#injected_network_template=$pybasedir/nova/virt/baremetal/interfaces.template

# Baremetal VIF driver. (string value)
#vif_driver=nova.virt.baremetal.vif_driver.BareMetalVIFDriver

# Baremetal volume driver. (string value)
#volume_driver=nova.virt.baremetal.volume_driver.LibvirtVolumeDriver

# a list of additional capabilities corresponding to
# instance_type_extra_specs for this compute host to
# advertise. Valid entries are name=value pairs. For example,
# "key1:val1, key2:val2" (list value)
#instance_type_extra_specs=

# Baremetal driver back-end (pxe or tilera) (string value)
#driver=nova.virt.baremetal.pxe.PXE

# Baremetal power management method (string value)
#power_manager=nova.virt.baremetal.ipmi.IPMI

# Baremetal compute node's tftp root path (string value)
#tftp_root=/tftpboot

# path to baremetal terminal program (string value)
#terminal=shellinaboxd

# path to baremetal terminal SSL cert (PEM) (string value)
#terminal_cert_dir=<None>

# path to the directory that stores pidfiles of baremetal_terminal
# (string value)
#terminal_pid_dir=$state_path/baremetal/console

# maximal number of retries for IPMI operations (integer
# value)
#ipmi_power_retry=5

# Default kernel image ID used in deployment phase (string
# value)
#deploy_kernel=<None>

# Default ramdisk image ID used in deployment phase (string
# value)
#deploy_ramdisk=<None>

# Template file for injected network config (string value)
#net_config_template=$pybasedir/nova/virt/baremetal/net-dhcp.ubuntu.template

# additional append parameters for baremetal PXE boot (string
# value)
#pxe_append_params=<None>

# Template file for PXE configuration (string value)
#pxe_config_template=$pybasedir/nova/virt/baremetal/pxe_config.template

# Timeout for PXE deployments. Default: 0 (unlimited) (integer
# value)
#pxe_deploy_timeout=0

# IP or name of the virtual power host (string value)
#virtual_power_ssh_host=

# base command to use for virtual power (vbox, virsh) (string
# value)
#virtual_power_type=vbox

# user to execute virtual power commands as (string value)
#virtual_power_host_user=

# password for virtual power host_user (string value)
#virtual_power_host_pass=

# Do not set this outside of dev/test environments. If a node does
# not have a fixed PXE IP address, volumes are exported with a
# globally open ACL (boolean value)
#use_unsafe_iscsi=false

# iSCSI IQN prefix used in baremetal volume connections.
# (string value)
#iscsi_iqn_prefix=iqn.2010-10.org.openstack.baremetal

##########
# VMWARE #
##########
[vmware]
# Name of Integration Bridge (string value)
#integration_bridge=br-int

#########
# SPICE #
#########
[spice]
# location of spice html5 console proxy, in the form
# "http://www.example.com:6082/spice_auto.html" (string value)
#html5proxy_base_url=http://localhost:6082/spice_auto.html

# IP address on which instance spice server should listen (string value)
server_listen=0.0.0.0

# the address to which proxy clients (like nova-spicehtml5proxy) should connect (string value)
server_proxyclient_address=$my_ip

# enable spice related features (boolean value)
enabled=true

# enable spice guest agent support (boolean value)
#agent_enabled=true

# keymap for spice (string value)
#keymap=en-us

######################
# Keystone authtoken #
######################
[keystone_authtoken]
auth_host = ##NOVA_HOST##
auth_uri = ##KEYSTONE_INTERNAL_URL##
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = ##NOVA_SERVICE_USER##
admin_password = ##NOVA_SERVICE_PASSWORD##
auth_version = v2.0

###########
# COMPUTE #
###########
compute_driver=libvirt.LibvirtDriver
instance_name_template=instance-%08x
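# Illustrative note (not part of the original template): with this template an
# instance whose database id is 42 would be named instance-0000002a
# (%08x renders the id as 8-digit zero-padded hex).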
api_paste_config=/etc/nova/api-paste.ini

# COMPUTE/APIS: if you have separate configs for separate services
# # this flag is required for both nova-api and nova-compute
allow_resize_to_same_host=True

############
## LIBVIRT #
############
[libvirt]
# The testing hardware does not support hardware acceleration,
# so libvirt is configured here to use qemu instead of KVM.
virt_type=qemu
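# Illustrative sketch (assumption): on hosts with VT-x/AMD-V support,
# hardware-accelerated KVM could be used instead:
#virt_type=kvm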