Diffstat (limited to 'openstack/usr/share/openstack/nova/nova.conf')
 openstack/usr/share/openstack/nova/nova.conf | 642 ++++++++++++++++++++++++++
 1 file changed, 642 insertions(+), 0 deletions(-)
diff --git a/openstack/usr/share/openstack/nova/nova.conf b/openstack/usr/share/openstack/nova/nova.conf
new file mode 100644
index 00000000..82c33ee4
--- /dev/null
+++ b/openstack/usr/share/openstack/nova/nova.conf
@@ -0,0 +1,642 @@
+# Full list of options available at: http://wiki.openstack.org/NovaConfigOptions
+[DEFAULT]
+
+# LOG/STATE
+#verbose=False
+#logdir=/var/log/nova
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+use_syslog = True
+
+### nova.availability_zones ###
+###############################
+# availability_zone to show internal services under (string value)
+#internal_service_availability_zone=internal
+
+# default compute node availability_zone (string value)
+#default_availability_zone=nova
+
+### nova.crypto ###
+###################
+# Filename of root CA (string value)
+#ca_file=cacert.pem
+
+# Filename of private key (string value)
+#key_file=private/cakey.pem
+
+# Filename of root Certificate Revocation List (string value)
+#crl_file=crl.pem
+
+# Where we keep our keys (string value)
+#keys_path=$state_path/keys
+
+# Where we keep our root CA (string value)
+#ca_path=$state_path/CA
+
+# Should we use a CA for each project? (boolean value)
+#use_project_ca=false
+
+# Subject for certificate for users, %s for project, user,
+# timestamp (string value)
+#user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s
+
+# Subject for certificate for projects, %s for project,
+# timestamp (string value)
+#project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s
+
+### nova.exception ###
+# make exception message format errors fatal (boolean value)
+#fatal_exception_format_errors=false
+
+### nova.manager ###
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+#############################
+# Mandatory general options #
+#############################
+# ip address of this host (string value)
+my_ip={{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+#use_ipv6=false
+
+
+########
+# APIs #
+########
+# Selects the type of APIs you want to activate.
+# Each API will bind on a specific port.
+# Compute nodes should run only the metadata API,
+# a nova API endpoint node should run osapi_compute.
+# If you want to use nova-volume you can also enable
+# osapi_volume, but if you want to run cinder, do not
+# activate it.
+# The list of APIs is: ec2,osapi_compute,metadata,osapi_volume
+enabled_apis=ec2,osapi_compute,metadata
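+# For example, a dedicated compute node that relies on a separate API
+# node could enable only the metadata API (example, not enabled here):
+#enabled_apis=metadata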
+
+# NOVA API #
+# # # # # #
+#osapi_compute_listen="0.0.0.0"
+#osapi_compute_listen_port=8774
+
+#api_paste_config=api-paste.ini
+
+# Allows use of instance password during server creation
+#enable_instance_password=true
+
+
+# Name of this node. This can be an opaque identifier. It is
+# not necessarily a hostname, FQDN, or IP address. However,
+# the node name must be valid within an AMQP key, and if using
+# ZeroMQ, a valid hostname, FQDN, or IP address (string value)
+#host="firefly-2.local"
+
+#######################
+# Nova API extensions #
+#######################
+# osapi compute extension to load (multi valued)
+osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions
+
+# Specify list of extensions to load when using
+# osapi_compute_extension option with
+# nova.api.openstack.compute.contrib.select_extensions (list value)
+#osapi_compute_ext_list=""
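+# For example, to load only a chosen subset of extensions
+# (the list entries below are hypothetical):
+#osapi_compute_extension=nova.api.openstack.compute.contrib.select_extensions
+#osapi_compute_ext_list=Keypairs,Quotas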
+
+# Permit instance snapshot operations. (boolean value)
+#allow_instance_snapshots=true
+
+# S3 #
+# # #
+s3_host=$my_ip
+#s3_port=3333
+
+# EC2 API #
+# # # # # #
+#ec2_host="$my_ip"
+ec2_dmz_host="$my_ip"
+#ec2_private_dns_show_ip=True
+#ec2_path="/services/Cloud"
+#ec2_port=8773
+# the protocol to use when connecting to the ec2 api server (http, https) (string value)
+#ec2_scheme=http
+
+# IP and port for the EC2 API to listen on
+#ec2_listen="0.0.0.0"
+#ec2_listen_port=8773
+
+# Metadata API #
+# # # # # # # #
+#metadata_host=$my_ip
+#metadata_port=8775
+#metadata_listen=0.0.0.0
+
+########
+# MISC #
+########
+#resume_guests_state_on_host_boot=false
+#instance_name_template="instance-%08x"
+# Inject the admin password at boot time, without an agent.
+#libvirt_inject_password=false
+
+########
+# LOGS #
+########
+#log-date-format="%Y-%m-%d %H:%M:%S"
+#debug=false
+
+##########
+# SYSTEM #
+##########
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+rootwrap_config=/etc/nova/rootwrap.conf
+#memcached_servers=<None>
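+# For example, a single local memcached instance (example value):
+#memcached_servers=127.0.0.1:11211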
+
+##################
+# AUTHENTICATION #
+##################
+auth_strategy=keystone
+# Seconds for auth tokens to linger
+
+#############
+# SCHEDULER #
+#############
+compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
+#scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
+scheduler_default_filters=AggregateInstanceExtraSpecsFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter
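+# For example, to also match image properties against host capabilities,
+# ImagePropertiesFilter could be appended (one possible variant, not enabled here):
+#scheduler_default_filters=AggregateInstanceExtraSpecsFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ImagePropertiesFilter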
+
+####################
+# VOLUMES / CINDER #
+####################
+# The full class name of the volume API class to use (string value)
+#volume_api_class=nova.volume.cinder.API
+
+# Allow insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure=false
+
+# Allow attach between instance and volume in different
+# availability zones. (boolean value)
+#cinder_cross_az_attach=true
+
+# Libvirt handlers for remote volumes. (list value)
+#libvirt_volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver,aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver,glusterfs=nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver,fibre_channel=nova.virt.libvirt.volume.LibvirtFibreChannelVolumeDriver,scality=nova.virt.libvirt.volume.LibvirtScalityVolumeDriver
+
+############
+# RABBITMQ #
+############
+rabbit_host = {{ RABBITMQ_HOST }}
+#fake_rabbit=false
+#rabbit_virtual_host=/
+rabbit_userid = {{ RABBITMQ_USER }}
+rabbit_password = {{ RABBITMQ_PASSWORD }}
+rabbit_port = {{ RABBITMQ_PORT }}
+rabbit_use_ssl=false
+#rabbit_retry_interval=1
+# The messaging module to use; defaults to kombu (works for rabbit).
+# You can also use Qpid: nova.openstack.common.rpc.impl_qpid
+rpc_backend = nova.openstack.common.rpc.impl_kombu
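+# For example, to switch to Qpid instead of RabbitMQ:
+#rpc_backend=nova.openstack.common.rpc.impl_qpid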
+
+##########
+# GLANCE #
+##########
+glance_host={{ CONTROLLER_HOST_ADDRESS }}
+glance_port=9292
+glance_protocol=http
+
+# A list of the glance api servers available to nova. Prefix
+# with https:// for ssl-based glance api servers.
+# ([hostname|ip]:port) (list value)
+api_servers=$glance_host:$glance_port
+#api_servers=localhost:9292
+
+# Allow insecure SSL (https) requests to glance (boolean value)
+#api_insecure=false
+
+# Cache glance images locally
+#cache_images=true
+# Number of retries when downloading an image from glance (integer value)
+#num_retries=0
+
+#image_service=nova.image.glance.GlanceImageService
+
+###############################
+# Type of network APIs to use #
+###############################
+# The full class name of the network API class to use (string value)
+# Possible values are:
+# nova.network.api.API (if you wish to use nova-network)
+# nova.network.neutronv2.api.API (if you want to use Neutron)
+network_api_class=nova.network.neutronv2.api.API
+
+# Type of security group API. Possible values are:
+# nova (if you are using nova-network)
+# neutron (if you use neutron)
+security_group_api = neutron
+
+# Driver used to create ethernet devices. (string value)
+# When using linux net, use: nova.network.linux_net.LinuxBridgeInterfaceDriver
+# for Neutron, use: nova.network.linux_net.LinuxOVSInterfaceDriver
+linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
+
+# Firewall type to use. (defaults to hypervisor specific iptables driver) (string value)
+# For linux net, use: nova.virt.libvirt.firewall.IptablesFirewallDriver
+# For Neutron and OVS, use: nova.virt.firewall.NoopFirewallDriver (since this is handled by Neutron)
+###firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+
+#######################
+# NETWORK (linux net) #
+#######################
+#network_manager=nova.network.manager.VlanManager
+#force_dhcp_release=false
+#dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf
+#dhcpbridge=$bindir/nova-dhcpbridge
+#dhcp_lease_time=120
+# Firewall driver (defaults to hypervisor specific iptables driver) (string value)
+#firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
+# Interface for public IP addresses (default: eth0) (string value)
+#public_interface=br-ext
+# vlans will bridge into this interface if set (default: <None>) (string value)
+#vlan_interface=eth1
+# Bridge for simple network instances (default: <None>) (string value)
+#flat_network_bridge=br100
+# FlatDhcp will bridge into this interface if set (default: <None>) (string value)
+#flat_interface=eth0
+# A list of DMZ ranges that should be accepted (list value).
+# Set it to the /32 of your metadata server if you have just one;
+# it is a CIDR in case there are multiple services that you want
+# to keep using the internal private IPs.
+#dmz_cidr=169.254.169.254/32
+# Name of Open vSwitch bridge used with linuxnet (string value)
+#linuxnet_ovs_integration_bridge="br-int"
+#routing_source_ip="$my_ip"
+# Only the first NIC of the VM will get the default gateway from the DHCP server
+#use_single_default_gateway=false
+
+###########
+# Neutron #
+###########
+# This is the URL of your neutron server:
+neutron_url = http://{{ CONTROLLER_HOST_ADDRESS }}:9696
+neutron_auth_strategy=keystone
+neutron_admin_tenant_name=service
+neutron_admin_username={{ NEUTRON_SERVICE_USER }}
+neutron_admin_password={{ NEUTRON_SERVICE_PASSWORD }}
+# This is the URL of your Keystone server
+neutron_admin_auth_url=http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+# What's below is only needed for nova-compute.
+
+# Set flag to indicate Neutron will proxy metadata requests
+# and resolve instance ids. This is needed to use neutron-metadata-agent
+# (instead of the metadata server of nova-api,
+# which doesn't work with neutron) (boolean value)
+service_neutron_metadata_proxy=True
+
+# Shared secret used to validate proxied Neutron metadata requests.
+# This password should match what is in /etc/neutron/metadata_agent.ini
+# (string value)
+neutron_metadata_proxy_shared_secret={{ METADATA_PROXY_SHARED_SECRET }}
+
+#################
+# NOVNC CONSOLE #
+#################
+# With the Debian package, the spicehtml5 console is the default. To
+# enable the NoVNC mode, enable the switch below, disable SPICE in this
+# nova.conf file as well (see far below), then edit
+# /etc/default/nova-consoleproxy to switch to NoVNC, shut down SPICE with
+# /etc/init.d/nova-spicehtml5proxy stop, and finally start nova-novncproxy.
+# Do not forget to restart the Nova daemons and restart your VMs if you want
+# to use NoVNC from now on (a VM's video card needs to be attached to a
+# console type, and it can accept only one video card at a time).
+#
+# NOTE: novncproxy_base_url and vncserver_proxyclient_address should point to
+# the compute node IP. novncproxy_base_url can use an alias instead of the IP
+# if the horizon machine can resolve this alias as the compute node IP.
+#
+vnc_enabled=True
+novncproxy_base_url=http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:6080/vnc_auto.html
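+# For example, with a DNS alias that resolves to this compute node
+# (hypothetical hostname):
+#novncproxy_base_url=http://compute1.example.com:6080/vnc_auto.html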
+# Change vncserver_proxyclient_address and vncserver_listen to match each compute host
+vncserver_proxyclient_address={{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+vncserver_listen=0.0.0.0
+vnc_keymap="en-us"
+
+#######################################
+# nova-xenvncproxy (e.g. xvpvncproxy) #
+#######################################
+# See NoVNC comments above for switching away from SPICE to XVP
+#xvpvncproxy_host="0.0.0.0"
+#xvpvncproxy_port=6081
+
+#########
+# QUOTA #
+#########
+# number of instances allowed per project (integer value)
+#quota_instances=10
+# number of instance cores allowed per project (integer value)
+#quota_cores=20
+# megabytes of instance ram allowed per project (integer value)
+#quota_ram=51200
+# number of floating ips allowed per project (integer value)
+#quota_floating_ips=10
+# number of metadata items allowed per instance (integer value)
+#quota_metadata_items=128
+# number of injected files allowed (integer value)
+#quota_injected_files=5
+# number of bytes allowed per injected file (integer value)
+#quota_injected_file_content_bytes=10240
+# number of bytes allowed per injected file path (integer value)
+#quota_injected_file_path_bytes=255
+# number of security groups per project (integer value)
+#quota_security_groups=10
+# number of security rules per security group (integer value)
+#quota_security_group_rules=20
+# number of key pairs per user (integer value)
+#quota_key_pairs=100
+# number of seconds until a reservation expires (integer value)
+#reservation_expire=86400
+# count of reservations until usage is refreshed (integer value)
+#until_refresh=0
+# number of seconds between subsequent usage refreshes (integer value)
+#max_age=0
+# default driver to use for quota checks (string value)
+#quota_driver=nova.quota.DbQuotaDriver
+
+############
+# DATABASE #
+############
+[database]
+connection=postgresql://{{ NOVA_DB_USER }}:{{ NOVA_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/nova
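+# The connection string is a SQLAlchemy database URL; for example, the
+# same database on MySQL would look like this (hypothetical credentials):
+#connection=mysql://nova:secret@{{ CONTROLLER_HOST_ADDRESS }}/nova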
+
+#############
+# CONDUCTOR #
+#############
+[conductor]
+# Perform nova-conductor operations locally (boolean value)
+# use_local is enabled for a single node; for multinode deployments it should be disabled.
+use_local=true
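+# For a multinode deployment, disable it instead:
+#use_local=false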
+# the topic conductor nodes listen on (string value)
+#topic=conductor
+# full class name for the Manager for conductor (string value)
+#manager=nova.conductor.manager.ConductorManager
+
+#########
+# CELLS #
+#########
+[cells]
+# Cells communication driver to use (string value)
+#driver=nova.cells.rpc_driver.CellsRPCDriver
+
+# Number of seconds after an instance was updated or deleted
+# to continue to update cells (integer value)
+#instance_updated_at_threshold=3600
+
+# Number of instances to update per periodic task run (integer
+# value)
+#instance_update_num_instances=1
+
+# Maximum number of hops for cells routing. (integer value)
+#max_hop_count=10
+
+# Cells scheduler to use (string value)
+#scheduler=nova.cells.scheduler.CellsScheduler
+
+# Enable cell functionality (boolean value)
+#enable=false
+
+# the topic cells nodes listen on (string value)
+#topic=cells
+
+# Manager for cells (string value)
+#manager=nova.cells.manager.CellsManager
+
+# name of this cell (string value)
+#name=nova
+
+# Key/Multi-value list with the capabilities of the cell (list
+# value)
+#capabilities=hypervisor=xenserver;kvm,os=linux;windows
+
+# Seconds to wait for response from a call to a cell. (integer
+# value)
+#call_timeout=60
+
+# Percentage of cell capacity to hold in reserve. Affects both
+# memory and disk utilization (floating point value)
+#reserve_percent=10.0
+
+# Type of cell: api or compute (string value)
+#cell_type=<None>
+
+# Base queue name to use when communicating between cells.
+# Various topics by message type will be appended to this.
+# (string value)
+#rpc_driver_queue_base=cells.intercell
+
+# Filter classes the cells scheduler should use. An entry of
+# "nova.cells.filters.all_filters"maps to all cells filters
+# included with nova. (list value)
+#scheduler_filter_classes=nova.cells.filters.all_filters
+
+# Weigher classes the cells scheduler should use. An entry of
+# "nova.cells.weights.all_weighers"maps to all cell weighers
+# included with nova. (list value)
+#scheduler_weight_classes=nova.cells.weights.all_weighers
+
+# How many retries when no cells are available. (integer
+# value)
+#scheduler_retries=10
+
+# How often to retry in seconds when no cells are available.
+# (integer value)
+#scheduler_retry_delay=2
+
+# Seconds between getting fresh cell info from db. (integer
+# value)
+#db_check_interval=60
+
+# Multiplier used to weigh mute children. (The value should
+# be negative.) (floating point value)
+#mute_weight_multiplier=-10.0
+
+# Weight value assigned to mute children. (The value should
+# be positive.) (floating point value)
+#mute_weight_value=1000.0
+
+# Number of seconds after which a lack of capability and
+# capacity updates signals the child cell is to be treated as
+# a mute. (integer value)
+#mute_child_interval=300
+
+# Multiplier used for weighing ram. Negative numbers mean to
+# stack vs spread. (floating point value)
+#ram_weight_multiplier=10.0
+
+#############
+# BAREMETAL #
+#############
+[baremetal]
+# The backend to use for bare-metal database (string value)
+#db_backend=sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# bare-metal database (string value)
+#sql_connection=sqlite:///$state_path/baremetal_$sqlite_db
+
+# Whether baremetal compute injects password or not (boolean value)
+#inject_password=true
+
+# Template file for injected network (string value)
+#injected_network_template=$pybasedir/nova/virt/baremetal/interfaces.template
+
+# Baremetal VIF driver. (string value)
+#vif_driver=nova.virt.baremetal.vif_driver.BareMetalVIFDriver
+
+# Baremetal volume driver. (string value)
+#volume_driver=nova.virt.baremetal.volume_driver.LibvirtVolumeDriver
+
+# a list of additional capabilities corresponding to
+# instance_type_extra_specs for this compute host to
+# advertise. Valid entries are name=value pairs; for example,
+# "key1:val1, key2:val2" (list value)
+#instance_type_extra_specs=
+
+# Baremetal driver back-end (pxe or tilera) (string value)
+#driver=nova.virt.baremetal.pxe.PXE
+
+# Baremetal power management method (string value)
+#power_manager=nova.virt.baremetal.ipmi.IPMI
+
+# Baremetal compute node's tftp root path (string value)
+#tftp_root=/tftpboot
+
+# path to baremetal terminal program (string value)
+#terminal=shellinaboxd
+
+# path to baremetal terminal SSL cert (PEM) (string value)
+#terminal_cert_dir=<None>
+
+# path to the directory that stores pidfiles of
+# baremetal_terminal (string value)
+#terminal_pid_dir=$state_path/baremetal/console
+
+# maximum number of retries for IPMI operations (integer
+# value)
+#ipmi_power_retry=5
+
+# Default kernel image ID used in deployment phase (string
+# value)
+#deploy_kernel=<None>
+
+# Default ramdisk image ID used in deployment phase (string
+# value)
+#deploy_ramdisk=<None>
+
+# Template file for injected network config (string value)
+#net_config_template=$pybasedir/nova/virt/baremetal/net-dhcp.ubuntu.template
+
+# additional append parameters for baremetal PXE boot (string
+# value)
+#pxe_append_params=<None>
+
+# Template file for PXE configuration (string value)
+#pxe_config_template=$pybasedir/nova/virt/baremetal/pxe_config.template
+
+# Timeout for PXE deployments. Default: 0 (unlimited) (integer
+# value)
+#pxe_deploy_timeout=0
+
+# IP or name of the virtual power host (string value)
+#virtual_power_ssh_host=
+
+# base command to use for virtual power (vbox, virsh) (string
+# value)
+#virtual_power_type=vbox
+
+# user to execute virtual power commands as (string value)
+#virtual_power_host_user=
+
+# password for virtual power host_user (string value)
+#virtual_power_host_pass=
+
+# Do not set this outside of dev/test environments. If a node
+# does not have a fixed PXE IP address, volumes are exported
+# with a globally open ACL (boolean value)
+#use_unsafe_iscsi=false
+
+# iSCSI IQN prefix used in baremetal volume connections.
+# (string value)
+#iscsi_iqn_prefix=iqn.2010-10.org.openstack.baremetal
+
+##########
+# VMWARE #
+##########
+[vmware]
+# Name of Integration Bridge (string value)
+#integration_bridge=br-int
+
+#########
+# SPICE #
+#########
+[spice]
+# location of spice html5 console proxy, in the form
+# "http://www.example.com:6082/spice_auto.html" (string value)
+#html5proxy_base_url=http://localhost:6082/spice_auto.html
+
+# IP address on which instance spice server should listen (string value)
+#server_listen=0.0.0.0
+
+# the address to which proxy clients (like nova-spicehtml5proxy) should connect (string value)
+#server_proxyclient_address=$my_ip
+
+# enable spice related features (boolean value)
+#enabled=true
+enabled=false
+
+# enable spice guest agent support (boolean value)
+#agent_enabled=true
+
+# keymap for spice (string value)
+#keymap=en-us
+
+######################
+# Keystone authtoken #
+######################
+[keystone_authtoken]
+identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357
+auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+auth_port = 35357
+auth_protocol = http
+admin_tenant_name = service
+admin_user = {{ NOVA_SERVICE_USER }}
+admin_password = {{ NOVA_SERVICE_PASSWORD }}
+auth_version = v2.0
+
+###########
+# COMPUTE #
+###########
+compute_driver=libvirt.LibvirtDriver
+instance_name_template=instance-%08x
+api_paste_config=/etc/nova/api-paste.ini
+
+# COMPUTE/APIS: if you have separate configs for separate services,
+# this flag is required for both nova-api and nova-compute
+allow_resize_to_same_host=True
+
+###########
+# LIBVIRT #
+###########
+[libvirt]
+# Our test hardware does not support hardware acceleration,
+# so here we configure libvirt to use QEMU instead of KVM.
+virt_type={{ NOVA_VIRT_TYPE }}
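+# Example values (typical libvirt setups): kvm when the host CPU has
+# virtualization extensions, qemu for plain software emulation.
+#virt_type=kvm
+#virt_type=qemu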
+
+##################
+# SERIAL CONSOLE #
+##################
+# Enable the serial console proxy, for console access from the host only
+[serial_console]
+serialproxy_host=127.0.0.1
+enabled=false
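+# To turn the serial console proxy on for host-local access (example):
+#enabled=true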