-rw-r--r--.gitreview5
-rw-r--r--README6
-rw-r--r--VERSION1
-rw-r--r--busybox-init.configure145
-rw-r--r--ceph.configure266
-rw-r--r--chef-system-x86_64-container.morph32
-rw-r--r--chef/manifest3
-rwxr-xr-xcloud-init.configure63
-rw-r--r--clusters/cephclient.morph20
-rw-r--r--clusters/ci.morph117
-rw-r--r--clusters/example-distbuild-cluster.morph37
-rw-r--r--clusters/example-swift-storage-cluster.morph62
-rw-r--r--clusters/hardware-deployment.morph35
-rw-r--r--clusters/image-package-example.morph12
-rw-r--r--clusters/initramfs-test.morph17
-rw-r--r--clusters/installer-build-system-x86_64.morph52
-rw-r--r--clusters/jetson-upgrade.morph18
-rw-r--r--clusters/mason-openstack.morph39
-rw-r--r--clusters/mason.morph56
-rw-r--r--clusters/minimal-system-armv5l-openbmc-aspeed-deploy.morph13
-rw-r--r--clusters/minimal-system-deploy.morph14
-rw-r--r--clusters/moonshot-m2-armv8b64.morph56
-rw-r--r--clusters/moonshot-pxe-armv8b64.morph30
-rw-r--r--clusters/moonshot-pxe-armv8l64.morph22
-rw-r--r--clusters/openstack-one-node-swift.morph142
-rw-r--r--clusters/openstack-one-node.morph106
-rw-r--r--clusters/openstack-three-node-installer.morph239
-rw-r--r--clusters/openstack-two-node-installer.morph200
-rw-r--r--clusters/release.morph76
-rw-r--r--clusters/sdk-example-cluster.morph46
-rw-r--r--clusters/trove-example.morph58
-rw-r--r--clusters/trove.baserock.org-upgrade.morph23
-rw-r--r--clusters/upgrade-devel.morph39
-rw-r--r--clusters/weston-system-x86_64-generic-deploy.morph23
-rw-r--r--clusters/zookeeper.morph21
-rw-r--r--distbuild.configure132
-rwxr-xr-xdistbuild/lib/systemd/system-generators/ccache-nfs-mount-generator16
-rw-r--r--distbuild/manifest28
-rw-r--r--distbuild/usr/lib/distbuild-setup/ansible/distbuild-setup.yml115
-rw-r--r--distbuild/usr/lib/distbuild-setup/ansible/hosts1
-rw-r--r--distbuild/usr/lib/systemd/system/distbuild-setup.service16
-rw-r--r--distbuild/usr/lib/systemd/system/morph-cache-server.service12
-rw-r--r--distbuild/usr/lib/systemd/system/morph-controller-helper.service13
-rw-r--r--distbuild/usr/lib/systemd/system/morph-controller.service12
-rw-r--r--distbuild/usr/lib/systemd/system/morph-worker-helper.service13
-rw-r--r--distbuild/usr/lib/systemd/system/morph-worker.service13
l---------distbuild/usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service1
-rw-r--r--distbuild/usr/share/distbuild-setup/morph-cache-server.conf5
-rw-r--r--distbuild/usr/share/distbuild-setup/morph-controller-helper.conf5
-rw-r--r--distbuild/usr/share/distbuild-setup/morph-controller.conf6
-rw-r--r--distbuild/usr/share/distbuild-setup/morph-worker-helper.conf4
-rw-r--r--distbuild/usr/share/distbuild-setup/morph-worker.conf4
-rw-r--r--distbuild/usr/share/distbuild-setup/morph.conf13
-rw-r--r--essential-files/etc/inputrc38
-rw-r--r--essential-files/etc/os-release5
-rw-r--r--essential-files/etc/profile13
-rw-r--r--essential-files/manifest8
-rw-r--r--essential-files/usr/lib/tmpfiles.d/shutdownramfs.conf4
l---------genivi-devel-system-armv7/etc/morph.conf1
-rw-r--r--genivi-devel-system-armv7/manifest5
-rw-r--r--genivi-devel-system-armv7/src/morph.conf5
-rw-r--r--hosts1
-rw-r--r--image-package-example/README9
-rw-r--r--image-package-example/common.sh.in72
-rw-r--r--image-package-example/disk-install.sh.in51
-rw-r--r--image-package-example/make-disk-image.sh.in36
-rwxr-xr-ximage-package.write168
-rwxr-xr-xinstaller.configure48
-rw-r--r--jffs2.write64
-rw-r--r--jffs2.write.help28
-rw-r--r--mason.configure153
-rw-r--r--mason/ansible/hosts1
-rw-r--r--mason/ansible/mason-setup.yml83
-rw-r--r--mason/httpd.service10
-rwxr-xr-xmason/mason-generator.sh101
-rwxr-xr-xmason/mason-report.sh252
-rw-r--r--mason/mason-setup.service16
-rw-r--r--mason/mason.service12
-rwxr-xr-xmason/mason.sh93
-rw-r--r--mason/mason.timer10
-rw-r--r--mason/os-init-script6
-rw-r--r--mason/share/mason.conf14
-rw-r--r--mason/share/os.conf30
-rw-r--r--moonshot-kernel.configure33
-rw-r--r--moonshot/boot/m400-1003.dtbbin0 -> 18063 bytes
-rw-r--r--moonshot/manifest2
-rwxr-xr-xnfsboot-server.configure58
-rw-r--r--openstack-ceilometer.configure120
-rw-r--r--openstack-cinder.configure125
-rw-r--r--openstack-glance.configure101
-rw-r--r--openstack-ironic.configure155
-rw-r--r--openstack-keystone.configure123
-rw-r--r--openstack-network.configure50
-rw-r--r--openstack-neutron.configure138
-rw-r--r--openstack-nova.configure168
-rw-r--r--openstack-swift-controller.configure49
-rw-r--r--openstack/etc/horizon/apache-horizon.conf34
-rw-r--r--openstack/etc/horizon/openstack_dashboard/local_settings.py551
-rw-r--r--openstack/etc/tempest/tempest.conf1116
-rw-r--r--openstack/manifest190
-rw-r--r--openstack/usr/lib/sysctl.d/neutron.conf3
-rw-r--r--openstack/usr/lib/systemd/system/apache-httpd.service16
-rw-r--r--openstack/usr/lib/systemd/system/iscsi-setup.service12
-rw-r--r--openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service15
-rw-r--r--openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service15
-rw-r--r--openstack/usr/lib/systemd/system/openstack-ceilometer-api.service15
-rw-r--r--openstack/usr/lib/systemd/system/openstack-ceilometer-central.service15
-rw-r--r--openstack/usr/lib/systemd/system/openstack-ceilometer-collector.service15
-rw-r--r--openstack/usr/lib/systemd/system/openstack-ceilometer-compute.service15
-rw-r--r--openstack/usr/lib/systemd/system/openstack-ceilometer-config-setup.service11
-rw-r--r--openstack/usr/lib/systemd/system/openstack-ceilometer-db-setup.service13
-rw-r--r--openstack/usr/lib/systemd/system/openstack-ceilometer-notification.service15
-rw-r--r--openstack/usr/lib/systemd/system/openstack-cinder-api.service15
-rw-r--r--openstack/usr/lib/systemd/system/openstack-cinder-backup.service15
-rw-r--r--openstack/usr/lib/systemd/system/openstack-cinder-config-setup.service11
-rw-r--r--openstack/usr/lib/systemd/system/openstack-cinder-db-setup.service13
-rw-r--r--openstack/usr/lib/systemd/system/openstack-cinder-lv-setup.service12
-rw-r--r--openstack/usr/lib/systemd/system/openstack-cinder-scheduler.service15
-rw-r--r--openstack/usr/lib/systemd/system/openstack-cinder-volume.service15
-rw-r--r--openstack/usr/lib/systemd/system/openstack-glance-api.service16
-rw-r--r--openstack/usr/lib/systemd/system/openstack-glance-registry.service16
-rw-r--r--openstack/usr/lib/systemd/system/openstack-glance-setup.service11
-rw-r--r--openstack/usr/lib/systemd/system/openstack-horizon-setup.service10
-rw-r--r--openstack/usr/lib/systemd/system/openstack-ironic-api.service16
-rw-r--r--openstack/usr/lib/systemd/system/openstack-ironic-conductor.service16
-rw-r--r--openstack/usr/lib/systemd/system/openstack-ironic-setup.service12
-rw-r--r--openstack/usr/lib/systemd/system/openstack-keystone-setup.service14
-rw-r--r--openstack/usr/lib/systemd/system/openstack-keystone.service16
-rw-r--r--openstack/usr/lib/systemd/system/openstack-network-setup.service12
-rw-r--r--openstack/usr/lib/systemd/system/openstack-neutron-config-setup.service13
-rw-r--r--openstack/usr/lib/systemd/system/openstack-neutron-db-setup.service13
-rw-r--r--openstack/usr/lib/systemd/system/openstack-neutron-dhcp-agent.service17
-rw-r--r--openstack/usr/lib/systemd/system/openstack-neutron-l3-agent.service18
-rw-r--r--openstack/usr/lib/systemd/system/openstack-neutron-metadata-agent.service17
-rw-r--r--openstack/usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service18
-rw-r--r--openstack/usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service17
-rw-r--r--openstack/usr/lib/systemd/system/openstack-neutron-server.service17
-rw-r--r--openstack/usr/lib/systemd/system/openstack-nova-api.service15
-rw-r--r--openstack/usr/lib/systemd/system/openstack-nova-cert.service15
-rw-r--r--openstack/usr/lib/systemd/system/openstack-nova-compute.service16
-rw-r--r--openstack/usr/lib/systemd/system/openstack-nova-conductor.service16
-rw-r--r--openstack/usr/lib/systemd/system/openstack-nova-config-setup.service11
-rw-r--r--openstack/usr/lib/systemd/system/openstack-nova-consoleauth.service15
-rw-r--r--openstack/usr/lib/systemd/system/openstack-nova-db-setup.service13
-rw-r--r--openstack/usr/lib/systemd/system/openstack-nova-novncproxy.service15
-rw-r--r--openstack/usr/lib/systemd/system/openstack-nova-scheduler.service15
-rw-r--r--openstack/usr/lib/systemd/system/openstack-nova-serialproxy.service15
-rw-r--r--openstack/usr/lib/systemd/system/openvswitch-db-server.service12
-rw-r--r--openstack/usr/lib/systemd/system/openvswitch-setup.service11
-rw-r--r--openstack/usr/lib/systemd/system/openvswitch.service12
-rw-r--r--openstack/usr/lib/systemd/system/postgres-server-setup.service12
-rw-r--r--openstack/usr/lib/systemd/system/postgres-server.service26
-rw-r--r--openstack/usr/lib/systemd/system/rabbitmq-server.service16
-rw-r--r--openstack/usr/lib/systemd/system/swift-controller-setup.service13
-rw-r--r--openstack/usr/lib/systemd/system/swift-proxy.service14
-rw-r--r--openstack/usr/share/openstack/ceilometer-config.yml36
-rw-r--r--openstack/usr/share/openstack/ceilometer-db.yml50
-rw-r--r--openstack/usr/share/openstack/ceilometer/ceilometer.conf1023
-rw-r--r--openstack/usr/share/openstack/cinder-config.yml37
-rw-r--r--openstack/usr/share/openstack/cinder-db.yml60
-rw-r--r--openstack/usr/share/openstack/cinder-lvs.yml21
-rw-r--r--openstack/usr/share/openstack/cinder/api-paste.ini60
-rw-r--r--openstack/usr/share/openstack/cinder/cinder.conf2825
-rw-r--r--openstack/usr/share/openstack/cinder/policy.json80
-rw-r--r--openstack/usr/share/openstack/extras/00-disable-device.network2
-rw-r--r--openstack/usr/share/openstack/extras/60-device-dhcp.network5
-rw-r--r--openstack/usr/share/openstack/glance.yml93
-rw-r--r--openstack/usr/share/openstack/glance/glance-api-paste.ini77
-rw-r--r--openstack/usr/share/openstack/glance/glance-api.conf699
-rw-r--r--openstack/usr/share/openstack/glance/glance-cache.conf200
-rw-r--r--openstack/usr/share/openstack/glance/glance-registry-paste.ini30
-rw-r--r--openstack/usr/share/openstack/glance/glance-registry.conf245
-rw-r--r--openstack/usr/share/openstack/glance/glance-scrubber.conf108
-rw-r--r--openstack/usr/share/openstack/glance/logging.conf54
-rw-r--r--openstack/usr/share/openstack/glance/policy.json52
-rw-r--r--openstack/usr/share/openstack/glance/schema-image.json28
-rw-r--r--openstack/usr/share/openstack/horizon.yml47
-rw-r--r--openstack/usr/share/openstack/hosts1
-rw-r--r--openstack/usr/share/openstack/ironic.yml104
-rw-r--r--openstack/usr/share/openstack/ironic/ironic.conf1247
-rw-r--r--openstack/usr/share/openstack/ironic/policy.json5
-rw-r--r--openstack/usr/share/openstack/iscsi.yml15
-rw-r--r--openstack/usr/share/openstack/keystone.yml143
-rw-r--r--openstack/usr/share/openstack/keystone/keystone-paste.ini121
-rw-r--r--openstack/usr/share/openstack/keystone/keystone.conf1588
-rw-r--r--openstack/usr/share/openstack/keystone/logging.conf65
-rw-r--r--openstack/usr/share/openstack/keystone/policy.json171
-rw-r--r--openstack/usr/share/openstack/network.yml67
-rw-r--r--openstack/usr/share/openstack/neutron-config.yml48
-rw-r--r--openstack/usr/share/openstack/neutron-db.yml51
-rw-r--r--openstack/usr/share/openstack/neutron/api-paste.ini30
-rw-r--r--openstack/usr/share/openstack/neutron/dhcp_agent.ini89
-rw-r--r--openstack/usr/share/openstack/neutron/fwaas_driver.ini3
-rw-r--r--openstack/usr/share/openstack/neutron/l3_agent.ini103
-rw-r--r--openstack/usr/share/openstack/neutron/lbaas_agent.ini42
-rw-r--r--openstack/usr/share/openstack/neutron/metadata_agent.ini60
-rw-r--r--openstack/usr/share/openstack/neutron/metering_agent.ini18
-rw-r--r--openstack/usr/share/openstack/neutron/neutron.conf640
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini114
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README3
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README6
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini29
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini15
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini100
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini76
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini26
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini41
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini63
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini50
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini78
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini31
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini19
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini86
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini100
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini15
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini118
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini52
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini4
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini28
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini30
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini13
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini31
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini79
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/nec/nec.ini60
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini41
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini35
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini26
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini190
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini14
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini44
-rw-r--r--openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini200
-rw-r--r--openstack/usr/share/openstack/neutron/policy.json138
-rw-r--r--openstack/usr/share/openstack/neutron/vpn_agent.ini14
-rw-r--r--openstack/usr/share/openstack/nova-config.yml34
-rw-r--r--openstack/usr/share/openstack/nova-db.yml51
-rw-r--r--openstack/usr/share/openstack/nova/api-paste.ini118
-rw-r--r--openstack/usr/share/openstack/nova/cells.json26
-rw-r--r--openstack/usr/share/openstack/nova/logging.conf81
-rw-r--r--openstack/usr/share/openstack/nova/nova-compute.conf4
-rw-r--r--openstack/usr/share/openstack/nova/nova.conf3809
-rw-r--r--openstack/usr/share/openstack/nova/policy.json324
-rw-r--r--openstack/usr/share/openstack/openvswitch.yml38
-rw-r--r--openstack/usr/share/openstack/postgres.yml48
-rw-r--r--openstack/usr/share/openstack/postgres/pg_hba.conf5
-rw-r--r--openstack/usr/share/openstack/postgres/postgresql.conf11
-rw-r--r--openstack/usr/share/openstack/rabbitmq/rabbitmq-env.conf3
-rw-r--r--openstack/usr/share/openstack/rabbitmq/rabbitmq.config9
-rw-r--r--openstack/usr/share/openstack/swift-controller.yml52
-rw-r--r--openstack/usr/share/swift/etc/rsyncd.j223
-rw-r--r--openstack/usr/share/swift/etc/swift/proxy-server.j2630
-rwxr-xr-xpxeboot.check86
-rw-r--r--pxeboot.write755
-rw-r--r--pxeboot.write.help166
-rwxr-xr-xscripts/cycle.sh61
-rw-r--r--scripts/licensecheck.pl604
-rwxr-xr-xscripts/licensecheck.sh101
-rwxr-xr-xscripts/organize-morphologies.py266
-rwxr-xr-xscripts/release-build175
-rw-r--r--scripts/release-build.test.conf6
-rwxr-xr-xscripts/release-test401
-rwxr-xr-xscripts/release-test-os526
-rwxr-xr-xscripts/release-upload473
-rw-r--r--scripts/release-upload.test.conf10
-rwxr-xr-xsdk.write284
-rw-r--r--strata/NetworkManager-common.morph23
-rw-r--r--strata/NetworkManager-common/NetworkManager.morph5
-rw-r--r--strata/ansible.morph17
-rw-r--r--strata/ansible/ansible.morph9
-rw-r--r--strata/ansible/openstack-ansible-modules.morph5
-rw-r--r--strata/apache-httpd-server.morph39
-rw-r--r--strata/apache-httpd-server/apr.morph22
-rw-r--r--strata/apache-httpd-server/httpd-server.morph67
-rw-r--r--strata/apache-httpd-server/mod_wsgi.morph18
-rw-r--r--strata/armv7lhf-cross-toolchain.morph54
-rw-r--r--strata/armv7lhf-cross-toolchain/armv7lhf-cross-binutils.morph24
-rw-r--r--strata/armv7lhf-cross-toolchain/armv7lhf-cross-gcc-nolibc.morph74
-rw-r--r--strata/armv7lhf-cross-toolchain/armv7lhf-cross-gcc.morph48
-rw-r--r--strata/armv7lhf-cross-toolchain/armv7lhf-cross-glibc.morph51
-rw-r--r--strata/armv7lhf-cross-toolchain/armv7lhf-cross-libstdc++.morph32
-rw-r--r--strata/armv7lhf-cross-toolchain/armv7lhf-cross-linux-api-headers.morph10
-rw-r--r--strata/audio-bluetooth.morph92
-rw-r--r--strata/audio-bluetooth/alsa-utils.morph7
-rw-r--r--strata/audio-bluetooth/bluez-tools.morph10
-rw-r--r--strata/audio-bluetooth/bluez.morph12
-rw-r--r--strata/audio-bluetooth/json-c.morph7
-rw-r--r--strata/audio-bluetooth/libical.morph6
-rw-r--r--strata/audio-bluetooth/libsndfile.morph5
-rw-r--r--strata/audio-bluetooth/nohands.morph5
-rw-r--r--strata/audio-bluetooth/ofono.morph11
-rw-r--r--strata/audio-bluetooth/pulseaudio.morph11
-rw-r--r--strata/baserock-import.morph21
-rw-r--r--strata/bsp-armv5l-openbmc-aspeed.morph19
-rw-r--r--strata/bsp-armv5l-openbmc-aspeed/linux-armv5l-openbmc-aspeed.morph9
-rw-r--r--strata/bsp-armv5l-openbmc-aspeed/u-boot@aspeed.morph14
-rw-r--r--strata/bsp-armv7-highbank.morph17
-rw-r--r--strata/bsp-armv7-highbank/linux-armv7-highbank.morph52
-rw-r--r--strata/bsp-armv7-versatile.morph12
-rw-r--r--strata/bsp-armv7-versatile/linux-armv7-versatile.morph31
-rw-r--r--strata/bsp-armv7b-highbank.morph17
-rw-r--r--strata/bsp-armv7b-highbank/linux-armv7b-highbank.morph53
-rw-r--r--strata/bsp-armv7b-vexpress-tc2.morph11
-rw-r--r--strata/bsp-armv7b-vexpress-tc2/linux-armv7b-vexpress-tc2.morph57
-rw-r--r--strata/bsp-armv8b64-generic.morph16
-rw-r--r--strata/bsp-armv8b64-generic/linux-armv8b64-generic.morph278
-rw-r--r--strata/bsp-armv8l64-generic.morph15
-rw-r--r--strata/bsp-armv8l64-generic/linux-armv8l64-generic.morph276
-rw-r--r--strata/bsp-jetson.morph43
-rw-r--r--strata/bsp-jetson/bsp-support.morph6
-rw-r--r--strata/bsp-jetson/device-tree-compiler.morph6
-rw-r--r--strata/bsp-jetson/linux-firmware-jetson.morph15
-rw-r--r--strata/bsp-jetson/linux-jetson-tk1.morph248
-rw-r--r--strata/bsp-jetson/nouveau-drm.morph12
-rw-r--r--strata/bsp-jetson/u-boot@jetson.morph20
-rw-r--r--strata/bsp-ppc64-generic.morph11
-rw-r--r--strata/bsp-ppc64-generic/linux-ppc64.morph226
-rw-r--r--strata/bsp-wandboard.morph18
-rw-r--r--strata/bsp-wandboard/linux-armv7-wandboard.morph70
-rw-r--r--strata/bsp-wandboard/u-boot@wandboard.morph11
-rw-r--r--strata/bsp-x86_32-generic.morph24
-rw-r--r--strata/bsp-x86_32-generic/linux-x86-32-generic.morph287
-rw-r--r--strata/bsp-x86_32-generic/nasm.morph5
-rw-r--r--strata/bsp-x86_32-generic/syslinux.morph12
-rw-r--r--strata/bsp-x86_64-generic.morph24
-rw-r--r--strata/bsp-x86_64-generic/linux-x86-64-generic.morph287
-rw-r--r--strata/bsp-x86_64-generic/nasm.morph5
-rw-r--r--strata/bsp-x86_64-generic/syslinux.morph12
-rw-r--r--strata/bsp-x86_both-tools.morph19
-rw-r--r--strata/bsp-x86_both-tools/nasm.morph5
-rw-r--r--strata/bsp-x86_both-tools/syslinux.morph12
-rw-r--r--strata/build-essential.morph411
-rw-r--r--strata/build-essential/binutils.morph8
-rw-r--r--strata/build-essential/busybox.morph81
-rw-r--r--strata/build-essential/ccache.morph12
-rw-r--r--strata/build-essential/fhs-dirs.morph49
-rw-r--r--strata/build-essential/gawk.morph5
-rw-r--r--strata/build-essential/gcc.morph64
-rw-r--r--strata/build-essential/glibc.morph98
-rw-r--r--strata/build-essential/linux-api-headers.morph24
-rw-r--r--strata/build-essential/m4-tarball.morph5
-rw-r--r--strata/build-essential/make.morph5
-rw-r--r--strata/build-essential/stage1-binutils.morph23
-rw-r--r--strata/build-essential/stage1-gcc.morph80
-rw-r--r--strata/build-essential/stage2-binutils.morph21
-rw-r--r--strata/build-essential/stage2-busybox.morph72
-rw-r--r--strata/build-essential/stage2-fake-bash.morph4
-rw-r--r--strata/build-essential/stage2-fhs-dirs.morph51
-rw-r--r--strata/build-essential/stage2-gawk.morph9
-rw-r--r--strata/build-essential/stage2-gcc-fixed-headers.morph19
-rw-r--r--strata/build-essential/stage2-gcc.morph82
-rw-r--r--strata/build-essential/stage2-glibc.morph103
-rw-r--r--strata/build-essential/stage2-libstdc++.morph36
-rw-r--r--strata/build-essential/stage2-linux-api-headers.morph24
-rw-r--r--strata/build-essential/stage2-make.morph9
-rw-r--r--strata/build-essential/stage2-reset-specs.morph21
-rw-r--r--strata/build-essential/zlib.morph9
-rw-r--r--strata/c2man/c2man.morph11
-rw-r--r--strata/ceph-service.morph59
-rw-r--r--strata/ceph-service/boost.morph8
-rw-r--r--strata/ceph-service/ceph.morph25
-rw-r--r--strata/ceph-service/keyutils.morph7
-rw-r--r--strata/ceph-service/leveldb.morph9
-rw-r--r--strata/ceph-service/libaio.morph7
-rw-r--r--strata/chef.morph187
-rw-r--r--strata/chef/chef-master.morph20
-rw-r--r--strata/chef/chef-zero-2.2.morph13
-rw-r--r--strata/chef/coderay-1.1.0.morph13
-rw-r--r--strata/chef/diff-lcs-1.2.5.morph13
-rw-r--r--strata/chef/erubis-master.morph19
-rw-r--r--strata/chef/ffi-1.9.3.morph13
-rw-r--r--strata/chef/ffi-yajl-master.morph13
-rw-r--r--strata/chef/hashie-2.1.2.morph13
-rw-r--r--strata/chef/highline-master.morph13
-rw-r--r--strata/chef/hoe-master.morph14
-rw-r--r--strata/chef/ipaddress-master.morph13
-rw-r--r--strata/chef/json-1.8.1.morph13
-rw-r--r--strata/chef/libpopt.morph6
-rw-r--r--strata/chef/libyajl2-1.0.1.morph13
-rw-r--r--strata/chef/method_source-0.8.2.morph13
-rw-r--r--strata/chef/mime-types-1.25.1.morph13
-rw-r--r--strata/chef/mixlib-authentication-1.3.0.morph13
-rw-r--r--strata/chef/mixlib-cli-1.5.0.morph13
-rw-r--r--strata/chef/mixlib-config-2.1.0.morph13
-rw-r--r--strata/chef/mixlib-log-master.morph13
-rw-r--r--strata/chef/mixlib-shellout-1.4.0.morph13
-rw-r--r--strata/chef/net-dhcp-1.2.1.morph13
-rw-r--r--strata/chef/net-ssh-2.9.1.morph15
-rw-r--r--strata/chef/net-ssh-gateway-1.2.0.morph15
-rw-r--r--strata/chef/net-ssh-multi-1.2.0.morph15
-rw-r--r--strata/chef/ohai-master.morph13
-rw-r--r--strata/chef/plist-master.morph14
-rw-r--r--strata/chef/pry-master.morph13
-rw-r--r--strata/chef/rack-1.5.2.morph13
-rw-r--r--strata/chef/sgdisk.morph7
-rw-r--r--strata/chef/slop-3.6.0.morph13
-rw-r--r--strata/chef/systemu-master.morph13
-rw-r--r--strata/chef/wmi-lite-1.0.0.morph13
-rw-r--r--strata/chef/yajl.morph6
-rw-r--r--strata/cloudinit-support.morph20
-rw-r--r--strata/cloudinit-support/cloud-init.morph6
-rw-r--r--strata/connectivity.morph20
-rw-r--r--strata/connectivity/wpa_supplicant.morph8
-rw-r--r--strata/connman-common.morph10
-rw-r--r--strata/connman-common/connman.morph6
-rw-r--r--strata/core.morph387
-rw-r--r--strata/core/acl.morph6
-rw-r--r--strata/core/attr.morph18
-rw-r--r--strata/core/autoconf-tarball.morph5
-rw-r--r--strata/core/automake.morph8
-rw-r--r--strata/core/bash.morph37
-rw-r--r--strata/core/bison.morph5
-rw-r--r--strata/core/bzip2.morph10
-rw-r--r--strata/core/ca-certificates.morph22
-rw-r--r--strata/core/cmake.morph8
-rw-r--r--strata/core/cpython.morph7
-rw-r--r--strata/core/curl.morph9
-rw-r--r--strata/core/e2fsprogs.morph22
-rw-r--r--strata/core/flex.morph14
-rw-r--r--strata/core/gdbm.morph8
-rw-r--r--strata/core/gettext-tarball.morph5
-rw-r--r--strata/core/git.morph7
-rw-r--r--strata/core/gperf.morph3
-rw-r--r--strata/core/libcap2.morph6
-rw-r--r--strata/core/libexpat.morph6
-rw-r--r--strata/core/libffi.morph8
-rw-r--r--strata/core/libtool-tarball.morph5
-rw-r--r--strata/core/linux-pam.morph9
-rw-r--r--strata/core/mini-utils.morph6
-rw-r--r--strata/core/ncurses.morph45
-rw-r--r--strata/core/openssl-new.morph15
-rw-r--r--strata/core/patch.morph5
-rw-r--r--strata/core/perl.morph16
-rw-r--r--strata/core/pkg-config.morph5
-rw-r--r--strata/core/python-setuptools.morph6
-rw-r--r--strata/core/readline.morph6
-rw-r--r--strata/core/shadow.morph53
-rw-r--r--strata/core/texinfo-tarball.morph9
-rw-r--r--strata/core/util-linux.morph10
-rw-r--r--strata/coreutils-common.morph40
-rw-r--r--strata/coreutils-common/coreutils.morph16
-rw-r--r--strata/coreutils-common/diff.morph7
-rw-r--r--strata/coreutils-common/findutils.morph5
-rw-r--r--strata/coreutils-common/sed.morph12
-rw-r--r--strata/coreutils-common/tar.morph18
-rw-r--r--strata/cross-bootstrap.morph28
-rw-r--r--strata/cross-bootstrap/groff.morph13
-rw-r--r--strata/cross-bootstrap/openssh.morph28
-rw-r--r--strata/cross-bootstrap/rsync.morph6
-rw-r--r--strata/cross-tools.morph10
-rw-r--r--strata/cups.morph11
-rw-r--r--strata/cups/cups.morph12
-rw-r--r--strata/cxmanage.morph32
-rw-r--r--strata/cxmanage/pexpect.morph10
-rw-r--r--strata/databases.morph32
-rw-r--r--strata/databases/memcached.morph21
-rw-r--r--strata/databases/redis.morph6
-rw-r--r--strata/devtools.morph38
-rw-r--r--strata/devtools/nano.morph16
-rw-r--r--strata/devtools/screen.morph11
-rw-r--r--strata/devtools/vim.morph10
-rw-r--r--strata/django.morph32
-rw-r--r--strata/enlightenment.morph79
-rw-r--r--strata/enlightenment/bullet3.morph8
-rw-r--r--strata/enlightenment/efl.morph9
-rw-r--r--strata/enlightenment/elementary.morph8
-rw-r--r--strata/enlightenment/enlightenment.morph8
-rw-r--r--strata/enlightenment/eterm.morph8
-rw-r--r--strata/enlightenment/evas_generic_loaders.morph8
-rw-r--r--strata/enlightenment/imlib2.morph8
-rw-r--r--strata/enlightenment/libast.morph8
-rw-r--r--strata/erlang.morph26
-rw-r--r--strata/erlang/erlang-sd_notify.morph8
-rw-r--r--strata/erlang/erlang.morph9
-rw-r--r--strata/erlang/rebar.morph6
-rw-r--r--strata/foundation.morph113
-rw-r--r--strata/foundation/btrfs-progs.morph9
-rw-r--r--strata/foundation/dbus-pre.morph10
-rw-r--r--strata/foundation/dbus.morph10
-rw-r--r--strata/foundation/fuse.morph6
-rw-r--r--strata/foundation/groff.morph13
-rw-r--r--strata/foundation/kmod.morph49
-rw-r--r--strata/foundation/libgpg-error.morph7
-rw-r--r--strata/foundation/lzo.morph5
-rw-r--r--strata/foundation/openssh.morph28
-rw-r--r--strata/foundation/pciutils.morph11
-rw-r--r--strata/foundation/rsync.morph9
-rw-r--r--strata/foundation/systemd.morph46
-rw-r--r--strata/foundation/tbdiff.morph3
-rw-r--r--strata/foundation/time-zone-database.morph10
-rw-r--r--strata/genivi.morph110
-rw-r--r--strata/genivi/DLT-daemon.morph5
-rw-r--r--strata/genivi/genivi-common-api-dbus-runtime.morph5
-rw-r--r--strata/genivi/genivi-common-api-runtime.morph5
-rw-r--r--strata/genivi/googlemock.morph8
-rw-r--r--strata/genivi/googletest.morph7
-rw-r--r--strata/genivi/itzam-tarball.morph10
-rw-r--r--strata/genivi/libarchive.morph6
-rw-r--r--strata/genivi/linuxquota.morph10
-rw-r--r--strata/genivi/node-startup-controller.morph9
-rw-r--r--strata/genivi/persistence-administrator.morph5
-rw-r--r--strata/genivi/persistence-client-library.morph6
-rw-r--r--strata/genivi/persistence-common-object.morph5
-rw-r--r--strata/graphics-common.morph54
-rw-r--r--strata/graphics-common/cairo.morph5
-rw-r--r--strata/graphics-common/freefont-otf.morph5
-rw-r--r--strata/graphics-common/pixman.morph5
-rw-r--r--strata/gtk-deps.morph38
-rw-r--r--strata/gtk-deps/gdk-pixbuf.morph6
-rw-r--r--strata/gtk-deps/pango.morph8
-rw-r--r--strata/gtk-deps/shared-mime-info.morph4
-rw-r--r--strata/gtk2.morph12
-rw-r--r--strata/gtk2/gtk+.morph11
-rw-r--r--strata/gtk3.morph18
-rw-r--r--strata/gtk3/gtk3.morph11
-rw-r--r--strata/initramfs-utils.morph11
-rw-r--r--strata/initramfs-utils/initramfs-scripts.morph4
-rw-r--r--strata/input-common.morph26
-rw-r--r--strata/input-common/xkeyboard-config.morph7
-rw-r--r--strata/installer-utils.morph11
-rw-r--r--strata/installer-utils/installer-scripts.morph4
-rw-r--r--strata/libdrm-common.morph16
-rw-r--r--strata/libdrm-common/drm.morph15
-rw-r--r--strata/libsoup-common.morph10
-rw-r--r--strata/libsoup-common/libsoup.morph6
-rw-r--r--strata/lighttpd-server.morph12
-rw-r--r--strata/lighttpd-server/lighttpd.morph6
-rw-r--r--strata/llvm-common.morph10
-rw-r--r--strata/llvm-common/llvm.morph8
-rw-r--r--strata/lorry-controller.morph16
-rw-r--r--strata/lorry-controller/lorry-controller.morph10
-rw-r--r--strata/lorry.morph120
-rw-r--r--strata/lorry/cvs-tarball.morph9
-rw-r--r--strata/lorry/cvsps.morph6
-rw-r--r--strata/lorry/hg-fast-export.morph10
-rw-r--r--strata/lorry/libapr-util.morph7
-rw-r--r--strata/lorry/libapr.morph21
-rw-r--r--strata/lorry/libserf.morph6
-rw-r--r--strata/lorry/lorry.morph3
-rw-r--r--strata/lorry/mercurial-tarball.morph6
-rw-r--r--strata/lorry/neon.morph8
-rw-r--r--strata/lorry/perl-dbi-tarball.morph5
-rw-r--r--strata/lorry/subversion-tarball.morph12
-rw-r--r--strata/lorry/swig-tarball.morph7
-rw-r--r--strata/lua.morph16
-rw-r--r--strata/lua/lua.morph11
-rw-r--r--strata/lua/luajit2.morph7
-rw-r--r--strata/lvm.morph15
-rw-r--r--strata/lvm/lvm2.morph31
-rw-r--r--strata/mesa-common.morph13
-rw-r--r--strata/mesa-common/mesa.morph22
-rw-r--r--strata/morph-utils.morph44
-rw-r--r--strata/morph-utils/cmdtest.morph6
-rw-r--r--strata/morph-utils/pyfilesystem.morph6
-rw-r--r--strata/morph-utils/python-ttystatus.morph6
-rw-r--r--strata/mtd-utilities.morph11
-rw-r--r--strata/mtd-utilities/mtd-utils.morph6
-rw-r--r--strata/multimedia-common.morph16
-rw-r--r--strata/multimedia-gstreamer-0.10.morph33
-rw-r--r--strata/multimedia-gstreamer.morph40
-rw-r--r--strata/network-security.morph18
-rw-r--r--strata/network-security/nspr.morph12
-rw-r--r--strata/network-security/nss.morph7
-rw-r--r--strata/networking-utils.morph52
-rw-r--r--strata/networking-utils/arping.morph3
-rw-r--r--strata/networking-utils/iproute2.morph10
-rw-r--r--strata/networking-utils/ipset.morph6
-rw-r--r--strata/networking-utils/libnet.morph15
-rw-r--r--strata/networking-utils/libpcap.morph3
-rw-r--r--strata/networking-utils/tcpdump.morph3
-rw-r--r--strata/nfs.morph30
-rw-r--r--strata/nfs/nfs-utils.morph12
-rw-r--r--strata/nfs/rpcbind.morph11
-rw-r--r--strata/nfs/tcp-wrappers.morph9
-rw-r--r--strata/nfs/ti-rpc.morph6
-rw-r--r--strata/nodejs.morph10
-rw-r--r--strata/ntpd.morph10
-rw-r--r--strata/ntpd/ntpd.morph51
-rw-r--r--strata/ocaml-language.morph11
-rw-r--r--strata/ocaml/ocaml.morph8
-rw-r--r--strata/openbmc.morph22
-rw-r--r--strata/openbmc/i2c-tools.morph16
-rw-r--r--strata/openbmc/isc-dhcp.morph3
-rw-r--r--strata/openbmc/lm_sensors.morph7
-rw-r--r--strata/openstack-clients.morph103
-rw-r--r--strata/openstack-clients/pyparsing.morph6
-rw-r--r--strata/openstack-clients/python-ironicclient.morph3
-rw-r--r--strata/openstack-common.morph112
-rw-r--r--strata/openstack-services.morph566
-rw-r--r--strata/openstack-services/ceilometer.morph23
-rw-r--r--strata/openstack-services/cinder.morph18
-rw-r--r--strata/openstack-services/horizon.morph59
-rw-r--r--strata/openstack-services/ipaddr-py.morph6
-rw-r--r--strata/openstack-services/ironic.morph17
-rw-r--r--strata/openstack-services/librabbitmq.morph10
-rw-r--r--strata/openstack-services/neutron.morph17
-rw-r--r--strata/openstack-services/nova.morph18
-rw-r--r--strata/openstack-services/novnc.morph11
-rw-r--r--strata/openstack-services/open-iscsi.morph45
-rw-r--r--strata/openstack-services/pies.morph11
-rw-r--r--strata/openstack-services/pysendfile.morph3
-rw-r--r--strata/openstack-services/qpid-python.morph6
-rw-r--r--strata/openstack-services/rabbitmq-codegen.morph7
-rw-r--r--strata/openstack-services/rabbitmq-server.morph16
-rw-r--r--strata/openstack-services/rtslib-fb.morph27
-rw-r--r--strata/openstack-services/singledispatch.morph3
-rw-r--r--strata/openstack-services/tempest.morph12
-rw-r--r--strata/openstack-services/tftp-hpa.morph5
-rw-r--r--strata/openstack-services/thrift.morph6
-rw-r--r--strata/ostree-core.morph16
-rw-r--r--strata/pcre-utils.morph9
-rw-r--r--strata/python-cliapp.morph24
-rw-r--r--strata/python-cliapp/python-coveragepy.morph6
-rw-r--r--strata/python-common.morph74
-rw-r--r--strata/python-common/pycrypto.morph3
-rw-r--r--strata/python-core.morph52
-rw-r--r--strata/python-core/pyyaml.morph6
-rw-r--r--strata/python-pygobject.morph12
-rw-r--r--strata/python-pygobject/pygobject.morph5
-rw-r--r--strata/python-tools.morph25
-rw-r--r--strata/python-wsgi.morph15
-rw-r--r--strata/python3-core.morph11
-rw-r--r--strata/python3-core/python3.morph7
-rw-r--r--strata/qt4-sdk.morph11
-rw-r--r--strata/qt4-sdk/qt-creator.morph9
-rw-r--r--strata/qt4-tools.morph33
-rw-r--r--strata/qt4-tools/icu.morph8
-rw-r--r--strata/qt4-tools/qt4-tools.morph18
-rw-r--r--strata/qt4-tools/ruby-1.8.morph9
-rw-r--r--strata/qt4-tools/ruby-1.9.morph9
-rw-r--r--strata/qt5-sdk.morph12
-rw-r--r--strata/qt5-sdk/qt-creator.morph8
-rw-r--r--strata/qt5-sdk/snowshoe.morph8
-rw-r--r--strata/qt5-tools-qtmultimedia.morph12
-rw-r--r--strata/qt5-tools-qtwebkit.morph34
-rw-r--r--strata/qt5-tools.morph137
-rw-r--r--strata/qt5-tools/icu.morph9
-rw-r--r--strata/qt5-tools/qt3d.morph10
-rw-r--r--strata/qt5-tools/qtbase.morph17
-rw-r--r--strata/qt5-tools/qtconnectivity.morph10
-rw-r--r--strata/qt5-tools/qtdeclarative.morph10
-rw-r--r--strata/qt5-tools/qtdoc.morph8
-rw-r--r--strata/qt5-tools/qtgraphicaleffects.morph8
-rw-r--r--strata/qt5-tools/qtimageformats.morph10
-rw-r--r--strata/qt5-tools/qtjsbackend.morph10
-rw-r--r--strata/qt5-tools/qtlocation.morph10
-rw-r--r--strata/qt5-tools/qtmultimedia.morph10
-rw-r--r--strata/qt5-tools/qtquick1.morph10
-rw-r--r--strata/qt5-tools/qtquickcontrols.morph10
-rw-r--r--strata/qt5-tools/qtscript.morph10
-rw-r--r--strata/qt5-tools/qtsensors.morph10
-rw-r--r--strata/qt5-tools/qtserialport.morph10
-rw-r--r--strata/qt5-tools/qtsvg.morph10
-rw-r--r--strata/qt5-tools/qttools.morph10
-rw-r--r--strata/qt5-tools/qttranslations.morph8
-rw-r--r--strata/qt5-tools/qtwebkit-examples.morph8
-rw-r--r--strata/qt5-tools/qtwebkit.morph11
-rw-r--r--strata/qt5-tools/qtwebsockets.morph10
-rw-r--r--strata/qt5-tools/qtx11extras.morph10
-rw-r--r--strata/qt5-tools/qtxmlpatterns.morph10
-rw-r--r--strata/qt5-tools/ruby-1.8.morph9
-rw-r--r--strata/qt5-tools/ruby-1.9.morph9
-rw-r--r--strata/ruby.morph50
-rw-r--r--strata/ruby/bundler.morph6
-rw-r--r--strata/ruby/hoe.morph16
-rw-r--r--strata/ruby/rake-compiler.morph15
-rw-r--r--strata/ruby/ruby-1.8.morph9
-rw-r--r--strata/ruby/ruby.morph9
-rw-r--r--strata/swift.morph29
-rw-r--r--strata/swift/xattr.morph8
-rw-r--r--strata/test-tools.morph113
-rw-r--r--strata/test-tools/subunit.morph3
-rw-r--r--strata/tools.morph85
-rw-r--r--strata/tools/device-tree-compiler.morph6
-rw-r--r--strata/tools/distcc.morph10
-rw-r--r--strata/tools/gdb.morph5
-rw-r--r--strata/tools/git-fat.morph4
-rw-r--r--strata/tools/ipmitool.morph5
-rw-r--r--strata/tools/kexec-tools.morph6
-rw-r--r--strata/tools/lsof.morph12
-rw-r--r--strata/tools/parted.morph10
-rw-r--r--strata/tools/procps-ng.morph27
-rw-r--r--strata/tools/u-boot.morph11
-rw-r--r--strata/tools/vala-bootstrap.morph7
-rw-r--r--strata/tools/zip.morph9
-rw-r--r--strata/trove.morph82
-rw-r--r--strata/trove/cgit.morph6
-rw-r--r--strata/trove/clod.morph7
-rw-r--r--strata/trove/gall.morph7
-rw-r--r--strata/trove/gitano.morph5
-rw-r--r--strata/trove/lace.morph7
-rw-r--r--strata/trove/lrexlib-pcre.morph9
-rw-r--r--strata/trove/lua-scrypt.morph4
-rw-r--r--strata/trove/lua.morph11
-rw-r--r--strata/trove/luxio.morph7
-rw-r--r--strata/trove/supple.morph7
-rw-r--r--strata/trove/trove-setup.morph4
-rw-r--r--strata/unionfs-fuse-group.morph21
-rw-r--r--strata/unionfs-fuse-group/unionfs-fuse.morph13
-rw-r--r--strata/virtualbox-guest-x86_64.morph22
-rw-r--r--strata/virtualbox-guest-x86_64/vboxguest.morph34
-rw-r--r--strata/virtualbox-guest-x86_64/yasm.morph4
-rw-r--r--strata/virtualization.morph96
-rw-r--r--strata/virtualization/dmidecode.morph7
-rw-r--r--strata/virtualization/dnsmasq.morph10
-rw-r--r--strata/virtualization/ebtables.morph7
-rw-r--r--strata/virtualization/libosinfo.morph6
-rw-r--r--strata/virtualization/libvirt.morph31
-rw-r--r--strata/virtualization/openvswitch.morph11
-rw-r--r--strata/virtualization/qemu.morph5
-rw-r--r--strata/virtualization/xml-catalog.morph6
-rw-r--r--strata/virtualization/yajl.morph8
-rw-r--r--strata/wayland-generic.morph18
-rw-r--r--strata/wayland-generic/libxkbcommon.morph5
-rw-r--r--strata/wayland-generic/wayland.morph7
-rw-r--r--strata/webtools.morph21
-rw-r--r--strata/webtools/icu.morph8
-rw-r--r--strata/webtools/nginx.morph14
-rw-r--r--strata/weston-common.morph14
-rw-r--r--strata/weston-common/weston.morph7
-rw-r--r--strata/weston-genivi.morph21
-rw-r--r--strata/weston-genivi/wayland-ivi-extension.morph8
-rw-r--r--strata/weston-genivi/weston.morph53
-rw-r--r--strata/x-common.morph234
-rw-r--r--strata/x-generic.morph49
-rw-r--r--strata/x-generic/xserver.morph13
-rw-r--r--strata/xfce.morph164
-rw-r--r--strata/xfce/elementary-xfce.morph10
-rw-r--r--strata/xfce/exo.morph8
-rw-r--r--strata/xfce/garcon.morph8
-rw-r--r--strata/xfce/gtk-xfce-engine-2.morph8
-rw-r--r--strata/xfce/librsvg.morph6
-rw-r--r--strata/xfce/libwnck.morph11
-rw-r--r--strata/xfce/libxfce4ui.morph8
-rw-r--r--strata/xfce/libxfce4util.morph8
-rw-r--r--strata/xfce/thunar.morph9
-rw-r--r--strata/xfce/tumbler.morph8
-rw-r--r--strata/xfce/xfce4-appfinder.morph8
-rw-r--r--strata/xfce/xfce4-panel.morph8
-rw-r--r--strata/xfce/xfce4-session.morph9
-rw-r--r--strata/xfce/xfce4-settings.morph8
-rw-r--r--strata/xfce/xfconf.morph8
-rw-r--r--strata/xfce/xfdesktop.morph8
-rw-r--r--strata/xfce/xfwm4.morph11
-rw-r--r--strata/xorg-util-macros-common.morph15
-rw-r--r--strata/xstatic.morph114
-rw-r--r--strata/zookeeper-client.morph16
-rw-r--r--strata/zookeeper-server.morph10
-rw-r--r--strata/zookeeper.morph26
-rw-r--r--strata/zookeeper/java-ant.morph8
-rw-r--r--strata/zookeeper/java-binary.morph11
-rw-r--r--strata/zookeeper/zookeeper-client.morph17
-rw-r--r--strata/zookeeper/zookeeper-server.morph30
-rw-r--r--strata/zookeeper/zookeeper.morph14
-rwxr-xr-xstrip-gplv3.configure101
-rw-r--r--swift-build-rings.yml34
-rwxr-xr-xswift-storage-devices-validate.py60
-rw-r--r--swift-storage.configure107
-rw-r--r--swift/etc/ntp.conf25
-rw-r--r--swift/manifest15
-rw-r--r--swift/usr/lib/systemd/system/rsync.service11
-rw-r--r--swift/usr/lib/systemd/system/swift-storage-setup.service12
-rw-r--r--swift/usr/lib/systemd/system/swift-storage.service12
-rw-r--r--swift/usr/share/swift/etc/rsyncd.j223
-rw-r--r--swift/usr/share/swift/etc/swift/account-server.j2192
-rw-r--r--swift/usr/share/swift/etc/swift/container-server.j2203
-rw-r--r--swift/usr/share/swift/etc/swift/object-server.j2283
-rw-r--r--swift/usr/share/swift/etc/swift/swift.j2118
-rw-r--r--swift/usr/share/swift/hosts1
-rw-r--r--swift/usr/share/swift/swift-storage.yml24
-rw-r--r--systems/armv7lhf-cross-toolchain-system-x86_32.morph19
-rw-r--r--systems/armv7lhf-cross-toolchain-system-x86_64.morph19
-rw-r--r--systems/base-system-armv7-highbank.morph20
-rw-r--r--systems/base-system-armv7-versatile.morph20
-rw-r--r--systems/base-system-armv7b-highbank.morph20
-rw-r--r--systems/base-system-armv7b-vexpress-tc2.morph19
-rw-r--r--systems/base-system-armv7lhf-highbank.morph20
-rw-r--r--systems/base-system-armv8b64.morph22
-rw-r--r--systems/base-system-armv8l64.morph22
-rw-r--r--systems/base-system-ppc64-generic.morph20
-rw-r--r--systems/base-system-x86_32-generic.morph19
-rw-r--r--systems/base-system-x86_64-generic.morph20
-rw-r--r--systems/build-system-armv5l-openbmc-aspeed.morph43
-rw-r--r--systems/build-system-armv7lhf-highbank.morph55
-rw-r--r--systems/build-system-armv7lhf-jetson.morph53
-rw-r--r--systems/build-system-armv8b64.morph57
-rw-r--r--systems/build-system-armv8l64.morph57
-rw-r--r--systems/build-system-ppc64.morph53
-rw-r--r--systems/build-system-x86_32-chroot.morph53
-rw-r--r--systems/build-system-x86_32.morph55
-rw-r--r--systems/build-system-x86_64-chroot.morph53
-rw-r--r--systems/build-system-x86_64.morph55
-rw-r--r--systems/ceph-service-x86_64-generic.morph64
-rw-r--r--systems/cross-bootstrap-system-armv5l-generic.morph21
-rw-r--r--systems/cross-bootstrap-system-armv7lhf-generic.morph21
-rw-r--r--systems/cross-bootstrap-system-armv8b64-generic.morph21
-rw-r--r--systems/cross-bootstrap-system-armv8l64-generic.morph21
-rw-r--r--systems/cross-bootstrap-system-ppc64-generic.morph21
-rw-r--r--systems/cross-bootstrap-system-x86_64-generic.morph21
-rw-r--r--systems/cxmanage-system-x86_64-generic.morph24
-rw-r--r--systems/devel-system-armv7-chroot.morph62
-rw-r--r--systems/devel-system-armv7-highbank.morph65
-rw-r--r--systems/devel-system-armv7-versatile.morph63
-rw-r--r--systems/devel-system-armv7-wandboard.morph63
-rw-r--r--systems/devel-system-armv7b-chroot.morph54
-rw-r--r--systems/devel-system-armv7b-highbank.morph61
-rw-r--r--systems/devel-system-armv7lhf-chroot.morph62
-rw-r--r--systems/devel-system-armv7lhf-highbank.morph68
-rw-r--r--systems/devel-system-armv7lhf-jetson.morph66
-rw-r--r--systems/devel-system-armv7lhf-wandboard.morph66
-rw-r--r--systems/devel-system-armv8b64.morph67
-rw-r--r--systems/devel-system-armv8l64.morph68
-rw-r--r--systems/devel-system-ppc64-chroot.morph60
-rw-r--r--systems/devel-system-ppc64-generic.morph63
-rw-r--r--systems/devel-system-x86_32-chroot.morph62
-rw-r--r--systems/devel-system-x86_32-generic.morph68
-rw-r--r--systems/devel-system-x86_64-chroot.morph64
-rw-r--r--systems/devel-system-x86_64-generic.morph69
-rw-r--r--systems/devel-system-x86_64-vagrant.morph66
-rw-r--r--systems/genivi-baseline-system-armv7lhf-jetson.morph47
-rw-r--r--systems/genivi-baseline-system-armv7lhf-versatile.morph45
-rw-r--r--systems/genivi-baseline-system-x86_64-generic.morph48
-rw-r--r--systems/initramfs-x86_64.morph11
-rw-r--r--systems/installer-system-armv8b64.morph36
-rw-r--r--systems/installer-system-x86_64.morph35
-rw-r--r--systems/minimal-system-armv5l-openbmc-aspeed.morph20
-rw-r--r--systems/minimal-system-x86_32-generic.morph21
-rw-r--r--systems/minimal-system-x86_64-generic.morph21
-rw-r--r--systems/nodejs-system-x86_64.morph22
-rw-r--r--systems/ocaml-system-x86_64.morph20
-rw-r--r--systems/openstack-system-x86_64.morph85
-rw-r--r--systems/qt4-devel-system-x86_64-generic.morph44
-rw-r--r--systems/qt5-devel-system-x86_64-generic.morph46
-rw-r--r--systems/swift-system-x86_64.morph34
-rw-r--r--systems/trove-system-x86_64.morph57
-rw-r--r--systems/web-system-x86_64-generic.morph37
-rw-r--r--systems/weston-system-armv7lhf-jetson.morph49
-rw-r--r--systems/weston-system-x86_64-generic.morph49
-rw-r--r--systems/xfce-system.morph55
-rw-r--r--systems/zookeeper-client-x86_64.morph28
-rw-r--r--systems/zookeeper-server-x86_64.morph28
-rwxr-xr-xtrove.configure148
-rw-r--r--trove.configure.help126
-rw-r--r--vagrant-files/home/vagrant/.ssh/authorized_keys1
-rw-r--r--vagrant-files/manifest4
-rw-r--r--vagrant.configure55
844 files changed, 45899 insertions, 0 deletions
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 00000000..5da687ee
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,5 @@
+[gerrit]
+host=gerrit.baserock.org
+port=29418
+project=baserock/baserock/definitions
+defaultbranch=master
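
The .gitreview file above is what the git-review tool reads to find the Gerrit server for this repository. A minimal usage sketch, assuming git-review is installed on the contributor's machine:

    git review -s        # one-off: add the Gerrit remote described in .gitreview
    git review master    # push the current branch to gerrit.baserock.org for review against master
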
diff --git a/README b/README
new file mode 100644
index 00000000..7d72b743
--- /dev/null
+++ b/README
@@ -0,0 +1,6 @@
+README for morphs
+=================
+
+These are some morphologies for Baserock. Baserock is a system
+for developing embedded and appliance Linux systems. For
+more information, see <http://wiki.baserock.org>.
diff --git a/VERSION b/VERSION
new file mode 100644
index 00000000..0a70affa
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+version: 3
diff --git a/busybox-init.configure b/busybox-init.configure
new file mode 100644
index 00000000..c7dba3b9
--- /dev/null
+++ b/busybox-init.configure
@@ -0,0 +1,145 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to configure a system
+# to use busybox for its init, if INIT_SYSTEM=busybox is specified.
+#
+# As well as checking INIT_SYSTEM, the following variables are used.
+#
+# Getty configuration:
+# * CONSOLE_DEV: Which device to spawn a getty on (default: ttyS0)
+# * CONSOLE_BAUD: Baud rate of the console (default: 115200)
+# * CONSOLE_MODE: What kind of terminal this console emulates
+# (default: vt100)
+
+if [ "$INIT_SYSTEM" != busybox ]; then
+ echo Not configuring system to use busybox init.
+ exit 0
+fi
+
+set -e
+echo Configuring system to use busybox init
+
+RUN_SCRIPT=/etc/rcS
+INIT_SCRIPT=/sbin/init
+
+install_mdev_config(){
+ install -D -m644 /dev/stdin "$1" <<'EOF'
+# support module loading on hotplug
+$MODALIAS=.* root:root 660 @modprobe "$MODALIAS"
+
+# null may already exist; therefore ownership has to be changed with command
+null root:root 666 @chmod 666 $MDEV
+zero root:root 666
+full root:root 666
+random root:root 444
+urandom root:root 444
+hwrandom root:root 444
+grsec root:root 660
+
+kmem root:root 640
+mem root:root 640
+port root:root 640
+# console may already exist; therefore ownership has to be changed with command
+console root:root 600 @chmod 600 $MDEV
+ptmx root:root 666
+pty.* root:root 660
+
+# Typical devices
+
+tty root:root 666
+tty[0-9]* root:root 660
+vcsa*[0-9]* root:root 660
+ttyS[0-9]* root:root 660
+
+# block devices
+ram[0-9]* root:root 660
+loop[0-9]+ root:root 660
+sd[a-z].* root:root 660
+hd[a-z][0-9]* root:root 660
+md[0-9]* root:root 660
+sr[0-9]* root:root 660 @ln -sf $MDEV cdrom
+fd[0-9]* root:root 660
+
+# net devices
+SUBSYSTEM=net;.* root:root 600 @nameif
+tun[0-9]* root:root 600 =net/
+tap[0-9]* root:root 600 =net/
+EOF
+}
+
+install_start_script(){
+ install -D -m755 /dev/stdin "$1" <<'EOF'
+#!/bin/sh
+mount -t devtmpfs devtmpfs /dev
+mount -t proc proc /proc
+mount -t sysfs sysfs /sys
+mkdir -p /dev/pts
+mount -t devpts devpts /dev/pts
+
+echo /sbin/mdev >/proc/sys/kernel/hotplug
+mdev -s
+
+hostname -F /etc/hostname
+
+run-parts -a start /etc/init.d
+EOF
+}
+
+install_inittab(){
+ local inittab="$1"
+ local dev="$2"
+ local baud="$3"
+ local mode="$4"
+ install -D -m644 /dev/stdin "$1" <<EOF
+::sysinit:$RUN_SCRIPT
+
+::askfirst:-/bin/cttyhack /bin/sh
+::askfirst:/sbin/getty -L $dev $baud $mode
+
+::ctrlaltdel:/sbin/reboot
+::shutdown:/sbin/swapoff -a
+::shutdown:/bin/umount -a -r
+::restart:/sbin/init
+EOF
+}
+
+install_init_symlink(){
+ local initdir="$(dirname "$1")"
+ local initname="$(basename "$1")"
+ mkdir -p "$initdir"
+ cd "$initdir"
+ for busybox_dir in . ../bin ../sbin ../usr/bin ../usr/sbin; do
+ local busybox="$busybox_dir/busybox"
+ if [ ! -x "$busybox" ]; then
+ continue
+ fi
+ ln -sf "$busybox" "$initname"
+ return 0
+ done
+ echo Unable to find busybox >&2
+ exit 1
+}
+
+install_mdev_config "$1/etc/mdev.conf"
+
+install_start_script "$1$RUN_SCRIPT"
+
+install_inittab "$1/etc/inittab" "${CONSOLE_DEV-ttyS0}" \
+ "${CONSOLE_BAUD-115200}" "${CONSOLE_MODE-vt100}"
+
+install_init_symlink "$1$INIT_SCRIPT"
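
The extension above is driven entirely by environment variables and takes the unpacked system root as its only argument; "morph deploy" normally runs it during deployment. A minimal manual-invocation sketch, assuming a hypothetical target tree mounted at /mnt/target:

    # only acts when INIT_SYSTEM=busybox; CONSOLE_DEV/CONSOLE_BAUD/CONSOLE_MODE
    # default to ttyS0, 115200 and vt100 respectively
    INIT_SYSTEM=busybox ./busybox-init.configure /mnt/target

This writes /etc/mdev.conf, /etc/rcS, /etc/inittab and the /sbin/init symlink inside /mnt/target.
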
diff --git a/ceph.configure b/ceph.configure
new file mode 100644
index 00000000..c3cd92d1
--- /dev/null
+++ b/ceph.configure
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+# Copyright (C) 2013 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import cliapp
+import sys
+import os
+import subprocess
+import shutil
+import re
+import stat
+
+systemd_monitor_template = """
+[Unit]
+Description=Ceph Monitor firstboot setup
+After=network-online.target
+
+[Service]
+ExecStart=/bin/bash -c "/root/setup-ceph-head | tee /root/monitor-setuplog"
+ExecStartPost=/bin/rm /etc/systemd/system/multi-user.target.wants/ceph-monitor-fboot.service
+
+[Install]
+WantedBy=multi-user.target
+"""
+
+systemd_monitor_fname_template = "ceph-monitor-fboot.service"
+
+systemd_osd_template = """
+[Unit]
+Description=Ceph osd firstboot setup
+After=network-online.target
+
+[Service]
+ExecStart=/bin/bash -c "/root/setup-ceph-node | tee /root/storage-setuplog"
+ExecStartPost=/bin/rm /etc/systemd/system/multi-user.target.wants/ceph-storage-fboot.service
+
+[Install]
+WantedBy=multi-user.target
+"""
+systemd_osd_fname_template = "ceph-storage-fboot.service"
+
+ceph_monitor_config_template = """#!/bin/bash
+ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
+ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'
+ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
+monmaptool --create --add 0 10.0.100.2 --fsid 9ceb9257-7541-4de4-b34b-586079986700 /tmp/monmap
+mkdir /var/lib/ceph/mon/ceph-0
+ceph-mon --mkfs -i 0 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
+/etc/init.d/ceph start mon.0
+touch ~/monitor-configured
+"""
+
+ceph_storage_config_template = """#!/bin/bash
+scp 10.0.100.2:/var/lib/ceph/bootstrap-osd/ceph.keyring /var/lib/ceph/bootstrap-osd/
+echo -e "n\np\n1\n\n\nw\n" | fdisk /dev/sdb
+ceph-disk prepare --cluster ceph --cluster-uuid 9ceb9257-7541-4de4-b34b-586079986700 --fs-type ext4 /dev/sdb1
+sudo ceph-disk activate /dev/sdb1
+/etc/init.d/ceph start osd.0
+touch ~/storage-configured
+"""
+
+executable_file_permissions = stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR | \
+ stat.S_IXGRP | stat.S_IRGRP | \
+ stat.S_IXOTH | stat.S_IROTH
+
+class CephConfigurationExtension(cliapp.Application):
+ """
+ Set up ceph server daemons.
+
+ Must include the following environment variables:
+
+    HOSTNAME - Must be defined; it is used as the ID for
+               the monitor and metadata daemons.
+    CEPH_CLUSTER - Cluster name, for example 'ceph'.
+    CEPH_CONF - Provide a ceph configuration file.
+
+    Optional environment variables:
+
+ CEPH_BOOTSTRAP_OSD - Registered key capable of generating OSD
+ keys.
+ CEPH_BOOTSTRAP_MDS - Registered key capable of generating MDS
+ keys.
+
+ Bootstrap keys are required for creating OSD daemons on servers
+ that do not have a running monitor daemon. They are gathered
+ by 'ceph-deploy gatherkeys' but can be generated and registered
+ separately.
+
+ CEPH_MON - (Blank) Create a ceph monitor daemon on the image.
+ CEPH_MON_KEYRING - Location of monitor keyring. Required by the
+ monitor if using cephx authentication.
+
+    CEPH_OSD_X_DATA_DIR - Location of the data directory for an OSD.
+                          Creates an OSD daemon on the image. 'X' is an integer
+                          id; many OSD daemons may run on the same server.
+
+ CEPH_MDS - (Blank) Create a metadata server daemon on server.
+ """
+
+ def process_args(self, args):
+
+ if "HOSTNAME" not in os.environ:
+ print "ERROR: Need a hostname defined by 'HOSTNAME'"
+ sys.exit(1)
+ if "CEPH_CLUSTER" not in os.environ:
+ print "ERROR: Need a cluster name defined by 'CEPH_CLUSTER'"
+ sys.exit(1)
+ if "CEPH_CONF" not in os.environ:
+ print "ERROR: Need a ceph conf file defined by 'CEPH_CONF'"
+ sys.exit(1)
+
+ self.dest_dir = args[0]
+
+ self.cluster_name = os.environ["CEPH_CLUSTER"]
+ self.hostname = os.environ["HOSTNAME"]
+
+ self.conf_file = "/etc/ceph/{}.conf".format(self.cluster_name)
+ self.mon_dir = "/var/lib/ceph/mon/"
+ self.osd_dir = "/var/lib/ceph/osd/"
+ self.mds_dir = "/var/lib/ceph/mds/"
+ self.tmp_dir = "/var/lib/ceph/tmp/"
+ self.bootstrap_mds_dir = "/var/lib/ceph/bootstrap-mds/"
+ self.bootstrap_osd_dir = "/var/lib/ceph/bootstrap-osd/"
+ self.systemd_dir = "/etc/systemd/system/"
+ self.systemd_multiuser_dir = "/etc/systemd/system/multi-user.target.wants/"
+
+ self.copy_to_img(os.environ["CEPH_CONF"], self.conf_file)
+
+ # Copy over bootstrap keyrings
+ if "CEPH_BOOTSTRAP_OSD" in os.environ:
+ self.copy_bootstrap_osd(os.environ["CEPH_BOOTSTRAP_OSD"]);
+ if "CEPH_BOOTSTRAP_MDS" in os.environ:
+ self.copy_bootstrap_mds(os.environ["CEPH_BOOTSTRAP_MDS"]);
+
+ # Configure any monitor daemons
+ if "CEPH_MON" in os.environ:
+ self.create_mon_data_dir(os.environ.get("CEPH_MON_KEYRING"))
+ else:
+ self.create_osd_startup_script("None", "None")
+
+ # Configure any object storage daemons
+ osd_re = r"CEPH_OSD_(\d+)_DATA_DIR$"
+
+ for env in os.environ.keys():
+ match = re.match(osd_re, env)
+ if match:
+ osd_data_dir_env = match.group(0)
+ osd_id = match.group(1)
+
+ self.create_osd_data_dir(osd_id, os.environ.get(osd_data_dir_env))
+
+ # Configure any mds daemons
+ if "CEPH_MDS" in os.environ:
+ self.create_mds_data_dir()
+
+ # Create a fake 'partprobe'
+ fake_partprobe_filename = self.dest_dir + "/sbin/partprobe"
+ fake_partprobe = open(fake_partprobe_filename, 'w')
+ fake_partprobe.write("#!/bin/bash\nexit 0;\n")
+ fake_partprobe.close()
+ os.chmod(fake_partprobe_filename, executable_file_permissions)
+ self.create_startup_scripts()
+
+ def copy_to_img(self, src_file, dest_file):
+ shutil.copy(src_file, self.dest_dir + dest_file)
+
+ def copy_bootstrap_osd(self, src_file):
+ self.copy_to_img(src_file,
+ os.path.join(self.bootstrap_osd_dir, "{}.keyring".format(self.cluster_name)))
+
+ def copy_bootstrap_mds(self, src_file):
+ self.copy_to_img(src_file,
+ os.path.join(self.bootstrap_mds_dir, "{}.keyring".format(self.cluster_name)))
+
+ def symlink_to_multiuser(self, fname):
+ print >> sys.stderr, os.path.join("../", fname)
+ print >> sys.stderr, self.dest_dir + os.path.join(self.systemd_multiuser_dir, fname)
+ os.symlink(os.path.join("../", fname),
+ self.dest_dir + os.path.join(self.systemd_multiuser_dir, fname))
+
+ def create_mon_data_dir(self, src_keyring):
+
+ #Create the monitor data directory
+ mon_data_dir = os.path.join(self.mon_dir, "{}-{}".format(self.cluster_name, self.hostname))
+ os.makedirs(self.dest_dir + mon_data_dir)
+
+ #Create sysvinit file to start via sysvinit
+ sysvinit_file = os.path.join(mon_data_dir, "sysvinit")
+ open(self.dest_dir + sysvinit_file, 'a').close()
+
+ #Create systemd file to initialize the monitor data directory
+ keyring = ""
+ if src_keyring:
+ #Copy the keyring from local to the image
+ dest_keyring = os.path.join(self.tmp_dir,
+ "{}-{}.mon.keyring".format(self.cluster_name, self.hostname))
+ self.copy_to_img(src_keyring, dest_keyring)
+ keyring = "--keyring " + dest_keyring
+
+ mon_systemd_fname = systemd_monitor_fname_template
+ systemd_script_name = self.dest_dir + os.path.join(self.systemd_dir, mon_systemd_fname)
+ mon_systemd = open(systemd_script_name, 'w')
+ mon_systemd.write(systemd_monitor_template)
+ mon_systemd.close()
+ #Create a symlink to the multi user target
+ self.symlink_to_multiuser(mon_systemd_fname)
+
+ def create_osd_data_dir(self, osd_id, data_dir):
+ if not data_dir:
+ data_dir = '/srv/osd' + osd_id
+
+ #Create the osd data dir
+ os.makedirs(self.dest_dir + data_dir)
+
+ def create_osd_startup_script(self, osd_id, data_dir):
+ osd_systemd_fname = systemd_osd_fname_template
+ osd_full_name = self.dest_dir + os.path.join(self.systemd_dir, osd_systemd_fname)
+
+ osd_systemd = open(osd_full_name, 'w')
+
+ osd_systemd.write(systemd_osd_template)
+ osd_systemd.close()
+
+ #Create a symlink to the multi user target
+ self.symlink_to_multiuser(osd_systemd_fname)
+
+ def create_mds_data_dir(self):
+
+        #Create the mds data directory
+ mds_data_dir = os.path.join(self.mds_dir, "{}-{}".format(self.cluster_name, self.hostname))
+ os.makedirs(self.dest_dir + mds_data_dir)
+
+ #Create sysvinit file to start via sysvinit
+ sysvinit_file = os.path.join(mds_data_dir, "sysvinit")
+ open(self.dest_dir + sysvinit_file, 'a').close()
+
+
+ def create_startup_scripts(self):
+ head_setup_file = os.path.join(self.dest_dir, "root", "setup-ceph-head")
+
+ ceph_head_setup = open(head_setup_file, "w")
+ ceph_head_setup.write(ceph_monitor_config_template)
+ ceph_head_setup.close()
+ os.chmod(head_setup_file, executable_file_permissions)
+
+ osd_setup_file = os.path.join(self.dest_dir, "root", "setup-ceph-node")
+ ceph_node_setup = open(osd_setup_file, "w")
+ ceph_node_setup.write(ceph_storage_config_template)
+ ceph_node_setup.close()
+ os.chmod(osd_setup_file, executable_file_permissions)
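+ # The two helper scripts written above end up in the image as
+ # /root/setup-ceph-head and /root/setup-ceph-node; presumably they are
+ # meant to be run by hand once the deployed node has booted, e.g.
+ # /root/setup-ceph-head on the monitor node and /root/setup-ceph-node
+ # on each storage (OSD) node.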
+
+
+CephConfigurationExtension().run()
diff --git a/chef-system-x86_64-container.morph b/chef-system-x86_64-container.morph
new file mode 100644
index 00000000..3e81c73e
--- /dev/null
+++ b/chef-system-x86_64-container.morph
@@ -0,0 +1,32 @@
+name: chef-system-x86_64-container
+kind: system
+arch: x86_64
+description: Minimal chef system suitable for running in a container
+configuration-extensions:
+- set-hostname
+- simple-network
+- nfsboot
+- install-files
+- busybox-init
+- remove-gcc
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+ artifacts:
+ - build-essential-minimal
+- name: core
+ morph: strata/core.morph
+ artifacts:
+ - core-openssl
+- name: foundation
+ morph: strata/foundation.morph
+ artifacts:
+ - foundation-runtime
+- name: ruby
+ morph: strata/ruby.morph
+ artifacts:
+ - ruby-runtime
+- name: chef
+ morph: strata/chef.morph
+ artifacts:
+ - chef-runtime
diff --git a/chef/manifest b/chef/manifest
new file mode 100644
index 00000000..de6cc542
--- /dev/null
+++ b/chef/manifest
@@ -0,0 +1,3 @@
+0040755 0 0 /root
+0040700 1000 1000 /root/.ssh
+0100600 1000 1000 /root/.ssh/authorized_keys
diff --git a/cloud-init.configure b/cloud-init.configure
new file mode 100755
index 00000000..aa83e0e2
--- /dev/null
+++ b/cloud-init.configure
@@ -0,0 +1,63 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+#
+# This is a "morph deploy" configuration extension to enable the
+# cloud-init services.
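+#
+# A typical way to drive this extension (illustrative; the cluster and
+# deployment names are just examples) is to set CLOUD_INIT in the cluster
+# morphology, or to override it on the morph command line, e.g.:
+#
+#   morph deploy clusters/mason-openstack.morph mason-openstack \
+#       mason-openstack.CLOUD_INIT=True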
+set -e
+
+ROOT="$1"
+
+##########################################################################
+
+case "$CLOUD_INIT" in
+''|False|no)
+ exit 0
+ ;;
+True|yes)
+ echo "Configuring cloud-init"
+ ;;
+*)
+ echo "Unrecognised value '$CLOUD_INIT' for CLOUD_INIT" >&2
+ exit 1
+ ;;
+esac
+
+
+cloud_init_services="cloud-config.service
+ cloud-init-local.service
+ cloud-init.service
+ cloud-final.service"
+
+# Iterate over the cloud-init services and enable them creating a link
+# into /etc/systemd/system/multi-user.target.wants.
+# If the services to link are not present, fail.
+
+services_folder="lib/systemd/system"
+for service_name in $cloud_init_services; do
+ if [ ! -f "$ROOT/$services_folder/$service_name" ]; then
+ echo "ERROR: Service $service_name is missing." >&2
+ echo "Failed to configure cloud-init."
+ exit 1
+ else
+ echo Enabling systemd service "$service_name" >"$MORPH_LOG_FD"
+ ln -sf "/$services_folder/$service_name" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$service_name"
+ fi
+done
diff --git a/clusters/cephclient.morph b/clusters/cephclient.morph
new file mode 100644
index 00000000..b4db22e0
--- /dev/null
+++ b/clusters/cephclient.morph
@@ -0,0 +1,20 @@
+name: ceph-cluster
+kind: cluster
+systems:
+- morph: systems/ceph-service-x86_64-generic.morph
+ deploy:
+ ceph-node-virtualbox-image:
+ type: virtualbox-ssh
+ SYSTEM: systems/ceph-service-x86_64-generic.morph
+ location: vbox+ssh://user@machine/ChefNode4/home/user/chefnode4.vdi
+ # HOST_IPADDR and NETMASK should be set to the IP address and netmask
+ # of the virtualbox host on the host-only interface.
+ #HOST_IPADDR: 10.0.100.100
+ #NETMASK: 255.255.255.0
+
+ # This is an example of how to configure the three interfaces necessary
+ # to support ceph in the BCPC configuration.
+ #NETWORK_CONFIG: lo:loopback;enp0s3:static,address=10.0.100.14,netmask=255.255.255.0;enp0s8:static,address=172.16.100.14,netmask=255.255.255.0;enp0s9:static,address=192.168.100.14,netmask=255.255.255.0
+ DISK_SIZE: 8G
+ HOSTNAME: CephNode4
+
+ # You must install authorized_keys in chef/root/.ssh/ before this will work.
+ INSTALL_FILES: chef/manifest
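+
+ # Example (illustrative): build the system and then deploy this cluster
+ # by naming the deployment defined above.
+ # morph build systems/ceph-service-x86_64-generic.morph
+ # morph deploy clusters/cephclient.morph ceph-node-virtualbox-image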
diff --git a/clusters/ci.morph b/clusters/ci.morph
new file mode 100644
index 00000000..cb56328c
--- /dev/null
+++ b/clusters/ci.morph
@@ -0,0 +1,117 @@
+name: ci
+kind: cluster
+description: |
+ Deploy all the systems for CD.
+
+ This cluster morph is for use by the Mason Continuous Delivery pipeline
+ during development.
+systems:
+- morph: systems/devel-system-x86_64-generic.morph
+ deploy:
+ devel-system-x86_64-generic:
+ type: rawdisk
+ location: devel-system-x86_64-generic.img
+ DISK_SIZE: 4G
+- morph: systems/devel-system-x86_32-generic.morph
+ deploy:
+ devel-system-x86_32-generic:
+ type: rawdisk
+ location: devel-system-x86_32-generic.img
+ DISK_SIZE: 4G
+- morph: systems/build-system-armv7lhf-jetson.morph
+ deploy:
+ build-system-armv7lhf-jetson:
+ type: rawdisk
+ location: build-system-armv7lhf-jetson.img
+ DISK_SIZE: 2G
+ BOOT_DEVICE: "/dev/mmcblk0p1"
+ ROOT_DEVICE: "/dev/mmcblk0p2"
+ DTB_PATH: "boot/tegra124-jetson-tk1.dtb"
+ BOOTLOADER_CONFIG_FORMAT: "extlinux"
+ BOOTLOADER_INSTALL: "none"
+ KERNEL_ARGS: cma=256M console=tty0 console=ttyS0,115200n8 no_console_suspend=1 nouveau.pstate=1
+- morph: systems/weston-system-x86_64-generic.morph
+ deploy:
+ weston-system-x86_64-generic:
+ type: rawdisk
+ location: weston-system-x86_64-generic.img
+ DISK_SIZE: 4G
+ KERNEL_ARGS: vga=788
+- morph: systems/weston-system-armv7lhf-jetson.morph
+ deploy:
+ weston-system-armv7lhf-jetson:
+ type: rawdisk
+ location: weston-system-armv7lhf-jetson.img
+ DISK_SIZE: 4G
+ BOOT_DEVICE: "/dev/mmcblk0p1"
+ ROOT_DEVICE: "/dev/mmcblk0p2"
+ DTB_PATH: "boot/tegra124-jetson-tk1.dtb"
+ BOOTLOADER_CONFIG_FORMAT: "extlinux"
+ BOOTLOADER_INSTALL: "none"
+ KERNEL_ARGS: cma=256M console=tty0 console=ttyS0,115200n8 no_console_suspend=1 nouveau.pstate=1
+- morph: systems/genivi-baseline-system-x86_64-generic.morph
+ deploy:
+ genivi-baseline-system-x86_64-generic:
+ type: rawdisk
+ location: genivi-baseline-system-x86_64-generic.img
+ DISK_SIZE: 4G
+ KERNEL_ARGS: vga=788
+- morph: systems/genivi-baseline-system-armv7lhf-jetson.morph
+ deploy:
+ genivi-baseline-system-armv7lhf-jetson:
+ type: rawdisk
+ location: genivi-baseline-system-armv7lhf-jetson.img
+ DISK_SIZE: 4G
+ BOOT_DEVICE: "/dev/mmcblk0p1"
+ ROOT_DEVICE: "/dev/mmcblk0p2"
+ DTB_PATH: "boot/tegra124-jetson-tk1.dtb"
+ BOOTLOADER_CONFIG_FORMAT: "extlinux"
+ BOOTLOADER_INSTALL: "none"
+ KERNEL_ARGS: cma=256M console=tty0 console=ttyS0,115200n8 no_console_suspend=1 nouveau.pstate=1
+- morph: systems/openstack-system-x86_64.morph
+ deploy:
+ openstack-system-x86_64:
+ type: rawdisk
+ location: baserock-openstack-system-x86_64.img
+ DISK_SIZE: 5G
+ INSTALL_FILES: openstack/manifest
+ HOSTNAME: onenode
+ RABBITMQ_HOST: onenode
+ RABBITMQ_PORT: 5672
+ RABBITMQ_USER: rabbitmq
+ RABBITMQ_PASSWORD: veryinsecure
+ CONTROLLER_HOST_ADDRESS: onenode
+ MANAGEMENT_INTERFACE_IP_ADDRESS: 127.0.0.1
+ KEYSTONE_TEMPORARY_ADMIN_TOKEN: 22f3aa1cf538e3f6d5e8
+ KEYSTONE_ADMIN_PASSWORD: veryinsecure
+ KEYSTONE_DB_USER: keystoneDB
+ KEYSTONE_DB_PASSWORD: veryinsecure
+ GLANCE_SERVICE_USER: glance
+ GLANCE_SERVICE_PASSWORD: veryinsecure
+ GLANCE_DB_USER: glanceDB
+ GLANCE_DB_PASSWORD: veryinsecure
+ NOVA_SERVICE_USER: nova
+ NOVA_SERVICE_PASSWORD: veryinsecure
+ NOVA_DB_USER: novaDB
+ NOVA_DB_PASSWORD: veryinsecure
+ NOVA_VIRT_TYPE: qemu
+ CINDER_SERVICE_USER: cinder
+ CINDER_SERVICE_PASSWORD: veryinsecure
+ CINDER_DB_USER: cinderDB
+ CINDER_DB_PASSWORD: veryinsecure
+ CINDER_DEVICE: /dev/sdb
+ NEUTRON_SERVICE_USER: neutron
+ NEUTRON_SERVICE_PASSWORD: veryinsecure
+ NEUTRON_DB_USER: neutronDB
+ NEUTRON_DB_PASSWORD: veryinsecure
+ METADATA_PROXY_SHARED_SECRET: novaneutronmetasecret
+ IRONIC_SERVICE_USER: ironic
+ IRONIC_SERVICE_PASSWORD: veryinsecure
+ IRONIC_DB_USER: ironicDB
+ IRONIC_DB_PASSWORD: veryinsecure
+ CEILOMETER_SERVICE_USER: ceilometer
+ CEILOMETER_SERVICE_PASSWORD: veryinsecure
+ CEILOMETER_DB_USER: ceilometerDB
+ CEILOMETER_DB_PASSWORD: veryinsecure
+ METERING_SECRET: insecureceilometersecret
+ HOSTS_CONTROLLER: 127.0.0.1 onenode
diff --git a/clusters/example-distbuild-cluster.morph b/clusters/example-distbuild-cluster.morph
new file mode 100644
index 00000000..513c16c5
--- /dev/null
+++ b/clusters/example-distbuild-cluster.morph
@@ -0,0 +1,37 @@
+name: example-distbuild-cluster
+kind: cluster
+description: |
+ This is an example cluster morph that can be adapted to set up a
+ Baserock distributed build network.
+
+ You will need to deploy a Trove for the distributed build network
+ to use before deploying this cluster. The worker SSH key file should
+ be generated as part of the Trove deployment. It is the key used by
+ workers to authenticate with the Trove to give them read access to
+ all source repositories.
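+
+ Once every occurrence of $MY_TROVE below has been replaced with the
+ address of your Trove, all three systems can be deployed in one go with
+ a command like the following (illustrative):
+
+ morph deploy clusters/example-distbuild-cluster.morph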
+systems:
+- morph: systems/build-system-x86_64.morph
+ deploy-defaults:
+ CONTROLLERHOST: build-controller
+ DISTBUILD_CONTROLLER: false
+ DISTBUILD_WORKER: true
+ FSTAB_SRC: LABEL=src /srv/distbuild auto defaults,rw,noatime 0 2
+ INSTALL_FILES: distbuild/manifest
+ NFSBOOT_CONFIGURE: true
+ TROVE_ID: $MY_TROVE
+ WORKER_SSH_KEY: ssh-keys/worker.key
+ deploy:
+ build-controller:
+ type: nfsboot
+ location: $MY_TROVE
+ DISTBUILD_CONTROLLER: true
+ HOSTNAME: build-controller
+ WORKERS: build-node-1, build-node-2
+ build-node-1:
+ type: nfsboot
+ location: $MY_TROVE
+ HOSTNAME: build-node-1
+ build-node-2:
+ type: nfsboot
+ location: $MY_TROVE
+ HOSTNAME: build-node-2
diff --git a/clusters/example-swift-storage-cluster.morph b/clusters/example-swift-storage-cluster.morph
new file mode 100644
index 00000000..b1ea784f
--- /dev/null
+++ b/clusters/example-swift-storage-cluster.morph
@@ -0,0 +1,62 @@
+name: example-swift-storage-cluster
+kind: cluster
+systems:
+- morph: systems/swift-system-x86_64.morph
+ deploy-defaults:
+ INSTALL_FILES: swift/manifest
+
+ CONTROLLER_HOST_ADDRESS: <controller host address>
+
+ SWIFT_PART_POWER: 10
+ SWIFT_REPLICAS: 3
+ SWIFT_MIN_PART_HOURS: 1
+
+ SWIFT_STORAGE_DEVICES: [{ ip: <node0 management ip>, device: sdb, weight: 100 },
+ { ip: <node0 management ip>, device: sdc, weight: 100 },
+ { ip: <node0 management ip>, device: sdd, weight: 100 },
+
+ { ip: <node1 management ip>, device: sdb, weight: 100 },
+ { ip: <node1 management ip>, device: sdc, weight: 100 },
+ { ip: <node1 management ip>, device: sdd, weight: 100 }]
+
+ # This value can be any random string or number
+ # but each node in your swift cluster must have the same value
+ SWIFT_REBALANCE_SEED: 3828
+
+ # NOTE: Replace SWIFT_HASH_PATH_PREFIX and SWIFT_HASH_PATH_SUFFIX
+ # with your own unique values,
+ #
+ # `openssl rand -hex 10' can be used to generate unique values
+ #
+ # These values should be kept secret, do not lose them.
+ #
+ SWIFT_HASH_PATH_PREFIX: 041fc210e4e1d333ce1d
+ SWIFT_HASH_PATH_SUFFIX: 4d6f5362a356dda7fb7d
+
+ FSTAB_SDB: /dev/sdb /srv/node/sdb xfs defaults,user,rw 0 0
+ FSTAB_SDC: /dev/sdc /srv/node/sdc xfs defaults,user,rw 0 0
+ FSTAB_SDD: /dev/sdd /srv/node/sdd xfs defaults,user,rw 0 0
+
+ deploy:
+ node0:
+ type: kvm
+ location: kvm+ssh://user@host/swift-storage-0/home/user/swift-storage-0.img
+ DISK_SIZE: 10G
+ RAM_SIZE: 1G
+ VCPUS: 1
+ HOSTNAME: swift-storage-0
+ NIC_CONFIG: network=default
+ NETWORK_CONFIG: ens3:static,address=<node0 management ip>,netmask=255.255.255.0
+ MANAGEMENT_INTERFACE_IP_ADDRESS: <node0 management ip>
+ ATTACH_DISKS: /dev/node0_sdb:/dev/node0_sdc:/dev/node0_sdd
+ node1:
+ type: kvm
+ location: kvm+ssh://user@host/swift-storage-1/home/user/swift-storage-1.img
+ DISK_SIZE: 10G
+ RAM_SIZE: 1G
+ VCPUS: 1
+ HOSTNAME: swift-storage-1
+ NIC_CONFIG: network=default
+ NETWORK_CONFIG: ens3:static,address=<node1 management ip>,netmask=255.255.255.0
+ MANAGEMENT_INTERFACE_IP_ADDRESS: <node1 management ip>
+ ATTACH_DISKS: /dev/node1_sdb:/dev/node1_sdc:/dev/node1_sdd
diff --git a/clusters/hardware-deployment.morph b/clusters/hardware-deployment.morph
new file mode 100644
index 00000000..c6b7dce9
--- /dev/null
+++ b/clusters/hardware-deployment.morph
@@ -0,0 +1,35 @@
+name: hardware-deployment
+kind: cluster
+description: |
+ Deploy a build-system into hardware using the combination
+ of the pxeboot.write extension and the installer system.
+ This example uses the spawn-novlan mode of pxeboot.write.
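+
+ A deployment command for this cluster might look like the following
+ (illustrative; adjust the MAC address, IPMI settings and interface
+ names below first):
+
+ morph deploy clusters/hardware-deployment.morph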
+systems:
+- morph: systems/installer-system-x86_64.morph
+ deploy:
+ installer:
+ type: pxeboot
+ location: AB:CD:EF:12:34:56:78 #MAC address.
+ PXEBOOT_MODE: spawn-novlan
+ PXEBOOT_DEPLOYER_INTERFACE: ens6
+ KERNEL_ARGS: console=ttyS1,9600 console=tty0 init=/usr/lib/baserock-installer/installer
+ HOSTNAME: installer-system
+ IPMI_USER: myipmiuser
+ IPMI_PASSWORD: myipmipassword
+ IPMI_HOST: 123.34.45.120 #IPMI ip address
+ INSTALLER_TARGET_STORAGE_DEVICE: /dev/sda
+ INSTALLER_ROOTFS_TO_INSTALL: /rootfs
+ subsystems:
+ - morph: systems/build-system-x86_64.morph
+ deploy:
+ to-install:
+ type: sysroot
+ location: /rootfs
+ INITRAMFS_PATH: boot/initramfs.gz
+ KERNEL_ARGS: console=ttyS1,9600 console=tty0
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ initramfs:
+ type: initramfs
+ location: boot/initramfs.gz
diff --git a/clusters/image-package-example.morph b/clusters/image-package-example.morph
new file mode 100644
index 00000000..fd8487e2
--- /dev/null
+++ b/clusters/image-package-example.morph
@@ -0,0 +1,12 @@
+name: image-package-example
+kind: cluster
+description: |
+ Packaged system and script for installing it, for deferred instantiation.
+systems:
+- morph: systems/base-system-x86_32-generic.morph
+ deploy:
+ imgpkg:
+ type: image-package
+ location: image-package-example.tar
+ BOOTLOADER_BLOBS: /usr/share/syslinux/mbr.bin
+ INCLUDE_SCRIPTS: image-package-example/make-disk-image.sh.in:image-package-example/disk-install.sh.in:image-package-example/common.sh.in
diff --git a/clusters/initramfs-test.morph b/clusters/initramfs-test.morph
new file mode 100644
index 00000000..afc94961
--- /dev/null
+++ b/clusters/initramfs-test.morph
@@ -0,0 +1,17 @@
+name: initramfs-test
+kind: cluster
+systems:
+- morph: systems/base-system-x86_64-generic.morph
+ deploy:
+ system:
+ type: rawdisk
+ location: initramfs-system-x86_64.img
+ DISK_SIZE: 1G
+ HOSTNAME: initramfs-system
+ INITRAMFS_PATH: boot/initramfs.gz
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ initramfs:
+ type: initramfs
+ location: boot/initramfs.gz
diff --git a/clusters/installer-build-system-x86_64.morph b/clusters/installer-build-system-x86_64.morph
new file mode 100644
index 00000000..a9ebcaca
--- /dev/null
+++ b/clusters/installer-build-system-x86_64.morph
@@ -0,0 +1,52 @@
+name: installer-build-system-x86_64
+kind: cluster
+description: |
+ This is a cluster morphology that can be used to deploy
+ installer systems. This is done by adding the files needed
+ using a manifest file (installer/manifest) with the INSTALL_FILES
+ extension, and using the installer.configure extension to generate
+ the configuration needed in the system.
+
+ This manifest installs the installer script as
+ /usr/lib/installer/installer.py; in combination with adding
+ "init=/usr/lib/installer/installer.py" to KERNEL_ARGS in the system,
+ this makes the system run the installer.py script as its init process.
+
+ The installer.py script will read the information needed to
+ install the system (where the root filesystem to install is, and
+ where to install it) from /etc/install.conf.
+
+ This cluster also deploys a subsystem (a build-system in this case),
+ which is the system that the installer system/script is going to
+ install.
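+
+ A typical workflow (illustrative) is to deploy this cluster and then
+ copy the resulting image onto the installation medium, for example:
+
+ morph deploy clusters/installer-build-system-x86_64.morph
+ dd if=installer-build-system-x86_64.img of=/dev/sdX bs=4M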
+
+systems:
+- morph: systems/installer-system-x86_64.morph
+ deploy:
+ installer:
+ type: rawdisk
+ location: installer-build-system-x86_64.img
+ KERNEL_ARGS: init=/usr/lib/baserock-installer/installer
+ DISK_SIZE: 6G
+ HOSTNAME: installer-x86_64
+ INSTALLER_TARGET_STORAGE_DEVICE: /dev/sda
+ INSTALLER_ROOTFS_TO_INSTALL: /rootfs
+ INITRAMFS_PATH: boot/initramfs.gz
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ installer-initramfs:
+ type: initramfs
+ location: boot/initramfs.gz
+ - morph: systems/build-system-x86_64.morph
+ deploy:
+ to-install:
+ type: sysroot
+ location: /rootfs
+ INITRAMFS_PATH: boot/initramfs.gz
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ to-install-initramfs:
+ type: initramfs
+ location: boot/initramfs.gz
diff --git a/clusters/jetson-upgrade.morph b/clusters/jetson-upgrade.morph
new file mode 100644
index 00000000..9fd5155b
--- /dev/null
+++ b/clusters/jetson-upgrade.morph
@@ -0,0 +1,18 @@
+name: jetson-upgrade
+kind: cluster
+systems:
+- morph: systems/devel-system-armv7lhf-jetson.morph
+ deploy-defaults:
+ TROVE_HOST: TROVE_HOST
+ TROVE_ID: TROVE_ID
+ BOOT_DEVICE: "/dev/mmcblk0p1"
+ ROOT_DEVICE: "/dev/mmcblk0p2"
+ DTB_PATH: "boot/tegra124-jetson-tk1.dtb"
+ BOOTLOADER_CONFIG_FORMAT: "extlinux"
+ BOOTLOADER_INSTALL: "none"
+ KERNEL_ARGS: cma=256M console=tty0 console=ttyS0,115200n8 no_console_suspend=1 nouveau.pstate=1
+ FSTAB_SRC: LABEL=src /src auto defaults,rw,noatime,nofail 0 2
+ deploy:
+ self:
+ type: ssh-rsync
+ location: root@127.0.0.1
diff --git a/clusters/mason-openstack.morph b/clusters/mason-openstack.morph
new file mode 100644
index 00000000..6ef14888
--- /dev/null
+++ b/clusters/mason-openstack.morph
@@ -0,0 +1,39 @@
+name: openstack-mason
+kind: cluster
+description: |
+ This is a template cluster morphology that can be adapted to set up a
+ Mason. Masons are composed of a trove and a distbuild system.
+systems:
+- morph: systems/build-system-x86_64.morph
+ deploy-defaults:
+ ARTIFACT_CACHE_SERVER: example-cache-server
+ CONTROLLERHOST: controller-hostname
+ DISTBUILD_CONTROLLER: true
+ DISTBUILD_WORKER: true
+ INSTALL_FILES: distbuild/manifest
+ RAM_SIZE: 8G
+ TROVE_HOST: your-upstream-trove
+ TROVE_ID: your-upstream-trove-prefix
+ VCPUS: 2
+ WORKER_SSH_KEY: ssh-keys/worker.key
+ deploy:
+ mason-openstack:
+ type: openstack
+ location: openstack-auth-url (eg example.com:5000/v2.0)
+ DISK_SIZE: 6G
+ DISTBUILD_CONTROLLER: true
+ HOSTNAME: controller-hostname
+ MASON_CLUSTER_MORPHOLOGY: clusters/ci.morph
+ MASON_DEFINITIONS_REF: master
+ MASON_DISTBUILD_ARCH: x86_64
+ MASON_TEST_HOST: user@openstack-auth-url
+ WORKERS: controller-hostname
+ OPENSTACK_AUTH_URL: openstack-auth-url
+ OPENSTACK_USER: baserock
+ OPENSTACK_TENANT: baserock
+ OPENSTACK_TENANT_ID: 7d7ebfe23367490b973a10fa426c3aec
+ OPENSTACK_IMAGENAME: mason-openstack
+ OPENSTACK_NETWORK_ID: 71f5151a-b7c3-405d-a841-d1b07e5db099
+ CLOUD_INIT: yes
+ KERNEL_ARGS: console=ttyS0 console=tty0
+ TEST_INFRASTRUCTURE_TYPE: openstack
diff --git a/clusters/mason.morph b/clusters/mason.morph
new file mode 100644
index 00000000..9717239d
--- /dev/null
+++ b/clusters/mason.morph
@@ -0,0 +1,56 @@
+name: example-mason-cluster
+kind: cluster
+description: |
+ This is a template cluster morphology that can be adapted to set up a
+ Mason. Masons are composed of a trove and a distbuild system.
+
+ It is suggested that you use mason/mason-generator.sh to adapt this
+ template to suit your needs. It also handles the generation of
+ keys to let the systems communicate.
+systems:
+- morph: systems/trove-system-x86_64.morph
+ deploy:
+ red-box-v1-trove:
+ type: kvm
+ location: kvm+ssh://vm-user@vm-host/red-box-v1-trove/vm-path/red-box-v1-trove.img
+ AUTOSTART: true
+ DISK_SIZE: 20G
+ HOSTNAME: red-box-v1-trove
+ LORRY_SSH_KEY: ssh_keys/lorry.key
+ MASON_SSH_PUBKEY: ssh_keys/mason.key.pub
+ RAM_SIZE: 8G
+ TROVE_ADMIN_EMAIL: adminuser@example.com
+ TROVE_ADMIN_NAME: Nobody
+ TROVE_ADMIN_SSH_PUBKEY: ssh_keys/id_rsa.pub
+ TROVE_ADMIN_USER: adminuser
+ TROVE_COMPANY: Company name goes here
+ TROVE_HOST: red-box-v1
+ TROVE_ID: red-box-v1-trove
+ UPSTREAM_TROVE: upstream-trove
+ VCPUS: 2
+ VERSION_LABEL: 45
+ WORKER_SSH_PUBKEY: ssh_keys/worker.key.pub
+- morph: systems/build-system-x86_64.morph
+ deploy-defaults:
+ ARTIFACT_CACHE_SERVER: red-box-v1-trove.example.com
+ CONTROLLERHOST: red-box-v1-controller.example.com
+ DISTBUILD_CONTROLLER: false
+ DISTBUILD_WORKER: true
+ INSTALL_FILES: distbuild/manifest
+ RAM_SIZE: 8G
+ TROVE_HOST: upstream-trove
+ TROVE_ID: upstream-trove
+ VCPUS: 2
+ WORKER_SSH_KEY: ssh_keys/worker.key
+ deploy:
+ red-box-v1-controller:
+ type: kvm
+ location: kvm+ssh://vm-user@vm-host/red-box-v1-controller/vm-path/red-box-v1-controller.img
+ DISK_SIZE: 60G
+ DISTBUILD_CONTROLLER: true
+ HOSTNAME: red-box-v1-controller
+ MASON_CLUSTER_MORPHOLOGY: clusters/ci.morph
+ MASON_DEFINITIONS_REF: master
+ MASON_DISTBUILD_ARCH: x86_64
+ MASON_TEST_HOST: vm-user@vm-host:/vm-path/
+ WORKERS: red-box-v1-controller
diff --git a/clusters/minimal-system-armv5l-openbmc-aspeed-deploy.morph b/clusters/minimal-system-armv5l-openbmc-aspeed-deploy.morph
new file mode 100644
index 00000000..eea600cf
--- /dev/null
+++ b/clusters/minimal-system-armv5l-openbmc-aspeed-deploy.morph
@@ -0,0 +1,13 @@
+name: minimal-system-armv5l-openbmc-aspeed-deploy
+kind: cluster
+systems:
+- morph: systems/minimal-system-armv5l-openbmc-aspeed.morph
+ deploy:
+ minimal-system-armv5l-openbmc-aspeed:
+ type: jffs2
+ location: minimal-system-armv5l-openbmc-aspeed.img
+ ROOT_DEVICE: "/dev/mtdblock"
+ BOOTLOADER_CONFIG_FORMAT: "extlinux"
+ BOOTLOADER_INSTALL: "none"
+ ERASE_BLOCK: 64
+ INIT_SYSTEM: busybox
diff --git a/clusters/minimal-system-deploy.morph b/clusters/minimal-system-deploy.morph
new file mode 100644
index 00000000..06629ffc
--- /dev/null
+++ b/clusters/minimal-system-deploy.morph
@@ -0,0 +1,14 @@
+name: minimal-system-deploy
+kind: cluster
+description: |
+ Deploy a minimal system to a system running KVM
+systems:
+- morph: systems/minimal-system-x86_32-generic.morph
+ deploy:
+ vm:
+ type: kvm
+ location: kvm+ssh://192.168.122.1/tiny-x86_32/srv/VMs/tiny-x86_32.img
+ DISK_SIZE: 512M
+ HOSTNAME: tiny-x86_32
+ INIT_SYSTEM: busybox
+ RAM_SIZE: 512M
diff --git a/clusters/moonshot-m2-armv8b64.morph b/clusters/moonshot-m2-armv8b64.morph
new file mode 100644
index 00000000..c8e5bc81
--- /dev/null
+++ b/clusters/moonshot-m2-armv8b64.morph
@@ -0,0 +1,56 @@
+name: moonshot-m2-deployment
+kind: cluster
+description: |
+ Install an armv8b64 build system into the M.2 SSD storage of an HP
+ Moonshot node, using a combination of the pxeboot.write extension and
+ the installer system.
+systems:
+- morph: systems/installer-system-armv8b64.morph
+ deploy:
+ installer:
+ type: pxeboot
+ location: 14:58:d0:57:7f:42
+ PXEBOOT_MODE: existing-server
+ PXEBOOT_CONFIG_TFTP_ADDRESS: sftp://192.168.0.1/srv/nfsboot/tftp/
+ PXEBOOT_ROOTFS_RSYNC_ADDRESS: rsync://192.168.0.1/srv/nfsboot/
+ PXEBOOT_PXE_REBOOT_COMMAND: |
+ ssh Administrator@10.0.1.10 set node power off force c31n1
+ ssh Administrator@10.0.1.10 set node boot pxe c31n1
+ # Nodes are powered on twice as sometimes powering them on
+ # once is not enough
+ ssh Administrator@10.0.1.10 set node power on c31n1
+ ssh Administrator@10.0.1.10 set node power on c31n1
+ PXEBOOT_REBOOT_COMMAND: |
+ ssh Administrator@10.0.1.10 set node power off force c31n1
+ ssh Administrator@10.0.1.10 set node boot m.2 c31n1
+ ssh Administrator@10.0.1.10 set node power on c31n1
+ ssh Administrator@10.0.1.10 set node power on c31n1
+
+ INSTALLER_TARGET_STORAGE_DEVICE: /dev/sda
+ INSTALLER_ROOTFS_TO_INSTALL: /rootfs
+ INSTALLER_POST_INSTALL_COMMAND: |
+ while : ; do
+ echo "enter 'installed' in your deployment machine to finish the installation"
+ sleep 2
+ done
+ INSTALLER_CREATE_BOOT_PARTITION: yes
+
+ HOSTNAME: installer-system-c31n1
+ DTB_PATH: boot/m400-1003.dtb
+ KERNEL_ARGS: console=ttyS0,9600n8r init=/usr/lib/baserock-installer/installer
+ INSTALL_FILES: moonshot/manifest
+ MOONSHOT_KERNEL: yes
+ subsystems:
+ - morph: systems/devel-system-armv8b64.morph
+ deploy:
+ to-install:
+ type: sysroot
+ location: /rootfs
+ HOSTNAME: baserock-c31n1
+ DTB_PATH: boot/m400-1003.dtb
+ INSTALL_FILES: moonshot/manifest
+ MOONSHOT_KERNEL: yes
+ BOOT_DEVICE: /dev/sda1
+ ROOT_DEVICE: /dev/sda2
+ BOOTLOADER_CONFIG_FORMAT: extlinux
+ BOOTLOADER_INSTALL: "none"
diff --git a/clusters/moonshot-pxe-armv8b64.morph b/clusters/moonshot-pxe-armv8b64.morph
new file mode 100644
index 00000000..2d32efb0
--- /dev/null
+++ b/clusters/moonshot-pxe-armv8b64.morph
@@ -0,0 +1,30 @@
+name: moonshot-m400-armv8b64-netboot
+kind: cluster
+description: |
+ Deploy a big-endian armv8b64 devel system onto an HP Moonshot node
+
+ The system will be configured to boot through PXE from existing DHCP,
+ TFTP and NFS servers.
+systems:
+- morph: systems/devel-system-armv8b64.morph
+ deploy:
+ netboot:
+ type: pxeboot
+ location: 14:58:d0:57:7f:42
+ PXEBOOT_MODE: existing-server
+ PXEBOOT_CONFIG_TFTP_ADDRESS: sftp://192.168.0.1/srv/nfsboot/tftp/
+ PXEBOOT_ROOTFS_RSYNC_ADDRESS: rsync://192.168.0.1/srv/nfsboot/
+ PXEBOOT_PXE_REBOOT_COMMAND: |
+ ssh Administrator@10.0.1.10 set node power off force c31n1
+ ssh Administrator@10.0.1.10 set node boot pxe c31n1
+ # Nodes are powered on twice as sometimes powering them on
+ # once is not enough
+ ssh Administrator@10.0.1.10 set node power on c31n1
+ ssh Administrator@10.0.1.10 set node power on c31n1
+ PXE_INSTALLER: no
+
+ HOSTNAME: baserock-c31n1
+ DTB_PATH: boot/m400-1003.dtb
+ KERNEL_ARGS: console=ttyS0,9600n8r rw
+ INSTALL_FILES: moonshot/manifest
+ MOONSHOT_KERNEL: yes
diff --git a/clusters/moonshot-pxe-armv8l64.morph b/clusters/moonshot-pxe-armv8l64.morph
new file mode 100644
index 00000000..3286c72e
--- /dev/null
+++ b/clusters/moonshot-pxe-armv8l64.morph
@@ -0,0 +1,22 @@
+name: moonshot-m400-armv8l64-netboot
+kind: cluster
+description: |
+ Deploy an armv8l64 devel system into an HP Moonshot node
+
+ The system will be configured to boot through PXE from existing DHCP,
+ TFTP and NFS servers.
+systems:
+- morph: systems/devel-system-armv8l64.morph
+ deploy:
+ netboot:
+ type: pxeboot
+ location: 14:58:d0:57:7f:42
+ PXEBOOT_MODE: existing-server
+ PXEBOOT_CONFIG_TFTP_ADDRESS: sftp://192.168.0.1/srv/nfsboot/tftp/
+ PXEBOOT_ROOTFS_RSYNC_ADDRESS: rsync://192.168.0.1/srv/nfsboot/
+ KERNEL_ARGS: console=ttyS0,9600n8r rw
+ DTB_PATH: boot/m400-1003.dtb
+ HOSTNAME: baserock-m400-node31
+ MOONSHOT_KERNEL: yes
+ INSTALL_FILES: moonshot/manifest
+ PXE_INSTALLER: no
diff --git a/clusters/openstack-one-node-swift.morph b/clusters/openstack-one-node-swift.morph
new file mode 100644
index 00000000..588b6e81
--- /dev/null
+++ b/clusters/openstack-one-node-swift.morph
@@ -0,0 +1,142 @@
+name: openstack-one-node-swift
+kind: cluster
+description: |
+ This is a cluster morphology for deploying an x86_64 OpenStack system
+ all-in-one-node.
+
+ Requirements to be able to run and test the system:
+
+ - DISK_SIZE should be bigger than 5G
+ - The system has to have available at least 4G of RAM, but once
+ you start instantiating VMs you will need more.
+ - The IP of the system can't change, and you need to know it beforehand,
+ that is, the system needs a static IP address.
+
+ This cluster is configurable, but with the following constraints:
+
+ - The hostname in RABBITMQ_HOST has to match CONTROLLER_HOST_ADDRESS,
+ and HOSTS_CONTROLLER.
+ - HOSTS_CONTROLLER is only needed if the hostname (see previous point)
+ is not a FQDN.
+ - The IP listed in MANAGEMENT_INTERFACE_IP_ADDRESS has to match the one
+ used in HOSTS_CONTROLLER.
+ - CINDER_DEVICE should be a path to a storage device ready to be
+ used/formatted for cinder data.
+ - EXTERNAL_INTERFACE is required when the system has more than one network
+ interface.
+
+ You can also have a look at the following suggestions:
+
+ - NOVA_VIRT_TYPE can be either 'kvm' or 'qemu', depending on where the
+ system is being deployed to.
+ - We recommend changing all the PASSWORD variables, as well as the
+ KEYSTONE_TEMPORARY_ADMIN_TOKEN, METADATA_PROXY_SHARED_SECRET and
+ METERING_SECRET
+ - Setting NOVA_BAREMETAL_SCHEDULING with a YAML truth value will configure
+ Nova to schedule baremetal machines through the Ironic driver, instead of
+ scheduling virtual machines.
+
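+ After adjusting the variables below, a deployment might be run like this
+ (illustrative):
+
+ morph deploy clusters/openstack-one-node-swift.morph release \
+ release.MANAGEMENT_INTERFACE_IP_ADDRESS=<management ip>
+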
+systems:
+- morph: systems/openstack-system-x86_64.morph
+ deploy:
+ release:
+ type: rawdisk
+ location: baserock-openstack-system-x86_64.img
+ DISK_SIZE: 10G
+ INSTALL_FILES: openstack/manifest swift/manifest
+
+ HOSTNAME: onenode
+
+ #########################################################################
+ ## Swift config options
+ #########################################################################
+
+ SWIFT_CONTROLLER: True
+
+ SWIFT_ADMIN_PASSWORD: insecure
+
+ SWIFT_PART_POWER: 10
+ SWIFT_REPLICAS: 3
+ SWIFT_MIN_PART_HOURS: 1
+
+ SWIFT_STORAGE_DEVICES: [{ ip: <storage node 0 management ip>, device: sdb, weight: 100 },
+ { ip: <storage node 0 management ip>, device: sdc, weight: 100 },
+ { ip: <storage node 0 management ip>, device: sdd, weight: 100 },
+
+ { ip: <storage node 1 management ip>, device: sdb, weight: 100 },
+ { ip: <storage node 1 management ip>, device: sdc, weight: 100 },
+ { ip: <storage node 1 management ip>, device: sdd, weight: 100 }]
+
+ # This value can be any random string or number
+ # but each node in your swift cluster must have the same value
+ SWIFT_REBALANCE_SEED: 3828
+
+ # NOTE: Replace SWIFT_HASH_PATH_PREFIX and SWIFT_HASH_PATH_SUFFIX
+ # with your own unique values,
+ #
+ # `openssl rand -hex 10' can be used to generate unique values
+ #
+ # These values should be kept secret, do not lose them.
+ #
+ SWIFT_HASH_PATH_PREFIX: 041fc210e4e1d333ce1d
+ SWIFT_HASH_PATH_SUFFIX: 4d6f5362a356dda7fb7d
+
+ #########################################################################
+
+ RABBITMQ_HOST: onenode
+ RABBITMQ_PORT: 5672
+ RABBITMQ_USER: rabbitmq
+ RABBITMQ_PASSWORD: veryinsecure
+
+ CONTROLLER_HOST_ADDRESS: onenode
+ MANAGEMENT_INTERFACE_IP_ADDRESS: <management ip>
+
+ KEYSTONE_ENABLE_SERVICE: True
+ KEYSTONE_TEMPORARY_ADMIN_TOKEN: 22f3aa1cf538e3f6d5e8
+ KEYSTONE_ADMIN_PASSWORD: veryinsecure
+ KEYSTONE_DB_USER: keystoneDB
+ KEYSTONE_DB_PASSWORD: veryinsecure
+
+ GLANCE_ENABLE_SERVICE: True
+ GLANCE_SERVICE_USER: glance
+ GLANCE_SERVICE_PASSWORD: veryinsecure
+ GLANCE_DB_USER: glanceDB
+ GLANCE_DB_PASSWORD: veryinsecure
+
+ NOVA_SERVICE_USER: nova
+ NOVA_SERVICE_PASSWORD: veryinsecure
+ NOVA_DB_USER: novaDB
+ NOVA_DB_PASSWORD: veryinsecure
+ NOVA_VIRT_TYPE: qemu
+ NOVA_BAREMETAL_SCHEDULING: no
+
+ CINDER_SERVICE_USER: cinder
+ CINDER_SERVICE_PASSWORD: veryinsecure
+ CINDER_DB_USER: cinderDB
+ CINDER_DB_PASSWORD: veryinsecure
+ # Storage device to be used by Cinder
+ CINDER_DEVICE: /dev/sdb
+
+ NEUTRON_SERVICE_USER: neutron
+ NEUTRON_SERVICE_PASSWORD: veryinsecure
+ NEUTRON_DB_USER: neutronDB
+ NEUTRON_DB_PASSWORD: veryinsecure
+ METADATA_PROXY_SHARED_SECRET: novaneutronmetasecret
+
+ IRONIC_ENABLE_SERVICE: True
+ IRONIC_SERVICE_USER: ironic
+ IRONIC_SERVICE_PASSWORD: veryinsecure
+ IRONIC_DB_USER: ironicDB
+ IRONIC_DB_PASSWORD: veryinsecure
+
+ CEILOMETER_SERVICE_USER: ceilometer
+ CEILOMETER_SERVICE_PASSWORD: veryinsecure
+ CEILOMETER_DB_USER: ceilometerDB
+ CEILOMETER_DB_PASSWORD: veryinsecure
+ METERING_SECRET: insecureceilometersecret
+
+ HOSTS_CONTROLLER: <management ip> onenode
+
+ # Network interface to be used, only needed if there are more
+ # than one available.
+ # EXTERNAL_INTERFACE: eno1
diff --git a/clusters/openstack-one-node.morph b/clusters/openstack-one-node.morph
new file mode 100644
index 00000000..037cd23c
--- /dev/null
+++ b/clusters/openstack-one-node.morph
@@ -0,0 +1,106 @@
+name: openstack-one-node
+kind: cluster
+description: |
+ This is a cluster morphology for deploying an x86_64 OpenStack system
+ all-in-one-node.
+
+ Requirements to be able to run and test the system:
+
+ - DISK_SIZE should be bigger than 5G
+ - The system has to have available at least 4G of RAM, but once
+ you start instantiating VMs you will need more.
+ - The IP of the system can't change, and you need to know it beforehand,
+ that is, the system needs a static IP address.
+
+ This cluster is configurable, but with the following constraints:
+
+ - The hostname in RABBITMQ_HOST has to match CONTROLLER_HOST_ADDRESS,
+ and HOSTS_CONTROLLER.
+ - HOSTS_CONTROLLER is only needed if the hostname (see previous point)
+ is not a FQDN.
+ - The IP listed in MANAGEMENT_INTERFACE_IP_ADDRESS has to match the one
+ used in HOSTS_CONTROLLER.
+ - CINDER_DEVICE should be a path to a storage device ready to be
+ used/formatted for cinder data.
+ - EXTERNAL_INTERFACE is required when the system has more than one network
+ interface.
+
+ You can also have a look at the following suggestions:
+
+ - NOVA_VIRT_TYPE can be either 'kvm' or 'qemu', depending on where the
+ system is being deployed to.
+ - We recommend changing all the PASSWORD variables, as well as the
+ KEYSTONE_TEMPORARY_ADMIN_TOKEN, METADATA_PROXY_SHARED_SECRET and
+ METERING_SECRET
+ - Setting NOVA_BAREMETAL_SCHEDULING with a YAML truth value will configure
+ Nova to schedule baremetal machines through the Ironic driver, instead of
+ scheduling virtual machines.
+
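+ After adjusting the variables below, the deployment can be run like this
+ (illustrative):
+
+ morph deploy clusters/openstack-one-node.morph release \
+ release.CINDER_DEVICE=/dev/sdb
+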
+systems:
+- morph: systems/openstack-system-x86_64.morph
+ deploy:
+ release:
+ type: rawdisk
+ location: baserock-openstack-system-x86_64.img
+ DISK_SIZE: 10G
+ INSTALL_FILES: openstack/manifest
+
+ HOSTNAME: onenode
+
+ RABBITMQ_HOST: onenode
+ RABBITMQ_PORT: 5672
+ RABBITMQ_USER: rabbitmq
+ RABBITMQ_PASSWORD: veryinsecure
+
+ CONTROLLER_HOST_ADDRESS: onenode
+ MANAGEMENT_INTERFACE_IP_ADDRESS: <management ip>
+
+ KEYSTONE_ENABLE_SERVICE: True
+ KEYSTONE_TEMPORARY_ADMIN_TOKEN: 22f3aa1cf538e3f6d5e8
+ KEYSTONE_ADMIN_PASSWORD: veryinsecure
+ KEYSTONE_DB_USER: keystoneDB
+ KEYSTONE_DB_PASSWORD: veryinsecure
+
+ GLANCE_ENABLE_SERVICE: True
+ GLANCE_SERVICE_USER: glance
+ GLANCE_SERVICE_PASSWORD: veryinsecure
+ GLANCE_DB_USER: glanceDB
+ GLANCE_DB_PASSWORD: veryinsecure
+
+ NOVA_SERVICE_USER: nova
+ NOVA_SERVICE_PASSWORD: veryinsecure
+ NOVA_DB_USER: novaDB
+ NOVA_DB_PASSWORD: veryinsecure
+ NOVA_VIRT_TYPE: qemu
+ NOVA_BAREMETAL_SCHEDULING: no
+
+ CINDER_SERVICE_USER: cinder
+ CINDER_SERVICE_PASSWORD: veryinsecure
+ CINDER_DB_USER: cinderDB
+ CINDER_DB_PASSWORD: veryinsecure
+ # Storage device to be used by Cinder
+ CINDER_DEVICE: /dev/sdb
+
+ NEUTRON_SERVICE_USER: neutron
+ NEUTRON_SERVICE_PASSWORD: veryinsecure
+ NEUTRON_DB_USER: neutronDB
+ NEUTRON_DB_PASSWORD: veryinsecure
+ METADATA_PROXY_SHARED_SECRET: novaneutronmetasecret
+
+ IRONIC_ENABLE_SERVICE: True
+ IRONIC_SERVICE_USER: ironic
+ IRONIC_SERVICE_PASSWORD: veryinsecure
+ IRONIC_DB_USER: ironicDB
+ IRONIC_DB_PASSWORD: veryinsecure
+
+ CEILOMETER_SERVICE_USER: ceilometer
+ CEILOMETER_SERVICE_PASSWORD: veryinsecure
+ CEILOMETER_DB_USER: ceilometerDB
+ CEILOMETER_DB_PASSWORD: veryinsecure
+ METERING_SECRET: insecureceilometersecret
+
+ HOSTS_CONTROLLER: <management ip> onenode
+
+ # Network interface to be used, only needed if there are more
+ # than one available.
+ # EXTERNAL_INTERFACE: eno1
diff --git a/clusters/openstack-three-node-installer.morph b/clusters/openstack-three-node-installer.morph
new file mode 100644
index 00000000..6285217a
--- /dev/null
+++ b/clusters/openstack-three-node-installer.morph
@@ -0,0 +1,239 @@
+name: openstack-three-node-installer
+kind: cluster
+description: |
+
+ This is a cluster morphology for deploying an installer for an x86_64
+ OpenStack system spread across three nodes.
+
+ This cluster creates disk images that may be `dd`'d onto install media to
+ produce an OpenStack cluster when instantiated.
+
+ Alternatively it may be used to install directly onto a physical disk by
+ running:
+
+ morph deploy clusters/openstack-three-node-installer.morph \
+ network-installer network-installer.location=/dev/vdb
+
+ Substituting network-installer for either compute-installer or
+ controller-installer will produce different configurations, and it is possible
+ to substitute /dev/vdb for a different path to a disk image to install to a
+ different disk image.
+
+ Substitute the values of HOSTNAME, NETWORK_CONFIG, EXTERNAL_INTERFACE,
+ MANAGEMENT_IP_ADDRESS, CONTROLLER_HOST_ADDRESS, RABBITMQ_HOST and HOSTS_* to
+ match your hardware and networking configuration.
+
+ Requirements to be able to run and test the system:
+
+ - DISK_SIZE should be bigger than 5G
+ - The system has to have available at least 4G of RAM, but once
+ you start instantiating VMs you will need more.
+ - The IP of the system can't change, and you need to know it beforehand,
+ that is, the system needs a static IP address.
+
+ This cluster is configurable, but with the following constraints:
+
+ - The hostname in RABBITMQ_HOST has to match CONTROLLER_HOST_ADDRESS,
+ and HOSTS_CONTROLLER.
+ - HOSTS_CONTROLLER is only needed if the hostname (see previous point)
+ is not a FQDN.
+ - The IP listed in MANAGEMENT_INTERFACE_IP_ADDRESS has to match the one
+ used in HOSTS_CONTROLLER.
+ - CINDER_DEVICE should be a path to a storage device ready to be
+ used/formatted for cinder data.
+ - EXTERNAL_INTERFACE is required when the system has more than one network
+ interface.
+
+ You can also have a look at the following suggestions:
+
+ - NOVA_VIRT_TYPE can be either 'kvm' or 'qemu', depending on where the
+ system is being deployed to.
+ - We recommend changing all the PASSWORD variables, as well as the
+ KEYSTONE_TEMPORARY_ADMIN_TOKEN, METADATA_PROXY_SHARED_SECRET and
+ METERING_SECRET.
+ - Setting NOVA_BAREMETAL_SCHEDULING with a YAML truth value will configure
+ Nova to schedule baremetal machines through the Ironic driver, instead of
+ scheduling virtual machines.
+
+systems:
+- morph: systems/installer-system-x86_64.morph
+ deploy:
+ network-installer: &installer
+ type: rawdisk
+ location: installer-openstack-network-x86_64.img
+ KERNEL_ARGS: init=/usr/lib/baserock-installer/installer
+ DISK_SIZE: 6G
+ HOSTNAME: installer-x86_64
+ INSTALLER_TARGET_STORAGE_DEVICE: /dev/sda
+ INSTALLER_ROOTFS_TO_INSTALL: /rootfs
+ INSTALLER_POST_INSTALL_COMMAND: 'sync; poweroff -f'
+ INITRAMFS_PATH: boot/initramfs.gz
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ network-initramfs: &initramfs
+ type: initramfs
+ location: boot/initramfs.gz
+ - morph: systems/openstack-system-x86_64.morph
+ deploy:
+ network-to-install: &stack-node
+ type: sysroot
+ location: rootfs
+ INSTALL_FILES: openstack/manifest
+ INITRAMFS_PATH: boot/initramfs.gz
+
+ HOSTNAME: threenode-network
+
+ RABBITMQ_HOST: threenode-controller.os-mgmt
+ RABBITMQ_PORT: 5672
+ RABBITMQ_USER: rabbitmq
+ RABBITMQ_PASSWORD: veryinsecure
+
+ # This token needs to be unique and secret
+ KEYSTONE_ENABLE_SERVICE: False
+ KEYSTONE_TEMPORARY_ADMIN_TOKEN: 22f3aa1cf538e3f6d5e8
+ KEYSTONE_ADMIN_PASSWORD: veryinsecure
+ KEYSTONE_DB_USER: keystoneDB
+ KEYSTONE_DB_PASSWORD: veryinsecure
+
+ GLANCE_ENABLE_SERVICE: False
+ GLANCE_SERVICE_USER: glance
+ GLANCE_SERVICE_PASSWORD: veryinsecure
+ GLANCE_DB_USER: glanceDB
+ GLANCE_DB_PASSWORD: veryinsecure
+
+ NOVA_ENABLE_CONTROLLER: False
+ NOVA_ENABLE_COMPUTE: False
+ NOVA_SERVICE_USER: nova
+ NOVA_SERVICE_PASSWORD: veryinsecure
+ NOVA_DB_USER: novaDB
+ NOVA_DB_PASSWORD: veryinsecure
+ NOVA_VIRT_TYPE: kvm
+ NOVA_BAREMETAL_SCHEDULING: no
+
+ CINDER_ENABLE_CONTROLLER: False
+ CINDER_ENABLE_COMPUTE: False
+ CINDER_ENABLE_STORAGE: False
+ CINDER_SERVICE_USER: cinder
+ CINDER_SERVICE_PASSWORD: veryinsecure
+ CINDER_DB_USER: cinderDB
+ CINDER_DB_PASSWORD: veryinsecure
+ # Storage device to be used by Cinder
+ CINDER_DEVICE: /dev/sdb
+
+ NEUTRON_ENABLE_AGENT: False
+ NEUTRON_ENABLE_MANAGER: True
+ NEUTRON_ENABLE_CONTROLLER: False
+ NEUTRON_SERVICE_USER: neutron
+ NEUTRON_SERVICE_PASSWORD: veryinsecure
+ NEUTRON_DB_USER: neutronDB
+ NEUTRON_DB_PASSWORD: veryinsecure
+ METADATA_PROXY_SHARED_SECRET: novaneutronmetasecret
+
+ IRONIC_ENABLE_SERVICE: False
+ IRONIC_SERVICE_USER: ironic
+ IRONIC_SERVICE_PASSWORD: veryinsecure
+ IRONIC_DB_USER: ironicDB
+ IRONIC_DB_PASSWORD: veryinsecure
+
+ CEILOMETER_SERVICE_USER: ceilometer
+ CEILOMETER_SERVICE_PASSWORD: veryinsecure
+ CEILOMETER_DB_USER: ceilometerDB
+ CEILOMETER_DB_PASSWORD: veryinsecure
+ CEILOMETER_ENABLE_CONTROLLER: False
+ CEILOMETER_ENABLE_COMPUTE: False
+ METERING_SECRET: insecureceilometersecret
+
+ CONTROLLER_HOST_ADDRESS: threenode-controller.os-mgmt
+ MANAGEMENT_INTERFACE_IP_ADDRESS: 10.0.0.1
+
+ HOSTS_SELF: 10.0.0.1 threenode-network
+ HOSTS_NETWORK: 10.0.0.1 threenode-network.os-mgmt
+ HOSTS_CONTROL: 10.0.0.2 threenode-controller.os-mgmt
+ HOSTS_COMPUTE: 10.0.0.3 threenode-compute.os-mgmt
+
+ EXTERNAL_INTERFACE: enp3s0
+ NETWORK_CONFIG: enp3s0:dhcp;enp2s0:static,address=10.0.0.1,netmask=255.255.255.0
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ network-to-install-initramfs: *initramfs
+- morph: systems/installer-system-x86_64.morph
+ deploy:
+ controller-installer:
+ <<: *installer
+ location: installer-openstack-controller-x86_64.img
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ controller-initramfs: *initramfs
+ - morph: systems/openstack-system-x86_64.morph
+ deploy:
+ controller-to-install:
+ <<: *stack-node
+ HOSTNAME: threenode-controller
+
+ KEYSTONE_ENABLE_SERVICE: True
+
+ GLANCE_ENABLE_SERVICE: True
+
+ NOVA_ENABLE_CONTROLLER: True
+
+ CINDER_ENABLE_CONTROLLER: True
+ CINDER_ENABLE_COMPUTE: False
+ CINDER_ENABLE_STORAGE: False
+
+ NEUTRON_ENABLE_AGENT: False
+ NEUTRON_ENABLE_MANAGER: False
+ NEUTRON_ENABLE_CONTROLLER: True
+ METADATA_PROXY_SHARED_SECRET: novaneutronmetasecret
+
+ IRONIC_ENABLE_SERVICE: True
+
+ CEILOMETER_ENABLE_CONTROLLER: True
+ CEILOMETER_ENABLE_COMPUTE: False
+
+ MANAGEMENT_INTERFACE_IP_ADDRESS: 10.0.0.2
+ HOSTS_SELF: 10.0.0.2 threenode-controller
+ EXTERNAL_INTERFACE: enp2s0
+ NETWORK_CONFIG: enp2s0:dhcp;enp0s26u1u2:static,address=10.0.0.2,netmask=255.255.255.0
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ controller-to-install-initramfs: *initramfs
+- morph: systems/installer-system-x86_64.morph
+ deploy:
+ compute-installer:
+ <<: *installer
+ location: installer-openstack-compute-x86_64.img
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ compute-initramfs: *initramfs
+ - morph: systems/openstack-system-x86_64.morph
+ deploy:
+ compute-to-install:
+ <<: *stack-node
+ HOSTNAME: threenode-compute
+
+ NOVA_ENABLE_COMPUTE: True
+
+ CINDER_ENABLE_CONTROLLER: False
+ CINDER_ENABLE_COMPUTE: True
+ CINDER_ENABLE_STORAGE: True
+
+ NEUTRON_ENABLE_AGENT: True
+ NEUTRON_ENABLE_MANAGER: False
+ NEUTRON_ENABLE_CONTROLLER: False
+
+ CEILOMETER_ENABLE_CONTROLLER: False
+ CEILOMETER_ENABLE_COMPUTE: True
+
+ MANAGEMENT_INTERFACE_IP_ADDRESS: 10.0.0.3
+ HOSTS_SELF: 10.0.0.3 threenode-compute
+ EXTERNAL_INTERFACE: eno1
+ NETWORK_CONFIG: eno1:dhcp;enp0s29u1u3:static,address=10.0.0.3,netmask=255.255.255.0
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ compute-to-install-initramfs: *initramfs
diff --git a/clusters/openstack-two-node-installer.morph b/clusters/openstack-two-node-installer.morph
new file mode 100644
index 00000000..f05b0e9b
--- /dev/null
+++ b/clusters/openstack-two-node-installer.morph
@@ -0,0 +1,200 @@
+name: openstack-two-node-installer
+kind: cluster
+description: |
+
+ This is a cluster morphology for deploying an installer for an x86_64
+ OpenStack system spread across two nodes.
+
+ This cluster creates disk images that may be `dd`'d onto install media to
+ produce an OpenStack cluster when instantiated.
+
+ Alternatively it may be used to install directly onto a physical disk by
+ running:
+
+ morph deploy clusters/openstack-two-node-installer.morph \
+ controller-installer controller-installer.location=/dev/vdb
+
+ Substituting controller-installer for compute-installer will produce
+ different configurations, and it is possible to substitute /dev/vdb for a
+ different path to a disk image to install to a different disk image.
+
+ Substitute the values of HOSTNAME, NETWORK_CONFIG, EXTERNAL_INTERFACE,
+ MANAGEMENT_IP_ADDRESS, CONTROLLER_HOST_ADDRESS, RABBITMQ_HOST and HOSTS_* to
+ match your hardware and networking configuration.
+
+ Requirements to be able to run and test the system:
+
+ - DISK_SIZE should be bigger than 5G
+ - The system has to have available at least 4G of RAM, but once
+ you start instantiating VMs you will need more.
+ - The IP of the system can't change, and you need to know it beforehand,
+ that is, the system needs a static IP address.
+
+ This cluster is configurable, but with the following constraints:
+
+ - The hostname in RABBITMQ_HOST has to match CONTROLLER_HOST_ADDRESS,
+ and HOSTS_CONTROLLER.
+ - HOSTS_CONTROLLER is only needed if the hostname (see previous point)
+ is not a FQDN.
+ - The IP listed in MANAGEMENT_INTERFACE_IP_ADDRESS has to match the one
+ used in HOSTS_CONTROLLER.
+ - CINDER_DEVICE should be a path to a storage device ready to be
+ used/formatted for cinder data.
+ - EXTERNAL_INTERFACE is required when the system has more than one network
+ interface.
+
+ You can also have a look at the following suggestions:
+
+ - NOVA_VIRT_TYPE can be either 'kvm' or 'qemu', depending on where the
+ system is being deployed to.
+ - We recommend changing all the PASSWORD variables, as well as the
+ KEYSTONE_TEMPORARY_ADMIN_TOKEN, METADATA_PROXY_SHARED_SECRET and
+ METERING_SECRET.
+ - Setting NOVA_BAREMETAL_SCHEDULING with a YAML truth value will configure
+ Nova to schedule baremetal machines through the Ironic driver, instead of
+ scheduling virtual machines.
+
+systems:
+- morph: systems/installer-system-x86_64.morph
+ deploy:
+ controller-installer: &installer
+ type: rawdisk
+ location: installer-openstack-controller-x86_64.img
+ KERNEL_ARGS: init=/usr/lib/baserock-installer/installer
+ DISK_SIZE: 6G
+ HOSTNAME: installer-x86_64
+ INSTALLER_TARGET_STORAGE_DEVICE: /dev/sda
+ INSTALLER_ROOTFS_TO_INSTALL: /rootfs
+ INSTALLER_POST_INSTALL_COMMAND: 'sync; poweroff -f'
+ INITRAMFS_PATH: boot/initramfs.gz
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ controller-initramfs: &initramfs
+ type: initramfs
+ location: boot/initramfs.gz
+ - morph: systems/openstack-system-x86_64.morph
+ deploy:
+ controller-to-install: &stack-node
+ type: sysroot
+ location: rootfs
+ INSTALL_FILES: openstack/manifest
+ INITRAMFS_PATH: boot/initramfs.gz
+
+ HOSTNAME: twonode-controller
+
+ RABBITMQ_HOST: twonode-controller.os-mgmt
+ RABBITMQ_PORT: 5672
+ RABBITMQ_USER: rabbitmq
+ RABBITMQ_PASSWORD: veryinsecure
+
+ # This token needs to be unique and secret
+ KEYSTONE_ENABLE_SERVICE: True
+ KEYSTONE_TEMPORARY_ADMIN_TOKEN: 22f3aa1cf538e3f6d5e8
+ KEYSTONE_ADMIN_PASSWORD: veryinsecure
+ KEYSTONE_DB_USER: keystoneDB
+ KEYSTONE_DB_PASSWORD: veryinsecure
+
+ GLANCE_ENABLE_SERVICE: True
+ GLANCE_SERVICE_USER: glance
+ GLANCE_SERVICE_PASSWORD: veryinsecure
+ GLANCE_DB_USER: glanceDB
+ GLANCE_DB_PASSWORD: veryinsecure
+
+ NOVA_ENABLE_CONTROLLER: True
+ NOVA_ENABLE_COMPUTE: False
+ NOVA_SERVICE_USER: nova
+ NOVA_SERVICE_PASSWORD: veryinsecure
+ NOVA_DB_USER: novaDB
+ NOVA_DB_PASSWORD: veryinsecure
+ NOVA_VIRT_TYPE: kvm
+ NOVA_BAREMETAL_SCHEDULING: no
+
+ CINDER_ENABLE_CONTROLLER: True
+ CINDER_ENABLE_COMPUTE: False
+ CINDER_ENABLE_STORAGE: False
+ CINDER_SERVICE_USER: cinder
+ CINDER_SERVICE_PASSWORD: veryinsecure
+ CINDER_DB_USER: cinderDB
+ CINDER_DB_PASSWORD: veryinsecure
+ # Storage device to be used by Cinder
+ CINDER_DEVICE: /dev/sdb
+
+ NEUTRON_ENABLE_AGENT: False
+ NEUTRON_ENABLE_MANAGER: True
+ NEUTRON_ENABLE_CONTROLLER: True
+ NEUTRON_SERVICE_USER: neutron
+ NEUTRON_SERVICE_PASSWORD: veryinsecure
+ NEUTRON_DB_USER: neutronDB
+ NEUTRON_DB_PASSWORD: veryinsecure
+ METADATA_PROXY_SHARED_SECRET: novaneutronmetasecret
+
+ IRONIC_ENABLE_SERVICE: True
+ IRONIC_SERVICE_USER: ironic
+ IRONIC_SERVICE_PASSWORD: veryinsecure
+ IRONIC_DB_USER: ironicDB
+ IRONIC_DB_PASSWORD: veryinsecure
+
+ CEILOMETER_SERVICE_USER: ceilometer
+ CEILOMETER_SERVICE_PASSWORD: veryinsecure
+ CEILOMETER_DB_USER: ceilometerDB
+ CEILOMETER_DB_PASSWORD: veryinsecure
+ CEILOMETER_ENABLE_CONTROLLER: True
+ CEILOMETER_ENABLE_COMPUTE: False
+ METERING_SECRET: insecureceilometersecret
+
+ CONTROLLER_HOST_ADDRESS: twonode-controller.os-mgmt
+ MANAGEMENT_INTERFACE_IP_ADDRESS: 10.0.0.1
+
+ HOSTS_SELF: 10.0.0.1 twonode-controller
+ HOSTS_CONTROL: 10.0.0.1 twonode-controller.os-mgmt
+ HOSTS_COMPUTE: 10.0.0.3 twonode-compute.os-mgmt
+ EXTERNAL_INTERFACE: enp3s0
+ NETWORK_CONFIG: enp3s0:dhcp;enp2s0:static,address=10.0.0.1,netmask=255.255.255.0
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ controller-to-install-initramfs: *initramfs
+- morph: systems/installer-system-x86_64.morph
+ deploy:
+ compute-installer:
+ <<: *installer
+ location: installer-openstack-compute-x86_64.img
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ compute-initramfs: *initramfs
+ - morph: systems/openstack-system-x86_64.morph
+ deploy:
+ compute-to-install:
+ <<: *stack-node
+ HOSTNAME: twonode-compute
+
+ KEYSTONE_ENABLE_SERVICE: False
+
+ GLANCE_ENABLE_SERVICE: False
+
+ NOVA_ENABLE_COMPUTE: True
+ NOVA_ENABLE_CONTROLLER: False
+
+ CINDER_ENABLE_CONTROLLER: False
+ CINDER_ENABLE_COMPUTE: True
+ CINDER_ENABLE_STORAGE: True
+
+ NEUTRON_ENABLE_AGENT: True
+ NEUTRON_ENABLE_MANAGER: False
+ NEUTRON_ENABLE_CONTROLLER: False
+
+ IRONIC_ENABLE_SERVICE: False
+
+ CEILOMETER_ENABLE_CONTROLLER: False
+ CEILOMETER_ENABLE_COMPUTE: True
+
+ MANAGEMENT_INTERFACE_IP_ADDRESS: 10.0.0.3
+ HOSTS_SELF: 10.0.0.3 twonode-compute
+ EXTERNAL_INTERFACE: eno1
+ NETWORK_CONFIG: eno1:dhcp;enp0s29u1u3:static,address=10.0.0.3,netmask=255.255.255.0
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ compute-to-install-initramfs: *initramfs
diff --git a/clusters/release.morph b/clusters/release.morph
new file mode 100644
index 00000000..c5bfffca
--- /dev/null
+++ b/clusters/release.morph
@@ -0,0 +1,76 @@
+name: release
+kind: cluster
+description: |
+ Deploy all the systems we support in a release.
+
+ This cluster morph is used by the tool 'scripts/do-release'. While
+ you can deploy the systems yourself, if you are making a Baserock release
+ then the script should be used.
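+
+ If you only want one of the systems, name its deployment explicitly, for
+ example (illustrative):
+
+ morph deploy clusters/release.morph build-system-x86_64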
+systems:
+- morph: systems/build-system-x86_32-chroot.morph
+ deploy:
+ build-system-x86_32-chroot:
+ type: tar
+ location: build-system-x86_32-chroot.tar
+- morph: systems/build-system-x86_32.morph
+ deploy:
+ build-system-x86_32:
+ type: rawdisk
+ location: build-system-x86_32.img
+ DISK_SIZE: 6G
+- morph: systems/build-system-x86_64-chroot.morph
+ deploy:
+ build-system-x86_64-chroot:
+ type: tar
+ location: build-system-x86_64-chroot.tar
+- morph: systems/build-system-x86_64.morph
+ deploy:
+ build-system-x86_64:
+ type: rawdisk
+ location: build-system-x86_64.img
+ DISK_SIZE: 6G
+ INITRAMFS_PATH: boot/initramfs.gz
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ initramfs-build-system-x86_64:
+ type: initramfs
+ location: boot/initramfs.gz
+- morph: systems/build-system-armv7lhf-jetson.morph
+ deploy:
+ build-system-armv7lhf-jetson:
+ type: rawdisk
+ location: build-system-armv7lhf-jetson.img
+ DISK_SIZE: 2G
+ BOOT_DEVICE: "/dev/mmcblk0p1"
+ ROOT_DEVICE: "/dev/mmcblk0p2"
+ DTB_PATH: "boot/tegra124-jetson-tk1.dtb"
+ BOOTLOADER_CONFIG_FORMAT: "extlinux"
+ BOOTLOADER_INSTALL: "none"
+ KERNEL_ARGS: cma=256M console=tty0 console=ttyS0,115200n8 no_console_suspend=1 nouveau.pstate=1
+- morph: systems/genivi-baseline-system-x86_64-generic.morph
+ deploy:
+ genivi-baseline-system-x86_64-generic:
+ type: rawdisk
+ location: genivi-baseline-system-x86_64-generic.img
+ DISK_SIZE: 4G
+ KERNEL_ARGS: vga=788
+ INITRAMFS_PATH: boot/initramfs.gz
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ initramfs-genivi-baseline-system-x86_64-generic:
+ type: initramfs
+ location: boot/initramfs.gz
+- morph: systems/genivi-baseline-system-armv7lhf-jetson.morph
+ deploy:
+ genivi-baseline-system-armv7lhf-jetson:
+ type: rawdisk
+ location: genivi-baseline-system-armv7lhf-jetson.img
+ DISK_SIZE: 4G
+ BOOT_DEVICE: "/dev/mmcblk0p1"
+ ROOT_DEVICE: "/dev/mmcblk0p2"
+ DTB_PATH: "boot/tegra124-jetson-tk1.dtb"
+ BOOTLOADER_CONFIG_FORMAT: "extlinux"
+ BOOTLOADER_INSTALL: "none"
+ KERNEL_ARGS: cma=256M console=tty0 console=ttyS0,115200n8 no_console_suspend=1 nouveau.pstate=1
diff --git a/clusters/sdk-example-cluster.morph b/clusters/sdk-example-cluster.morph
new file mode 100644
index 00000000..92e4a413
--- /dev/null
+++ b/clusters/sdk-example-cluster.morph
@@ -0,0 +1,46 @@
+name: sdk
+kind: cluster
+description: |
+ An example of creating a cross-compile SDK for an embedded Baserock system.
+
+ This cluster demonstrates how you can use the 'sdk' write extension to
+ produce a cross-compile SDK tarball for a Baserock appliance. In this
+ example the system is assumed to run on ARMv7, and the SDK is built to
+ run on any x86_64 GNU/Linux system.
+
+ The SDK is a Baserock system itself, containing just 'build-essential' and a
+ 'cross-toolchain' stratum. The SDK system also includes the target
+ appliance's system, as a 'subsystem', so that the libraries and headers are
+ available when building.
+
+ This cluster deploys the SDK system using the 'sdk' write extension, which
+ produces a tarball with a small shell header. When the shell header is
+ executed, and passed a directory name on the commandline, it extracts the SDK
+ to that path and patches the binaries so that they execute correctly from
+ that directory.
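+
+ For example (an illustrative invocation; the target directory is your
+ choice):
+
+ sh armv7lhf-cross-toolchain-system-x86_64.sh /opt/baserock-sdk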
+
+ Deploying the appliance system artifact to the target device should be
+ done with a separate cluster morphology, because you will often want to
+ do this without rebuilding the SDK.
+
+ You must build each system with `morph build` before deploying. We recommend
+ doing this all from your Baserock development machine, using a Baserock
+ ARM distributed build network to produce the system artifact. Once both
+ system artifacts are cached locally, the `morph deploy` command will produce
+ a self-extracting shell script/tarball at the path given by the 'location' field.
+
+ See the documentation of the sdk.write extension for more information.
+systems:
+- morph: systems/armv7lhf-cross-toolchain-system-x86_64.morph
+ deploy:
+ sdk:
+ type: sdk
+ location: armv7lhf-cross-toolchain-system-x86_64.sh
+ PREFIX: /usr
+ TARGET: armv7lhf-baserock-linux-gnueabi
+ subsystems:
+ - morph: systems/devel-system-armv7lhf-highbank.morph
+ deploy:
+ sysroot:
+ type: sysroot
+ location: usr/armv7lhf-baserock-linux-gnueabi/sys-root
diff --git a/clusters/trove-example.morph b/clusters/trove-example.morph
new file mode 100644
index 00000000..2812f60e
--- /dev/null
+++ b/clusters/trove-example.morph
@@ -0,0 +1,58 @@
+name: trove-example
+kind: cluster
+description: |
+ This is an example cluster morphology for deploying a Trove,
+ both the initial deployment and an upgrade.
+
+ You need some ssh keys, which you can create like this:
+
+ mkdir ssh_keys
+ ssh-keygen -N '' -f ssh_keys/lorry.key
+ ssh-keygen -N '' -f ssh_keys/worker.key
+ ssh-keygen -N '' -f ssh_keys/trove-admin.key
+
+ You may also put in your own keys instead of creating new ones.
+
+ To do the initial deployment:
+
+ morph deploy clusters/trove-example.morph \
+ initial \
+ initial.location=kvm+ssh://liw@192.168.122.1/test-trove/tmp/test-trove.img
+
+ To do an upgrade:
+
+ morph deploy clusters/trove-example.morph \
+ upgrade upgrade.VERSION_LABEL=123
+
+ where `VERSION_LABEL` gets a new unique value each time.
+
+ Remember to always specify either initial or upgrade as the
+ deployment name to use, otherwise morph will attempt to deploy both.
+
+ You can find documentation for Trove at the following web address:
+ http://wiki.baserock.org/Trove/
+systems:
+- morph: systems/trove-system-x86_64.morph
+ deploy-defaults:
+ HOSTNAME: test-trove
+ VCPUS: 2
+ RAM_SIZE: 2G
+ DISK_SIZE: 8G
+ LORRY_SSH_KEY: ssh_keys/lorry.key
+ WORKER_SSH_PUBKEY: ssh_keys/worker.key.pub
+ TROVE_ADMIN_EMAIL: adminuser@example.com
+ TROVE_ADMIN_NAME: Nobody
+ TROVE_ADMIN_SSH_PUBKEY: ssh_keys/trove-admin.key.pub
+ TROVE_ADMIN_USER: adminuser
+ TROVE_COMPANY: Company name goes here
+ TROVE_HOST: test-trove
+ TROVE_ID: test-trove
+ UPSTREAM_TROVE: ''
+ deploy:
+ initial:
+ type: kvm
+ location: kvm+ssh://vm-user@vm-host/test-trove/vm-path/test-trove.img
+ VERSION_LABEL: 1
+ upgrade:
+ type: ssh-rsync
+ location: test-trove
diff --git a/clusters/trove.baserock.org-upgrade.morph b/clusters/trove.baserock.org-upgrade.morph
new file mode 100644
index 00000000..eaf939e1
--- /dev/null
+++ b/clusters/trove.baserock.org-upgrade.morph
@@ -0,0 +1,23 @@
+name: trove.baserock.org-upgrade
+kind: cluster
+description: |
+ This is a cluster morphology for deploying an UPGRADE to
+ git.baserock.org. It doesn't work for the initial deployment. The
+ deployer MUST have ssh access to root@git.baserock.org. To use:
+
+ morph deploy --upgrade trove.baserock.org-upgrade gbo.VERSION_LABEL=2014-05-29
+
+ Replace the value of gbo.VERSION_LABEL above with the current date.
+ You can add letters if you need to upgrade multiple times in a day.
+systems:
+- morph: systems/trove-system-x86_64.morph
+ deploy:
+ gbo:
+ type: ssh-rsync
+ location: root@git.baserock.org
+ FSTAB_HOME: LABEL=homes /home auto defaults,noatime,rw 0 2
+ HOSTNAME: firehose1
+ LORRY_CONTROLLER_MINIONS: 4
+ TROVE_COMPANY: Baserock
+ TROVE_HOSTNAME: git.baserock.org
+ TROVE_ID: baserock
diff --git a/clusters/upgrade-devel.morph b/clusters/upgrade-devel.morph
new file mode 100644
index 00000000..b7ce9bc0
--- /dev/null
+++ b/clusters/upgrade-devel.morph
@@ -0,0 +1,39 @@
+name: upgrade-devel
+kind: cluster
+description: |
+ This is a cluster morphology that can be used to deploy systems to
+ an existing Baserock devel system, as an upgrade of the running system.
+
+ This method is for users who deployed a system manually from one of the
+ images provided on http://download.baserock.org. IT IS ONLY POSSIBLE TO
+ UPGRADE BASEROCK 14 RELEASES OR NEWER.
+
+ If you deployed your system using `morph deploy` then you should reuse the
+ cluster morphology you did the initial deployment with, instead of this one,
+ so that the configuration is preserved in the new system.
+
+ Ensure that your root user has passwordless SSH access to localhost with
+ `ssh root@localhost whoami`. If not, run `ssh-copy-id root@localhost`.
+ Make sure the 'morph' field below matches the system you are upgrading.
+
+ To upgrade, select a sensible value for VERSION_LABEL and run:
+
+ morph deploy --upgrade upgrade-devel.morph self.HOSTNAME=$(hostname) self.VERSION_LABEL=$VERSION_LABEL
+
+ Your configuration in /etc should be propagated to the new system, but there
+ may be merge conflicts. Check /etc for files named '.rej' and '.orig' in the
+ new system, which will indicate that there are changes from the old system
+ that must be merged manually. You can get a nice diff from the old /etc as
+ follows:
+
+ mount /dev/sda /mnt
+ git diff --no-index /mnt/systems/factory/run/etc /mnt/systems/$VERSION_LABEL/run/etc
+
+ On a base system, use 'diff -r' instead of 'git diff --no-index'. It will
+ not be as colourful.
+systems:
+- morph: systems/devel-system-x86_64-generic.morph
+ deploy:
+ self:
+ type: ssh-rsync
+ location: root@127.0.0.1
diff --git a/clusters/weston-system-x86_64-generic-deploy.morph b/clusters/weston-system-x86_64-generic-deploy.morph
new file mode 100644
index 00000000..3a6f29ef
--- /dev/null
+++ b/clusters/weston-system-x86_64-generic-deploy.morph
@@ -0,0 +1,23 @@
+name: weston-system-x86_64-generic-deploy
+kind: cluster
+description: |
+ Deploy a stock weston system.
+
+ The resulting image can be copied to a USB drive and booted from there,
+ or booted in a virtual machine.
+
+systems:
+- morph: systems/weston-system-x86_64-generic.morph
+ deploy:
+ weston-system-x86_64-generic:
+ type: rawdisk
+ location: /weston-system-x86_64-generic.img
+ DISK_SIZE: 4G
+ KERNEL_ARGS: vga=788
+ INITRAMFS_PATH: boot/initramfs.gz
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ initramfs:
+ type: initramfs
+ location: boot/initramfs.gz
diff --git a/clusters/zookeeper.morph b/clusters/zookeeper.morph
new file mode 100644
index 00000000..1153d4b0
--- /dev/null
+++ b/clusters/zookeeper.morph
@@ -0,0 +1,21 @@
+name: zookeeper
+kind: cluster
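+# Example cluster that deploys the zookeeper client and server systems as
+# KVM virtual machines. The username, HOSTNAME, machinename and image paths
+# in the 'location' fields below are placeholders for your own KVM host.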
+systems:
+ - morph: systems/zookeeper-client-x86_64.morph
+ deploy:
+ my-client-system:
+ type: kvm
+ location: kvm+ssh://username@HOSTNAME/machinename/path/to/zookeeper-client.img
+ DISK_SIZE: 4G
+ RAM_SIZE: 1G
+ VCPUS: 1
+ HOSTNAME: zkclient
+ - morph: systems/zookeeper-server-x86_64.morph
+ deploy:
+ my-server-system:
+ type: kvm
+ location: kvm+ssh://username@HOSTNAME/machinename/path/to/zookeeper-server.img
+ DISK_SIZE: 4G
+ RAM_SIZE: 1G
+ VCPUS: 1
+ HOSTNAME: zkserver
diff --git a/distbuild.configure b/distbuild.configure
new file mode 100644
index 00000000..062aaecc
--- /dev/null
+++ b/distbuild.configure
@@ -0,0 +1,132 @@
+#!/bin/sh
+# Copyright (C) 2013-2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configure extension to configure a Baserock
+# build node, as part of a distributed building cluster. It uses the
+# following variables from the environment:
+#
+# * DISTBUILD_CONTROLLER: if 'yes', machine is set up as the controller.
+# * DISTBUILD_WORKER: if 'yes', machine is set up as a worker.
+# * TROVE_ID: hostname and Trove prefix of the server to pull source
+# from and push built artifacts to.
+# * TROVE_HOST: FQDN of the same server as in TROVE_ID
+#
+# The following variable is optional:
+#
+# * ARTIFACT_CACHE_SERVER: by default artifacts are pushed to the same
+# Trove that served the source, but you can use a different one.
+#
+# The following variables are required for worker nodes only:
+#
+# * CONTROLLERHOST: hostname or IP address of distbuild controller machine.
+# * WORKER_SSH_KEY: identity used to authenticate with Trove
+#
+# The following variable is required for the controller node only:
+#
+# * WORKERS: hostnames or IP addresses of worker nodes, comma-separated.
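+#
+# As an illustration only (the values below are examples, not defaults),
+# a worker node might be deployed with:
+#
+# DISTBUILD_WORKER: yes
+# TROVE_ID: my-trove
+# TROVE_HOST: trove.example.com
+# CONTROLLERHOST: controller.example.com
+# WORKER_SSH_KEY: ssh_keys/worker.key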
+
+set -e
+
+if [ -n "$DISTBUILD_GENERIC" ]; then
+ echo "Not configuring the distbuild node, it will be generic"
+ exit 0
+fi
+
+# Set default values for these two options if they are unset, so that if the
+# user specifies no distbuild config at all the configure extension exits
+# without doing anything but does not raise an error.
+DISTBUILD_CONTROLLER=${DISTBUILD_CONTROLLER-False}
+DISTBUILD_WORKER=${DISTBUILD_WORKER-False}
+
+if [ "$DISTBUILD_CONTROLLER" = False -a "$DISTBUILD_WORKER" = False ]; then
+ exit 0
+fi
+
+set -u
+
+# Check that all the variables needed are present:
+
+error_vars=false
+
+if [ "x$TROVE_HOST" = "x" ]; then
+ echo "ERROR: TROVE_HOST needs to be defined."
+ error_vars=true
+fi
+
+if [ "x$TROVE_ID" = "x" ]; then
+ echo "ERROR: TROVE_ID needs to be defined."
+ error_vars=true
+fi
+
+if [ "$DISTBUILD_WORKER" = True ]; then
+ if ! ssh-keygen -lf "$WORKER_SSH_KEY" > /dev/null 2>&1; then
+ echo "ERROR: WORKER_SSH_KEY is not a vaild ssh key."
+ error_vars=true
+ fi
+
+ if [ "x$CONTROLLERHOST" = "x" ]; then
+ echo "ERROR: CONTROLLERHOST needs to be defined."
+ error_vars=true
+ fi
+fi
+
+if [ "$DISTBUILD_CONTROLLER" = True ]; then
+ if [ "x$WORKERS" = "x" ]; then
+ echo "ERROR: WORKERS needs to be defined."
+ error_vars=true
+ fi
+fi
+
+if "$error_vars"; then
+ exit 1
+fi
+
+
+ROOT="$1"
+
+DISTBUILD_DATA="$ROOT/etc/distbuild"
+mkdir -p "$DISTBUILD_DATA"
+
+# If it's a worker, install the worker ssh key.
+if [ "$DISTBUILD_WORKER" = True ]
+then
+ install -m 0644 "$WORKER_SSH_KEY" "$DISTBUILD_DATA/worker.key"
+fi
+
+
+
+# Create the configuration file
+python <<'EOF' >"$DISTBUILD_DATA/distbuild.conf"
+import os, sys, yaml
+
+distbuild_configuration={
+ 'TROVE_ID': os.environ['TROVE_ID'],
+ 'TROVE_HOST': os.environ['TROVE_HOST'],
+ 'DISTBUILD_WORKER': os.environ['DISTBUILD_WORKER'],
+ 'DISTBUILD_CONTROLLER': os.environ['DISTBUILD_CONTROLLER'],
+ 'WORKER_SSH_KEY': '/etc/distbuild/worker.key',
+}
+
+
+optional_keys = ('ARTIFACT_CACHE_SERVER', 'CONTROLLERHOST', 'WORKERS',
+ 'TROVE_BACKUP_KEYS')
+
+for key in optional_keys:
+ if key in os.environ:
+ distbuild_configuration[key] = os.environ[key]
+
+yaml.dump(distbuild_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/distbuild/lib/systemd/system-generators/ccache-nfs-mount-generator b/distbuild/lib/systemd/system-generators/ccache-nfs-mount-generator
new file mode 100755
index 00000000..127bc84f
--- /dev/null
+++ b/distbuild/lib/systemd/system-generators/ccache-nfs-mount-generator
@@ -0,0 +1,16 @@
+#!/bin/sh
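+# systemd generator: read the Trove hostname from /etc/trove-host and write
+# a srv-distbuild-ccache.mount unit into the output directory given as $1,
+# so that the shared ccache on the Trove is NFS-mounted for
+# morph-worker.service.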
+read trove_host </etc/trove-host
+while read line; do echo "$line"; done >"$1/srv-distbuild-ccache.mount" <<EOF
+[Unit]
+Requires=rpcbind.service
+After=rpcbind.service
+SourcePath=/etc/trove-host
+
+[Mount]
+Type=nfs
+What=$trove_host:/home/cache/ccache
+Where=/srv/distbuild/ccache
+
+[Install]
+WantedBy=morph-worker.service
+EOF
diff --git a/distbuild/manifest b/distbuild/manifest
new file mode 100644
index 00000000..9363fa85
--- /dev/null
+++ b/distbuild/manifest
@@ -0,0 +1,28 @@
+0040755 0 0 /lib
+0040755 0 0 /lib/systemd
+0040755 0 0 /lib/systemd/system-generators
+0100755 0 0 /lib/systemd/system-generators/ccache-nfs-mount-generator
+0040755 0 0 /usr
+0040755 0 0 /usr/lib
+0040755 0 0 /usr/lib/distbuild-setup
+0040755 0 0 /usr/lib/distbuild-setup/ansible
+0100644 0 0 /usr/lib/distbuild-setup/ansible/hosts
+0100644 0 0 /usr/lib/distbuild-setup/ansible/distbuild-setup.yml
+0040755 0 0 /usr/lib/systemd
+0040755 0 0 /usr/lib/systemd/system
+0100644 0 0 /usr/lib/systemd/system/morph-cache-server.service
+0100644 0 0 /usr/lib/systemd/system/morph-controller.service
+0100644 0 0 /usr/lib/systemd/system/morph-controller-helper.service
+0100644 0 0 /usr/lib/systemd/system/morph-worker.service
+0100644 0 0 /usr/lib/systemd/system/morph-worker-helper.service
+0100644 0 0 /usr/lib/systemd/system/distbuild-setup.service
+0040755 0 0 /usr/lib/systemd/system/multi-user.target.wants
+0120644 0 0 /usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service
+0040755 0 0 /usr/share
+0040755 0 0 /usr/share/distbuild-setup
+0100644 0 0 /usr/share/distbuild-setup/morph.conf
+0100644 0 0 /usr/share/distbuild-setup/morph-cache-server.conf
+0100644 0 0 /usr/share/distbuild-setup/morph-controller.conf
+0100644 0 0 /usr/share/distbuild-setup/morph-controller-helper.conf
+0100644 0 0 /usr/share/distbuild-setup/morph-worker.conf
+0100644 0 0 /usr/share/distbuild-setup/morph-worker-helper.conf
diff --git a/distbuild/usr/lib/distbuild-setup/ansible/distbuild-setup.yml b/distbuild/usr/lib/distbuild-setup/ansible/distbuild-setup.yml
new file mode 100644
index 00000000..c3074c63
--- /dev/null
+++ b/distbuild/usr/lib/distbuild-setup/ansible/distbuild-setup.yml
@@ -0,0 +1,115 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/distbuild/distbuild.conf"
+ tasks:
+
+ - set_fact: ARTIFACT_CACHE_SERVER={{ TROVE_HOST }}
+ when: ARTIFACT_CACHE_SERVER is not defined
+
+ - name: Create mountpoint for extra disk space /srv/distbuild/
+ file: path=/srv/distbuild state=directory owner=root group=root mode=0755
+
+ - name: Create the morph and morph-cache-server configuration files
+ template: src=/usr/share/distbuild-setup/{{ item }} dest=/etc/{{ item }}
+ with_items:
+ - morph.conf
+ - morph-cache-server.conf
+
+ - name: Link the morph log file
+ file: src=/srv/distbuild/morph.log dest=/var/log/morph.log state=link force=yes
+
+ - name: Create the controller configuration files
+ template: src=/usr/share/distbuild-setup/{{ item }} dest=/etc/{{ item }}
+ with_items:
+ - morph-controller.conf
+ - morph-controller-helper.conf
+ when: DISTBUILD_CONTROLLER
+
+ - name: Link the controller log files
+ file: src=/srv/distbuild/{{ item }} dest=/var/log/{{ item }} state=link force=yes
+ with_items:
+ - morph-controller.log
+ - morph-controller-helper.log
+ when: DISTBUILD_CONTROLLER
+
+ - name: Create the worker configuration files
+ template: src=/usr/share/distbuild-setup/{{ item }} dest=/etc/{{ item }}
+ with_items:
+ - morph-worker.conf
+ - morph-worker-helper.conf
+ when: DISTBUILD_WORKER
+
+ - name: Link the worker log files
+ file: src=/srv/distbuild/{{ item }} dest=/var/log/{{ item }} state=link force=yes
+ with_items:
+ - morph-worker.log
+ - morph-worker-helper.log
+ when: DISTBUILD_WORKER
+
+ - name: Create /root/.ssh directory
+ file: path=/root/.ssh state=directory owner=root group=root mode=0700
+
+ - name: Copy the worker ssh key
+ copy: src={{ WORKER_SSH_KEY }} dest=/root/.ssh/id_rsa owner=root group=root mode=0600
+
+ - name: Create ssh public key
+ shell: ssh-keygen -y -f /root/.ssh/id_rsa > /root/.ssh/id_rsa.pub creates=/root/.ssh/id_rsa.pub
+
+ - name: Add trove's host key
+ shell: |
+ trove_key="$(ssh-keyscan -t dsa,ecdsa,rsa {{ TROVE_HOST|quote }})"
+ if [ -n "$trove_key" ]; then
+ echo "$trove_key" > /etc/ssh/ssh_known_hosts
+ fi
+ creates=/etc/ssh/ssh_known_hosts
+
+ # This is a kludge. We can add the host key for the TROVE_HOST that was
+ # specified, but users may access the Trove by other names, e.g. IP address
+ # or domain name. Distbuild is currently not safe to run except on a private
+ # network where host key checking is not important, so we disable it by
+ # default to avoid errors when users don't stick to using the exact same
+ # TROVE_HOST in repo URLs.
+ - name: Disable strict SSH host key checking
+ lineinfile:
+ dest: /etc/ssh/ssh_config
+ line: StrictHostKeyChecking no
+
+ - name: Enable the morph-cache-server service
+ service: name=morph-cache-server.service enabled=yes
+ register: morph_cache_server_service
+ - name: Restart the morph-cache-server service
+ service: name=morph-cache-server state=restarted
+ when: morph_cache_server_service|changed
+
+ - name: Enable the morph-worker service
+ service: name=morph-worker.service enabled=yes
+ register: morph_worker_service
+ when: DISTBUILD_WORKER
+ - name: Restart the morph-worker service
+ service: name=morph-worker state=restarted
+ when: morph_worker_service|changed
+
+ - name: Enable the morph-worker-helper service
+ service: name=morph-worker-helper.service enabled=yes
+ register: morph_worker_helper_service
+ when: DISTBUILD_WORKER
+ - name: Restart the morph-worker-helper service
+ service: name=morph-worker-helper state=restarted
+ when: morph_worker_helper_service|changed
+
+ - name: Enable the morph-controller service
+ service: name=morph-controller.service enabled=yes
+ register: morph_controller_service
+ when: DISTBUILD_CONTROLLER
+ - name: Restart the morph-controller service
+ service: name=morph-controller state=restarted
+ when: morph_controller_service|changed
+
+ - name: Enable the morph-controller-helper service
+ service: name=morph-controller-helper.service enabled=yes
+ register: morph_controller_helper_service
+ when: DISTBUILD_CONTROLLER
+ - name: Restart the morph-controller-helper service
+ service: name=morph-controller-helper state=restarted
+ when: morph_controller_helper_service|changed
diff --git a/distbuild/usr/lib/distbuild-setup/ansible/hosts b/distbuild/usr/lib/distbuild-setup/ansible/hosts
new file mode 100644
index 00000000..5b97818d
--- /dev/null
+++ b/distbuild/usr/lib/distbuild-setup/ansible/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local
diff --git a/distbuild/usr/lib/systemd/system/distbuild-setup.service b/distbuild/usr/lib/systemd/system/distbuild-setup.service
new file mode 100644
index 00000000..ec5f5a2d
--- /dev/null
+++ b/distbuild/usr/lib/systemd/system/distbuild-setup.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Run distbuild-setup Ansible scripts
+Requires=network.target
+After=network.target
+Requires=opensshd.service
+After=opensshd.service
+
+# If there's a shared /var subvolume, it must be mounted before this
+# unit runs.
+Requires=local-fs.target
+After=local-fs.target
+
+ConditionPathExists=/etc/distbuild/distbuild.conf
+
+[Service]
+ExecStart=/usr/bin/ansible-playbook -v -i /usr/lib/distbuild-setup/ansible/hosts /usr/lib/distbuild-setup/ansible/distbuild-setup.yml
diff --git a/distbuild/usr/lib/systemd/system/morph-cache-server.service b/distbuild/usr/lib/systemd/system/morph-cache-server.service
new file mode 100644
index 00000000..f55f3b6d
--- /dev/null
+++ b/distbuild/usr/lib/systemd/system/morph-cache-server.service
@@ -0,0 +1,12 @@
+[Install]
+WantedBy=multi-user.target
+
+[Unit]
+Description=Morph cache server
+Requires=local-fs.target network.target
+After=local-fs.target network.target
+ConditionPathExists=/etc/morph-cache-server.conf
+
+[Service]
+ExecStart=/usr/bin/morph-cache-server
+Restart=always
diff --git a/distbuild/usr/lib/systemd/system/morph-controller-helper.service b/distbuild/usr/lib/systemd/system/morph-controller-helper.service
new file mode 100644
index 00000000..3f30cbcf
--- /dev/null
+++ b/distbuild/usr/lib/systemd/system/morph-controller-helper.service
@@ -0,0 +1,13 @@
+[Install]
+WantedBy=multi-user.target
+
+[Unit]
+Description=Morph distributed build controller helper
+Requires=morph-controller.service
+After=morph-controller.service
+ConditionPathExists=/etc/morph-controller.conf
+ConditionPathExists=/etc/morph-controller-helper.conf
+
+[Service]
+ExecStart=/usr/bin/distbuild-helper --config /etc/morph-controller-helper.conf
+Restart=always
diff --git a/distbuild/usr/lib/systemd/system/morph-controller.service b/distbuild/usr/lib/systemd/system/morph-controller.service
new file mode 100644
index 00000000..1556d232
--- /dev/null
+++ b/distbuild/usr/lib/systemd/system/morph-controller.service
@@ -0,0 +1,12 @@
+[Install]
+WantedBy=multi-user.target
+
+[Unit]
+Description=Morph distributed build controller
+Requires=local-fs.target network.target
+After=local-fs.target network.target
+ConditionPathExists=/etc/morph-controller.conf
+
+[Service]
+ExecStart=/usr/bin/morph controller-daemon --config /etc/morph-controller.conf
+Restart=always
diff --git a/distbuild/usr/lib/systemd/system/morph-worker-helper.service b/distbuild/usr/lib/systemd/system/morph-worker-helper.service
new file mode 100644
index 00000000..28400701
--- /dev/null
+++ b/distbuild/usr/lib/systemd/system/morph-worker-helper.service
@@ -0,0 +1,13 @@
+[Install]
+WantedBy=multi-user.target
+
+[Unit]
+Description=Morph distributed build worker helper
+Requires=morph-worker.service
+After=morph-worker.service
+ConditionPathExists=/etc/morph-worker.conf
+ConditionPathExists=/etc/morph-worker-helper.conf
+
+[Service]
+ExecStart=/usr/bin/distbuild-helper --config /etc/morph-worker-helper.conf
+Restart=always
diff --git a/distbuild/usr/lib/systemd/system/morph-worker.service b/distbuild/usr/lib/systemd/system/morph-worker.service
new file mode 100644
index 00000000..90fea404
--- /dev/null
+++ b/distbuild/usr/lib/systemd/system/morph-worker.service
@@ -0,0 +1,13 @@
+[Install]
+WantedBy=multi-user.target
+
+[Unit]
+Description=Morph distributed build worker
+Requires=local-fs.target network.target
+Wants=srv-distbuild-ccache.mount
+After=local-fs.target network.target srv-distbuild-ccache.mount
+ConditionPathExists=/etc/morph-worker.conf
+
+[Service]
+ExecStart=/usr/bin/morph worker-daemon --config /etc/morph-worker.conf
+Restart=always
diff --git a/distbuild/usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service b/distbuild/usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service
new file mode 120000
index 00000000..8f06febd
--- /dev/null
+++ b/distbuild/usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service
@@ -0,0 +1 @@
+../distbuild-setup.service \ No newline at end of file
diff --git a/distbuild/usr/share/distbuild-setup/morph-cache-server.conf b/distbuild/usr/share/distbuild-setup/morph-cache-server.conf
new file mode 100644
index 00000000..b9020e7d
--- /dev/null
+++ b/distbuild/usr/share/distbuild-setup/morph-cache-server.conf
@@ -0,0 +1,5 @@
+[config]
+port = 8080
+artifact-dir = /srv/distbuild/artifacts
+direct-mode = True
+fcgi-server = False
diff --git a/distbuild/usr/share/distbuild-setup/morph-controller-helper.conf b/distbuild/usr/share/distbuild-setup/morph-controller-helper.conf
new file mode 100644
index 00000000..99d38739
--- /dev/null
+++ b/distbuild/usr/share/distbuild-setup/morph-controller-helper.conf
@@ -0,0 +1,5 @@
+[config]
+log = /srv/distbuild/morph-controller-helper.log
+log-max = 100M
+parent-port = 5656
+parent-address = 127.0.0.1
diff --git a/distbuild/usr/share/distbuild-setup/morph-controller.conf b/distbuild/usr/share/distbuild-setup/morph-controller.conf
new file mode 100644
index 00000000..c16c0343
--- /dev/null
+++ b/distbuild/usr/share/distbuild-setup/morph-controller.conf
@@ -0,0 +1,6 @@
+[config]
+log = /srv/distbuild/morph-controller.log
+log-max = 100M
+writeable-cache-server = http://{{ ARTIFACT_CACHE_SERVER }}:8081/
+worker = {{ WORKERS }}
+controller-helper-address = 127.0.0.1
diff --git a/distbuild/usr/share/distbuild-setup/morph-worker-helper.conf b/distbuild/usr/share/distbuild-setup/morph-worker-helper.conf
new file mode 100644
index 00000000..29d4ef3f
--- /dev/null
+++ b/distbuild/usr/share/distbuild-setup/morph-worker-helper.conf
@@ -0,0 +1,4 @@
+[config]
+log = /srv/distbuild/morph-worker-helper.log
+log-max = 100M
+parent-address = 127.0.0.1
diff --git a/distbuild/usr/share/distbuild-setup/morph-worker.conf b/distbuild/usr/share/distbuild-setup/morph-worker.conf
new file mode 100644
index 00000000..fb382bad
--- /dev/null
+++ b/distbuild/usr/share/distbuild-setup/morph-worker.conf
@@ -0,0 +1,4 @@
+[config]
+log = /srv/distbuild/morph-worker.log
+log-max = 100M
+controller-initiator-address =
diff --git a/distbuild/usr/share/distbuild-setup/morph.conf b/distbuild/usr/share/distbuild-setup/morph.conf
new file mode 100644
index 00000000..29de684c
--- /dev/null
+++ b/distbuild/usr/share/distbuild-setup/morph.conf
@@ -0,0 +1,13 @@
+[config]
+log = /srv/distbuild/morph.log
+log-max = 100M
+cachedir = /srv/distbuild
+tempdir = /srv/distbuild/tmp
+trove-host = {{ TROVE_HOST }}
+trove-id = {{ TROVE_ID }}
+controller-initiator-address = {{ CONTROLLERHOST }}
+tempdir-min-space = 4G
+cachedir-min-space = 4G
+build-ref-prefix = {{ TROVE_ID }}
+artifact-cache-server = http://{{ ARTIFACT_CACHE_SERVER }}:8080/
+git-resolve-cache-server = http://{{ TROVE_HOST }}:8080/
diff --git a/essential-files/etc/inputrc b/essential-files/etc/inputrc
new file mode 100644
index 00000000..ddee44cd
--- /dev/null
+++ b/essential-files/etc/inputrc
@@ -0,0 +1,38 @@
+# Allow the command prompt to wrap to the next line
+set horizontal-scroll-mode Off
+
+# Enable 8bit input
+set meta-flag On
+set input-meta On
+
+# Turns off 8th bit stripping
+set convert-meta Off
+
+# Keep the 8th bit for display
+set output-meta On
+
+# none, visible or audible
+set bell-style none
+
+# for linux console and RH/Debian xterm
+"\e[1~": beginning-of-line
+"\e[4~": end-of-line
+"\e[5~": beginning-of-history
+"\e[6~": end-of-history
+"\e[7~": beginning-of-line
+"\e[3~": delete-char
+"\e[2~": quoted-insert
+"\e[5C": forward-word
+"\e[5D": backward-word
+"\e\e[C": forward-word
+"\e\e[D": backward-word
+"\e[1;5C": forward-word
+"\e[1;5D": backward-word
+
+# for non-RH/Debian xterm; can't hurt for RH/Debian xterm
+"\eOH": beginning-of-line
+"\eOF": end-of-line
+
+# for Konsole and freebsd console
+"\e[H": beginning-of-line
+"\e[F": end-of-line
diff --git a/essential-files/etc/os-release b/essential-files/etc/os-release
new file mode 100644
index 00000000..b729c75f
--- /dev/null
+++ b/essential-files/etc/os-release
@@ -0,0 +1,5 @@
+NAME="Baserock"
+ID=baserock
+HOME_URL="http://wiki.baserock.org"
+SUPPORT_URL="http://wiki.baserock.org/mailinglist"
+BUG_REPORT_URL="http://wiki.baserock.org/mailinglist"
diff --git a/essential-files/etc/profile b/essential-files/etc/profile
new file mode 100644
index 00000000..b306a132
--- /dev/null
+++ b/essential-files/etc/profile
@@ -0,0 +1,13 @@
+# /etc/profile
+
+# Set our default path
+PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+export PATH
+
+# Source global bash config
+if test "$PS1" && test "$BASH" && test -r /etc/bash.bashrc; then
+ . /etc/bash.bashrc
+fi
+
+# Set the default man pager to less
+export MANPAGER='less -R'
diff --git a/essential-files/manifest b/essential-files/manifest
new file mode 100644
index 00000000..2b77c237
--- /dev/null
+++ b/essential-files/manifest
@@ -0,0 +1,8 @@
+0040755 0 0 /etc
+overwrite 0100644 0 0 /etc/os-release
+overwrite 0100644 0 0 /etc/profile
+overwrite 0100644 0 0 /etc/inputrc
+0040755 0 0 /usr
+0040755 0 0 /usr/lib
+0040755 0 0 /usr/lib/tmpfiles.d
+0100644 0 0 /usr/lib/tmpfiles.d/shutdownramfs.conf
diff --git a/essential-files/usr/lib/tmpfiles.d/shutdownramfs.conf b/essential-files/usr/lib/tmpfiles.d/shutdownramfs.conf
new file mode 100644
index 00000000..174f1f03
--- /dev/null
+++ b/essential-files/usr/lib/tmpfiles.d/shutdownramfs.conf
@@ -0,0 +1,4 @@
+# If /usr/lib/shutdownramfs has been populated, copy it into /run/initramfs so
+# /run/initramfs/shutdown will be executed on shut-down, so that it may unmount
+# the rootfs.
+C /run/initramfs - - - - /usr/lib/shutdownramfs
diff --git a/genivi-devel-system-armv7/etc/morph.conf b/genivi-devel-system-armv7/etc/morph.conf
new file mode 120000
index 00000000..8f384049
--- /dev/null
+++ b/genivi-devel-system-armv7/etc/morph.conf
@@ -0,0 +1 @@
+/src/morph.conf \ No newline at end of file
diff --git a/genivi-devel-system-armv7/manifest b/genivi-devel-system-armv7/manifest
new file mode 100644
index 00000000..31980633
--- /dev/null
+++ b/genivi-devel-system-armv7/manifest
@@ -0,0 +1,5 @@
+0040755 0 0 /src
+0040755 0 0 /src/tmp
+0100666 0 0 /src/morph.conf
+0040755 0 0 /etc
+0120666 0 0 /etc/morph.conf
diff --git a/genivi-devel-system-armv7/src/morph.conf b/genivi-devel-system-armv7/src/morph.conf
new file mode 100644
index 00000000..76b6fde9
--- /dev/null
+++ b/genivi-devel-system-armv7/src/morph.conf
@@ -0,0 +1,5 @@
+[config]
+log = /src/morph.log
+cachedir = /src/cache
+tempdir = /src/tmp
+staging-chroot = true
diff --git a/hosts b/hosts
new file mode 100644
index 00000000..5b97818d
--- /dev/null
+++ b/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local
diff --git a/image-package-example/README b/image-package-example/README
new file mode 100644
index 00000000..c1322f25
--- /dev/null
+++ b/image-package-example/README
@@ -0,0 +1,9 @@
+Image package example scripts
+=============================
+
+These are scripts used to create disk images or install the system onto
+an existing disk.
+
+This is also implemented independently for the rawdisk.write write
+extension; see morphlib.writeexts.WriteExtension.create_local_system()
+for a similar implementation in Python.
diff --git a/image-package-example/common.sh.in b/image-package-example/common.sh.in
new file mode 100644
index 00000000..9a7389a7
--- /dev/null
+++ b/image-package-example/common.sh.in
@@ -0,0 +1,72 @@
+#!/bin/false
+# Script library to be used by disk-install.sh and make-disk-image.sh
+
+status(){
+ echo "$@"
+}
+
+info(){
+ echo "$@" >&2
+}
+
+warn(){
+ echo "$@" >&2
+}
+
+extract_rootfs(){
+ tar -C "$1" -xf @@ROOTFS_TAR_PATH@@ .
+}
+
+make_disk_image(){
+ truncate --size "$1" "$2"
+}
+
+format_disk(){
+ local disk="$1"
+ mkfs.ext4 -F -L rootfs "$disk"
+}
+
+install_fs_config(){
+ local mountpoint="$1"
+ local rootdisk="${2-/dev/vda}"
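+ # Write an fstab entry and an extlinux.conf so that the bootloader,
+ # kernel and userland all agree on which device holds the rootfs.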
+ cat >>"$mountpoint/etc/fstab" <<EOF
+$rootdisk / ext4 rw,errors=remount-ro 0 0
+EOF
+ install -D -m 644 /proc/self/fd/0 "$mountpoint/boot/extlinux.conf" <<EOF
+DEFAULT baserock
+LABEL baserock
+SAY Booting Baserock
+LINUX /boot/vmlinuz
+APPEND root=$rootdisk
+EOF
+}
+
+install_bootloader(){
+ local disk="$1"
+ local mountpoint="$2"
+ dd if=@@IMAGE_DIR@@/mbr.bin conv=notrunc bs=440 count=1 of="$disk"
+ extlinux --install "$mountpoint/boot"
+}
+
+loop_file(){
+ losetup --show --find "$1"
+}
+unloop_file(){
+ #losetup --detach "$1"
+ # unlooping handled by umount -d, for busybox compatibility
+ true
+}
+
+temp_mount(){
+ local mp="$(mktemp -d)"
+ if ! mount "$@" "$mp"; then
+ rmdir "$mp"
+ return 1
+ fi
+ echo "$mp"
+}
+untemp_mount(){
+ # Unmount and detach in one step for busybox compatibility
+ umount -d "$1"
+ rmdir "$1"
+}
diff --git a/image-package-example/disk-install.sh.in b/image-package-example/disk-install.sh.in
new file mode 100644
index 00000000..bc8e0e67
--- /dev/null
+++ b/image-package-example/disk-install.sh.in
@@ -0,0 +1,51 @@
+#!/bin/sh
+# Script for writing the system to an existing disk.
+# This formats the disk, extracts the rootfs to it, installs the
+# bootloader, and ensures there's appropriate configuration for the
+# bootloader, kernel and userland to agree what the rootfs is.
+
+set -eu
+
+usage(){
+ cat <<EOF
+usage: $0 DISK [TARGET_DISK]
+
+DISK: Where the disk appears on your development machine
+TARGET_DISK: What the disk will appear as on the target machine
+EOF
+}
+
+. @@SCRIPT_DIR@@/common.sh
+
+if [ "$#" -lt 1 -o "$#" -gt 2 ]; then
+ usage
+ exit 1
+fi
+
+DISK="$1"
+TARGET_DISK="${1-/dev/sda}"
+
+status Formatting "$DISK" as ext4
+format_disk "$DISK"
+(
+ info Mounting "$DISK"
+ MP="$(temp_mount -t ext4 "$DISK")"
+ info Mounted "$DISK" to "$MP"
+ set +e
+ (
+ set -e
+ info Copying rootfs onto disk
+ extract_rootfs "$MP"
+ info Configuring disk paths
+ install_fs_config "$MP" "$TARGET_DISK"
+ info Installing bootloader
+ install_bootloader "$DISK" "$MP"
+ )
+ ret="$?"
+ if [ "$ret" != 0 ]; then
+ warn Filling rootfs failed with "$ret"
+ fi
+ info Unmounting "$DISK" from "$MP" and removing "$MP"
+ untemp_mount "$MP"
+ exit "$ret"
+)
diff --git a/image-package-example/make-disk-image.sh.in b/image-package-example/make-disk-image.sh.in
new file mode 100644
index 00000000..61264fa0
--- /dev/null
+++ b/image-package-example/make-disk-image.sh.in
@@ -0,0 +1,36 @@
+#!/bin/sh
+# Script for writing the system to a disk image file.
+# This creates a file of the right size, attaches it to a loop device,
+# then hands the rest of the work off to disk-install.sh
+
+usage(){
+ cat <<EOF
+usage: $0 FILENAME SIZE [TARGET_DISK]
+
+FILENAME: Location to write the disk image to
+SIZE: Size to create the disk image with
+TARGET_DISK: What the disk will appear as on the target machine
+EOF
+}
+
+. @@SCRIPT_DIR@@/common.sh
+
+if [ "$#" -lt 2 -o "$#" -gt 3 ]; then
+ usage
+ exit 1
+fi
+
+DISK_IMAGE="$1"
+DISK_SIZE="$2"
+TARGET_DISK="${3-/dev/vda}"
+
+make_disk_image "$DISK_SIZE" "$DISK_IMAGE"
+
+(
+ LOOP="$(loop_file "$DISK_IMAGE")"
+ set +e
+ @@SCRIPT_DIR@@/disk-install.sh "$LOOP" "$TARGET_DISK"
+ ret="$?"
+ unloop_file "$LOOP"
+ exit "$ret"
+)
diff --git a/image-package.write b/image-package.write
new file mode 100755
index 00000000..15ceadcf
--- /dev/null
+++ b/image-package.write
@@ -0,0 +1,168 @@
+#!/bin/sh
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# =*= License: GPL-2 =*=
+#
+#
+# This is a write extension for making a package that can be used to
+# install the produced system. Ideally we'd instead have Baserock
+# everywhere to do the deployment, but we need to support this workflow
+# until that is possible.
+#
+# This write extension produces a tarball, which contains:
+# - a tarball of the configured system root file system
+# - any supporting files listed in BOOTLOADER_BLOBS
+# - any supporting scripts, generated from templates listed in
+# INCLUDE_SCRIPTS
+#
+# The extension requires the following environment variables to be set:
+#
+# * BOOTLOADER_BLOBS: files to include besides the rootfs tarball;
+# paths are relative to the root of the built rootfs.
+# This works on any kind of file in the rootfs, but is named
+# BOOTLOADER_BLOBS since that's the common use-case.
+# :-separated by default.
+# * INCLUDE_SCRIPTS: script templates that are included in the package
+# after being filled out.
+# File paths are relative to the definitions repository.
+# :-separated by default.
+#
+# The script templates may contain any of the following strings, which
+# will be replaced with a string which will expand to the appropriate
+# value as a shell word:
+# - @@SCRIPT_DIR@@: the path the script files are installed to
+# - @@IMAGE_DIR@@: the path BOOTLOADER_BLOBS are installed to
+# - @@ROOTFS_TAR_PATH@@: path to the rootfs tarball
+#
+# The interpolated strings may run commands dependent on the current
+# working directory, so if `cd` is required, bind these values to a
+# variable beforehand.
+#
+# The following optional variables can be set as well:
+#
+# * INCLUDE_SCRIPTS_SEPARATOR: character to separate INCLUDE_SCRIPTS with (default: :)
+# * BOOTLOADER_BLOBS_SEPARATOR: character to separate BOOTLOADER_BLOBS with (default: :)
+# * SCRIPT_SUBDIR: where in the package processed scripts are installed to (default: tools)
+# * IMAGE_SUBDIR: where in the package BOOTLOADER_BLOBS are copied to (default: image_files)
+# * ROOTFS_TAR: name to call the rootfs tarball inside IMAGE_SUBDIR (default: rootfs.tar)
+# * OUTPUT_COMPRESS: compression used for output tarball (default: none)
+# * ROOTFS_COMPRESS: compression used for rootfs (default: none)
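+#
+# As a purely illustrative example (paths and values are examples, not
+# defaults), a deployment might set:
+#
+# BOOTLOADER_BLOBS: /usr/share/syslinux/mbr.bin
+# INCLUDE_SCRIPTS: image-package-example/common.sh.in:image-package-example/disk-install.sh.in:image-package-example/make-disk-image.sh.in
+# ROOTFS_COMPRESS: gzip
+# OUTPUT_COMPRESS: gzip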
+
+set -eu
+
+die(){
+ echo "$@" >&2
+ exit 1
+}
+
+warn(){
+ echo "$@" >&2
+}
+
+info(){
+ echo "$@" >&2
+}
+
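+# Quote a string so that it expands to a single literal shell word, even if
+# it contains spaces or single quotes; used when interpolating values into
+# the generated scripts.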
+shellescape(){
+ echo "'$(echo "$1" | sed -e "s/'/'\\''/g")'"
+}
+
+sedescape(){
+ # Escape the passed in string so it can be safely interpolated into
+ # a sed expression as a literal value.
+ echo "$1" | sed -e 's/[\/&]/\\&/g'
+}
+
+ROOTDIR="$1"
+OUTPUT_TAR="$2"
+td="$(mktemp -d)"
+IMAGE_SUBDIR="${IMAGE_SUBDIR-image_files}"
+SCRIPT_SUBDIR="${SCRIPT_SUBDIR-tools}"
+ROOTFS_TAR="${ROOTFS_TAR-rootfs.tar}"
+
+# Generate shell snippets that will expand to paths to various resources
+# needed by the scripts.
+# They expand to a single shell word, so constructs like the following work
+# SCRIPT_DIR=@@SCRIPT_DIR@@
+# dd if="$SCRIPT_DIR/mbr" of="$disk" count=1
+# tar -C "$mountpoint" -xf @@ROOTFS_TAR_PATH@@ .
+find_script_dir='"$(readlink -f "$(dirname "$0")")"'
+image_dir="$find_script_dir/../$(shellescape "$IMAGE_SUBDIR")"
+rootfs_tar_path="$image_dir/$(shellescape "$ROOTFS_TAR")"
+
+install_script(){
+ local source_file="$1"
+ local output_dir="$2"
+ local target_file="$output_dir/$SCRIPT_SUBDIR/$(basename "$source_file" .in)"
+ sed -e "s/@@SCRIPT_DIR@@/$(sedescape "$find_script_dir")/g" \
+ -e "s/@@IMAGE_DIR@@/$(sedescape "$image_dir")/g" \
+ -e "s/@@ROOTFS_TAR_PATH@@/$(sedescape "$rootfs_tar_path")/g" \
+ "$source_file" \
+ | install -D -m 755 /proc/self/fd/0 "$target_file"
+}
+
+install_scripts(){
+ local output_dir="$1"
+ (
+ IFS="${INCLUDE_SCRIPTS_SEPARATOR-:}"
+ for script in $INCLUDE_SCRIPTS; do
+ local script_path="$(pwd)/$script"
+ if [ ! -e "$script_path" ]; then
+ warn Script "$script" not found, ignoring
+ continue
+ fi
+ install_script "$script" "$output_dir"
+ done
+ )
+}
+
+install_bootloader_blobs(){
+ local output_dir="$1"
+ local image_dir="$output_dir/$IMAGE_SUBDIR"
+ (
+ IFS="${BOOTLOADER_BLOBS_SEPARATOR-:}"
+ for blob in $BOOTLOADER_BLOBS; do
+ local blob_path="$ROOTDIR/$blob"
+ if [ ! -e "$blob_path" ]; then
+ warn Bootloader blob "$blob" not found, ignoring
+ continue
+ fi
+ install -D -m644 "$blob_path" "$image_dir/$(basename "$blob_path")"
+ done
+ )
+}
+
+# Determine a basename for our directory: the same as our tarball with
+# extensions removed. This is needed since tarball packages usually
+# have a base directory containing their contents, rather than extracting
+# into the current directory.
+output_dir="$(basename "$OUTPUT_TAR")"
+for ext in .xz .bz2 .gzip .gz .tgz .tar; do
+ output_dir="${output_dir%$ext}"
+done
+
+info Installing scripts
+install_scripts "$td/$output_dir"
+
+info Installing bootloader blobs
+install_bootloader_blobs "$td/$output_dir"
+
+info Writing rootfs tar to "$IMAGE_SUBDIR/$ROOTFS_TAR"
+tar -C "$ROOTDIR" -c . \
+| sh -c "${ROOTFS_COMPRESS-cat}" >"$td/$output_dir/$IMAGE_SUBDIR/$ROOTFS_TAR"
+
+info Writing image package tar to "$OUTPUT_TAR"
+tar -C "$td" -c "$output_dir" | sh -c "${OUTPUT_COMPRESS-cat}" >"$OUTPUT_TAR"
diff --git a/installer.configure b/installer.configure
new file mode 100755
index 00000000..a77dc851
--- /dev/null
+++ b/installer.configure
@@ -0,0 +1,48 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to configure an installer
+# system. It will create the configuration needed in the installer system
+# to perform an installation. It uses the following variables from the
+# environment:
+#
+# * INSTALLER_TARGET_STORAGE_DEVICE
+# * INSTALLER_ROOTFS_TO_INSTALL
+# * INSTALLER_POST_INSTALL_COMMAND (optional, defaults to `reboot -f`)
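+#
+# For example (the values are illustrative only):
+#
+# INSTALLER_TARGET_STORAGE_DEVICE: /dev/sda
+# INSTALLER_ROOTFS_TO_INSTALL: /rootfs
+# INSTALLER_POST_INSTALL_COMMAND: poweroff -f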
+
+import os
+import sys
+import yaml
+
+install_config_file = os.path.join(sys.argv[1], 'etc', 'install.conf')
+
+try:
+ installer_configuration = {
+ 'INSTALLER_TARGET_STORAGE_DEVICE': os.environ['INSTALLER_TARGET_STORAGE_DEVICE'],
+ 'INSTALLER_ROOTFS_TO_INSTALL': os.environ['INSTALLER_ROOTFS_TO_INSTALL'],
+ }
+except KeyError as e:
+ print "Not configuring as an installer system"
+ sys.exit(0)
+
+postinstkey = 'INSTALLER_POST_INSTALL_COMMAND'
+installer_configuration[postinstkey] = os.environ.get(postinstkey, 'reboot -f')
+
+with open(install_config_file, 'w') as f:
+ f.write( yaml.dump(installer_configuration, default_flow_style=False) )
+
+print "Configuration of the installer system in %s" % install_config_file
diff --git a/jffs2.write b/jffs2.write
new file mode 100644
index 00000000..46b69a53
--- /dev/null
+++ b/jffs2.write
@@ -0,0 +1,64 @@
+#!/usr/bin/python
+#-*- coding: utf-8 -*-
+# Copyright © 2015 Codethink Limited
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''A Morph deployment write extension for creating images with jffs2
+ as the root filesystem.'''
+
+
+import cliapp
+import os
+
+import morphlib.writeexts
+
+
+class Jffs2WriteExtension(morphlib.writeexts.WriteExtension):
+
+ '''See jffs2.write.help for documentation.'''
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ temp_root, location = args
+
+ try:
+ self.create_jffs2_system(temp_root, location)
+ self.status(msg='Disk image has been created at %(location)s',
+ location=location)
+ except Exception:
+ self.status(msg='Failure to deploy system to %(location)s',
+ location=location)
+ raise
+
+ def create_jffs2_system(self, temp_root, location):
+ erase_block = self.get_erase_block_size()
+ cliapp.runcmd(
+ ['mkfs.jffs2', '--pad', '--no-cleanmarkers',
+ '--eraseblock='+erase_block, '-d', temp_root, '-o', location])
+
+ def get_erase_block_size(self):
+ erase_block = os.environ.get('ERASE_BLOCK', '')
+
+ if erase_block == '':
+ raise cliapp.AppException('ERASE_BLOCK was not given')
+
+ if not erase_block.isdigit():
+ raise cliapp.AppException('ERASE_BLOCK must be a whole number')
+
+ return erase_block
+
+Jffs2WriteExtension().run()
diff --git a/jffs2.write.help b/jffs2.write.help
new file mode 100644
index 00000000..059a354b
--- /dev/null
+++ b/jffs2.write.help
@@ -0,0 +1,28 @@
+#-*- coding: utf-8 -*-
+# Copyright © 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+ Creates a system produced by Morph build with a jffs2 filesystem and then
+ writes to an image. To use this extension, the host system must have access
+ to mkfs.jffs2 which is provided in the mtd-utilities.morph stratum.
+
+ Parameters:
+
+ * location: the pathname of the disk image to be created/upgraded, or the
+ path to the physical device.
+
+ * ERASE_BLOCK: the erase block size of the target system, which can be
+ found in '/sys/class/mtd/mtdx/erasesize'
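+
+ An illustrative deployment entry (the values are examples only, not
+ defaults):
+
+ type: jffs2
+ location: /jffs2-system.img
+ ERASE_BLOCK: 131072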
diff --git a/mason.configure b/mason.configure
new file mode 100644
index 00000000..1198ebd0
--- /dev/null
+++ b/mason.configure
@@ -0,0 +1,153 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to fully configure
+# a Mason instance at deployment time. It uses the following variables
+# from the environment:
+#
+# * ARTIFACT_CACHE_SERVER
+# * MASON_CLUSTER_MORPHOLOGY
+# * MASON_DEFINITIONS_REF
+# * MASON_DISTBUILD_ARCH
+# * MASON_TEST_HOST
+# * OPENSTACK_NETWORK_ID
+# * TEST_INFRASTRUCTURE_TYPE
+# * TROVE_HOST
+# * TROVE_ID
+# * CONTROLLERHOST
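+#
+# As an illustration only (the values below are examples, not defaults),
+# a Mason might be configured with:
+#
+# ARTIFACT_CACHE_SERVER: cache.example.com
+# MASON_CLUSTER_MORPHOLOGY: clusters/ci.morph
+# MASON_DEFINITIONS_REF: master
+# MASON_DISTBUILD_ARCH: x86_64
+# TROVE_HOST: trove.example.com
+# TROVE_ID: my-trove
+# CONTROLLERHOST: controller.example.com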
+
+set -e
+
+##########################################################################
+# Copy Mason files into root filesystem
+##########################################################################
+
+ROOT="$1"
+
+mkdir -p "$ROOT"/usr/lib/mason
+cp mason/mason.sh "$ROOT"/usr/lib/mason/mason.sh
+cp mason/mason-report.sh "$ROOT"/usr/lib/mason/mason-report.sh
+cp mason/os-init-script "$ROOT"/usr/lib/mason/os-init-script
+
+cp mason/mason.timer "$ROOT"/etc/systemd/system/mason.timer
+
+cp mason/mason.service "$ROOT"/etc/systemd/system/mason.service
+
+##########################################################################
+# Set up httpd web server
+##########################################################################
+
+cp mason/httpd.service "$ROOT"/etc/systemd/system/httpd.service
+
+mkdir -p "$ROOT"/srv/mason
+
+cat >>"$ROOT"/etc/httpd.conf <<EOF
+.log:text/plain
+EOF
+
+mkdir -p "$ROOT"/var/mason
+
+##########################################################################
+# Copy files needed for Ansible configuration
+##########################################################################
+
+mkdir -p "$ROOT/usr/share/mason-setup"
+mkdir -p "$ROOT/usr/lib/mason-setup"
+
+cp mason/share/* "$ROOT/usr/share/mason-setup"
+cp -r mason/ansible "$ROOT/usr/lib/mason-setup/"
+cp mason/mason-setup.service "$ROOT"/etc/systemd/system/mason-setup.service
+
+ln -s ../mason-setup.service "$ROOT"/etc/systemd/system/multi-user.target.wants/mason-setup.service
+
+##########################################################################
+# Check variables
+##########################################################################
+
+if [ -n "$MASON_GENERIC" ]; then
+ echo Not configuring Mason, it will be generic
+ exit 0
+fi
+
+if [ -z "$MASON_CLUSTER_MORPHOLOGY" -a \
+ -z "$MASON_DEFINITIONS_REF" -a \
+ -z "$MASON_DISTBUILD_ARCH" -a \
+ -z "$MASON_TEST_HOST" ]; then
+ # No Mason options defined, do nothing.
+ exit 0
+fi
+
+if [ -z "$ARTIFACT_CACHE_SERVER" -o \
+ -z "$CONTROLLERHOST" -o \
+ -z "$MASON_CLUSTER_MORPHOLOGY" -o \
+ -z "$MASON_DEFINITIONS_REF" -o \
+ -z "$MASON_DISTBUILD_ARCH" -o \
+ -z "$MASON_TEST_HOST" -o \
+ -z "$TROVE_HOST" -o \
+ -z "$TROVE_ID" ]; then
+ echo Some options required for Mason were defined, but not all.
+ exit 1
+fi
+
+##########################################################################
+# Generate config variable shell snippet
+##########################################################################
+
+MASON_DATA="$ROOT/etc/mason"
+mkdir -p "$MASON_DATA"
+
+python <<'EOF' >"$MASON_DATA/mason.conf"
+import os, sys, yaml
+
+mason_configuration={
+ 'ARTIFACT_CACHE_SERVER': os.environ['ARTIFACT_CACHE_SERVER'],
+ 'MASON_CLUSTER_MORPHOLOGY': os.environ['MASON_CLUSTER_MORPHOLOGY'],
+ 'MASON_DEFINITIONS_REF': os.environ['MASON_DEFINITIONS_REF'],
+ 'MASON_DISTBUILD_ARCH': os.environ['MASON_DISTBUILD_ARCH'],
+ 'MASON_TEST_HOST': os.environ['MASON_TEST_HOST'],
+ 'OPENSTACK_NETWORK_ID': os.environ['OPENSTACK_NETWORK_ID'],
+ 'TEST_INFRASTRUCTURE_TYPE': os.environ['TEST_INFRASTRUCTURE_TYPE'],
+ 'TROVE_ID': os.environ['TROVE_ID'],
+ 'TROVE_HOST': os.environ['TROVE_HOST'],
+ 'CONTROLLERHOST': os.environ['CONTROLLERHOST'],
+}
+
+yaml.dump(mason_configuration, sys.stdout, default_flow_style=False)
+EOF
+
+if [ "$TEST_INFRASTRUCTURE_TYPE" = "openstack" ]; then
+ python <<'EOF' >>"$MASON_DATA/mason.conf"
+import os, sys, yaml
+
+openstack_credentials={
+ 'OS_USERNAME': os.environ['OPENSTACK_USER'],
+ 'OS_TENANT_NAME': os.environ['OPENSTACK_TENANT'],
+ 'OS_TENANT_ID': os.environ['OPENSTACK_TENANT_ID'],
+ 'OS_AUTH_URL': os.environ['OPENSTACK_AUTH_URL'],
+ 'OS_PASSWORD': os.environ['OPENSTACK_PASSWORD'],
+}
+
+yaml.dump(openstack_credentials, sys.stdout, default_flow_style=False)
+EOF
+fi
+
+##########################################################################
+# Enable services
+##########################################################################
+
+ln -s ../mason.timer "$ROOT"/etc/systemd/system/multi-user.target.wants/mason.timer
+ln -s ../httpd.service "$ROOT"/etc/systemd/system/multi-user.target.wants/httpd.service
diff --git a/mason/ansible/hosts b/mason/ansible/hosts
new file mode 100644
index 00000000..5b97818d
--- /dev/null
+++ b/mason/ansible/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local
diff --git a/mason/ansible/mason-setup.yml b/mason/ansible/mason-setup.yml
new file mode 100644
index 00000000..d1528dbb
--- /dev/null
+++ b/mason/ansible/mason-setup.yml
@@ -0,0 +1,83 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/mason/mason.conf"
+ tasks:
+
+
+ - fail: msg='TROVE_ID is mandatory'
+ when: TROVE_ID is not defined
+
+ - fail: msg='TROVE_HOST is mandatory'
+ when: TROVE_HOST is not defined
+
+ - fail: msg='ARTIFACT_CACHE_SERVER is mandatory'
+ when: ARTIFACT_CACHE_SERVER is not defined
+
+ - fail: msg='MASON_CLUSTER_MORPHOLOGY is mandatory'
+ when: MASON_CLUSTER_MORPHOLOGY is not defined
+
+ - fail: msg='MASON_DEFINITIONS_REF is mandatory'
+ when: MASON_DEFINITIONS_REF is not defined
+
+ - fail: msg='MASON_DISTBUILD_ARCH is mandatory'
+ when: MASON_DISTBUILD_ARCH is not defined
+
+ - fail: msg='MASON_TEST_HOST is mandatory'
+ when: MASON_TEST_HOST is not defined
+
+ - fail: msg='CONTROLLERHOST is mandatory'
+ when: CONTROLLERHOST is not defined
+
+ - fail: msg='TEST_INFRASTRUCTURE_TYPE is mandatory'
+ when: TEST_INFRASTRUCTURE_TYPE is not defined
+
+ - fail: msg='OPENSTACK_NETWORK_ID is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OPENSTACK_NETWORK_ID is not defined
+
+ - fail: msg='OS_USERNAME is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_USERNAME is not defined
+
+ - fail: msg='OS_PASSWORD is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_PASSWORD is not defined
+
+ - fail: msg='OS_TENANT_ID is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_TENANT_ID is not defined
+
+ - fail: msg='OS_TENANT_NAME is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_TENANT_NAME is not defined
+
+ - fail: msg='OS_AUTH_URL is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_AUTH_URL is not defined
+
+ - name: Create the Mason configuration file
+ template: src=/usr/share/mason-setup/{{ item }} dest=/etc/{{ item }}
+ with_items:
+ - mason.conf
+
+ - name: Create the OpenStack credentials file
+ template: src=/usr/share/mason-setup/{{ item }} dest=/etc/{{ item }}
+ with_items:
+ - os.conf
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack"
+
+ - name: Enable the mason service
+ service: name=mason.service enabled=yes
+ register: mason_service
+ - name: Restart the mason service
+ service: name=mason.service state=restarted
+ when: mason_service|changed
+
+ - name: Enable the mason timer
+ service: name=mason.timer enabled=yes
+ register: mason_timer
+ - name: Restart the mason timer
+ service: name=mason.timer state=restarted
+ when: mason_timer|changed
+
+ - name: Enable the httpd service
+ service: name=httpd.service enabled=yes
+ register: httpd_service
+ - name: Restart the httpd service
+ service: name=httpd state=restarted
+ when: httpd_service|changed
diff --git a/mason/httpd.service b/mason/httpd.service
new file mode 100644
index 00000000..7572b732
--- /dev/null
+++ b/mason/httpd.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=HTTP server for Mason
+After=network.target
+
+[Service]
+User=root
+ExecStart=/usr/sbin/httpd -f -p 80 -h /srv/mason
+
+[Install]
+WantedBy=multi-user.target
diff --git a/mason/mason-generator.sh b/mason/mason-generator.sh
new file mode 100755
index 00000000..187db72c
--- /dev/null
+++ b/mason/mason-generator.sh
@@ -0,0 +1,101 @@
+#!/bin/sh
+
+set -e
+
+if [ "$#" -lt 5 -o "$#" -gt 6 -o "$1" == "-h" -o "$1" == "--help" ]; then
+ cat <<EOF
+Usage:
+ `basename $0` HOST_PREFIX UPSTREAM_TROVE_HOSTNAME VM_USER VM_HOST VM_PATH [HOST_POSTFIX]
+
+Where:
+ HOST_PREFIX -- Name of your Mason instance
+ e.g. "my-mason" to produce hostnames:
+ my-mason-trove and my-mason-controller
+ UPSTREAM_TROVE_HOSTNAME -- Upstream trove's hostname
+ VM_USER -- User on VM host for VM deployment
+ VM_HOST -- VM host for VM deployment
+ VM_PATH -- Path to store VM images in on VM host
+ HOST_POSTFIX -- e.g. ".example.com" to get
+ my-mason-trove.example.com
+
+This script makes deploying a Mason system simpler by automating
+the generation of keys for the systems to use, building of the
+systems, filling out the mason deployment cluster morphology
+template with useful values, and finally deploying the systems.
+
+To ensure that the deployed system can deploy test systems, you
+must supply an ssh key to the VM host. Do so with the following
+command:
+ ssh-copy-id -i ssh_keys-HOST_PREFIX/worker.key.pub VM_USER@VM_HOST
+
+To ensure that the mason can upload artifacts to the upstream trove,
+you must supply an ssh key to the upstream trove. Do so with the
+following command:
+ ssh-copy-id -i ssh_keys-HOST_PREFIX/id_rsa.key.pub root@UPSTREAM_TROVE_HOSTNAME
+
+EOF
+ exit 0
+fi
+
+
+HOST_PREFIX="$1"
+UPSTREAM_TROVE="$2"
+VM_USER="$3"
+VM_HOST="$4"
+VM_PATH="$5"
+HOST_POSTFIX="$6"
+
+sedescape() {
+ # Escape all non-alphanumeric characters
+ printf "%s\n" "$1" | sed -e 's/\W/\\&/g'
+}
+
+
+##############################################################################
+# Key generation
+##############################################################################
+
+mkdir -p "ssh_keys-${HOST_PREFIX}"
+cd "ssh_keys-${HOST_PREFIX}"
+test -e mason.key || ssh-keygen -t rsa -b 2048 -f mason.key -C mason@TROVE_HOST -N ''
+test -e lorry.key || ssh-keygen -t rsa -b 2048 -f lorry.key -C lorry@TROVE_HOST -N ''
+test -e worker.key || ssh-keygen -t rsa -b 2048 -f worker.key -C worker@TROVE_HOST -N ''
+test -e id_rsa || ssh-keygen -t rsa -b 2048 -f id_rsa -C trove-admin@TROVE_HOST -N ''
+cd ../
+
+
+##############################################################################
+# Mason setup
+##############################################################################
+
+cp clusters/mason.morph mason-${HOST_PREFIX}.morph
+
+sed -i "s/red-box-v1/$(sedescape "$HOST_PREFIX")/g" "mason-$HOST_PREFIX.morph"
+sed -i "s/ssh_keys/ssh_keys-$(sedescape "$HOST_PREFIX")/g" "mason-$HOST_PREFIX.morph"
+sed -i "s/upstream-trove/$(sedescape "$UPSTREAM_TROVE")/" "mason-$HOST_PREFIX.morph"
+sed -i "s/vm-user/$(sedescape "$VM_USER")/g" "mason-$HOST_PREFIX.morph"
+sed -i "s/vm-host/$(sedescape "$VM_HOST")/g" "mason-$HOST_PREFIX.morph"
+sed -i "s/vm-path/$(sedescape "$VM_PATH")/g" "mason-$HOST_PREFIX.morph"
+sed -i "s/\.example\.com/$(sedescape "$HOST_POSTFIX")/g" "mason-$HOST_PREFIX.morph"
+
+
+##############################################################################
+# System building
+##############################################################################
+
+morph build systems/trove-system-x86_64.morph
+morph build systems/build-system-x86_64.morph
+
+
+##############################################################################
+# System deployment
+##############################################################################
+
+morph deploy mason-${HOST_PREFIX}.morph
+
+
+##############################################################################
+# Cleanup
+##############################################################################
+
+rm mason-${HOST_PREFIX}.morph
diff --git a/mason/mason-report.sh b/mason/mason-report.sh
new file mode 100755
index 00000000..9c20b65b
--- /dev/null
+++ b/mason/mason-report.sh
@@ -0,0 +1,252 @@
+#!/bin/bash
+
+set -x
+
+. /etc/mason.conf
+
+REPORT_PATH=/var/mason/report.html
+SERVER_PATH=/srv/mason
+
+sed_escape() {
+ printf "%s\n" "$1" | sed -e 's/\W/\\&/g'
+}
+
+create_report() {
+cat > $REPORT_PATH <<'EOF'
+<html>
+<head>
+<meta charset="UTF-8">
+<meta http-equiv="refresh" content="60">
+<style>
+html, body {
+ margin: 0;
+ padding: 0;
+}
+p.branding {
+ background: black;
+ color: #fff;
+ padding: 0.4em;
+ margin: 0;
+ font-weight: bold;
+}
+h1 {
+ background: #225588;
+ color: white;
+ margin: 0;
+ padding: 0.6em;
+}
+table {
+ width: 90%;
+ margin: 1em auto 6em auto;
+ border: 1px solid black;
+ border-spacing: 0;
+}
+table tr.headings {
+ background: #555;
+ color: white;
+}
+table tr.pass {
+ background: #aaffaa;
+}
+table tr.pass:hover {
+ background: #bbffbb;
+}
+table tr.fail {
+ background: #ffaaaa;
+}
+table tr.fail:hover {
+ background: #ffbbbb;
+}
+table tr.nonet {
+ background: #ffdd99;
+}
+table tr.nonet:hover {
+ background: #ffeeaa;
+}
+table tr.headings th {
+ font-weight: bold;
+ text-align: left;
+ padding: 3px 2px;
+}
+table td {
+ padding: 2px;
+}
+td.result {
+ font-weight: bold;
+ text-transform: uppercase;
+}
+td.result a {
+ text-decoration: none;
+}
+td.result a:before {
+ content: "âž« ";
+}
+tr.pass td.result a {
+ color: #252;
+}
+tr.pass td.result a:hover {
+ color: #373;
+}
+tr.fail td.result a {
+ color: #622;
+}
+tr.fail td.result a:hover {
+ color: #933;
+}
+tr.nonet td.result a {
+ color: #641;
+}
+tr.nonet td.result a:hover {
+ color: #962;
+}
+td.ref {
+ font-family: monospace;
+}
+td.ref a {
+ color: #333;
+}
+td.ref a:hover {
+ color: #555;
+}
+table tr.pass td, table tr.fail td {
+ border-top: solid white 1px;
+}
+p {
+ margin: 1.3em;
+}
+code {
+ padding: 0.3em 0.5em;
+ background: #eee;
+ border: 1px solid #bbb;
+ border-radius: 1em;
+}
+#footer {
+ margin: 0;
+ background: #aaa;
+ color: #222;
+ border-top: #888 1px solid;
+ font-size: 80%;
+ padding: 0;
+ position: fixed;
+ bottom: 0;
+ width: 100%;
+ display: table;
+}
+#footer p {
+ padding: 1.3em;
+ display: table-cell;
+}
+#footer p code {
+ font-size: 110%;
+}
+#footer p.about {
+ text-align: right;
+}
+</style>
+</head>
+<body>
+<p class="branding">Mason</p>
+<h1>Baserock: Continuous Delivery</h1>
+<p>Build log of changes to <code>BRANCH</code> from <code>TROVE</code>. Most recent first.</p>
+<table>
+<tr class="headings">
+ <th>Started</th>
+ <th>Ref</th>
+ <th>Duration</th>
+ <th>Result</th>
+</tr>
+<!--INSERTION POINT-->
+</table>
+<div id="footer">
+<p>Last checked for updates at: <code>....-..-.. ..:..:..</code></p>
+<p class="about">Generated by Mason | Powered by Baserock</p>
+</div>
+</body>
+</html>
+EOF
+
+ sed -i 's/BRANCH/'"$(sed_escape "$1")"'/' $REPORT_PATH
+ sed -i 's/TROVE/'"$(sed_escape "$2")"'/' $REPORT_PATH
+}
+
+update_report() {
+ # Give function params sensible names
+ build_start_time="$1"
+ build_trove_host="$2"
+ build_ref="$3"
+ build_sha1="$4"
+ build_duration="$5"
+ build_result="$6"
+
+ # Generate template if report file is not there
+ if [ ! -f $REPORT_PATH ]; then
+        create_report "$build_ref" "$build_trove_host"
+ fi
+
+ # Build table row for insertion into report file
+ if [ "$build_result" = nonet ]; then
+        msg='<tr class="'"${build_result}"'"><td>'"${build_start_time}"'</td><td class="ref">Failed to contact '"${build_trove_host}"'</td><td>'"${build_duration}s"'</td><td class="result"><a href="log/'"${build_sha1}"'--'"${build_start_time}"'.log">'"${build_result}"'</a></td></tr>'
+ else
+ msg='<tr class="'"${build_result}"'"><td>'"${build_start_time}"'</td><td class="ref"><a href="http://'"${build_trove_host}"'/cgi-bin/cgit.cgi/baserock/baserock/definitions.git/commit/?h='"${build_ref}"'&id='"${build_sha1}"'">'"${build_sha1}"'</a></td><td>'"${build_duration}s"'</td><td class="result"><a href="log/'"${build_sha1}"'--'"${build_start_time}"'.log">'"${build_result}"'</a></td></tr>'
+ fi
+
+ # Insert report line, newest at top
+ sed -i 's/<!--INSERTION POINT-->/<!--INSERTION POINT-->\n'"$(sed_escape "$msg")"'/' $REPORT_PATH
+}
+
+update_report_time() {
+ # Give function params sensible names
+ build_start_time="$1"
+
+ # If the report file exists, update the last-checked-for-updates time
+ if [ -f $REPORT_PATH ]; then
+ sed -i 's/<code>....-..-.. ..:..:..<\/code>/<code>'"$(sed_escape "$build_start_time")"'<\/code>/' $REPORT_PATH
+ fi
+}
+
+START_TIME=$(date "+%Y-%m-%d %T")
+
+update_report_time "$START_TIME"
+cp "$REPORT_PATH" "$SERVER_PATH/index.html"
+
+logfile="$(mktemp)"
+/usr/lib/mason/mason.sh 2>&1 | tee "$logfile"
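+# Map the exit status of mason.sh (taken from PIPESTATUS, since tee is the
+# last command in the pipeline) to a result: 0 = pass, 33 = no new commits
+# to build, 42 = the Trove could not be contacted, anything else = fail.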
+case "${PIPESTATUS[0]}" in
+0)
+ RESULT=pass
+ ;;
+33)
+ RESULT=skip
+ ;;
+42)
+ RESULT=nonet
+ ;;
+*)
+ RESULT=fail
+ ;;
+esac
+
+# TODO: Update page with last executed time
+if [ "$RESULT" = skip ]; then
+ rm "$logfile"
+ exit 0
+fi
+
+DURATION=$(( $(date +%s) - $(date --date="$START_TIME" +%s) ))
+SHA1="$(cd "ws/$DEFINITIONS_REF/$UPSTREAM_TROVE_ADDRESS/baserock/baserock/definitions" && git rev-parse HEAD)"
+
+update_report "$START_TIME" \
+ "$UPSTREAM_TROVE_ADDRESS" \
+ "$DEFINITIONS_REF" \
+ "$SHA1" \
+ "$DURATION" \
+ "$RESULT"
+
+
+#
+# Copy report into server directory
+#
+
+cp "$REPORT_PATH" "$SERVER_PATH/index.html"
+mkdir -p "$SERVER_PATH/log"
+mv "$logfile" "$SERVER_PATH/log/$SHA1--$START_TIME.log"
diff --git a/mason/mason-setup.service b/mason/mason-setup.service
new file mode 100644
index 00000000..60403bde
--- /dev/null
+++ b/mason/mason-setup.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Run mason-setup Ansible scripts
+Requires=network.target
+After=network.target
+Requires=opensshd.service
+After=opensshd.service
+
+# If there's a shared /var subvolume, it must be mounted before this
+# unit runs.
+Requires=local-fs.target
+After=local-fs.target
+
+ConditionPathExists=/etc/mason/mason.conf
+
+[Service]
+ExecStart=/usr/bin/ansible-playbook -v -i /usr/lib/mason-setup/ansible/hosts /usr/lib/mason-setup/ansible/mason-setup.yml
diff --git a/mason/mason.service b/mason/mason.service
new file mode 100644
index 00000000..d5c99498
--- /dev/null
+++ b/mason/mason.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Mason: Continuous Delivery Service
+After=mason-setup.service
+ConditionPathIsDirectory=/srv/distbuild
+
+[Service]
+User=root
+ExecStart=/usr/lib/mason/mason-report.sh
+WorkingDirectory=/srv/distbuild
+
+[Install]
+WantedBy=multi-user.target
diff --git a/mason/mason.sh b/mason/mason.sh
new file mode 100755
index 00000000..dba99dfa
--- /dev/null
+++ b/mason/mason.sh
@@ -0,0 +1,93 @@
+#!/bin/sh
+
+# Load OpenStack credentials
+if [ -f "/etc/os.conf" ]; then
+ . /etc/os.conf
+fi
+
+set -e
+set -x
+
+# Load our deployment config
+. /etc/mason.conf
+
+if [ ! -e ws ]; then
+ morph init ws
+fi
+cd ws
+
+definitions_repo="$DEFINITIONS_REF"/"$UPSTREAM_TROVE_ADDRESS"/baserock/baserock/definitions
+if [ ! -e "$definitions_repo" ]; then
+ morph checkout git://"$UPSTREAM_TROVE_ADDRESS"/baserock/baserock/definitions "$DEFINITIONS_REF"
+ cd "$definitions_repo"
+ git config user.name "$TROVE_ID"-mason
+ git config user.email "$TROVE_ID"-mason@$(hostname)
+else
+ cd "$definitions_repo"
+ SHA1_PREV="$(git rev-parse HEAD)"
+fi
+
+if ! git remote update origin; then
+ echo ERROR: Unable to contact trove
+ exit 42
+fi
+git clean -fxd
+git reset --hard origin/"$DEFINITIONS_REF"
+
+SHA1="$(git rev-parse HEAD)"
+
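+# "$HOME/success" is touched at the end of a successful run; if it exists
+# and the ref has not moved since then, there is nothing new to build.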
+if [ -f "$HOME/success" ] && [ "$SHA1" = "$SHA1_PREV" ]; then
+ echo INFO: No changes to "$DEFINITIONS_REF", nothing to do
+ exit 33
+fi
+
+rm -f "$HOME/success"
+
+echo INFO: Mason building: $DEFINITIONS_REF at $SHA1
+
+if ! "scripts/release-build" --no-default-configs \
+ --trove-host "$UPSTREAM_TROVE_ADDRESS" \
+ --artifact-cache-server "http://$ARTIFACT_CACHE_SERVER:8080/" \
+ --controllers "$DISTBUILD_ARCH:$DISTBUILD_CONTROLLER_ADDRESS" \
+ "$BUILD_CLUSTER_MORPHOLOGY"; then
+ echo ERROR: Failed to build release images
+ echo Build logs for chunks:
+ find builds -type f -exec echo {} \; -exec cat {} \;
+ exit 1
+fi
+
+releases_made="$(cd release && ls | wc -l)"
+if [ "$releases_made" = 0 ]; then
+ echo ERROR: No release images created
+ exit 1
+else
+ echo INFO: Created "$releases_made" release images
+fi
+
+if [ "$TEST_INFRASTRUCTURE_TYPE" = "openstack" ]; then
+ "scripts/release-test-os" \
+ --deployment-host "$DISTBUILD_ARCH":"$MASON_TEST_HOST" \
+ --trove-host "$UPSTREAM_TROVE_ADDRESS" \
+ --trove-id "$TROVE_ID" \
+ --net-id "$OPENSTACK_NETWORK_ID" \
+ "$BUILD_CLUSTER_MORPHOLOGY"
+elif [ "$TEST_INFRASTRUCTURE_TYPE" = "kvmhost" ]; then
+ "scripts/release-test" \
+ --deployment-host "$DISTBUILD_ARCH":"$MASON_TEST_HOST" \
+ --trove-host "$UPSTREAM_TROVE_ADDRESS" \
+ --trove-id "$TROVE_ID" \
+ "$BUILD_CLUSTER_MORPHOLOGY"
+fi
+
+"scripts/release-upload" --build-trove-host "$ARTIFACT_CACHE_SERVER" \
+ --arch "$DISTBUILD_ARCH" \
+ --log-level=debug --log="$HOME"/release-upload.log \
+ --public-trove-host "$UPSTREAM_TROVE_ADDRESS" \
+ --public-trove-username root \
+ --public-trove-artifact-dir /home/cache/artifacts \
+ --no-upload-release-artifacts \
+ "$BUILD_CLUSTER_MORPHOLOGY"
+
+echo INFO: Artifact upload complete for $DEFINITIONS_REF at $SHA1
+
+touch "$HOME/success"
diff --git a/mason/mason.timer b/mason/mason.timer
new file mode 100644
index 00000000..107dff97
--- /dev/null
+++ b/mason/mason.timer
@@ -0,0 +1,10 @@
+[Unit]
+Description=Runs Mason continually with 1 min between calls
+
+[Timer]
+# Time between Mason finishing and calling it again
+OnUnitActiveSec=1min
+Unit=mason.service
+
+[Install]
+WantedBy=multi-user.target
diff --git a/mason/os-init-script b/mason/os-init-script
new file mode 100644
index 00000000..77afb926
--- /dev/null
+++ b/mason/os-init-script
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+# This allows the test runner to know that cloud-init has completed the
+# disc resizing, and there is enough free space to continue.
+touch /root/cloud-init-finished
+
diff --git a/mason/share/mason.conf b/mason/share/mason.conf
new file mode 100644
index 00000000..1295ce84
--- /dev/null
+++ b/mason/share/mason.conf
@@ -0,0 +1,14 @@
+# This file is generated by the mason-setup systemd unit.
+# If you want to change the configuration, change the configuration
+# in /etc/mason/mason.conf and restart the service.
+
+ARTIFACT_CACHE_SERVER={{ ARTIFACT_CACHE_SERVER|quote }}
+UPSTREAM_TROVE_ADDRESS={{ TROVE_HOST|quote }}
+DEFINITIONS_REF={{ MASON_DEFINITIONS_REF|quote }}
+DISTBUILD_ARCH={{ MASON_DISTBUILD_ARCH|quote }}
+DISTBUILD_CONTROLLER_ADDRESS={{ CONTROLLERHOST|quote }}
+TROVE_ID={{ TROVE_ID|quote }}
+BUILD_CLUSTER_MORPHOLOGY={{ MASON_CLUSTER_MORPHOLOGY|quote }}
+MASON_TEST_HOST={{ MASON_TEST_HOST|quote }}
+TEST_INFRASTRUCTURE_TYPE={{ TEST_INFRASTRUCTURE_TYPE|quote }}
+{% if OPENSTACK_NETWORK_ID is defined %}OPENSTACK_NETWORK_ID={{ OPENSTACK_NETWORK_ID|quote }}{% endif %}
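+
+# A rendered copy might look something like the following; the values
+# shown here are purely illustrative:
+#   ARTIFACT_CACHE_SERVER='cache.example.com'
+#   DISTBUILD_ARCH='x86_64'
+#   BUILD_CLUSTER_MORPHOLOGY='clusters/ci.morph'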
diff --git a/mason/share/os.conf b/mason/share/os.conf
new file mode 100644
index 00000000..21ef398c
--- /dev/null
+++ b/mason/share/os.conf
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# A version of this file with the relevant information filled in can be
+# obtained by navigating to 'Access & Security' -> 'API Access' ->
+# 'Download OpenStack RC file' in the Horizon web interface of your
+# OpenStack deployment. However, the file obtained there sets OS_PASSWORD
+# such that the user is prompted for a password, so you will need to
+# change that for Mason to work automatically.
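+# For example, the downloaded RC file typically ends with something like
+#     echo "Please enter your OpenStack Password: "
+#     read -sr OS_PASSWORD_INPUT
+#     export OS_PASSWORD=$OS_PASSWORD_INPUT
+# which should be replaced with a concrete value, as is done below.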
+#
+# With the addition of Keystone, to use an openstack cloud you should
+# authenticate against keystone, which returns a **Token** and **Service
+# Catalog**. The catalog contains the endpoint for all services the
+# user/tenant has access to - including nova, glance, keystone, swift.
+#
+# *NOTE*: Using the 2.0 *auth api* does not mean that the compute api is 2.0.
+# We will use the 1.1 *compute api*.
+export OS_AUTH_URL={{ OS_AUTH_URL|quote }}
+
+# With the addition of Keystone we have standardized on the term **tenant**
+# as the entity that owns the resources.
+export OS_TENANT_ID={{ OS_TENANT_ID|quote }}
+export OS_TENANT_NAME={{ OS_TENANT_NAME|quote }}
+
+# In addition to the owning entity (tenant), openstack stores the entity
+# performing the action as the **user**.
+export OS_USERNAME={{ OS_USERNAME|quote }}
+
+# With Keystone you pass the keystone password.
+export OS_PASSWORD={{ OS_PASSWORD|quote }}
+
diff --git a/moonshot-kernel.configure b/moonshot-kernel.configure
new file mode 100644
index 00000000..11d01751
--- /dev/null
+++ b/moonshot-kernel.configure
@@ -0,0 +1,33 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to convert a plain
+# kernel Image to uImage, for an HP Moonshot m400 cartridge
+
+set -eu
+
+case "$MOONSHOT_KERNEL" in
+ True|yes)
+ echo "Converting kernel image for Moonshot"
+ mkimage -A arm -O linux -C none -T kernel -a 0x00080000 \
+ -e 0x00080000 -n Linux -d "$1/boot/vmlinux" "$1/boot/uImage"
+ ;;
+ *)
+        echo Unrecognised value "$MOONSHOT_KERNEL" for MOONSHOT_KERNEL
+ exit 1
+ ;;
+esac
diff --git a/moonshot/boot/m400-1003.dtb b/moonshot/boot/m400-1003.dtb
new file mode 100644
index 00000000..d6fd83ee
--- /dev/null
+++ b/moonshot/boot/m400-1003.dtb
Binary files differ
diff --git a/moonshot/manifest b/moonshot/manifest
new file mode 100644
index 00000000..dd80fe49
--- /dev/null
+++ b/moonshot/manifest
@@ -0,0 +1,2 @@
+0040755 0 0 /boot
+0100744 0 0 /boot/m400-1003.dtb
diff --git a/nfsboot-server.configure b/nfsboot-server.configure
new file mode 100755
index 00000000..9fb48096
--- /dev/null
+++ b/nfsboot-server.configure
@@ -0,0 +1,58 @@
+#!/bin/sh
+#
+# Copyright (C) 2013-2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+#
+# This is a "morph deploy" configuration extension to set up a server for
+# booting over nfs and tftp.
+set -e
+
+ROOT="$1"
+
+##########################################################################
+
+nfsboot_root=/srv/nfsboot
+tftp_root="$nfsboot_root"/tftp
+nfs_root="$nfsboot_root"/nfs
+mkdir -p "$ROOT$tftp_root" "$ROOT$nfs_root"
+
+install -D /dev/stdin "$ROOT/usr/lib/systemd/system/nfsboot-tftp.service" <<EOF
+[Unit]
+Description=tftp service for booting kernels
+After=network.target
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/udpsvd -E 0 69 /usr/sbin/tftpd $tftp_root
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+for prefix in / /usr; do
+ for unit in nfsboot-tftp.service nfs-server.service; do
+ unit_path="${prefix}/lib/systemd/system/$unit"
+ if [ -e "$ROOT/$unit_path" ]; then
+ ln -s "../../../../$unit_path" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$unit"
+ fi
+ done
+done
+
+pxelinux_file="$ROOT/usr/share/syslinux/pxelinux.0"
+if [ -e "$pxelinux_file" ]; then
+ cp "$pxelinux_file" "$ROOT$tftp_root/pxelinux.0"
+fi
diff --git a/openstack-ceilometer.configure b/openstack-ceilometer.configure
new file mode 100644
index 00000000..9c0b7b6d
--- /dev/null
+++ b/openstack-ceilometer.configure
@@ -0,0 +1,120 @@
+#!/bin/sh
+
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unnaceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
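+# check_bool VAR normalises the deployment variable VAR: "True" or an empty
+# value becomes the shell command true, "False" becomes false, and anything
+# else aborts this configure extension.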
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+ unnaceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool CEILOMETER_ENABLE_CONTROLLER
+check_bool CEILOMETER_ENABLE_COMPUTE
+
+if ! "$CEILOMETER_ENABLE_CONTROLLER" && \
+ ! "$CEILOMETER_ENABLE_COMPUTE"; then
+ exit 0
+fi
+
+if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \
+ -z "$CEILOMETER_SERVICE_USER" -o \
+ -z "$CEILOMETER_SERVICE_PASSWORD" -o \
+ -z "$CEILOMETER_DB_USER" -o \
+ -z "$CEILOMETER_DB_PASSWORD" -o \
+ -z "$METERING_SECRET" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" ]; then
+ echo Some options required for Ceilometer were defined, but not all.
+ exit 1
+fi
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+if "$CEILOMETER_ENABLE_COMPUTE" || "$CEILOMETER_ENABLE_CONTROLLER"; then
+ enable openstack-ceilometer-config-setup
+fi
+if "$CEILOMETER_ENABLE_COMPUTE"; then
+ enable openstack-ceilometer-compute
+fi
+if "$CEILOMETER_ENABLE_CONTROLLER"; then
+ enable openstack-ceilometer-db-setup
+ enable openstack-ceilometer-api
+ enable openstack-ceilometer-collector
+ enable openstack-ceilometer-notification
+ enable openstack-ceilometer-central
+ enable openstack-ceilometer-alarm-evaluator
+ enable openstack-ceilometer-alarm-notifier
+fi
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/ceilometer.conf"
+import os, sys, yaml
+
+ceilometer_configuration={
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+ 'CEILOMETER_SERVICE_PASSWORD': os.environ['CEILOMETER_SERVICE_PASSWORD'],
+ 'CEILOMETER_SERVICE_USER': os.environ['CEILOMETER_SERVICE_USER'],
+ 'CEILOMETER_DB_USER': os.environ['CEILOMETER_DB_USER'],
+ 'CEILOMETER_DB_PASSWORD': os.environ['CEILOMETER_DB_PASSWORD'],
+ 'METERING_SECRET': os.environ['METERING_SECRET'],
+ 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'],
+ 'RABBITMQ_USER': os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+}
+
+yaml.dump(ceilometer_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/openstack-cinder.configure b/openstack-cinder.configure
new file mode 100644
index 00000000..4c32e11a
--- /dev/null
+++ b/openstack-cinder.configure
@@ -0,0 +1,125 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unnaceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+ unnaceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool CINDER_ENABLE_CONTROLLER
+check_bool CINDER_ENABLE_COMPUTE
+check_bool CINDER_ENABLE_STORAGE
+
+if ! "$CINDER_ENABLE_CONTROLLER" && \
+ ! "$CINDER_ENABLE_COMPUTE" && \
+ ! "$CINDER_ENABLE_STORAGE"; then
+ exit 0
+fi
+
+if [ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \
+ -z "$CINDER_DB_USER" -o \
+ -z "$CINDER_DB_PASSWORD" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" -o \
+ -z "$CINDER_SERVICE_USER" -o \
+ -z "$CINDER_SERVICE_PASSWORD" -o \
+ -z "$CINDER_DEVICE" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" ]; then
+ echo Some options required for Cinder were defined, but not all.
+ exit 1
+fi
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+if "$CINDER_ENABLE_COMPUTE" || "$CINDER_ENABLE_STORAGE"; then
+ enable iscsi-setup
+ enable target #target.service!
+ enable iscsid
+fi
+if "$CINDER_ENABLE_COMPUTE" || "$CINDER_ENABLE_CONTROLLER" || "$CINDER_ENABLE_STORAGE"; then
+ enable openstack-cinder-config-setup
+fi
+if "$CINDER_ENABLE_STORAGE"; then
+ enable openstack-cinder-lv-setup
+ enable lvm2-lvmetad
+ enable openstack-cinder-volume
+ enable openstack-cinder-backup
+ enable openstack-cinder-scheduler
+fi
+if "$CINDER_ENABLE_CONTROLLER"; then
+ enable openstack-cinder-db-setup
+ enable openstack-cinder-api
+fi
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/cinder.conf"
+import os, sys, yaml
+
+cinder_configuration={
+ 'RABBITMQ_HOST':os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_PORT':os.environ['RABBITMQ_PORT'],
+ 'RABBITMQ_USER':os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD':os.environ['RABBITMQ_PASSWORD'],
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN':os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+ 'CINDER_DB_USER':os.environ['CINDER_DB_USER'],
+ 'CINDER_DB_PASSWORD':os.environ['CINDER_DB_PASSWORD'],
+ 'CONTROLLER_HOST_ADDRESS':os.environ['CONTROLLER_HOST_ADDRESS'],
+ 'CINDER_SERVICE_USER':os.environ['CINDER_SERVICE_USER'],
+ 'CINDER_SERVICE_PASSWORD':os.environ['CINDER_SERVICE_PASSWORD'],
+ 'CINDER_DEVICE':os.environ['CINDER_DEVICE'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS':os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+}
+
+yaml.dump(cinder_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/openstack-glance.configure b/openstack-glance.configure
new file mode 100644
index 00000000..5da08895
--- /dev/null
+++ b/openstack-glance.configure
@@ -0,0 +1,101 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unnaceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+ unnaceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool GLANCE_ENABLE_SERVICE
+
+if ! "$GLANCE_ENABLE_SERVICE"; then
+ exit 0
+fi
+
+if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \
+ -z "$GLANCE_SERVICE_USER" -o \
+ -z "$GLANCE_SERVICE_PASSWORD" -o \
+ -z "$GLANCE_DB_USER" -o \
+ -z "$GLANCE_DB_PASSWORD" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" ]; then
+ echo Some options required for Glance were defined, but not all.
+ exit 1
+fi
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+enable openstack-glance-setup
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/glance.conf"
+import os, sys, yaml
+
+glance_configuration={
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+ 'GLANCE_SERVICE_PASSWORD': os.environ['GLANCE_SERVICE_PASSWORD'],
+ 'GLANCE_SERVICE_USER': os.environ['GLANCE_SERVICE_USER'],
+ 'GLANCE_DB_USER': os.environ['GLANCE_DB_USER'],
+ 'GLANCE_DB_PASSWORD': os.environ['GLANCE_DB_PASSWORD'],
+ 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'],
+ 'RABBITMQ_USER': os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+}
+
+yaml.dump(glance_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/openstack-ironic.configure b/openstack-ironic.configure
new file mode 100644
index 00000000..962bbcd1
--- /dev/null
+++ b/openstack-ironic.configure
@@ -0,0 +1,155 @@
+#!/bin/sh
+
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unnaceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+ unnaceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool IRONIC_ENABLE_SERVICE
+
+if ! "$IRONIC_ENABLE_SERVICE"; then
+ exit 0
+fi
+
+if [ -z "$IRONIC_SERVICE_USER" -o \
+ -z "$IRONIC_SERVICE_PASSWORD" -o \
+ -z "$IRONIC_DB_USER" -o \
+ -z "$IRONIC_DB_PASSWORD" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then
+ echo Some options required for Ironic were defined, but not all.
+ exit 1
+fi
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+enable openstack-ironic-setup
+enable iscsi-setup
+enable target #target.service!
+enable iscsid
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/ironic.conf"
+import os, sys, yaml
+
+ironic_configuration={
+ 'IRONIC_SERVICE_USER': os.environ['IRONIC_SERVICE_USER'],
+ 'IRONIC_SERVICE_PASSWORD': os.environ['IRONIC_SERVICE_PASSWORD'],
+ 'IRONIC_DB_USER': os.environ['IRONIC_DB_USER'],
+ 'IRONIC_DB_PASSWORD': os.environ['IRONIC_DB_PASSWORD'],
+ 'RABBITMQ_HOST':os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_PORT':os.environ['RABBITMQ_PORT'],
+ 'RABBITMQ_USER':os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD':os.environ['RABBITMQ_PASSWORD'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+
+}
+
+yaml.dump(ironic_configuration, sys.stdout, default_flow_style=False)
+EOF
+
+##########################################################################
+# Configure the TFTP service #
+##########################################################################
+
+tftp_root="/srv/tftp_root/" # trailing slash is essential
+mkdir -p "$ROOT/$tftp_root"
+
+install -D /dev/stdin -m 644 "$ROOT/usr/lib/systemd/system/tftp-hpa.service" << 'EOF'
+[Unit]
+Description=tftp service for booting kernels
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=simple
+EnvironmentFile=/etc/tftp-hpa.conf
+ExecStart=/usr/sbin/in.tftpd $TFTP_OPTIONS ${TFTP_ROOT}
+StandardInput=socket
+StandardOutput=inherit
+StandardError=journal
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+install -D /dev/stdin -m 644 "$ROOT/usr/lib/systemd/system/tftp-hpa.socket" << EOF
+[Unit]
+Description=Tftp server activation socket
+
+[Socket]
+ListenDatagram=$MANAGEMENT_INTERFACE_IP_ADDRESS:69
+FreeBind=yes
+
+[Install]
+WantedBy=sockets.target
+EOF
+
+install -D -m 644 /dev/stdin "$ROOT"/etc/tftp-hpa.conf << EOF
+TFTP_ROOT=$tftp_root
+TFTP_OPTIONS="-v -v -v -v -v --map-file $tftp_root/map-file"
+EOF
+
+install -D /dev/stdin -m 644 "$ROOT/$tftp_root"/map-file << EOF
+r ^([^/]) $tftp_root\1
+r ^/tftpboot/ $tftp_root\2
+EOF
+
+cp "$ROOT"/usr/share/syslinux/pxelinux.0 "$ROOT/$tftp_root"
diff --git a/openstack-keystone.configure b/openstack-keystone.configure
new file mode 100644
index 00000000..6b011b14
--- /dev/null
+++ b/openstack-keystone.configure
@@ -0,0 +1,123 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unnaceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+ unnaceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool KEYSTONE_ENABLE_SERVICE
+
+if ! "$KEYSTONE_ENABLE_SERVICE"; then
+ exit 0
+fi
+
+if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \
+ -z "$KEYSTONE_ADMIN_PASSWORD" -o \
+ -z "$KEYSTONE_DB_USER" -o \
+ -z "$KEYSTONE_DB_PASSWORD" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" ]; then
+ echo Some options required for Keystone were defined, but not all.
+ exit 1
+fi
+
+python <<'EOF'
+import socket
+import sys
+import os
+
+try:
+ socket.inet_pton(socket.AF_INET, os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'])
+except:
+ print "Error: MANAGEMENT_INTERFACE_IP_ADDRESS is not a valid IP"
+ sys.exit(1)
+EOF
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+enable openstack-keystone-setup
+enable openstack-horizon-setup
+enable postgres-server-setup
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/keystone.conf"
+import os, sys, yaml
+
+keystone_configuration={
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+ 'KEYSTONE_ADMIN_PASSWORD': os.environ['KEYSTONE_ADMIN_PASSWORD'],
+ 'KEYSTONE_DB_USER': os.environ['KEYSTONE_DB_USER'],
+ 'KEYSTONE_DB_PASSWORD': os.environ['KEYSTONE_DB_PASSWORD'],
+ 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'],
+ 'RABBITMQ_USER': os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+}
+
+yaml.dump(keystone_configuration, sys.stdout, default_flow_style=False)
+EOF
+
+python << 'EOF' > "$OPENSTACK_DATA/postgres.conf"
+import os, sys, yaml
+
+postgres_configuration={
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+}
+
+yaml.dump(postgres_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/openstack-network.configure b/openstack-network.configure
new file mode 100644
index 00000000..10be5a1c
--- /dev/null
+++ b/openstack-network.configure
@@ -0,0 +1,50 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+###################
+# Enable services #
+###################
+
+enable openvswitch-setup
+enable openstack-network-setup
+
+##########################################################################
+# Generate config variable shell snippet
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/network.conf"
+import os, sys, yaml
+
+network_configuration = {}
+
+optional_keys = ('EXTERNAL_INTERFACE',)
+
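+# Only copy optional settings that are actually set in the environment;
+# EXTERNAL_INTERFACE may legitimately be absent.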
+network_configuration.update((k, os.environ[k]) for k in optional_keys if k in os.environ)
+
+yaml.dump(network_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/openstack-neutron.configure b/openstack-neutron.configure
new file mode 100644
index 00000000..210222db
--- /dev/null
+++ b/openstack-neutron.configure
@@ -0,0 +1,138 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/openstack-neutron-$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-neutron-$1.service"
+}
+
+unnaceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+ unnaceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool NEUTRON_ENABLE_CONTROLLER
+check_bool NEUTRON_ENABLE_MANAGER
+check_bool NEUTRON_ENABLE_AGENT
+
+if ! "$NEUTRON_ENABLE_CONTROLLER" && \
+ ! "$NEUTRON_ENABLE_MANAGER" && \
+ ! "$NEUTRON_ENABLE_AGENT"; then
+ exit 0
+fi
+
+if [ -z "$NEUTRON_SERVICE_USER" -o \
+ -z "$NEUTRON_SERVICE_PASSWORD" -o \
+ -z "$NEUTRON_DB_USER" -o \
+ -z "$NEUTRON_DB_PASSWORD" -o \
+ -z "$METADATA_PROXY_SHARED_SECRET" -o \
+ -z "$NOVA_SERVICE_USER" -o \
+ -z "$NOVA_SERVICE_PASSWORD" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then
+ echo Some options required for Neutron were defined, but not all.
+ exit 1
+fi
+
+#############################################
+# Ensure /var/run is an appropriate symlink #
+#############################################
+
+if ! link="$(readlink "$ROOT/var/run")" || [ "$link" != ../run ]; then
+ rm -rf "$ROOT/var/run"
+ ln -s ../run "$ROOT/var/run"
+fi
+
+###################
+# Enable services #
+###################
+
+if "$NEUTRON_ENABLE_CONTROLLER"; then
+ enable config-setup
+ enable db-setup
+ enable server
+fi
+
+if "$NEUTRON_ENABLE_MANAGER"; then
+ enable config-setup
+ enable ovs-cleanup
+ enable dhcp-agent
+ enable l3-agent
+ enable plugin-openvswitch-agent
+ enable metadata-agent
+fi
+
+if "$NEUTRON_ENABLE_AGENT"; then
+ enable config-setup
+ enable plugin-openvswitch-agent
+fi
+
+##########################################################################
+# Generate config variable shell snippet
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/neutron.conf"
+import os, sys, yaml
+
+nova_configuration={
+ 'NEUTRON_SERVICE_USER': os.environ['NEUTRON_SERVICE_USER'],
+ 'NEUTRON_SERVICE_PASSWORD': os.environ['NEUTRON_SERVICE_PASSWORD'],
+ 'NEUTRON_DB_USER': os.environ['NEUTRON_DB_USER'],
+ 'NEUTRON_DB_PASSWORD': os.environ['NEUTRON_DB_PASSWORD'],
+ 'METADATA_PROXY_SHARED_SECRET': os.environ['METADATA_PROXY_SHARED_SECRET'],
+ 'NOVA_SERVICE_USER': os.environ['NOVA_SERVICE_USER'],
+ 'NOVA_SERVICE_PASSWORD': os.environ['NOVA_SERVICE_PASSWORD'],
+ 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_USER': os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'],
+ 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+}
+
+yaml.dump(nova_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/openstack-nova.configure b/openstack-nova.configure
new file mode 100644
index 00000000..213f1852
--- /dev/null
+++ b/openstack-nova.configure
@@ -0,0 +1,168 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/openstack-nova-$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-nova-$1.service"
+}
+
+unnaceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+ unnaceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool NOVA_ENABLE_CONTROLLER
+check_bool NOVA_ENABLE_COMPUTE
+
+if ! "$NOVA_ENABLE_CONTROLLER" && \
+ ! "$NOVA_ENABLE_COMPUTE"; then
+ exit 0
+fi
+
+if [ -z "$NOVA_SERVICE_USER" -o \
+ -z "$NOVA_SERVICE_PASSWORD" -o \
+ -z "$NOVA_DB_USER" -o \
+ -z "$NOVA_DB_PASSWORD" -o \
+ -z "$NOVA_VIRT_TYPE" -o \
+ -z "$NEUTRON_SERVICE_USER" -o \
+ -z "$NEUTRON_SERVICE_PASSWORD" -o \
+ -z "$IRONIC_SERVICE_USER" -a \
+ -z "$IRONIC_SERVICE_PASSWORD" -a \
+ -z "$METADATA_PROXY_SHARED_SECRET" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then
+ echo Some options required for Nova were defined, but not all.
+ exit 1
+fi
+
+###############################################
+# Enable libvirtd and libvirt-guests services #
+###############################################
+
+wants_dir="$ROOT"/usr/lib/systemd/system/multi-user.target.wants
+mkdir -p "$wants_dir"
+mkdir -p "$ROOT"/var/lock/subsys
+ln -sf ../libvirtd.service "$wants_dir/libvirtd.service"
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+if "$NOVA_ENABLE_CONTROLLER" || "$NOVA_ENABLE_COMPUTE"; then
+ enable config-setup
+fi
+if "$NOVA_ENABLE_CONTROLLER" && ! "$NOVA_ENABLE_COMPUTE"; then
+ enable conductor
+fi
+if "$NOVA_ENABLE_COMPUTE"; then
+ enable compute
+fi
+if "$NOVA_ENABLE_CONTROLLER"; then
+ for service in db-setup api cert consoleauth novncproxy scheduler serialproxy; do
+ enable "$service"
+ done
+fi
+
+##########################################################################
+# Change the IP range of the internal libvirt network to avoid clashes
+# with the eth0 IP range
+##########################################################################
+
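+# For example, the default network's 192.168.122.1 becomes 192.168.1.1, so
+# the libvirt NAT range no longer overlaps a 192.168.122.x address on eth0.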
+sed -i "s/192\.168\.122\./192\.168\.1\./g" \
+ "$ROOT"/etc/libvirt/qemu/networks/default.xml
+
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+case "$NOVA_BAREMETAL_SCHEDULING" in
+ True|true|yes)
+ export COMPUTE_MANAGER=ironic.nova.compute.manager.ClusteredComputeManager
+ export RESERVED_HOST_MEMORY_MB=0
+ export SCHEDULER_HOST_MANAGER=nova.scheduler.ironic_host_manager.IronicHostManager
+ export RAM_ALLOCATION_RATIO=1.0
+ export COMPUTE_DRIVER=nova.virt.ironic.IronicDriver
+ ;;
+ *)
+ export COMPUTE_MANAGER=nova.compute.manager.ComputeManager
+ export RESERVED_HOST_MEMORY_MB=512
+ export SCHEDULER_HOST_MANAGER=nova.scheduler.host_manager.HostManager
+ export RAM_ALLOCATION_RATIO=1.5
+ export COMPUTE_DRIVER=libvirt.LibvirtDriver
+ ;;
+esac
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/nova.conf"
+import os, sys, yaml
+
+nova_configuration={
+ 'NOVA_SERVICE_USER': os.environ['NOVA_SERVICE_USER'],
+ 'NOVA_SERVICE_PASSWORD': os.environ['NOVA_SERVICE_PASSWORD'],
+ 'NOVA_DB_USER': os.environ['NOVA_DB_USER'],
+ 'NOVA_DB_PASSWORD': os.environ['NOVA_DB_PASSWORD'],
+ 'NOVA_VIRT_TYPE': os.environ['NOVA_VIRT_TYPE'],
+ 'COMPUTE_MANAGER': os.environ['COMPUTE_MANAGER'],
+ 'RESERVED_HOST_MEMORY_MB': os.environ['RESERVED_HOST_MEMORY_MB'],
+ 'SCHEDULER_HOST_MANAGER': os.environ['SCHEDULER_HOST_MANAGER'],
+ 'RAM_ALLOCATION_RATIO': os.environ['RAM_ALLOCATION_RATIO'],
+ 'COMPUTE_DRIVER': os.environ['COMPUTE_DRIVER'],
+ 'NEUTRON_SERVICE_USER': os.environ['NEUTRON_SERVICE_USER'],
+ 'NEUTRON_SERVICE_PASSWORD': os.environ['NEUTRON_SERVICE_PASSWORD'],
+ 'IRONIC_SERVICE_USER': os.environ['IRONIC_SERVICE_USER'],
+ 'IRONIC_SERVICE_PASSWORD': os.environ['IRONIC_SERVICE_PASSWORD'],
+ 'METADATA_PROXY_SHARED_SECRET': os.environ['METADATA_PROXY_SHARED_SECRET'],
+ 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_USER': os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'],
+ 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+}
+
+yaml.dump(nova_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/openstack-swift-controller.configure b/openstack-swift-controller.configure
new file mode 100644
index 00000000..424ab57b
--- /dev/null
+++ b/openstack-swift-controller.configure
@@ -0,0 +1,49 @@
+#!/bin/bash
+#
+# Copyright © 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+set -e
+
+export ROOT="$1"
+
+MANDATORY_OPTIONS="SWIFT_ADMIN_PASSWORD KEYSTONE_TEMPORARY_ADMIN_TOKEN"
+
+for option in $MANDATORY_OPTIONS
+do
+ if ! [[ -v $option ]]
+ then
+ missing_option=True
+ echo "Required option $option isn't set!" >&2
+ fi
+done
+
+if [[ $missing_option = True ]]; then exit 1; fi
+
+mkdir -p "$ROOT/usr/lib/systemd/system/multi-user.target.wants" # ensure this exists before we make symlinks
+
+ln -s "/usr/lib/systemd/system/swift-controller-setup.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-controller-setup.service"
+ln -s "/usr/lib/systemd/system/memcached.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/memcached.service"
+ln -s "/usr/lib/systemd/system/openstack-swift-proxy.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-proxy.service"
+
+cat << EOF > "$ROOT"/usr/share/openstack/swift-controller-vars.yml
+---
+SWIFT_ADMIN_PASSWORD: $SWIFT_ADMIN_PASSWORD
+MANAGEMENT_INTERFACE_IP_ADDRESS: $MANAGEMENT_INTERFACE_IP_ADDRESS
+KEYSTONE_TEMPORARY_ADMIN_TOKEN: $KEYSTONE_TEMPORARY_ADMIN_TOKEN
+EOF
diff --git a/openstack/etc/horizon/apache-horizon.conf b/openstack/etc/horizon/apache-horizon.conf
new file mode 100644
index 00000000..ea88897a
--- /dev/null
+++ b/openstack/etc/horizon/apache-horizon.conf
@@ -0,0 +1,34 @@
+<VirtualHost *:80>
+ WSGIScriptAlias /horizon /var/lib/horizon/openstack_dashboard/django.wsgi
+ WSGIDaemonProcess horizon user=horizon group=horizon processes=3 threads=10 home=/var/lib/horizon display-name=horizon
+ WSGIApplicationGroup %{GLOBAL}
+
+ RedirectMatch ^/$ /horizon/
+
+ SetEnv APACHE_RUN_USER apache
+ SetEnv APACHE_RUN_GROUP apache
+ WSGIProcessGroup horizon
+
+ DocumentRoot /var/lib/horizon/.blackhole
+ Alias /static /var/lib/horizon/openstack_dashboard/static
+
+ <Directory /var/lib/horizon/openstack_dashboard >
+ Options Indexes FollowSymLinks MultiViews
+ AllowOverride None
+ # Apache 2.4 uses mod_authz_host for access control now (instead of
+ # "Allow")
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ </Directory>
+
+ ErrorLog /var/log/httpd/horizon_error.log
+ LogLevel warn
+ CustomLog /var/log/httpd/horizon_access.log combined
+</VirtualHost>
+
+WSGISocketPrefix /var/run/httpd
diff --git a/openstack/etc/horizon/openstack_dashboard/local_settings.py b/openstack/etc/horizon/openstack_dashboard/local_settings.py
new file mode 100644
index 00000000..febc3e70
--- /dev/null
+++ b/openstack/etc/horizon/openstack_dashboard/local_settings.py
@@ -0,0 +1,551 @@
+import os
+
+from django.utils.translation import ugettext_lazy as _
+
+from openstack_dashboard import exceptions
+
+DEBUG = True
+TEMPLATE_DEBUG = DEBUG
+
+STATIC_ROOT = "/var/lib/horizon/openstack_dashboard/static"
+
+# Required for Django 1.5.
+# If horizon is running in production (DEBUG is False), set this
+# with the list of host/domain names that the application can serve.
+# For more information see:
+# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
+#ALLOWED_HOSTS = ['horizon.example.com', ]
+ALLOWED_HOSTS = ['*']
+
+# Set SSL proxy settings:
+# For Django 1.4+ pass this header from the proxy after terminating the SSL,
+# and don't forget to strip it from the client's request.
+# For more information see:
+# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
+# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
+
+# If Horizon is being served through SSL, then uncomment the following two
+# settings to better secure the cookies from security exploits
+#CSRF_COOKIE_SECURE = True
+#SESSION_COOKIE_SECURE = True
+
+# Overrides for OpenStack API versions. Use this setting to force the
+# OpenStack dashboard to use a specific API version for a given service API.
+# NOTE: The version should be formatted as it appears in the URL for the
+# service API. For example, The identity service APIs have inconsistent
+# use of the decimal point, so valid options would be "2.0" or "3".
+# OPENSTACK_API_VERSIONS = {
+# "data_processing": 1.1,
+# "identity": 3,
+# "volume": 2
+# }
+
+# Set this to True if running on multi-domain model. When this is enabled, it
+# will require user to enter the Domain name in addition to username for login.
+# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
+
+# Overrides the default domain used when running on single-domain model
+# with Keystone V3. All entities will be created in the default domain.
+# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
+
+# Set Console type:
+# valid options would be "AUTO"(default), "VNC", "SPICE", "RDP" or None
+# Set to None explicitly if you want to deactivate the console.
+# CONSOLE_TYPE = "AUTO"
+
+# Default OpenStack Dashboard configuration.
+HORIZON_CONFIG = {
+ 'user_home': 'openstack_dashboard.views.get_user_home',
+ 'ajax_queue_limit': 10,
+ 'auto_fade_alerts': {
+ 'delay': 3000,
+ 'fade_duration': 1500,
+ 'types': ['alert-success', 'alert-info']
+ },
+ 'help_url': "http://docs.openstack.org",
+ 'exceptions': {'recoverable': exceptions.RECOVERABLE,
+ 'not_found': exceptions.NOT_FOUND,
+ 'unauthorized': exceptions.UNAUTHORIZED},
+ 'modal_backdrop': 'static',
+ 'angular_modules': [],
+ 'js_files': [],
+}
+
+# Specify a regular expression to validate user passwords.
+# HORIZON_CONFIG["password_validator"] = {
+# "regex": '.*',
+# "help_text": _("Your password does not meet the requirements.")
+# }
+
+# Disable simplified floating IP address management for deployments with
+# multiple floating IP pools or complex network requirements.
+# HORIZON_CONFIG["simple_ip_management"] = False
+
+# Turn off browser autocompletion for forms including the login form and
+# the database creation workflow if so desired.
+# HORIZON_CONFIG["password_autocomplete"] = "off"
+
+# Setting this to True will disable the reveal button for password fields,
+# including on the login form.
+# HORIZON_CONFIG["disable_password_reveal"] = False
+
+#LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
+
+LOCAL_PATH = "/var/lib/horizon"
+
+# Set custom secret key:
+# You can either set it to a specific value or you can let horizon generate a
+# default secret key that is unique on this machine, i.e. regardless of the
+# number of Python WSGI workers (if used behind Apache+mod_wsgi). However, there
+# may be situations where you would want to set this explicitly, e.g. when
+# multiple dashboard instances are distributed on different machines (usually
+# behind a load-balancer). Either you have to make sure that a session gets all
+# requests routed to the same dashboard instance or you set the same SECRET_KEY
+# for all of them.
+from horizon.utils import secret_key
+SECRET_KEY = secret_key.generate_or_read_from_file(
+ os.path.join(LOCAL_PATH, '.secret_key_store'))
+
+# We recommend you use memcached for development; otherwise after every reload
+# of the django development server, you will have to login again. To use
+# memcached set CACHES to something like
+CACHES = {
+ 'default': {
+ 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
+ 'LOCATION': '127.0.0.1:11211',
+ }
+}
+
+#CACHES = {
+# 'default': {
+# 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
+# }
+#}
+
+# Send email to the console by default
+EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
+# Or send them to /dev/null
+#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
+
+# Configure these for your outgoing email host
+# EMAIL_HOST = 'smtp.my-company.com'
+# EMAIL_PORT = 25
+# EMAIL_HOST_USER = 'djangomail'
+# EMAIL_HOST_PASSWORD = 'top-secret!'
+
+# For multiple regions uncomment this configuration, and add (endpoint, title).
+# AVAILABLE_REGIONS = [
+# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
+# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
+# ]
+
+OPENSTACK_HOST = "127.0.0.1"
+OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
+OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
+
+# Disable SSL certificate checks (useful for self-signed certificates):
+# OPENSTACK_SSL_NO_VERIFY = True
+
+# The CA certificate to use to verify SSL connections
+# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
+
+# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
+# capabilities of the auth backend for Keystone.
+# If Keystone has been configured to use LDAP as the auth backend then set
+# can_edit_user to False and name to 'ldap'.
+#
+# TODO(tres): Remove these once Keystone has an API to identify auth backend.
+OPENSTACK_KEYSTONE_BACKEND = {
+ 'name': 'native',
+ 'can_edit_user': True,
+ 'can_edit_group': True,
+ 'can_edit_project': True,
+ 'can_edit_domain': True,
+ 'can_edit_role': True
+}
+
+#Setting this to True, will add a new "Retrieve Password" action on instance,
+#allowing Admin session password retrieval/decryption.
+#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
+
+# The Xen Hypervisor has the ability to set the mount point for volumes
+# attached to instances (other Hypervisors currently do not). Setting
+# can_set_mount_point to True will add the option to set the mount point
+# from the UI.
+OPENSTACK_HYPERVISOR_FEATURES = {
+ 'can_set_mount_point': False,
+ 'can_set_password': False,
+}
+
+# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional
+# services provided by cinder that are not exposed by its extension API.
+OPENSTACK_CINDER_FEATURES = {
+ 'enable_backup': False,
+}
+
+# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
+# services provided by neutron. Options currently available are load
+# balancer service, security groups, quotas, VPN service.
+OPENSTACK_NEUTRON_NETWORK = {
+ 'enable_router': True,
+ 'enable_quotas': True,
+ 'enable_ipv6': True,
+ 'enable_distributed_router': False,
+ 'enable_ha_router': False,
+ 'enable_lb': True,
+ 'enable_firewall': True,
+ 'enable_vpn': True,
+ # The profile_support option is used to detect if an external router can be
+ # configured via the dashboard. When using specific plugins the
+ # profile_support can be turned on if needed.
+ 'profile_support': None,
+ #'profile_support': 'cisco',
+ # Set which provider network types are supported. Only the network types
+ # in this list will be available to choose from when creating a network.
+ # Network types include local, flat, vlan, gre, and vxlan.
+ 'supported_provider_types': ['*'],
+}
+
+# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
+# in the OpenStack Dashboard related to the Image service, such as the list
+# of supported image formats.
+# OPENSTACK_IMAGE_BACKEND = {
+# 'image_formats': [
+# ('', _('Select format')),
+# ('aki', _('AKI - Amazon Kernel Image')),
+# ('ami', _('AMI - Amazon Machine Image')),
+# ('ari', _('ARI - Amazon Ramdisk Image')),
+# ('iso', _('ISO - Optical Disk Image')),
+# ('qcow2', _('QCOW2 - QEMU Emulator')),
+# ('raw', _('Raw')),
+# ('vdi', _('VDI')),
+# ('vhd', _('VHD')),
+# ('vmdk', _('VMDK'))
+# ]
+# }
+
+# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for
+# image custom property attributes that appear on image detail pages.
+IMAGE_CUSTOM_PROPERTY_TITLES = {
+ "architecture": _("Architecture"),
+ "kernel_id": _("Kernel ID"),
+ "ramdisk_id": _("Ramdisk ID"),
+ "image_state": _("Euca2ools state"),
+ "project_id": _("Project ID"),
+ "image_type": _("Image Type")
+}
+
+# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image
+# custom properties should not be displayed in the Image Custom Properties
+# table.
+IMAGE_RESERVED_CUSTOM_PROPERTIES = []
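+# For example (illustrative only), to hide the kernel and ramdisk IDs listed
+# in IMAGE_CUSTOM_PROPERTY_TITLES above:
+# IMAGE_RESERVED_CUSTOM_PROPERTIES = ['kernel_id', 'ramdisk_id']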
+
+# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
+# in the Keystone service catalog. Use this setting when Horizon is running
+# external to the OpenStack environment. The default is 'publicURL'.
+#OPENSTACK_ENDPOINT_TYPE = "publicURL"
+
+# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
+# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
+# in the Keystone service catalog. Use this setting when Horizon is running
+# external to the OpenStack environment. The default is None. This
+# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
+#SECONDARY_ENDPOINT_TYPE = "publicURL"
+
+# The number of objects (Swift containers/objects or images) to display
+# on a single page before providing a paging element (a "more" link)
+# to paginate results.
+API_RESULT_LIMIT = 1000
+API_RESULT_PAGE_SIZE = 20
+
+# Specify a maximum number of items to display in a dropdown.
+DROPDOWN_MAX_ITEMS = 30
+
+# The timezone of the server. This should correspond with the timezone
+# of your entire OpenStack installation, and hopefully be in UTC.
+TIME_ZONE = "UTC"
+
+# When launching an instance, the menu of available flavors is
+# sorted by RAM usage, ascending. If you would like a different sort order,
+# you can provide another flavor attribute as sorting key. Alternatively, you
+# can provide a custom callback method to use for sorting. You can also provide
+# a flag for reverse sort. For more info, see
+# http://docs.python.org/2/library/functions.html#sorted
+# CREATE_INSTANCE_FLAVOR_SORT = {
+# 'key': 'name',
+# # or
+# 'key': my_awesome_callback_method,
+# 'reverse': False,
+# }
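+# A minimal sketch of such a callback (hypothetical, not shipped with this
+# configuration), assuming the flavor objects expose a 'vcpus' attribute:
+# def my_awesome_callback_method(flavor):
+#     return flavor.vcpus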
+
+# The Horizon Policy Enforcement engine uses these values to load per service
+# policy rule files. The content of these files should match the files the
+# OpenStack services are using to determine role based access control in the
+# target installation.
+
+# Path to directory containing policy.json files
+#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
+# Map of local copy of service policy files
+#POLICY_FILES = {
+# 'identity': 'keystone_policy.json',
+# 'compute': 'nova_policy.json',
+# 'volume': 'cinder_policy.json',
+# 'image': 'glance_policy.json',
+# 'orchestration': 'heat_policy.json',
+# 'network': 'neutron_policy.json',
+#}
+
+# Trove user and database extension support. By default support for
+# creating users and databases on database instances is turned on.
+# To disable these extensions set the permission here to something
+# unusable such as ["!"].
+# TROVE_ADD_USER_PERMS = []
+# TROVE_ADD_DATABASE_PERMS = []
+
+LOGGING = {
+ 'version': 1,
+ # When set to True this will disable all logging except
+ # for loggers specified in this configuration dictionary. Note that
+ # if nothing is specified here and disable_existing_loggers is True,
+ # django.db.backends will still log unless it is disabled explicitly.
+ 'disable_existing_loggers': False,
+ 'handlers': {
+ 'null': {
+ 'level': 'DEBUG',
+ 'class': 'django.utils.log.NullHandler',
+ },
+ 'console': {
+ # Set the level to "DEBUG" for verbose output logging.
+ 'level': 'INFO',
+ 'class': 'logging.StreamHandler',
+ },
+ },
+ 'loggers': {
+ # Logging from django.db.backends is VERY verbose, send to null
+ # by default.
+ 'django.db.backends': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'requests': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'horizon': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'openstack_dashboard': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'novaclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'cinderclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'keystoneclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'glanceclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'neutronclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'heatclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'ceilometerclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'troveclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'swiftclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'openstack_auth': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'nose.plugins.manager': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'django': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'iso8601': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'scss': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ }
+}
+
+# 'direction' should not be specified for all_tcp/udp/icmp; it is chosen in
+# the security group rule form instead.
+SECURITY_GROUP_RULES = {
+ 'all_tcp': {
+ 'name': _('All TCP'),
+ 'ip_protocol': 'tcp',
+ 'from_port': '1',
+ 'to_port': '65535',
+ },
+ 'all_udp': {
+ 'name': _('All UDP'),
+ 'ip_protocol': 'udp',
+ 'from_port': '1',
+ 'to_port': '65535',
+ },
+ 'all_icmp': {
+ 'name': _('All ICMP'),
+ 'ip_protocol': 'icmp',
+ 'from_port': '-1',
+ 'to_port': '-1',
+ },
+ 'ssh': {
+ 'name': 'SSH',
+ 'ip_protocol': 'tcp',
+ 'from_port': '22',
+ 'to_port': '22',
+ },
+ 'smtp': {
+ 'name': 'SMTP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '25',
+ 'to_port': '25',
+ },
+ 'dns': {
+ 'name': 'DNS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '53',
+ 'to_port': '53',
+ },
+ 'http': {
+ 'name': 'HTTP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '80',
+ 'to_port': '80',
+ },
+ 'pop3': {
+ 'name': 'POP3',
+ 'ip_protocol': 'tcp',
+ 'from_port': '110',
+ 'to_port': '110',
+ },
+ 'imap': {
+ 'name': 'IMAP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '143',
+ 'to_port': '143',
+ },
+ 'ldap': {
+ 'name': 'LDAP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '389',
+ 'to_port': '389',
+ },
+ 'https': {
+ 'name': 'HTTPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '443',
+ 'to_port': '443',
+ },
+ 'smtps': {
+ 'name': 'SMTPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '465',
+ 'to_port': '465',
+ },
+ 'imaps': {
+ 'name': 'IMAPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '993',
+ 'to_port': '993',
+ },
+ 'pop3s': {
+ 'name': 'POP3S',
+ 'ip_protocol': 'tcp',
+ 'from_port': '995',
+ 'to_port': '995',
+ },
+ 'ms_sql': {
+ 'name': 'MS SQL',
+ 'ip_protocol': 'tcp',
+ 'from_port': '1433',
+ 'to_port': '1433',
+ },
+ 'mysql': {
+ 'name': 'MYSQL',
+ 'ip_protocol': 'tcp',
+ 'from_port': '3306',
+ 'to_port': '3306',
+ },
+ 'rdp': {
+ 'name': 'RDP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '3389',
+ 'to_port': '3389',
+ },
+}
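+# Additional entries follow the same shape; as an illustrative sketch (not
+# part of the defaults shipped here), a PostgreSQL rule could be added with:
+# 'postgresql': {
+#     'name': 'PostgreSQL',
+#     'ip_protocol': 'tcp',
+#     'from_port': '5432',
+#     'to_port': '5432',
+# },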
+
+# Deprecation Notice:
+#
+# The setting FLAVOR_EXTRA_KEYS has been deprecated.
+# Please load extra spec metadata into the Glance Metadata Definition Catalog.
+#
+# The sample quota definitions can be found in:
+# <glance_source>/etc/metadefs/compute-quota.json
+#
+# The metadata definition catalog supports CLI and API:
+# $glance --os-image-api-version 2 help md-namespace-import
+# $glance-manage db_load_metadefs <directory_with_definition_files>
+#
+# See Metadata Definitions on: http://docs.openstack.org/developer/glance/
+
+# Indicate to the Sahara data processing service whether or not
+# automatic floating IP allocation is in effect. If it is not
+# in effect, the user will be prompted to choose a floating IP
+# pool for use in their cluster. False by default. You would want
+# to set this to True if you were running Nova Networking with
+# auto_assign_floating_ip = True.
+# SAHARA_AUTO_IP_ALLOCATION_ENABLED = False
+
+# The hash algorithm to use for authentication tokens. This must
+# match the hash algorithm that the identity server and the
+# auth_token middleware are using. Allowed values are the
+# algorithms supported by Python's hashlib library.
+# OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5'
+LOGIN_URL='/horizon/auth/login/'
+LOGOUT_URL='/horizon/auth/logout/'
+LOGIN_REDIRECT_URL='/horizon/'
diff --git a/openstack/etc/tempest/tempest.conf b/openstack/etc/tempest/tempest.conf
new file mode 100644
index 00000000..05f0eca1
--- /dev/null
+++ b/openstack/etc/tempest/tempest.conf
@@ -0,0 +1,1116 @@
+[DEFAULT]
+
+#
+# From tempest.config
+#
+
+# Whether to disable inter-process locks (boolean value)
+#disable_process_locking = false
+
+# Directory to use for lock files. (string value)
+lock_path = /run/lock
+
+#
+# From tempest.config
+#
+
+# Print debugging output (set logging level to DEBUG instead of
+# default WARNING level). (boolean value)
+#debug = false
+
+# Print more verbose output (set logging level to INFO instead of
+# default WARNING level). (boolean value)
+#verbose = false
+
+#
+# From tempest.config
+#
+
+# The name of a logging configuration file. This file is appended to
+# any existing logging configuration files. For details about logging
+# configuration files, see the Python logging module documentation.
+# (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Format string for %%(asctime)s in log records. Default: %(default)s.
+# (string value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) The base directory used for relative --log-file paths.
+# (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# (Optional) Name of log file to output to. If no default is set,
+# logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# DEPRECATED. A logging.Formatter log message format string which may
+# use any of the available logging.LogRecord attributes. This option
+# is deprecated. Please use logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format = <None>
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility = LOG_USER
+
+# Use syslog for logging. Existing syslog format is DEPRECATED during
+# I, and will change in J to honor RFC5424. (boolean value)
+use_syslog = true
+
+# (Optional) Enables or disables syslog rfc5424 format for logging. If
+# enabled, prefixes the MSG part of the syslog message with APP-NAME
+# (RFC5424). The format without the APP-NAME is deprecated in I, and
+# will be removed in J. (boolean value)
+#use_syslog_rfc_format = false
+
+#
+# From tempest.config
+#
+
+# Log output to standard error. (boolean value)
+#use_stderr = true
+
+#
+# From tempest.config
+#
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+# The format for an instance that is passed with the log message.
+# (string value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message.
+# (string value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Format string to use for log messages without context. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Prefix each line of exception output with this format. (string
+# value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+
+[auth]
+
+#
+# From tempest.config
+#
+
+# Allows test cases to create/destroy tenants and users. This option
+# requires that OpenStack Identity API admin credentials are known. If
+# false, isolated test cases and parallel execution can still be achieved
+# by configuring a list of test accounts. (boolean value)
+# Deprecated group/name - [compute]/allow_tenant_isolation
+# Deprecated group/name - [orchestration]/allow_tenant_isolation
+allow_tenant_isolation = true
+
+# If set to True it enables the Accounts provider, which locks
+# credentials to allow for parallel execution with pre-provisioned
+# accounts. It can only be used to run tests that ensure credentials
+# cleanup happens. It requires at least `2 * CONC` distinct accounts
+# configured in `test_accounts_file`, with CONC == the number of
+# concurrent test processes. (boolean value)
+#locking_credentials_provider = false
+
+# Path to the yaml file that contains the list of credentials to use
+# for running tests (string value)
+#test_accounts_file = etc/accounts.yaml
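+# As a rough sketch only (the exact schema is defined by tempest, and the
+# names below are placeholders), such a YAML file lists one credential set
+# per entry:
+#   - username: 'tempest_user_1'
+#     tenant_name: 'tempest_tenant_1'
+#     password: 'secret'
+#   - username: 'tempest_user_2'
+#     tenant_name: 'tempest_tenant_2'
+#     password: 'secret'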
+
+
+[baremetal]
+
+#
+# From tempest.config
+#
+
+# Timeout for Ironic node to completely provision (integer value)
+#active_timeout = 300
+
+# Timeout for association of Nova instance and Ironic node (integer
+# value)
+#association_timeout = 30
+
+# Catalog type of the baremetal provisioning service (string value)
+#catalog_type = baremetal
+
+# Driver name which Ironic uses (string value)
+#driver = fake
+
+# Whether the Ironic nova-compute driver is enabled (boolean value)
+#driver_enabled = false
+
+# The endpoint type to use for the baremetal provisioning service
+# (string value)
+#endpoint_type = publicURL
+
+# Timeout for Ironic power transitions. (integer value)
+#power_timeout = 60
+
+# Timeout for unprovisioning an Ironic node. (integer value)
+#unprovision_timeout = 60
+
+
+[boto]
+
+#
+# From tempest.config
+#
+
+# AKI Kernel Image manifest (string value)
+#aki_manifest = cirros-0.3.0-x86_64-vmlinuz.manifest.xml
+
+# AMI Machine Image manifest (string value)
+#ami_manifest = cirros-0.3.0-x86_64-blank.img.manifest.xml
+
+# ARI Ramdisk Image manifest (string value)
+#ari_manifest = cirros-0.3.0-x86_64-initrd.manifest.xml
+
+# AWS Access Key (string value)
+#aws_access = <None>
+
+# AWS Secret Key (string value)
+#aws_secret = <None>
+
+# AWS Zone for EC2 tests (string value)
+#aws_zone = nova
+
+# Status Change Test Interval (integer value)
+#build_interval = 1
+
+# Status Change Timeout (integer value)
+#build_timeout = 60
+
+# EC2 URL (string value)
+#ec2_url = http://localhost:8773/services/Cloud
+
+# boto Http socket timeout (integer value)
+#http_socket_timeout = 3
+
+# Instance type (string value)
+#instance_type = m1.tiny
+
+# boto num_retries on error (integer value)
+#num_retries = 1
+
+# S3 Materials Path (string value)
+#s3_materials_path = /opt/stack/devstack/files/images/s3-materials/cirros-0.3.0
+
+# S3 URL (string value)
+#s3_url = http://localhost:8080
+
+
+[cli]
+
+#
+# From tempest.config
+#
+
+# directory where python client binaries are located (string value)
+cli_dir = /usr/bin
+
+# enable cli tests (boolean value)
+#enabled = true
+
+# Whether the tempest run location has access to the *-manage
+# commands. In a pure blackbox environment it will not. (boolean
+# value)
+#has_manage = true
+
+# Number of seconds to wait on a CLI timeout (integer value)
+#timeout = 15
+
+
+[compute]
+
+#
+# From tempest.config
+#
+
+# Time in seconds between build status checks. (integer value)
+#build_interval = 1
+
+# Timeout in seconds to wait for an instance to build. (integer value)
+#build_timeout = 300
+
+# Catalog type of the Compute service. (string value)
+#catalog_type = compute
+
+# Catalog type of the Compute v3 service. (string value)
+#catalog_v3_type = computev3
+
+# The endpoint type to use for the compute service. (string value)
+#endpoint_type = publicURL
+
+# Visible fixed network name (string value)
+#fixed_network_name = private
+
+# Valid primary flavor to use in tests. (string value)
+#flavor_ref = 1
+
+# Valid secondary flavor to be used in tests. (string value)
+#flavor_ref_alt = 2
+
+# Unallocated floating IP range, which will be used to test the
+# floating IP bulk feature for CRUD operation. (string value)
+#floating_ip_range = 10.0.0.0/29
+
+# Password used to authenticate to an instance using the alternate
+# image. (string value)
+#image_alt_ssh_password = password
+
+# User name used to authenticate to an instance using the alternate
+# image. (string value)
+#image_alt_ssh_user = root
+
+# Valid primary image reference to be used in tests. This is a
+# required option (string value)
+#image_ref = <None>
+
+# Valid secondary image reference to be used in tests. This is a
+# required option, but if only one image is available duplicate the
+# value of image_ref above (string value)
+#image_ref_alt = <None>
+
+# Password used to authenticate to an instance. (string value)
+#image_ssh_password = password
+
+# User name used to authenticate to an instance. (string value)
+#image_ssh_user = root
+
+# IP version used for SSH connections. (integer value)
+#ip_version_for_ssh = 4
+
+# Network used for SSH connections. (string value)
+#network_for_ssh = public
+
+# Path to a private key file for SSH access to remote hosts (string
+# value)
+#path_to_private_key = <None>
+
+# Timeout in seconds to wait for ping to succeed. (integer value)
+#ping_timeout = 120
+
+# Additional wait time for clean state, when there is no OS-EXT-STS
+# extension available (integer value)
+#ready_wait = 0
+
+# The compute region name to use. If empty, the value of
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
+
+# Should the tests ssh to instances? (boolean value)
+#run_ssh = false
+
+# Time in seconds before a shelved instance is eligible for removal from a
+# host. -1 means never offload; 0 means offload as soon as the instance is
+# shelved. This value should match the corresponding setting in nova.conf,
+# and some tests will run for up to this long. (integer value)
+#shelved_offload_time = 0
+
+# Auth method used to authenticate to the instance. Valid choices
+# are: keypair, configured, adminpass. keypair: start the servers with
+# an ssh keypair. configured: use the configured user and password.
+# adminpass: use the injected adminPass. disabled: avoid using ssh
+# when it is an option. (string value)
+#ssh_auth_method = keypair
+
+# Timeout in seconds to wait for output from ssh channel. (integer
+# value)
+#ssh_channel_timeout = 60
+
+# How to connect to the instance. fixed: use the first IP belonging to the
+# fixed network; floating: create and use a floating IP. (string value)
+#ssh_connect_method = fixed
+
+# Timeout in seconds to wait for authentication to succeed. (integer
+# value)
+#ssh_timeout = 300
+
+# User name used to authenticate to an instance. (string value)
+#ssh_user = root
+
+# Does SSH use Floating IPs? (boolean value)
+#use_floatingip_for_ssh = true
+
+# Expected device name when a volume is attached to an instance
+# (string value)
+#volume_device_name = vdb
+
+
+[compute-admin]
+
+#
+# From tempest.config
+#
+
+# Domain name for authentication as admin (Keystone V3). The same
+# domain applies to user and project (string value)
+#domain_name = <None>
+
+# API key to use when authenticating as admin. (string value)
+password = {{ NOVA_SERVICE_PASSWORD }}
+
+# Administrative Tenant name to use for Nova API requests. (string
+# value)
+tenant_name = service
+
+# Administrative Username to use for Nova API requests. (string value)
+username = {{ NOVA_SERVICE_USER }}
+
+
+[compute-feature-enabled]
+
+#
+# From tempest.config
+#
+
+# A list of enabled compute extensions, with a special entry 'all' which
+# indicates every extension is enabled. Each extension should be
+# specified by its alias name. An empty list indicates all extensions
+# are disabled (list value)
+#api_extensions = all
+
+# If false, skip all nova v3 tests. (boolean value)
+api_v3 = false
+
+# A list of enabled v3 extensions, with a special entry 'all' which
+# indicates every extension is enabled. Each extension should be
+# specified by its alias name. An empty list indicates all extensions
+# are disabled (list value)
+#api_v3_extensions = all
+
+# Does the test environment's block migration support cinder iSCSI
+# volumes? (boolean value)
+#block_migrate_cinder_iscsi = false
+
+# Does the test environment use block devices for live migration
+# (boolean value)
+#block_migration_for_live_migration = false
+
+# Does the test environment support changing the admin password?
+# (boolean value)
+#change_password = false
+
+# Does the test environment support obtaining instance serial console
+# output? (boolean value)
+#console_output = true
+
+# If false, skip disk config tests (boolean value)
+#disk_config = true
+
+# Enables returning of the instance password by the relevant server
+# API calls such as create, rebuild or rescue. (boolean value)
+#enable_instance_password = true
+
+# Does the test environment support dynamic network interface
+# attachment? (boolean value)
+#interface_attach = true
+
+# Does the test environment have live migration available? (boolean
+# value)
+#live_migration = false
+
+# Does the test environment support pausing? (boolean value)
+#pause = true
+
+# Enable RDP console. This configuration value should be the same as
+# [nova.rdp]->enabled in nova.conf (boolean value)
+#rdp_console = false
+
+# Does the test environment support instance rescue mode? (boolean
+# value)
+#rescue = true
+
+# Does the test environment support resizing? (boolean value)
+#resize = false
+
+# Does the test environment support shelving/unshelving? (boolean
+# value)
+#shelve = true
+
+# Does the test environment support creating snapshot images of
+# running instances? (boolean value)
+snapshot = true
+
+# Enable Spice console. This configuration value should be the same as
+# [nova.spice]->enabled in nova.conf (boolean value)
+spice_console = false
+
+# Does the test environment support suspend/resume? (boolean value)
+#suspend = true
+
+# Enable VNC console. This configuration value should be the same as
+# [nova.vnc]->vnc_enabled in nova.conf (boolean value)
+vnc_console = true
+
+# If false, skip all v2 API tests with XML (boolean value)
+#xml_api_v2 = true
+
+
+[dashboard]
+
+#
+# From tempest.config
+#
+
+# Where the dashboard can be found (string value)
+dashboard_url = http://{{ CONTROLLER_HOST_ADDRESS }}/horizon
+
+# Login page for the dashboard (string value)
+login_url = http://{{ CONTROLLER_HOST_ADDRESS }}/horizon/auth/login/
+
+
+[data_processing]
+
+#
+# From tempest.config
+#
+
+# Catalog type of the data processing service. (string value)
+#catalog_type = data_processing
+
+# The endpoint type to use for the data processing service. (string
+# value)
+#endpoint_type = publicURL
+
+
+[database]
+
+#
+# From tempest.config
+#
+
+# Catalog type of the Database service. (string value)
+#catalog_type = database
+
+# Current database version to use in database tests. (string value)
+#db_current_version = v1.0
+
+# Valid primary flavor to use in database tests. (string value)
+#db_flavor_ref = 1
+
+
+[debug]
+
+#
+# From tempest.config
+#
+
+# Enable diagnostic commands (boolean value)
+#enable = true
+
+# A regex to determine which requests should be traced. This is a
+# regex to match the caller for rest client requests to be able to
+# selectively trace calls out of specific classes and methods. It
+# largely exists for test development, and is not expected to be used
+# in a real deploy of tempest. This will be matched against the
+# discovered ClassName:method in the test environment. Expected
+# values for this field are: * ClassName:test_method_name - traces
+# one test_method * ClassName:setUp(Class) - traces specific setup
+# functions * ClassName:tearDown(Class) - traces specific teardown
+# functions * ClassName:_run_cleanups - traces the cleanup functions
+# If nothing is specified, this feature is not enabled. To trace
+# everything specify .* as the regex. (string value)
+#trace_requests =
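+# For example, to trace everything (as noted above):
+#trace_requests = .*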
+
+
+[identity]
+
+#
+# From tempest.config
+#
+
+# Admin domain name for authentication (Keystone V3). The same domain
+# applies to user and project (string value)
+#admin_domain_name = <None>
+
+# API key to use when authenticating as admin. (string value)
+admin_password = {{ KEYSTONE_ADMIN_PASSWORD }}
+
+# Role required to administrate keystone. (string value)
+admin_role = admin
+
+# Administrative Tenant name to use for Keystone API requests. (string
+# value)
+admin_tenant_name = admin
+
+# Administrative Username to use for Keystone API requests. (string
+# value)
+admin_username = admin
+
+# Alternate domain name for authentication (Keystone V3). The same
+# domain applies to user and project (string value)
+#alt_domain_name = <None>
+
+# API key to use when authenticating as alternate user. (string value)
+#alt_password = <None>
+
+# Alternate user's Tenant name to use for Nova API requests. (string
+# value)
+#alt_tenant_name = <None>
+
+# Username of alternate user to use for Nova API requests. (string
+# value)
+#alt_username = <None>
+
+# Identity API version to be used for authentication for API tests.
+# (string value)
+auth_version = v2
+
+# Catalog type of the Identity service. (string value)
+catalog_type = identity
+
+# Set to True if using self-signed SSL certificates. (boolean value)
+#disable_ssl_certificate_validation = false
+
+# Domain name for authentication (Keystone V3). The same domain applies
+# to user and project (string value)
+#domain_name = <None>
+
+# The endpoint type to use for the identity service. (string value)
+#endpoint_type = publicURL
+
+# API key to use when authenticating. (string value)
+password = {{ NOVA_SERVICE_PASSWORD }}
+
+# The identity region name to use. Also used as the other services'
+# region name unless they are set explicitly. If no such region is
+# found in the service catalog, the first found one is used. (string
+# value)
+#region = RegionOne
+
+# Tenant name to use for Nova API requests. (string value)
+tenant_name = service
+
+# Full URI of the OpenStack Identity API (Keystone), v2 (string value)
+uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0/
+
+# Full URI of the OpenStack Identity API (Keystone), v3 (string value)
+#
+# Tempest complains if we don't set any uri_v3, even if it's disabled.
+uri_v3 = <None>
+
+# Username to use for Nova API requests. (string value)
+username = {{ NOVA_SERVICE_USER }}
+
+
+[identity-feature-enabled]
+
+#
+# From tempest.config
+#
+
+# Is the v2 identity API enabled (boolean value)
+api_v2 = true
+
+# Is the v3 identity API enabled (boolean value)
+api_v3 = false
+
+# Does the identity service have delegation and impersonation enabled
+# (boolean value)
+#trust = true
+
+
+[image]
+
+#
+# From tempest.config
+#
+
+# Catalog type of the Image service. (string value)
+catalog_type = image
+
+# The endpoint type to use for the image service. (string value)
+endpoint_type = publicURL
+
+# http accessible image (string value)
+http_image = http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz
+
+# The image region name to use. If empty, the value of identity.region
+# is used instead. If no such region is found in the service catalog,
+# the first found one is used. (string value)
+#region =
+
+
+[image-feature-enabled]
+
+#
+# From tempest.config
+#
+
+# Is the v1 image API enabled (boolean value)
+#api_v1 = true
+
+# Is the v2 image API enabled (boolean value)
+api_v2 = true
+
+
+[input-scenario]
+
+#
+# From tempest.config
+#
+
+# Matching flavors become parameters for scenario tests (string value)
+#flavor_regex = ^m1.nano$
+
+# Matching images become parameters for scenario tests (string value)
+#image_regex = ^cirros-0.3.1-x86_64-uec$
+
+# SSH verification in tests is skipped for matching images (string
+# value)
+#non_ssh_image_regex = ^.*[Ww]in.*$
+
+# List of users mapped to regexes matching image names. (string value)
+#ssh_user_regex = [["^.*[Cc]irros.*$", "root"]]
+
+
+[messaging]
+
+#
+# From tempest.config
+#
+
+# Catalog type of the Messaging service. (string value)
+#catalog_type = messaging
+
+# The maximum grace period for a claim (integer value)
+#max_claim_grace = 43200
+
+# The maximum ttl for a claim (integer value)
+#max_claim_ttl = 43200
+
+# The maximum size of a message body (integer value)
+#max_message_size = 262144
+
+# The maximum ttl for a message (integer value)
+#max_message_ttl = 1209600
+
+# The maximum number of messages per claim (integer value)
+#max_messages_per_claim = 20
+
+# The maximum number of queue messages per page when listing or
+# posting messages (integer value)
+#max_messages_per_page = 20
+
+# The maximum metadata size for a queue (integer value)
+#max_queue_metadata = 65536
+
+# The maximum number of queue records per page when listing queues
+# (integer value)
+#max_queues_per_page = 20
+
+
+[negative]
+
+#
+# From tempest.config
+#
+
+# Test generator class for all negative tests (string value)
+#test_generator = tempest.common.generator.negative_generator.NegativeTestGenerator
+
+
+[network]
+
+#
+# From tempest.config
+#
+
+# Time in seconds between network operation status checks. (integer
+# value)
+#build_interval = 1
+
+# Timeout in seconds to wait for network operation to complete.
+# (integer value)
+#build_timeout = 300
+
+# Catalog type of the Neutron service. (string value)
+#catalog_type = network
+
+# List of DNS servers which should be used for subnet creation (list
+# value)
+#dns_servers = 8.8.8.8,8.8.4.4
+
+# The endpoint type to use for the network service. (string value)
+#endpoint_type = publicURL
+
+# Id of the public network that provides external connectivity (string
+# value)
+#public_network_id =
+
+# Id of the public router that provides external connectivity (string
+# value)
+#public_router_id =
+
+# The network region name to use. If empty, the value of
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
+
+# The cidr block to allocate tenant ipv4 subnets from (string value)
+#tenant_network_cidr = 10.100.0.0/16
+
+# The mask bits for tenant ipv4 subnets (integer value)
+#tenant_network_mask_bits = 28
+
+# The cidr block to allocate tenant ipv6 subnets from (string value)
+#tenant_network_v6_cidr = 2003::/48
+
+# The mask bits for tenant ipv6 subnets (integer value)
+#tenant_network_v6_mask_bits = 64
+
+# Whether tenant network connectivity should be evaluated directly
+# (boolean value)
+#tenant_networks_reachable = false
+
+
+[network-feature-enabled]
+
+#
+# From tempest.config
+#
+
+# A list of enabled network extensions, with a special entry 'all' which
+# indicates every extension is enabled. An empty list indicates all
+# extensions are disabled (list value)
+#api_extensions = all
+
+# Allow the execution of IPv6 tests (boolean value)
+#ipv6 = true
+
+# Allow the execution of IPv6 subnet tests that use the extended IPv6
+# attributes ipv6_ra_mode and ipv6_address_mode (boolean value)
+#ipv6_subnet_attributes = false
+
+
+[object-storage]
+
+#
+# From tempest.config
+#
+
+# Catalog type of the Object-Storage service. (string value)
+#catalog_type = object-store
+
+# Number of seconds to wait while looping to check the status of a
+# container to container synchronization (integer value)
+#container_sync_interval = 5
+
+# Number of seconds to wait for a container-to-container synchronization
+# to complete. (integer value)
+#container_sync_timeout = 120
+
+# The endpoint type to use for the object-store service. (string
+# value)
+#endpoint_type = publicURL
+
+# Role to add to users created for swift tests to enable creating
+# containers (string value)
+#operator_role = Member
+
+# The object-storage region name to use. If empty, the value of
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
+
+# User role that has reseller admin (string value)
+#reseller_admin_role = ResellerAdmin
+
+
+[object-storage-feature-enabled]
+
+#
+# From tempest.config
+#
+
+# Execute (old style) container-sync tests (boolean value)
+#container_sync = true
+
+# Execute discoverability tests (boolean value)
+#discoverability = true
+
+# A list of the enabled optional discoverable apis. A single entry,
+# all, indicates that all of these features are expected to be enabled
+# (list value)
+#discoverable_apis = all
+
+# Execute object-versioning tests (boolean value)
+#object_versioning = true
+
+
+[orchestration]
+
+#
+# From tempest.config
+#
+
+# Time in seconds between build status checks. (integer value)
+#build_interval = 1
+
+# Timeout in seconds to wait for a stack to build. (integer value)
+#build_timeout = 1200
+
+# Catalog type of the Orchestration service. (string value)
+#catalog_type = orchestration
+
+# The endpoint type to use for the orchestration service. (string
+# value)
+#endpoint_type = publicURL
+
+# Name of heat-cfntools enabled image to use when launching test
+# instances. (string value)
+#image_ref = <None>
+
+# Instance type for tests. Needs to be big enough for a full OS plus
+# the test workload (string value)
+#instance_type = m1.micro
+
+# Name of existing keypair to launch servers with. (string value)
+#keypair_name = <None>
+
+# Value must match heat configuration of the same name. (integer
+# value)
+#max_resources_per_stack = 1000
+
+# Value must match heat configuration of the same name. (integer
+# value)
+#max_template_size = 524288
+
+# The orchestration region name to use. If empty, the value of
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
+
+
+[scenario]
+
+#
+# From tempest.config
+#
+
+# AKI image file name (string value)
+#aki_img_file = cirros-0.3.1-x86_64-vmlinuz
+
+# AMI image file name (string value)
+#ami_img_file = cirros-0.3.1-x86_64-blank.img
+
+# ARI image file name (string value)
+#ari_img_file = cirros-0.3.1-x86_64-initrd
+
+# Image container format (string value)
+#img_container_format = bare
+
+# Directory containing image files (string value)
+#img_dir = /opt/stack/new/devstack/files/images/cirros-0.3.1-x86_64-uec
+
+# Image disk format (string value)
+#img_disk_format = qcow2
+
+# Image file name (string value)
+# Deprecated group/name - [DEFAULT]/qcow2_img_file
+#img_file = cirros-0.3.1-x86_64-disk.img
+
+# Specifies how many resources to request at once. Used for large
+# operations testing. (integer value)
+#large_ops_number = 0
+
+# ssh username for the image file (string value)
+#ssh_user = cirros
+
+
+[service_available]
+
+#
+# From tempest.config
+#
+
+# Whether or not Ceilometer is expected to be available (boolean
+# value)
+ceilometer = false
+
+# Whether or not cinder is expected to be available (boolean value)
+cinder = true
+
+# Whether or not glance is expected to be available (boolean value)
+glance = true
+
+# Whether or not Heat is expected to be available (boolean value)
+heat = false
+
+# Whether or not Horizon is expected to be available (boolean value)
+horizon = true
+
+# Whether or not Ironic is expected to be available (boolean value)
+ironic = false
+
+# Whether or not neutron is expected to be available (boolean value)
+neutron = true
+
+# Whether or not nova is expected to be available (boolean value)
+nova = true
+
+# Whether or not Sahara is expected to be available (boolean value)
+sahara = false
+
+# Whether or not swift is expected to be available (boolean value)
+swift = false
+
+# Whether or not Trove is expected to be available (boolean value)
+trove = false
+
+# Whether or not Zaqar is expected to be available (boolean value)
+zaqar = false
+
+
+[stress]
+
+#
+# From tempest.config
+#
+
+# Controller host. (string value)
+#controller = <None>
+
+# The number of threads created during a stress test. (integer value)
+#default_thread_number_per_action = 4
+
+# Allows a full cleaning process after a stress test. Caution: this
+# cleanup will remove every object of every tenant. (boolean value)
+#full_clean_stack = false
+
+# Prevent the cleaning (tearDownClass()) between each stress test run
+# if an exception occurs during this run. (boolean value)
+#leave_dirty_stack = false
+
+# time (in seconds) between log file error checks. (integer value)
+#log_check_interval = 60
+
+# Maximum number of instances to create during test. (integer value)
+#max_instances = 16
+
+# Directory containing log files on the compute nodes (string value)
+#nova_logdir = <None>
+
+# Controller host. (string value)
+#target_controller = <None>
+
+# regexp for list of log files. (string value)
+#target_logfiles = <None>
+
+# Path to private key. (string value)
+#target_private_key_path = <None>
+
+# ssh user. (string value)
+#target_ssh_user = <None>
+
+
+[telemetry]
+
+#
+# From tempest.config
+#
+
+# Catalog type of the Telemetry service. (string value)
+#catalog_type = metering
+
+# The endpoint type to use for the telemetry service. (string value)
+#endpoint_type = publicURL
+
+# This variable is used as a flag to enable notification tests (boolean
+# value)
+#too_slow_to_test = true
+
+
+[volume]
+
+#
+# From tempest.config
+#
+
+# Name of the backend1 (must be declared in cinder.conf) (string
+# value)
+backend1_name = LVM_iSCSI
+
+# Name of the backend2 (must be declared in cinder.conf) (string
+# value)
+#backend2_name = BACKEND_2
+
+# Time in seconds between volume availability checks. (integer value)
+#build_interval = 1
+
+# Timeout in seconds to wait for a volume to become available.
+# (integer value)
+#build_timeout = 300
+
+# Catalog type of the Volume Service (string value)
+catalog_type = volume
+
+# Disk format to use when copying a volume to image (string value)
+disk_format = raw
+
+# The endpoint type to use for the volume service. (string value)
+endpoint_type = publicURL
+
+# The volume region name to use. If empty, the value of
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
+
+# Backend protocol to target when creating volume types (string value)
+storage_protocol = iSCSI
+
+# Backend vendor to target when creating volume types (string value)
+#vendor_name = Open Source
+
+# Default size in GB for volumes created by volumes tests (integer
+# value)
+volume_size = 1
+
+
+[volume-feature-enabled]
+
+#
+# From tempest.config
+#
+
+# A list of enabled volume extensions, with a special entry 'all' which
+# indicates every extension is enabled. An empty list indicates all
+# extensions are disabled (list value)
+#api_extensions = all
+
+# Is the v1 volume API enabled (boolean value)
+api_v1 = true
+
+# Is the v2 volume API enabled (boolean value)
+api_v2 = true
+
+# Runs Cinder volumes backup test (boolean value)
+backup = true
+
+# Runs Cinder multi-backend test (requires 2 backends) (boolean value)
+multi_backend = false
+
+# Runs Cinder volume snapshot test (boolean value)
+snapshot = true
diff --git a/openstack/manifest b/openstack/manifest
new file mode 100644
index 00000000..aa4d5430
--- /dev/null
+++ b/openstack/manifest
@@ -0,0 +1,190 @@
+0040755 0 0 /etc/horizon
+0100644 0 0 /etc/horizon/apache-horizon.conf
+0040755 0 0 /etc/horizon/openstack_dashboard
+0100644 0 0 /etc/horizon/openstack_dashboard/local_settings.py
+template 0100644 0 0 /etc/tempest/tempest.conf
+0040755 0 0 /usr/share/openstack
+0100644 0 0 /usr/share/openstack/hosts
+0040755 0 0 /usr/share/openstack/ceilometer
+0100644 0 0 /usr/share/openstack/ceilometer-config.yml
+0100644 0 0 /usr/share/openstack/ceilometer-db.yml
+0100644 0 0 /usr/share/openstack/ceilometer/ceilometer.conf
+0040755 0 0 /usr/share/openstack/cinder
+0100644 0 0 /usr/share/openstack/cinder-config.yml
+0100644 0 0 /usr/share/openstack/cinder-db.yml
+0100644 0 0 /usr/share/openstack/cinder-lvs.yml
+0100644 0 0 /usr/share/openstack/cinder/cinder.conf
+0100644 0 0 /usr/share/openstack/cinder/api-paste.ini
+0100644 0 0 /usr/share/openstack/cinder/policy.json
+0040755 0 0 /usr/share/openstack/extras
+0100644 0 0 /usr/share/openstack/extras/00-disable-device.network
+0100644 0 0 /usr/share/openstack/extras/60-device-dhcp.network
+0100644 0 0 /usr/share/openstack/glance.yml
+0040755 0 0 /usr/share/openstack/glance
+0100644 0 0 /usr/share/openstack/glance/logging.conf
+0100644 0 0 /usr/share/openstack/glance/glance-api.conf
+0100644 0 0 /usr/share/openstack/glance/glance-registry.conf
+0100644 0 0 /usr/share/openstack/glance/glance-scrubber.conf
+0100644 0 0 /usr/share/openstack/glance/glance-cache.conf
+0100644 0 0 /usr/share/openstack/glance/schema-image.json
+0100644 0 0 /usr/share/openstack/glance/policy.json
+0100644 0 0 /usr/share/openstack/glance/glance-api-paste.ini
+0100644 0 0 /usr/share/openstack/glance/glance-registry-paste.ini
+0100644 0 0 /usr/share/openstack/horizon.yml
+0040755 0 0 /usr/share/openstack/ironic
+0100644 0 0 /usr/share/openstack/ironic.yml
+0100644 0 0 /usr/share/openstack/ironic/ironic.conf
+0100644 0 0 /usr/share/openstack/ironic/policy.json
+0100644 0 0 /usr/share/openstack/iscsi.yml
+0100644 0 0 /usr/share/openstack/keystone.yml
+0040755 0 0 /usr/share/openstack/keystone
+0100644 0 0 /usr/share/openstack/keystone/logging.conf
+0100644 0 0 /usr/share/openstack/keystone/keystone.conf
+0100644 0 0 /usr/share/openstack/keystone/policy.json
+0100644 0 0 /usr/share/openstack/keystone/keystone-paste.ini
+0100644 0 0 /usr/share/openstack/network.yml
+0040755 0 0 /usr/share/openstack/neutron
+0100644 0 0 /usr/share/openstack/neutron-config.yml
+0100644 0 0 /usr/share/openstack/neutron-db.yml
+0100644 0 0 /usr/share/openstack/neutron/neutron.conf
+0100644 0 0 /usr/share/openstack/neutron/api-paste.ini
+0100644 0 0 /usr/share/openstack/neutron/policy.json
+0100644 0 0 /usr/share/openstack/neutron/l3_agent.ini
+0100644 0 0 /usr/share/openstack/neutron/dhcp_agent.ini
+0100644 0 0 /usr/share/openstack/neutron/lbaas_agent.ini
+0100644 0 0 /usr/share/openstack/neutron/metadata_agent.ini
+0100644 0 0 /usr/share/openstack/neutron/fwaas_driver.ini
+0100644 0 0 /usr/share/openstack/neutron/metering_agent.ini
+0100644 0 0 /usr/share/openstack/neutron/vpn_agent.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/
+0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch
+0100644 0 0 /usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl
+0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs
+0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs
+0100644 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README
+0100644 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README
+0040755 0 0 /usr/share/openstack/neutron/plugins/brocade
+0100644 0 0 /usr/share/openstack/neutron/plugins/brocade/brocade.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/cisco
+0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/embrane
+0100644 0 0 /usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/hyperv
+0100644 0 0 /usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/ibm
+0100644 0 0 /usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/linuxbridge
+0100644 0 0 /usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/metaplugin
+0100644 0 0 /usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/midonet
+0100644 0 0 /usr/share/openstack/neutron/plugins/midonet/midonet.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/ml2
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/mlnx
+0100644 0 0 /usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/nec
+0100644 0 0 /usr/share/openstack/neutron/plugins/nec/nec.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/nuage
+0100644 0 0 /usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/oneconvergence
+0100644 0 0 /usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/opencontrail
+0100644 0 0 /usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/openvswitch
+0100644 0 0 /usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/plumgrid
+0100644 0 0 /usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/vmware
+0100644 0 0 /usr/share/openstack/neutron/plugins/vmware/nsx.ini
+0040755 0 0 /usr/share/openstack/nova
+0100644 0 0 /usr/share/openstack/nova-config.yml
+0100644 0 0 /usr/share/openstack/nova-db.yml
+0100644 0 0 /usr/share/openstack/nova/logging.conf
+0100644 0 0 /usr/share/openstack/nova/nova.conf
+0100644 0 0 /usr/share/openstack/nova/nova-compute.conf
+0100644 0 0 /usr/share/openstack/nova/policy.json
+0100644 0 0 /usr/share/openstack/nova/cells.json
+0100644 0 0 /usr/share/openstack/nova/api-paste.ini
+0100644 0 0 /usr/share/openstack/openvswitch.yml
+0040755 0 0 /usr/share/openstack/postgres
+0100644 0 0 /usr/share/openstack/postgres.yml
+0100644 0 0 /usr/share/openstack/postgres/pg_hba.conf
+0100644 0 0 /usr/share/openstack/postgres/postgresql.conf
+0040755 0 0 /usr/share/openstack/rabbitmq
+0100644 0 0 /usr/share/openstack/rabbitmq/rabbitmq-env.conf
+0100644 0 0 /usr/share/openstack/rabbitmq/rabbitmq.config
+0040755 0 0 /usr/lib/sysctl.d
+0100644 0 0 /usr/lib/sysctl.d/neutron.conf
+0100644 0 0 /usr/lib/systemd/system/apache-httpd.service
+0100644 0 0 /usr/lib/systemd/system/iscsi-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-keystone.service
+0100644 0 0 /usr/lib/systemd/system/openstack-keystone-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-glance-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-glance-api.service
+0100644 0 0 /usr/lib/systemd/system/openstack-glance-registry.service
+0100644 0 0 /usr/lib/systemd/system/openstack-horizon-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ironic-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ironic-api.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ironic-conductor.service
+0100644 0 0 /usr/lib/systemd/system/openstack-network-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-neutron-config-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-neutron-db-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-neutron-server.service
+0100644 0 0 /usr/lib/systemd/system/openstack-neutron-metadata-agent.service
+0100644 0 0 /usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service
+0100644 0 0 /usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-neutron-dhcp-agent.service
+0100644 0 0 /usr/lib/systemd/system/openstack-neutron-l3-agent.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-config-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-db-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-compute.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-conductor.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-api.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-scheduler.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-consoleauth.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-novncproxy.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-cert.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-serialproxy.service
+0100644 0 0 /usr/lib/systemd/system/rabbitmq-server.service
+0100644 0 0 /usr/lib/systemd/system/openstack-cinder-config-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-cinder-db-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-cinder-lv-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-cinder-api.service
+0100644 0 0 /usr/lib/systemd/system/openstack-cinder-scheduler.service
+0100644 0 0 /usr/lib/systemd/system/openstack-cinder-volume.service
+0100644 0 0 /usr/lib/systemd/system/openstack-cinder-backup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-config-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-db-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-api.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-central.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-collector.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-compute.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-notification.service
+0100644 0 0 /usr/lib/systemd/system/openvswitch-setup.service
+0100644 0 0 /usr/lib/systemd/system/openvswitch-db-server.service
+0100644 0 0 /usr/lib/systemd/system/openvswitch.service
+0100644 0 0 /usr/lib/systemd/system/postgres-server.service
+0100644 0 0 /usr/lib/systemd/system/postgres-server-setup.service
+0100644 0 0 /usr/share/openstack/swift-controller.yml
+0100644 0 0 /usr/lib/systemd/system/swift-controller-setup.service
+0100644 0 0 /usr/lib/systemd/system/swift-proxy.service
+0040755 0 0 /usr/share/swift
+0040755 0 0 /usr/share/swift/etc
+0040755 0 0 /usr/share/swift/etc/swift
+0100644 0 0 /usr/share/swift/etc/swift/proxy-server.j2
diff --git a/openstack/usr/lib/sysctl.d/neutron.conf b/openstack/usr/lib/sysctl.d/neutron.conf
new file mode 100644
index 00000000..644ca116
--- /dev/null
+++ b/openstack/usr/lib/sysctl.d/neutron.conf
@@ -0,0 +1,3 @@
+# Disable rp filtering; enabling forwarding is handled by networkd
+net.ipv4.conf.all.rp_filter=0
+net.ipv4.conf.default.rp_filter=0
diff --git a/openstack/usr/lib/systemd/system/apache-httpd.service b/openstack/usr/lib/systemd/system/apache-httpd.service
new file mode 100644
index 00000000..e2a840c6
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/apache-httpd.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Apache Web Server
+After=network.target remote-fs.target nss-lookup.target
+Wants=network.target
+
+[Service]
+Type=forking
+PIDFile=/var/run/httpd.pid
+ExecStart=/usr/sbin/apachectl start
+ExecStop=/usr/sbin/apachectl graceful-stop
+ExecReload=/usr/sbin/apachectl graceful
+PrivateTmp=true
+LimitNOFILE=infinity
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/iscsi-setup.service b/openstack/usr/lib/systemd/system/iscsi-setup.service
new file mode 100644
index 00000000..4cb10045
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/iscsi-setup.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Run iscsi-setup Ansible scripts
+Before=iscsid.service target.service
+Wants=iscsid.service target.service
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/iscsi.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service
new file mode 100644
index 00000000..6e3ada59
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack ceilometer alarm evaluation service
+ConditionPathExists=/etc/ceilometer/ceilometer.conf
+After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ceilometer
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ceilometer-alarm-evaluator --config-file /etc/ceilometer/ceilometer.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service
new file mode 100644
index 00000000..7a3e1c91
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack ceilometer alarm notification service
+ConditionPathExists=/etc/ceilometer/ceilometer.conf
+After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ceilometer
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ceilometer-alarm-notifier --config-file /etc/ceilometer/ceilometer.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-api.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-api.service
new file mode 100644
index 00000000..eb0293bf
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-ceilometer-api.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack ceilometer API service
+ConditionPathExists=/etc/ceilometer/ceilometer.conf
+After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ceilometer
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ceilometer-api --config-file /etc/ceilometer/ceilometer.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-central.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-central.service
new file mode 100644
index 00000000..a1bc11ee
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-ceilometer-central.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack ceilometer central agent
+ConditionPathExists=/etc/ceilometer/ceilometer.conf
+After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service openstack-ceilometer-collector.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ceilometer
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ceilometer-agent-central --config-file /etc/ceilometer/ceilometer.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-collector.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-collector.service
new file mode 100644
index 00000000..dafc3ac7
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-ceilometer-collector.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack ceilometer collection service
+ConditionPathExists=/etc/ceilometer/ceilometer.conf
+After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ceilometer
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ceilometer-collector --config-file /etc/ceilometer/ceilometer.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-compute.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-compute.service
new file mode 100644
index 00000000..9fe8a1e6
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-ceilometer-compute.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack ceilometer compute agent
+ConditionPathExists=/etc/ceilometer/ceilometer.conf
+After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ceilometer
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ceilometer-agent-compute --config-file /etc/ceilometer/ceilometer.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-config-setup.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-config-setup.service
new file mode 100644
index 00000000..c3e809d7
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-ceilometer-config-setup.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Run ceilometer-config-setup Ansible scripts
+ConditionPathExists=/etc/openstack/ceilometer.conf
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/ceilometer-config.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-db-setup.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-db-setup.service
new file mode 100644
index 00000000..7a785227
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-ceilometer-db-setup.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Run ceilometer-db-setup Ansible scripts
+ConditionPathExists=/etc/openstack/ceilometer.conf
+After=network-online.target postgres-server-setup.service openstack-keystone-setup.service openstack-ceilometer-config-setup.service
+Wants=network-online.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/ceilometer-db.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-notification.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-notification.service
new file mode 100644
index 00000000..6696116e
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-ceilometer-notification.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack ceilometer notification agent
+ConditionPathExists=/etc/ceilometer/ceilometer.conf
+After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service openstack-ceilometer-collector.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ceilometer
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ceilometer-agent-notification --config-file /etc/ceilometer/ceilometer.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-cinder-api.service b/openstack/usr/lib/systemd/system/openstack-cinder-api.service
new file mode 100644
index 00000000..a284f31d
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-cinder-api.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Volume Service (code-named Cinder) API server
+ConditionPathExists=/etc/cinder/cinder.conf
+After=network-online.target openstack-cinder-config-setup.service openstack-cinder-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=cinder
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/cinder-api --config-file /etc/cinder/cinder.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-cinder-backup.service b/openstack/usr/lib/systemd/system/openstack-cinder-backup.service
new file mode 100644
index 00000000..c14e13aa
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-cinder-backup.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Cinder backup server
+ConditionPathExists=/etc/cinder/cinder.conf
+After=network-online.target openstack-cinder-config-setup.service openstack-cinder-db-setup.service openstack-cinder-volume.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=cinder
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/cinder-backup --config-file /etc/cinder/cinder.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-cinder-config-setup.service b/openstack/usr/lib/systemd/system/openstack-cinder-config-setup.service
new file mode 100644
index 00000000..1c966933
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-cinder-config-setup.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Run cinder-config-setup Ansible scripts
+ConditionPathExists=/etc/openstack/cinder.conf
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/cinder-config.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-cinder-db-setup.service b/openstack/usr/lib/systemd/system/openstack-cinder-db-setup.service
new file mode 100644
index 00000000..a3c66d67
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-cinder-db-setup.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Run cinder-db-setup Ansible scripts
+ConditionPathExists=/etc/openstack/cinder.conf
+After=network-online.target postgres-server-setup.service openstack-keystone-setup.service openstack-cinder-config-setup.service
+Wants=network-online.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/cinder-db.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-cinder-lv-setup.service b/openstack/usr/lib/systemd/system/openstack-cinder-lv-setup.service
new file mode 100644
index 00000000..82e9b08d
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-cinder-lv-setup.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Run cinder-lvs-setup Ansible scripts
+ConditionPathExists=/etc/openstack/cinder.conf
+Wants=lvm2-lvmetad.service
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/cinder-lvs.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-cinder-scheduler.service b/openstack/usr/lib/systemd/system/openstack-cinder-scheduler.service
new file mode 100644
index 00000000..f205aaff
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-cinder-scheduler.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Cinder scheduler server
+ConditionPathExists=/etc/cinder/cinder.conf
+After=network-online.target openstack-cinder-config-setup.service openstack-cinder-db-setup.service openstack-cinder-volume.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=cinder
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/cinder-scheduler --config-file /etc/cinder/cinder.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-cinder-volume.service b/openstack/usr/lib/systemd/system/openstack-cinder-volume.service
new file mode 100644
index 00000000..c56ee693
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-cinder-volume.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Cinder volume server
+ConditionPathExists=/etc/cinder/cinder.conf
+After=network-online.target openstack-cinder-config-setup.service openstack-cinder-db-setup.service openstack-cinder-lv-setup.service lvm2-lvmetad.service iscsid.service target.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=cinder
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/cinder-volume --config-file /etc/cinder/cinder.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-glance-api.service b/openstack/usr/lib/systemd/system/openstack-glance-api.service
new file mode 100644
index 00000000..4c34ff10
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-glance-api.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=OpenStack Image Service (code-named Glance) API server
+ConditionPathExists=/etc/glance/glance-api.conf
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=glance
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/glance-api --config-file /etc/glance/glance-api.conf
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/openstack/usr/lib/systemd/system/openstack-glance-registry.service b/openstack/usr/lib/systemd/system/openstack-glance-registry.service
new file mode 100644
index 00000000..d53c8b33
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-glance-registry.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=OpenStack Image Service (code-named Glance) Registry server
+ConditionPathExists=/etc/glance/glance-registry.conf
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=glance
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/glance-registry --config-file /etc/glance/glance-registry.conf
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/openstack/usr/lib/systemd/system/openstack-glance-setup.service b/openstack/usr/lib/systemd/system/openstack-glance-setup.service
new file mode 100644
index 00000000..43810797
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-glance-setup.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Run glance-setup Ansible scripts
+ConditionPathExists=/etc/openstack/glance.conf
+After=local-fs.target network-online.target postgres-server-setup.service openstack-keystone-setup.service
+Wants=network-online.target
+
+[Service]
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/glance.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-horizon-setup.service b/openstack/usr/lib/systemd/system/openstack-horizon-setup.service
new file mode 100644
index 00000000..9ec3197a
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-horizon-setup.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=Run horizon-setup Ansible scripts
+After=local-fs.target
+Before=apache-httpd.service
+
+[Service]
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/horizon.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ironic-api.service b/openstack/usr/lib/systemd/system/openstack-ironic-api.service
new file mode 100644
index 00000000..5a286a95
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-ironic-api.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=OpenStack Bare Metal Provisioning Service (code-named Ironic) API server
+ConditionPathExists=/etc/ironic/ironic.conf
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ironic
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ironic-api --config-file /etc/ironic/ironic.conf
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/openstack/usr/lib/systemd/system/openstack-ironic-conductor.service b/openstack/usr/lib/systemd/system/openstack-ironic-conductor.service
new file mode 100644
index 00000000..b3b226e0
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-ironic-conductor.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=OpenStack Bare Metal Provisioning Service (code-named Ironic) Conductor server
+ConditionPathExists=/etc/ironic/ironic.conf
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ironic
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ironic-conductor --config-file /etc/ironic/ironic.conf
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/openstack/usr/lib/systemd/system/openstack-ironic-setup.service b/openstack/usr/lib/systemd/system/openstack-ironic-setup.service
new file mode 100644
index 00000000..e3a58eb5
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-ironic-setup.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Run ironic-setup Ansible scripts
+ConditionPathExists=/etc/openstack/ironic.conf
+After=local-fs.target network-online.target postgres-server-setup.service openstack-keystone-setup.service
+Wants=network-online.target
+
+[Service]
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/ironic.yml
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/openstack/usr/lib/systemd/system/openstack-keystone-setup.service b/openstack/usr/lib/systemd/system/openstack-keystone-setup.service
new file mode 100644
index 00000000..db9d0b2b
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-keystone-setup.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=Run keystone-setup Ansible scripts
+ConditionPathExists=/etc/openstack/keystone.conf
+After=local-fs.target network-online.target postgres-server-setup.service
+Wants=network-online.target
+
+[Service]
+# Oneshot, since other setup services have to wait until this service finishes
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/keystone.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-keystone.service b/openstack/usr/lib/systemd/system/openstack-keystone.service
new file mode 100644
index 00000000..6f6ff644
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-keystone.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=OpenStack Identity Service (code-named Keystone)
+ConditionPathExists=/etc/keystone/keystone.conf
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=notify
+Restart=always
+User=keystone
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/keystone-all --config-file /etc/keystone/keystone.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-network-setup.service b/openstack/usr/lib/systemd/system/openstack-network-setup.service
new file mode 100644
index 00000000..021370d9
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-network-setup.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Run Ansible scripts to configure internal network for OpenStack
+After=openvswitch-setup.service openvswitch.service
+Before=systemd-networkd.service
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/network.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-neutron-config-setup.service b/openstack/usr/lib/systemd/system/openstack-neutron-config-setup.service
new file mode 100644
index 00000000..b74f44ab
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-neutron-config-setup.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Run neutron-config-setup Ansible scripts
+ConditionPathExists=/etc/openstack/neutron.conf
+After=network-online.target openstack-keystone-setup.service
+Wants=network-online.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/neutron-config.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-neutron-db-setup.service b/openstack/usr/lib/systemd/system/openstack-neutron-db-setup.service
new file mode 100644
index 00000000..5d07da2e
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-neutron-db-setup.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Run neutron-db-setup Ansible scripts
+ConditionPathExists=/etc/openstack/neutron.conf
+After=network-online.target postgres-server-setup.service openstack-keystone-setup.service openstack-neutron-config-setup.service
+Wants=network-online.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/neutron-db.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-neutron-dhcp-agent.service b/openstack/usr/lib/systemd/system/openstack-neutron-dhcp-agent.service
new file mode 100644
index 00000000..9080f3c1
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-neutron-dhcp-agent.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=Neutron DHCP Agent
+ConditionPathExists=/etc/neutron/neutron.conf
+After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service openstack-neutron-ovs-cleanup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=neutron
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/neutron-dhcp-agent \
+ --config-file=/etc/neutron/neutron.conf \
+ --config-file=/etc/neutron/dhcp_agent.ini
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-neutron-l3-agent.service b/openstack/usr/lib/systemd/system/openstack-neutron-l3-agent.service
new file mode 100644
index 00000000..76efea5c
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-neutron-l3-agent.service
@@ -0,0 +1,18 @@
+[Unit]
+Description=Neutron Layer 3 Agent
+ConditionPathExists=/etc/neutron/neutron.conf
+After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service openstack-neutron-ovs-cleanup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=neutron
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/neutron-l3-agent \
+ --config-file=/etc/neutron/neutron.conf \
+ --config-file=/etc/neutron/l3_agent.ini \
+ --config-file=/etc/neutron/fwaas_driver.ini
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-neutron-metadata-agent.service b/openstack/usr/lib/systemd/system/openstack-neutron-metadata-agent.service
new file mode 100644
index 00000000..20540e4c
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-neutron-metadata-agent.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=Neutron Metadata Plugin Agent
+ConditionPathExists=/etc/neutron/neutron.conf
+After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=neutron
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/neutron-metadata-agent \
+ --config-file=/etc/neutron/neutron.conf \
+ --config-file=/etc/neutron/metadata_agent.ini
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service b/openstack/usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service
new file mode 100644
index 00000000..f5709028
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service
@@ -0,0 +1,18 @@
+[Unit]
+Description=Neutron OVS cleanup
+ConditionPathExists=/etc/neutron/neutron.conf
+ConditionFileIsExecutable=/usr/bin/neutron-ovs-cleanup
+After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service openvswitch.service
+Before=openstack-neutron-plugin-openvswitch-agent.service
+Wants=network-online.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+StandardOutput=null
+StandardError=null
+User=neutron
+ExecStart=/usr/bin/neutron-ovs-cleanup --config-file /etc/neutron/neutron.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service b/openstack/usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service
new file mode 100644
index 00000000..6c579a62
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=Neutron OpenvSwitch Plugin Agent
+ConditionPathExists=/etc/neutron/neutron.conf
+After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=neutron
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/neutron-openvswitch-agent \
+ --config-file=/etc/neutron/neutron.conf \
+ --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-neutron-server.service b/openstack/usr/lib/systemd/system/openstack-neutron-server.service
new file mode 100644
index 00000000..6376c3d8
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-neutron-server.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=Neutron API Server
+ConditionPathExists=/etc/neutron/neutron.conf
+After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=neutron
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/neutron-server \
+ --config-file=/etc/neutron/neutron.conf \
+ --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-api.service b/openstack/usr/lib/systemd/system/openstack-nova-api.service
new file mode 100644
index 00000000..521353db
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-nova-api.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Compute Service (code-named Nova) API server
+ConditionPathExists=/etc/nova/nova.conf
+After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=nova
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/nova-api --config-file /etc/nova/nova.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-cert.service b/openstack/usr/lib/systemd/system/openstack-nova-cert.service
new file mode 100644
index 00000000..b3733816
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-nova-cert.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Nova Cert
+ConditionPathExists=/etc/nova/nova.conf
+After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=nova
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/nova-cert --config-file /etc/nova/nova.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-compute.service b/openstack/usr/lib/systemd/system/openstack-nova-compute.service
new file mode 100644
index 00000000..4f9b8196
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-nova-compute.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=OpenStack Compute Service (code-named Nova) compute server
+ConditionPathExists=/etc/nova/nova.conf
+After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service libvirtd.service
+Wants=network-online.target
+Requires=libvirtd.service
+
+[Service]
+Type=simple
+User=nova
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-conductor.service b/openstack/usr/lib/systemd/system/openstack-nova-conductor.service
new file mode 100644
index 00000000..4c0d7d43
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-nova-conductor.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Database-access support for Compute nodes (nova-conductor)
+ConditionPathExists=/etc/nova/nova.conf
+After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service libvirtd.service
+Wants=network-online.target
+Requires=libvirtd.service
+
+[Service]
+Type=simple
+User=nova
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/nova-conductor --config-file /etc/nova/nova.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-config-setup.service b/openstack/usr/lib/systemd/system/openstack-nova-config-setup.service
new file mode 100644
index 00000000..df669aa9
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-nova-config-setup.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Run nova-config-setup Ansible scripts
+ConditionPathExists=/etc/openstack/nova.conf
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/nova-config.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-consoleauth.service b/openstack/usr/lib/systemd/system/openstack-nova-consoleauth.service
new file mode 100644
index 00000000..e22780a9
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-nova-consoleauth.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Console Auth (nova-consoleauth)
+ConditionPathExists=/etc/nova/nova.conf
+After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=nova
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/nova-consoleauth --config-file /etc/nova/nova.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-db-setup.service b/openstack/usr/lib/systemd/system/openstack-nova-db-setup.service
new file mode 100644
index 00000000..8e004327
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-nova-db-setup.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Run nova-db-setup Ansible scripts
+ConditionPathExists=/etc/openstack/nova.conf
+After=network-online.target postgres-server-setup.service openstack-keystone-setup.service openstack-nova-config-setup.service
+Wants=network-online.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/nova-db.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-novncproxy.service b/openstack/usr/lib/systemd/system/openstack-nova-novncproxy.service
new file mode 100644
index 00000000..8cbb20fd
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-nova-novncproxy.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Nova NoVNC proxy
+ConditionPathExists=/etc/nova/nova.conf
+After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=nova
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/nova-novncproxy --config-file /etc/nova/nova.conf --web /usr/share/novnc
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-scheduler.service b/openstack/usr/lib/systemd/system/openstack-nova-scheduler.service
new file mode 100644
index 00000000..e89f0d3e
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-nova-scheduler.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Nova Scheduler
+ConditionPathExists=/etc/nova/nova.conf
+After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=nova
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/nova-scheduler --config-file /etc/nova/nova.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-serialproxy.service b/openstack/usr/lib/systemd/system/openstack-nova-serialproxy.service
new file mode 100644
index 00000000..30af8305
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openstack-nova-serialproxy.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Nova Serial Proxy
+ConditionPathExists=/etc/nova/nova.conf
+After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=nova
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/nova-serialproxy --config-file /etc/nova/nova.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openvswitch-db-server.service b/openstack/usr/lib/systemd/system/openvswitch-db-server.service
new file mode 100644
index 00000000..34a7c812
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openvswitch-db-server.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Open vSwitch Database Server Daemon
+After=local-fs.target
+
+[Service]
+Type=forking
+ExecStartPre=-/usr/bin/mkdir -p /var/run/openvswitch
+ExecStart=/usr/sbin/ovsdb-server --remote=punix:/var/run/openvswitch/db.sock --remote=db:Open_vSwitch,Open_vSwitch,manager_options --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --pidfile --detach
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/openstack/usr/lib/systemd/system/openvswitch-setup.service b/openstack/usr/lib/systemd/system/openvswitch-setup.service
new file mode 100644
index 00000000..8393ebbc
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openvswitch-setup.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Run openvswitch-setup Ansible scripts
+After=local-fs.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -i /usr/share/openstack/hosts /usr/share/openstack/openvswitch.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openvswitch.service b/openstack/usr/lib/systemd/system/openvswitch.service
new file mode 100644
index 00000000..113911f6
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/openvswitch.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Open vSwitch Daemon
+Before=network-pre.target
+Wants=network-pre.target
+
+[Service]
+Type=forking
+ExecStartPre=-/usr/bin/mkdir -p /var/run/openvswitch
+ExecStart=/usr/sbin/ovs-vswitchd --pidfile --detach
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/postgres-server-setup.service b/openstack/usr/lib/systemd/system/postgres-server-setup.service
new file mode 100644
index 00000000..202c0636
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/postgres-server-setup.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Run postgres-setup Ansible scripts
+ConditionPathExists=/etc/openstack/postgres.conf
+After=local-fs.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -i /usr/share/openstack/hosts /usr/share/openstack/postgres.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/postgres-server.service b/openstack/usr/lib/systemd/system/postgres-server.service
new file mode 100644
index 00000000..9e11f26d
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/postgres-server.service
@@ -0,0 +1,26 @@
+[Unit]
+Description=PostgreSQL database server
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=forking
+TimeoutSec=120
+User=postgres
+Group=postgres
+
+Environment=PGROOT=/var/lib/pgsql
+
+SyslogIdentifier=postgres
+PIDFile=/var/lib/pgsql/data/postmaster.pid
+
+ExecStart=/usr/bin/pg_ctl -s -D ${PGROOT}/data start -w -t 120
+ExecReload=/usr/bin/pg_ctl -s -D ${PGROOT}/data reload
+ExecStop=/usr/bin/pg_ctl -s -D ${PGROOT}/data stop -m fast
+
+# Due to PostgreSQL's use of shared memory, the OOM killer is often overzealous in
+# killing Postgres, so adjust it downward
+OOMScoreAdjust=-200
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/rabbitmq-server.service b/openstack/usr/lib/systemd/system/rabbitmq-server.service
new file mode 100644
index 00000000..1a20f3e4
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/rabbitmq-server.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=RabbitMQ broker
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=notify
+User=rabbitmq
+Group=rabbitmq
+Environment=HOME=/var/lib/rabbitmq
+WorkingDirectory=/var/lib/rabbitmq
+ExecStart=/usr/sbin/rabbitmq-server
+ExecStop=/usr/sbin/rabbitmqctl stop
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/swift-controller-setup.service b/openstack/usr/lib/systemd/system/swift-controller-setup.service
new file mode 100644
index 00000000..ccfbcbe6
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/swift-controller-setup.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Run swift-controller-setup (once)
+After=local-fs.target network-online.target postgres-server-setup.service openstack-keystone-setup.service
+Wants=network-online.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/swift-controller.yml
+Restart=no
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/swift-proxy.service b/openstack/usr/lib/systemd/system/swift-proxy.service
new file mode 100644
index 00000000..7b0a2e17
--- /dev/null
+++ b/openstack/usr/lib/systemd/system/swift-proxy.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=OpenStack Swift Proxy Server
+After=network-online.target swift-controller-setup.service memcached.service
+Wants=network-online.target
+
+[Service]
+Type=forking
+PIDFile=/var/run/swift/proxy-server.pid
+Restart=on-failure
+ExecStart=/usr/bin/swift-init proxy-server start
+ExecStop=/usr/bin/swift-init proxy-server stop
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/usr/share/openstack/ceilometer-config.yml b/openstack/usr/share/openstack/ceilometer-config.yml
new file mode 100644
index 00000000..9850d84d
--- /dev/null
+++ b/openstack/usr/share/openstack/ceilometer-config.yml
@@ -0,0 +1,36 @@
+---
+- hosts: localhost
+  vars_files:
+  - "/etc/openstack/ceilometer.conf"
+  tasks:
+# Configure ceilometer
+  - name: Create the ceilometer user.
+    user:
+      name: ceilometer
+      comment: Openstack Ceilometer Daemons
+      shell: /sbin/nologin
+      home: /var/lib/ceilometer
+
+  - name: Create the /var folders for ceilometer
+    file:
+      path: "{{ item }}"
+      state: directory
+      owner: ceilometer
+      group: ceilometer
+    with_items:
+    - /var/run/ceilometer
+    - /var/lock/ceilometer
+    - /var/log/ceilometer
+    - /var/lib/ceilometer
+
+  - name: Create /etc/ceilometer directory
+    file:
+      path: /etc/ceilometer
+      state: directory
+
+  - name: Add the configuration needed for ceilometer in /etc/ceilometer using templates
+    template:
+      src: /usr/share/openstack/ceilometer/{{ item }}
+      dest: /etc/ceilometer/{{ item }}
+    with_lines:
+    - cd /usr/share/openstack/ceilometer && find -type f
diff --git a/openstack/usr/share/openstack/ceilometer-db.yml b/openstack/usr/share/openstack/ceilometer-db.yml
new file mode 100644
index 00000000..717c7d7d
--- /dev/null
+++ b/openstack/usr/share/openstack/ceilometer-db.yml
@@ -0,0 +1,50 @@
+---
+- hosts: localhost
+  vars_files:
+  - "/etc/openstack/ceilometer.conf"
+  tasks:
+  - name: Create ceilometer service user in service tenant
+    keystone_user:
+      user: "{{ CEILOMETER_SERVICE_USER }}"
+      password: "{{ CEILOMETER_SERVICE_PASSWORD }}"
+      tenant: service
+      token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+  - name: Assign the admin role to the ceilometer service user in the service tenant
+    keystone_user:
+      role: admin
+      user: "{{ CEILOMETER_SERVICE_USER }}"
+      tenant: service
+      token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+  - name: Add ceilometer endpoint
+    keystone_service:
+      name: ceilometer
+      type: metering
+      description: Openstack Metering Service
+      publicurl: http://{{ CONTROLLER_HOST_ADDRESS }}:8777
+      internalurl: http://{{ CONTROLLER_HOST_ADDRESS }}:8777
+      adminurl: http://{{ CONTROLLER_HOST_ADDRESS }}:8777
+      region: regionOne
+      token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+  - name: Create postgresql user for ceilometer
+    postgresql_user:
+      name: "{{ CEILOMETER_DB_USER }}"
+      login_host: "{{ CONTROLLER_HOST_ADDRESS }}"
+      password: "{{ CEILOMETER_DB_PASSWORD }}"
+    sudo: yes
+    sudo_user: ceilometer
+
+  - name: Create database for ceilometer services
+    postgresql_db:
+      name: ceilometer
+      owner: "{{ CEILOMETER_DB_USER }}"
+      login_host: "{{ CONTROLLER_HOST_ADDRESS }}"
+    sudo: yes
+    sudo_user: ceilometer
+
+  - name: Initiate ceilometer database
+    command: ceilometer-dbsync
+    sudo: yes
+    sudo_user: ceilometer
diff --git a/openstack/usr/share/openstack/ceilometer/ceilometer.conf b/openstack/usr/share/openstack/ceilometer/ceilometer.conf
new file mode 100644
index 00000000..b572d40f
--- /dev/null
+++ b/openstack/usr/share/openstack/ceilometer/ceilometer.conf
@@ -0,0 +1,1023 @@
+[DEFAULT]
+
+#
+# Options defined in ceilometer.middleware
+#
+
+# Names of the exchanges to listen to for notifications. (multi valued)
+#http_control_exchanges=nova
+#http_control_exchanges=glance
+#http_control_exchanges=neutron
+#http_control_exchanges=cinder
+
+
+#
+# Options defined in ceilometer.pipeline
+#
+
+# Configuration file for pipeline definition. (string value)
+#pipeline_cfg_file=pipeline.yaml
+
+
+#
+# Options defined in ceilometer.sample
+#
+
+# Source for samples emitted on this instance. (string value)
+# Deprecated group/name - [DEFAULT]/counter_source
+#sample_source=openstack
+
+
+#
+# Options defined in ceilometer.service
+#
+
+# Name of this node, which must be valid in an AMQP key. Can
+# be an opaque identifier. For ZeroMQ only, must be a valid
+# host name, FQDN, or IP address. (string value)
+#host=ceilometer
+
+# Dispatcher to process data. (multi valued)
+#dispatcher=database
+
+# Number of workers for collector service. A single
+# collector is enabled by default. (integer value)
+#collector_workers=1
+
+# Number of workers for notification service. A single
+# notification agent is enabled by default. (integer value)
+#notification_workers=1
+
+
+#
+# Options defined in ceilometer.api.app
+#
+
+# The strategy to use for auth: noauth or keystone. (string
+# value)
+auth_strategy=keystone
+
+# Deploy the deprecated v1 API. (boolean value)
+#enable_v1_api=true
+
+
+#
+# Options defined in ceilometer.compute.notifications
+#
+
+# Exchange name for Nova notifications. (string value)
+#nova_control_exchange=nova
+
+
+#
+# Options defined in ceilometer.compute.util
+#
+
+# List of metadata prefixes reserved for metering use. (list
+# value)
+#reserved_metadata_namespace=metering.
+
+# Limit on length of reserved metadata values. (integer value)
+#reserved_metadata_length=256
+
+
+#
+# Options defined in ceilometer.compute.virt.inspector
+#
+
+# Inspector to use for inspecting the hypervisor layer.
+# (string value)
+#hypervisor_inspector=libvirt
+
+
+#
+# Options defined in ceilometer.compute.virt.libvirt.inspector
+#
+
+# Libvirt domain type (valid options are: kvm, lxc, qemu, uml,
+# xen). (string value)
+#libvirt_type=kvm
+
+# Override the default libvirt URI (which is dependent on
+# libvirt_type). (string value)
+#libvirt_uri=
+
+
+#
+# Options defined in ceilometer.image.notifications
+#
+
+# Exchange name for Glance notifications. (string value)
+#glance_control_exchange=glance
+
+
+#
+# Options defined in ceilometer.network.notifications
+#
+
+# Exchange name for Neutron notifications. (string value)
+# Deprecated group/name - [DEFAULT]/quantum_control_exchange
+#neutron_control_exchange=neutron
+
+
+#
+# Options defined in ceilometer.objectstore.swift
+#
+
+# Swift reseller prefix. Must be on par with reseller_prefix
+# in proxy-server.conf. (string value)
+#reseller_prefix=AUTH_
+
+
+#
+# Options defined in ceilometer.openstack.common.db.sqlalchemy.session
+#
+
+# The file name to use with SQLite (string value)
+#sqlite_db=ceilometer.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous=true
+
+
+#
+# Options defined in ceilometer.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in ceilometer.openstack.common.lockutils
+#
+
+# Whether to disable inter-process locks. (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. (string value)
+#lock_path=<None>
+
+
+#
+# Options defined in ceilometer.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
+
+# Publish error events (boolean value)
+#publish_errors=false
+
+# Make deprecations fatal (boolean value)
+#fatal_deprecations=false
+
+# If an instance is passed with the log message, format it
+# like this (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# If an instance UUID is passed with the log message, format
+# it like this (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of logging configuration file. It does not disable
+# existing loggers, but just appends specified logging
+# configuration to any other existing logging options. Please
+# see the Python logging module documentation for details on
+# logging configuration files. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=<None>
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and then will be changed in J to honor RFC5424
+# (boolean value)
+use_syslog=true
+
+# (Optional) Use syslog rfc5424 format for logging. If
+# enabled, will add APP-NAME (RFC5424) before the MSG part of
+# the syslog message. The old format without APP-NAME is
+# deprecated in I, and will be removed in J. (boolean value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in ceilometer.openstack.common.middleware.sizelimit
+#
+
+# The maximum body size per request, in bytes (integer value)
+# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size
+#max_request_body_size=114688
+
+
+#
+# Options defined in ceilometer.openstack.common.notifier.api
+#
+
+# Driver or drivers to handle sending notifications (multi
+# valued)
+#notification_driver=
+
+# Default notification level for outgoing notifications
+# (string value)
+#default_notification_level=INFO
+
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=<None>
+
+
+#
+# Options defined in ceilometer.openstack.common.notifier.rpc_notifier
+#
+
+# AMQP topic used for OpenStack notifications (list value)
+#notification_topics=notifications
+
+
+#
+# Options defined in ceilometer.openstack.common.policy
+#
+
+# JSON file containing policy (string value)
+#policy_file=policy.json
+
+# Rule enforced when requested rule is not found (string
+# value)
+#policy_default_rule=default
+
+
+#
+# Options defined in ceilometer.openstack.common.rpc
+#
+
+# The messaging module to use, defaults to kombu. (string
+# value)
+rpc_backend=rabbit
+
+# Size of RPC thread pool (integer value)
+#rpc_thread_pool_size=64
+
+# Size of RPC connection pool (integer value)
+#rpc_conn_pool_size=30
+
+# Seconds to wait for a response from call or multicall
+# (integer value)
+#rpc_response_timeout=60
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call. (list value)
+#allowed_rpc_exception_modules=nova.exception,cinder.exception,exceptions
+
+# If passed, use a fake RabbitMQ provider (boolean value)
+#fake_rabbit=false
+
+# AMQP exchange to connect to if using RabbitMQ or Qpid
+# (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in ceilometer.openstack.common.rpc.amqp
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+
+#
+# Options defined in ceilometer.openstack.common.rpc.impl_kombu
+#
+
+# If SSL is enabled, the SSL version to use. Valid values are
+# TLSv1, SSLv23 and SSLv3. SSLv2 might be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled) (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled) (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL enabled)
+# (string value)
+#kombu_ssl_ca_certs=
+
+# The RabbitMQ broker address where a single node is used
+# (string value)
+rabbit_host = {{ RABBITMQ_HOST }}
+
+
+# The RabbitMQ broker port where a single node is used
+# (integer value)
+rabbit_port = {{ RABBITMQ_PORT }}
+
+# RabbitMQ HA cluster host:port pairs (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ (boolean value)
+rabbit_use_ssl=false
+
+# The RabbitMQ userid (string value)
+rabbit_userid = {{ RABBITMQ_USER }}
+
+# The RabbitMQ password (string value)
+rabbit_password = {{ RABBITMQ_PASSWORD }}
+
+
+# The RabbitMQ virtual host (string value)
+rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count) (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+
+#
+# Options defined in ceilometer.openstack.common.rpc.impl_qpid
+#
+
+# Qpid broker hostname (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for qpid connection (string value)
+#qpid_username=
+
+# Password for qpid connection (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl' (string value)
+#qpid_protocol=tcp
+
+# Disable Nagle algorithm (boolean value)
+#qpid_tcp_nodelay=true
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+
+#
+# Options defined in ceilometer.openstack.common.rpc.impl_zmq
+#
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver (string value)
+#rpc_zmq_matchmaker=ceilometer.openstack.common.rpc.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1 (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=ceilometer
+
+
+#
+# Options defined in ceilometer.openstack.common.rpc.matchmaker
+#
+
+# Heartbeat frequency (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+
+#
+# Options defined in ceilometer.orchestration.notifications
+#
+
+# Exchange name for Heat notifications (string value)
+#heat_control_exchange=heat
+
+
+#
+# Options defined in ceilometer.storage
+#
+
+# DEPRECATED - Database connection string. (string value)
+#database_connection=<None>
+
+
+#
+# Options defined in ceilometer.storage.sqlalchemy.models
+#
+
+# MySQL engine to use. (string value)
+#mysql_engine=InnoDB
+
+
+#
+# Options defined in ceilometer.volume.notifications
+#
+
+# Exchange name for Cinder notifications. (string value)
+cinder_control_exchange=cinder
+
+
+[alarm]
+
+#
+# Options defined in ceilometer.cli
+#
+
+# Class to launch as alarm evaluation service. (string value)
+#evaluation_service=ceilometer.alarm.service.SingletonAlarmService
+
+
+#
+# Options defined in ceilometer.alarm.notifier.rest
+#
+
+# SSL Client certificate for REST notifier. (string value)
+#rest_notifier_certificate_file=
+
+# SSL Client private key for REST notifier. (string value)
+#rest_notifier_certificate_key=
+
+# Whether to verify the SSL Server certificate when calling
+# alarm action. (boolean value)
+#rest_notifier_ssl_verify=true
+
+
+#
+# Options defined in ceilometer.alarm.rpc
+#
+
+# The topic that ceilometer uses for alarm notifier messages.
+# (string value)
+#notifier_rpc_topic=alarm_notifier
+
+# The topic that ceilometer uses for alarm partition
+# coordination messages. (string value)
+#partition_rpc_topic=alarm_partition_coordination
+
+
+#
+# Options defined in ceilometer.alarm.service
+#
+
+# Period of evaluation cycle, should be >= the configured
+# pipeline interval for collection of underlying metrics.
+# (integer value)
+# Deprecated group/name - [alarm]/threshold_evaluation_interval
+#evaluation_interval=60
+
+
+#
+# Options defined in ceilometer.api.controllers.v2
+#
+
+# Record alarm change events. (boolean value)
+#record_history=true
+
+
+[api]
+
+#
+# Options defined in ceilometer.api
+#
+
+# The port for the ceilometer API server. (integer value)
+# Deprecated group/name - [DEFAULT]/metering_api_port
+#port=8777
+
+# The listen IP for the ceilometer API server. (string value)
+#host=0.0.0.0
+
+
+[collector]
+
+#
+# Options defined in ceilometer.collector
+#
+
+# Address to which the UDP socket is bound. Set to an empty
+# string to disable. (string value)
+#udp_address=0.0.0.0
+
+# Port to which the UDP socket is bound. (integer value)
+#udp_port=4952
+
+
+[database]
+
+#
+# Options defined in ceilometer.openstack.common.db.api
+#
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+
+#
+# Options defined in ceilometer.openstack.common.db.sqlalchemy.session
+#
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+connection=postgresql://{{ CEILOMETER_DB_USER }}:{{ CEILOMETER_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/ceilometer
+
+# The SQLAlchemy connection string used to connect to the
+# slave database (string value)
+#slave_connection=
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+
+#
+# Options defined in ceilometer.storage
+#
+
+# Number of seconds that samples are kept in the database for
+# (<= 0 means forever). (integer value)
+#time_to_live=-1
+
+
+[dispatcher_file]
+
+#
+# Options defined in ceilometer.dispatcher.file
+#
+
+# Name and the location of the file to record meters. (string
+# value)
+#file_path=<None>
+
+# The max size of the file. (integer value)
+#max_bytes=0
+
+# The max number of the files to keep. (integer value)
+#backup_count=0
+
+
+[event]
+
+#
+# Options defined in ceilometer.event.converter
+#
+
+# Configuration file for event definitions. (string value)
+#definitions_cfg_file=event_definitions.yaml
+
+# Drop notifications if no event definition matches.
+# (Otherwise, we convert them with just the default traits)
+# (boolean value)
+#drop_unmatched_notifications=false
+
+
+[keystone_authtoken]
+
+#
+# Options defined in keystoneclient.middleware.auth_token
+#
+
+# Prefix to prepend at the beginning of the path (string
+# value)
+#auth_admin_prefix=
+
+# Host providing the admin Identity API endpoint (string
+# value)
+#auth_host=127.0.0.1
+
+# Port of the admin Identity API endpoint (integer value)
+#auth_port=35357
+
+# Protocol of the admin Identity API endpoint (http or https)
+# (string value)
+#auth_protocol=https
+
+# Complete public Identity API endpoint (string value)
+auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357
+
+
+# API version of the admin Identity API endpoint (string
+# value)
+#auth_version=<None>
+
+# Do not handle authorization requests within the middleware,
+# but delegate the authorization decision to downstream WSGI
+# components (boolean value)
+#delay_auth_decision=false
+
+# Request timeout value for communicating with Identity API
+# server. (boolean value)
+#http_connect_timeout=<None>
+
+# How many times to retry reconnecting when communicating
+# with the Identity API server. (integer value)
+#http_request_max_retries=3
+
+# Allows passing in the name of a fake http_handler callback
+# function used instead of httplib.HTTPConnection or
+# httplib.HTTPSConnection. Useful for unit testing where
+# network is not available. (string value)
+#http_handler=<None>
+
+# Single shared secret with the Keystone configuration used
+# for bootstrapping a Keystone installation, or otherwise
+# bypassing the normal authentication process. (string value)
+#admin_token=<None>
+
+# Keystone account username (string value)
+admin_user = {{ CEILOMETER_SERVICE_USER }}
+
+# Keystone account password (string value)
+admin_password = {{ CEILOMETER_SERVICE_PASSWORD }}
+
+# Keystone service account tenant name to validate user tokens
+# (string value)
+admin_tenant_name = service
+
+# Env key for the swift cache (string value)
+#cache=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#certfile=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#keyfile=<None>
+
+# A PEM encoded Certificate Authority to use when verifying
+# HTTPS connections. Defaults to system CAs. (string value)
+#cafile=<None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure=false
+
+# Directory used to cache files related to PKI tokens (string
+# value)
+#signing_dir=<None>
+
+# If defined, the memcache server(s) to use for caching (list
+# value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers=<None>
+
+# In order to prevent excessive requests and validations, the
+# middleware uses an in-memory cache for the tokens the
+# Keystone API returns. This is only valid if memcache_servers
+# is defined. Set to -1 to disable caching completely.
+# (integer value)
+#token_cache_time=300
+
+# Value only used for unit testing (integer value)
+#revocation_cache_time=1
+
+# (optional) if defined, indicate whether token data should be
+# authenticated or authenticated and encrypted. Acceptable
+# values are MAC or ENCRYPT. If MAC, token data is
+# authenticated (with HMAC) in the cache. If ENCRYPT, token
+# data is encrypted and authenticated in the cache. If the
+# value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+#memcache_security_strategy=<None>
+
+# (optional, mandatory if memcache_security_strategy is
+# defined) this string is used for key derivation. (string
+# value)
+#memcache_secret_key=<None>
+
+# (optional) indicate whether to set the X-Service-Catalog
+# header. If False, middleware will not ask for service
+# catalog on token validation and will not set the X-Service-
+# Catalog header. (boolean value)
+#include_service_catalog=true
+
+# Used to control the use and type of token binding. Can be
+# set to: "disabled" to not check token binding. "permissive"
+# (default) to validate binding information if the bind type
+# is of a form known to the server and ignore it if not.
+# "strict" like "permissive" but if the bind type is unknown
+# the token will be rejected. "required" any form of token
+# binding is needed to be allowed. Finally the name of a
+# binding method that must be present in tokens. (string
+# value)
+#enforce_token_bind=permissive
+
+
+[matchmaker_redis]
+
+#
+# Options defined in ceilometer.openstack.common.rpc.matchmaker_redis
+#
+
+# Host to locate redis (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server. (optional) (string value)
+#password=<None>
+
+
+[matchmaker_ring]
+
+#
+# Options defined in ceilometer.openstack.common.rpc.matchmaker_ring
+#
+
+# Matchmaker ring file (JSON) (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[notification]
+
+#
+# Options defined in ceilometer.notification
+#
+
+# Acknowledge message when event persistence fails. (boolean
+# value)
+#ack_on_event_error=true
+
+# Save event details. (boolean value)
+#store_events=false
+
+
+[publisher]
+
+#
+# Options defined in ceilometer.publisher.utils
+#
+
+# Secret value for signing metering messages. (string value)
+# Deprecated group/name - [DEFAULT]/metering_secret
+# Deprecated group/name - [publisher_rpc]/metering_secret
+# It should be set to some random value
+metering_secret = {{ METERING_SECRET }}
+
+[publisher_rpc]
+
+#
+# Options defined in ceilometer.publisher.rpc
+#
+
+# The topic that ceilometer uses for metering messages.
+# (string value)
+#metering_topic=metering
+
+
+[rpc_notifier2]
+
+#
+# Options defined in ceilometer.openstack.common.notifier.rpc_notifier2
+#
+
+# AMQP topic(s) used for OpenStack notifications (list value)
+#topics=notifications
+
+
+[service_credentials]
+
+#
+# Options defined in ceilometer.service
+#
+
+# User name to use for OpenStack service access. (string
+# value)
+os_username = {{ CEILOMETER_SERVICE_USER }}
+
+# Password to use for OpenStack service access. (string value)
+os_password = {{ CEILOMETER_SERVICE_PASSWORD }}
+
+# Tenant ID to use for OpenStack service access. (string
+# value)
+#os_tenant_id=
+
+# Tenant name to use for OpenStack service access. (string
+# value)
+os_tenant_name = service
+
+# Certificate chain for SSL validation. (string value)
+#os_cacert=<None>
+
+# Auth URL to use for OpenStack service access. (string value)
+os_auth_url = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+
+# Region name to use for OpenStack service endpoints. (string
+# value)
+os_region_name = regionOne
+
+# Type of endpoint in Identity service catalog to use for
+# communication with OpenStack services. (string value)
+os_endpoint_type = internalURL
+
+# Disables X.509 certificate validation when an SSL connection
+# to Identity Service is established. (boolean value)
+#insecure=false
+
+
+[ssl]
+
+#
+# Options defined in ceilometer.openstack.common.sslutils
+#
+
+# CA certificate file to use to verify connecting clients
+# (string value)
+#ca_file=<None>
+
+# Certificate file to use when starting the server securely
+# (string value)
+#cert_file=<None>
+
+# Private key file to use when starting the server securely
+# (string value)
+#key_file=<None>
+
+
+[vmware]
+
+#
+# Options defined in ceilometer.compute.virt.vmware.inspector
+#
+
+# IP address of the VMware Vsphere host (string value)
+#host_ip=
+
+# Username of VMware Vsphere (string value)
+#host_username=
+
+# Password of VMware Vsphere (string value)
+#host_password=
+
+# Number of times a VMware Vsphere API must be retried
+# (integer value)
+#api_retry_count=10
+
+# Sleep time in seconds for polling an ongoing async task
+# (floating point value)
+#task_poll_interval=0.5
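Note: the ceilometer.conf added above is a template, not a finished config; placeholders such as {{ CEILOMETER_DB_USER }} and {{ CONTROLLER_HOST_ADDRESS }} are Jinja2 variables filled in at deploy time by the Ansible template tasks used elsewhere in this change. A minimal sketch of how one of these lines renders, assuming python-jinja2 is available and using purely hypothetical values:

    # Illustrative only: render one templated line the way Ansible's template
    # module would; the variable values below are hypothetical.
    from jinja2 import Template

    line = ("connection=postgresql://{{ CEILOMETER_DB_USER }}:"
            "{{ CEILOMETER_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/ceilometer")
    print(Template(line).render(
        CEILOMETER_DB_USER="ceilometer",            # hypothetical
        CEILOMETER_DB_PASSWORD="example-password",  # hypothetical
        CONTROLLER_HOST_ADDRESS="192.0.2.10",       # hypothetical
    ))
    # connection=postgresql://ceilometer:example-password@192.0.2.10/ceilometer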
diff --git a/openstack/usr/share/openstack/cinder-config.yml b/openstack/usr/share/openstack/cinder-config.yml
new file mode 100644
index 00000000..fd3e2cd0
--- /dev/null
+++ b/openstack/usr/share/openstack/cinder-config.yml
@@ -0,0 +1,37 @@
+---
+- hosts: localhost
+  vars_files:
+  - "/etc/openstack/cinder.conf"
+  tasks:
+# Configure cinder
+  - name: Create the cinder user.
+    user:
+      name: cinder
+      comment: OpenStack Cinder Daemons
+      shell: /sbin/nologin
+      home: /var/lib/cinder
+
+  - name: Create the /var folders for cinder
+    file:
+      path: "{{ item }}"
+      state: directory
+      owner: cinder
+      group: cinder
+    with_items:
+    - /var/run/cinder
+    - /var/lock/cinder
+    - /var/log/cinder
+    - /var/lib/cinder
+    - /var/lib/cinder/volumes
+
+  - name: Create /etc/cinder directory
+    file:
+      path: /etc/cinder
+      state: directory
+
+  - name: Add the configuration needed for cinder in /etc/cinder using templates
+    template:
+      src: /usr/share/openstack/cinder/{{ item }}
+      dest: /etc/cinder/{{ item }}
+    with_lines:
+    - cd /usr/share/openstack/cinder && find -type f
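Note: the final task above templates every file found under /usr/share/openstack/cinder into /etc/cinder, iterating with with_lines over `find -type f`. A rough sketch of what that loop amounts to, assuming python-jinja2 and a flat dict of deployment variables (the variable subset shown is hypothetical):

    # Illustrative only: approximate behaviour of the template + with_lines task.
    import os
    from jinja2 import Template

    SRC, DEST = "/usr/share/openstack/cinder", "/etc/cinder"
    variables = {"RABBITMQ_HOST": "192.0.2.10"}  # hypothetical subset of /etc/openstack/cinder.conf

    for root, _dirs, files in os.walk(SRC):
        for name in files:
            src_path = os.path.join(root, name)
            rel_path = os.path.relpath(src_path, SRC)
            dest_path = os.path.join(DEST, rel_path)
            os.makedirs(os.path.dirname(dest_path), exist_ok=True)
            with open(src_path) as src:
                rendered = Template(src.read()).render(**variables)
            with open(dest_path, "w") as dest:
                dest.write(rendered)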
diff --git a/openstack/usr/share/openstack/cinder-db.yml b/openstack/usr/share/openstack/cinder-db.yml
new file mode 100644
index 00000000..2a211720
--- /dev/null
+++ b/openstack/usr/share/openstack/cinder-db.yml
@@ -0,0 +1,60 @@
+---
+- hosts: localhost
+  vars_files:
+  - "/etc/openstack/cinder.conf"
+  tasks:
+  - name: Create cinder service user in service tenant
+    keystone_user:
+      user: "{{ CINDER_SERVICE_USER }}"
+      password: "{{ CINDER_SERVICE_PASSWORD }}"
+      tenant: service
+      token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+  - name: Assign admin role to cinder service user in the service tenant
+    keystone_user:
+      role: admin
+      user: "{{ CINDER_SERVICE_USER }}"
+      tenant: service
+      token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+  - name: Add cinder endpoint
+    keystone_service:
+      name: cinder
+      type: volume
+      description: OpenStack Block Storage
+      publicurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v1/%(tenant_id)s'
+      internalurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v1/%(tenant_id)s'
+      adminurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v1/%(tenant_id)s'
+      region: regionOne
+      token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+  - name: Add cinderv2 endpoint
+    keystone_service:
+      name: cinderv2
+      type: volumev2
+      description: OpenStack Block Storage
+      publicurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v2/%(tenant_id)s'
+      internalurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v2/%(tenant_id)s'
+      adminurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v2/%(tenant_id)s'
+      region: regionOne
+      token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+  - name: Create postgresql user for cinder
+    postgresql_user:
+      name: "{{ CINDER_DB_USER }}"
+      password: "{{ CINDER_DB_PASSWORD }}"
+    sudo: yes
+    sudo_user: cinder
+
+  - name: Create database for cinder services
+    postgresql_db:
+      name: cinder
+      owner: "{{ CINDER_DB_USER }}"
+    sudo: yes
+    sudo_user: cinder
+
+  - name: Initiate cinder database
+    cinder_manage:
+      action: dbsync
+    sudo: yes
+    sudo_user: cinder
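Note: the %(tenant_id)s marker in the endpoint URLs above is Python %-interpolation syntax that the Identity service stores literally and clients expand per request; it is not a Jinja2 variable and must not be substituted at deploy time. A small illustration, with a hypothetical host and tenant id:

    # Illustrative only: how a client expands the stored endpoint URL.
    url = 'http://192.0.2.10:8776/v2/%(tenant_id)s'
    print(url % {'tenant_id': '4d6f7e6f1a2b4c5d8e9f0a1b2c3d4e5f'})
    # http://192.0.2.10:8776/v2/4d6f7e6f1a2b4c5d8e9f0a1b2c3d4e5f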
diff --git a/openstack/usr/share/openstack/cinder-lvs.yml b/openstack/usr/share/openstack/cinder-lvs.yml
new file mode 100644
index 00000000..7a91a306
--- /dev/null
+++ b/openstack/usr/share/openstack/cinder-lvs.yml
@@ -0,0 +1,21 @@
+---
+- hosts: localhost
+  vars_files:
+  - "/etc/openstack/cinder.conf"
+  tasks:
+  - name: Check that CINDER_DEVICE exists
+    stat:
+      path: "{{ CINDER_DEVICE }}"
+    register: cinder_device_stats
+    failed_when: cinder_device_stats.stat.exists == false
+
+  - name: Configure LVM group for cinder
+    lvg:
+      vg: cinder-volumes
+      pvs: "{{ CINDER_DEVICE }}"
+
+  - lineinfile:
+      dest: /etc/lvm/lvm.conf
+      regexp: '# filter = \[ \"a\/\.\*/\" \]'
+      line: ' filter = [ "a|{{ CINDER_DEVICE }}|", "r/.*/" ]'
+      backrefs: yes
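Note: the lineinfile task above rewrites the commented-out default filter line in /etc/lvm/lvm.conf so that LVM only scans the block device given by CINDER_DEVICE and rejects everything else. Roughly the substitution it performs, with a hypothetical device path:

    # Illustrative only: the rewrite applied to the default lvm.conf filter line.
    import re

    cinder_device = "/dev/vdb"  # hypothetical value of CINDER_DEVICE
    default_line = '    # filter = [ "a/.*/" ]'
    new_line = re.sub(r'# filter = \[ "a/\.\*/" \]',
                      'filter = [ "a|%s|", "r/.*/" ]' % cinder_device,
                      default_line)
    print(new_line)
    #     filter = [ "a|/dev/vdb|", "r/.*/" ]   (accept the cinder device, reject the rest)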
diff --git a/openstack/usr/share/openstack/cinder/api-paste.ini b/openstack/usr/share/openstack/cinder/api-paste.ini
new file mode 100644
index 00000000..ba922d5f
--- /dev/null
+++ b/openstack/usr/share/openstack/cinder/api-paste.ini
@@ -0,0 +1,60 @@
+#############
+# OpenStack #
+#############
+
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[filter:request_id]
+paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
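Note: api-paste.ini is consumed by paste.deploy (see api_paste_config in cinder.conf below); the keystone pipelines route requests through the authtoken and keystonecontext filters before they reach the v1/v2 routers. A minimal sketch of how such a composite app can be loaded, assuming the paste.deploy package is available:

    # Illustrative only: load the osapi_volume composite defined above as a
    # WSGI app; cinder-api performs the equivalent load internally.
    from paste.deploy import loadapp

    app = loadapp('config:/etc/cinder/api-paste.ini', name='osapi_volume')
    # 'app' can now be served by any WSGI server for testing purposes.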
diff --git a/openstack/usr/share/openstack/cinder/cinder.conf b/openstack/usr/share/openstack/cinder/cinder.conf
new file mode 100644
index 00000000..a58004b5
--- /dev/null
+++ b/openstack/usr/share/openstack/cinder/cinder.conf
@@ -0,0 +1,2825 @@
+[DEFAULT]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in AMQP. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The number of prefetched messages held by receiver. (integer
+# value)
+#qpid_receiver_capacity=1
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). valid values
+# are TLSv1 and SSLv23. SSLv2 and SSLv3 may be available on
+# some distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+rabbit_host={{ RABBITMQ_HOST }}
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+rabbit_port={{ RABBITMQ_PORT }}
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+rabbit_userid={{ RABBITMQ_USER }}
+
+# The RabbitMQ password. (string value)
+rabbit_password={{ RABBITMQ_PASSWORD }}
+
+# the RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
+# (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=cinder
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+notification_driver=messagingv2
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+control_exchange=cinder
+
+
+#
+# Options defined in cinder.exception
+#
+
+# Make exception message format errors fatal. (boolean value)
+#fatal_exception_format_errors=false
+
+
+#
+# Options defined in cinder.quota
+#
+
+# Number of volumes allowed per project (integer value)
+#quota_volumes=10
+
+# Number of volume snapshots allowed per project (integer
+# value)
+#quota_snapshots=10
+
+# Number of consistencygroups allowed per project (integer
+# value)
+#quota_consistencygroups=10
+
+# Total amount of storage, in gigabytes, allowed for volumes
+# and snapshots per project (integer value)
+#quota_gigabytes=1000
+
+# Number of volume backups allowed per project (integer value)
+#quota_backups=10
+
+# Total amount of storage, in gigabytes, allowed for backups
+# per project (integer value)
+#quota_backup_gigabytes=1000
+
+# Number of seconds until a reservation expires (integer
+# value)
+#reservation_expire=86400
+
+# Count of reservations until usage is refreshed (integer
+# value)
+#until_refresh=0
+
+# Number of seconds between subsequent usage refreshes
+# (integer value)
+#max_age=0
+
+# Default driver to use for quota checks (string value)
+#quota_driver=cinder.quota.DbQuotaDriver
+
+# Enables or disables use of default quota class with default
+# quota. (boolean value)
+#use_default_quota_class=true
+
+
+#
+# Options defined in cinder.service
+#
+
+# Interval, in seconds, between nodes reporting state to
+# datastore (integer value)
+#report_interval=10
+
+# Interval, in seconds, between running periodic tasks
+# (integer value)
+#periodic_interval=60
+
+# Range, in seconds, to randomly delay when starting the
+# periodic task scheduler to reduce stampeding. (Disable by
+# setting to 0) (integer value)
+#periodic_fuzzy_delay=60
+
+# IP address on which OpenStack Volume API listens (string
+# value)
+#osapi_volume_listen=0.0.0.0
+
+# Port on which OpenStack Volume API listens (integer value)
+#osapi_volume_listen_port=8776
+
+# Number of workers for OpenStack Volume API service. The
+# default is equal to the number of CPUs available. (integer
+# value)
+#osapi_volume_workers=<None>
+
+
+#
+# Options defined in cinder.ssh_utils
+#
+
+# Option to enable strict host key checking. When set to
+# "True" Cinder will only connect to systems with a host key
+# present in the configured "ssh_hosts_key_file". When set to
+# "False" the host key will be saved upon first connection and
+# used for subsequent connections. Default=False (boolean
+# value)
+#strict_ssh_host_key_policy=false
+
+# File containing SSH host keys for the systems with which
+# Cinder needs to communicate. OPTIONAL:
+# Default=$state_path/ssh_known_hosts (string value)
+#ssh_hosts_key_file=$state_path/ssh_known_hosts
+
+
+#
+# Options defined in cinder.test
+#
+
+# File name of clean sqlite db (string value)
+#sqlite_clean_db=clean.sqlite
+
+
+#
+# Options defined in cinder.wsgi
+#
+
+# Maximum line size of message headers to be accepted.
+# max_header_line may need to be increased when using large
+# tokens (typically those generated by the Keystone v3 API
+# with big service catalogs). (integer value)
+#max_header_line=16384
+
+# If False, closes the client socket connection explicitly.
+# Set it to True to maintain backward compatibility. The
+# recommended setting is False. (boolean value)
+#wsgi_keep_alive=true
+
+# Sets the value of TCP_KEEPALIVE (True/False) for each server
+# socket. (boolean value)
+#tcp_keepalive=true
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server
+# socket. Not supported on OS X. (integer value)
+#tcp_keepidle=600
+
+# Sets the value of TCP_KEEPINTVL in seconds for each server
+# socket. Not supported on OS X. (integer value)
+#tcp_keepalive_interval=<None>
+
+# Sets the value of TCP_KEEPCNT for each server socket. Not
+# supported on OS X. (integer value)
+#tcp_keepalive_count=<None>
+
+# CA certificate file to use to verify connecting clients
+# (string value)
+#ssl_ca_file=<None>
+
+# Certificate file to use when starting the server securely
+# (string value)
+#ssl_cert_file=<None>
+
+# Private key file to use when starting the server securely
+# (string value)
+#ssl_key_file=<None>
+
+
+#
+# Options defined in cinder.api.common
+#
+
+# The maximum number of items that a collection resource
+# returns in a single response (integer value)
+#osapi_max_limit=1000
+
+# Base URL that will be presented to users in links to the
+# OpenStack Volume API (string value)
+# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix
+#osapi_volume_base_URL=<None>
+
+
+#
+# Options defined in cinder.api.middleware.auth
+#
+
+# Treat X-Forwarded-For as the canonical remote address. Only
+# enable this if you have a sanitizing proxy. (boolean value)
+#use_forwarded_for=false
+
+
+#
+# Options defined in cinder.api.middleware.sizelimit
+#
+
+# Max size for body of a request (integer value)
+#osapi_max_request_body_size=114688
+
+
+#
+# Options defined in cinder.backup.driver
+#
+
+# Backup metadata version to be used when backing up volume
+# metadata. If this number is bumped, make sure the service
+# doing the restore supports the new version. (integer value)
+#backup_metadata_version=1
+
+
+#
+# Options defined in cinder.backup.drivers.ceph
+#
+
+# Ceph configuration file to use. (string value)
+#backup_ceph_conf=/etc/ceph/ceph.conf
+
+# The Ceph user to connect with. Default here is to use the
+# same user as for Cinder volumes. If not using cephx this
+# should be set to None. (string value)
+#backup_ceph_user=cinder
+
+# The chunk size, in bytes, that a backup is broken into
+# before transfer to the Ceph object store. (integer value)
+#backup_ceph_chunk_size=134217728
+
+# The Ceph pool where volume backups are stored. (string
+# value)
+#backup_ceph_pool=backups
+
+# RBD stripe unit to use when creating a backup image.
+# (integer value)
+#backup_ceph_stripe_unit=0
+
+# RBD stripe count to use when creating a backup image.
+# (integer value)
+#backup_ceph_stripe_count=0
+
+# If True, always discard excess bytes when restoring volumes
+# i.e. pad with zeroes. (boolean value)
+#restore_discard_excess_bytes=true
+
+
+#
+# Options defined in cinder.backup.drivers.swift
+#
+
+# The URL of the Swift endpoint (string value)
+#backup_swift_url=<None>
+
+# Info to match when looking for swift in the service catalog.
+# Format is colon-separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> - Only used if
+# backup_swift_url is unset (string value)
+#swift_catalog_info=object-store:swift:publicURL
+
+# Swift authentication mechanism (string value)
+#backup_swift_auth=per_user
+
+# Swift authentication version. Specify "1" for auth 1.0, or
+# "2" for auth 2.0 (string value)
+#backup_swift_auth_version=1
+
+# Swift tenant/account name. Required when connecting to an
+# auth 2.0 system (string value)
+#backup_swift_tenant=<None>
+
+# Swift user name (string value)
+#backup_swift_user=<None>
+
+# Swift key for authentication (string value)
+#backup_swift_key=<None>
+
+# The default Swift container to use (string value)
+#backup_swift_container=volumebackups
+
+# The size in bytes of Swift backup objects (integer value)
+#backup_swift_object_size=52428800
+
+# The number of retries to make for Swift operations (integer
+# value)
+#backup_swift_retry_attempts=3
+
+# The backoff time in seconds between Swift retries (integer
+# value)
+#backup_swift_retry_backoff=2
+
+# Compression algorithm (None to disable) (string value)
+#backup_compression_algorithm=zlib
+
+
+#
+# Options defined in cinder.backup.drivers.tsm
+#
+
+# Volume prefix for the backup id when backing up to TSM
+# (string value)
+#backup_tsm_volume_prefix=backup
+
+# TSM password for the running username (string value)
+#backup_tsm_password=password
+
+# Enable or Disable compression for backups (boolean value)
+#backup_tsm_compression=true
+
+
+#
+# Options defined in cinder.backup.manager
+#
+
+# Driver to use for backups. (string value)
+# Deprecated group/name - [DEFAULT]/backup_service
+#backup_driver=cinder.backup.drivers.swift
+
+
+#
+# Options defined in cinder.common.config
+#
+
+# File name for the paste.deploy config for cinder-api (string
+# value)
+api_paste_config=api-paste.ini
+
+# Top-level directory for maintaining cinder's state (string
+# value)
+# Deprecated group/name - [DEFAULT]/pybasedir
+state_path=/var/lib/cinder
+
+# IP address of this host (string value)
+my_ip={{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# Default glance host name or IP (string value)
+glance_host={{ CONTROLLER_HOST_ADDRESS }}
+
+# Default glance port (integer value)
+#glance_port=9292
+
+# A list of the glance API servers available to cinder
+# ([hostname|ip]:port) (list value)
+#glance_api_servers=$glance_host:$glance_port
+
+# Version of the glance API to use (integer value)
+#glance_api_version=1
+
+# Number of retries when downloading an image from glance
+# (integer value)
+#glance_num_retries=0
+
+# Allow performing insecure SSL (https) requests to glance
+# (boolean value)
+#glance_api_insecure=false
+
+# Enables or disables negotiation of SSL layer compression. In
+# some cases disabling compression can improve data
+# throughput, such as when high network bandwidth is available
+# and you use compressed image formats like qcow2. (boolean
+# value)
+#glance_api_ssl_compression=false
+
+# Location of ca certificates file to use for glance client
+# requests. (string value)
+#glance_ca_certificates_file=<None>
+
+# http/https timeout value for glance operations. If no value
+# (None) is supplied here, the glanceclient default value is
+# used. (integer value)
+#glance_request_timeout=<None>
+
+# The topic that scheduler nodes listen on (string value)
+#scheduler_topic=cinder-scheduler
+
+# The topic that volume nodes listen on (string value)
+#volume_topic=cinder-volume
+
+# The topic that volume backup nodes listen on (string value)
+#backup_topic=cinder-backup
+
+# DEPRECATED: Deploy v1 of the Cinder API. (boolean value)
+#enable_v1_api=true
+
+# Deploy v2 of the Cinder API. (boolean value)
+#enable_v2_api=true
+
+# Enables or disables rate limit of the API. (boolean value)
+#api_rate_limit=true
+
+# Specify list of extensions to load when using
+# osapi_volume_extension option with
+# cinder.api.contrib.select_extensions (list value)
+#osapi_volume_ext_list=
+
+# osapi volume extension to load (multi valued)
+#osapi_volume_extension=cinder.api.contrib.standard_extensions
+
+# Full class name for the Manager for volume (string value)
+#volume_manager=cinder.volume.manager.VolumeManager
+
+# Full class name for the Manager for volume backup (string
+# value)
+#backup_manager=cinder.backup.manager.BackupManager
+
+# Full class name for the Manager for scheduler (string value)
+#scheduler_manager=cinder.scheduler.manager.SchedulerManager
+
+# Name of this node. This can be an opaque identifier. It is
+# not necessarily a host name, FQDN, or IP address. (string
+# value)
+#host=cinder
+
+# Availability zone of this node (string value)
+#storage_availability_zone=nova
+
+# Default availability zone for new volumes. If not set, the
+# storage_availability_zone option value is used as the
+# default for new volumes. (string value)
+#default_availability_zone=<None>
+
+# Default volume type to use (string value)
+#default_volume_type=<None>
+
+# Time period for which to generate volume usages. The options
+# are hour, day, month, or year. (string value)
+#volume_usage_audit_period=month
+
+# Path to the rootwrap configuration file to use for running
+# commands as root (string value)
+rootwrap_config=/etc/cinder/rootwrap.conf
+
+# Enable monkey patching (boolean value)
+#monkey_patch=false
+
+# List of modules/decorators to monkey patch (list value)
+#monkey_patch_modules=
+
+# Maximum time since last check-in for a service to be
+# considered up (integer value)
+#service_down_time=60
+
+# The full class name of the volume API class to use (string
+# value)
+#volume_api_class=cinder.volume.api.API
+
+# The full class name of the volume backup API class (string
+# value)
+#backup_api_class=cinder.backup.api.API
+
+# The strategy to use for auth. Supports noauth, keystone, and
+# deprecated. (string value)
+auth_strategy=keystone
+
+# A list of backend names to use. These backend names should
+# be backed by a unique [CONFIG] group with its options (list
+# value)
+#enabled_backends=<None>
+
+# Whether snapshots count against GigaByte quota (boolean
+# value)
+#no_snapshot_gb_quota=false
+
+# The full class name of the volume transfer API class (string
+# value)
+#transfer_api_class=cinder.transfer.api.API
+
+# The full class name of the volume replication API class
+# (string value)
+#replication_api_class=cinder.replication.api.API
+
+# The full class name of the consistencygroup API class
+# (string value)
+#consistencygroup_api_class=cinder.consistencygroup.api.API
+
+
+#
+# Options defined in cinder.compute
+#
+
+# The full class name of the compute API class to use (string
+# value)
+#compute_api_class=cinder.compute.nova.API
+
+
+#
+# Options defined in cinder.compute.nova
+#
+
+# Match this value when searching for nova in the service
+# catalog. Format is colon-separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#nova_catalog_info=compute:nova:publicURL
+
+# Same as nova_catalog_info, but for admin endpoint. (string
+# value)
+#nova_catalog_admin_info=compute:nova:adminURL
+
+# Override service catalog lookup with template for nova
+# endpoint e.g. http://localhost:8774/v2/%(project_id)s
+# (string value)
+#nova_endpoint_template=<None>
+
+# Same as nova_endpoint_template, but for admin endpoint.
+# (string value)
+#nova_endpoint_admin_template=<None>
+
+# Region name of this node (string value)
+#os_region_name=<None>
+
+# Location of ca certificates file to use for nova client
+# requests. (string value)
+#nova_ca_certificates_file=<None>
+
+# Allow performing insecure SSL requests to nova (boolean
+# value)
+#nova_api_insecure=false
+
+
+#
+# Options defined in cinder.db.api
+#
+
+# The backend to use for db (string value)
+#db_backend=sqlalchemy
+
+# Services to be added to the available pool on create
+# (boolean value)
+#enable_new_services=true
+
+# Template string to be used to generate volume names (string
+# value)
+volume_name_template=volume-%s
+
+# Template string to be used to generate snapshot names
+# (string value)
+#snapshot_name_template=snapshot-%s
+
+# Template string to be used to generate backup names (string
+# value)
+#backup_name_template=backup-%s
+
+
+#
+# Options defined in cinder.db.base
+#
+
+# Driver to use for database access (string value)
+#db_driver=cinder.db
+
+
+#
+# Options defined in cinder.image.glance
+#
+
+# Default core properties of image (list value)
+#glance_core_properties=checksum,container_format,disk_format,image_name,image_id,min_disk,min_ram,name,size
+
+# A list of url schemes that can be downloaded directly via
+# the direct_url. Currently supported schemes: [file]. (list
+# value)
+#allowed_direct_url_schemes=
+
+
+#
+# Options defined in cinder.image.image_utils
+#
+
+# Directory used for temporary storage during image conversion
+# (string value)
+#image_conversion_dir=$state_path/conversion
+
+
+#
+# Options defined in cinder.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in cinder.openstack.common.lockutils
+#
+
+# Whether to disable inter-process locks (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. Defaults to a temp directory
+# (string value)
+lock_path=/var/lock/cinder
+
+
+#
+# Options defined in cinder.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error. (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context. (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context.
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format.
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
+
+# Enables or disables publication of error events. (boolean
+# value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log
+# message. (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log
+# message. (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of a logging configuration file. This file is
+# appended to any existing logging configuration files. For
+# details about logging configuration files, see the Python
+# logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s . (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=<None>
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and will change in J to honor RFC5424. (boolean
+# value)
+use_syslog = True
+
+# (Optional) Enables or disables syslog rfc5424 format for
+# logging. If enabled, prefixes the MSG part of the syslog
+# message with APP-NAME (RFC5424). The format without the APP-
+# NAME is deprecated in I, and will be removed in J. (boolean
+# value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in cinder.openstack.common.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+
+#
+# Options defined in cinder.openstack.common.policy
+#
+
+# The JSON file that defines policies. (string value)
+#policy_file=policy.json
+
+# Default rule. Enforced when a requested rule is not found.
+# (string value)
+#policy_default_rule=default
+
+
+#
+# Options defined in cinder.scheduler.driver
+#
+
+# The scheduler host manager class to use (string value)
+#scheduler_host_manager=cinder.scheduler.host_manager.HostManager
+
+# Maximum number of attempts to schedule a volume (integer
+# value)
+#scheduler_max_attempts=3
+
+
+#
+# Options defined in cinder.scheduler.host_manager
+#
+
+# Which filter class names to use for filtering hosts when not
+# specified in the request. (list value)
+#scheduler_default_filters=AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter
+
+# Which weigher class names to use for weighing hosts. (list
+# value)
+#scheduler_default_weighers=CapacityWeigher
+
+
+#
+# Options defined in cinder.scheduler.manager
+#
+
+# Default scheduler driver to use (string value)
+#scheduler_driver=cinder.scheduler.filter_scheduler.FilterScheduler
+
+
+#
+# Options defined in cinder.scheduler.scheduler_options
+#
+
+# Absolute path to scheduler configuration JSON file. (string
+# value)
+#scheduler_json_config_location=
+
+
+#
+# Options defined in cinder.scheduler.simple
+#
+
+# This config option has been deprecated along with the
+# SimpleScheduler. The new scheduler is able to gather capacity
+# information for each host, so setting the maximum number
+# of volume gigabytes per host is no longer needed. It is safe
+# to remove this option from cinder.conf. (integer value)
+#max_gigabytes=10000
+
+
+#
+# Options defined in cinder.scheduler.weights.capacity
+#
+
+# Multiplier used for weighing volume capacity. Negative
+# numbers mean to stack vs spread. (floating point value)
+#capacity_weight_multiplier=1.0
+
+# Multiplier used for weighing volume capacity. Negative
+# numbers mean to stack vs spread. (floating point value)
+#allocated_capacity_weight_multiplier=-1.0
+
+
+#
+# Options defined in cinder.scheduler.weights.volume_number
+#
+
+# Multiplier used for weighing volume number. Negative numbers
+# mean to spread vs stack. (floating point value)
+#volume_number_multiplier=-1.0
+
+
+#
+# Options defined in cinder.transfer.api
+#
+
+# The number of characters in the salt. (integer value)
+#volume_transfer_salt_length=8
+
+# The number of characters in the autogenerated auth key.
+# (integer value)
+#volume_transfer_key_length=16
+
+
+#
+# Options defined in cinder.volume.api
+#
+
+# Cache volume availability zones in memory for the provided
+# duration in seconds (integer value)
+#az_cache_duration=3600
+
+# Create volume from snapshot at the host where snapshot
+# resides (boolean value)
+#snapshot_same_host=true
+
+# Ensure that the new volumes are the same AZ as snapshot or
+# source volume (boolean value)
+#cloned_volume_same_az=true
+
+
+#
+# Options defined in cinder.volume.driver
+#
+
+# The maximum number of times to rescan the iSER target to find
+# volume (integer value)
+#num_iser_scan_tries=3
+
+# The maximum number of iSER target IDs per host (integer
+# value)
+#iser_num_targets=100
+
+# Prefix for iSER volumes (string value)
+#iser_target_prefix=iqn.2010-10.org.iser.openstack:
+
+# The IP address that the iSER daemon is listening on (string
+# value)
+#iser_ip_address=$my_ip
+
+# The port that the iSER daemon is listening on (integer
+# value)
+#iser_port=3260
+
+# The name of the iSER target user-land tool to use (string
+# value)
+#iser_helper=tgtadm
+
+# Number of times to attempt to run flakey shell commands
+# (integer value)
+#num_shell_tries=3
+
+# The percentage of backend capacity that is reserved (integer
+# value)
+#reserved_percentage=0
+
+# The maximum number of iSCSI target IDs per host (integer
+# value)
+#iscsi_num_targets=100
+
+# Prefix for iSCSI volumes (string value)
+#iscsi_target_prefix=iqn.2010-10.org.openstack:
+
+# The IP address that the iSCSI daemon is listening on (string
+# value)
+iscsi_ip_address={{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# The port that the iSCSI daemon is listening on (integer
+# value)
+#iscsi_port=3260
+
+# The maximum number of times to rescan targets to find volume
+# (integer value)
+# Deprecated group/name - [DEFAULT]/num_iscsi_scan_tries
+#num_volume_device_scan_tries=3
+
+# The backend name for a given driver implementation (string
+# value)
+volume_backend_name=LVM_iSCSI
+
+# Do we attach/detach volumes in cinder using multipath for
+# volume to image and image to volume transfers? (boolean
+# value)
+#use_multipath_for_image_xfer=false
+
+# Method used to wipe old volumes (valid options are: none,
+# zero, shred) (string value)
+#volume_clear=zero
+
+# Size in MiB to wipe at start of old volumes. 0 => all
+# (integer value)
+#volume_clear_size=0
+
+# The flag to pass to ionice to alter the i/o priority of the
+# process used to zero a volume after deletion, for example
+# "-c3" for idle only priority. (string value)
+#volume_clear_ionice=<None>
+
+# iSCSI target user-land tool to use. tgtadm is default, use
+# lioadm for LIO iSCSI support, iseradm for the ISER protocol,
+# or fake for testing. (string value)
+iscsi_helper=lioadm
+
+# Volume configuration file storage directory (string value)
+volumes_dir=$state_path/volumes
+
+# IET configuration file (string value)
+#iet_conf=/etc/iet/ietd.conf
+
+# Comma-separated list of initiator IQNs allowed to connect to
+# the iSCSI target. (From Nova compute nodes.) (string value)
+#lio_initiator_iqns=
+
+# Sets the behavior of the iSCSI target to perform either
+# blockio or fileio. Optionally, auto can be set and Cinder
+# will autodetect the type of backing device (string value)
+#iscsi_iotype=fileio
+
+# The default block size used when copying/clearing volumes
+# (string value)
+#volume_dd_blocksize=1M
+
+# The blkio cgroup name to be used to limit bandwidth of
+# volume copy (string value)
+#volume_copy_blkio_cgroup_name=cinder-volume-copy
+
+# The upper limit of bandwidth of volume copy. 0 => unlimited
+# (integer value)
+#volume_copy_bps_limit=0
+
+# Sets the behavior of the iSCSI target to either perform
+# write-back (on) or write-through (off). This parameter is
+# valid if iscsi_helper is set to tgtadm or iseradm. (string
+# value)
+#iscsi_write_cache=on
+
+# The path to the client certificate key for verification, if
+# the driver supports it. (string value)
+#driver_client_cert_key=<None>
+
+# The path to the client certificate for verification, if the
+# driver supports it. (string value)
+#driver_client_cert=<None>
+
+
+#
+# Options defined in cinder.volume.drivers.block_device
+#
+
+# List of all available devices (list value)
+#available_devices=
+
+
+#
+# Options defined in cinder.volume.drivers.coraid
+#
+
+# IP address of Coraid ESM (string value)
+#coraid_esm_address=
+
+# User name to connect to Coraid ESM (string value)
+#coraid_user=admin
+
+# Name of group on Coraid ESM to which coraid_user belongs
+# (must have admin privilege) (string value)
+#coraid_group=admin
+
+# Password to connect to Coraid ESM (string value)
+#coraid_password=password
+
+# Volume Type key name to store ESM Repository Name (string
+# value)
+#coraid_repository_key=coraid_repository
+
+
+#
+# Options defined in cinder.volume.drivers.datera
+#
+
+# Datera API token. (string value)
+#datera_api_token=<None>
+
+# Datera API port. (string value)
+#datera_api_port=7717
+
+# Datera API version. (string value)
+#datera_api_version=1
+
+# Number of replicas to create of an inode. (string value)
+#datera_num_replicas=3
+
+
+#
+# Options defined in cinder.volume.drivers.emc.emc_vmax_common
+#
+
+# use this file for cinder emc plugin config data (string
+# value)
+#cinder_emc_config_file=/etc/cinder/cinder_emc_config.xml
+
+
+#
+# Options defined in cinder.volume.drivers.emc.emc_vnx_cli
+#
+
+# VNX authentication scope type. (string value)
+#storage_vnx_authentication_type=global
+
+# Directory path that contains the VNX security file. Make
+# sure the security file is generated first. (string value)
+#storage_vnx_security_file_dir=<None>
+
+# Naviseccli Path. (string value)
+#naviseccli_path=
+
+# Storage pool name. (string value)
+#storage_vnx_pool_name=<None>
+
+# VNX secondary SP IP Address. (string value)
+#san_secondary_ip=<None>
+
+# Default timeout for CLI operations in minutes. For example,
+# LUN migration is a typical long running operation, which
+# depends on the LUN size and the load of the array. An upper
+# bound in the specific deployment can be set to avoid
+# unnecessary long wait. By default, it is 365 days long.
+# (integer value)
+#default_timeout=525600
+
+# Default max number of LUNs in a storage group. By default,
+# the value is 255. (integer value)
+#max_luns_per_storage_group=255
+
+# Whether to destroy the storage group when the last LUN is
+# removed from it. By default, the value is False. (boolean value)
+#destroy_empty_storage_group=false
+
+# Mapping between hostname and its iSCSI initiator IP
+# addresses. (string value)
+#iscsi_initiators=
+
+# Automatically register initiators. By default, the value is
+# False. (boolean value)
+#initiator_auto_registration=false
+
+
+#
+# Options defined in cinder.volume.drivers.eqlx
+#
+
+# Group name to use for creating volumes (string value)
+#eqlx_group_name=group-0
+
+# Timeout for the Group Manager cli command execution (integer
+# value)
+#eqlx_cli_timeout=30
+
+# Maximum retry count for reconnection (integer value)
+#eqlx_cli_max_retries=5
+
+# Use CHAP authentication for targets? (boolean value)
+#eqlx_use_chap=false
+
+# Existing CHAP account name (string value)
+#eqlx_chap_login=admin
+
+# Password for specified CHAP account name (string value)
+#eqlx_chap_password=password
+
+# Pool in which volumes will be created (string value)
+#eqlx_pool=default
+
+
+#
+# Options defined in cinder.volume.drivers.fujitsu_eternus_dx_common
+#
+
+# The configuration file for the Cinder SMI-S driver (string
+# value)
+#cinder_smis_config_file=/etc/cinder/cinder_fujitsu_eternus_dx.xml
+
+
+#
+# Options defined in cinder.volume.drivers.fusionio.ioControl
+#
+
+# amount of time to wait for the iSCSI target to come online
+# (integer value)
+#fusionio_iocontrol_targetdelay=5
+
+# number of retries for GET operations (integer value)
+#fusionio_iocontrol_retry=3
+
+# verify the array certificate on each transaction (boolean
+# value)
+#fusionio_iocontrol_verify_cert=true
+
+
+#
+# Options defined in cinder.volume.drivers.glusterfs
+#
+
+# File with the list of available gluster shares (string
+# value)
+#glusterfs_shares_config=/etc/cinder/glusterfs_shares
+
+# Create volumes as sparsed files which take no space. If set
+# to False, the volume is created as a regular file. In that
+# case volume creation takes a lot of time. (boolean value)
+#glusterfs_sparsed_volumes=true
+
+# Create volumes as QCOW2 files rather than raw files.
+# (boolean value)
+#glusterfs_qcow2_volumes=false
+
+# Base dir containing mount points for gluster shares. (string
+# value)
+#glusterfs_mount_point_base=$state_path/mnt
+
+
+#
+# Options defined in cinder.volume.drivers.hds.hds
+#
+
+# The configuration file for the Cinder HDS driver for HUS
+# (string value)
+#hds_cinder_config_file=/opt/hds/hus/cinder_hus_conf.xml
+
+
+#
+# Options defined in cinder.volume.drivers.hds.iscsi
+#
+
+# Configuration file for HDS iSCSI cinder plugin (string
+# value)
+#hds_hnas_iscsi_config_file=/opt/hds/hnas/cinder_iscsi_conf.xml
+
+
+#
+# Options defined in cinder.volume.drivers.hds.nfs
+#
+
+# Configuration file for HDS NFS cinder plugin (string value)
+#hds_hnas_nfs_config_file=/opt/hds/hnas/cinder_nfs_conf.xml
+
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+
+# Name of an array unit (string value)
+#hitachi_unit_name=<None>
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+
+# Thin pool ID of storage system (integer value)
+#hitachi_thin_pool_id=<None>
+
+# Range of logical device of storage system (string value)
+#hitachi_ldev_range=<None>
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+
+# Copy speed of storage system (integer value)
+#hitachi_copy_speed=3
+
+# Interval to check copy (integer value)
+#hitachi_copy_check_interval=3
+
+# Interval to check copy asynchronously (integer value)
+#hitachi_async_copy_check_interval=10
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+
+# Range of group number (string value)
+#hitachi_group_range=<None>
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+#hitachi_group_request=false
+
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_fc
+#
+
+# Request for FC Zone creating HostGroup (boolean value)
+#hitachi_zoning_request=false
+
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm
+#
+
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_iscsi
+#
+
+# Add CHAP user (boolean value)
+#hitachi_add_chap_user=false
+
+# iSCSI authentication method (string value)
+#hitachi_auth_method=<None>
+
+# iSCSI authentication username (string value)
+#hitachi_auth_user=HBSD-CHAP-user
+
+# iSCSI authentication password (string value)
+#hitachi_auth_password=HBSD-CHAP-password
+
+
+#
+# Options defined in cinder.volume.drivers.huawei
+#
+
+# The configuration file for the Cinder Huawei driver (string
+# value)
+#cinder_huawei_conf_file=/etc/cinder/cinder_huawei_conf.xml
+
+
+#
+# Options defined in cinder.volume.drivers.ibm.gpfs
+#
+
+# Specifies the path of the GPFS directory where Block Storage
+# volume and snapshot files are stored. (string value)
+#gpfs_mount_point_base=<None>
+
+# Specifies the path of the Image service repository in GPFS.
+# Leave undefined if not storing images in GPFS. (string
+# value)
+#gpfs_images_dir=<None>
+
+# Specifies the type of image copy to be used. Set this when
+# the Image service repository also uses GPFS so that image
+# files can be transferred efficiently from the Image service
+# to the Block Storage service. There are two valid values:
+# "copy" specifies that a full copy of the image is made;
+# "copy_on_write" specifies that copy-on-write optimization
+# strategy is used and unmodified blocks of the image file are
+# shared efficiently. (string value)
+#gpfs_images_share_mode=<None>
+
+# Specifies an upper limit on the number of indirections
+# required to reach a specific block due to snapshots or
+# clones. A lengthy chain of copy-on-write snapshots or
+# clones can have a negative impact on performance, but
+# improves space utilization. 0 indicates unlimited clone
+# depth. (integer value)
+#gpfs_max_clone_depth=0
+
+# Specifies that volumes are created as sparse files which
+# initially consume no space. If set to False, the volume is
+# created as a fully allocated file, in which case, creation
+# may take a significantly longer time. (boolean value)
+#gpfs_sparse_volumes=true
+
+# Specifies the storage pool that volumes are assigned to. By
+# default, the system storage pool is used. (string value)
+#gpfs_storage_pool=system
+
+
+#
+# Options defined in cinder.volume.drivers.ibm.ibmnas
+#
+
+# IP address or Hostname of NAS system. (string value)
+#nas_ip=
+
+# User name to connect to NAS system. (string value)
+#nas_login=admin
+
+# Password to connect to NAS system. (string value)
+#nas_password=
+
+# SSH port to use to connect to NAS system. (integer value)
+#nas_ssh_port=22
+
+# Filename of private key to use for SSH authentication.
+# (string value)
+#nas_private_key=
+
+# IBMNAS platform type to be used as backend storage; valid
+# values are - v7ku : for using IBM Storwize V7000 Unified,
+# sonas : for using IBM Scale Out NAS, gpfs-nas : for using
+# NFS based IBM GPFS deployments. (string value)
+#ibmnas_platform_type=v7ku
+
+
+#
+# Options defined in cinder.volume.drivers.ibm.storwize_svc
+#
+
+# Storage system storage pool for volumes (string value)
+#storwize_svc_volpool_name=volpool
+
+# Storage system space-efficiency parameter for volumes
+# (percentage) (integer value)
+#storwize_svc_vol_rsize=2
+
+# Storage system threshold for volume capacity warnings
+# (percentage) (integer value)
+#storwize_svc_vol_warning=0
+
+# Storage system autoexpand parameter for volumes (True/False)
+# (boolean value)
+#storwize_svc_vol_autoexpand=true
+
+# Storage system grain size parameter for volumes
+# (32/64/128/256) (integer value)
+#storwize_svc_vol_grainsize=256
+
+# Storage system compression option for volumes (boolean
+# value)
+#storwize_svc_vol_compression=false
+
+# Enable Easy Tier for volumes (boolean value)
+#storwize_svc_vol_easytier=true
+
+# The I/O group in which to allocate volumes (integer value)
+#storwize_svc_vol_iogrp=0
+
+# Maximum number of seconds to wait for FlashCopy to be
+# prepared. Maximum value is 600 seconds (10 minutes) (integer
+# value)
+#storwize_svc_flashcopy_timeout=120
+
+# Connection protocol (iSCSI/FC) (string value)
+#storwize_svc_connection_protocol=iSCSI
+
+# Configure CHAP authentication for iSCSI connections
+# (Default: Enabled) (boolean value)
+#storwize_svc_iscsi_chap_enabled=true
+
+# Connect with multipath (FC only; iSCSI multipath is
+# controlled by Nova) (boolean value)
+#storwize_svc_multipath_enabled=false
+
+# Allows a vdisk to be mapped to multiple hosts (boolean value)
+#storwize_svc_multihostmap_enabled=true
+
+# Indicate whether the svc driver is compatible with an NPIV
+# setup. If it is compatible, it will allow no WWPNs to be
+# returned by get_conn_fc_wwpns during initialize_connection.
+# (boolean value)
+#storwize_svc_npiv_compatibility_mode=false
+
+# Allow tenants to specify QOS on create (boolean value)
+#storwize_svc_allow_tenant_qos=false
+
+# If operating in stretched cluster mode, specify the name of
+# the pool in which mirrored copies are stored. Example:
+# "pool2" (string value)
+#storwize_svc_stretched_cluster_partner=<None>
+
+
+#
+# Options defined in cinder.volume.drivers.ibm.xiv_ds8k
+#
+
+# Proxy driver that connects to the IBM Storage Array (string
+# value)
+#xiv_ds8k_proxy=xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy
+
+# Connection type to the IBM Storage Array
+# (fibre_channel|iscsi) (string value)
+#xiv_ds8k_connection_type=iscsi
+
+# CHAP authentication mode, effective only for iscsi
+# (disabled|enabled) (string value)
+#xiv_chap=disabled
+
+
+#
+# Options defined in cinder.volume.drivers.lvm
+#
+
+# Name for the VG that will contain exported volumes (string
+# value)
+volume_group=cinder-volumes
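+
+# The volume group itself is expected to exist already; on a
+# typical host it would be created with something like
+# (hypothetical device path):
+#   pvcreate /dev/sdb
+#   vgcreate cinder-volumes /dev/sdb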
+
+# If >0, create LVs with multiple mirrors. Note that this
+# requires lvm_mirrors + 2 PVs with available space (integer
+# value)
+#lvm_mirrors=0
+
+# Type of LVM volumes to deploy; (default or thin) (string
+# value)
+#lvm_type=default
+
+
+#
+# Options defined in cinder.volume.drivers.netapp.options
+#
+
+# The vFiler unit on which provisioning of block storage
+# volumes will be done. This option is only used by the driver
+# when connecting to an instance with a storage family of Data
+# ONTAP operating in 7-Mode and the storage protocol selected
+# is iSCSI. Only use this option when utilizing the MultiStore
+# feature on the NetApp storage system. (string value)
+#netapp_vfiler=<None>
+
+# Administrative user account name used to access the storage
+# system or proxy server. (string value)
+#netapp_login=<None>
+
+# Password for the administrative user account specified in
+# the netapp_login option. (string value)
+#netapp_password=<None>
+
+# This option specifies the virtual storage server (Vserver)
+# name on the storage cluster on which provisioning of block
+# storage volumes should occur. If using the NFS storage
+# protocol, this parameter is mandatory for storage service
+# catalog support (utilized by Cinder volume type extra_specs
+# support). If this option is specified, the exports belonging
+# to the Vserver will only be used for provisioning in the
+# future. Block storage volumes on exports not belonging to
+# the Vserver specified by this option will continue to
+# function normally. (string value)
+#netapp_vserver=<None>
+
+# The hostname (or IP address) for the storage system or proxy
+# server. (string value)
+#netapp_server_hostname=<None>
+
+# The TCP port to use for communication with the storage
+# system or proxy server. Traditionally, port 80 is used for
+# HTTP and port 443 is used for HTTPS; however, this value
+# should be changed if an alternate port has been configured
+# on the storage system or proxy server. (integer value)
+#netapp_server_port=80
+
+# This option is used to specify the path to the E-Series
+# proxy application on a proxy server. The value is combined
+# with the value of the netapp_transport_type,
+# netapp_server_hostname, and netapp_server_port options to
+# create the URL used by the driver to connect to the proxy
+# application. (string value)
+#netapp_webservice_path=/devmgr/v2
+
+# This option is only utilized when the storage family is
+# configured to eseries. This option is used to restrict
+# provisioning to the specified controllers. Specify the value
+# of this option to be a comma separated list of controller
+# hostnames or IP addresses to be used for provisioning.
+# (string value)
+#netapp_controller_ips=<None>
+
+# Password for the NetApp E-Series storage array. (string
+# value)
+#netapp_sa_password=<None>
+
+# This option is used to restrict provisioning to the
+# specified storage pools. Only dynamic disk pools are
+# currently supported. Specify the value of this option to be
+# a comma separated list of disk pool names to be used for
+# provisioning. (string value)
+#netapp_storage_pools=<None>
+
+# This option is used to define how the controllers in the
+# E-Series storage array will work with the particular
+# operating system on the hosts that are connected to it.
+# (string value)
+#netapp_eseries_host_type=linux_dm_mp
+
+# If the percentage of available space for an NFS share has
+# dropped below the value specified by this option, the NFS
+# image cache will be cleaned. (integer value)
+#thres_avl_size_perc_start=20
+
+# When the percentage of available space on an NFS share has
+# reached the percentage specified by this option, the driver
+# will stop clearing files from the NFS image cache that have
+# not been accessed in the last M minutes, where M is the
+# value of the expiry_thres_minutes configuration option.
+# (integer value)
+#thres_avl_size_perc_stop=60
+
+# This option specifies the threshold for last access time for
+# images in the NFS image cache. When a cache cleaning cycle
+# begins, images in the cache that have not been accessed in
+# the last M minutes, where M is the value of this parameter,
+# will be deleted from the cache to create free space on the
+# NFS share. (integer value)
+#expiry_thres_minutes=720
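+
+# Worked example, purely illustrative, using the defaults above:
+# with thres_avl_size_perc_start=20, thres_avl_size_perc_stop=60
+# and expiry_thres_minutes=720, cleaning of the NFS image cache
+# begins once free space on a share drops below 20%, removes
+# cached images not accessed in the last 720 minutes (12 hours),
+# and stops once free space reaches 60% again.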
+
+# This option specifies the path of the NetApp copy offload
+# tool binary. Ensure that the binary has execute permissions
+# set which allow the effective user of the cinder-volume
+# process to execute the file. (string value)
+#netapp_copyoffload_tool_path=<None>
+
+# The quantity to be multiplied by the requested volume size
+# to ensure enough space is available on the virtual storage
+# server (Vserver) to fulfill the volume creation request.
+# (floating point value)
+#netapp_size_multiplier=1.2
+
+# This option is only utilized when the storage protocol is
+# configured to use iSCSI. This option is used to restrict
+# provisioning to the specified controller volumes. Specify
+# the value of this option to be a comma separated list of
+# NetApp controller volume names to be used for provisioning.
+# (string value)
+#netapp_volume_list=<None>
+
+# The storage family type used on the storage system; valid
+# values are ontap_7mode for using Data ONTAP operating in
+# 7-Mode, ontap_cluster for using clustered Data ONTAP, or
+# eseries for using E-Series. (string value)
+#netapp_storage_family=ontap_cluster
+
+# The storage protocol to be used on the data path with the
+# storage system; valid values are iscsi or nfs. (string
+# value)
+#netapp_storage_protocol=<None>
+
+# The transport protocol used when communicating with the
+# storage system or proxy server. Valid values are http or
+# https. (string value)
+#netapp_transport_type=http
+
+
+#
+# Options defined in cinder.volume.drivers.nexenta.options
+#
+
+# IP address of Nexenta SA (string value)
+#nexenta_host=
+
+# HTTP port to connect to Nexenta REST API server (integer
+# value)
+#nexenta_rest_port=2000
+
+# Use http or https for REST connection (default auto) (string
+# value)
+#nexenta_rest_protocol=auto
+
+# User name to connect to Nexenta SA (string value)
+#nexenta_user=admin
+
+# Password to connect to Nexenta SA (string value)
+#nexenta_password=nexenta
+
+# Nexenta target portal port (integer value)
+#nexenta_iscsi_target_portal_port=3260
+
+# SA Pool that holds all volumes (string value)
+#nexenta_volume=cinder
+
+# IQN prefix for iSCSI targets (string value)
+#nexenta_target_prefix=iqn.1986-03.com.sun:02:cinder-
+
+# Prefix for iSCSI target groups on SA (string value)
+#nexenta_target_group_prefix=cinder/
+
+# File with the list of available nfs shares (string value)
+#nexenta_shares_config=/etc/cinder/nfs_shares
+
+# Base directory that contains NFS share mount points (string
+# value)
+#nexenta_mount_point_base=$state_path/mnt
+
+# Enables or disables the creation of volumes as sparse files
+# that take no space. If disabled (False), the volume is
+# created as a regular file, which takes a long time. (boolean
+# value)
+#nexenta_sparsed_volumes=true
+
+# Default compression value for new ZFS folders. (string
+# value)
+#nexenta_volume_compression=on
+
+# If set to True, cache the NexentaStor appliance volroot
+# option value. (boolean value)
+#nexenta_nms_cache_volroot=true
+
+# Enable stream compression, level 1..9. 1 - gives best speed;
+# 9 - gives best compression. (integer value)
+#nexenta_rrmgr_compression=0
+
+# TCP Buffer size in KiloBytes. (integer value)
+#nexenta_rrmgr_tcp_buf_size=4096
+
+# Number of TCP connections. (integer value)
+#nexenta_rrmgr_connections=2
+
+# Block size for volumes (default=blank means 8KB) (string
+# value)
+#nexenta_blocksize=
+
+# Enables or disables the creation of sparse volumes (boolean
+# value)
+#nexenta_sparse=false
+
+
+#
+# Options defined in cinder.volume.drivers.nfs
+#
+
+# File with the list of available nfs shares (string value)
+#nfs_shares_config=/etc/cinder/nfs_shares
+
+# Create volumes as sparse files which take no space. If set
+# to False, the volume is created as a regular file, in which
+# case volume creation takes a lot of time. (boolean value)
+#nfs_sparsed_volumes=true
+
+# Percent of ACTUAL usage of the underlying volume before no
+# new volumes can be allocated to the volume destination.
+# (floating point value)
+#nfs_used_ratio=0.95
+
+# This will compare the allocated to available space on the
+# volume destination. If the ratio exceeds this number, the
+# destination will no longer be valid. (floating point value)
+#nfs_oversub_ratio=1.0
+
+# Base dir containing mount points for nfs shares. (string
+# value)
+#nfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the nfs client. See section of the
+# nfs man page for details. (string value)
+#nfs_mount_options=<None>
+
+
+#
+# Options defined in cinder.volume.drivers.nimble
+#
+
+# Nimble Controller pool name (string value)
+#nimble_pool_name=default
+
+# Nimble Subnet Label (string value)
+#nimble_subnet_label=*
+
+
+#
+# Options defined in cinder.volume.drivers.prophetstor.options
+#
+
+# DPL pool uuid in which DPL volumes are stored. (string
+# value)
+#dpl_pool=
+
+# DPL port number. (integer value)
+#dpl_port=8357
+
+
+#
+# Options defined in cinder.volume.drivers.pure
+#
+
+# REST API authorization token. (string value)
+#pure_api_token=<None>
+
+
+#
+# Options defined in cinder.volume.drivers.rbd
+#
+
+# The RADOS pool where rbd volumes are stored (string value)
+#rbd_pool=rbd
+
+# The RADOS client name for accessing rbd volumes - only set
+# when using cephx authentication (string value)
+#rbd_user=<None>
+
+# Path to the ceph configuration file (string value)
+#rbd_ceph_conf=
+
+# Flatten volumes created from snapshots to remove dependency
+# from volume to snapshot (boolean value)
+#rbd_flatten_volume_from_snapshot=false
+
+# The libvirt uuid of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=<None>
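+
+# Illustrative cephx example (hypothetical values): the client
+# name and secret UUID are set together, and the UUID would
+# normally match the libvirt secret that holds the cephx key on
+# the compute hosts.
+#rbd_user=cinder
+#rbd_secret_uuid=457eb676-33da-42ec-9a8c-9293d545c337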
+
+# Directory where temporary image files are stored when the
+# volume driver does not write them directly to the volume.
+# (string value)
+#volume_tmp_dir=<None>
+
+# Maximum number of nested volume clones that are taken before
+# a flatten occurs. Set to 0 to disable cloning. (integer
+# value)
+#rbd_max_clone_depth=5
+
+# Volumes will be chunked into objects of this size (in
+# megabytes). (integer value)
+#rbd_store_chunk_size=4
+
+# Timeout value (in seconds) used when connecting to ceph
+# cluster. If value < 0, no timeout is set and default
+# librados value is used. (integer value)
+#rados_connect_timeout=-1
+
+
+#
+# Options defined in cinder.volume.drivers.remotefs
+#
+
+# IP address or Hostname of NAS system. (string value)
+#nas_ip=
+
+# User name to connect to NAS system. (string value)
+#nas_login=admin
+
+# Password to connect to NAS system. (string value)
+#nas_password=
+
+# SSH port to use to connect to NAS system. (integer value)
+#nas_ssh_port=22
+
+# Filename of private key to use for SSH authentication.
+# (string value)
+#nas_private_key=
+
+
+#
+# Options defined in cinder.volume.drivers.san.hp.hp_3par_common
+#
+
+# 3PAR WSAPI Server Url like https://<3par ip>:8080/api/v1
+# (string value)
+#hp3par_api_url=
+
+# 3PAR Super user username (string value)
+#hp3par_username=
+
+# 3PAR Super user password (string value)
+#hp3par_password=
+
+# The CPG to use for volume creation (string value)
+#hp3par_cpg=OpenStack
+
+# The CPG to use for Snapshots for volumes. If empty
+# hp3par_cpg will be used (string value)
+#hp3par_cpg_snap=
+
+# The time in hours to retain a snapshot. You can't delete it
+# before this expires. (string value)
+#hp3par_snapshot_retention=
+
+# The time in hours when a snapshot expires and is deleted.
+# This must be larger than the retention time set in
+# hp3par_snapshot_retention. (string value)
+
+# Enable HTTP debugging to 3PAR (boolean value)
+#hp3par_debug=false
+
+# List of target iSCSI addresses to use. (list value)
+#hp3par_iscsi_ips=
+
+# Enable CHAP authentication for iSCSI connections. (boolean
+# value)
+#hp3par_iscsi_chap_enabled=false
+
+
+#
+# Options defined in cinder.volume.drivers.san.hp.hp_lefthand_rest_proxy
+#
+
+# HP LeftHand WSAPI Server Url like https://<LeftHand
+# ip>:8081/lhos (string value)
+#hplefthand_api_url=<None>
+
+# HP LeftHand Super user username (string value)
+#hplefthand_username=<None>
+
+# HP LeftHand Super user password (string value)
+#hplefthand_password=<None>
+
+# HP LeftHand cluster name (string value)
+#hplefthand_clustername=<None>
+
+# Configure CHAP authentication for iSCSI connections
+# (Default: Disabled) (boolean value)
+#hplefthand_iscsi_chap_enabled=false
+
+# Enable HTTP debugging to LeftHand (boolean value)
+#hplefthand_debug=false
+
+
+#
+# Options defined in cinder.volume.drivers.san.hp.hp_msa_common
+#
+
+# The VDisk to use for volume creation. (string value)
+#msa_vdisk=OpenStack
+
+
+#
+# Options defined in cinder.volume.drivers.san.san
+#
+
+# Use thin provisioning for SAN volumes? (boolean value)
+#san_thin_provision=true
+
+# IP address of SAN controller (string value)
+#san_ip=
+
+# Username for SAN controller (string value)
+#san_login=admin
+
+# Password for SAN controller (string value)
+#san_password=
+
+# Filename of private key to use for SSH authentication
+# (string value)
+#san_private_key=
+
+# Cluster name to use for creating volumes (string value)
+#san_clustername=
+
+# SSH port to use with SAN (integer value)
+#san_ssh_port=22
+
+# Execute commands locally instead of over SSH; use if the
+# volume service is running on the SAN device (boolean value)
+#san_is_local=false
+
+# SSH connection timeout in seconds (integer value)
+#ssh_conn_timeout=30
+
+# Minimum ssh connections in the pool (integer value)
+#ssh_min_pool_conn=1
+
+# Maximum ssh connections in the pool (integer value)
+#ssh_max_pool_conn=5
+
+
+#
+# Options defined in cinder.volume.drivers.san.solaris
+#
+
+# The ZFS path under which to create zvols for volumes.
+# (string value)
+#san_zfs_volume_base=rpool/
+
+
+#
+# Options defined in cinder.volume.drivers.scality
+#
+
+# Path or URL to Scality SOFS configuration file (string
+# value)
+#scality_sofs_config=<None>
+
+# Base dir where Scality SOFS shall be mounted (string value)
+#scality_sofs_mount_point=$state_path/scality
+
+# Path from Scality SOFS root to volume dir (string value)
+#scality_sofs_volume_dir=cinder/volumes
+
+
+#
+# Options defined in cinder.volume.drivers.smbfs
+#
+
+# File with the list of available smbfs shares. (string value)
+#smbfs_shares_config=/etc/cinder/smbfs_shares
+
+# Default format that will be used when creating volumes if no
+# volume format is specified. Can be set to: raw, qcow2, vhd
+# or vhdx. (string value)
+#smbfs_default_volume_format=qcow2
+
+# Create volumes as sparse files, which take no space, rather
+# than regular files, when using the raw format; creating
+# regular files takes a lot of time. (boolean value)
+#smbfs_sparsed_volumes=true
+
+# Percent of ACTUAL usage of the underlying volume before no
+# new volumes can be allocated to the volume destination.
+# (floating point value)
+#smbfs_used_ratio=0.95
+
+# This will compare the allocated to available space on the
+# volume destination. If the ratio exceeds this number, the
+# destination will no longer be valid. (floating point value)
+#smbfs_oversub_ratio=1.0
+
+# Base dir containing mount points for smbfs shares. (string
+# value)
+#smbfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the smbfs client. See mount.cifs man
+# page for details. (string value)
+#smbfs_mount_options=noperm,file_mode=0775,dir_mode=0775
+
+
+#
+# Options defined in cinder.volume.drivers.solidfire
+#
+
+# Set 512 byte emulation on volume creation; (boolean value)
+#sf_emulate_512=true
+
+# Allow tenants to specify QOS on create (boolean value)
+#sf_allow_tenant_qos=false
+
+# Create SolidFire accounts with this prefix. Any string can
+# be used here, but the string "hostname" is special and will
+# create a prefix using the cinder node hostname (previous
+# default behavior). The default is NO prefix. (string value)
+#sf_account_prefix=<None>
+
+# SolidFire API port. Useful if the device api is behind a
+# proxy on a different port. (integer value)
+#sf_api_port=443
+
+
+#
+# Options defined in cinder.volume.drivers.vmware.vmdk
+#
+
+# IP address for connecting to VMware ESX/VC server. (string
+# value)
+#vmware_host_ip=<None>
+
+# Username for authenticating with VMware ESX/VC server.
+# (string value)
+#vmware_host_username=<None>
+
+# Password for authenticating with VMware ESX/VC server.
+# (string value)
+#vmware_host_password=<None>
+
+# Optional VIM service WSDL location, e.g.
+# http://<server>/vimService.wsdl. Optional override of the
+# default location for bug workarounds. (string value)
+#vmware_wsdl_location=<None>
+
+# Number of times VMware ESX/VC server API must be retried
+# upon connection related issues. (integer value)
+#vmware_api_retry_count=10
+
+# The interval (in seconds) for polling remote tasks invoked
+# on VMware ESX/VC server. (floating point value)
+#vmware_task_poll_interval=0.5
+
+# Name for the folder in the VC datacenter that will contain
+# cinder volumes. (string value)
+#vmware_volume_folder=cinder-volumes
+
+# Timeout in seconds for VMDK volume transfer between Cinder
+# and Glance. (integer value)
+#vmware_image_transfer_timeout_secs=7200
+
+# Max number of objects to be retrieved per batch. Query
+# results will be obtained in batches from the server and not
+# in one shot. Server may still limit the count to something
+# less than the configured value. (integer value)
+#vmware_max_objects_retrieval=100
+
+# Optional string specifying the VMware VC server version. The
+# driver attempts to retrieve the version from VMware VC
+# server. Set this configuration only if you want to override
+# the VC server version. (string value)
+#vmware_host_version=<None>
+
+# Directory where virtual disks are stored during volume
+# backup and restore. (string value)
+#vmware_tmp_dir=/tmp
+
+
+#
+# Options defined in cinder.volume.drivers.windows.windows
+#
+
+# Path to store VHD backed volumes (string value)
+#windows_iscsi_lun_path=C:\iSCSIVirtualDisks
+
+
+#
+# Options defined in cinder.volume.drivers.zadara
+#
+
+# Management IP of Zadara VPSA (string value)
+#zadara_vpsa_ip=<None>
+
+# Zadara VPSA port number (string value)
+#zadara_vpsa_port=<None>
+
+# Use SSL connection (boolean value)
+#zadara_vpsa_use_ssl=false
+
+# User name for the VPSA (string value)
+#zadara_user=<None>
+
+# Password for the VPSA (string value)
+#zadara_password=<None>
+
+# Name of VPSA storage pool for volumes (string value)
+#zadara_vpsa_poolname=<None>
+
+# Default thin provisioning policy for volumes (boolean value)
+#zadara_vol_thin=true
+
+# Default encryption policy for volumes (boolean value)
+#zadara_vol_encrypt=false
+
+# Default template for VPSA volume names (string value)
+#zadara_vol_name_template=OS_%s
+
+# Automatically detach from servers on volume delete (boolean
+# value)
+#zadara_vpsa_auto_detach_on_delete=true
+
+# Don't halt on deletion of non-existing volumes (boolean
+# value)
+#zadara_vpsa_allow_nonexistent_delete=true
+
+
+#
+# Options defined in cinder.volume.drivers.zfssa.zfssaiscsi
+#
+
+# Storage pool name. (string value)
+#zfssa_pool=<None>
+
+# Project name. (string value)
+#zfssa_project=<None>
+
+# Block size: 512, 1k, 2k, 4k, 8k, 16k, 32k, 64k, 128k.
+# (string value)
+#zfssa_lun_volblocksize=8k
+
+# Flag to enable sparse (thin-provisioned): True, False.
+# (boolean value)
+#zfssa_lun_sparse=false
+
+# Data compression: off, lzjb, gzip-2, gzip, gzip-9. (string
+# value)
+#zfssa_lun_compression=
+
+# Synchronous write bias: latency, throughput. (string value)
+#zfssa_lun_logbias=
+
+# iSCSI initiator group. (string value)
+#zfssa_initiator_group=
+
+# iSCSI initiator IQNs. (comma separated) (string value)
+#zfssa_initiator=
+
+# iSCSI initiator CHAP user. (string value)
+#zfssa_initiator_user=
+
+# iSCSI initiator CHAP password. (string value)
+#zfssa_initiator_password=
+
+# iSCSI target group name. (string value)
+#zfssa_target_group=tgt-grp
+
+# iSCSI target CHAP user. (string value)
+#zfssa_target_user=
+
+# iSCSI target CHAP password. (string value)
+#zfssa_target_password=
+
+# iSCSI target portal (Data-IP:Port, w.x.y.z:3260). (string
+# value)
+#zfssa_target_portal=<None>
+
+# Network interfaces of iSCSI targets. (comma separated)
+# (string value)
+#zfssa_target_interfaces=<None>
+
+# REST connection timeout. (seconds) (integer value)
+#zfssa_rest_timeout=<None>
+
+
+#
+# Options defined in cinder.volume.manager
+#
+
+# Driver to use for volume creation (string value)
+volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
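+
+# To use a different backend, this would instead point at the
+# corresponding driver class; for example (assuming a Ceph/RBD
+# backend is configured):
+#volume_driver=cinder.volume.drivers.rbd.RBDDriver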
+
+# Timeout for creating the volume to migrate to when
+# performing volume migration (seconds) (integer value)
+#migration_create_volume_timeout_secs=300
+
+# Offload pending volume delete during volume service startup
+# (boolean value)
+#volume_service_inithost_offload=false
+
+# FC Zoning mode configured (string value)
+#zoning_mode=none
+
+# User defined capabilities, a JSON formatted string
+# specifying key/value pairs. (string value)
+#extra_capabilities={}
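+
+# Example (hypothetical keys; any user-defined key/value pairs
+# are accepted):
+#extra_capabilities={"storage_tier": "gold", "replication": "True"}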
+
+
+[BRCD_FABRIC_EXAMPLE]
+
+#
+# Options defined in cinder.zonemanager.drivers.brocade.brcd_fabric_opts
+#
+
+# Management IP of fabric (string value)
+#fc_fabric_address=
+
+# Fabric user ID (string value)
+#fc_fabric_user=
+
+# Password for user (string value)
+#fc_fabric_password=
+
+# Connecting port (integer value)
+#fc_fabric_port=22
+
+# overridden zoning policy (string value)
+#zoning_policy=initiator-target
+
+# overridden zoning activation state (boolean value)
+#zone_activate=true
+
+# overridden zone name prefix (string value)
+#zone_name_prefix=<None>
+
+# Principal switch WWN of the fabric (string value)
+#principal_switch_wwn=<None>
+
+
+[CISCO_FABRIC_EXAMPLE]
+
+#
+# Options defined in cinder.zonemanager.drivers.cisco.cisco_fabric_opts
+#
+
+# Management IP of fabric (string value)
+#cisco_fc_fabric_address=
+
+# Fabric user ID (string value)
+#cisco_fc_fabric_user=
+
+# Password for user (string value)
+#cisco_fc_fabric_password=
+
+# Connecting port (integer value)
+#cisco_fc_fabric_port=22
+
+# overridden zoning policy (string value)
+#cisco_zoning_policy=initiator-target
+
+# overridden zoning activation state (boolean value)
+#cisco_zone_activate=true
+
+# overridden zone name prefix (string value)
+#cisco_zone_name_prefix=<None>
+
+# VSAN of the Fabric (string value)
+#cisco_zoning_vsan=<None>
+
+
+[database]
+
+#
+# Options defined in oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+#sqlite_db=oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous=true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the
+# database. (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+connection=postgresql://{{ CINDER_DB_USER }}:{{ CINDER_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/cinder
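+
+# The {{ ... }} placeholders above are substituted when this
+# template is rendered for a deployment; a rendered value would
+# look something like (hypothetical credentials and address):
+# connection=postgresql://cinder:secret@10.0.0.1/cinder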
+
+# The SQLAlchemy connection string to use to connect to the
+# slave database. (string value)
+#slave_connection=<None>
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum number of database connection retries during
+# startup. Set to -1 to specify an infinite retry count.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a SQL connection.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information: 0=None,
+# 100=Everything. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add Python stack traces to SQL as comment strings. (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+# Enable the experimental use of database reconnect on
+# connection lost. (boolean value)
+#use_db_reconnect=false
+
+# Seconds between database connection retries. (integer value)
+#db_retry_interval=1
+
+# If True, increases the interval between database connection
+# retries up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval=true
+
+# If db_inc_retry_interval is set, the maximum seconds between
+# database connection retries. (integer value)
+#db_max_retry_interval=10
+
+# Maximum database connection retries before error is raised.
+# Set to -1 to specify an infinite retry count. (integer
+# value)
+#db_max_retries=20
+
+
+#
+# Options defined in oslo.db.concurrency
+#
+
+# Enable the experimental use of thread pooling for all DB API
+# calls (boolean value)
+# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
+#use_tpool=false
+
+
+[fc-zone-manager]
+
+#
+# Options defined in cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver
+#
+
+# Southbound connector for zoning operation (string value)
+#brcd_sb_connector=cinder.zonemanager.drivers.brocade.brcd_fc_zone_client_cli.BrcdFCZoneClientCLI
+
+
+#
+# Options defined in cinder.zonemanager.drivers.cisco.cisco_fc_zone_driver
+#
+
+# Southbound connector for zoning operation (string value)
+#cisco_sb_connector=cinder.zonemanager.drivers.cisco.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI
+
+
+#
+# Options defined in cinder.zonemanager.fc_zone_manager
+#
+
+# FC Zone Driver responsible for zone management (string
+# value)
+#zone_driver=cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver
+
+# Zoning policy configured by user (string value)
+#zoning_policy=initiator-target
+
+# Comma separated list of fibre channel fabric names. This
+# list of names is used to retrieve other SAN credentials for
+# connecting to each SAN fabric (string value)
+#fc_fabric_names=<None>
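+
+# Each name listed here is expected to correspond to a per-fabric
+# configuration section, such as [BRCD_FABRIC_EXAMPLE] above.
+# Example (hypothetical): fc_fabric_names=BRCD_FABRIC_EXAMPLE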
+
+# FC San Lookup Service (string value)
+#fc_san_lookup_service=cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service.BrcdFCSanLookupService
+
+
+[keymgr]
+
+#
+# Options defined in cinder.keymgr
+#
+
+# The full class name of the key manager API class (string
+# value)
+#api_class=cinder.keymgr.conf_key_mgr.ConfKeyManager
+
+
+#
+# Options defined in cinder.keymgr.conf_key_mgr
+#
+
+# Fixed key returned by key manager, specified in hex (string
+# value)
+#fixed_key=<None>
+
+
+#
+# Options defined in cinder.keymgr.key_mgr
+#
+
+# Authentication url for encryption service. (string value)
+#encryption_auth_url=http://localhost:5000/v2.0
+
+# Url for encryption service. (string value)
+#encryption_api_url=http://localhost:9311/v1
+
+
+[keystone_authtoken]
+
+#
+# Options defined in keystonemiddleware.auth_token
+#
+
+# Prefix to prepend at the beginning of the path. Deprecated,
+# use identity_uri. (string value)
+#auth_admin_prefix=
+
+# Host providing the admin Identity API endpoint. Deprecated,
+# use identity_uri. (string value)
+#auth_host=127.0.0.1
+
+# Port of the admin Identity API endpoint. Deprecated, use
+# identity_uri. (integer value)
+#auth_port=35357
+
+# Protocol of the admin Identity API endpoint (http or https).
+# Deprecated, use identity_uri. (string value)
+#auth_protocol=https
+
+# Complete public Identity API endpoint (string value)
+auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+
+# Complete admin Identity API endpoint. This should specify
+# the unversioned root endpoint e.g. https://localhost:35357/
+# (string value)
+identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357
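+
+# As with the other {{ ... }} placeholders, these values are
+# substituted when the template is rendered; with a hypothetical
+# controller at 10.0.0.1 they would become
+# http://10.0.0.1:5000/v2.0 and http://10.0.0.1:35357.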
+
+# API version of the admin Identity API endpoint (string
+# value)
+#auth_version=<None>
+
+# Do not handle authorization requests within the middleware,
+# but delegate the authorization decision to downstream WSGI
+# components (boolean value)
+#delay_auth_decision=false
+
+# Request timeout value for communicating with Identity API
+# server. (boolean value)
+#http_connect_timeout=<None>
+
+# How many times are we trying to reconnect when communicating
+# with Identity API Server. (integer value)
+#http_request_max_retries=3
+
+# This option is deprecated and may be removed in a future
+# release. Single shared secret with the Keystone
+# configuration used for bootstrapping a Keystone
+# installation, or otherwise bypassing the normal
+# authentication process. This option should not be used, use
+# `admin_user` and `admin_password` instead. (string value)
+#admin_token=<None>
+
+# Keystone account username (string value)
+admin_user={{ CINDER_SERVICE_USER }}
+
+# Keystone account password (string value)
+admin_password={{ CINDER_SERVICE_PASSWORD }}
+
+# Keystone service account tenant name to validate user tokens
+# (string value)
+admin_tenant_name=service
+
+# Env key for the swift cache (string value)
+#cache=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#certfile=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#keyfile=<None>
+
+# A PEM encoded Certificate Authority to use when verifying
+# HTTPs connections. Defaults to system CAs. (string value)
+#cafile=<None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure=false
+
+# Directory used to cache files related to PKI tokens (string
+# value)
+#signing_dir=<None>
+
+# Optionally specify a list of memcached server(s) to use for
+# caching. If left undefined, tokens will instead be cached
+# in-process. (list value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers=<None>
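+
+# Example (hypothetical host:port list):
+#memcached_servers=127.0.0.1:11211,10.0.0.2:11211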
+
+# In order to prevent excessive effort spent validating
+# tokens, the middleware caches previously-seen tokens for a
+# configurable duration (in seconds). Set to -1 to disable
+# caching completely. (integer value)
+#token_cache_time=300
+
+# Determines the frequency at which the list of revoked tokens
+# is retrieved from the Identity service (in seconds). A high
+# number of revocation events combined with a low cache
+# duration may significantly reduce performance. (integer
+# value)
+#revocation_cache_time=10
+
+# (optional) if defined, indicate whether token data should be
+# authenticated or authenticated and encrypted. Acceptable
+# values are MAC or ENCRYPT. If MAC, token data is
+# authenticated (with HMAC) in the cache. If ENCRYPT, token
+# data is encrypted and authenticated in the cache. If the
+# value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+#memcache_security_strategy=<None>
+
+# (optional, mandatory if memcache_security_strategy is
+# defined) this string is used for key derivation. (string
+# value)
+#memcache_secret_key=<None>
+
+# (optional) number of seconds memcached server is considered
+# dead before it is tried again. (integer value)
+#memcache_pool_dead_retry=300
+
+# (optional) max total number of open connections to every
+# memcached server. (integer value)
+#memcache_pool_maxsize=10
+
+# (optional) socket timeout in seconds for communicating with
+# a memcache server. (integer value)
+#memcache_pool_socket_timeout=3
+
+# (optional) number of seconds a connection to memcached is
+# held unused in the pool before it is closed. (integer value)
+#memcache_pool_unused_timeout=60
+
+# (optional) number of seconds that an operation will wait to
+# get a memcache client connection from the pool. (integer
+# value)
+#memcache_pool_conn_get_timeout=10
+
+# (optional) use the advanced (eventlet safe) memcache client
+# pool. The advanced pool will only work under python 2.x.
+# (boolean value)
+#memcache_use_advanced_pool=false
+
+# (optional) indicate whether to set the X-Service-Catalog
+# header. If False, middleware will not ask for service
+# catalog on token validation and will not set the X-Service-
+# Catalog header. (boolean value)
+#include_service_catalog=true
+
+# Used to control the use and type of token binding. Can be
+# set to: "disabled" to not check token binding. "permissive"
+# (default) to validate binding information if the bind type
+# is of a form known to the server and ignore it if not.
+# "strict" like "permissive" but if the bind type is unknown
+# the token will be rejected. "required" any form of token
+# binding is needed to be allowed. Finally the name of a
+# binding method that must be present in tokens. (string
+# value)
+#enforce_token_bind=permissive
+
+# If true, the revocation list will be checked for cached
+# tokens. This requires that PKI tokens are configured on the
+# Keystone server. (boolean value)
+#check_revocations_for_cached=false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a
+# single algorithm or multiple. The algorithms are those
+# supported by Python standard hashlib.new(). The hashes will
+# be tried in the order given, so put the preferred one first
+# for performance. The result of the first hash will be stored
+# in the cache. This will typically be set to multiple values
+# only while migrating from a less secure algorithm to a more
+# secure one. Once all the old tokens are expired this option
+# should be set to a single value for better performance.
+# (list value)
+#hash_algorithms=md5
+
+
+[matchmaker_redis]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[oslo_messaging_amqp]
+
+#
+# Options defined in oslo.messaging
+#
+# NOTE: Options in this group are supported when using oslo.messaging >=1.5.0.
+
+# address prefix used when sending to a specific server
+# (string value)
+#server_request_prefix=exclusive
+
+# address prefix used when broadcasting to all servers (string
+# value)
+#broadcast_prefix=broadcast
+
+# address prefix when sending to any server in group (string
+# value)
+#group_request_prefix=unicast
+
+# Name for the AMQP container (string value)
+#container_name=<None>
+
+# Timeout for inactive connections (in seconds) (integer
+# value)
+#idle_timeout=0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+#trace=false
+
+# CA certificate PEM file for verifying server certificate
+# (string value)
+#ssl_ca_file=
+
+# Identifying certificate PEM file to present to clients
+# (string value)
+#ssl_cert_file=
+
+# Private key PEM file used to sign cert_file certificate
+# (string value)
+#ssl_key_file=
+
+# Password for decrypting ssl_key_file (if encrypted) (string
+# value)
+#ssl_key_password=<None>
+
+# Accept clients using either SSL or plain TCP (boolean value)
+#allow_insecure_clients=false
+
+
+[profiler]
+
+#
+# Options defined in cinder.service
+#
+
+# If False, fully disable the profiling feature. (boolean value)
+#profiler_enabled=false
+
+# If False, SQL requests are not traced. (boolean value)
+#trace_sqlalchemy=false
+
+
+[ssl]
+
+#
+# Options defined in cinder.openstack.common.sslutils
+#
+
+# CA certificate file to use to verify connecting clients
+# (string value)
+#ca_file=<None>
+
+# Certificate file to use when starting the server securely
+# (string value)
+#cert_file=<None>
+
+# Private key file to use when starting the server securely
+# (string value)
+#key_file=<None>
+
+
diff --git a/openstack/usr/share/openstack/cinder/policy.json b/openstack/usr/share/openstack/cinder/policy.json
new file mode 100644
index 00000000..8f3a7b2f
--- /dev/null
+++ b/openstack/usr/share/openstack/cinder/policy.json
@@ -0,0 +1,80 @@
+{
+ "context_is_admin": "role:admin",
+ "admin_or_owner": "is_admin:True or project_id:%(project_id)s",
+ "default": "rule:admin_or_owner",
+
+ "admin_api": "is_admin:True",
+
+ "volume:create": "",
+ "volume:get_all": "",
+ "volume:get_volume_metadata": "",
+ "volume:get_volume_admin_metadata": "rule:admin_api",
+ "volume:delete_volume_admin_metadata": "rule:admin_api",
+ "volume:update_volume_admin_metadata": "rule:admin_api",
+ "volume:get_snapshot": "",
+ "volume:get_all_snapshots": "",
+ "volume:extend": "",
+ "volume:update_readonly_flag": "",
+ "volume:retype": "",
+
+ "volume_extension:types_manage": "rule:admin_api",
+ "volume_extension:types_extra_specs": "rule:admin_api",
+ "volume_extension:volume_type_encryption": "rule:admin_api",
+ "volume_extension:volume_encryption_metadata": "rule:admin_or_owner",
+ "volume_extension:extended_snapshot_attributes": "",
+ "volume_extension:volume_image_metadata": "",
+
+ "volume_extension:quotas:show": "",
+ "volume_extension:quotas:update": "rule:admin_api",
+ "volume_extension:quota_classes": "",
+
+ "volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
+ "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
+ "volume_extension:backup_admin_actions:reset_status": "rule:admin_api",
+ "volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
+ "volume_extension:volume_admin_actions:force_detach": "rule:admin_api",
+ "volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api",
+ "volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api",
+ "volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api",
+
+ "volume_extension:volume_host_attribute": "rule:admin_api",
+ "volume_extension:volume_tenant_attribute": "rule:admin_or_owner",
+ "volume_extension:volume_mig_status_attribute": "rule:admin_api",
+ "volume_extension:hosts": "rule:admin_api",
+ "volume_extension:services": "rule:admin_api",
+
+ "volume_extension:volume_manage": "rule:admin_api",
+ "volume_extension:volume_unmanage": "rule:admin_api",
+
+ "volume:services": "rule:admin_api",
+
+ "volume:create_transfer": "",
+ "volume:accept_transfer": "",
+ "volume:delete_transfer": "",
+ "volume:get_all_transfers": "",
+
+ "volume_extension:replication:promote": "rule:admin_api",
+ "volume_extension:replication:reenable": "rule:admin_api",
+
+ "backup:create" : "",
+ "backup:delete": "",
+ "backup:get": "",
+ "backup:get_all": "",
+ "backup:restore": "",
+ "backup:backup-import": "rule:admin_api",
+ "backup:backup-export": "rule:admin_api",
+
+ "snapshot_extension:snapshot_actions:update_snapshot_status": "",
+
+ "consistencygroup:create" : "group:nobody",
+ "consistencygroup:delete": "group:nobody",
+ "consistencygroup:get": "group:nobody",
+ "consistencygroup:get_all": "group:nobody",
+
+ "consistencygroup:create_cgsnapshot" : "",
+ "consistencygroup:delete_cgsnapshot": "",
+ "consistencygroup:get_cgsnapshot": "",
+ "consistencygroup:get_all_cgsnapshots": "",
+
+ "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api"
+}
diff --git a/openstack/usr/share/openstack/extras/00-disable-device.network b/openstack/usr/share/openstack/extras/00-disable-device.network
new file mode 100644
index 00000000..8e2532d0
--- /dev/null
+++ b/openstack/usr/share/openstack/extras/00-disable-device.network
@@ -0,0 +1,2 @@
+[Match]
+Name={{ item }}
diff --git a/openstack/usr/share/openstack/extras/60-device-dhcp.network b/openstack/usr/share/openstack/extras/60-device-dhcp.network
new file mode 100644
index 00000000..6fdbfd8d
--- /dev/null
+++ b/openstack/usr/share/openstack/extras/60-device-dhcp.network
@@ -0,0 +1,5 @@
+[Match]
+Name={{ item }}
+
+[Network]
+DHCP=yes
diff --git a/openstack/usr/share/openstack/glance.yml b/openstack/usr/share/openstack/glance.yml
new file mode 100644
index 00000000..aa7e4c78
--- /dev/null
+++ b/openstack/usr/share/openstack/glance.yml
@@ -0,0 +1,93 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/glance.conf"
+ tasks:
+ - name: Create the glance user.
+ user:
+ name: glance
+ comment: Openstack Glance Daemons
+ shell: /sbin/nologin
+ home: /var/lib/glance
+
+ - name: Create the /var folders for glance
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: glance
+ group: glance
+ with_items:
+ - /var/run/glance
+ - /var/lock/glance
+ - /var/log/glance
+ - /var/lib/glance
+ - /var/lib/glance/images
+ - /var/lib/glance/image-cache
+
+ - name: Create /etc/glance directory
+ file:
+ path: /etc/glance
+ state: directory
+
+ - name: Add the configuration needed for glance in /etc/glance using templates
+ template:
+ src: /usr/share/openstack/glance/{{ item }}
+ dest: /etc/glance/{{ item }}
+ with_lines:
+ - cd /usr/share/openstack/glance && find -type f
+
+ - name: Create glance service user in service tenant
+ keystone_user:
+ user: "{{ GLANCE_SERVICE_USER }}"
+ password: "{{ GLANCE_SERVICE_PASSWORD }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Assign admin role to the glance service user in the service tenant
+ keystone_user:
+ role: admin
+ user: "{{ GLANCE_SERVICE_USER }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Add glance endpoint
+ keystone_service:
+ name: glance
+ type: image
+ description: Openstack Image Service
+ publicurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9292
+ internalurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9292
+ adminurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9292
+ region: regionOne
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Create postgresql user for glance
+ postgresql_user:
+ name: "{{ GLANCE_DB_USER }}"
+ login_host: "{{ CONTROLLER_HOST_ADDRESS }}"
+ password: "{{ GLANCE_DB_PASSWORD }}"
+ sudo: yes
+ sudo_user: glance
+
+ - name: Create database for glance services
+ postgresql_db:
+ name: glance
+ owner: "{{ GLANCE_DB_USER }}"
+ login_host: "{{ CONTROLLER_HOST_ADDRESS }}"
+ sudo: yes
+ sudo_user: glance
+
+ - name: Initiate glance database
+ glance_manage:
+ action: dbsync
+ sudo: yes
+ sudo_user: glance
+
+ - name: Enable and start openstack-glance services
+ service:
+ name: "{{ item }}"
+ enabled: yes
+ state: started
+ with_items:
+ - openstack-glance-api.service
+ - openstack-glance-registry.service
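+
+# The variables referenced above come from the vars_files entry
+# /etc/openstack/glance.conf. A minimal sketch of that file, with
+# purely hypothetical values, might look like:
+#
+#   GLANCE_SERVICE_USER: glance
+#   GLANCE_SERVICE_PASSWORD: insecure
+#   GLANCE_DB_USER: glance
+#   GLANCE_DB_PASSWORD: insecure
+#   KEYSTONE_TEMPORARY_ADMIN_TOKEN: 0123456789abcdef
+#   CONTROLLER_HOST_ADDRESS: 10.0.0.1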
diff --git a/openstack/usr/share/openstack/glance/glance-api-paste.ini b/openstack/usr/share/openstack/glance/glance-api-paste.ini
new file mode 100644
index 00000000..86a4cdb1
--- /dev/null
+++ b/openstack/usr/share/openstack/glance/glance-api-paste.ini
@@ -0,0 +1,77 @@
+# Use this pipeline for no auth or image caching - DEFAULT
+[pipeline:glance-api]
+pipeline = versionnegotiation osprofiler unauthenticated-context rootapp
+
+# Use this pipeline for image caching and no auth
+[pipeline:glance-api-caching]
+pipeline = versionnegotiation osprofiler unauthenticated-context cache rootapp
+
+# Use this pipeline for caching w/ management interface but no auth
+[pipeline:glance-api-cachemanagement]
+pipeline = versionnegotiation osprofiler unauthenticated-context cache cachemanage rootapp
+
+# Use this pipeline for keystone auth
+[pipeline:glance-api-keystone]
+pipeline = versionnegotiation osprofiler authtoken context rootapp
+
+# Use this pipeline for keystone auth with image caching
+[pipeline:glance-api-keystone+caching]
+pipeline = versionnegotiation osprofiler authtoken context cache rootapp
+
+# Use this pipeline for keystone auth with caching and cache management
+[pipeline:glance-api-keystone+cachemanagement]
+pipeline = versionnegotiation osprofiler authtoken context cache cachemanage rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user.
+[pipeline:glance-api-trusted-auth]
+pipeline = versionnegotiation osprofiler context rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user and uses cache management
+[pipeline:glance-api-trusted-auth+cachemanagement]
+pipeline = versionnegotiation osprofiler context cache cachemanage rootapp
+
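+# Which of the pipelines above gets used is normally selected by
+# the "flavor" option in the [paste_deploy] section of
+# glance-api.conf; a flavor of "keystone", for instance, maps to
+# the [pipeline:glance-api-keystone] section defined here.
+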
+[composite:rootapp]
+paste.composite_factory = glance.api:root_app_factory
+/: apiversions
+/v1: apiv1app
+/v2: apiv2app
+
+[app:apiversions]
+paste.app_factory = glance.api.versions:create_resource
+
+[app:apiv1app]
+paste.app_factory = glance.api.v1.router:API.factory
+
+[app:apiv2app]
+paste.app_factory = glance.api.v2.router:API.factory
+
+[filter:versionnegotiation]
+paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
+
+[filter:cache]
+paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
+
+[filter:cachemanage]
+paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory
+
+[filter:context]
+paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+delay_auth_decision = true
+
+[filter:gzip]
+paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
diff --git a/openstack/usr/share/openstack/glance/glance-api.conf b/openstack/usr/share/openstack/glance/glance-api.conf
new file mode 100644
index 00000000..39257a6d
--- /dev/null
+++ b/openstack/usr/share/openstack/glance/glance-api.conf
@@ -0,0 +1,699 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Which backend scheme should Glance use by default if one is not
+# specified in a request to add a new image to Glance? Known schemes
+# are determined by the known_stores option below.
+# Default: 'file'
+default_store = file
+
+# Maximum image size (in bytes) that may be uploaded through the
+# Glance API server. Defaults to 1 TB.
+# WARNING: this value should only be increased after careful consideration
+# and must be set to a value under 8 EB (9223372036854775808).
+#image_size_cap = 1099511627776
+
+# Address to bind the API server
+bind_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# Port to bind the API server to
+bind_port = 9292
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+# log_file = /var/log/glance/api.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. The default value points to the
+# sqlalchemy package; it is also possible to use: glance.db.registry.api
+# data_api = glance.db.sqlalchemy.api
+
+# The number of child process workers that will be
+# created to service API requests. The default will be
+# equal to the number of CPUs available. (integer value)
+#workers = 4
+
+# Maximum line size of message headers to be accepted.
+# max_header_line may need to be increased when using large tokens
+# (typically those generated by the Keystone v3 API with big service
+# catalogs)
+# max_header_line = 16384
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Allow unauthenticated users to access the API with read-only
+# privileges. This only applies when using ContextMiddleware.
+#allow_anonymous_access = False
+
+# Allow access to version 1 of glance api
+#enable_v1_api = True
+
+# Allow access to version 2 of glance api
+#enable_v2_api = True
+
+# Return the URL that references where the data is stored on
+# the backend storage system. For example, if using the
+# file system store a URL of 'file:///path/to/image' will
+# be returned to the user in the 'direct_url' meta-data field.
+# The default value is false.
+#show_image_direct_url = False
+
+# Send headers containing user and tenant information when making requests to
+# the v1 glance registry. This allows the registry to function as if a user is
+# authenticated without the need to authenticate a user itself using the
+# auth_token middleware.
+# The default value is false.
+#send_identity_headers = False
+
+# Supported values for the 'container_format' image attribute
+#container_formats=ami,ari,aki,bare,ovf,ova
+
+# Supported values for the 'disk_format' image attribute
+#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso
+
+# Directory to use for lock files. Default to a temp directory
+# (string value). This setting needs to be the same for both
+# glance-scrubber and glance-api.
+#lock_path=<None>
+
+# Property Protections config file
+# This file contains the rules for property protections and the roles/policies
+# associated with it.
+# If this config value is not specified, by default, property protections
+# won't be enforced.
+# If a value is specified and the file is not found, then the glance-api
+# service will not start.
+#property_protection_file =
+
+# Specify whether 'roles' or 'policies' are used in the
+# property_protection_file.
+# The default value for property_protection_rule_format is 'roles'.
+#property_protection_rule_format = roles
+
+# This value sets what strategy will be used to determine the image location
+# order. Currently two strategies are packaged with Glance 'location_order'
+# and 'store_type'.
+#location_strategy = location_order
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+use_syslog = True
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL0
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting API server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
+
+# ============ Registry Options ===============================
+
+# Address to find the registry server
+registry_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# What protocol to use when connecting to the registry server?
+# Set to https for secure HTTP communication
+registry_client_protocol = http
+
+# The path to the key file to use in SSL connections to the
+# registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file
+#registry_client_key_file = /path/to/key/file
+
+# The path to the cert file to use in SSL connections to the
+# registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file
+#registry_client_cert_file = /path/to/cert/file
+
+# The path to the certifying authority cert file to use in SSL connections
+# to the registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file
+#registry_client_ca_file = /path/to/ca/file
+
+# When using SSL in connections to the registry server, do not require
+# validation via a certifying authority. This is the registry's equivalent of
+# specifying --insecure on the command line using glanceclient for the API
+# Default: False
+#registry_client_insecure = False
+
+# The period of time, in seconds, that the API server will wait for a registry
+# request to complete. A value of '0' implies no timeout.
+# Default: 600
+#registry_client_timeout = 600
+
+# Whether to automatically create the database tables.
+# Default: False
+#db_auto_create = False
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+#sqlalchemy_debug = True
+
+# Pass the user's token through for API requests to the registry.
+# Default: True
+#use_user_token = True
+
+# If 'use_user_token' is not in effect then admin credentials
+# can be specified. Requests to the registry on behalf of
+# the API will use these credentials.
+# Admin user name
+#admin_user = None
+# Admin password
+#admin_password = None
+# Admin tenant name
+#admin_tenant_name = None
+# Keystone endpoint
+#auth_url = None
+# Keystone region
+#auth_region = None
+# Auth strategy
+#auth_strategy = keystone
+
+# ============ Notification System Options =====================
+
+# Driver or drivers to handle sending notifications. Set to
+# 'messaging' to send notifications to a message queue.
+notification_driver = messagingv2
+
+# Default publisher_id for outgoing notifications.
+# default_publisher_id = image.localhost
+
+# Messaging driver used for 'messaging' notifications driver
+rpc_backend=rabbit
+
+# Configuration options if sending notifications via rabbitmq
+rabbit_host = {{ RABBITMQ_HOST }}
+rabbit_port = {{ RABBITMQ_PORT }}
+rabbit_use_ssl = false
+rabbit_userid = {{ RABBITMQ_USER }}
+rabbit_password = {{ RABBITMQ_PASSWORD }}
+rabbit_virtual_host = /
+rabbit_notification_exchange = glance
+rabbit_notification_topic = notifications
+rabbit_durable_queues = False
+
+# Configuration options if sending notifications via Qpid (these are
+# the defaults)
+#qpid_notification_exchange = glance
+#qpid_notification_topic = notifications
+#qpid_hostname = localhost
+#qpid_port = 5672
+#qpid_username =
+#qpid_password =
+#qpid_sasl_mechanisms =
+#qpid_reconnect_timeout = 0
+#qpid_reconnect_limit = 0
+#qpid_reconnect_interval_min = 0
+#qpid_reconnect_interval_max = 0
+#qpid_reconnect_interval = 0
+#qpid_heartbeat = 5
+# Set to 'ssl' to enable SSL
+#qpid_protocol = tcp
+#qpid_tcp_nodelay = True
+
+# ============ Delayed Delete Options =============================
+
+# Turn on/off delayed delete
+delayed_delete = False
+
+# Delayed delete time in seconds
+scrub_time = 43200
+
+# Directory that the scrubber will use to remind itself of what to delete
+# Make sure this is also set in glance-scrubber.conf
+scrubber_datadir = /var/lib/glance/scrubber
+
+# =============== Quota Options ==================================
+
+# The maximum number of image members allowed per image
+#image_member_quota = 128
+
+# The maximum number of image properties allowed per image
+#image_property_quota = 128
+
+# The maximum number of tags allowed per image
+#image_tag_quota = 128
+
+# The maximum number of locations allowed per image
+#image_location_quota = 10
+
+# Set a system wide quota for every user. This value is the total number
+# of bytes that a user can use across all storage systems. A value of
+# 0 means unlimited.
+#user_storage_quota = 0
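+# For example (illustrative), a per-user cap of 10 GiB would be expressed
+# here in bytes as:
+#user_storage_quota = 10737418240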
+
+# =============== Image Cache Options =============================
+
+# Base directory that the Image Cache uses
+image_cache_dir = /var/lib/glance/image-cache/
+
+# =============== Database Options =================================
+
+[database]
+# The file name to use with SQLite (string value)
+#sqlite_db = oslo.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous = True
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+connection=postgresql://{{ GLANCE_DB_USER }}:{{ GLANCE_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/glance
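+# (Illustrative only: with hypothetical values substituted, the string
+# above would read e.g. postgresql://glance:glancepass@10.0.0.1/glance)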
+
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = <None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect = False
+
+# seconds between db connection retries (integer value)
+#db_retry_interval = 1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval = True
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval = 10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries = 20
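+# Illustrative reading of the options above: with db_retry_interval = 1,
+# db_inc_retry_interval = True and db_max_retry_interval = 10, each retry
+# waits a little longer than the last, starting at about 1 second and
+# capped at 10 seconds, until db_max_retries attempts have been made.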
+
+[keystone_authtoken]
+auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357
+admin_tenant_name = service
+admin_user = {{ GLANCE_SERVICE_USER }}
+admin_password = {{ GLANCE_SERVICE_PASSWORD }}
+revocation_cache_time = 10
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+#config_file = glance-api-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-api-keystone], you would configure the flavor below
+# as 'keystone'.
+flavor=keystone
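+# (Worked example: with flavor=keystone and the default
+# glance-api-paste.ini, the service loads the [pipeline:glance-api-keystone]
+# pipeline, i.e. the paste app name with '-keystone' appended.)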
+
+[store_type_location_strategy]
+# The scheme list to use to get store preference order. The scheme must be
+# registered by one of the stores defined by the 'known_stores' config option.
+# This option is applied when you use the 'store_type' option as the image
+# location strategy defined by the 'location_strategy' config option.
+#store_type_preference =
+
+[profiler]
+# If False fully disable profiling feature.
+#enabled = False
+
+# If False doesn't trace SQL requests.
+#trace_sqlalchemy = False
+
+[task]
+# ================= Glance Tasks Options ============================
+
+# Specifies how long (in hours) a task is supposed to live in the tasks DB
+# after succeeding or failing before getting soft-deleted.
+# The default value for task_time_to_live is 48 hours.
+# task_time_to_live = 48
+
+# Specifies which task executor to be used to run the task scripts.
+# The default value for task_executor is eventlet.
+# task_executor = eventlet
+
+# Specifies the maximum number of eventlet threads which can be spun up by
+# the eventlet based task executor to perform execution of Glance tasks.
+# eventlet_executor_pool_size = 1000
+
+[glance_store]
+# List of which store classes and store class locations are
+# currently known to glance at startup.
+# Existing but disabled stores:
+# glance.store.rbd.Store,
+# glance.store.s3.Store,
+# glance.store.swift.Store,
+# glance.store.sheepdog.Store,
+# glance.store.cinder.Store,
+# glance.store.gridfs.Store,
+# glance.store.vmware_datastore.Store,
+#stores = glance.store.filesystem.Store,
+# glance.store.http.Store
+
+# ============ Filesystem Store Options ========================
+
+# Directory that the Filesystem backend store
+# writes image data to
+filesystem_store_datadir = /var/lib/glance/images/
+
+# A list of directories where image data can be stored.
+# This option may be specified multiple times for specifying multiple store
+# directories. Either one of filesystem_store_datadirs or
+# filesystem_store_datadir option is required. A priority number may be given
+# after each directory entry, separated by a ":".
+# When adding an image, the highest priority directory will be selected, unless
+# there is not enough space available in cases where the image size is already
+# known. If no priority is given, it is assumed to be zero and the directory
+# will be considered for selection last. If multiple directories have the same
+# priority, then the one with the most free space available is selected.
+# If same store is specified multiple times then BadStoreConfiguration
+# exception will be raised.
+#filesystem_store_datadirs = /var/lib/glance/images/:1
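+# Illustrative example with hypothetical paths: given
+#   filesystem_store_datadirs = /mnt/fast/images/:100
+#   filesystem_store_datadirs = /mnt/slow/images/:10
+# new images would go to /mnt/fast/images/ while it has enough free space
+# and fall back to /mnt/slow/images/ otherwise, following the rules above.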
+
+# A path to a JSON file that contains metadata describing the storage
+# system. When show_multiple_locations is True the information in this
+# file will be returned with any location that is contained in this
+# store.
+#filesystem_store_metadata_file = None
+
+# ============ Swift Store Options =============================
+
+# Version of the authentication service to use
+# Valid versions are '2' for keystone and '1' for swauth and rackspace
+swift_store_auth_version = 2
+
+# Address where the Swift authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'https://'
+# For swauth, use something like '127.0.0.1:8080/v1.0/'
+swift_store_auth_address = 127.0.0.1:5000/v2.0/
+
+# User to authenticate against the Swift authentication service
+# If you use Swift authentication service, set it to 'account':'user'
+# where 'account' is a Swift storage account and 'user'
+# is a user in that account
+swift_store_user = jdoe:jdoe
+
+# Auth key for the user authenticating against the
+# Swift authentication service
+swift_store_key = a86850deb2742ec3cb41518e26aa2d89
+
+# Container within the account that Glance should use
+# for storing images in Swift
+swift_store_container = glance
+
+# Do we create the container if it does not exist?
+swift_store_create_container_on_put = False
+
+# What size, in MB, should Glance start chunking image files
+# and do a large object manifest in Swift? By default, this is
+# the maximum object size in Swift, which is 5GB
+swift_store_large_object_size = 5120
+
+# swift_store_config_file = glance-swift.conf
+# This file contains references for each of the configured
+# Swift accounts/backing stores. If used, this option can prevent
+# credentials being stored in the database. Using Swift references
+# is disabled if this config is left blank.
+
+# The reference to the default Swift parameters to use for adding new images.
+# default_swift_reference = 'ref1'
+
+# When doing a large object manifest, what size, in MB, should
+# Glance write chunks to Swift? This amount of data is written
+# to a temporary disk buffer during the process of chunking
+# the image file, and the default is 200MB
+swift_store_large_object_chunk_size = 200
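+# Worked example: with the two settings above, a 6144MB image exceeds
+# the 5120MB threshold and is uploaded as ceil(6144/200) = 31 segments
+# (30 of 200MB plus one of 144MB) referenced by a single manifest.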
+
+# Whether to use ServiceNET to communicate with the Swift storage servers.
+# (If you aren't RACKSPACE, leave this False!)
+#
+# To use ServiceNET for authentication, prefix hostname of
+# `swift_store_auth_address` with 'snet-'.
+# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
+swift_enable_snet = False
+
+# If set to True enables multi-tenant storage mode which causes Glance images
+# to be stored in tenant specific Swift accounts.
+#swift_store_multi_tenant = False
+
+# A list of swift ACL strings that will be applied as both read and
+# write ACLs to the containers created by Glance in multi-tenant
+# mode. This grants the specified tenants/users read and write access
+# to all newly created image objects. The standard swift ACL string
+# formats are allowed, including:
+# <tenant_id>:<username>
+# <tenant_name>:<username>
+# *:<username>
+# Multiple ACLs can be combined using a comma separated list, for
+# example: swift_store_admin_tenants = service:glance,*:admin
+#swift_store_admin_tenants =
+
+# The region of the swift endpoint to be used for single tenant. This setting
+# is only necessary if the tenant has multiple swift endpoints.
+#swift_store_region =
+
+# If set to False, disables SSL layer compression of https swift requests.
+# Setting to 'False' may improve performance for images which are already
+# in a compressed format, e.g. qcow2. If set to True, enables SSL layer
+# compression (provided it is supported by the target swift proxy).
+#swift_store_ssl_compression = True
+
+# The number of times a Swift download will be retried before the
+# request fails
+#swift_store_retry_get_count = 0
+
+# Bypass SSL verification for Swift
+#swift_store_auth_insecure = False
+
+# ============ S3 Store Options =============================
+
+# Address where the S3 authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'http://'
+s3_store_host = 127.0.0.1:8080/v1.0/
+
+# User to authenticate against the S3 authentication service
+s3_store_access_key = <20-char AWS access key>
+
+# Auth key for the user authenticating against the
+# S3 authentication service
+s3_store_secret_key = <40-char AWS secret key>
+
+# Bucket within the account that Glance should use
+# for storing images in S3. Note that S3 has a flat namespace,
+# so you need a unique bucket name for your glance images. An
+# easy way to do this is to append your AWS access key to "glance".
+# S3 buckets in AWS *must* be lowercased, so remember to lowercase
+# your AWS access key if you use it in your bucket name below!
+s3_store_bucket = <lowercased 20-char aws access key>glance
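+# (Illustrative example: using the sample access key from the AWS
+# documentation, AKIAIOSFODNN7EXAMPLE, the bucket name above would be
+# akiaiosfodnn7exampleglance.)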
+
+# Do we create the bucket if it does not exist?
+s3_store_create_bucket_on_put = False
+
+# When sending images to S3, the data will first be written to a
+# temporary buffer on disk. By default the platform's temporary directory
+# will be used. If required, an alternative directory can be specified here.
+#s3_store_object_buffer_dir = /path/to/dir
+
+# When forming a bucket url, boto will either set the bucket name as the
+# subdomain or as the first token of the path. Amazon's S3 service will
+# accept it as the subdomain, but Swift's S3 middleware requires it be
+# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'.
+#s3_store_bucket_url_format = subdomain
+
+# The size, in MB, at which S3 should start chunking image files
+# and do a multipart upload in S3. The default is 100MB.
+#s3_store_large_object_size = 100
+
+# The multipart upload part size, in MB, that S3 should use when
+# uploading parts. The size must be greater than or equal to
+# 5MB. The default is 10MB.
+#s3_store_large_object_chunk_size = 10
+
+# The number of thread pools to perform a multipart upload
+# in S3. The default is 10.
+#s3_store_thread_pools = 10
+
+# ============ RBD Store Options =============================
+
+# Ceph configuration file path
+# If using cephx authentication, this file should
+# include a reference to the right keyring
+# in a client.<USER> section
+#rbd_store_ceph_conf = /etc/ceph/ceph.conf
+
+# RADOS user to authenticate as (only applicable if using cephx)
+# If <None>, a default will be chosen based on the client. section
+# in rbd_store_ceph_conf
+#rbd_store_user = <None>
+
+# RADOS pool in which images are stored
+#rbd_store_pool = images
+
+# RADOS images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+#rbd_store_chunk_size = 8
+
+# ============ Sheepdog Store Options =============================
+
+sheepdog_store_address = localhost
+
+sheepdog_store_port = 7000
+
+# Images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+sheepdog_store_chunk_size = 64
+
+# ============ Cinder Store Options ===============================
+
+# Info to match when looking for cinder in the service catalog
+# Format is colon-separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v1/%(project_id)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node (string value)
+#os_region_name = <None>
+
+# Location of the CA certificates file to use for cinder client requests
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Allow insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = False
+
+# ============ VMware Datastore Store Options =====================
+
+# ESX/ESXi or vCenter Server target system.
+# The server value can be an IP address or a DNS name
+# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com
+#vmware_server_host = <None>
+
+# Server username (string value)
+#vmware_server_username = <None>
+
+# Server password (string value)
+#vmware_server_password = <None>
+
+# Inventory path to a datacenter (string value)
+# Value optional when vmware_server_host is an ESX/ESXi host: if specified
+# should be `ha-datacenter`.
+#vmware_datacenter_path = <None>
+
+# Datastore associated with the datacenter (string value)
+#vmware_datastore_name = <None>
+
+# The number of times we retry on failures
+# e.g., socket error, etc (integer value)
+#vmware_api_retry_count = 10
+
+# The interval used for polling remote tasks
+# invoked on VMware ESX/VC server in seconds (integer value)
+#vmware_task_poll_interval = 5
+
+# Absolute path of the folder containing the images in the datastore
+# (string value)
+#vmware_store_image_dir = /openstack_glance
+
+# Allow insecure SSL requests to the target system (boolean value)
+#vmware_api_insecure = False
diff --git a/openstack/usr/share/openstack/glance/glance-cache.conf b/openstack/usr/share/openstack/glance/glance-cache.conf
new file mode 100644
index 00000000..3f2d4603
--- /dev/null
+++ b/openstack/usr/share/openstack/glance/glance-cache.conf
@@ -0,0 +1,200 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+# log_file = /var/log/glance/image-cache.log
+
+# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
+use_syslog = True
+
+# Directory that the Image Cache writes data to
+image_cache_dir = /var/lib/glance/image-cache/
+
+# Number of seconds after which we should consider an incomplete image to be
+# stalled and eligible for reaping
+image_cache_stall_time = 86400
+
+# Max cache size in bytes
+image_cache_max_size = 10737418240
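+# For reference, the two values above correspond to 24 hours
+# (86400 seconds) and 10 GiB (10737418240 bytes) respectively.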
+
+# Address to find the registry server
+registry_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# Auth settings if using Keystone
+# auth_url = http://127.0.0.1:5000/v2.0/
+# admin_tenant_name = %SERVICE_TENANT_NAME%
+# admin_user = %SERVICE_USER%
+# admin_password = %SERVICE_PASSWORD%
+
+# List of which store classes and store class locations are
+# currently known to glance at startup.
+# known_stores = glance.store.filesystem.Store,
+# glance.store.http.Store,
+# glance.store.rbd.Store,
+# glance.store.s3.Store,
+# glance.store.swift.Store,
+# glance.store.sheepdog.Store,
+# glance.store.cinder.Store,
+# glance.store.vmware_datastore.Store,
+
+# ============ Filesystem Store Options ========================
+
+# Directory that the Filesystem backend store
+# writes image data to
+filesystem_store_datadir = /var/lib/glance/images/
+
+# ============ Swift Store Options =============================
+
+# Version of the authentication service to use
+# Valid versions are '2' for keystone and '1' for swauth and rackspace
+swift_store_auth_version = 2
+
+# Address where the Swift authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'https://'
+# For swauth, use something like '127.0.0.1:8080/v1.0/'
+swift_store_auth_address = 127.0.0.1:5000/v2.0/
+
+# User to authenticate against the Swift authentication service
+# If you use Swift authentication service, set it to 'account':'user'
+# where 'account' is a Swift storage account and 'user'
+# is a user in that account
+swift_store_user = jdoe:jdoe
+
+# Auth key for the user authenticating against the
+# Swift authentication service
+swift_store_key = a86850deb2742ec3cb41518e26aa2d89
+
+# Container within the account that Glance should use
+# for storing images in Swift
+swift_store_container = glance
+
+# Do we create the container if it does not exist?
+swift_store_create_container_on_put = False
+
+# What size, in MB, should Glance start chunking image files
+# and do a large object manifest in Swift? By default, this is
+# the maximum object size in Swift, which is 5GB
+swift_store_large_object_size = 5120
+
+# When doing a large object manifest, what size, in MB, should
+# Glance write chunks to Swift? This amount of data is written
+# to a temporary disk buffer during the process of chunking
+# the image file, and the default is 200MB
+swift_store_large_object_chunk_size = 200
+
+# Whether to use ServiceNET to communicate with the Swift storage servers.
+# (If you aren't RACKSPACE, leave this False!)
+#
+# To use ServiceNET for authentication, prefix hostname of
+# `swift_store_auth_address` with 'snet-'.
+# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
+swift_enable_snet = False
+
+# ============ S3 Store Options =============================
+
+# Address where the S3 authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'http://'
+s3_store_host = 127.0.0.1:8080/v1.0/
+
+# User to authenticate against the S3 authentication service
+s3_store_access_key = <20-char AWS access key>
+
+# Auth key for the user authenticating against the
+# S3 authentication service
+s3_store_secret_key = <40-char AWS secret key>
+
+# Bucket within the account that Glance should use
+# for storing images in S3. Note that S3 has a flat namespace,
+# so you need a unique bucket name for your glance images. An
+# easy way to do this is to append your AWS access key to "glance".
+# S3 buckets in AWS *must* be lowercased, so remember to lowercase
+# your AWS access key if you use it in your bucket name below!
+s3_store_bucket = <lowercased 20-char aws access key>glance
+
+# Do we create the bucket if it does not exist?
+s3_store_create_bucket_on_put = False
+
+# When sending images to S3, the data will first be written to a
+# temporary buffer on disk. By default the platform's temporary directory
+# will be used. If required, an alternative directory can be specified here.
+# s3_store_object_buffer_dir = /path/to/dir
+
+# ============ Cinder Store Options ===========================
+
+# Info to match when looking for cinder in the service catalog
+# Format is colon-separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v1/%(project_id)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node (string value)
+#os_region_name = <None>
+
+# Location of the CA certificates file to use for cinder client requests
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Allow insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = False
+
+# ============ VMware Datastore Store Options =====================
+
+# ESX/ESXi or vCenter Server target system.
+# The server value can be an IP address or a DNS name
+# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com
+#vmware_server_host = <None>
+
+# Server username (string value)
+#vmware_server_username = <None>
+
+# Server password (string value)
+#vmware_server_password = <None>
+
+# Inventory path to a datacenter (string value)
+# Value optional when vmware_server_host is an ESX/ESXi host: if specified
+# should be `ha-datacenter`.
+#vmware_datacenter_path = <None>
+
+# Datastore associated with the datacenter (string value)
+#vmware_datastore_name = <None>
+
+# The number of times we retry on failures
+# e.g., socket error, etc (integer value)
+#vmware_api_retry_count = 10
+
+# The interval used for polling remote tasks
+# invoked on VMware ESX/VC server in seconds (integer value)
+#vmware_task_poll_interval = 5
+
+# Absolute path of the folder containing the images in the datastore
+# (string value)
+#vmware_store_image_dir = /openstack_glance
+
+# Allow to perform insecure SSL requests to the target system (boolean value)
+#vmware_api_insecure = False
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+# metadata_encryption_key = <16, 24 or 32 char registry metadata key>
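+# (One illustrative way to generate a suitable 32-character value is
+# `openssl rand -hex 16`, which prints 32 hex characters.)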
diff --git a/openstack/usr/share/openstack/glance/glance-registry-paste.ini b/openstack/usr/share/openstack/glance/glance-registry-paste.ini
new file mode 100644
index 00000000..df403f6e
--- /dev/null
+++ b/openstack/usr/share/openstack/glance/glance-registry-paste.ini
@@ -0,0 +1,30 @@
+# Use this pipeline for no auth - DEFAULT
+[pipeline:glance-registry]
+pipeline = osprofiler unauthenticated-context registryapp
+
+# Use this pipeline for keystone auth
+[pipeline:glance-registry-keystone]
+pipeline = osprofiler authtoken context registryapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user.
+[pipeline:glance-registry-trusted-auth]
+pipeline = osprofiler context registryapp
+
+[app:registryapp]
+paste.app_factory = glance.registry.api:API.factory
+
+[filter:context]
+paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
diff --git a/openstack/usr/share/openstack/glance/glance-registry.conf b/openstack/usr/share/openstack/glance/glance-registry.conf
new file mode 100644
index 00000000..302f4138
--- /dev/null
+++ b/openstack/usr/share/openstack/glance/glance-registry.conf
@@ -0,0 +1,245 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Address to bind the registry server
+bind_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# Port the bind the registry server to
+bind_port = 9191
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+# log_file = /var/log/glance/registry.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package.
+#data_api = glance.db.sqlalchemy.api
+
+# The number of child process workers that will be
+# created to service Registry requests. The default will be
+# equal to the number of CPUs available. (integer value)
+#workers = None
+
+# Enable Registry API versions individually or simultaneously
+#enable_v1_registry = True
+#enable_v2_registry = True
+
+# Limit the api to return `api_limit_max` items in a call to a container. If
+# a larger `limit` query param is provided, it will be reduced to this value.
+api_limit_max = 1000
+
+# If a `limit` query param is not provided in an api request, it will
+# default to `limit_param_default`
+limit_param_default = 25
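+# Worked example: with the two settings above, a request that omits
+# `limit` returns 25 items, while a request asking for limit=5000 is
+# reduced to api_limit_max = 1000.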
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Whether to automatically create the database tables.
+# Default: False
+#db_auto_create = False
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+#sqlalchemy_debug = True
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+use_syslog = True
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL1
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting registry server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting registry server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+# ============ Notification System Options =====================
+
+# Driver or drivers to handle sending notifications. Set to
+# 'messaging' to send notifications to a message queue.
+notification_driver = messagingv2
+
+# Default publisher_id for outgoing notifications.
+# default_publisher_id = image.localhost
+
+# Messaging driver used for 'messaging' notifications driver
+rpc_backend=rabbit
+
+# Configuration options if sending notifications via rabbitmq
+rabbit_host = {{ RABBITMQ_HOST }}
+rabbit_port = {{ RABBITMQ_PORT }}
+rabbit_use_ssl = false
+rabbit_userid = {{ RABBITMQ_USER }}
+rabbit_password = {{ RABBITMQ_PASSWORD }}
+rabbit_virtual_host = /
+rabbit_notification_exchange = glance
+rabbit_notification_topic = notifications
+rabbit_durable_queues = False
+
+# Configuration options if sending notifications via Qpid (these are
+# the defaults)
+qpid_notification_exchange = glance
+qpid_notification_topic = notifications
+qpid_hostname = localhost
+qpid_port = 5672
+qpid_username =
+qpid_password =
+qpid_sasl_mechanisms =
+qpid_reconnect_timeout = 0
+qpid_reconnect_limit = 0
+qpid_reconnect_interval_min = 0
+qpid_reconnect_interval_max = 0
+qpid_reconnect_interval = 0
+qpid_heartbeat = 5
+# Set to 'ssl' to enable SSL
+qpid_protocol = tcp
+qpid_tcp_nodelay = True
+
+
+# ================= Database Options ==========================
+
+[database]
+# The file name to use with SQLite (string value)
+#sqlite_db = glance.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous = True
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+connection=postgresql://{{ GLANCE_DB_USER }}:{{ GLANCE_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/glance
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = <None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect = False
+
+# seconds between db connection retries (integer value)
+#db_retry_interval = 1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval = True
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval = 10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries = 20
+
+[keystone_authtoken]
+auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357
+admin_tenant_name = service
+admin_user = {{ GLANCE_SERVICE_USER }}
+admin_password = {{ GLANCE_SERVICE_PASSWORD }}
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+#config_file = glance-registry-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-registry-keystone], you would configure the flavor below
+# as 'keystone'.
+flavor=keystone
+
+[profiler]
+# If False fully disable profiling feature.
+#enabled = False
+
+# If False doesn't trace SQL requests.
+#trace_sqlalchemy = False
diff --git a/openstack/usr/share/openstack/glance/glance-scrubber.conf b/openstack/usr/share/openstack/glance/glance-scrubber.conf
new file mode 100644
index 00000000..cdbfda71
--- /dev/null
+++ b/openstack/usr/share/openstack/glance/glance-scrubber.conf
@@ -0,0 +1,108 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+# log_file = /var/log/glance/scrubber.log
+
+# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
+use_syslog = True
+
+# Should we run our own loop or rely on cron/scheduler to run us
+daemon = False
+
+# Loop time between checking for new items to schedule for delete
+wakeup_time = 300
+
+# Directory that the scrubber will use to remind itself of what to delete
+# Make sure this is also set in glance-api.conf
+scrubber_datadir = /var/lib/glance/scrubber
+
+# Only one server in your deployment should be designated the cleanup host
+cleanup_scrubber = False
+
+# pending_delete items older than this time are candidates for cleanup
+cleanup_scrubber_time = 86400
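+# Worked example: with delayed_delete enabled in glance-api.conf and
+# scrub_time = 43200 (12 hours) there, an image deleted at 09:00 becomes
+# a scrub candidate around 21:00; with daemon = False above,
+# glance-scrubber must then be invoked (e.g. from cron) to act on it.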
+
+# Address to find the registry server for cleanups
+registry_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# Auth settings if using Keystone
+# auth_url = http://127.0.0.1:5000/v2.0/
+# admin_tenant_name = %SERVICE_TENANT_NAME%
+# admin_user = %SERVICE_USER%
+# admin_password = %SERVICE_PASSWORD%
+
+# Directory to use for lock files. Defaults to a temp directory
+# (string value). This setting needs to be the same for both
+# glance-scrubber and glance-api.
+#lock_path=<None>
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package; it is also possible to use glance.db.registry.api
+#data_api = glance.db.sqlalchemy.api
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
+
+# ================= Database Options ==========================
+
+[database]
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+#connection=sqlite:////glance/openstack/common/db/$sqlite_db
+
+# The SQLAlchemy connection string used to connect to the
+# slave database (string value)
+#slave_connection=
+
+# timeout before idle sql connections are reaped (integer
+# value)
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+#max_pool_size=<None>
+
+# maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+#max_retries=10
+
+# interval between retries of opening a sql connection
+# (integer value)
+#retry_interval=10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+#connection_debug=0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+#connection_trace=false
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+#pool_timeout=<None>
diff --git a/openstack/usr/share/openstack/glance/logging.conf b/openstack/usr/share/openstack/glance/logging.conf
new file mode 100644
index 00000000..7e7f31f0
--- /dev/null
+++ b/openstack/usr/share/openstack/glance/logging.conf
@@ -0,0 +1,54 @@
+[loggers]
+keys=root,api,registry,combined
+
+[formatters]
+keys=normal,normal_with_name,debug
+
+[handlers]
+keys=production,file,devel
+
+[logger_root]
+level=NOTSET
+handlers=devel
+
+[logger_api]
+level=DEBUG
+handlers=devel
+qualname=glance-api
+
+[logger_registry]
+level=DEBUG
+handlers=devel
+qualname=glance-registry
+
+[logger_combined]
+level=DEBUG
+handlers=devel
+qualname=glance-combined
+
+[handler_production]
+class=handlers.SysLogHandler
+level=ERROR
+formatter=normal_with_name
+args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)
+
+[handler_file]
+class=FileHandler
+level=DEBUG
+formatter=normal_with_name
+args=('glance.log', 'w')
+
+[handler_devel]
+class=StreamHandler
+level=NOTSET
+formatter=debug
+args=(sys.stdout,)
+
+[formatter_normal]
+format=%(asctime)s %(levelname)s %(message)s
+
+[formatter_normal_with_name]
+format=(%(name)s): %(asctime)s %(levelname)s %(message)s
+
+[formatter_debug]
+format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s
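+# Note: the handlers defined above are only used by loggers that list them;
+# for example, to also capture DEBUG output in glance.log via handler_file,
+# add 'file' to a logger's handlers line, e.g. handlers=devel,file.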
diff --git a/openstack/usr/share/openstack/glance/policy.json b/openstack/usr/share/openstack/glance/policy.json
new file mode 100644
index 00000000..325f00b2
--- /dev/null
+++ b/openstack/usr/share/openstack/glance/policy.json
@@ -0,0 +1,52 @@
+{
+ "context_is_admin": "role:admin",
+ "default": "",
+
+ "add_image": "",
+ "delete_image": "",
+ "get_image": "",
+ "get_images": "",
+ "modify_image": "",
+ "publicize_image": "role:admin",
+ "copy_from": "",
+
+ "download_image": "",
+ "upload_image": "",
+
+ "delete_image_location": "",
+ "get_image_location": "",
+ "set_image_location": "",
+
+ "add_member": "",
+ "delete_member": "",
+ "get_member": "",
+ "get_members": "",
+ "modify_member": "",
+
+ "manage_image_cache": "role:admin",
+
+ "get_task": "",
+ "get_tasks": "",
+ "add_task": "",
+ "modify_task": "",
+
+ "get_metadef_namespace": "",
+ "get_metadef_namespaces":"",
+ "modify_metadef_namespace":"",
+ "add_metadef_namespace":"",
+
+ "get_metadef_object":"",
+ "get_metadef_objects":"",
+ "modify_metadef_object":"",
+ "add_metadef_object":"",
+
+ "list_metadef_resource_types":"",
+ "get_metadef_resource_type":"",
+ "add_metadef_resource_type_association":"",
+
+ "get_metadef_property":"",
+ "get_metadef_properties":"",
+ "modify_metadef_property":"",
+ "add_metadef_property":""
+
+}
diff --git a/openstack/usr/share/openstack/glance/schema-image.json b/openstack/usr/share/openstack/glance/schema-image.json
new file mode 100644
index 00000000..5aafd6b3
--- /dev/null
+++ b/openstack/usr/share/openstack/glance/schema-image.json
@@ -0,0 +1,28 @@
+{
+ "kernel_id": {
+ "type": "string",
+ "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+ "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image."
+ },
+ "ramdisk_id": {
+ "type": "string",
+ "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+ "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image."
+ },
+ "instance_uuid": {
+ "type": "string",
+ "description": "ID of instance used to create this image."
+ },
+ "architecture": {
+ "description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
+ "type": "string"
+ },
+ "os_distro": {
+ "description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
+ "type": "string"
+ },
+ "os_version": {
+ "description": "Operating system version as specified by the distributor",
+ "type": "string"
+ }
+}
diff --git a/openstack/usr/share/openstack/horizon.yml b/openstack/usr/share/openstack/horizon.yml
new file mode 100644
index 00000000..14cea5c5
--- /dev/null
+++ b/openstack/usr/share/openstack/horizon.yml
@@ -0,0 +1,47 @@
+---
+- hosts: localhost
+ tasks:
+
+# Setup apache, this may end up in apache.yml
+ - name: Create the apache user.
+ user:
+ name: apache
+ comment: Apache Server
+ shell: /sbin/nologin
+ home: /var/www
+
+ - file:
+ path: /usr/sbin/suexec
+ group: apache
+ mode: 4750
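+    # For reference: mode 4750 keeps the setuid bit on suexec (it runs as
+    # its owner, normally root) while allowing execution only by members
+    # of the apache group (rwsr-x---).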
+
+# Setup horizon
+ - name: Create the horizon user.
+ user:
+ name: horizon
+ comment: Openstack Horizon User
+ shell: /sbin/nologin
+ home: /var/lib/horizon
+
+ - name: Create the /var folders for horizon
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: horizon
+ group: horizon
+ with_items:
+ - /var/lib/horizon
+
+ - name: Link horizon apache configuration
+ file:
+ src: /etc/horizon/apache-horizon.conf
+ dest: /etc/httpd/conf.d/apache-horizon.conf
+ state: link
+
+ - name: Enable and start apache services needed by horizon
+ service:
+ name: "{{ item }}"
+ enabled: yes
+ state: started
+ with_items:
+ - apache-httpd.service
diff --git a/openstack/usr/share/openstack/hosts b/openstack/usr/share/openstack/hosts
new file mode 100644
index 00000000..5b97818d
--- /dev/null
+++ b/openstack/usr/share/openstack/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local
diff --git a/openstack/usr/share/openstack/ironic.yml b/openstack/usr/share/openstack/ironic.yml
new file mode 100644
index 00000000..db0a8aa8
--- /dev/null
+++ b/openstack/usr/share/openstack/ironic.yml
@@ -0,0 +1,104 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/ironic.conf"
+ tasks:
+ - name: Create the ironic user
+ user:
+ name: ironic
+ comment: Openstack Ironic Daemons
+ shell: /sbin/nologin
+ home: /var/lib/ironic
+
+ - name: Create the /var folders for Ironic
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: ironic
+ group: ironic
+ with_items:
+ - /var/run/ironic
+ - /var/lock/ironic
+ - /var/log/ironic
+ - /var/lib/ironic
+
+ - file: path=/etc/ironic state=directory
+ - name: Add the configuration needed for ironic in /etc/ironic using templates
+ template:
+ src: /usr/share/openstack/ironic/{{ item }}
+ dest: /etc/ironic/{{ item }}
+ with_lines:
+ - cd /usr/share/openstack/ironic && find -type f
+
+ - name: Create Ironic service user in service tenant
+ keystone_user:
+ user: "{{ IRONIC_SERVICE_USER }}"
+ password: "{{ IRONIC_SERVICE_PASSWORD }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Assign admin role to Ironic service user in the service tenant
+ keystone_user:
+ role: admin
+ user: "{{ IRONIC_SERVICE_USER }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Add Ironic endpoint
+ keystone_service:
+ name: ironic
+ type: baremetal
+ description: Openstack Ironic Service
+ publicurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:6385'
+ internalurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:6385'
+ adminurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:6385'
+ region: 'regionOne'
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Create postgresql user for Ironic
+ postgresql_user:
+ name: "{{ IRONIC_DB_USER }}"
+ login_host: "{{ CONTROLLER_HOST_ADDRESS }}"
+ password: "{{ IRONIC_DB_PASSWORD }}"
+ sudo: yes
+ sudo_user: ironic
+
+ - name: Create database for Ironic services
+ postgresql_db:
+ name: ironic
+ owner: "{{ IRONIC_DB_USER }}"
+ login_host: "{{ CONTROLLER_HOST_ADDRESS }}"
+ sudo: yes
+ sudo_user: ironic
+
+ - name: Initiate Ironic database
+ # Use 'upgrade' instead of 'create_schema' to make the operation
+ # idempotent
+ shell: |
+ ironic-dbsync \
+ --config-file /etc/ironic/ironic.conf upgrade
+ sudo: yes
+ sudo_user: ironic
+
+ - name: Enable and start openstack-ironic services
+ service:
+ name: "{{ item }}"
+ enabled: yes
+ state: started
+ with_items:
+ - openstack-ironic-conductor.service
+ - openstack-ironic-api.service
+
+ - name: Set owner and group for the tftp root directory
+ file:
+ path: "/srv/tftp_root/"
+ state: directory
+ owner: ironic
+ group: ironic
+ recurse: yes
+
+ - name: Enable and start tftp-hpa
+ service:
+ name: tftp-hpa.socket
+ enabled: yes
+ state: started
diff --git a/openstack/usr/share/openstack/ironic/ironic.conf b/openstack/usr/share/openstack/ironic/ironic.conf
new file mode 100644
index 00000000..75c62b8e
--- /dev/null
+++ b/openstack/usr/share/openstack/ironic/ironic.conf
@@ -0,0 +1,1247 @@
+[DEFAULT]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The number of prefetched messages held by receiver. (integer
+# value)
+#qpid_receiver_capacity=1
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+rabbit_host={{ RABBITMQ_HOST }}
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+rabbit_port={{ RABBITMQ_PORT }}
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+rabbit_userid={{ RABBITMQ_USER }}
+
+# The RabbitMQ password. (string value)
+rabbit_password={{ RABBITMQ_PASSWORD }}
+
+# the RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# If passed, use a fake RabbitMQ provider. (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=ironic
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+#notification_driver=
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+#rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in ironic.netconf
+#
+
+# IP address of this host. (string value)
+my_ip={{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# Use IPv6. (boolean value)
+#use_ipv6=false
+
+
+#
+# Options defined in ironic.api.app
+#
+
+# Method to use for authentication: noauth or keystone.
+# (string value)
+#auth_strategy=keystone
+
+
+#
+# Options defined in ironic.common.driver_factory
+#
+
+# Specify the list of drivers to load during service
+# initialization. Missing drivers, or drivers which fail to
+# initialize, will prevent the conductor service from
+# starting. The option default is a recommended set of
+# production-oriented drivers. A complete list of drivers
+# present on your system may be found by enumerating the
+# "ironic.drivers" entrypoint. An example may be found in the
+# developer documentation online. (list value)
+enabled_drivers=pxe_ipmitool,pxe_ssh
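+# One illustrative way to enumerate that entrypoint on a host is:
+#   python -c "import pkg_resources; print([e.name for e in pkg_resources.iter_entry_points('ironic.drivers')])"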
+
+
+#
+# Options defined in ironic.common.exception
+#
+
+# Make exception message format errors fatal. (boolean value)
+#fatal_exception_format_errors=false
+
+
+#
+# Options defined in ironic.common.hash_ring
+#
+
+# Exponent to determine number of hash partitions to use when
+# distributing load across conductors. Larger values will
+# result in more even distribution of load and less load when
+# rebalancing the ring, but more memory usage. Number of
+# partitions per conductor is (2^hash_partition_exponent).
+# This determines the granularity of rebalancing: given 10
+# hosts and an exponent of 2, there are 40 partitions in
+# the ring. A few thousand partitions should make rebalancing
+# smooth in most cases. The default is suitable for up to a
+# few hundred conductors. Too many partitions have a CPU
+# impact. (integer value)
+#hash_partition_exponent=5
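+# For example, with the default exponent of 5 above, each conductor maps
+# 2^5 = 32 partitions, so ten conductors give 320 partitions in total.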
+
+# [Experimental Feature] Number of hosts to map onto each hash
+# partition. Setting this to more than one will cause
+# additional conductor services to prepare deployment
+# environments and potentially allow the Ironic cluster to
+# recover more quickly if a conductor instance is terminated.
+# (integer value)
+#hash_distribution_replicas=1
+
+
+#
+# Options defined in ironic.common.images
+#
+
+# Force backing images to raw format. (boolean value)
+#force_raw_images=true
+
+# Path to isolinux binary file. (string value)
+#isolinux_bin=/usr/lib/syslinux/isolinux.bin
+
+# Template file for isolinux configuration file. (string
+# value)
+#isolinux_config_template=$pybasedir/common/isolinux_config.template
+
+
+#
+# Options defined in ironic.common.paths
+#
+
+# Directory where the ironic python module is installed.
+# (string value)
+#pybasedir=/usr/lib/python/site-packages/ironic
+
+# Directory where ironic binaries are installed. (string
+# value)
+#bindir=$pybasedir/bin
+
+# Top-level directory for maintaining ironic's state. (string
+# value)
+#state_path=$pybasedir
+
+
+#
+# Options defined in ironic.common.policy
+#
+
+# JSON file representing policy. (string value)
+#policy_file=policy.json
+
+# Rule checked when requested rule is not found. (string
+# value)
+#policy_default_rule=default
+
+
+#
+# Options defined in ironic.common.service
+#
+
+# Seconds between running periodic tasks. (integer value)
+#periodic_interval=60
+
+# Name of this node. This can be an opaque identifier. It is
+# not necessarily a hostname, FQDN, or IP address. However,
+# the node name must be valid within an AMQP key, and if using
+# ZeroMQ, a valid hostname, FQDN, or IP address. (string
+# value)
+#host=ironic
+
+
+#
+# Options defined in ironic.common.utils
+#
+
+# Path to the rootwrap configuration file to use for running
+# commands as root. (string value)
+#rootwrap_config=/etc/ironic/rootwrap.conf
+
+# Explicitly specify the temporary working directory. (string
+# value)
+#tempdir=<None>
+
+
+#
+# Options defined in ironic.drivers.modules.image_cache
+#
+
+# Run image downloads and raw format conversions in parallel.
+# (boolean value)
+#parallel_image_downloads=false
+
+
+#
+# Options defined in ironic.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in ironic.openstack.common.lockutils
+#
+
+# Enables or disables inter-process locks. (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. (string value)
+#lock_path=<None>
+
+
+#
+# Options defined in ironic.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error. (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context. (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context.
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format.
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN
+
+# Enables or disables publication of error events. (boolean
+# value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log
+# message. (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log
+# message. (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of a logging configuration file. This file is
+# appended to any existing logging configuration files. For
+# details about logging configuration files, see the Python
+# logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s . (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=<None>
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and will change in J to honor RFC5424. (boolean
+# value)
+use_syslog=True
+
+# (Optional) Enables or disables syslog rfc5424 format for
+# logging. If enabled, prefixes the MSG part of the syslog
+# message with APP-NAME (RFC5424). The format without the APP-
+# NAME is deprecated in I, and will be removed in J. (boolean
+# value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in ironic.openstack.common.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+
+[agent]
+
+#
+# Options defined in ironic.drivers.modules.agent
+#
+
+# Additional append parameters for baremetal PXE boot. (string
+# value)
+#agent_pxe_append_params=nofb nomodeset vga=normal
+
+# Template file for PXE configuration. (string value)
+#agent_pxe_config_template=$pybasedir/drivers/modules/agent_config.template
+
+# Neutron bootfile DHCP parameter. (string value)
+#agent_pxe_bootfile_name=pxelinux.0
+
+# Maximum interval (in seconds) for agent heartbeats. (integer
+# value)
+#heartbeat_timeout=300
+
+
+#
+# Options defined in ironic.drivers.modules.agent_client
+#
+
+# API version to use for communicating with the ramdisk agent.
+# (string value)
+#agent_api_version=v1
+
+
+[api]
+
+#
+# Options defined in ironic.api
+#
+
+# The listen IP for the Ironic API server. (string value)
+#host_ip=0.0.0.0
+
+# The port for the Ironic API server. (integer value)
+#port=6385
+
+# The maximum number of items returned in a single response
+# from a collection resource. (integer value)
+#max_limit=1000
+
+
+[conductor]
+
+#
+# Options defined in ironic.conductor.manager
+#
+
+# URL of the Ironic API service. If not set, ironic can get
+# the current value from the keystone service catalog. (string
+# value)
+api_url=http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:6385
+
+# Seconds between conductor heart beats. (integer value)
+#heartbeat_interval=10
+
+# Maximum time (in seconds) since the last check-in of a
+# conductor. (integer value)
+#heartbeat_timeout=60
+
+# Interval between syncing the node power state to the
+# database, in seconds. (integer value)
+#sync_power_state_interval=60
+
+# Interval between checks of provision timeouts, in seconds.
+# (integer value)
+#check_provision_state_interval=60
+
+# Timeout (seconds) for waiting callback from deploy ramdisk.
+# 0 - unlimited. (integer value)
+#deploy_callback_timeout=1800
+
+# During sync_power_state, should the hardware power state be
+# set to the state recorded in the database (True) or should
+# the database be updated based on the hardware state (False).
+# (boolean value)
+#force_power_state_during_sync=true
+
+# During sync_power_state failures, limit the number of times
+# Ironic should try syncing the hardware node power state with
+# the node power state in DB (integer value)
+#power_state_sync_max_retries=3
+
+# Maximum number of worker threads that can be started
+# simultaneously by a periodic task. Should be less than RPC
+# thread pool size. (integer value)
+#periodic_max_workers=8
+
+# The size of the workers greenthread pool. (integer value)
+#workers_pool_size=100
+
+# Number of attempts to grab a node lock. (integer value)
+#node_locked_retry_attempts=3
+
+# Seconds to sleep between node lock attempts. (integer value)
+#node_locked_retry_interval=1
+
+# Enable sending sensor data message via the notification bus
+# (boolean value)
+#send_sensor_data=false
+
+# Seconds between conductor sending sensor data message to
+# ceilometer via the notification bus. (integer value)
+#send_sensor_data_interval=600
+
+# List of comma separated metric types which need to be sent
+# to Ceilometer. The default value, "ALL", is a special value
+# meaning send all the sensor data. (list value)
+#send_sensor_data_types=ALL
+
+# When conductors join or leave the cluster, existing
+# conductors may need to update any persistent local state as
+# nodes are moved around the cluster. This option controls how
+# often, in seconds, each conductor will check for nodes that
+# it should "take over". Set it to a negative value to disable
+# the check entirely. (integer value)
+#sync_local_state_interval=180
+
+
+[console]
+
+#
+# Options defined in ironic.drivers.modules.console_utils
+#
+
+# Path to serial console terminal program (string value)
+#terminal=shellinaboxd
+
+# Directory containing the terminal SSL cert(PEM) for serial
+# console access (string value)
+#terminal_cert_dir=<None>
+
+# Directory for holding terminal pid files. If not specified,
+# the temporary directory will be used. (string value)
+#terminal_pid_dir=<None>
+
+# Time interval (in seconds) for checking the status of
+# console subprocess. (integer value)
+#subprocess_checking_interval=1
+
+# Time (in seconds) to wait for the console subprocess to
+# start. (integer value)
+#subprocess_timeout=10
+
+
+[database]
+
+#
+# Options defined in oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+#sqlite_db=oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous=true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the
+# database. (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+connection=postgresql://{{ IRONIC_DB_USER }}:{{ IRONIC_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/ironic
+
+# The SQLAlchemy connection string to use to connect to the
+# slave database. (string value)
+#slave_connection=<None>
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum db connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a SQL connection.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information: 0=None,
+# 100=Everything. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add Python stack traces to SQL as comment strings. (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+# Enable the experimental use of database reconnect on
+# connection lost. (boolean value)
+#use_db_reconnect=false
+
+# Seconds between database connection retries. (integer value)
+#db_retry_interval=1
+
+# If True, increases the interval between database connection
+# retries up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval=true
+
+# If db_inc_retry_interval is set, the maximum seconds between
+# database connection retries. (integer value)
+#db_max_retry_interval=10
+
+# Maximum database connection retries before error is raised.
+# Set to -1 to specify an infinite retry count. (integer
+# value)
+#db_max_retries=20
+
+
+#
+# Options defined in ironic.db.sqlalchemy.models
+#
+
+# MySQL engine to use. (string value)
+#mysql_engine=InnoDB
+
+
+[dhcp]
+
+#
+# Options defined in ironic.common.dhcp_factory
+#
+
+# DHCP provider to use. "neutron" uses Neutron, and "none"
+# uses a no-op provider. (string value)
+#dhcp_provider=neutron
+
+
+[disk_partitioner]
+
+#
+# Options defined in ironic.common.disk_partitioner
+#
+
+# After Ironic has finished creating the partition table, it
+# checks the attached iSCSI device for activity at this
+# interval (in seconds) before copying the image to the node.
+# (integer value)
+#check_device_interval=1
+
+# The maximum number of times to check that the device is not
+# accessed by another process. If the device is still busy
+# after that, the disk partitioning will be treated as having
+# failed. (integer value)
+#check_device_max_retries=20
+
+
+[glance]
+
+#
+# Options defined in ironic.common.glance_service.v2.image_service
+#
+
+# A list of URL schemes that can be downloaded directly via
+# the direct_url. Currently supported schemes: [file]. (list
+# value)
+#allowed_direct_url_schemes=
+
+# The secret token given to Swift to allow temporary URL
+# downloads. Required for temporary URLs. (string value)
+#swift_temp_url_key=<None>
+
+# The length of time in seconds that the temporary URL will be
+# valid for. Defaults to 20 minutes. If some deploys get a 401
+# response code when trying to download from the temporary
+# URL, try raising this duration. (integer value)
+#swift_temp_url_duration=1200
+
+# The "endpoint" (scheme, hostname, optional port) for the
+# Swift URL of the form
+# "endpoint_url/api_version/account/container/object_id". Do
+# not include trailing "/". For example, use
+# "https://swift.example.com". Required for temporary URLs.
+# (string value)
+#swift_endpoint_url=<None>
+
+# The Swift API version to create a temporary URL for.
+# Defaults to "v1". Swift temporary URL format:
+# "endpoint_url/api_version/account/container/object_id"
+# (string value)
+#swift_api_version=v1
+
+# The account that Glance uses to communicate with Swift. The
+# format is "AUTH_uuid". "uuid" is the UUID for the account
+# configured in the glance-api.conf. Required for temporary
+# URLs. For example:
+# "AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30". Swift temporary
+# URL format:
+# "endpoint_url/api_version/account/container/object_id"
+# (string value)
+#swift_account=<None>
+
+# The Swift container Glance is configured to store its images
+# in. Defaults to "glance", which is the default in glance-
+# api.conf. Swift temporary URL format:
+# "endpoint_url/api_version/account/container/object_id"
+# (string value)
+#swift_container=glance
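+# Putting the settings above together (using the example values from
+# these comments, purely for illustration), a temporary URL would take
+# the form:
+# https://swift.example.com/v1/AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30/glance/<object_id>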
+
+
+#
+# Options defined in ironic.common.image_service
+#
+
+# Default glance hostname or IP address. (string value)
+glance_host={{ CONTROLLER_HOST_ADDRESS }}
+
+# Default glance port. (integer value)
+#glance_port=9292
+
+# Default protocol to use when connecting to glance. Set to
+# https for SSL. (string value)
+#glance_protocol=http
+
+# A list of the glance api servers available to ironic. Prefix
+# with https:// for SSL-based glance API servers. Format is
+# [hostname|IP]:port. (string value)
+#glance_api_servers=<None>
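+# Illustrative value only (the host is a placeholder):
+#glance_api_servers=10.0.0.10:9292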
+
+# Allow to perform insecure SSL (https) requests to glance.
+# (boolean value)
+#glance_api_insecure=false
+
+# Number of retries when downloading an image from glance.
+# (integer value)
+#glance_num_retries=0
+
+# Authentication strategy to use when connecting to glance. Only
+# "keystone" and "noauth" are supported. (string value)
+#auth_strategy=keystone
+
+
+[ilo]
+
+#
+# Options defined in ironic.drivers.modules.ilo.common
+#
+
+# Timeout (in seconds) for iLO operations (integer value)
+#client_timeout=60
+
+# Port to be used for iLO operations (integer value)
+#client_port=443
+
+# The Swift iLO container to store data. (string value)
+#swift_ilo_container=ironic_ilo_container
+
+# Amount of time in seconds for Swift objects to auto-expire.
+# (integer value)
+#swift_object_expiry_timeout=900
+
+
+#
+# Options defined in ironic.drivers.modules.ilo.power
+#
+
+# Number of times a power operation needs to be retried
+# (integer value)
+#power_retry=6
+
+# Amount of time in seconds to wait in between power
+# operations (integer value)
+#power_wait=2
+
+
+[ipmi]
+
+#
+# Options defined in ironic.drivers.modules.ipminative
+#
+
+# Maximum time in seconds to retry IPMI operations. (integer
+# value)
+#retry_timeout=60
+
+# Minimum time, in seconds, between IPMI operations sent to a
+# server. There is a risk with some hardware that setting this
+# too low may cause the BMC to crash. Recommended setting is 5
+# seconds. (integer value)
+#min_command_interval=5
+
+
+[keystone_authtoken]
+
+#
+# Options defined in keystonemiddleware.auth_token
+#
+
+# Prefix to prepend at the beginning of the path. Deprecated,
+# use identity_uri. (string value)
+#auth_admin_prefix=
+
+# Host providing the admin Identity API endpoint. Deprecated,
+# use identity_uri. (string value)
+#auth_host=127.0.0.1
+
+# Port of the admin Identity API endpoint. Deprecated, use
+# identity_uri. (integer value)
+#auth_port=35357
+
+# Protocol of the admin Identity API endpoint (http or https).
+# Deprecated, use identity_uri. (string value)
+#auth_protocol=https
+
+# Complete public Identity API endpoint (string value)
+auth_uri=http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+
+# Complete admin Identity API endpoint. This should specify
+# the unversioned root endpoint e.g. https://localhost:35357/
+# (string value)
+identity_uri=http://{{ CONTROLLER_HOST_ADDRESS }}:35357
+
+# API version of the admin Identity API endpoint (string
+# value)
+#auth_version=<None>
+
+# Do not handle authorization requests within the middleware,
+# but delegate the authorization decision to downstream WSGI
+# components (boolean value)
+#delay_auth_decision=false
+
+# Request timeout value for communicating with Identity API
+# server. (integer value)
+#http_connect_timeout=<None>
+
+# How many times are we trying to reconnect when communicating
+# with Identity API Server. (integer value)
+#http_request_max_retries=3
+
+# This option is deprecated and may be removed in a future
+# release. Single shared secret with the Keystone
+# configuration used for bootstrapping a Keystone
+# installation, or otherwise bypassing the normal
+# authentication process. This option should not be used, use
+# `admin_user` and `admin_password` instead. (string value)
+#admin_token=<None>
+
+# Keystone account username (string value)
+admin_user={{ IRONIC_SERVICE_USER }}
+
+# Keystone account password (string value)
+admin_password={{ IRONIC_SERVICE_PASSWORD }}
+
+# Keystone service account tenant name to validate user tokens
+# (string value)
+admin_tenant_name=service
+
+# Env key for the swift cache (string value)
+#cache=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#certfile=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#keyfile=<None>
+
+# A PEM encoded Certificate Authority to use when verifying
+# HTTPs connections. Defaults to system CAs. (string value)
+#cafile=<None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure=false
+
+# Directory used to cache files related to PKI tokens (string
+# value)
+#signing_dir=<None>
+
+# Optionally specify a list of memcached server(s) to use for
+# caching. If left undefined, tokens will instead be cached
+# in-process. (list value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers=<None>
+
+# In order to prevent excessive effort spent validating
+# tokens, the middleware caches previously-seen tokens for a
+# configurable duration (in seconds). Set to -1 to disable
+# caching completely. (integer value)
+#token_cache_time=300
+
+# Determines the frequency at which the list of revoked tokens
+# is retrieved from the Identity service (in seconds). A high
+# number of revocation events combined with a low cache
+# duration may significantly reduce performance. (integer
+# value)
+#revocation_cache_time=10
+
+# (optional) if defined, indicate whether token data should be
+# authenticated or authenticated and encrypted. Acceptable
+# values are MAC or ENCRYPT. If MAC, token data is
+# authenticated (with HMAC) in the cache. If ENCRYPT, token
+# data is encrypted and authenticated in the cache. If the
+# value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+#memcache_security_strategy=<None>
+
+# (optional, mandatory if memcache_security_strategy is
+# defined) this string is used for key derivation. (string
+# value)
+#memcache_secret_key=<None>
+
+# (optional) number of seconds memcached server is considered
+# dead before it is tried again. (integer value)
+#memcache_pool_dead_retry=300
+
+# (optional) max total number of open connections to every
+# memcached server. (integer value)
+#memcache_pool_maxsize=10
+
+# (optional) socket timeout in seconds for communicating with
+# a memcache server. (integer value)
+#memcache_pool_socket_timeout=3
+
+# (optional) number of seconds a connection to memcached is
+# held unused in the pool before it is closed. (integer value)
+#memcache_pool_unused_timeout=60
+
+# (optional) number of seconds that an operation will wait to
+# get a memcache client connection from the pool. (integer
+# value)
+#memcache_pool_conn_get_timeout=10
+
+# (optional) use the advanced (eventlet safe) memcache client
+# pool. The advanced pool will only work under python 2.x.
+# (boolean value)
+#memcache_use_advanced_pool=false
+
+# (optional) indicate whether to set the X-Service-Catalog
+# header. If False, middleware will not ask for service
+# catalog on token validation and will not set the X-Service-
+# Catalog header. (boolean value)
+#include_service_catalog=true
+
+# Used to control the use and type of token binding. Can be
+# set to: "disabled" to not check token binding. "permissive"
+# (default) to validate binding information if the bind type
+# is of a form known to the server and ignore it if not.
+# "strict" like "permissive" but if the bind type is unknown
+# the token will be rejected. "required" any form of token
+# binding is needed to be allowed. Finally the name of a
+# binding method that must be present in tokens. (string
+# value)
+#enforce_token_bind=permissive
+
+# If true, the revocation list will be checked for cached
+# tokens. This requires that PKI tokens are configured on the
+# Keystone server. (boolean value)
+#check_revocations_for_cached=false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a
+# single algorithm or multiple. The algorithms are those
+# supported by Python standard hashlib.new(). The hashes will
+# be tried in the order given, so put the preferred one first
+# for performance. The result of the first hash will be stored
+# in the cache. This will typically be set to multiple values
+# only while migrating from a less secure algorithm to a more
+# secure one. Once all the old tokens are expired this option
+# should be set to a single value for better performance.
+# (list value)
+#hash_algorithms=md5
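+# Illustrative only: while migrating away from md5, the preferred
+# algorithm could temporarily be listed first, for example
+#hash_algorithms=sha256,md5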
+
+
+[matchmaker_redis]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[neutron]
+
+#
+# Options defined in ironic.dhcp.neutron
+#
+
+# URL for connecting to neutron. (string value)
+url=http://{{ CONTROLLER_HOST_ADDRESS }}:9696
+
+# Timeout value for connecting to neutron in seconds. (integer
+# value)
+#url_timeout=30
+
+# Default authentication strategy to use when connecting to
+# neutron. Can be either "keystone" or "noauth". Running
+# neutron in noauth mode (related to but not affected by this
+# setting) is insecure and should only be used for testing.
+# (string value)
+#auth_strategy=keystone
+
+
+[pxe]
+
+#
+# Options defined in ironic.drivers.modules.iscsi_deploy
+#
+
+# Additional append parameters for baremetal PXE boot. (string
+# value)
+#pxe_append_params=nofb nomodeset vga=normal
+
+# Default file system format for ephemeral partition, if one
+# is created. (string value)
+#default_ephemeral_format=ext4
+
+# Directory where images are stored on disk. (string value)
+#images_path=/var/lib/ironic/images/
+
+# Directory where master instance images are stored on disk.
+# (string value)
+#instance_master_path=/var/lib/ironic/master_images
+
+# Maximum size (in MiB) of cache for master images, including
+# those in use. (integer value)
+#image_cache_size=20480
+
+# Maximum TTL (in minutes) for old master images in cache.
+# (integer value)
+#image_cache_ttl=10080
+
+# The disk devices to scan while doing the deploy. (string
+# value)
+#disk_devices=cciss/c0d0,sda,hda,vda
+
+
+#
+# Options defined in ironic.drivers.modules.pxe
+#
+
+# Template file for PXE configuration. (string value)
+#pxe_config_template=$pybasedir/drivers/modules/pxe_config.template
+
+# Template file for PXE configuration for UEFI boot loader.
+# (string value)
+#uefi_pxe_config_template=$pybasedir/drivers/modules/elilo_efi_pxe_config.template
+
+# IP address of Ironic compute node's tftp server. (string
+# value)
+#tftp_server=$my_ip
+
+# Ironic compute node's tftp root path. (string value)
+tftp_root=/srv/tftp_root/
+
+# Directory where master tftp images are stored on disk.
+# (string value)
+tftp_master_path=/srv/tftp_root/master_images
+
+# Bootfile DHCP parameter. (string value)
+#pxe_bootfile_name=pxelinux.0
+
+# Bootfile DHCP parameter for UEFI boot mode. (string value)
+#uefi_pxe_bootfile_name=elilo.efi
+
+# Ironic compute node's HTTP server URL. Example:
+# http://192.1.2.3:8080 (string value)
+#http_url=<None>
+
+# Ironic compute node's HTTP root path. (string value)
+#http_root=/httpboot
+
+# Enable iPXE boot. (boolean value)
+#ipxe_enabled=false
+
+# The path to the main iPXE script file. (string value)
+#ipxe_boot_script=$pybasedir/drivers/modules/boot.ipxe
+
+
+[seamicro]
+
+#
+# Options defined in ironic.drivers.modules.seamicro
+#
+
+# Maximum retries for SeaMicro operations (integer value)
+#max_retry=3
+
+# Seconds to wait for power action to be completed (integer
+# value)
+#action_timeout=10
+
+
+[snmp]
+
+#
+# Options defined in ironic.drivers.modules.snmp
+#
+
+# Seconds to wait for power action to be completed (integer
+# value)
+#power_timeout=10
+
+
+[ssh]
+
+#
+# Options defined in ironic.drivers.modules.ssh
+#
+
+# libvirt uri (string value)
+#libvirt_uri=qemu:///system
+
+
+[swift]
+
+#
+# Options defined in ironic.common.swift
+#
+
+# Maximum number of times to retry a Swift request, before
+# failing. (integer value)
+#swift_max_retries=2
+
+
diff --git a/openstack/usr/share/openstack/ironic/policy.json b/openstack/usr/share/openstack/ironic/policy.json
new file mode 100644
index 00000000..94ac3a5b
--- /dev/null
+++ b/openstack/usr/share/openstack/ironic/policy.json
@@ -0,0 +1,5 @@
+{
+ "admin": "role:admin or role:administrator",
+ "admin_api": "is_admin:True",
+ "default": "rule:admin_api"
+}
diff --git a/openstack/usr/share/openstack/iscsi.yml b/openstack/usr/share/openstack/iscsi.yml
new file mode 100644
index 00000000..b80377ae
--- /dev/null
+++ b/openstack/usr/share/openstack/iscsi.yml
@@ -0,0 +1,15 @@
+---
+- hosts: localhost
+ tasks:
+ - name: Update kernel module dependencies
+ command: depmod -a
+
+  - name: Generate InitiatorName for iSCSI
+ shell: iscsi-iname
+ register: initiator_name
+
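+  # Note: the pattern below only matches an empty "InitiatorName=" line;
+  # with backrefs enabled, lineinfile leaves the file unchanged when
+  # nothing matches, so an already-populated name is preserved.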
+ - lineinfile:
+ dest: /etc/iscsi/initiatorname.iscsi
+ regexp: '^InitiatorName=$'
+ line: 'InitiatorName={{ initiator_name.stdout }}'
+ backrefs: yes
diff --git a/openstack/usr/share/openstack/keystone.yml b/openstack/usr/share/openstack/keystone.yml
new file mode 100644
index 00000000..330d74d0
--- /dev/null
+++ b/openstack/usr/share/openstack/keystone.yml
@@ -0,0 +1,143 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/keystone.conf"
+ tasks:
+
+ # RabbitMQ configuration, this may end up in a different playbook
+ - name: Create rabbitmq user
+ user:
+ name: rabbitmq
+ comment: Rabbitmq server daemon
+ shell: /sbin/nologin
+ home: /var/lib/rabbitmq
+
+ - name: Create the rabbitmq directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: rabbitmq
+ group: rabbitmq
+ with_items:
+ - /var/run/rabbitmq
+ - /var/log/rabbitmq
+ - /etc/rabbitmq
+
+ - name: Add the configuration needed for rabbitmq in /etc/rabbitmq using templates
+ template:
+ src: /usr/share/openstack/rabbitmq/{{ item }}
+ dest: /etc/rabbitmq/{{ item }}
+ owner: rabbitmq
+ group: rabbitmq
+ mode: 0644
+ with_items:
+ - rabbitmq.config
+ - rabbitmq-env.conf
+
+ - name: Enable and start rabbitmq services
+ service:
+ name: "{{ item }}"
+ enabled: yes
+ state: started
+ with_items:
+ - rabbitmq-server
+
+ # Keystone configuration
+ - name: Create the keystone user.
+ user:
+ name: keystone
+ comment: Openstack Keystone Daemons
+ shell: /sbin/nologin
+ home: /var/lib/keystone
+
+ - name: Create the /var folders for keystone
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: keystone
+ group: keystone
+ with_items:
+ - /var/run/keystone
+ - /var/lock/keystone
+ - /var/log/keystone
+ - /var/lib/keystone
+
+ - name: Create /etc/keystone directory
+ file:
+ path: /etc/keystone
+ state: directory
+
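+ # The task below copies every file found under
+ # /usr/share/openstack/keystone into /etc/keystone, so newly added
+ # templates are picked up without editing this playbook.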
+ - name: Add the configuration needed for keystone in /etc/keystone using templates
+ template:
+ src: /usr/share/openstack/keystone/{{ item }}
+ dest: /etc/keystone/{{ item }}
+ with_lines:
+ - cd /usr/share/openstack/keystone && find -type f
+
+ - name: Create postgresql user for keystone
+ postgresql_user:
+ name: "{{ KEYSTONE_DB_USER }}"
+ password: "{{ KEYSTONE_DB_PASSWORD }}"
+ sudo: yes
+ sudo_user: keystone
+
+ - name: Create database for keystone services
+ postgresql_db:
+ name: keystone
+ owner: "{{ KEYSTONE_DB_USER }}"
+ sudo: yes
+ sudo_user: keystone
+
+ - name: Initiate keystone database
+ keystone_manage:
+ action: dbsync
+ sudo: yes
+ sudo_user: keystone
+
+ - name: Enable and start openstack-keystone service
+ service:
+ name: openstack-keystone.service
+ enabled: yes
+ state: started
+
+ - name: Create admin tenant
+ keystone_user:
+ tenant: admin
+ tenant_description: Admin Tenant
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+ endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+ - name: Create admin user for the admin tenant
+ keystone_user:
+ user: admin
+ tenant: admin
+ password: "{{ KEYSTONE_ADMIN_PASSWORD }}"
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+ endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+ - name: Create admin role for admin user in the admin tenant
+ keystone_user:
+ role: admin
+ user: admin
+ tenant: admin
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+ endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+ - name: Create service tenant
+ keystone_user:
+ tenant: service
+ tenant_description: Service Tenant
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+ endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+ - name: Add keystone endpoint
+ keystone_service:
+ name: keystone
+ type: identity
+ description: Keystone Identity Service
+ publicurl: http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+ internalurl: http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+ adminurl: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+ region: regionOne
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+ endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
diff --git a/openstack/usr/share/openstack/keystone/keystone-paste.ini b/openstack/usr/share/openstack/keystone/keystone-paste.ini
new file mode 100644
index 00000000..46f994c3
--- /dev/null
+++ b/openstack/usr/share/openstack/keystone/keystone-paste.ini
@@ -0,0 +1,121 @@
+# Keystone PasteDeploy configuration file.
+
+[filter:debug]
+paste.filter_factory = keystone.common.wsgi:Debug.factory
+
+[filter:build_auth_context]
+paste.filter_factory = keystone.middleware:AuthContextMiddleware.factory
+
+[filter:token_auth]
+paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
+
+[filter:admin_token_auth]
+paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
+
+[filter:xml_body]
+paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
+
+[filter:xml_body_v2]
+paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV2.factory
+
+[filter:xml_body_v3]
+paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV3.factory
+
+[filter:json_body]
+paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
+
+[filter:user_crud_extension]
+paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
+
+[filter:crud_extension]
+paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
+
+[filter:ec2_extension]
+paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
+
+[filter:ec2_extension_v3]
+paste.filter_factory = keystone.contrib.ec2:Ec2ExtensionV3.factory
+
+[filter:federation_extension]
+paste.filter_factory = keystone.contrib.federation.routers:FederationExtension.factory
+
+[filter:oauth1_extension]
+paste.filter_factory = keystone.contrib.oauth1.routers:OAuth1Extension.factory
+
+[filter:s3_extension]
+paste.filter_factory = keystone.contrib.s3:S3Extension.factory
+
+[filter:endpoint_filter_extension]
+paste.filter_factory = keystone.contrib.endpoint_filter.routers:EndpointFilterExtension.factory
+
+[filter:endpoint_policy_extension]
+paste.filter_factory = keystone.contrib.endpoint_policy.routers:EndpointPolicyExtension.factory
+
+[filter:simple_cert_extension]
+paste.filter_factory = keystone.contrib.simple_cert:SimpleCertExtension.factory
+
+[filter:revoke_extension]
+paste.filter_factory = keystone.contrib.revoke.routers:RevokeExtension.factory
+
+[filter:url_normalize]
+paste.filter_factory = keystone.middleware:NormalizingFilter.factory
+
+[filter:sizelimit]
+paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory
+
+[filter:stats_monitoring]
+paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory
+
+[filter:stats_reporting]
+paste.filter_factory = keystone.contrib.stats:StatsExtension.factory
+
+[filter:access_log]
+paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory
+
+[app:public_service]
+paste.app_factory = keystone.service:public_app_factory
+
+[app:service_v3]
+paste.app_factory = keystone.service:v3_app_factory
+
+[app:admin_service]
+paste.app_factory = keystone.service:admin_app_factory
+
+[pipeline:public_api]
+# The last item in this pipeline must be public_service or an equivalent
+# application. It cannot be a filter.
+pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension user_crud_extension public_service
+
+[pipeline:admin_api]
+# The last item in this pipeline must be admin_service or an equivalent
+# application. It cannot be a filter.
+pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension s3_extension crud_extension admin_service
+
+[pipeline:api_v3]
+# The last item in this pipeline must be service_v3 or an equivalent
+# application. It cannot be a filter.
+pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v3 json_body ec2_extension_v3 s3_extension simple_cert_extension revoke_extension service_v3
+
+[app:public_version_service]
+paste.app_factory = keystone.service:public_version_app_factory
+
+[app:admin_version_service]
+paste.app_factory = keystone.service:admin_version_app_factory
+
+[pipeline:public_version_api]
+pipeline = sizelimit url_normalize xml_body public_version_service
+
+[pipeline:admin_version_api]
+pipeline = sizelimit url_normalize xml_body admin_version_service
+
+[composite:main]
+use = egg:Paste#urlmap
+/v2.0 = public_api
+/v3 = api_v3
+/ = public_version_api
+
+[composite:admin]
+use = egg:Paste#urlmap
+/v2.0 = admin_api
+/v3 = api_v3
+/ = admin_version_api
diff --git a/openstack/usr/share/openstack/keystone/keystone.conf b/openstack/usr/share/openstack/keystone/keystone.conf
new file mode 100644
index 00000000..4e04c81b
--- /dev/null
+++ b/openstack/usr/share/openstack/keystone/keystone.conf
@@ -0,0 +1,1588 @@
+[DEFAULT]
+
+#
+# Options defined in keystone
+#
+
+# A "shared secret" that can be used to bootstrap Keystone.
+# This "token" does not represent a user, and carries no
+# explicit authorization. To disable in production (highly
+# recommended), remove AdminTokenAuthMiddleware from your
+# paste application pipelines (for example, in keystone-
+# paste.ini). (string value)
+admin_token={{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}
+
+# The IP address of the network interface for the public
+# service to listen on. (string value)
+# Deprecated group/name - [DEFAULT]/bind_host
+#public_bind_host=0.0.0.0
+
+# The IP address of the network interface for the admin
+# service to listen on. (string value)
+# Deprecated group/name - [DEFAULT]/bind_host
+#admin_bind_host=0.0.0.0
+
+# (Deprecated) The port which the OpenStack Compute service
+# listens on. This option was only used for string replacement
+# in the templated catalog backend. Templated catalogs should
+# replace the "$(compute_port)s" substitution with the static
+# port of the compute service. As of Juno, this option is
+# deprecated and will be removed in the L release. (integer
+# value)
+#compute_port=8774
+
+# The port number which the admin service listens on. (integer
+# value)
+admin_port=35357
+
+# The port number which the public service listens on.
+# (integer value)
+public_port=5000
+
+# The base public endpoint URL for Keystone that is advertised
+# to clients (NOTE: this does NOT affect how Keystone listens
+# for connections). Defaults to the base host URL of the
+# request. E.g. a request to http://server:5000/v2.0/users
+# will default to http://server:5000. You should only need to
+# set this value if the base URL contains a path (e.g.
+# /prefix/v2.0) or the endpoint should be found on a different
+# server. (string value)
+#public_endpoint=<None>
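+# Illustrative only: if keystone were served under a URL path prefix,
+# this might look something like
+#public_endpoint=http://server:5000/prefix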
+
+# The base admin endpoint URL for Keystone that is advertised
+# to clients (NOTE: this does NOT affect how Keystone listens
+# for connections). Defaults to the base host URL of the
+# request. E.g. a request to http://server:35357/v2.0/users
+# will default to http://server:35357. You should only need to
+# set this value if the base URL contains a path (e.g.
+# /prefix/v2.0) or the endpoint should be found on a different
+# server. (string value)
+#admin_endpoint=<None>
+
+# The number of worker processes to serve the public WSGI
+# application. Defaults to number of CPUs (minimum of 2).
+# (integer value)
+#public_workers=<None>
+
+# The number of worker processes to serve the admin WSGI
+# application. Defaults to number of CPUs (minimum of 2).
+# (integer value)
+#admin_workers=<None>
+
+# Enforced by optional sizelimit middleware
+# (keystone.middleware:RequestBodySizeLimiter). (integer
+# value)
+#max_request_body_size=114688
+
+# Limit the sizes of user & project ID/names. (integer value)
+#max_param_size=64
+
+# Similar to max_param_size, but provides an exception for
+# token values. (integer value)
+#max_token_size=8192
+
+# During a SQL upgrade member_role_id will be used to create a
+# new role that will replace records in the assignment table
+# with explicit role grants. After migration, the
+# member_role_id will be used in the API add_user_to_project.
+# (string value)
+#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab
+
+# During a SQL upgrade member_role_name will be used to create
+# a new role that will replace records in the assignment table
+# with explicit role grants. After migration, member_role_name
+# will be ignored. (string value)
+#member_role_name=_member_
+
+# The value passed as the keyword "rounds" to passlib's
+# encrypt method. (integer value)
+#crypt_strength=40000
+
+# Set this to true if you want to enable TCP_KEEPALIVE on
+# server sockets, i.e. sockets used by the Keystone wsgi
+# server for client connections. (boolean value)
+#tcp_keepalive=false
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server
+# socket. Only applies if tcp_keepalive is true. Not supported
+# on OS X. (integer value)
+#tcp_keepidle=600
+
+# The maximum number of entities that will be returned in a
+# collection, with no limit set by default. This global limit
+# may be then overridden for a specific driver, by specifying
+# a list_limit in the appropriate section (e.g. [assignment]).
+# (integer value)
+#list_limit=<None>
+
+# Set this to false if you want to enable the ability for
+# user, group and project entities to be moved between domains
+# by updating their domain_id. Allowing such movement is not
+# recommended if the scope of a domain admin is being
+# restricted by use of an appropriate policy file (see
+# policy.v3cloudsample as an example). (boolean value)
+#domain_id_immutable=true
+
+# If set to true, strict password length checking is performed
+# for password manipulation. If a password exceeds the maximum
+# length, the operation will fail with an HTTP 403 Forbidden
+# error. If set to false, passwords are automatically
+# truncated to the maximum length. (boolean value)
+#strict_password_check=false
+
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The number of prefetched messages held by receiver. (integer
+# value)
+#qpid_receiver_capacity=1
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+rabbit_host={{ RABBITMQ_HOST }}
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+rabbit_port={{ RABBITMQ_PORT }}
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+rabbit_userid={{ RABBITMQ_USER }}
+
+# The RabbitMQ password. (string value)
+rabbit_password={{ RABBITMQ_PASSWORD }}
+
+# the RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# If passed, use a fake RabbitMQ provider. (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=keystone
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+#notification_driver=
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=keystone
+
+
+#
+# Options defined in keystone.notifications
+#
+
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=<None>
+
+
+#
+# Options defined in keystone.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in keystone.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error. (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context. (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context.
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format.
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
+
+# Enables or disables publication of error events. (boolean
+# value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log
+# message. (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log
+# message. (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of a logging configuration file. This file is
+# appended to any existing logging configuration files. For
+# details about logging configuration files, see the Python
+# logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s . (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=<None>
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and will change in J to honor RFC5424. (boolean
+# value)
+use_syslog=True
+
+# (Optional) Enables or disables syslog rfc5424 format for
+# logging. If enabled, prefixes the MSG part of the syslog
+# message with APP-NAME (RFC5424). The format without the APP-
+# NAME is deprecated in I, and will be removed in J. (boolean
+# value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in keystone.openstack.common.policy
+#
+
+# The JSON file that defines policies. (string value)
+#policy_file=policy.json
+
+# Default rule. Enforced when a requested rule is not found.
+# (string value)
+#policy_default_rule=default
+
+
+[assignment]
+
+#
+# Options defined in keystone
+#
+
+# Assignment backend driver. (string value)
+#driver=<None>
+
+# Toggle for assignment caching. This has no effect unless
+# global caching is enabled. (boolean value)
+#caching=true
+
+# TTL (in seconds) to cache assignment data. This has no
+# effect unless global caching is enabled. (integer value)
+#cache_time=<None>
+
+# Maximum number of entities that will be returned in an
+# assignment collection. (integer value)
+#list_limit=<None>
+
+
+[auth]
+
+#
+# Options defined in keystone
+#
+
+# Default auth methods. (list value)
+#methods=external,password,token
+
+# The password auth plugin module. (string value)
+#password=keystone.auth.plugins.password.Password
+
+# The token auth plugin module. (string value)
+#token=keystone.auth.plugins.token.Token
+
+# The external (REMOTE_USER) auth plugin module. (string
+# value)
+#external=keystone.auth.plugins.external.DefaultDomain
+
+
+[cache]
+
+#
+# Options defined in keystone
+#
+
+# Prefix for building the configuration dictionary for the
+# cache region. This should not need to be changed unless
+# there is another dogpile.cache region with the same
+# configuration name. (string value)
+#config_prefix=cache.keystone
+
+# Default TTL, in seconds, for any cached item in the
+# dogpile.cache region. This applies to any cached method that
+# doesn't have an explicit cache expiration time defined for
+# it. (integer value)
+#expiration_time=600
+
+# Dogpile.cache backend module. It is recommended that
+# Memcache with pooling (keystone.cache.memcache_pool) or
+# Redis (dogpile.cache.redis) be used in production
+# deployments. Small workloads (single process) like devstack
+# can use the dogpile.cache.memory backend. (string value)
+#backend=keystone.common.cache.noop
+
+# Arguments supplied to the backend module. Specify this
+# option once per argument to be passed to the dogpile.cache
+# backend. Example format: "<argname>:<value>". (multi valued)
+#backend_argument=
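+# Illustrative only; the accepted argument names depend on the chosen
+# backend. For a memcached-style backend this might look like
+#backend_argument=url:127.0.0.1:11211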
+
+# Proxy classes to import that will affect the way the
+# dogpile.cache backend functions. See the dogpile.cache
+# documentation on changing-backend-behavior. (list value)
+#proxies=
+
+# Global toggle for all caching using the should_cache_fn
+# mechanism. (boolean value)
+#enabled=false
+
+# Extra debugging from the cache backend (cache keys,
+# get/set/delete/etc calls). This is only really useful if you
+# need to see the specific cache-backend get/set/delete calls
+# with the keys/values. Typically this should be left set to
+# false. (boolean value)
+#debug_cache_backend=false
+
+# Memcache servers in the format of "host:port".
+# (dogpile.cache.memcache and keystone.cache.memcache_pool
+# backends only) (list value)
+#memcache_servers=localhost:11211
+
+# Number of seconds memcached server is considered dead before
+# it is tried again. (dogpile.cache.memcache and
+# keystone.cache.memcache_pool backends only) (integer value)
+#memcache_dead_retry=300
+
+# Timeout in seconds for every call to a server.
+# (dogpile.cache.memcache and keystone.cache.memcache_pool
+# backends only) (integer value)
+#memcache_socket_timeout=3
+
+# Max total number of open connections to every memcached
+# server. (keystone.cache.memcache_pool backend only) (integer
+# value)
+#memcache_pool_maxsize=10
+
+# Number of seconds a connection to memcached is held unused
+# in the pool before it is closed.
+# (keystone.cache.memcache_pool backend only) (integer value)
+#memcache_pool_unused_timeout=60
+
+# Number of seconds that an operation will wait to get a
+# memcache client connection. (integer value)
+#memcache_pool_connection_get_timeout=10
+
+
+[catalog]
+
+#
+# Options defined in keystone
+#
+
+# Catalog template file name for use with the template catalog
+# backend. (string value)
+#template_file=default_catalog.templates
+
+# Catalog backend driver. (string value)
+#driver=keystone.catalog.backends.sql.Catalog
+
+# Toggle for catalog caching. This has no effect unless global
+# caching is enabled. (boolean value)
+#caching=true
+
+# Time to cache catalog data (in seconds). This has no effect
+# unless global and catalog caching are enabled. (integer
+# value)
+#cache_time=<None>
+
+# Maximum number of entities that will be returned in a
+# catalog collection. (integer value)
+#list_limit=<None>
+
+# (Deprecated) List of possible substitutions for use in
+# formatting endpoints. Use caution when modifying this list.
+# It will give users with permission to create endpoints the
+# ability to see those values in your configuration file. This
+# option will be removed in Juno. (list value)
+#endpoint_substitution_whitelist=tenant_id,user_id,public_bind_host,admin_bind_host,compute_host,compute_port,admin_port,public_port,public_endpoint,admin_endpoint
+
+
+[credential]
+
+#
+# Options defined in keystone
+#
+
+# Credential backend driver. (string value)
+#driver=keystone.credential.backends.sql.Credential
+
+
+[database]
+
+#
+# Options defined in oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+#sqlite_db=oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous=true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the
+# database. (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection=<None>
+connection=postgresql://{{ KEYSTONE_DB_USER }}:{{ KEYSTONE_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/keystone
+
+# The SQLAlchemy connection string to use to connect to the
+# slave database. (string value)
+#slave_connection=<None>
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum db connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a SQL connection.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information: 0=None,
+# 100=Everything. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add Python stack traces to SQL as comment strings. (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+# Enable the experimental use of database reconnect on
+# connection lost. (boolean value)
+#use_db_reconnect=false
+
+# Seconds between database connection retries. (integer value)
+#db_retry_interval=1
+
+# If True, increases the interval between database connection
+# retries up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval=true
+
+# If db_inc_retry_interval is set, the maximum seconds between
+# database connection retries. (integer value)
+#db_max_retry_interval=10
+
+# Maximum database connection retries before error is raised.
+# Set to -1 to specify an infinite retry count. (integer
+# value)
+#db_max_retries=20
+
+
+[ec2]
+
+#
+# Options defined in keystone
+#
+
+# EC2Credential backend driver. (string value)
+#driver=keystone.contrib.ec2.backends.kvs.Ec2
+
+
+[endpoint_filter]
+
+#
+# Options defined in keystone
+#
+
+# Endpoint Filter backend driver (string value)
+#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter
+
+# Toggle to return all active endpoints if no filter exists.
+# (boolean value)
+#return_all_endpoints_if_no_filter=true
+
+
+[endpoint_policy]
+
+#
+# Options defined in keystone
+#
+
+# Endpoint policy backend driver (string value)
+#driver=keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy
+
+
+[federation]
+
+#
+# Options defined in keystone
+#
+
+# Federation backend driver. (string value)
+#driver=keystone.contrib.federation.backends.sql.Federation
+
+# Value to be used when filtering assertion parameters from
+# the environment. (string value)
+#assertion_prefix=
+
+
+[identity]
+
+#
+# Options defined in keystone
+#
+
+# This references the domain to use for all Identity API v2
+# requests (which are not aware of domains). A domain with
+# this ID will be created for you by keystone-manage db_sync
+# in migration 008. The domain referenced by this ID cannot be
+# deleted on the v3 API, to prevent accidentally breaking the
+# v2 API. There is nothing special about this domain, other
+# than the fact that it must exist in order to maintain
+# support for your v2 clients. (string value)
+#default_domain_id=default
+
+# A subset (or all) of domains can have their own identity
+# driver, each with their own partial configuration file in a
+# domain configuration directory. Only values specific to the
+# domain need to be placed in the domain specific
+# configuration file. This feature is disabled by default; set
+# to true to enable. (boolean value)
+#domain_specific_drivers_enabled=false
+
+# Path for Keystone to locate the domain specific identity
+# configuration files if domain_specific_drivers_enabled is
+# set to true. (string value)
+#domain_config_dir=/etc/keystone/domains
+
+# Identity backend driver. (string value)
+#driver=keystone.identity.backends.sql.Identity
+
+# Maximum supported length for user passwords; decrease to
+# improve performance. (integer value)
+#max_password_length=4096
+
+# Maximum number of entities that will be returned in an
+# identity collection. (integer value)
+#list_limit=<None>
+
+
+[identity_mapping]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Identity Mapping backend driver. (string value)
+#driver=keystone.identity.mapping_backends.sql.Mapping
+
+# Public ID generator for user and group entities. The
+# Keystone identity mapper only supports generators that
+# produce no more than 64 characters. (string value)
+#generator=keystone.identity.id_generators.sha256.Generator
+
+# The format of user and group IDs changed in Juno for
+# backends that do not generate UUIDs (e.g. LDAP), with
+# keystone providing a hash mapping to the underlying
+# attribute in LDAP. By default this mapping is disabled,
+# which ensures that existing IDs will not change. Even when
+# the mapping is enabled by using domain specific drivers, any
+# users and groups from the default domain being handled by
+# LDAP will still not be mapped to ensure their IDs remain
+# backward compatible. Setting this value to False will enable
+# the mapping for even the default LDAP driver. It is only
+# safe to do this if you do not already have assignments for
+# users and groups from the default LDAP domain, and it is
+# acceptable for Keystone to provide the different IDs to
+# clients than it did previously. Typically this means that
+# the only time you can set this value to False is when
+# configuring a fresh installation. (boolean value)
+#backward_compatible_ids=true
+
+
+[kvs]
+
+#
+# Options defined in keystone
+#
+
+# Extra dogpile.cache backend modules to register with the
+# dogpile.cache library. (list value)
+#backends=
+
+# Prefix for building the configuration dictionary for the KVS
+# region. This should not need to be changed unless there is
+# another dogpile.cache region with the same configuration
+# name. (string value)
+#config_prefix=keystone.kvs
+
+# Toggle to disable using a key-mangling function to ensure
+# fixed length keys. This is toggle-able for debugging
+# purposes; it is highly recommended to always leave this set
+# to true. (boolean value)
+#enable_key_mangler=true
+
+# Default lock timeout for distributed locking. (integer
+# value)
+#default_lock_timeout=5
+
+
+[ldap]
+
+#
+# Options defined in keystone
+#
+
+# URL for connecting to the LDAP server. (string value)
+#url=ldap://localhost
+
+# User BindDN to query the LDAP server. (string value)
+#user=<None>
+
+# Password for the BindDN to query the LDAP server. (string
+# value)
+#password=<None>
+
+# LDAP server suffix (string value)
+#suffix=cn=example,cn=com
+
+# If true, will add a dummy member to groups. This is required
+# if the objectclass for groups requires the "member"
+# attribute. (boolean value)
+#use_dumb_member=false
+
+# DN of the "dummy member" to use when "use_dumb_member" is
+# enabled. (string value)
+#dumb_member=cn=dumb,dc=nonexistent
+
+# Delete subtrees using the subtree delete control. Only
+# enable this option if your LDAP server supports subtree
+# deletion. (boolean value)
+#allow_subtree_delete=false
+
+# The LDAP scope for queries. This can be either "one"
+# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree).
+# (string value)
+#query_scope=one
+
+# Maximum results per page; a value of zero ("0") disables
+# paging. (integer value)
+#page_size=0
+
+# The LDAP dereferencing option for queries. This can be
+# either "never", "searching", "always", "finding" or
+# "default". The "default" option falls back to using default
+# dereferencing configured by your ldap.conf. (string value)
+#alias_dereferencing=default
+
+# Sets the LDAP debugging level for LDAP calls. A value of 0
+# means that debugging is not enabled. This value is a
+# bitmask, consult your LDAP documentation for possible
+# values. (integer value)
+#debug_level=<None>
+
+# Override the system's default referral chasing behavior for
+# queries. (boolean value)
+#chase_referrals=<None>
+
+# Search base for users. (string value)
+#user_tree_dn=<None>
+
+# LDAP search filter for users. (string value)
+#user_filter=<None>
+
+# LDAP objectclass for users. (string value)
+#user_objectclass=inetOrgPerson
+
+# LDAP attribute mapped to user id. WARNING: must not be a
+# multivalued attribute. (string value)
+#user_id_attribute=cn
+
+# LDAP attribute mapped to user name. (string value)
+#user_name_attribute=sn
+
+# LDAP attribute mapped to user email. (string value)
+#user_mail_attribute=mail
+
+# LDAP attribute mapped to password. (string value)
+#user_pass_attribute=userPassword
+
+# LDAP attribute mapped to user enabled flag. (string value)
+#user_enabled_attribute=enabled
+
+# Invert the meaning of the boolean enabled values. Some LDAP
+# servers use a boolean lock attribute where "true" means an
+# account is disabled. Setting "user_enabled_invert = true"
+# will allow these lock attributes to be used. This setting
+# will have no effect if "user_enabled_mask" or
+# "user_enabled_emulation" settings are in use. (boolean
+# value)
+#user_enabled_invert=false
+
+# Bitmask integer to indicate the bit that the enabled value
+# is stored in if the LDAP server represents "enabled" as a
+# bit on an integer rather than a boolean. A value of "0"
+# indicates the mask is not used. If this is not set to "0"
+# the typical value is "2". This is typically used when
+# "user_enabled_attribute = userAccountControl". (integer
+# value)
+#user_enabled_mask=0
+
+# Default value to enable users. This should match an
+# appropriate int value if the LDAP server uses non-boolean
+# (bitmask) values to indicate if a user is enabled or
+# disabled. If this is not set to "True" the typical value is
+# "512". This is typically used when "user_enabled_attribute =
+# userAccountControl". (string value)
+#user_enabled_default=True
+
+# List of attributes stripped off the user on update. (list
+# value)
+#user_attribute_ignore=default_project_id,tenants
+
+# LDAP attribute mapped to default_project_id for users.
+# (string value)
+#user_default_project_id_attribute=<None>
+
+# Allow user creation in LDAP backend. (boolean value)
+#user_allow_create=true
+
+# Allow user updates in LDAP backend. (boolean value)
+#user_allow_update=true
+
+# Allow user deletion in LDAP backend. (boolean value)
+#user_allow_delete=true
+
+# If true, Keystone uses an alternative method to determine if
+# a user is enabled or not by checking if they are a member of
+# the "user_enabled_emulation_dn" group. (boolean value)
+#user_enabled_emulation=false
+
+# DN of the group entry to hold enabled users when using
+# enabled emulation. (string value)
+#user_enabled_emulation_dn=<None>
+
+# List of additional LDAP attributes used for mapping
+# additional attribute mappings for users. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#user_additional_attribute_mapping=
+
+# Search base for projects (string value)
+# Deprecated group/name - [ldap]/tenant_tree_dn
+#project_tree_dn=<None>
+
+# LDAP search filter for projects. (string value)
+# Deprecated group/name - [ldap]/tenant_filter
+#project_filter=<None>
+
+# LDAP objectclass for projects. (string value)
+# Deprecated group/name - [ldap]/tenant_objectclass
+#project_objectclass=groupOfNames
+
+# LDAP attribute mapped to project id. (string value)
+# Deprecated group/name - [ldap]/tenant_id_attribute
+#project_id_attribute=cn
+
+# LDAP attribute mapped to project membership for user.
+# (string value)
+# Deprecated group/name - [ldap]/tenant_member_attribute
+#project_member_attribute=member
+
+# LDAP attribute mapped to project name. (string value)
+# Deprecated group/name - [ldap]/tenant_name_attribute
+#project_name_attribute=ou
+
+# LDAP attribute mapped to project description. (string value)
+# Deprecated group/name - [ldap]/tenant_desc_attribute
+#project_desc_attribute=description
+
+# LDAP attribute mapped to project enabled. (string value)
+# Deprecated group/name - [ldap]/tenant_enabled_attribute
+#project_enabled_attribute=enabled
+
+# LDAP attribute mapped to project domain_id. (string value)
+# Deprecated group/name - [ldap]/tenant_domain_id_attribute
+#project_domain_id_attribute=businessCategory
+
+# List of attributes stripped off the project on update. (list
+# value)
+# Deprecated group/name - [ldap]/tenant_attribute_ignore
+#project_attribute_ignore=
+
+# Allow project creation in LDAP backend. (boolean value)
+# Deprecated group/name - [ldap]/tenant_allow_create
+#project_allow_create=true
+
+# Allow project update in LDAP backend. (boolean value)
+# Deprecated group/name - [ldap]/tenant_allow_update
+#project_allow_update=true
+
+# Allow project deletion in LDAP backend. (boolean value)
+# Deprecated group/name - [ldap]/tenant_allow_delete
+#project_allow_delete=true
+
+# If true, Keystone uses an alternative method to determine if
+# a project is enabled or not by checking if it is a member
+# of the "project_enabled_emulation_dn" group. (boolean value)
+# Deprecated group/name - [ldap]/tenant_enabled_emulation
+#project_enabled_emulation=false
+
+# DN of the group entry to hold enabled projects when using
+# enabled emulation. (string value)
+# Deprecated group/name - [ldap]/tenant_enabled_emulation_dn
+#project_enabled_emulation_dn=<None>
+
+# Additional attribute mappings for projects. Attribute
+# mapping format is <ldap_attr>:<user_attr>, where ldap_attr
+# is the attribute in the LDAP entry and user_attr is the
+# Identity API attribute. (list value)
+# Deprecated group/name - [ldap]/tenant_additional_attribute_mapping
+#project_additional_attribute_mapping=
+
+# Search base for roles. (string value)
+#role_tree_dn=<None>
+
+# LDAP search filter for roles. (string value)
+#role_filter=<None>
+
+# LDAP objectclass for roles. (string value)
+#role_objectclass=organizationalRole
+
+# LDAP attribute mapped to role id. (string value)
+#role_id_attribute=cn
+
+# LDAP attribute mapped to role name. (string value)
+#role_name_attribute=ou
+
+# LDAP attribute mapped to role membership. (string value)
+#role_member_attribute=roleOccupant
+
+# List of attributes stripped off the role on update. (list
+# value)
+#role_attribute_ignore=
+
+# Allow role creation in LDAP backend. (boolean value)
+#role_allow_create=true
+
+# Allow role update in LDAP backend. (boolean value)
+#role_allow_update=true
+
+# Allow role deletion in LDAP backend. (boolean value)
+#role_allow_delete=true
+
+# Additional attribute mappings for roles. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#role_additional_attribute_mapping=
+
+# Search base for groups. (string value)
+#group_tree_dn=<None>
+
+# LDAP search filter for groups. (string value)
+#group_filter=<None>
+
+# LDAP objectclass for groups. (string value)
+#group_objectclass=groupOfNames
+
+# LDAP attribute mapped to group id. (string value)
+#group_id_attribute=cn
+
+# LDAP attribute mapped to group name. (string value)
+#group_name_attribute=ou
+
+# LDAP attribute mapped to show group membership. (string
+# value)
+#group_member_attribute=member
+
+# LDAP attribute mapped to group description. (string value)
+#group_desc_attribute=description
+
+# List of attributes stripped off the group on update. (list
+# value)
+#group_attribute_ignore=
+
+# Allow group creation in LDAP backend. (boolean value)
+#group_allow_create=true
+
+# Allow group update in LDAP backend. (boolean value)
+#group_allow_update=true
+
+# Allow group deletion in LDAP backend. (boolean value)
+#group_allow_delete=true
+
+# Additional attribute mappings for groups. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#group_additional_attribute_mapping=
+
+# CA certificate file path for communicating with LDAP
+# servers. (string value)
+#tls_cacertfile=<None>
+
+# CA certificate directory path for communicating with LDAP
+# servers. (string value)
+#tls_cacertdir=<None>
+
+# Enable TLS for communicating with LDAP servers. (boolean
+# value)
+#use_tls=false
+
+# Valid options for tls_req_cert are demand, never, and allow.
+# (string value)
+#tls_req_cert=demand
+
+# Enable LDAP connection pooling. (boolean value)
+#use_pool=false
+
+# Connection pool size. (integer value)
+#pool_size=10
+
+# Maximum count of reconnect trials. (integer value)
+#pool_retry_max=3
+
+# Time span in seconds to wait between two reconnect trials.
+# (floating point value)
+#pool_retry_delay=0.1
+
+# Connector timeout in seconds. Value -1 indicates indefinite
+# wait for response. (integer value)
+#pool_connection_timeout=-1
+
+# Connection lifetime in seconds. (integer value)
+#pool_connection_lifetime=600
+
+# Enable LDAP connection pooling for end user authentication.
+# If use_pool is disabled, then this setting is meaningless
+# and is not used at all. (boolean value)
+#use_auth_pool=false
+
+# End user auth connection pool size. (integer value)
+#auth_pool_size=100
+
+# End user auth connection lifetime in seconds. (integer
+# value)
+#auth_pool_connection_lifetime=60
+
+
+[matchmaker_redis]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[memcache]
+
+#
+# Options defined in keystone
+#
+
+# Memcache servers in the format of "host:port". (list value)
+#servers=localhost:11211
+
+# Number of seconds memcached server is considered dead before
+# it is tried again. This is used by the key value store
+# system (e.g. token pooled memcached persistence backend).
+# (integer value)
+#dead_retry=300
+
+# Timeout in seconds for every call to a server. This is used
+# by the key value store system (e.g. token pooled memcached
+# persistence backend). (integer value)
+#socket_timeout=3
+
+# Max total number of open connections to every memcached
+# server. This is used by the key value store system (e.g.
+# token pooled memcached persistence backend). (integer value)
+#pool_maxsize=10
+
+# Number of seconds a connection to memcached is held unused
+# in the pool before it is closed. This is used by the key
+# value store system (e.g. token pooled memcached persistence
+# backend). (integer value)
+#pool_unused_timeout=60
+
+# Number of seconds that an operation will wait to get a
+# memcache client connection. This is used by the key value
+# store system (e.g. token pooled memcached persistence
+# backend). (integer value)
+#pool_connection_get_timeout=10
+
+
+[oauth1]
+
+#
+# Options defined in keystone
+#
+
+# Credential backend driver. (string value)
+#driver=keystone.contrib.oauth1.backends.sql.OAuth1
+
+# Duration (in seconds) for the OAuth Request Token. (integer
+# value)
+#request_token_duration=28800
+
+# Duration (in seconds) for the OAuth Access Token. (integer
+# value)
+#access_token_duration=86400
+
+
+[os_inherit]
+
+#
+# Options defined in keystone
+#
+
+# Role-assignment inheritance to projects from the owning domain
+# can be optionally enabled. (boolean value)
+#enabled=false
+
+
+[paste_deploy]
+
+#
+# Options defined in keystone
+#
+
+# Name of the paste configuration file that defines the
+# available pipelines. (string value)
+#config_file=keystone-paste.ini
+
+
+[policy]
+
+#
+# Options defined in keystone
+#
+
+# Policy backend driver. (string value)
+#driver=keystone.policy.backends.sql.Policy
+
+# Maximum number of entities that will be returned in a policy
+# collection. (integer value)
+#list_limit=<None>
+
+
+[revoke]
+
+#
+# Options defined in keystone
+#
+
+# An implementation of the backend for persisting revocation
+# events. (string value)
+#driver=keystone.contrib.revoke.backends.kvs.Revoke
+
+# This value (calculated in seconds) is added to token
+# expiration before a revocation event may be removed from the
+# backend. (integer value)
+#expiration_buffer=1800
+
+# Toggle for revocation event caching. This has no effect
+# unless global caching is enabled. (boolean value)
+#caching=true
+
+
+[saml]
+
+#
+# Options defined in keystone
+#
+
+# Default TTL, in seconds, for any generated SAML assertion
+# created by Keystone. (integer value)
+#assertion_expiration_time=3600
+
+# Binary to be called for XML signing. Install the appropriate
+# package, specify absolute path or adjust your PATH
+# environment variable if the binary cannot be found. (string
+# value)
+#xmlsec1_binary=xmlsec1
+
+# Path of the certfile for SAML signing. For non-production
+# environments, you may be interested in using `keystone-
+# manage pki_setup` to generate self-signed certificates.
+# Note, the path cannot contain a comma. (string value)
+#certfile=/etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for SAML signing. Note, the path cannot
+# contain a comma. (string value)
+#keyfile=/etc/keystone/ssl/private/signing_key.pem
+
+# Entity ID value for unique Identity Provider identification.
+# Usually FQDN is set with a suffix. A value is required to
+# generate IDP Metadata. For example:
+# https://keystone.example.com/v3/OS-FEDERATION/saml2/idp
+# (string value)
+#idp_entity_id=<None>
+
+# Identity Provider Single-Sign-On service value, required in
+# the Identity Provider's metadata. A value is required to
+# generate IDP Metadata. For example:
+# https://keystone.example.com/v3/OS-FEDERATION/saml2/sso
+# (string value)
+#idp_sso_endpoint=<None>
+
+# Language used by the organization. (string value)
+#idp_lang=en
+
+# Organization name the installation belongs to. (string
+# value)
+#idp_organization_name=<None>
+
+# Organization name to be displayed. (string value)
+#idp_organization_display_name=<None>
+
+# URL of the organization. (string value)
+#idp_organization_url=<None>
+
+# Company of contact person. (string value)
+#idp_contact_company=<None>
+
+# Given name of contact person (string value)
+#idp_contact_name=<None>
+
+# Surname of contact person. (string value)
+#idp_contact_surname=<None>
+
+# Email address of contact person. (string value)
+#idp_contact_email=<None>
+
+# Telephone number of contact person. (string value)
+#idp_contact_telephone=<None>
+
+# Contact type. Allowed values are: technical, support,
+# administrative, billing and other. (string value)
+#idp_contact_type=other
+
+# Path to the Identity Provider Metadata file. This file
+# should be generated with the keystone-manage
+# saml_idp_metadata command. (string value)
+#idp_metadata_path=/etc/keystone/saml2_idp_metadata.xml
+
+
+[signing]
+
+#
+# Options defined in keystone
+#
+
+# Deprecated in favor of provider in the [token] section.
+# (string value)
+#token_format=<None>
+
+# Path of the certfile for token signing. For non-production
+# environments, you may be interested in using `keystone-
+# manage pki_setup` to generate self-signed certificates.
+# (string value)
+#certfile=/etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for token signing. (string value)
+#keyfile=/etc/keystone/ssl/private/signing_key.pem
+
+# Path of the CA for token signing. (string value)
+#ca_certs=/etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA key for token signing. (string value)
+#ca_key=/etc/keystone/ssl/private/cakey.pem
+
+# Key size (in bits) for token signing cert (auto generated
+# certificate). (integer value)
+#key_size=2048
+
+# Days the token signing cert is valid for (auto generated
+# certificate). (integer value)
+#valid_days=3650
+
+# Certificate subject (auto generated certificate) for token
+# signing. (string value)
+#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com
+
+
+[ssl]
+
+#
+# Options defined in keystone
+#
+
+# Toggle for SSL support on the Keystone eventlet servers.
+# (boolean value)
+#enable=false
+
+# Path of the certfile for SSL. For non-production
+# environments, you may be interested in using `keystone-
+# manage ssl_setup` to generate self-signed certificates.
+# (string value)
+#certfile=/etc/keystone/ssl/certs/keystone.pem
+
+# Path of the keyfile for SSL. (string value)
+#keyfile=/etc/keystone/ssl/private/keystonekey.pem
+
+# Path of the ca cert file for SSL. (string value)
+#ca_certs=/etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA key file for SSL. (string value)
+#ca_key=/etc/keystone/ssl/private/cakey.pem
+
+# Require client certificate. (boolean value)
+#cert_required=false
+
+# SSL key length (in bits) (auto generated certificate).
+# (integer value)
+#key_size=1024
+
+# Days the certificate is valid for once signed (auto
+# generated certificate). (integer value)
+#valid_days=3650
+
+# SSL certificate subject (auto generated certificate).
+# (string value)
+#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost
+
+
+[stats]
+
+#
+# Options defined in keystone
+#
+
+# Stats backend driver. (string value)
+#driver=keystone.contrib.stats.backends.kvs.Stats
+
+
+[token]
+
+#
+# Options defined in keystone
+#
+
+# External auth mechanisms that should add bind information to
+# token, e.g., kerberos,x509. (list value)
+#bind=
+
+# Enforcement policy on tokens presented to Keystone with bind
+# information. One of disabled, permissive, strict, required
+# or a specifically required bind mode, e.g., kerberos or x509
+# to require binding to that authentication. (string value)
+#enforce_token_bind=permissive
+
+# Amount of time a token should remain valid (in seconds).
+# (integer value)
+#expiration=3600
+
+# Controls the token construction, validation, and revocation
+# operations. Core providers are
+# "keystone.token.providers.[pkiz|pki|uuid].Provider". The
+# default provider is pkiz. (string value)
+provider=keystone.token.providers.uuid.Provider
+
+# Token persistence backend driver. (string value)
+driver=keystone.token.backends.sql.Token
+
+# Toggle for token system caching. This has no effect unless
+# global caching is enabled. (boolean value)
+#caching=true
+
+# Time to cache the revocation list and the revocation events
+# if revoke extension is enabled (in seconds). This has no
+# effect unless global and token caching are enabled. (integer
+# value)
+#revocation_cache_time=3600
+
+# Time to cache tokens (in seconds). This has no effect unless
+# global and token caching are enabled. (integer value)
+#cache_time=<None>
+
+# Revoke token by token identifier. Setting revoke_by_id to
+# true enables various forms of enumerating tokens, e.g. `list
+# tokens for user`. These enumerations are processed to
+# determine the list of tokens to revoke. Only disable if you
+# are switching to using the Revoke extension with a backend
+# other than KVS, which stores events in memory. (boolean
+# value)
+#revoke_by_id=true
+
+# The hash algorithm to use for PKI tokens. This can be set to
+# any algorithm that hashlib supports. WARNING: Before
+# changing this value, the auth_token middleware must be
+# configured with the hash_algorithms, otherwise token
+# revocation will not be processed correctly. (string value)
+#hash_algorithm=md5
+
+
+[trust]
+
+#
+# Options defined in keystone
+#
+
+# Delegation and impersonation features can be optionally
+# disabled. (boolean value)
+#enabled=true
+
+# Trust backend driver. (string value)
+#driver=keystone.trust.backends.sql.Trust
+
+
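The keystone.conf added above is a template rather than a finished configuration: the {{ ... }} placeholders (KEYSTONE_DB_USER, KEYSTONE_DB_PASSWORD, CONTROLLER_HOST_ADDRESS) are filled in at deploy time by Ansible's template module, which renders Jinja2. A minimal sketch of that rendering, with a stand-in variable dict and illustrative paths rather than the exact playbook:

    # Minimal sketch of how the {{ ... }} placeholders in keystone.conf are
    # rendered at deploy time. Ansible's `template` module does this with
    # Jinja2; the dict below stands in for the variables the playbooks read
    # from /etc/openstack (values here are placeholders, not real secrets).
    from jinja2 import Template

    variables = {
        "KEYSTONE_DB_USER": "keystone",
        "KEYSTONE_DB_PASSWORD": "example-password",
        "CONTROLLER_HOST_ADDRESS": "192.0.2.10",
    }

    with open("/usr/share/openstack/keystone/keystone.conf") as f:
        rendered = Template(f.read()).render(**variables)

    # The [database] connection line now reads, for example:
    # connection=postgresql://keystone:example-password@192.0.2.10/keystone
    with open("/etc/keystone/keystone.conf", "w") as f:
        f.write(rendered)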
diff --git a/openstack/usr/share/openstack/keystone/logging.conf b/openstack/usr/share/openstack/keystone/logging.conf
new file mode 100644
index 00000000..6cb8c425
--- /dev/null
+++ b/openstack/usr/share/openstack/keystone/logging.conf
@@ -0,0 +1,65 @@
+[loggers]
+keys=root,access
+
+[handlers]
+keys=production,file,access_file,devel
+
+[formatters]
+keys=minimal,normal,debug
+
+
+###########
+# Loggers #
+###########
+
+[logger_root]
+level=WARNING
+handlers=file
+
+[logger_access]
+level=INFO
+qualname=access
+handlers=access_file
+
+
+################
+# Log Handlers #
+################
+
+[handler_production]
+class=handlers.SysLogHandler
+level=ERROR
+formatter=normal
+args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)
+
+[handler_file]
+class=handlers.WatchedFileHandler
+level=WARNING
+formatter=normal
+args=('error.log',)
+
+[handler_access_file]
+class=handlers.WatchedFileHandler
+level=INFO
+formatter=minimal
+args=('access.log',)
+
+[handler_devel]
+class=StreamHandler
+level=NOTSET
+formatter=debug
+args=(sys.stdout,)
+
+
+##################
+# Log Formatters #
+##################
+
+[formatter_minimal]
+format=%(message)s
+
+[formatter_normal]
+format=(%(name)s): %(asctime)s %(levelname)s %(message)s
+
+[formatter_debug]
+format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s
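This logging.conf is in the standard-library fileConfig format: the dedicated 'access' logger writes INFO records through access_file to access.log, while everything else propagates to the root logger and ends up in error.log. A minimal sketch of loading it with the standard library (the path is illustrative; keystone is normally pointed at the installed copy through its log configuration option):

    # Minimal sketch of loading the fileConfig-style logging.conf above.
    import logging
    import logging.config

    logging.config.fileConfig(
        "/etc/keystone/logging.conf",
        disable_existing_loggers=False,
    )

    logging.getLogger(__name__).warning("goes to error.log via the root logger")
    logging.getLogger("access").info("goes to access.log via the 'access' logger")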
diff --git a/openstack/usr/share/openstack/keystone/policy.json b/openstack/usr/share/openstack/keystone/policy.json
new file mode 100644
index 00000000..af65205e
--- /dev/null
+++ b/openstack/usr/share/openstack/keystone/policy.json
@@ -0,0 +1,171 @@
+{
+ "admin_required": "role:admin or is_admin:1",
+ "service_role": "role:service",
+ "service_or_admin": "rule:admin_required or rule:service_role",
+ "owner" : "user_id:%(user_id)s",
+ "admin_or_owner": "rule:admin_required or rule:owner",
+
+ "default": "rule:admin_required",
+
+ "identity:get_region": "",
+ "identity:list_regions": "",
+ "identity:create_region": "rule:admin_required",
+ "identity:update_region": "rule:admin_required",
+ "identity:delete_region": "rule:admin_required",
+
+ "identity:get_service": "rule:admin_required",
+ "identity:list_services": "rule:admin_required",
+ "identity:create_service": "rule:admin_required",
+ "identity:update_service": "rule:admin_required",
+ "identity:delete_service": "rule:admin_required",
+
+ "identity:get_endpoint": "rule:admin_required",
+ "identity:list_endpoints": "rule:admin_required",
+ "identity:create_endpoint": "rule:admin_required",
+ "identity:update_endpoint": "rule:admin_required",
+ "identity:delete_endpoint": "rule:admin_required",
+
+ "identity:get_domain": "rule:admin_required",
+ "identity:list_domains": "rule:admin_required",
+ "identity:create_domain": "rule:admin_required",
+ "identity:update_domain": "rule:admin_required",
+ "identity:delete_domain": "rule:admin_required",
+
+ "identity:get_project": "rule:admin_required",
+ "identity:list_projects": "rule:admin_required",
+ "identity:list_user_projects": "rule:admin_or_owner",
+ "identity:create_project": "rule:admin_required",
+ "identity:update_project": "rule:admin_required",
+ "identity:delete_project": "rule:admin_required",
+
+ "identity:get_user": "rule:admin_required",
+ "identity:list_users": "rule:admin_required",
+ "identity:create_user": "rule:admin_required",
+ "identity:update_user": "rule:admin_required",
+ "identity:delete_user": "rule:admin_required",
+ "identity:change_password": "rule:admin_or_owner",
+
+ "identity:get_group": "rule:admin_required",
+ "identity:list_groups": "rule:admin_required",
+ "identity:list_groups_for_user": "rule:admin_or_owner",
+ "identity:create_group": "rule:admin_required",
+ "identity:update_group": "rule:admin_required",
+ "identity:delete_group": "rule:admin_required",
+ "identity:list_users_in_group": "rule:admin_required",
+ "identity:remove_user_from_group": "rule:admin_required",
+ "identity:check_user_in_group": "rule:admin_required",
+ "identity:add_user_to_group": "rule:admin_required",
+
+ "identity:get_credential": "rule:admin_required",
+ "identity:list_credentials": "rule:admin_required",
+ "identity:create_credential": "rule:admin_required",
+ "identity:update_credential": "rule:admin_required",
+ "identity:delete_credential": "rule:admin_required",
+
+ "identity:ec2_get_credential": "rule:admin_or_owner",
+ "identity:ec2_list_credentials": "rule:admin_or_owner",
+ "identity:ec2_create_credential": "rule:admin_or_owner",
+ "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
+
+ "identity:get_role": "rule:admin_required",
+ "identity:list_roles": "rule:admin_required",
+ "identity:create_role": "rule:admin_required",
+ "identity:update_role": "rule:admin_required",
+ "identity:delete_role": "rule:admin_required",
+
+ "identity:check_grant": "rule:admin_required",
+ "identity:list_grants": "rule:admin_required",
+ "identity:create_grant": "rule:admin_required",
+ "identity:revoke_grant": "rule:admin_required",
+
+ "identity:list_role_assignments": "rule:admin_required",
+
+ "identity:get_policy": "rule:admin_required",
+ "identity:list_policies": "rule:admin_required",
+ "identity:create_policy": "rule:admin_required",
+ "identity:update_policy": "rule:admin_required",
+ "identity:delete_policy": "rule:admin_required",
+
+ "identity:check_token": "rule:admin_required",
+ "identity:validate_token": "rule:service_or_admin",
+ "identity:validate_token_head": "rule:service_or_admin",
+ "identity:revocation_list": "rule:service_or_admin",
+ "identity:revoke_token": "rule:admin_or_owner",
+
+ "identity:create_trust": "user_id:%(trust.trustor_user_id)s",
+ "identity:get_trust": "rule:admin_or_owner",
+ "identity:list_trusts": "",
+ "identity:list_roles_for_trust": "",
+ "identity:check_role_for_trust": "",
+ "identity:get_role_for_trust": "",
+ "identity:delete_trust": "",
+
+ "identity:create_consumer": "rule:admin_required",
+ "identity:get_consumer": "rule:admin_required",
+ "identity:list_consumers": "rule:admin_required",
+ "identity:delete_consumer": "rule:admin_required",
+ "identity:update_consumer": "rule:admin_required",
+
+ "identity:authorize_request_token": "rule:admin_required",
+ "identity:list_access_token_roles": "rule:admin_required",
+ "identity:get_access_token_role": "rule:admin_required",
+ "identity:list_access_tokens": "rule:admin_required",
+ "identity:get_access_token": "rule:admin_required",
+ "identity:delete_access_token": "rule:admin_required",
+
+ "identity:list_projects_for_endpoint": "rule:admin_required",
+ "identity:add_endpoint_to_project": "rule:admin_required",
+ "identity:check_endpoint_in_project": "rule:admin_required",
+ "identity:list_endpoints_for_project": "rule:admin_required",
+ "identity:remove_endpoint_from_project": "rule:admin_required",
+
+ "identity:create_endpoint_group": "rule:admin_required",
+ "identity:list_endpoint_groups": "rule:admin_required",
+ "identity:get_endpoint_group": "rule:admin_required",
+ "identity:update_endpoint_group": "rule:admin_required",
+ "identity:delete_endpoint_group": "rule:admin_required",
+ "identity:list_projects_associated_with_endpoint_group": "rule:admin_required",
+ "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required",
+ "identity:list_endpoint_groups_for_project": "rule:admin_required",
+ "identity:add_endpoint_group_to_project": "rule:admin_required",
+ "identity:remove_endpoint_group_from_project": "rule:admin_required",
+
+ "identity:create_identity_provider": "rule:admin_required",
+ "identity:list_identity_providers": "rule:admin_required",
+ "identity:get_identity_providers": "rule:admin_required",
+ "identity:update_identity_provider": "rule:admin_required",
+ "identity:delete_identity_provider": "rule:admin_required",
+
+ "identity:create_protocol": "rule:admin_required",
+ "identity:update_protocol": "rule:admin_required",
+ "identity:get_protocol": "rule:admin_required",
+ "identity:list_protocols": "rule:admin_required",
+ "identity:delete_protocol": "rule:admin_required",
+
+ "identity:create_mapping": "rule:admin_required",
+ "identity:get_mapping": "rule:admin_required",
+ "identity:list_mappings": "rule:admin_required",
+ "identity:delete_mapping": "rule:admin_required",
+ "identity:update_mapping": "rule:admin_required",
+
+ "identity:get_auth_catalog": "",
+ "identity:get_auth_projects": "",
+ "identity:get_auth_domains": "",
+
+ "identity:list_projects_for_groups": "",
+ "identity:list_domains_for_groups": "",
+
+ "identity:list_revoke_events": "",
+
+ "identity:create_policy_association_for_endpoint": "rule:admin_required",
+ "identity:check_policy_association_for_endpoint": "rule:admin_required",
+ "identity:delete_policy_association_for_endpoint": "rule:admin_required",
+ "identity:create_policy_association_for_service": "rule:admin_required",
+ "identity:check_policy_association_for_service": "rule:admin_required",
+ "identity:delete_policy_association_for_service": "rule:admin_required",
+ "identity:create_policy_association_for_region_and_service": "rule:admin_required",
+ "identity:check_policy_association_for_region_and_service": "rule:admin_required",
+ "identity:delete_policy_association_for_region_and_service": "rule:admin_required",
+ "identity:get_policy_for_endpoint": "rule:admin_required",
+ "identity:list_endpoints_for_policy": "rule:admin_required"
+}
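The policy rules above compose by reference: "admin_or_owner" is "rule:admin_required or rule:owner", and "owner" compares the caller's user_id with the target's. Keystone evaluates these with the oslo policy engine; the pure-Python sketch below only illustrates how the three rules behind identity:change_password combine, it is not the real evaluator:

    # Simplified, hand-rolled illustration of how the rules above compose;
    # the real evaluation is done by the policy engine inside keystone.
    def admin_required(creds, target):
        return "admin" in creds.get("roles", []) or creds.get("is_admin") == 1

    def owner(creds, target):
        # "user_id:%(user_id)s" -- the caller must be the user the call targets.
        return creds.get("user_id") == target.get("user_id")

    def admin_or_owner(creds, target):
        # "rule:admin_required or rule:owner"
        return admin_required(creds, target) or owner(creds, target)

    # "identity:change_password": "rule:admin_or_owner"
    creds = {"user_id": "u123", "roles": ["Member"]}
    target = {"user_id": "u123"}
    assert admin_or_owner(creds, target)  # a user may change their own password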
diff --git a/openstack/usr/share/openstack/network.yml b/openstack/usr/share/openstack/network.yml
new file mode 100644
index 00000000..f99f7f1a
--- /dev/null
+++ b/openstack/usr/share/openstack/network.yml
@@ -0,0 +1,67 @@
+---
+- hosts: localhost
+ vars_files:
+ - /etc/openstack/network.conf
+ tasks:
+# Create the bridge used for the external network mapping
+
+# Count number of network interfaces (interfaces starting with 'e')
+ - shell: ls /sys/class/net | grep ^e.* | wc -l
+ register: number_interfaces
+
+# Abort if the number of interfaces != 1
+ - fail:
+      msg: Found more than one network interface, or none.
+ when: EXTERNAL_INTERFACE is not defined and number_interfaces.stdout != "1"
+
+ - shell: ls /sys/class/net | grep ^e.*
+ register: interface_name
+ when: EXTERNAL_INTERFACE is not defined
+
+ - set_fact:
+ ETH_INTERFACE: "{{ interface_name.stdout }}"
+ when: EXTERNAL_INTERFACE is not defined
+
+ - set_fact:
+ ETH_INTERFACE: "{{ EXTERNAL_INTERFACE }}"
+ when: EXTERNAL_INTERFACE is defined
+
+ - set_fact:
+ ETH_MAC_ADDRESS: "{{ hostvars['localhost']['ansible_' + ETH_INTERFACE]['macaddress'] }}"
+
+ - name: Create the /run/systemd/network
+ file:
+ path: /run/systemd/network
+ state: directory
+
+ - name: Disable dhcp on the bound physical interface
+ template:
+ src: /usr/share/openstack/extras/00-disable-device.network
+ dest: /run/systemd/network/00-disable-{{ item }}-config.network
+ with_items:
+ - "{{ ETH_INTERFACE }}"
+
+ - name: Disable dhcp on all the internal interfaces
+ template:
+ src: /usr/share/openstack/extras/00-disable-device.network
+ dest: /run/systemd/network/00-disable-{{ item }}-config.network
+ with_items:
+ - ovs-system
+
+ - openvswitch_bridge:
+ bridge: br-ex
+ state: present
+
+ - openvswitch_port:
+ bridge: br-ex
+ port: "{{ ETH_INTERFACE }}"
+ state: present
+
+ - shell: ovs-vsctl set bridge br-ex other-config:hwaddr={{ ETH_MAC_ADDRESS }}
+
+ - name: Enable dhcp on the Open vSwitch device that replaces our external interface
+ template:
+ src: /usr/share/openstack/extras/60-device-dhcp.network
+ dest: /run/systemd/network/60-{{ item }}-dhcp.network
+ with_items:
+ - br-ex
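network.yml puts the single physical NIC on an Open vSwitch bridge (br-ex), copies the NIC's MAC address onto the bridge, and moves DHCP from the NIC to br-ex so the host keeps its previous address. A rough manual equivalent of the bridge steps, driven from Python with example values (the playbook discovers the interface name and MAC at run time):

    # Rough manual equivalent of the Open vSwitch steps in network.yml.
    # The interface name and MAC address below are examples; the playbook
    # derives the real values from /sys/class/net and Ansible facts.
    import subprocess

    iface = "eth0"             # example; detected as ETH_INTERFACE above
    mac = "52:54:00:12:34:56"  # example; detected as ETH_MAC_ADDRESS above

    subprocess.check_call(["ovs-vsctl", "--may-exist", "add-br", "br-ex"])
    subprocess.check_call(["ovs-vsctl", "--may-exist", "add-port", "br-ex", iface])
    # Give br-ex the NIC's MAC so DHCP on the bridge renews the same lease.
    subprocess.check_call(
        ["ovs-vsctl", "set", "bridge", "br-ex",
         "other-config:hwaddr={}".format(mac)])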
diff --git a/openstack/usr/share/openstack/neutron-config.yml b/openstack/usr/share/openstack/neutron-config.yml
new file mode 100644
index 00000000..97f4c76e
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron-config.yml
@@ -0,0 +1,48 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/neutron.conf"
+ tasks:
+
+ - name: Create the neutron user.
+ user:
+ name: neutron
+ comment: Openstack Neutron Daemons
+ shell: /sbin/nologin
+ home: /var/lib/neutron
+
+ - name: Create the /var folders for neutron
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: neutron
+ group: neutron
+ with_items:
+ - /var/run/neutron
+ - /var/lock/neutron
+ - /var/log/neutron
+
+ - name: Get service tenant id needed in neutron.conf
+ shell: |
+ keystone \
+ --os-endpoint http://{{ CONTROLLER_HOST_ADDRESS|quote }}:35357/v2.0 \
+ --os-token {{ KEYSTONE_TEMPORARY_ADMIN_TOKEN|quote }} \
+ tenant-get service | grep id | tr -d " " | cut -d"|" -f3
+ register: tenant_service_id
+
+ - set_fact:
+ SERVICE_TENANT_ID: "{{ tenant_service_id.stdout }}"
+
+ - name: Create the directories needed for Neutron configuration files.
+ file:
+ path: /etc/{{ item }}
+ state: directory
+ with_lines:
+ - cd /usr/share/openstack && find neutron -type d
+
+ - name: Add configuration needed for neutron using templates
+ template:
+ src: /usr/share/openstack/{{ item }}
+ dest: /etc/{{ item }}
+ with_lines:
+ - cd /usr/share/openstack && find neutron -type f
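The last two tasks mirror the shipped neutron/ configuration tree from /usr/share/openstack into /etc, rendering every file as a template on the way. A minimal sketch of the same mirroring, assuming plain Jinja2 rendering with a variable dict is an acceptable stand-in for the Ansible template module:

    # Minimal sketch of what the last two tasks do: walk the shipped
    # neutron/ tree, create matching directories under /etc, and render
    # each file through Jinja2. `variables` stands in for the vars_files.
    import os
    from jinja2 import Template

    SRC = "/usr/share/openstack"
    variables = {"CONTROLLER_HOST_ADDRESS": "192.0.2.10"}  # example values only

    for root, dirs, files in os.walk(os.path.join(SRC, "neutron")):
        rel = os.path.relpath(root, SRC)              # e.g. neutron/plugins/ml2
        os.makedirs(os.path.join("/etc", rel), exist_ok=True)
        for name in files:
            with open(os.path.join(root, name)) as f:
                rendered = Template(f.read()).render(**variables)
            with open(os.path.join("/etc", rel, name), "w") as f:
                f.write(rendered)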
diff --git a/openstack/usr/share/openstack/neutron-db.yml b/openstack/usr/share/openstack/neutron-db.yml
new file mode 100644
index 00000000..91dde6fe
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron-db.yml
@@ -0,0 +1,51 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/neutron.conf"
+ tasks:
+ - name: Create neutron service user in service tenant
+ keystone_user:
+ user: "{{ NEUTRON_SERVICE_USER }}"
+ password: "{{ NEUTRON_SERVICE_PASSWORD }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Add admin role to neutron service user in service tenant
+ keystone_user:
+ role: admin
+ user: "{{ NEUTRON_SERVICE_USER }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - keystone_service:
+ name: neutron
+ type: network
+ description: Openstack Compute Networking
+ publicurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9696
+ internalurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9696
+ adminurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9696
+ region: regionOne
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Create postgresql user for neutron
+ postgresql_user:
+ name: "{{ NEUTRON_DB_USER }}"
+ password: "{{ NEUTRON_DB_PASSWORD }}"
+ sudo: yes
+ sudo_user: neutron
+
+ - name: Create database for neutron services
+ postgresql_db:
+ name: neutron
+ owner: "{{ NEUTRON_DB_USER }}"
+ sudo: yes
+ sudo_user: neutron
+
+ - name: Initiate neutron database
+ shell: |
+ neutron-db-manage \
+ --config-file /etc/neutron/neutron.conf \
+ --config-file /etc/neutron/plugins/ml2/ml2_conf.ini \
+ upgrade juno
+ sudo: yes
+ sudo_user: neutron
diff --git a/openstack/usr/share/openstack/neutron/api-paste.ini b/openstack/usr/share/openstack/neutron/api-paste.ini
new file mode 100644
index 00000000..bbcd4152
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/api-paste.ini
@@ -0,0 +1,30 @@
+[composite:neutron]
+use = egg:Paste#urlmap
+/: neutronversions
+/v2.0: neutronapi_v2_0
+
+[composite:neutronapi_v2_0]
+use = call:neutron.auth:pipeline_factory
+noauth = request_id catch_errors extensions neutronapiapp_v2_0
+keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0
+
+[filter:request_id]
+paste.filter_factory = neutron.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:catch_errors]
+paste.filter_factory = neutron.openstack.common.middleware.catch_errors:CatchErrorsMiddleware.factory
+
+[filter:keystonecontext]
+paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
+[filter:extensions]
+paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
+
+[app:neutronversions]
+paste.app_factory = neutron.api.versions:Versions.factory
+
+[app:neutronapiapp_v2_0]
+paste.app_factory = neutron.api.v2.router:APIRouter.factory
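The "keystone" pipeline in api-paste.ini is an ordinary WSGI filter chain: each named filter wraps the next application, and the request finally reaches the v2.0 API router. A toy sketch of that composition in generic WSGI terms (not the real Paste Deploy loader or the real neutron middleware):

    # Toy illustration of how a paste pipeline composes: each filter factory
    # returns a middleware wrapping the next app, so the keystone pipeline
    # becomes request_id(catch_errors(authtoken(...(neutronapiapp)))).
    def make_filter(name):
        def middleware_factory(next_app):
            def middleware(environ, start_response):
                environ.setdefault("trace", []).append(name)  # pretend work
                return next_app(environ, start_response)
            return middleware
        return middleware_factory

    def neutronapiapp(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"handled by APIRouter"]

    pipeline = ["request_id", "catch_errors", "authtoken",
                "keystonecontext", "extensions"]

    app = neutronapiapp
    for name in reversed(pipeline):  # wrap from the inside out
        app = make_filter(name)(app)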
diff --git a/openstack/usr/share/openstack/neutron/dhcp_agent.ini b/openstack/usr/share/openstack/neutron/dhcp_agent.ini
new file mode 100644
index 00000000..c6c2b9a7
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/dhcp_agent.ini
@@ -0,0 +1,89 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+use_syslog = True
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+# resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IP (Must have a kernel built with CONFIG_NET_NS=y and an
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+enable_isolated_metadata = True
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
+# is connected to a Neutron router from which the VMs send metadata
+# requests. In this case DHCP Option 121 will not be injected into VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+# enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+# dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
+
+# Limit number of leases to prevent a denial-of-service.
+# dnsmasq_lease_max = 16777216
+
+# Location to DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/openstack/usr/share/openstack/neutron/fwaas_driver.ini b/openstack/usr/share/openstack/neutron/fwaas_driver.ini
new file mode 100644
index 00000000..41f761ab
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/fwaas_driver.ini
@@ -0,0 +1,3 @@
+[fwaas]
+#driver = neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
+#enabled = True
diff --git a/openstack/usr/share/openstack/neutron/l3_agent.ini b/openstack/usr/share/openstack/neutron/l3_agent.ini
new file mode 100644
index 00000000..000cd997
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/l3_agent.ini
@@ -0,0 +1,103 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+use_syslog = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IP (Must have a kernel built with CONFIG_NET_NS=y and an
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set to False then the agent can only configure one router.
+
+# This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow the L3 agent to support multiple external
+# networks, both the external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured. This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+# handle_internal_only_routers = True
+
+# Name of bridge used for external network traffic. This should be set to
+# an empty value for the Linux bridge. When this parameter is set, each L3 agent
+# can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+# metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
+# to disable this feature.
+# send_arp_for_ha = 3
+
+# seconds between re-sync routers' data if needed
+# periodic_interval = 40
+
+# seconds to start to sync routers' data after
+# starting agent
+# periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
+
+# The working mode for the agent. Allowed values are:
+# - legacy: this preserves the existing behavior where the L3 agent is
+# deployed on a centralized networking node to provide L3 services
+# like DNAT, and SNAT. Use this mode if you do not want to adopt DVR.
+# - dvr: this mode enables DVR functionality, and must be used for an L3
+# agent that runs on a compute host.
+# - dvr_snat: this enables centralized SNAT support in conjunction with
+# DVR. This mode must be used for an L3 agent running on a centralized
+# node (or in single-host deployments, e.g. devstack).
+# agent_mode = legacy
+
+# Location to store keepalived and all HA configurations
+# ha_confs_path = $state_path/ha_confs
+
+# VRRP authentication type AH/PASS
+# ha_vrrp_auth_type = PASS
+
+# VRRP authentication password
+# ha_vrrp_auth_password =
+
+# The advertisement interval in seconds
+# ha_vrrp_advert_int = 2
diff --git a/openstack/usr/share/openstack/neutron/lbaas_agent.ini b/openstack/usr/share/openstack/neutron/lbaas_agent.ini
new file mode 100644
index 00000000..68a2759e
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/lbaas_agent.ini
@@ -0,0 +1,42 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output).
+# debug = False
+
+# The LBaaS agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+# periodic_interval = 10
+
+# LBaaS requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent requires drivers to manage the load balancer. HAProxy is the open-source version.
+# Multiple device drivers reflecting different service providers could be specified:
+# device_driver = path.to.provider1.driver.Driver
+# device_driver = path.to.provider2.driver.Driver
+# Default is:
+# device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver
+
+[haproxy]
+# Location to store config and state files
+# loadbalancer_state_path = $state_path/lbaas
+
+# The user group
+# user_group = nogroup
+
+# When deleting and re-adding the same VIP, send this many gratuitous ARPs to flush
+# the ARP cache in the Router. Set it below or equal to 0 to disable this feature.
+# send_gratuitous_arp = 3
diff --git a/openstack/usr/share/openstack/neutron/metadata_agent.ini b/openstack/usr/share/openstack/neutron/metadata_agent.ini
new file mode 100644
index 00000000..ed238770
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/metadata_agent.ini
@@ -0,0 +1,60 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = True
+use_syslog = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+auth_region = regionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = service
+admin_user = {{ NEUTRON_SERVICE_USER }}
+admin_password = {{ NEUTRON_SERVICE_PASSWORD }}
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+nova_metadata_ip = {{ CONTROLLER_HOST_ADDRESS }}
+
+# TCP Port used by Nova metadata server
+# nova_metadata_port = 8775
+
+# Which protocol to use for requests to Nova metadata server, http or https
+# nova_metadata_protocol = http
+
+# Whether insecure SSL connection should be accepted for Nova metadata server
+# requests
+# nova_metadata_insecure = False
+
+# Client certificate for nova api, needed when nova api requires client
+# certificates
+# nova_client_cert =
+
+# Private key for nova client certificate
+# nova_client_priv_key =
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_PROXY_SHARED_SECRET }}
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server. Defaults to
+# half the number of CPU cores
+# metadata_workers =
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 4096
+
+# URL to connect to the cache backend.
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url = memory://?default_ttl=5
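metadata_proxy_shared_secret is what lets nova trust proxied metadata requests: the proxy sends an HMAC of the instance ID computed with this secret, and nova recomputes it with its matching neutron_metadata_proxy_shared_secret. A sketch of that signature (the header names and exact flow are an assumption here; the essential point is that the two configured secrets must match):

    # Sketch of the signature that metadata_proxy_shared_secret protects.
    # The proxy sends an HMAC of the instance ID alongside the request and
    # nova recomputes it with its own copy of the secret (its option is
    # named neutron_metadata_proxy_shared_secret). Exact header names and
    # flow are an assumption; the secrets on both sides must be identical.
    import hashlib
    import hmac

    shared_secret = b"example-shared-secret"  # {{ METADATA_PROXY_SHARED_SECRET }}
    instance_id = b"instance-uuid-example"

    signature = hmac.new(shared_secret, instance_id, hashlib.sha256).hexdigest()
    # Nova accepts the request only if its own HMAC over the same instance ID,
    # computed with neutron_metadata_proxy_shared_secret, equals `signature`.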
diff --git a/openstack/usr/share/openstack/neutron/metering_agent.ini b/openstack/usr/share/openstack/neutron/metering_agent.ini
new file mode 100644
index 00000000..88826ce7
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/metering_agent.ini
@@ -0,0 +1,18 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = True
+
+# Default driver:
+# driver = neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver
+# Example of non-default driver
+# driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver
+
+# Interval between two metering measures
+# measure_interval = 30
+
+# Interval between two metering reports
+# report_interval = 300
+
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# use_namespaces = True
diff --git a/openstack/usr/share/openstack/neutron/neutron.conf b/openstack/usr/share/openstack/neutron/neutron.conf
new file mode 100644
index 00000000..51de7464
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/neutron.conf
@@ -0,0 +1,640 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+# verbose = False
+
+# =========Start Global Config Option for Distributed L3 Router===============
+# Setting the "router_distributed" flag to "True" will default to the creation
+# of distributed tenant routers. The admin can override this flag by specifying
+# the type of the router on the create request (admin-only attribute). Default
+# value is "False" to support legacy mode (centralized) routers.
+#
+# router_distributed = False
+#
+# ===========End Global Config Option for Distributed L3 Router===============
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+# debug = False
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+use_syslog = True
+
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+# log_dir =
+
+# publish_errors = False
+
+# Address to bind the API server to
+# bind_host = 0.0.0.0
+
+# Port to bind the API server to
+# bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+service_plugins = router
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# DVR Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be randomly
+# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
+# avoid mixing them up with MACs allocated for tenant ports.
+# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
+# The default is 3 octet
+# dvr_base_mac = fa:16:3f:00:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds). Use -1 to
+# tell dnsmasq to use infinite lease times.
+# dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet. For IPv6, validate only if
+# gateway is not a link local address. Deprecated, to be removed during the
+# K release, at which point the check will be mandatory.
+# force_gateway_on_subnet = True
+
+# Default maximum number of items returned in a single response.
+# A value of 'infinite' or a negative value means no maximum limit;
+# otherwise the value must be greater than 0. If the number of items
+# requested is greater than pagination_max_limit, the server returns
+# at most pagination_max_limit items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# Maximum number of routes per router
+# max_routes = 30
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+# agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling of networks to the DHCP agent. Non-hosted networks
+# will be scheduled to the first DHCP agent which sends a get_active_networks
+# message to the neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling of routers to the L3 agent. Non-hosted routers will be
+# scheduled to the first L3 agent which sends a sync_routers message to the neutron server
+# router_auto_schedule = True
+
+# Allow automatic rescheduling of routers from dead L3 agents with
+# admin_state_up set to True to alive agents.
+# allow_automatic_l3agent_failover = False
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== items for l3 extension ==============
+# Enable high availability for virtual routers.
+# l3_ha = False
+#
+# Maximum number of l3 agents which a HA router will be scheduled on. If it
+# is set to 0 the router will be scheduled on every agent.
+# max_l3_agents_per_router = 3
+#
+# Minimum number of l3 agents which a HA router will be scheduled on. The
+# default value is 2.
+# min_l3_agents_per_router = 2
+#
+# CIDR of the administrative network if HA mode is enabled
+# l3_ha_net_cidr = 169.254.192.0/18
+# =========== end of items for l3 extension =======
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+# api_workers = 0
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+# rpc_workers = 0
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ CONTROLLER_HOST_ADDRESS }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = regionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = {{ NOVA_SERVICE_USER }}
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ SERVICE_TENANT_ID }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_SERVICE_PASSWORD }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+# CA file for novaclient to verify server certificates
+# nova_ca_certificates_file =
+
+# Boolean to control ignoring SSL errors on the nova url
+# nova_api_insecure = False
+
+# Number of seconds between sending events to nova if there are any events to send
+# send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). Valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+rabbit_host={{ RABBITMQ_HOST }}
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+rabbit_port={{ RABBITMQ_PORT }}
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+rabbit_userid={{ RABBITMQ_USER }}
+
+# The RabbitMQ password. (string value)
+rabbit_password={{ RABBITMQ_PASSWORD }}
+
+# the RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# If passed, use a fake RabbitMQ provider. (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=oslo
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+notification_driver=neutron.openstack.common.notifier.rpc_notifier
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+[matchmaker_redis]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+[quotas]
+# Default driver to use for quota checks
+# quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+# quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+# default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+# quota_network = 10
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+# quota_subnet = 10
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+# quota_port = 50
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+# quota_security_group = 10
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+# quota_security_group_rule = 100
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitor = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+# Number of firewalls allowed per tenant. A negative value means unlimited.
+# quota_firewall = 1
+
+# Number of firewall policies allowed per tenant. A negative value means
+# unlimited.
+# quota_firewall_policy = 1
+
+# Number of firewall rules allowed per tenant. A negative value means
+# unlimited.
+# quota_firewall_rule = 100
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+# root_helper = sudo
+root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
+
+# =========== items for agent management extension =============
+# Seconds between nodes reporting state to the server; should be less than
+# agent_down_time, ideally half of agent_down_time or less
+# report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357
+admin_tenant_name = service
+admin_user = {{ NEUTRON_SERVICE_USER }}
+admin_password = {{ NEUTRON_SERVICE_PASSWORD }}
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite://
+# NOTE: In deployment the [database] section and its connection attribute may
+# be set in the corresponding core plugin '.ini' file. However, it is suggested
+# to put the [database] section and its connection attribute in this
+# configuration file.
+#connection=sqlite:////var/lib/neutron/neutron.sqlite
+connection=postgresql://{{ NEUTRON_DB_USER }}:{{ NEUTRON_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/neutron
+
+# Database engine for which script will be generated when using offline
+# migration
+# engine =
+
+# The SQLAlchemy connection string used to connect to the slave database
+# slave_connection =
+
+# Number of database reconnection retries in the event connectivity is lost;
+# setting it to -1 implies an infinite retry count
+# max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+# retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+# min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# max_pool_size = 10
+
+# Timeout in seconds before idle sql connections are reaped
+# idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+# max_overflow = 20
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+# connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+# connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in the form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# The list of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# The combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option; example for a default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the line below.
+# If you want to keep HAProxy as the default lbaas driver, remove the ':default' attribute from the line below.
+# Otherwise, comment out the HAProxy line.
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
+# Uncomment the line below to use the A10 Networks LBaaS driver. Requires 'pip install a10-neutron-lbaas'.
+#service_provider = LOADBALANCER:A10Networks:neutron.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default
+# Uncomment the following line to test the LBaaS v2 API _WITHOUT_ a real backend
+# service_provider = LOADBALANCER:LoggingNoop:neutron.services.loadbalancer.drivers.logging_noop.driver.LoggingNoopLoadBalancerDriver:default
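The {{ ... }} placeholders in this file are template variables substituted at deployment time. Purely as an illustration, with invented values, the messaging and database lines above would render to something like:

    # Illustrative rendered values only -- the real ones come from the
    # deployment configuration, not from this patch.
    rabbit_host=192.0.2.10
    rabbit_port=5672
    rabbit_userid=rabbitmq
    rabbit_password=insecure-example
    connection=postgresql://neutron:insecure-example@192.0.2.10/neutron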
diff --git a/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini b/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini
new file mode 100644
index 00000000..256f7855
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini
@@ -0,0 +1,114 @@
+# Config file for neutron-proxy-plugin.
+
+[restproxy]
+# All configuration for this plugin is in section '[restproxy]'
+#
+# The following parameters are supported:
+# servers : <host:port>[,<host:port>]* (Error if not set)
+# server_auth : <username:password> (default: no auth)
+# server_ssl : True | False (default: True)
+# ssl_cert_directory : <path> (default: /etc/neutron/plugins/bigswitch/ssl)
+# no_ssl_validation : True | False (default: False)
+# ssl_sticky : True | False (default: True)
+# sync_data : True | False (default: False)
+# auto_sync_on_failure : True | False (default: True)
+# consistency_interval : <integer> (default: 60 seconds)
+# server_timeout : <integer> (default: 10 seconds)
+# neutron_id : <string> (default: neutron-<hostname>)
+# add_meta_server_route : True | False (default: True)
+# thread_pool_size : <int> (default: 4)
+
+# A comma separated list of BigSwitch or Floodlight servers and port numbers. The plugin proxies the requests to the BigSwitch/Floodlight server, which performs the networking configuration. Note that only one server is needed per deployment, but you may wish to deploy multiple servers to support failover.
+servers=localhost:8080
+
+# The username and password for authenticating against the BigSwitch or Floodlight controller.
+# server_auth=username:password
+
+# Use SSL when connecting to the BigSwitch or Floodlight controller.
+# server_ssl=True
+
+# Directory which contains the ca_certs and host_certs to be used to validate
+# controller certificates.
+# ssl_cert_directory=/etc/neutron/plugins/bigswitch/ssl/
+
+# If a certificate does not exist for a controller, trust and store the first
+# certificate received for that controller and use it to validate future
+# connections to that controller.
+# ssl_sticky=True
+
+# Do not validate the controller certificates for SSL
+# Warning: This will not provide protection against man-in-the-middle attacks
+# no_ssl_validation=False
+
+# Sync data on connect
+# sync_data=False
+
+# If neutron fails to create a resource because the backend controller
+# doesn't know of a dependency, automatically trigger a full data
+# synchronization to the controller.
+# auto_sync_on_failure=True
+
+# Time between verifications that the backend controller
+# database is consistent with Neutron. (0 to disable)
+# consistency_interval = 60
+
+# Maximum number of seconds to wait for proxy request to connect and complete.
+# server_timeout=10
+
+# User defined identifier for this Neutron deployment
+# neutron_id =
+
+# Flag to decide if a route to the metadata server should be injected into the VM
+# add_meta_server_route = True
+
+# Number of threads to use to handle large volumes of port creation requests
+# thread_pool_size = 4
+
+[nova]
+# Specify the VIF_TYPE that will be controlled on the Nova compute instances
+# options: ivs or ovs
+# default: ovs
+# vif_type = ovs
+
+# Overrides for vif types based on nova compute node host IDs
+# Comma separated list of host IDs to fix to a specific VIF type
+# The VIF type is taken from the end of the configuration item
+# node_override_vif_<vif_type>
+# For example, the following would set the VIF type to IVS for
+# host-id1 and host-id2
+# node_override_vif_ivs=host-id1,host-id2
+
+[router]
+# Specify the default router rules installed in newly created tenant routers
+# Specify multiple times for multiple rules
+# Format is <tenant>:<source>:<destination>:<action>
+# Optionally, a comma-separated list of nexthops may be included after <action>
+# Use an * to specify default for all tenants
+# Default is any any allow for all tenants
+# tenant_default_router_rule=*:any:any:permit
+
+# Maximum number of rules that a single router may have
+# Default is 200
+# max_router_rules=200
+
+[restproxyagent]
+
+# Specify the name of the bridge used on compute nodes
+# for attachment.
+# Default: br-int
+# integration_bridge=br-int
+
+# Change the frequency of polling by the restproxy agent.
+# Value is seconds
+# Default: 5
+# polling_interval=5
+
+# Virtual switch type on the compute node.
+# Options: ovs or ivs
+# Default: ovs
+# virtual_switch_type = ovs
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
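To make the failover note above concrete, a deployment with two controllers could list both in 'servers'; the addresses and credentials below are invented for illustration:

    [restproxy]
    # Two controllers listed for failover, as described in the comment above.
    servers = 192.0.2.21:8080,192.0.2.22:8080
    server_auth = admin:example-password
    server_ssl = True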
diff --git a/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README b/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README
new file mode 100644
index 00000000..e7e47a27
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README
@@ -0,0 +1,3 @@
+Certificates in this folder will be used to
+verify signatures for any controllers the plugin
+connects to.
diff --git a/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README b/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README
new file mode 100644
index 00000000..8f5f5e77
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README
@@ -0,0 +1,6 @@
+Certificates in this folder must match the name
+of the controller they should be used to authenticate
+with a .pem extension.
+
+For example, the certificate for the controller
+"192.168.0.1" should be named "192.168.0.1.pem".
diff --git a/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini b/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini
new file mode 100644
index 00000000..916e9e5d
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini
@@ -0,0 +1,29 @@
+[switch]
+# username = The SSH username to use
+# password = The SSH password to use
+# address = The address of the host to SSH to
+# ostype = Should be NOS, but is unused otherwise
+#
+# Example:
+# username = admin
+# password = password
+# address = 10.24.84.38
+# ostype = NOS
+
+[physical_interface]
+# physical_interface = The network interface to use when creating a port
+#
+# Example:
+# physical_interface = physnet1
+
+[vlans]
+# network_vlan_ranges = <physical network name>:nnnn:mmmm
+#
+# Example:
+# network_vlan_ranges = physnet1:1000:2999
+
+[linux_bridge]
+# physical_interface_mappings = <physical network name>:<local interface>
+#
+# Example:
+# physical_interface_mappings = physnet1:em1
diff --git a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini b/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini
new file mode 100644
index 00000000..d99e8382
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini
@@ -0,0 +1,15 @@
+[cfg_agent]
+# (IntOpt) Interval in seconds for processing of service updates.
+# That is when the config agent's process_services() loop executes
+# and it lets each service helper process its service resources.
+# rpc_loop_interval = 10
+
+# (StrOpt) Period-separated module path to the routing service helper class.
+# routing_svc_helper_class = neutron.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper.RoutingServiceHelper
+
+# (IntOpt) Timeout value in seconds for connecting to a hosting device.
+# device_connection_timeout = 30
+
+# (IntOpt) The time in seconds until a backlogged hosting device is
+# presumed dead or booted to an error state.
+# hosting_device_dead_timeout = 300
diff --git a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini b/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini
new file mode 100644
index 00000000..17eae737
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini
@@ -0,0 +1,100 @@
+[cisco]
+
+# (StrOpt) A short prefix to prepend to the VLAN number when creating a
+# VLAN interface. For example, if an interface is being created for
+# VLAN 2001 it will be named 'q-2001' using the default prefix.
+#
+# vlan_name_prefix = q-
+# Example: vlan_name_prefix = vnet-
+
+# (StrOpt) A short prefix to prepend to the VLAN number when creating a
+# provider VLAN interface. For example, if an interface is being created
+# for provider VLAN 3003 it will be named 'p-3003' using the default prefix.
+#
+# provider_vlan_name_prefix = p-
+# Example: provider_vlan_name_prefix = PV-
+
+# (BoolOpt) A flag indicating whether Openstack networking should manage the
+# creation and removal of VLAN interfaces for provider networks on the Nexus
+# switches. If the flag is set to False then Openstack will not create or
+# remove VLAN interfaces for provider networks, and the administrator needs
+# to manage these interfaces manually or by external orchestration.
+#
+# provider_vlan_auto_create = True
+
+# (BoolOpt) A flag indicating whether Openstack networking should manage
+# the adding and removing of provider VLANs from trunk ports on the Nexus
+# switches. If the flag is set to False then Openstack will not add or
+# remove provider VLANs from trunk ports, and the administrator needs to
+# manage these operations manually or by external orchestration.
+#
+# provider_vlan_auto_trunk = True
+
+# (StrOpt) Period-separated module path to the model class to use for
+# the Cisco neutron plugin.
+#
+# model_class = neutron.plugins.cisco.models.virt_phy_sw_v2.VirtualPhysicalSwitchModelV2
+
+# (BoolOpt) A flag to enable Layer 3 support on the Nexus switches.
+# Note: This feature is not supported on all models/versions of Cisco
+# Nexus switches. To use this feature, all of the Nexus switches in the
+# deployment must support it.
+# nexus_l3_enable = False
+
+# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
+# svi_round_robin = False
+
+# Cisco Nexus Switch configurations.
+# Each switch to be managed by Openstack Neutron must be configured here.
+#
+# N1KV Format.
+# [N1KV:<IP address of VSM>]
+# username=<credential username>
+# password=<credential password>
+#
+# Example:
+# [N1KV:2.2.2.2]
+# username=admin
+# password=mySecretPassword
+
+[cisco_n1k]
+
+# (StrOpt) Specify the name of the integration bridge to which the VIFs are
+# attached.
+# Default value: br-int
+# integration_bridge = br-int
+
+# (StrOpt) Name of the policy profile to be associated with a port when no
+# policy profile is specified during port creates.
+# Default value: service_profile
+# default_policy_profile = service_profile
+
+# (StrOpt) Name of the policy profile to be associated with a port owned by
+# network node (dhcp, router).
+# Default value: dhcp_pp
+# network_node_policy_profile = dhcp_pp
+
+# (StrOpt) Name of the network profile to be associated with a network when no
+# network profile is specified during network creates. Admin should pre-create
+# a network profile with this name.
+# Default value: default_network_profile
+# default_network_profile = network_pool
+
+# (IntOpt) Time in seconds for which the plugin polls the VSM for updates in
+# policy profiles.
+# Default value: 60
+# poll_duration = 60
+
+# (BoolOpt) Specify whether tenants are restricted from accessing all the
+# policy profiles.
+# Default value: False, indicating all tenants can access all policy profiles.
+#
+# restrict_policy_profiles = False
+
+# (IntOpt) Number of threads to use to make HTTP requests to the VSM.
+# Default value: 4
+# http_pool_size = 4
+
+# (IntOpt) Timeout duration in seconds for the http request
+# Default value: 15
+# http_timeout = 15
diff --git a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini b/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini
new file mode 100644
index 00000000..3ef271d2
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini
@@ -0,0 +1,76 @@
+[general]
+#(IntOpt) Time in seconds between renewed scheduling attempts of non-scheduled routers
+# backlog_processing_interval = 10
+
+#(StrOpt) Name of the L3 admin tenant
+# l3_admin_tenant = L3AdminTenant
+
+#(StrOpt) Name of management network for hosting device configuration
+# management_network = osn_mgmt_nw
+
+#(StrOpt) Default security group applied on management port
+# default_security_group = mgmt_sec_grp
+
+#(IntOpt) Seconds of no status update until a cfg agent is considered down
+# cfg_agent_down_time = 60
+
+#(StrOpt) Path to templates for hosting devices
+# templates_path = /opt/stack/data/neutron/cisco/templates
+
+#(StrOpt) Path to config drive files for service VM instances
+# service_vm_config_path = /opt/stack/data/neutron/cisco/config_drive
+
+#(BoolOpt) Ensure that Nova is running before attempting to create any VM
+# ensure_nova_running = True
+
+[hosting_devices]
+# Settings coupled to CSR1kv VM devices
+# -------------------------------------
+#(StrOpt) Name of Glance image for CSR1kv
+# csr1kv_image = csr1kv_openstack_img
+
+#(StrOpt) UUID of Nova flavor for CSR1kv
+# csr1kv_flavor = 621
+
+#(StrOpt) Plugging driver for CSR1kv
+# csr1kv_plugging_driver = neutron.plugins.cisco.l3.plugging_drivers.n1kv_trunking_driver.N1kvTrunkingPlugDriver
+
+#(StrOpt) Hosting device driver for CSR1kv
+# csr1kv_device_driver = neutron.plugins.cisco.l3.hosting_device_drivers.csr1kv_hd_driver.CSR1kvHostingDeviceDriver
+
+#(StrOpt) Config agent router service driver for CSR1kv
+# csr1kv_cfgagent_router_driver = neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv.csr1kv_routing_driver.CSR1kvRoutingDriver
+
+#(StrOpt) Configdrive template file for CSR1kv
+# csr1kv_configdrive_template = csr1kv_cfg_template
+
+#(IntOpt) Booting time in seconds before a CSR1kv becomes operational
+# csr1kv_booting_time = 420
+
+#(StrOpt) Username to use for CSR1kv configurations
+# csr1kv_username = stack
+
+#(StrOpt) Password to use for CSR1kv configurations
+# csr1kv_password = cisco
+
+[n1kv]
+# Settings coupled to inter-working with N1kv plugin
+# --------------------------------------------------
+#(StrOpt) Name of N1kv port profile for management ports
+# management_port_profile = osn_mgmt_pp
+
+#(StrOpt) Name of N1kv port profile for T1 ports (i.e., ports carrying traffic
+# from VXLAN segmented networks).
+# t1_port_profile = osn_t1_pp
+
+#(StrOpt) Name of N1kv port profile for T2 ports (i.e., ports carrying traffic
+# from VLAN segmented networks).
+# t2_port_profile = osn_t2_pp
+
+#(StrOpt) Name of N1kv network profile for T1 networks (i.e., trunk networks
+# for VXLAN segmented traffic).
+# t1_network_profile = osn_t1_np
+
+#(StrOpt) Name of N1kv network profile for T2 networks (i.e., trunk networks
+# for VLAN segmented traffic).
+# t2_network_profile = osn_t2_np
diff --git a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini b/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini
new file mode 100644
index 00000000..0aee17eb
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini
@@ -0,0 +1,26 @@
+[cisco_csr_ipsec]
+# Status check interval in seconds, for VPNaaS IPSec connections used on CSR
+# status_check_interval = 60
+
+# Cisco CSR management port information for REST access used by VPNaaS
+# TODO(pcm): Remove once CSR is integrated in as a Neutron router.
+#
+# Format is:
+# [cisco_csr_rest:<public IP>]
+# rest_mgmt = <mgmt port IP>
+# tunnel_ip = <tunnel IP>
+# username = <user>
+# password = <password>
+# timeout = <timeout>
+# host = <hostname>
+# tunnel_if = <tunnel I/F>
+#
+# where:
+# public IP ----- Public IP address of router used with a VPN service (1:1 with CSR)
+# tunnel IP ----- Public IP address of the CSR used for the IPSec tunnel
+# mgmt port IP -- IP address of CSR for REST API access
+# user ---------- Username for REST management port access to Cisco CSR
+# password ------ Password for REST management port access to Cisco CSR
+# timeout ------- REST request timeout to Cisco CSR (optional)
+# hostname ------ Name of host where CSR is running as a VM
+# tunnel I/F ---- CSR port name used for tunnels' IP address
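Assembling the per-router block described above into a single hypothetical entry (every value below is invented for illustration):

    [cisco_csr_rest:192.0.2.30]
    rest_mgmt = 10.0.0.5
    tunnel_ip = 192.0.2.31
    username = csr-admin
    password = example-password
    timeout = 30
    host = compute-1.example.com
    tunnel_if = GigabitEthernet2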
diff --git a/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini b/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini
new file mode 100644
index 00000000..0ca9b46f
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini
@@ -0,0 +1,41 @@
+[heleos]
+#configure the ESM management address
+#in the first version of this plugin, only one ESM can be specified
+#Example:
+#esm_mgmt=
+
+#configure admin username and password
+#admin_username=
+#admin_password=
+
+#router image id
+#Example:
+#router_image=932ce713-e210-3d54-a0a5-518b0b5ee1b0
+
+#mgmt shared security zone id
+#defines the shared management security zone. Each tenant can have a private one configured through the ESM
+#Example:
+#mgmt_id=c0bc9b6c-f110-46cf-bb01-733bfe4b5a1a
+
+#in-band shared security zone id
+#defines the shared in-band security zone. Each tenant can have a private one configured through the ESM
+#Example:
+#inband_id=a6b7999d-3806-4b04-81f6-e0c5c8271afc
+
+#oob-band shared security zone id
+#defines the shared out-of-band security zone. Each tenant can have a private one configured through the ESM
+#Example:
+#oob_id=e7eda5cc-b977-46cb-9c14-cab43c1b7871
+
+#dummy security zone id
+#defines the dummy security zone ID. this security zone will be used by the DVAs with no neutron interfaces
+#Example:
+#dummy_utif_id=d9911310-25fc-4733-a2e0-c0eda024ef08
+
+#resource pool id
+#define the shared resource pool. Each tenant can have a private one configured through the ESM
+#Example
+#resource_pool_id=
+
+#define if the requests have to be executed asynchronously by the plugin or not
+#async_requests=
diff --git a/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini b/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini
new file mode 100644
index 00000000..5eeec570
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini
@@ -0,0 +1,63 @@
+[hyperv]
+# (StrOpt) Type of network to allocate for tenant networks. The
+# default value 'local' is useful only for single-box testing and
+# provides no connectivity between hosts. You MUST change this either
+# to 'vlan' (and configure network_vlan_ranges below) or to 'flat'.
+# Set to 'none' to disable creation of tenant networks.
+#
+# tenant_network_type = local
+# Example: tenant_network_type = vlan
+
+# (ListOpt) Comma-separated list of
+# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
+# of VLAN IDs on named physical networks that are available for
+# allocation. All physical networks listed are available for flat and
+# VLAN provider network creation. Specified ranges of VLAN IDs are
+# available for tenant network allocation if tenant_network_type is
+# 'vlan'. If empty, only gre and local networks may be created.
+#
+# network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999
+
+[agent]
+# Agent's polling interval in seconds
+# polling_interval = 2
+
+# (ListOpt) Comma separated list of <physical_network>:<vswitch>
+# where the physical networks can be expressed with wildcards,
+# e.g.: "*:external".
+# The referenced external virtual switches need to already be present on
+# the Hyper-V server.
+# If a given physical network name does not match any value in the list,
+# the plugin will look for a virtual switch with the same name.
+#
+# physical_network_vswitch_mappings = *:external
+# Example: physical_network_vswitch_mappings = net1:external1,net2:external2
+
+# (StrOpt) Private virtual switch name used for local networking.
+#
+# local_network_vswitch = private
+# Example: local_network_vswitch = custom_vswitch
+
+# (BoolOpt) Enables metrics collection for switch ports by using Hyper-V's
+# metric APIs. Collected data can be retrieved by other apps and services,
+# e.g.: Ceilometer. Requires Hyper-V / Windows Server 2012 and above.
+#
+# enable_metrics_collection = False
+
+#-----------------------------------------------------------------------------
+# Sample Configurations.
+#-----------------------------------------------------------------------------
+#
+# Neutron server:
+#
+# [HYPERV]
+# tenant_network_type = vlan
+# network_vlan_ranges = default:2000:3999
+#
+# Agent running on Hyper-V node:
+#
+# [AGENT]
+# polling_interval = 2
+# physical_network_vswitch_mappings = *:external
+# local_network_vswitch = private
diff --git a/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini b/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini
new file mode 100644
index 00000000..0fab5070
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini
@@ -0,0 +1,50 @@
+[sdnve]
+# (ListOpt) The IP address of one (or more) SDN-VE controllers
+# Default value is: controller_ips = 127.0.0.1
+# Example: controller_ips = 127.0.0.1,127.0.0.2
+# (StrOpt) The integration bridge for OF based implementation
+# The default value for integration_bridge is None
+# Example: integration_bridge = br-int
+# (ListOpt) The interface mapping connecting the integration
+# bridge to external network as a list of physical network names and
+# interfaces: <physical_network_name>:<interface_name>
+# Example: interface_mappings = default:eth2
+# (BoolOpt) Used to reset the integration bridge, if exists
+# The default value for reset_bridge is True
+# Example: reset_bridge = False
+# (BoolOpt) Used to set the OVS controller as out-of-band
+# The default value for out_of_band is True
+# Example: out_of_band = False
+#
+# (BoolOpt) The fake controller for testing purposes
+# Default value is: use_fake_controller = False
+# (StrOpt) The port number for use with controller
+# The default value for the port is 8443
+# Example: port = 8443
+# (StrOpt) The userid for use with controller
+# The default value for the userid is admin
+# Example: userid = sdnve_user
+# (StrOpt) The password for use with controller
+# The default value for the password is admin
+# Example: password = sdnve_password
+#
+# (StrOpt) The default type of tenants (and associated resources)
+# Available choices are: OVERLAY or OF
+# The default value for tenant type is OVERLAY
+# Example: default_tenant_type = OVERLAY
+# (StrOpt) The string in the tenant description that indicates an OF tenant
+# Default value for OF tenants: of_signature = SDNVE-OF
+# (StrOpt) The string in the tenant description that indicates an OVERLAY tenant
+# Default value for OVERLAY tenants: overlay_signature = SDNVE-OVERLAY
+
+[sdnve_agent]
+# (IntOpt) Agent's polling interval in seconds
+# polling_interval = 2
+# (StrOpt) What to use for root helper
+# The default value: root_helper = 'sudo'
+# (BoolOpt) Whether to use rpc or not
+# The default value: rpc = True
+
+[securitygroup]
+# The security group is not supported:
+# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
diff --git a/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini b/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini
new file mode 100644
index 00000000..94fe9803
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini
@@ -0,0 +1,78 @@
+[vlans]
+# (StrOpt) Type of network to allocate for tenant networks. The
+# default value 'local' is useful only for single-box testing and
+# provides no connectivity between hosts. You MUST change this to
+# 'vlan' and configure network_vlan_ranges below in order for tenant
+# networks to provide connectivity between hosts. Set to 'none' to
+# disable creation of tenant networks.
+#
+# tenant_network_type = local
+# Example: tenant_network_type = vlan
+
+# (ListOpt) Comma-separated list of
+# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
+# of VLAN IDs on named physical networks that are available for
+# allocation. All physical networks listed are available for flat and
+# VLAN provider network creation. Specified ranges of VLAN IDs are
+# available for tenant network allocation if tenant_network_type is
+# 'vlan'. If empty, only local networks may be created.
+#
+# network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999
+
+[linux_bridge]
+# (ListOpt) Comma-separated list of
+# <physical_network>:<physical_interface> tuples mapping physical
+# network names to the agent's node-specific physical network
+# interfaces to be used for flat and VLAN networks. All physical
+# networks listed in network_vlan_ranges on the server should have
+# mappings to appropriate interfaces on each agent.
+#
+# physical_interface_mappings =
+# Example: physical_interface_mappings = physnet1:eth1
+
+[vxlan]
+# (BoolOpt) Enable VXLAN on the agent.
+# VXLAN support can be enabled when the agent is managed by the ml2 plugin using
+# the linuxbridge mechanism driver. It has no effect with the linuxbridge plugin.
+# enable_vxlan = False
+#
+# (IntOpt) use specific TTL for vxlan interface protocol packets
+# ttl =
+#
+# (IntOpt) use specific TOS for vxlan interface protocol packets
+# tos =
+#
+# (StrOpt) multicast group to use for broadcast emulation.
+# This group must be the same on all the agents.
+# vxlan_group = 224.0.0.1
+#
+# (StrOpt) Local IP address to use for VXLAN endpoints (required)
+# local_ip =
+#
+# (BoolOpt) Flag to enable the l2population extension. This option should be
+# used in conjunction with the ml2 plugin's l2population mechanism driver (in
+# that case, both the linuxbridge and l2population mechanism drivers should be
+# loaded). It enables the plugin to populate the VXLAN forwarding table in order
+# to limit the use of broadcast emulation (multicast will be turned off if the
+# kernel and iproute2 support unicast flooding; requires kernel 3.11 and iproute2 3.10)
+# l2_population = False
+
+[agent]
+# Agent's polling interval in seconds
+# polling_interval = 2
+
+# (BoolOpt) Enable server RPC compatibility with old (pre-havana)
+# agents.
+#
+# rpc_support_old_agents = False
+# Example: rpc_support_old_agents = True
+
+[securitygroup]
+# Firewall driver for realizing neutron security group function
+# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
+# Example: firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
+
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
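A minimal sketch of a VLAN-based setup with this plugin, tying the server-side VLAN ranges to an agent-side interface mapping (the interface name and VLAN range are assumptions, based on the inline examples above):

    [vlans]
    tenant_network_type = vlan
    network_vlan_ranges = physnet1:1000:2999

    [linux_bridge]
    # eth1 stands in for whichever NIC carries tenant VLANs on the agent host.
    physical_interface_mappings = physnet1:eth1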
diff --git a/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini b/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini
new file mode 100644
index 00000000..2b9bfa5e
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini
@@ -0,0 +1,31 @@
+# Config file for Metaplugin
+
+[meta]
+# Comma separated list of flavor:neutron_plugin for plugins to load.
+# Extension method is searched in the list order and the first one is used.
+plugin_list = 'ml2:neutron.plugins.ml2.plugin.Ml2Plugin,nvp:neutron.plugins.vmware.plugin.NsxPluginV2'
+
+# Comma separated list of flavor:neutron_plugin for L3 service plugins
+# to load.
+# This is intended for specifying L2 plugins which support L3 functions.
+# If you use a router service plugin, set this blank.
+l3_plugin_list =
+
+# Default flavor to use, when flavor:network is not specified at network
+# creation.
+default_flavor = 'nvp'
+
+# Default L3 flavor to use, when flavor:router is not specified at router
+# creation.
+# Ignored if 'l3_plugin_list' is blank.
+default_l3_flavor =
+
+# Comma separated list of supported extension aliases.
+supported_extension_aliases = 'provider,binding,agent,dhcp_agent_scheduler'
+
+# Comma separated list of method:flavor to select specific plugin for a method.
+# This has priority over method search order based on 'plugin_list'.
+extension_map = 'get_port_stats:nvp'
+
+# Specifies flavor for plugin to handle 'q-plugin' RPC requests.
+rpc_flavor = 'ml2'
diff --git a/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini b/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini
new file mode 100644
index 00000000..f2e94052
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini
@@ -0,0 +1,19 @@
+
+[midonet]
+# MidoNet API server URI
+# midonet_uri = http://localhost:8080/midonet-api
+
+# MidoNet admin username
+# username = admin
+
+# MidoNet admin password
+# password = passw0rd
+
+# ID of the project that MidoNet admin user belongs to
+# project_id = 77777777-7777-7777-7777-777777777777
+
+# Virtual provider router ID
+# provider_router_id = 00112233-0011-0011-0011-001122334455
+
+# Path to midonet host uuid file
+# midonet_host_uuid_path = /etc/midolman/host_uuid.properties
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini
new file mode 100644
index 00000000..b8097ce2
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini
@@ -0,0 +1,86 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = flat,gre
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = gre
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = openvswitch
+
+# (ListOpt) Ordered list of extension driver entrypoints
+# to be loaded from the neutron.ml2.extension_drivers namespace.
+# extension_drivers =
+# Example: extension_drivers = anewextensiondriver
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+# flat_networks =
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+flat_networks = External
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+# network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+#network_vlan_ranges = Physnet1:100:200
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+# vni_ranges =
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+# vxlan_group =
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+enable_security_group = True
+
+# Use ipset to speed-up the iptables security groups. Enabling ipset support
+# requires that ipset is installed on L2 agent node.
+enable_ipset = True
+
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+
+[ovs]
+local_ip = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+enable_tunneling = True
+bridge_mappings=External:br-ex
+
+[agent]
+tunnel_types = gre
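This file configures GRE tenant networks with a flat 'External' provider network. As a sketch only (values are assumptions, not part of this patch), enabling VXLAN alongside GRE would roughly mean changing the options above as follows:

    [ml2]
    type_drivers = flat,gre,vxlan
    tenant_network_types = vxlan,gre

    [ml2_type_vxlan]
    vni_ranges = 1:1000

    [agent]
    tunnel_types = vxlan,gre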
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini
new file mode 100644
index 00000000..abaf5bc7
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini
@@ -0,0 +1,100 @@
+# Defines configuration options specific for Arista ML2 Mechanism driver
+
+[ml2_arista]
+# (StrOpt) EOS IP address. This is required field. If not set, all
+# communications to Arista EOS will fail
+#
+# eapi_host =
+# Example: eapi_host = 192.168.0.1
+#
+# (StrOpt) EOS command API username. This is a required field.
+# If not set, all communications to Arista EOS will fail.
+#
+# eapi_username =
+# Example: arista_eapi_username = admin
+#
+# (StrOpt) EOS command API password. This is a required field.
+# If not set, all communications to Arista EOS will fail.
+#
+# eapi_password =
+# Example: eapi_password = my_password
+#
+# (StrOpt) Defines if hostnames are sent to Arista EOS as FQDNs
+# ("node1.domain.com") or as short names ("node1"). This is
+# optional. If not set, a value of "True" is assumed.
+#
+# use_fqdn =
+# Example: use_fqdn = True
+#
+# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
+# This field defines how often the synchronization is performed.
+# This is an optional field. If not set, a value of 180 seconds
+# is assumed.
+#
+# sync_interval =
+# Example: sync_interval = 60
+#
+# (StrOpt) Defines Region Name that is assigned to this OpenStack Controller.
+# This is useful when multiple OpenStack/Neutron controllers are
+# managing the same Arista HW clusters. Note that this name must
+# match the region name registered with (or known to) the keystone
+# service. Authentication with Keystone is performed by EOS.
+# This is optional. If not set, a value of "RegionOne" is assumed.
+#
+# region_name =
+# Example: region_name = RegionOne
+
+
+[l3_arista]
+
+# (StrOpt) Primary host IP address. This is a required field. If not set, all
+# communications to Arista EOS will fail. This is the host where
+# primary router is created.
+#
+# primary_l3_host =
+# Example: primary_l3_host = 192.168.10.10
+#
+# (StrOpt) Primary host username. This is a required field.
+# If not set, all communications to Arista EOS will fail.
+#
+# primary_l3_host_username =
+# Example: arista_primary_l3_username = admin
+#
+# (StrOpt) Primary host password. This is a required field.
+# If not set, all communications to Arista EOS will fail.
+#
+# primary_l3_host_password =
+# Example: primary_l3_password = my_password
+#
+# (StrOpt) IP address of the second Arista switch paired as
+# MLAG (Multi-chassis Link Aggregation) with the first.
+# This is an optional field; however, if the mlag_config flag is
+# set, then it becomes required. If not set in that case, all
+# communications to Arista EOS will fail. If mlag_config is set
+# to False, then this field is ignored.
+#
+# secondary_l3_host =
+# Example: secondary_l3_host = 192.168.10.20
+#
+# (BoolOpt) Defines if Arista switches are configured in MLAG mode.
+# If yes, all L3 configuration is pushed to both switches
+# automatically. If this flag is set, ensure that secondary_l3_host
+# is set to the second switch's IP.
+# This flag is Optional. If not set, a value of "False" is assumed.
+#
+# mlag_config =
+# Example: mlag_config = True
+#
+# (BoolOpt) Defines if the router is created in the default VRF or in
+# a specific VRF. This is optional.
+# If not set, a value of "False" is assumed.
+#
+# Example: use_vrf = True
+#
+# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
+# This field defines how often the synchronization is performed.
+# This is an optional field. If not set, a value of 180 seconds
+# is assumed.
+#
+# l3_sync_interval =
+# Example: l3_sync_interval = 60
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini
new file mode 100644
index 00000000..67574110
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini
@@ -0,0 +1,15 @@
+[ml2_brocade]
+# username = <mgmt admin username>
+# password = <mgmt admin password>
+# address = <switch mgmt ip address>
+# ostype = NOS
+# osversion = autodetect | n.n.n
+# physical_networks = physnet1,physnet2
+#
+# Example:
+# username = admin
+# password = password
+# address = 10.24.84.38
+# ostype = NOS
+# osversion = 4.1.1
+# physical_networks = physnet1,physnet2
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini
new file mode 100644
index 00000000..1b69100e
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini
@@ -0,0 +1,118 @@
+[ml2_cisco]
+
+# (StrOpt) A short prefix to prepend to the VLAN number when creating a
+# VLAN interface. For example, if an interface is being created for
+# VLAN 2001 it will be named 'q-2001' using the default prefix.
+#
+# vlan_name_prefix = q-
+# Example: vlan_name_prefix = vnet-
+
+# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
+# svi_round_robin = False
+
+#
+# (StrOpt) The name of the physical_network managed via the Cisco Nexus Switch.
+# This string value must be present in the ml2_conf.ini network_vlan_ranges
+# variable.
+#
+# managed_physical_network =
+# Example: managed_physical_network = physnet1
+
+# Cisco Nexus Switch configurations.
+# Each switch to be managed by Openstack Neutron must be configured here.
+#
+# Cisco Nexus Switch Format.
+# [ml2_mech_cisco_nexus:<IP address of switch>]
+# <hostname>=<intf_type:port> (1)
+# ssh_port=<ssh port> (2)
+# username=<credential username> (3)
+# password=<credential password> (4)
+#
+# (1) For each host connected to a port on the switch, specify the hostname
+# and the Nexus physical port (interface) it is connected to.
+# Valid intf_type's are 'ethernet' and 'port-channel'.
+# The default setting for <intf_type:> is 'ethernet' and need not be
+# added to this setting.
+# (2) The TCP port for connecting via SSH to manage the switch. This is
+# port number 22 unless the switch has been configured otherwise.
+# (3) The username for logging into the switch to manage it.
+# (4) The password for logging into the switch to manage it.
+#
+# Example:
+# [ml2_mech_cisco_nexus:1.1.1.1]
+# compute1=1/1
+# compute2=ethernet:1/2
+# compute3=port-channel:1
+# ssh_port=22
+# username=admin
+# password=mySecretPassword
+
+[ml2_cisco_apic]
+
+# Hostname:port list of APIC controllers
+# apic_hosts = 1.1.1.1:80, 1.1.1.2:8080, 1.1.1.3:80
+
+# Username for the APIC controller
+# apic_username = user
+
+# Password for the APIC controller
+# apic_password = password
+
+# Whether to use SSL for connecting to the APIC controller or not
+# apic_use_ssl = True
+
+# How to map names to APIC: use_uuid or use_name
+# apic_name_mapping = use_name
+
+# Names for APIC objects used by Neutron
+# Note: When deploying multiple clouds against one APIC,
+# these names must be unique between the clouds.
+# apic_vmm_domain = openstack
+# apic_vlan_ns_name = openstack_ns
+# apic_node_profile = openstack_profile
+# apic_entity_profile = openstack_entity
+# apic_function_profile = openstack_function
+# apic_app_profile_name = openstack_app
+# Agent timers for State reporting and topology discovery
+# apic_sync_interval = 30
+# apic_agent_report_interval = 30
+# apic_agent_poll_interval = 2
+
+# Specify your network topology.
+# This section indicates how your compute nodes are connected to the fabric's
+# switches and ports. The format is as follows:
+#
+# [apic_switch:<switch_id_from_the_apic>]
+# <compute_host>,<compute_host> = <switchport_the_host(s)_are_connected_to>
+#
+# You can have multiple sections, one for each switch in your fabric that is
+# participating in Openstack. e.g.
+#
+# [apic_switch:17]
+# ubuntu,ubuntu1 = 1/10
+# ubuntu2,ubuntu3 = 1/11
+#
+# [apic_switch:18]
+# ubuntu5,ubuntu6 = 1/1
+# ubuntu7,ubuntu8 = 1/2
+
+# Describe external connectivity.
+# In this section you can specify the external network configuration in order
+# for the plugin to be able to teach the fabric how to route the internal
+# traffic to the outside world. The external connectivity configuration
+# format is as follows:
+#
+# [apic_external_network:<externalNetworkName>]
+# switch = <switch_id_from_the_apic>
+# port = <switchport_the_external_router_is_connected_to>
+# encap = <encapsulation>
+# cidr_exposed = <cidr_exposed_to_the_external_router>
+# gateway_ip = <ip_of_the_external_gateway>
+#
+# An example follows:
+# [apic_external_network:network_ext]
+# switch=203
+# port=1/34
+# encap=vlan-100
+# cidr_exposed=10.10.40.2/16
+# gateway_ip=10.10.40.1
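
The per-switch section format described above can be awkward to read at a glance, so here is a small standalone sketch (plain configparser, not the Cisco driver itself) that walks sections named ml2_mech_cisco_nexus:<IP> and separates the credential options from the <host> = <interface> pairs. The sample text mirrors the commented example.

import configparser

sample = """\
[ml2_mech_cisco_nexus:1.1.1.1]
compute1=1/1
compute2=ethernet:1/2
ssh_port=22
username=admin
password=mySecretPassword
"""

cfg = configparser.RawConfigParser()
cfg.read_string(sample)

for section in cfg.sections():
    if not section.startswith('ml2_mech_cisco_nexus:'):
        continue
    switch_ip = section.split(':', 1)[1]
    creds = {key: cfg.get(section, key) for key in ('ssh_port', 'username', 'password')}
    host_ports = {key: value for key, value in cfg.items(section) if key not in creds}
    print(switch_ip, creds['ssh_port'], host_ports)
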
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini
new file mode 100644
index 00000000..6ee4a4e0
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini
@@ -0,0 +1,52 @@
+# Defines Configuration options for FSL SDN OS Mechanism Driver
+# Cloud Resource Discovery (CRD) authorization credentials
+[ml2_fslsdn]
+#(StrOpt) User name for authentication to CRD.
+# e.g.: user12
+#
+# crd_user_name =
+
+#(StrOpt) Password for authentication to CRD.
+# e.g.: secret
+#
+# crd_password =
+
+#(StrOpt) Tenant name for CRD service.
+# e.g.: service
+#
+# crd_tenant_name =
+
+#(StrOpt) CRD auth URL.
+# e.g.: http://127.0.0.1:5000/v2.0/
+#
+# crd_auth_url =
+
+#(StrOpt) URL for connecting to CRD Service.
+# e.g.: http://127.0.0.1:9797
+#
+# crd_url=
+
+#(IntOpt) Timeout value for connecting to CRD service
+# in seconds, e.g.: 30
+#
+# crd_url_timeout=
+
+#(StrOpt) Region name for connecting to CRD in
+# admin context, e.g.: RegionOne
+#
+# crd_region_name=
+
+#(BoolOpt)If set, ignore any SSL validation issues (boolean value)
+# e.g.: False
+#
+# crd_api_insecure=
+
+#(StrOpt)Authorization strategy for connecting to CRD in admin
+# context, e.g.: keystone
+#
+# crd_auth_strategy=
+
+#(StrOpt)Location of CA certificates file to use for CRD client
+# requests.
+#
+# crd_ca_certificates_file=
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini
new file mode 100644
index 00000000..46139aed
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini
@@ -0,0 +1,4 @@
+[eswitch]
+# (StrOpt) Type of Network Interface to allocate for VM:
+# mlnx_direct or hostdev according to libvirt terminology
+# vnic_type = mlnx_direct
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini
new file mode 100644
index 00000000..dbbfcbd2
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini
@@ -0,0 +1,28 @@
+# Defines configuration options specific to the Tail-f NCS Mechanism Driver
+
+[ml2_ncs]
+# (StrOpt) Tail-f NCS HTTP endpoint for REST access to the OpenStack
+# subtree.
+# If this is not set then no HTTP requests will be made.
+#
+# url =
+# Example: url = http://ncs/api/running/services/openstack
+
+# (StrOpt) Username for HTTP basic authentication to NCS.
+# This is an optional parameter. If unspecified then no authentication is used.
+#
+# username =
+# Example: username = admin
+
+# (StrOpt) Password for HTTP basic authentication to NCS.
+# This is an optional parameter. If unspecified then no authentication is used.
+#
+# password =
+# Example: password = admin
+
+# (IntOpt) Timeout in seconds to wait for NCS HTTP request completion.
+# This is an optional parameter, default value is 10 seconds.
+#
+# timeout =
+# Example: timeout = 15
+
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini
new file mode 100644
index 00000000..9e88c1bb
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini
@@ -0,0 +1,30 @@
+# Configuration for the OpenDaylight MechanismDriver
+
+[ml2_odl]
+# (StrOpt) OpenDaylight REST URL
+# If this is not set then no HTTP requests will be made.
+#
+# url =
+# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron
+
+# (StrOpt) Username for HTTP basic authentication to ODL.
+#
+# username =
+# Example: username = admin
+
+# (StrOpt) Password for HTTP basic authentication to ODL.
+#
+# password =
+# Example: password = admin
+
+# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion.
+# This is an optional parameter, default value is 10 seconds.
+#
+# timeout = 10
+# Example: timeout = 15
+
+# (IntOpt) Timeout in minutes to wait for a Tomcat session timeout.
+# This is an optional parameter, default value is 30 minutes.
+#
+# session_timeout = 30
+# Example: session_timeout = 60
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini
new file mode 100644
index 00000000..4a94b987
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini
@@ -0,0 +1,13 @@
+# Defines configuration options specific to the OpenFlow Agent Mechanism Driver
+
+[ovs]
+# Please refer to the Open vSwitch agent configuration options.
+
+[agent]
+# (IntOpt) Number of seconds to retry acquiring an Open vSwitch datapath.
+# This is an optional parameter, default value is 60 seconds.
+#
+# get_datapath_retry_times =
+# Example: get_datapath_retry_times = 30
+
+# For anything not listed above, refer to the Open vSwitch agent configuration options.
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini
new file mode 100644
index 00000000..9566f54c
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini
@@ -0,0 +1,31 @@
+# Defines configuration options for SRIOV NIC Switch MechanismDriver
+# and Agent
+
+[ml2_sriov]
+# (ListOpt) Comma-separated list of
+# supported Vendor PCI Devices, in format vendor_id:product_id
+#
+# supported_pci_vendor_devs = 15b3:1004, 8086:10c9
+# Example: supported_pci_vendor_devs = 15b3:1004
+#
+# (BoolOpt) Requires running SRIOV neutron agent for port binding
+# agent_required = True
+
+[sriov_nic]
+# (ListOpt) Comma-separated list of <physical_network>:<network_device>
+# tuples mapping physical network names to the agent's node-specific
+# physical network device interfaces of SR-IOV physical function to be used
+# for VLAN networks. All physical networks listed in network_vlan_ranges on
+# the server should have mappings to appropriate interfaces on each agent.
+#
+# physical_device_mappings =
+# Example: physical_device_mappings = physnet1:eth1
+#
+# (ListOpt) Comma-separated list of <network_device>:<vfs_to_exclude>
+# tuples, mapping network_device to the agent's node-specific list of virtual
+# functions that should not be used for virtual networking.
+# vfs_to_exclude is a semicolon-separated list of virtual
+# functions to exclude from network_device. The network_device in the
+# mapping should appear in the physical_device_mappings list.
+# exclude_devices =
+# Example: exclude_devices = eth1:0000:07:00.2; 0000:07:00.3
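
The exclude_devices value packs two levels of separators (commas between devices, a colon after the device name, semicolons between VF addresses), which is easy to get wrong. Below is a hedged, standalone sketch of parsing that format; the function name is made up for illustration and this is not the SR-IOV agent's own parser.

def parse_exclude_devices(value):
    """Parse '<device>:<vf>;<vf>,...' into {device: [vf, ...]} (illustrative)."""
    mapping = {}
    for entry in value.split(','):
        entry = entry.strip()
        if not entry:
            continue
        device, _, vfs = entry.partition(':')  # split only at the first colon
        mapping[device.strip()] = [vf.strip() for vf in vfs.split(';') if vf.strip()]
    return mapping

print(parse_exclude_devices('eth1:0000:07:00.2; 0000:07:00.3'))
# -> {'eth1': ['0000:07:00.2', '0000:07:00.3']}
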
diff --git a/openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini b/openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini
new file mode 100644
index 00000000..b1225111
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini
@@ -0,0 +1,79 @@
+[mlnx]
+# (StrOpt) Type of network to allocate for tenant networks. The
+# default value is 'vlan'. You MUST configure network_vlan_ranges below
+# in order for tenant networks to provide connectivity between hosts.
+# Set to 'none' to disable creation of tenant networks.
+#
+# tenant_network_type = vlan
+# Example: tenant_network_type = vlan
+
+# (ListOpt) Comma-separated list of
+# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
+# of VLAN IDs on named physical networks that are available for
+# allocation. All physical networks listed are available for flat and
+# VLAN provider network creation. Specified ranges of VLAN IDs are
+# available for tenant network allocation if tenant_network_type is
+# 'vlan'. If empty, only local networks may be created.
+#
+# network_vlan_ranges =
+# Example: network_vlan_ranges = default:1:100
+
+# (ListOpt) Comma-separated list of
+# <physical_network>:<physical_network_type> tuples mapping physical
+# network names to physical network types. All physical
+# networks listed in network_vlan_ranges should have
+# mappings to appropriate physical network type.
+# Type of the physical network can be either eth (Ethernet) or
+# ib (InfiniBand). If empty, physical network eth type is assumed.
+#
+# physical_network_type_mappings =
+# Example: physical_network_type_mappings = default:eth
+
+# (StrOpt) Type of the physical network, can be either 'eth' or 'ib'
+# The default value is 'eth'
+# physical_network_type = eth
+
+[eswitch]
+# (ListOpt) Comma-separated list of
+# <physical_network>:<physical_interface> tuples mapping physical
+# network names to the agent's node-specific physical network
+# interfaces to be used for flat and VLAN networks. All physical
+# networks listed in network_vlan_ranges on the server should have
+# mappings to appropriate interfaces on each agent.
+#
+# physical_interface_mappings =
+# Example: physical_interface_mappings = default:eth2
+
+# (StrOpt) Type of Network Interface to allocate for VM:
+# direct or hostdev according to libvirt terminology
+# vnic_type = mlnx_direct
+
+# (StrOpt) Eswitch daemon end point connection url
+# daemon_endpoint = 'tcp://127.0.0.1:60001'
+
+# The number of milliseconds the agent will wait for
+# response on request to daemon
+# request_timeout = 3000
+
+# The number of retries the agent will send request
+# to daemon before giving up
+# retries = 3
+
+# The backoff rate multiplier for waiting period between retries
+# on request to daemon, i.e. value of 2 will double
+# the request timeout each retry
+# backoff_rate = 2
+
+[agent]
+# Agent's polling interval in seconds
+# polling_interval = 2
+
+# (BoolOpt) Enable server RPC compatibility with old (pre-havana)
+# agents.
+#
+# rpc_support_old_agents = False
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
diff --git a/openstack/usr/share/openstack/neutron/plugins/nec/nec.ini b/openstack/usr/share/openstack/neutron/plugins/nec/nec.ini
new file mode 100644
index 00000000..aa4171da
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/nec/nec.ini
@@ -0,0 +1,60 @@
+# Sample Configurations
+
+[ovs]
+# Do not change this parameter unless you have a good reason to.
+# This is the name of the OVS integration bridge. There is one per hypervisor.
+# The integration bridge acts as a virtual "patch port". All VM VIFs are
+# attached to this bridge and then "patched" according to their network
+# connectivity.
+# integration_bridge = br-int
+
+[agent]
+# Agent's polling interval in seconds
+# polling_interval = 2
+
+[securitygroup]
+# Firewall driver for realizing neutron security group function
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+
+[ofc]
+# Specify OpenFlow Controller Host, Port and Driver to connect.
+# host = 127.0.0.1
+# port = 8888
+
+# Base URL of OpenFlow Controller REST API.
+# It is prepended to a path of each API request.
+# path_prefix =
+
+# Drivers are in neutron/plugins/nec/drivers/ .
+# driver = trema
+
+# PacketFilter is available when it's enabled in this configuration
+# and supported by the driver.
+# enable_packet_filter = true
+
+# Use SSL to connect
+# use_ssl = false
+
+# Key file
+# key_file =
+
+# Certificate file
+# cert_file =
+
+# Disable SSL certificate verification
+# insecure_ssl = false
+
+# Maximum attempts per OFC API request. NEC plugin retries
+# API request to OFC when OFC returns ServiceUnavailable (503).
+# The value must be greater than 0.
+# api_max_attempts = 3
+
+[provider]
+# Default router provider to use.
+# default_router_provider = l3-agent
+# List of enabled router providers.
+# router_providers = l3-agent,openflow
diff --git a/openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini b/openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini
new file mode 100644
index 00000000..aad37bd5
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini
@@ -0,0 +1,41 @@
+# Please fill in the correct data for all the keys below and uncomment key-value pairs
+[restproxy]
+# (StrOpt) Default Network partition in which VSD will
+# orchestrate network resources using openstack
+#
+#default_net_partition_name = <default-net-partition-name>
+
+# (StrOpt) Nuage provided uri for initial authorization to
+# access VSD
+#
+#auth_resource = /auth
+
+# (StrOpt) IP Address and Port of VSD
+#
+#server = ip:port
+
+# (StrOpt) Organization name in which VSD will orchestrate
+# network resources using openstack
+#
+#organization = org
+
+# (StrOpt) Username and password of VSD for authentication
+#
+#serverauth = uname:pass
+
+# (BoolOpt) Boolean for SSL connection with VSD server
+#
+#serverssl = True
+
+# (StrOpt) Nuage provided base uri to reach out to VSD
+#
+#base_uri = /base
+
+[syncmanager]
+# (BoolOpt) Boolean to enable sync between openstack and VSD
+#
+#enable_sync = False
+
+# (IntOpt) Sync interval in seconds between openstack and VSD
+#
+#sync_interval = 0
\ No newline at end of file
diff --git a/openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini b/openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini
new file mode 100644
index 00000000..a1c05d97
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini
@@ -0,0 +1,35 @@
+[nvsd]
+# Configure the NVSD controller. The plugin proxies the API calls
+# to the NVSD controller, which implements the required functionality.
+
+# IP address of NVSD controller api server
+# nvsd_ip = <ip address of nvsd controller>
+
+# Port number of NVSD controller api server
+# nvsd_port = 8082
+
+# Authentication credentials to access the api server
+# nvsd_user = <nvsd controller username>
+# nvsd_passwd = <password>
+
+# API request timeout in seconds
+# request_timeout = <default request timeout>
+
+# Maximum number of retry attempts to login to the NVSD controller
+# Specify 0 to retry until success (default)
+# nvsd_retries = 0
+
+[securitygroup]
+# Specify the firewall_driver option: NoopFirewallDriver if neutron security
+# groups are disabled, otherwise OVSHybridIptablesFirewallDriver.
+# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
+
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+
+[agent]
+# root_helper = sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
+
+[database]
+# connection = mysql://root:<passwd>@127.0.0.1/<neutron_db>?charset=utf8
diff --git a/openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini b/openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini
new file mode 100644
index 00000000..629f1fc4
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini
@@ -0,0 +1,26 @@
+# OpenContrail is an Apache 2.0-licensed project that is built using
+# standards-based protocols and provides all the necessary components for
+# network virtualization: SDN controller, virtual router, analytics engine,
+# and published northbound APIs.
+# For more information visit: http://opencontrail.org
+
+# Opencontrail plugin specific configuration
+[CONTRAIL]
+# (StrOpt) IP address to connect to opencontrail controller.
+# Uncomment this line for specifying the IP address of the opencontrail
+# Api-Server.
+# Default value is localhost (127.0.0.1).
+# api_server_ip='127.0.0.1'
+
+# (IntOpt) port to connect to opencontrail controller.
+# Uncomment this line for specifying the port of the opencontrail
+# Api-Server.
+# Default value is 8082
+# api_server_port=8082
+
+# (DictOpt) enable opencontrail extensions
+# OpenContrail may in future support extensions such as ipam and policy;
+# these extensions can be configured as shown below. The plugin will then
+# load the specified extensions.
+# The default value is None, in which case no extensions are loaded.
+# contrail_extensions=ipam:<classpath>,policy:<classpath>
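
contrail_extensions is a DictOpt, i.e. a comma-separated list of <name>:<classpath> pairs. The sketch below shows how such a value maps onto a dictionary; the class paths are hypothetical and the real parsing is done by oslo.config inside the plugin.

def parse_dict_opt(value):
    """Turn 'key:val,key:val' into a dict (illustration of the DictOpt format)."""
    result = {}
    for item in value.split(','):
        name, _, classpath = item.strip().partition(':')
        result[name] = classpath
    return result

# Hypothetical class paths, purely for illustration.
print(parse_dict_opt('ipam:my_pkg.extensions.ipam,policy:my_pkg.extensions.policy'))
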
diff --git a/openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini b/openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
new file mode 100644
index 00000000..9c8e6b58
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
@@ -0,0 +1,190 @@
+[ovs]
+# (StrOpt) Type of network to allocate for tenant networks. The
+# default value 'local' is useful only for single-box testing and
+# provides no connectivity between hosts. You MUST either change this
+# to 'vlan' and configure network_vlan_ranges below or change this to
+# 'gre' or 'vxlan' and configure tunnel_id_ranges below in order for
+# tenant networks to provide connectivity between hosts. Set to 'none'
+# to disable creation of tenant networks.
+#
+# tenant_network_type = local
+# Example: tenant_network_type = gre
+# Example: tenant_network_type = vxlan
+
+# (ListOpt) Comma-separated list of
+# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
+# of VLAN IDs on named physical networks that are available for
+# allocation. All physical networks listed are available for flat and
+# VLAN provider network creation. Specified ranges of VLAN IDs are
+# available for tenant network allocation if tenant_network_type is
+# 'vlan'. If empty, only gre, vxlan and local networks may be created.
+#
+# network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999
+
+# (BoolOpt) Set to True in the server and the agents to enable support
+# for GRE or VXLAN networks. Requires kernel support for OVS patch ports and
+# GRE or VXLAN tunneling.
+#
+# WARNING: This option will be deprecated in the Icehouse release, at which
+# point setting tunnel_type below will be required to enable
+# tunneling.
+#
+# enable_tunneling = False
+
+# (StrOpt) The type of tunnel network, if any, supported by the plugin. If
+# this is set, it will cause tunneling to be enabled. If this is not set and
+# the option enable_tunneling is set, this will default to 'gre'.
+#
+# tunnel_type =
+# Example: tunnel_type = gre
+# Example: tunnel_type = vxlan
+
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples
+# enumerating ranges of GRE or VXLAN tunnel IDs that are available for
+# tenant network allocation if tenant_network_type is 'gre' or 'vxlan'.
+#
+# tunnel_id_ranges =
+# Example: tunnel_id_ranges = 1:1000
+
+# Do not change this parameter unless you have a good reason to.
+# This is the name of the OVS integration bridge. There is one per hypervisor.
+# The integration bridge acts as a virtual "patch bay". All VM VIFs are
+# attached to this bridge and then "patched" according to their network
+# connectivity.
+#
+# integration_bridge = br-int
+
+# Only used for the agent if tunnel_id_ranges (above) is not empty for
+# the server. In most cases, the default value should be fine.
+#
+# tunnel_bridge = br-tun
+
+# Peer patch port in integration bridge for tunnel bridge
+# int_peer_patch_port = patch-tun
+
+# Peer patch port in tunnel bridge for integration bridge
+# tun_peer_patch_port = patch-int
+
+# Uncomment this line for the agent if tunnel_id_ranges (above) is not
+# empty for the server. Set local-ip to be the local IP address of
+# this hypervisor.
+#
+# local_ip =
+
+# (ListOpt) Comma-separated list of <physical_network>:<bridge> tuples
+# mapping physical network names to the agent's node-specific OVS
+# bridge names to be used for flat and VLAN networks. The length of
+# bridge names should be no more than 11 characters. Each bridge must
+# exist, and should have a physical network interface configured as a
+# port. All physical networks listed in network_vlan_ranges on the
+# server should have mappings to appropriate bridges on each agent.
+#
+# bridge_mappings =
+# Example: bridge_mappings = physnet1:br-eth1
+
+# (BoolOpt) Use veths instead of patch ports to interconnect the integration
+# bridge to physical networks. This supports kernels without OVS patch port
+# support, so long as it is set to True.
+# use_veth_interconnection = False
+
+[agent]
+# Agent's polling interval in seconds
+# polling_interval = 2
+
+# Minimize polling by monitoring ovsdb for interface changes
+# minimize_polling = True
+
+# When minimize_polling = True, the number of seconds to wait before
+# respawning the ovsdb monitor after losing communication with it
+# ovsdb_monitor_respawn_interval = 30
+
+# (ListOpt) The types of tenant network tunnels supported by the agent.
+# Setting this will enable tunneling support in the agent. This can be set to
+# either 'gre' or 'vxlan'. If this is unset, it will default to [] and
+# disable tunneling support in the agent. When running the agent with the OVS
+# plugin, this value must be the same as "tunnel_type" in the "[ovs]" section.
+# When running the agent with ML2, you can specify as many values here as
+# your compute hosts support.
+#
+# tunnel_types =
+# Example: tunnel_types = gre
+# Example: tunnel_types = vxlan
+# Example: tunnel_types = vxlan, gre
+
+# (IntOpt) The port number to utilize if tunnel_types includes 'vxlan'. By
+# default, this will make use of the Open vSwitch default value of '4789' if
+# not specified.
+#
+# vxlan_udp_port =
+# Example: vxlan_udp_port = 8472
+
+# (IntOpt) This is the MTU size of veth interfaces.
+# Do not change unless you have a good reason to.
+# The default MTU size of veth interfaces is 1500.
+# This option has no effect if use_veth_interconnection is False
+# veth_mtu =
+# Example: veth_mtu = 1504
+
+# (BoolOpt) Flag to enable l2-population extension. This option should only be
+# used in conjunction with the ml2 plugin and the l2population mechanism
+# driver. It enables the plugin to populate remote port MACs and IPs (using
+# fdb_add/remove RPC callbacks instead of tunnel_sync/update) on OVS agents
+# in order to optimize tunnel management.
+#
+# l2_population = False
+
+# Enable local ARP responder. Requires OVS 2.1. This is only used by the l2
+# population ML2 MechanismDriver.
+#
+# arp_responder = False
+
+# (BoolOpt) Set or unset the don't fragment (DF) bit on outgoing IP packets
+# carrying GRE/VXLAN tunnel traffic. The default value is True.
+#
+# dont_fragment = True
+
+# (BoolOpt) Set to True on L2 agents to enable support
+# for distributed virtual routing.
+#
+# enable_distributed_routing = False
+
+[securitygroup]
+# Firewall driver for realizing neutron security group function.
+# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
+# Example: firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+
+#-----------------------------------------------------------------------------
+# Sample Configurations.
+#-----------------------------------------------------------------------------
+#
+# 1. With VLANs on eth1.
+# [ovs]
+# network_vlan_ranges = default:2000:3999
+# tunnel_id_ranges =
+# integration_bridge = br-int
+# bridge_mappings = default:br-eth1
+#
+# 2. With GRE tunneling.
+# [ovs]
+# network_vlan_ranges =
+# tunnel_id_ranges = 1:1000
+# integration_bridge = br-int
+# tunnel_bridge = br-tun
+# local_ip = 10.0.0.3
+#
+# 3. With VXLAN tunneling.
+# [ovs]
+# network_vlan_ranges =
+# tenant_network_type = vxlan
+# tunnel_type = vxlan
+# tunnel_id_ranges = 1:1000
+# integration_bridge = br-int
+# tunnel_bridge = br-tun
+# local_ip = 10.0.0.3
+# [agent]
+# tunnel_types = vxlan
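
The bridge_mappings comment above carries a real constraint (bridge names of at most 11 characters, one <physical_network>:<bridge> pair per entry), so a quick check can save a confusing agent failure later. The following is a standalone sketch, not the agent's own validation; the example values echo the samples above.

def check_bridge_mappings(value, max_len=11):
    """Parse 'physnet:bridge,...' and enforce the 11-character bridge name limit."""
    mappings = {}
    for pair in value.split(','):
        physnet, _, bridge = pair.strip().partition(':')
        if len(bridge) > max_len:
            raise ValueError('bridge name longer than %d characters: %s' % (max_len, bridge))
        mappings[physnet] = bridge
    return mappings

print(check_bridge_mappings('physnet1:br-eth1,External:br-ex'))
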
diff --git a/openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini b/openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini
new file mode 100644
index 00000000..bfe8062a
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini
@@ -0,0 +1,14 @@
+# Config file for Neutron PLUMgrid Plugin
+
+[plumgriddirector]
+# This line should be pointing to the PLUMgrid Director,
+# for the PLUMgrid platform.
+# director_server=<director-ip-address>
+# director_server_port=<director-port>
+# Authentication parameters for the Director.
+# These are the admin credentials to manage and control
+# the PLUMgrid Director server.
+# username=<director-admin-username>
+# password=<director-admin-password>
+# servertimeout=5
+# driver=<plugin-driver>
diff --git a/openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini b/openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini
new file mode 100644
index 00000000..9d9cfa25
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini
@@ -0,0 +1,44 @@
+[ovs]
+# integration_bridge = br-int
+
+# openflow_rest_api = <host IP address of ofp rest api service>:<port: 8080>
+# openflow_rest_api = 127.0.0.1:8080
+
+# tunnel key range: 0 < tunnel_key_min < tunnel_key_max
+# VLAN: 12bits, GRE, VXLAN: 24bits
+# tunnel_key_min = 1
+# tunnel_key_max = 0xffffff
+
+# tunnel_ip = <ip address for tunneling>
+# tunnel_interface = interface for tunneling
+# when tunnel_ip is NOT specified, ip address is read
+# from this interface
+# tunnel_ip =
+# tunnel_interface =
+tunnel_interface = eth0
+
+# ovsdb_port = port number on which ovsdb is listening
+# ryu-agent uses this parameter to setup ovsdb.
+# ovs-vsctl set-manager ptcp:<ovsdb_port>
+# See set-manager section of man ovs-vsctl for details.
+# currently only ptcp is supported.
+# ovsdb_ip = <host IP address on which ovsdb is listening>
+# ovsdb_interface = interface for ovsdb
+# when ovsdb_ip is NOT specified, the IP address is read
+# from this interface
+# ovsdb_port = 6634
+# ovsdb_ip =
+# ovsdb_interface =
+ovsdb_interface = eth0
+
+[securitygroup]
+# Firewall driver for realizing neutron security group function
+# firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+
+[agent]
+# Agent's polling interval in seconds
+# polling_interval = 2
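
The tunnel key comment above encodes a simple invariant: 0 < tunnel_key_min < tunnel_key_max, with GRE/VXLAN keys limited to 24 bits. Here is a small standalone check of that invariant (illustrative only; the Ryu agent does its own validation).

def validate_tunnel_key_range(key_min, key_max, bits=24):
    """Check 0 < key_min < key_max <= 2**bits - 1 and return the number of keys."""
    if not 0 < key_min < key_max <= (1 << bits) - 1:
        raise ValueError('invalid tunnel key range %d:%d' % (key_min, key_max))
    return key_max - key_min + 1

print(validate_tunnel_key_range(1, 0xffffff), 'tunnel keys available')
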
diff --git a/openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini b/openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini
new file mode 100644
index 00000000..baca73b8
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini
@@ -0,0 +1,200 @@
+[DEFAULT]
+# User name for NSX controller
+# nsx_user = admin
+
+# Password for NSX controller
+# nsx_password = admin
+
+# Time before aborting a request on an unresponsive controller (Seconds)
+# http_timeout = 75
+
+# Maximum number of times a particular request should be retried
+# retries = 2
+
+# Maximum number of times a redirect response should be followed
+# redirects = 2
+
+# Comma-separated list of NSX controller endpoints (<ip>:<port>). When port
+# is omitted, 443 is assumed. This option MUST be specified, e.g.:
+# nsx_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh:80
+
+# UUID of the pre-existing default NSX Transport zone to be used for creating
+# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.:
+# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
+
+# (Optional) UUID for the default l3 gateway service to use with this cluster.
+# To be specified if planning to use logical routers with external gateways.
+# default_l3_gw_service_uuid =
+
+# (Optional) UUID for the default l2 gateway service to use with this cluster.
+# To be specified if providing tenants with a predefined gateway for connecting their networks.
+# default_l2_gw_service_uuid =
+
+# (Optional) UUID for the default service cluster. A service cluster is introduced to
+# represent a group of gateways and it is needed in order to use Logical Services like
+# dhcp and metadata in the logical space. NOTE: If agent_mode is set to 'agentless' this
+# config parameter *MUST BE* set to a valid pre-existent service cluster uuid.
+# default_service_cluster_uuid =
+
+# Name of the default interface to be used on the network gateway. This value
+# will be used for any device associated with a network gateway for which an
+# interface name was not specified
+# default_interface_name = breth0
+
+[quotas]
+# number of network gateways allowed per tenant, -1 means unlimited
+# quota_network_gateway = 5
+
+[vcns]
+# URL for VCNS manager
+# manager_uri = https://management_ip
+
+# User name for VCNS manager
+# user = admin
+
+# Password for VCNS manager
+# password = default
+
+# (Optional) Datacenter ID for Edge deployment
+# datacenter_moid =
+
+# (Optional) Deployment Container ID for NSX Edge deployment
+# If not specified, either a default global container will be used, or
+# the resource pool and datastore specified below will be used
+# deployment_container_id =
+
+# (Optional) Resource pool ID for NSX Edge deployment
+# resource_pool_id =
+
+# (Optional) Datastore ID for NSX Edge deployment
+# datastore_id =
+
+# (Required) UUID of the logical switch for physical network connectivity
+# external_network =
+
+# (Optional) Asynchronous task status check interval
+# default is 2000 (millisecond)
+# task_status_check_interval = 2000
+
+[nsx]
+# Maximum number of ports for each bridged logical switch
+# The recommended value for this parameter varies with NSX version
+# Please use:
+# NSX 2.x -> 64
+# NSX 3.0, 3.1 -> 5000
+# NSX 3.2 -> 10000
+# max_lp_per_bridged_ls = 5000
+
+# Maximum number of ports for each overlay (stt, gre) logical switch
+# max_lp_per_overlay_ls = 256
+
+# Number of connections to each controller node.
+# default is 10
+# concurrent_connections = 10
+
+# Number of seconds a generation id should be valid for (default -1 meaning do not time out)
+# nsx_gen_timeout = -1
+
+# Acceptable values for 'metadata_mode' are:
+# - 'access_network': this enables a dedicated connection to the metadata
+# proxy for metadata server access via Neutron router.
+# - 'dhcp_host_route': this enables host route injection via the dhcp agent.
+# This option is only useful if running on a host that does not support
+# namespaces; otherwise access_network should be used.
+# metadata_mode = access_network
+
+# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt)
+# default_transport_type = stt
+
+# Specifies in which mode the plugin needs to operate in order to provide DHCP and
+# metadata proxy services to tenant instances. If 'agent' is chosen (default)
+# the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to
+# provide such services. In this mode, the plugin supports API extensions 'agent'
+# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse),
+# the plugin will use NSX logical services for DHCP and metadata proxy. This
+# simplifies the deployment model for Neutron, in that the plugin no longer requires
+# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode
+# becomes ineffective. The 'agentless' mode is supported from NSX 4.2 or above.
+# Furthermore, a 'combined' mode is also provided and is used to support existing
+# deployments that want to adopt the agentless mode going forward. With this mode,
+# existing networks keep being served by the existing infrastructure (thus preserving
+# backward compatibility), whereas new networks will be served by the new infrastructure.
+# Migration tools are provided to 'move' one network from one model to another; with
+# agent_mode set to 'combined', option 'network_auto_schedule' in neutron.conf is
+# ignored, as new networks will no longer be scheduled to existing dhcp agents.
+# agent_mode = agent
+
+# Specifies which mode packet replication should be done in. If set to 'service',
+# a service node is required in order to perform packet replication. This can
+# also be set to source if one wants replication to be performed locally (NOTE:
+# usually only useful for testing if one does not want to deploy a service node).
+# In order to leverage distributed routers, replication_mode should be set to
+# "service".
+# replication_mode = service
+
+[nsx_sync]
+# Interval in seconds between runs of the status synchronization task.
+# The plugin will aim at resynchronizing operational status for all
+# resources in this interval, and it should be therefore large enough
+# to ensure the task is feasible. Otherwise the plugin will be
+# constantly synchronizing resource status, i.e. a new task is started
+# as soon as the previous is completed.
+# If this value is set to 0, the state synchronization thread for this
+# Neutron instance will be disabled.
+# state_sync_interval = 10
+
+# Random additional delay between two runs of the state synchronization task.
+# An additional wait time between 0 and max_random_sync_delay seconds
+# will be added on top of state_sync_interval.
+# max_random_sync_delay = 0
+
+# Minimum delay, in seconds, between two status synchronization requests for NSX.
+# Depending on chunk size, controller load, and other factors, state
+# synchronization requests might be pretty heavy. This means the
+# controller might take time to respond, and its load might be quite
+# increased by them. This parameter allows specifying a minimum
+# interval between two subsequent requests.
+# The value for this parameter must never exceed state_sync_interval.
+# If it does, an error will be raised at startup.
+# min_sync_req_delay = 1
+
+# Minimum number of resources to be retrieved from NSX in a single status
+# synchronization request.
+# The actual size of the chunk will increase if the number of resources is such
+# that using the minimum chunk size will cause the interval between two
+# requests to be less than min_sync_req_delay
+# min_chunk_size = 500
+
+# Enable this option to allow punctual state synchronization on show
+# operations. In this way, show operations will always fetch the operational
+# status of the resource from the NSX backend, and this might have
+# a considerable impact on overall performance.
+# always_read_status = False
+
+[nsx_lsn]
+# Pull LSN information from NSX in case it is missing from the local
+# data store. This is useful to rebuild the local store in case of
+# server recovery
+# sync_on_missing_data = False
+
+[nsx_dhcp]
+# (Optional) Comma-separated list of additional DNS servers. Default is an empty list
+# extra_domain_name_servers =
+
+# Domain to use for building the hostnames
+# domain_name = openstacklocal
+
+# Default DHCP lease time
+# default_lease_time = 43200
+
+[nsx_metadata]
+# IP address used by Metadata server
+# metadata_server_address = 127.0.0.1
+
+# TCP Port used by Metadata server
+# metadata_server_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. You may select any string for a secret,
+# but it MUST match the configuration used by the Metadata server
+# metadata_shared_secret =
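
The nsx_controllers format above allows the port to be omitted, in which case 443 is assumed. Below is a hedged sketch of normalising such a list into (host, port) pairs; this is plain Python for illustration, not the plugin's own parsing.

def parse_controllers(value, default_port=443):
    """Split 'ip[:port], ip[:port], ...' into (host, port) tuples, defaulting to 443."""
    endpoints = []
    for entry in value.split(','):
        host, _, port = entry.strip().partition(':')
        endpoints.append((host, int(port) if port else default_port))
    return endpoints

print(parse_controllers('xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh:80'))
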
diff --git a/openstack/usr/share/openstack/neutron/policy.json b/openstack/usr/share/openstack/neutron/policy.json
new file mode 100644
index 00000000..e7db4357
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/policy.json
@@ -0,0 +1,138 @@
+{
+ "context_is_admin": "role:admin",
+ "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
+ "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
+ "admin_only": "rule:context_is_admin",
+ "regular_user": "",
+ "shared": "field:networks:shared=True",
+ "shared_firewalls": "field:firewalls:shared=True",
+ "external": "field:networks:router:external=True",
+ "default": "rule:admin_or_owner",
+
+ "create_subnet": "rule:admin_or_network_owner",
+ "get_subnet": "rule:admin_or_owner or rule:shared",
+ "update_subnet": "rule:admin_or_network_owner",
+ "delete_subnet": "rule:admin_or_network_owner",
+
+ "create_network": "",
+ "get_network": "rule:admin_or_owner or rule:shared or rule:external",
+ "get_network:router:external": "rule:regular_user",
+ "get_network:segments": "rule:admin_only",
+ "get_network:provider:network_type": "rule:admin_only",
+ "get_network:provider:physical_network": "rule:admin_only",
+ "get_network:provider:segmentation_id": "rule:admin_only",
+ "get_network:queue_id": "rule:admin_only",
+ "create_network:shared": "rule:admin_only",
+ "create_network:router:external": "rule:admin_only",
+ "create_network:segments": "rule:admin_only",
+ "create_network:provider:network_type": "rule:admin_only",
+ "create_network:provider:physical_network": "rule:admin_only",
+ "create_network:provider:segmentation_id": "rule:admin_only",
+ "update_network": "rule:admin_or_owner",
+ "update_network:segments": "rule:admin_only",
+ "update_network:shared": "rule:admin_only",
+ "update_network:provider:network_type": "rule:admin_only",
+ "update_network:provider:physical_network": "rule:admin_only",
+ "update_network:provider:segmentation_id": "rule:admin_only",
+ "update_network:router:external": "rule:admin_only",
+ "delete_network": "rule:admin_or_owner",
+
+ "create_port": "",
+ "create_port:mac_address": "rule:admin_or_network_owner",
+ "create_port:fixed_ips": "rule:admin_or_network_owner",
+ "create_port:port_security_enabled": "rule:admin_or_network_owner",
+ "create_port:binding:host_id": "rule:admin_only",
+ "create_port:binding:profile": "rule:admin_only",
+ "create_port:mac_learning_enabled": "rule:admin_or_network_owner",
+ "get_port": "rule:admin_or_owner",
+ "get_port:queue_id": "rule:admin_only",
+ "get_port:binding:vif_type": "rule:admin_only",
+ "get_port:binding:vif_details": "rule:admin_only",
+ "get_port:binding:host_id": "rule:admin_only",
+ "get_port:binding:profile": "rule:admin_only",
+ "update_port": "rule:admin_or_owner",
+ "update_port:fixed_ips": "rule:admin_or_network_owner",
+ "update_port:port_security_enabled": "rule:admin_or_network_owner",
+ "update_port:binding:host_id": "rule:admin_only",
+ "update_port:binding:profile": "rule:admin_only",
+ "update_port:mac_learning_enabled": "rule:admin_or_network_owner",
+ "delete_port": "rule:admin_or_owner",
+
+ "get_router:ha": "rule:admin_only",
+ "create_router": "rule:regular_user",
+ "create_router:external_gateway_info:enable_snat": "rule:admin_only",
+ "create_router:distributed": "rule:admin_only",
+ "create_router:ha": "rule:admin_only",
+ "get_router": "rule:admin_or_owner",
+ "get_router:distributed": "rule:admin_only",
+ "update_router:external_gateway_info:enable_snat": "rule:admin_only",
+ "update_router:distributed": "rule:admin_only",
+ "update_router:ha": "rule:admin_only",
+ "delete_router": "rule:admin_or_owner",
+
+ "add_router_interface": "rule:admin_or_owner",
+ "remove_router_interface": "rule:admin_or_owner",
+
+ "create_firewall": "",
+ "get_firewall": "rule:admin_or_owner",
+ "create_firewall:shared": "rule:admin_only",
+ "get_firewall:shared": "rule:admin_only",
+ "update_firewall": "rule:admin_or_owner",
+ "update_firewall:shared": "rule:admin_only",
+ "delete_firewall": "rule:admin_or_owner",
+
+ "create_firewall_policy": "",
+ "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls",
+ "create_firewall_policy:shared": "rule:admin_or_owner",
+ "update_firewall_policy": "rule:admin_or_owner",
+ "delete_firewall_policy": "rule:admin_or_owner",
+
+ "create_firewall_rule": "",
+ "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
+ "update_firewall_rule": "rule:admin_or_owner",
+ "delete_firewall_rule": "rule:admin_or_owner",
+
+ "create_qos_queue": "rule:admin_only",
+ "get_qos_queue": "rule:admin_only",
+
+ "update_agent": "rule:admin_only",
+ "delete_agent": "rule:admin_only",
+ "get_agent": "rule:admin_only",
+
+ "create_dhcp-network": "rule:admin_only",
+ "delete_dhcp-network": "rule:admin_only",
+ "get_dhcp-networks": "rule:admin_only",
+ "create_l3-router": "rule:admin_only",
+ "delete_l3-router": "rule:admin_only",
+ "get_l3-routers": "rule:admin_only",
+ "get_dhcp-agents": "rule:admin_only",
+ "get_l3-agents": "rule:admin_only",
+ "get_loadbalancer-agent": "rule:admin_only",
+ "get_loadbalancer-pools": "rule:admin_only",
+
+ "create_floatingip": "rule:regular_user",
+ "update_floatingip": "rule:admin_or_owner",
+ "delete_floatingip": "rule:admin_or_owner",
+ "get_floatingip": "rule:admin_or_owner",
+
+ "create_network_profile": "rule:admin_only",
+ "update_network_profile": "rule:admin_only",
+ "delete_network_profile": "rule:admin_only",
+ "get_network_profiles": "",
+ "get_network_profile": "",
+ "update_policy_profiles": "rule:admin_only",
+ "get_policy_profiles": "",
+ "get_policy_profile": "",
+
+ "create_metering_label": "rule:admin_only",
+ "delete_metering_label": "rule:admin_only",
+ "get_metering_label": "rule:admin_only",
+
+ "create_metering_label_rule": "rule:admin_only",
+ "delete_metering_label_rule": "rule:admin_only",
+ "get_metering_label_rule": "rule:admin_only",
+
+ "get_service_provider": "rule:regular_user",
+ "get_lsn": "rule:admin_only",
+ "create_lsn": "rule:admin_only"
+}
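
Since every "rule:<name>" reference in the policy file must resolve to a key defined in the same file, a quick standalone check can catch typos before oslo.policy does at runtime. The path is an assumption; this is not how Neutron itself loads the file.

import json
import re

with open('/etc/neutron/policy.json') as f:  # assumed install path
    policy = json.load(f)

referenced = set()
for expression in policy.values():
    referenced.update(re.findall(r'rule:([\w-]+)', expression))

undefined = sorted(referenced - set(policy))
print('undefined rules:', undefined or 'none')
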
diff --git a/openstack/usr/share/openstack/neutron/vpn_agent.ini b/openstack/usr/share/openstack/neutron/vpn_agent.ini
new file mode 100644
index 00000000..c3089df9
--- /dev/null
+++ b/openstack/usr/share/openstack/neutron/vpn_agent.ini
@@ -0,0 +1,14 @@
+[DEFAULT]
+# VPN-Agent configuration file
+# Note vpn-agent inherits l3-agent, so you can use configs on l3-agent also
+
+[vpnagent]
+# vpn device drivers which vpn agent will use
+# If we want to use multiple drivers, we need to define this option multiple times.
+# vpn_device_driver=neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver
+# vpn_device_driver=neutron.services.vpn.device_drivers.cisco_ipsec.CiscoCsrIPsecDriver
+# vpn_device_driver=another_driver
+
+[ipsec]
+# Status check interval
+# ipsec_status_check_interval=60
diff --git a/openstack/usr/share/openstack/nova-config.yml b/openstack/usr/share/openstack/nova-config.yml
new file mode 100644
index 00000000..4f43db39
--- /dev/null
+++ b/openstack/usr/share/openstack/nova-config.yml
@@ -0,0 +1,34 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/nova.conf"
+ tasks:
+ - name: Create the nova user.
+ user:
+ name: nova
+ comment: Openstack Nova Daemons
+ shell: /sbin/nologin
+ home: /var/lib/nova
+ groups: libvirt
+ append: yes
+
+ - name: Create the /var folders for nova
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: nova
+ group: nova
+ with_items:
+ - /var/run/nova
+ - /var/lock/nova
+ - /var/log/nova
+ - /var/lib/nova
+ - /var/lib/nova/instances
+
+ - file: path=/etc/nova state=directory
+ - name: Add the configuration needed for nova in /etc/nova using templates
+ template:
+ src: /usr/share/openstack/nova/{{ item }}
+ dest: /etc/nova/{{ item }}
+ with_lines:
+ - cd /usr/share/openstack/nova && find -type f
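
The with_lines trick above (cd /usr/share/openstack/nova && find -type f) makes Ansible template every file under the source tree into /etc/nova while preserving relative paths. A rough Python equivalent of that enumeration, purely to illustrate what the task iterates over (Ansible does the real templating):

import os

src_root = '/usr/share/openstack/nova'
dst_root = '/etc/nova'

# Walk the source tree and show each relative path with its destination.
for dirpath, _dirnames, filenames in os.walk(src_root):
    for name in filenames:
        rel = os.path.relpath(os.path.join(dirpath, name), src_root)
        print(rel, '->', os.path.join(dst_root, rel))
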
diff --git a/openstack/usr/share/openstack/nova-db.yml b/openstack/usr/share/openstack/nova-db.yml
new file mode 100644
index 00000000..e7dc5b10
--- /dev/null
+++ b/openstack/usr/share/openstack/nova-db.yml
@@ -0,0 +1,51 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/nova.conf"
+ tasks:
+ - name: Create nova service user in service tenant
+ keystone_user:
+ user: "{{ NOVA_SERVICE_USER }}"
+ password: "{{ NOVA_SERVICE_PASSWORD }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Assign admin role to nova service user in the service tenant
+ keystone_user:
+ role: admin
+ user: "{{ NOVA_SERVICE_USER }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Add nova endpoint
+ keystone_service:
+ name: nova
+ type: compute
+ description: Openstack Compute Service
+ publicurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8774/v2/%(tenant_id)s'
+ internalurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8774/v2/%(tenant_id)s'
+ adminurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8774/v2/%(tenant_id)s'
+ region: 'regionOne'
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Create postgresql user for nova
+ postgresql_user:
+ name: "{{ NOVA_DB_USER }}"
+ login_host: "{{ CONTROLLER_HOST_ADDRESS }}"
+ password: "{{ NOVA_DB_PASSWORD }}"
+ sudo: yes
+ sudo_user: nova
+
+ - name: Create database for nova services
+ postgresql_db:
+ name: nova
+ owner: "{{ NOVA_DB_USER }}"
+ login_host: "{{ CONTROLLER_HOST_ADDRESS }}"
+ sudo: yes
+ sudo_user: nova
+
+ - name: Initiate nova database
+ nova_manage:
+ action: dbsync
+ sudo: yes
+ sudo_user: nova
diff --git a/openstack/usr/share/openstack/nova/api-paste.ini b/openstack/usr/share/openstack/nova/api-paste.ini
new file mode 100644
index 00000000..2a825a5b
--- /dev/null
+++ b/openstack/usr/share/openstack/nova/api-paste.ini
@@ -0,0 +1,118 @@
+############
+# Metadata #
+############
+[composite:metadata]
+use = egg:Paste#urlmap
+/: meta
+
+[pipeline:meta]
+pipeline = ec2faultwrap logrequest metaapp
+
+[app:metaapp]
+paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
+
+#######
+# EC2 #
+#######
+
+[composite:ec2]
+use = egg:Paste#urlmap
+/services/Cloud: ec2cloud
+
+[composite:ec2cloud]
+use = call:nova.api.auth:pipeline_factory
+noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
+keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
+
+[filter:ec2faultwrap]
+paste.filter_factory = nova.api.ec2:FaultWrapper.factory
+
+[filter:logrequest]
+paste.filter_factory = nova.api.ec2:RequestLogging.factory
+
+[filter:ec2lockout]
+paste.filter_factory = nova.api.ec2:Lockout.factory
+
+[filter:ec2keystoneauth]
+paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
+
+[filter:ec2noauth]
+paste.filter_factory = nova.api.ec2:NoAuth.factory
+
+[filter:cloudrequest]
+controller = nova.api.ec2.cloud.CloudController
+paste.filter_factory = nova.api.ec2:Requestify.factory
+
+[filter:authorizer]
+paste.filter_factory = nova.api.ec2:Authorizer.factory
+
+[filter:validator]
+paste.filter_factory = nova.api.ec2:Validator.factory
+
+[app:ec2executor]
+paste.app_factory = nova.api.ec2:Executor.factory
+
+#############
+# OpenStack #
+#############
+
+[composite:osapi_compute]
+use = call:nova.api.openstack.urlmap:urlmap_factory
+/: oscomputeversions
+/v1.1: openstack_compute_api_v2
+/v2: openstack_compute_api_v2
+/v3: openstack_compute_api_v3
+
+[composite:openstack_compute_api_v2]
+use = call:nova.api.auth:pipeline_factory
+noauth = compute_req_id faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
+keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
+keystone_nolimit = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
+
+[composite:openstack_compute_api_v3]
+use = call:nova.api.auth:pipeline_factory_v3
+noauth = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3
+keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v3
+
+[filter:request_id]
+paste.filter_factory = nova.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:compute_req_id]
+paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:noauth]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:noauth_v3]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddlewareV3.factory
+
+[filter:ratelimit]
+paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory
+
+[app:osapi_compute_app_v2]
+paste.app_factory = nova.api.openstack.compute:APIRouter.factory
+
+[app:osapi_compute_app_v3]
+paste.app_factory = nova.api.openstack.compute:APIRouterV3.factory
+
+[pipeline:oscomputeversions]
+pipeline = faultwrap oscomputeversionapp
+
+[app:oscomputeversionapp]
+paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
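
Each composite section above selects a WSGI pipeline per auth flavour, so the effective middleware chain for a given flavour is just the whitespace-separated value of that option. Here is a small sketch (not nova code; the path is an assumption) that prints the chain used for the v2 API when Keystone auth is selected.

import configparser

paste = configparser.RawConfigParser()
paste.read('/etc/nova/api-paste.ini')  # assumed install path

chain = paste.get('composite:openstack_compute_api_v2', 'keystone').split()
print(' -> '.join(chain))
# compute_req_id -> faultwrap -> sizelimit -> authtoken -> keystonecontext
#   -> ratelimit -> osapi_compute_app_v2
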
diff --git a/openstack/usr/share/openstack/nova/cells.json b/openstack/usr/share/openstack/nova/cells.json
new file mode 100644
index 00000000..cc74930d
--- /dev/null
+++ b/openstack/usr/share/openstack/nova/cells.json
@@ -0,0 +1,26 @@
+{
+ "parent": {
+ "name": "parent",
+ "api_url": "http://api.example.com:8774",
+ "transport_url": "rabbit://rabbit.example.com",
+ "weight_offset": 0.0,
+ "weight_scale": 1.0,
+ "is_parent": true
+ },
+ "cell1": {
+ "name": "cell1",
+ "api_url": "http://api.example.com:8774",
+ "transport_url": "rabbit://rabbit1.example.com",
+ "weight_offset": 0.0,
+ "weight_scale": 1.0,
+ "is_parent": false
+ },
+ "cell2": {
+ "name": "cell2",
+ "api_url": "http://api.example.com:8774",
+ "transport_url": "rabbit://rabbit2.example.com",
+ "weight_offset": 0.0,
+ "weight_scale": 1.0,
+ "is_parent": false
+ }
+}
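The file above is sample cell-routing data; as a minimal illustrative sketch (assuming Python 3 and that the file is installed at the path shown), a structural sanity check could look like this:

    # Verify each cell entry in cells.json carries the expected keys.
    import json

    REQUIRED = {"name", "api_url", "transport_url",
                "weight_offset", "weight_scale", "is_parent"}

    with open("/usr/share/openstack/nova/cells.json") as f:
        cells = json.load(f)

    for cell, attrs in cells.items():
        missing = REQUIRED - set(attrs)
        if missing:
            raise ValueError("cell %r is missing keys: %s" % (cell, sorted(missing)))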
diff --git a/openstack/usr/share/openstack/nova/logging.conf b/openstack/usr/share/openstack/nova/logging.conf
new file mode 100644
index 00000000..5482a040
--- /dev/null
+++ b/openstack/usr/share/openstack/nova/logging.conf
@@ -0,0 +1,81 @@
+[loggers]
+keys = root, nova
+
+[handlers]
+keys = stderr, stdout, watchedfile, syslog, null
+
+[formatters]
+keys = context, default
+
+[logger_root]
+level = WARNING
+handlers = null
+
+[logger_nova]
+level = INFO
+handlers = stderr
+qualname = nova
+
+[logger_amqp]
+level = WARNING
+handlers = stderr
+qualname = amqp
+
+[logger_amqplib]
+level = WARNING
+handlers = stderr
+qualname = amqplib
+
+[logger_sqlalchemy]
+level = WARNING
+handlers = stderr
+qualname = sqlalchemy
+# "level = INFO" logs SQL queries.
+# "level = DEBUG" logs SQL queries and results.
+# "level = WARNING" logs neither. (Recommended for production systems.)
+
+[logger_boto]
+level = WARNING
+handlers = stderr
+qualname = boto
+
+[logger_suds]
+level = INFO
+handlers = stderr
+qualname = suds
+
+[logger_eventletwsgi]
+level = WARNING
+handlers = stderr
+qualname = eventlet.wsgi.server
+
+[handler_stderr]
+class = StreamHandler
+args = (sys.stderr,)
+formatter = context
+
+[handler_stdout]
+class = StreamHandler
+args = (sys.stdout,)
+formatter = context
+
+[handler_watchedfile]
+class = handlers.WatchedFileHandler
+args = ('nova.log',)
+formatter = context
+
+[handler_syslog]
+class = handlers.SysLogHandler
+args = ('/dev/log', handlers.SysLogHandler.LOG_USER)
+formatter = context
+
+[handler_null]
+class = nova.openstack.common.log.NullHandler
+formatter = default
+args = ()
+
+[formatter_context]
+class = nova.openstack.common.log.ContextFormatter
+
+[formatter_default]
+format = %(message)s
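This logging.conf follows the stdlib logging.config fileConfig format, so a minimal illustrative sketch of how it is consumed looks like the following; note that the nova-specific handler and formatter classes it names must be importable, and the path is an assumption.

    # Apply the logging configuration above (requires nova's log classes
    # referenced in the file to be importable in this environment).
    import logging.config

    logging.config.fileConfig("/etc/nova/logging.conf",
                              disable_existing_loggers=False)
    logging.getLogger("nova").info("nova logging configured")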
diff --git a/openstack/usr/share/openstack/nova/nova-compute.conf b/openstack/usr/share/openstack/nova/nova-compute.conf
new file mode 100644
index 00000000..8d186211
--- /dev/null
+++ b/openstack/usr/share/openstack/nova/nova-compute.conf
@@ -0,0 +1,4 @@
+[DEFAULT]
+compute_driver={{ COMPUTE_DRIVER }}
+[libvirt]
+virt_type={{ NOVA_VIRT_TYPE }}
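The {{ ... }} placeholders above look like Jinja2/Ansible-style template variables; assuming they are filled in by a Jinja2 rendering step, a minimal sketch (with example values only) would be:

    # Render the templated nova-compute.conf with concrete values.
    from jinja2 import Template

    with open("nova-compute.conf") as src:
        template = Template(src.read())

    rendered = template.render(COMPUTE_DRIVER="libvirt.LibvirtDriver",
                               NOVA_VIRT_TYPE="kvm")

    with open("/etc/nova/nova-compute.conf", "w") as dst:
        dst.write(rendered)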
diff --git a/openstack/usr/share/openstack/nova/nova.conf b/openstack/usr/share/openstack/nova/nova.conf
new file mode 100644
index 00000000..43343cdd
--- /dev/null
+++ b/openstack/usr/share/openstack/nova/nova.conf
@@ -0,0 +1,3809 @@
+[DEFAULT]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The number of prefetched messages held by receiver. (integer
+# value)
+#qpid_receiver_capacity=1
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). Valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+rabbit_host={{ RABBITMQ_HOST }}
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+rabbit_port={{ RABBITMQ_PORT }}
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+rabbit_userid={{ RABBITMQ_USER }}
+
+# The RabbitMQ password. (string value)
+rabbit_password={{ RABBITMQ_PASSWORD }}
+
+# the RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# If passed, use a fake RabbitMQ provider. (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=nova
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+notification_driver=messagingv2
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in nova.availability_zones
+#
+
+# The availability_zone to show internal services under
+# (string value)
+#internal_service_availability_zone=internal
+
+# Default compute node availability_zone (string value)
+#default_availability_zone=nova
+
+
+#
+# Options defined in nova.crypto
+#
+
+# Filename of root CA (string value)
+#ca_file=cacert.pem
+
+# Filename of private key (string value)
+#key_file=private/cakey.pem
+
+# Filename of root Certificate Revocation List (string value)
+#crl_file=crl.pem
+
+# Where we keep our keys (string value)
+#keys_path=$state_path/keys
+
+# Where we keep our root CA (string value)
+#ca_path=$state_path/CA
+
+# Should we use a CA for each project? (boolean value)
+#use_project_ca=false
+
+# Subject for certificate for users, %s for project, user,
+# timestamp (string value)
+#user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s
+
+# Subject for certificate for projects, %s for project,
+# timestamp (string value)
+#project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s
+
+
+#
+# Options defined in nova.exception
+#
+
+# Make exception message format errors fatal (boolean value)
+#fatal_exception_format_errors=false
+
+
+#
+# Options defined in nova.netconf
+#
+
+# IP address of this host (string value)
+my_ip={{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# Name of this node. This can be an opaque identifier. It is
+# not necessarily a hostname, FQDN, or IP address. However,
+# the node name must be valid within an AMQP key, and if using
+# ZeroMQ, a valid hostname, FQDN, or IP address (string value)
+#host=nova
+
+# Use IPv6 (boolean value)
+#use_ipv6=false
+
+
+#
+# Options defined in nova.notifications
+#
+
+# If set, send compute.instance.update notifications on
+# instance state changes. Valid values are None for no
+# notifications, "vm_state" for notifications on VM state
+# changes, or "vm_and_task_state" for notifications on VM and
+# task state changes. (string value)
+notify_on_state_change=vm_and_task_state
+
+# If set, send api.fault notifications on caught exceptions in
+# the API service. (boolean value)
+#notify_api_faults=false
+
+# Default notification level for outgoing notifications
+# (string value)
+#default_notification_level=INFO
+
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=<None>
+
+
+#
+# Options defined in nova.paths
+#
+
+# Directory where the nova python module is installed (string
+# value)
+#pybasedir=/usr/lib/python/site-packages
+
+# Directory where nova binaries are installed (string value)
+#bindir=/usr/local/bin
+
+# Top-level directory for maintaining nova's state (string
+# value)
+state_path=/var/lib/nova
+
+
+#
+# Options defined in nova.quota
+#
+
+# Number of instances allowed per project (integer value)
+#quota_instances=10
+
+# Number of instance cores allowed per project (integer value)
+#quota_cores=20
+
+# Megabytes of instance RAM allowed per project (integer
+# value)
+#quota_ram=51200
+
+# Number of floating IPs allowed per project (integer value)
+#quota_floating_ips=10
+
+# Number of fixed IPs allowed per project (this should be at
+# least the number of instances allowed) (integer value)
+#quota_fixed_ips=-1
+
+# Number of metadata items allowed per instance (integer
+# value)
+#quota_metadata_items=128
+
+# Number of injected files allowed (integer value)
+#quota_injected_files=5
+
+# Number of bytes allowed per injected file (integer value)
+#quota_injected_file_content_bytes=10240
+
+# Length of injected file path (integer value)
+# Deprecated group/name - [DEFAULT]/quota_injected_file_path_bytes
+#quota_injected_file_path_length=255
+
+# Number of security groups per project (integer value)
+#quota_security_groups=10
+
+# Number of security rules per security group (integer value)
+#quota_security_group_rules=20
+
+# Number of key pairs per user (integer value)
+#quota_key_pairs=100
+
+# Number of server groups per project (integer value)
+#quota_server_groups=10
+
+# Number of servers per server group (integer value)
+#quota_server_group_members=10
+
+# Number of seconds until a reservation expires (integer
+# value)
+#reservation_expire=86400
+
+# Count of reservations until usage is refreshed (integer
+# value)
+#until_refresh=0
+
+# Number of seconds between subsequent usage refreshes
+# (integer value)
+#max_age=0
+
+# Default driver to use for quota checks (string value)
+#quota_driver=nova.quota.DbQuotaDriver
+
+
+#
+# Options defined in nova.service
+#
+
+# Seconds between nodes reporting state to datastore (integer
+# value)
+#report_interval=10
+
+# Enable periodic tasks (boolean value)
+#periodic_enable=true
+
+# Range of seconds to randomly delay when starting the
+# periodic task scheduler to reduce stampeding. (Disable by
+# setting to 0) (integer value)
+#periodic_fuzzy_delay=60
+
+# A list of APIs to enable by default (list value)
+enabled_apis=ec2,osapi_compute,metadata
+
+# A list of APIs with enabled SSL (list value)
+#enabled_ssl_apis=
+
+# The IP address on which the EC2 API will listen. (string
+# value)
+#ec2_listen=0.0.0.0
+
+# The port on which the EC2 API will listen. (integer value)
+#ec2_listen_port=8773
+
+# Number of workers for EC2 API service. The default will be
+# equal to the number of CPUs available. (integer value)
+#ec2_workers=<None>
+
+# The IP address on which the OpenStack API will listen.
+# (string value)
+#osapi_compute_listen=0.0.0.0
+
+# The port on which the OpenStack API will listen. (integer
+# value)
+#osapi_compute_listen_port=8774
+
+# Number of workers for OpenStack API service. The default
+# will be the number of CPUs available. (integer value)
+#osapi_compute_workers=<None>
+
+# OpenStack metadata service manager (string value)
+#metadata_manager=nova.api.manager.MetadataManager
+
+# The IP address on which the metadata API will listen.
+# (string value)
+#metadata_listen=0.0.0.0
+
+# The port on which the metadata API will listen. (integer
+# value)
+#metadata_listen_port=8775
+
+# Number of workers for metadata service. The default will be
+# the number of CPUs available. (integer value)
+#metadata_workers=<None>
+
+# Full class name for the Manager for compute (string value)
+compute_manager={{ COMPUTE_MANAGER }}
+
+# Full class name for the Manager for console proxy (string
+# value)
+#console_manager=nova.console.manager.ConsoleProxyManager
+
+# Manager for console auth (string value)
+#consoleauth_manager=nova.consoleauth.manager.ConsoleAuthManager
+
+# Full class name for the Manager for cert (string value)
+#cert_manager=nova.cert.manager.CertManager
+
+# Full class name for the Manager for network (string value)
+#network_manager=nova.network.manager.VlanManager
+
+# Full class name for the Manager for scheduler (string value)
+#scheduler_manager=nova.scheduler.manager.SchedulerManager
+
+# Maximum time since last check-in for up service (integer
+# value)
+#service_down_time=60
+
+
+#
+# Options defined in nova.test
+#
+
+# File name of clean sqlite db (string value)
+#sqlite_clean_db=clean.sqlite
+
+
+#
+# Options defined in nova.utils
+#
+
+# Whether to log monkey patching (boolean value)
+#monkey_patch=false
+
+# List of modules/decorators to monkey patch (list value)
+#monkey_patch_modules=nova.api.ec2.cloud:nova.notifications.notify_decorator,nova.compute.api:nova.notifications.notify_decorator
+
+# Length of generated instance admin passwords (integer value)
+#password_length=12
+
+# Time period to generate instance usages for. Time period
+# must be hour, day, month or year (string value)
+instance_usage_audit_period=hour
+
+# Path to the rootwrap configuration file to use for running
+# commands as root (string value)
+rootwrap_config=/etc/nova/rootwrap.conf
+
+# Explicitly specify the temporary working directory (string
+# value)
+#tempdir=<None>
+
+
+#
+# Options defined in nova.wsgi
+#
+
+# File name for the paste.deploy config for nova-api (string
+# value)
+api_paste_config=api-paste.ini
+
+# A python format string that is used as the template to
+# generate log lines. The following values can be formatted
+# into it: client_ip, date_time, request_line, status_code,
+# body_length, wall_seconds. (string value)
+#wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
+
+# CA certificate file to use to verify connecting clients
+# (string value)
+#ssl_ca_file=<None>
+
+# SSL certificate of API server (string value)
+#ssl_cert_file=<None>
+
+# SSL private key of API server (string value)
+#ssl_key_file=<None>
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server
+# socket. Not supported on OS X. (integer value)
+#tcp_keepidle=600
+
+# Size of the pool of greenthreads used by wsgi (integer
+# value)
+#wsgi_default_pool_size=1000
+
+# Maximum line size of message headers to be accepted.
+# max_header_line may need to be increased when using large
+# tokens (typically those generated by the Keystone v3 API
+# with big service catalogs). (integer value)
+#max_header_line=16384
+
+
+#
+# Options defined in nova.api.auth
+#
+
+# Whether to use per-user rate limiting for the api. This
+# option is only used by v2 api. Rate limiting is removed from
+# v3 api. (boolean value)
+#api_rate_limit=false
+
+# The strategy to use for auth: noauth or keystone. (string
+# value)
+auth_strategy=keystone
+
+# Treat X-Forwarded-For as the canonical remote address. Only
+# enable this if you have a sanitizing proxy. (boolean value)
+#use_forwarded_for=false
+
+
+#
+# Options defined in nova.api.ec2
+#
+
+# Number of failed auths before lockout. (integer value)
+#lockout_attempts=5
+
+# Number of minutes to lockout if triggered. (integer value)
+#lockout_minutes=15
+
+# Number of minutes for lockout window. (integer value)
+#lockout_window=15
+
+# URL to get token from ec2 request. (string value)
+#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens
+
+# Return the IP address as private dns hostname in describe
+# instances (boolean value)
+#ec2_private_dns_show_ip=false
+
+# Validate security group names according to EC2 specification
+# (boolean value)
+#ec2_strict_validation=true
+
+# Time in seconds before ec2 timestamp expires (integer value)
+#ec2_timestamp_expiry=300
+
+
+#
+# Options defined in nova.api.ec2.cloud
+#
+
+# The IP address of the EC2 API server (string value)
+#ec2_host=$my_ip
+
+# The internal IP address of the EC2 API server (string value)
+#ec2_dmz_host=$my_ip
+
+# The port of the EC2 API server (integer value)
+#ec2_port=8773
+
+# The protocol to use when connecting to the EC2 API server
+# (http, https) (string value)
+#ec2_scheme=http
+
+# The path prefix used to call the ec2 API server (string
+# value)
+#ec2_path=/services/Cloud
+
+# List of region=fqdn pairs separated by commas (list value)
+#region_list=
+
+
+#
+# Options defined in nova.api.metadata.base
+#
+
+# List of metadata versions to skip placing into the config
+# drive (string value)
+#config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
+
+# Driver to use for vendor data (string value)
+#vendordata_driver=nova.api.metadata.vendordata_json.JsonFileVendorData
+
+
+#
+# Options defined in nova.api.metadata.vendordata_json
+#
+
+# File to load JSON formatted vendor data from (string value)
+#vendordata_jsonfile_path=<None>
+
+
+#
+# Options defined in nova.api.openstack.common
+#
+
+# The maximum number of items returned in a single response
+# from a collection resource (integer value)
+#osapi_max_limit=1000
+
+# Base URL that will be presented to users in links to the
+# OpenStack Compute API (string value)
+#osapi_compute_link_prefix=<None>
+
+# Base URL that will be presented to users in links to glance
+# resources (string value)
+#osapi_glance_link_prefix=<None>
+
+
+#
+# Options defined in nova.api.openstack.compute
+#
+
+# Permit instance snapshot operations. (boolean value)
+#allow_instance_snapshots=true
+
+
+#
+# Options defined in nova.api.openstack.compute.contrib
+#
+
+# Specify list of extensions to load when using
+# osapi_compute_extension option with
+# nova.api.openstack.compute.contrib.select_extensions (list
+# value)
+osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions
+
+
+#
+# Options defined in nova.api.openstack.compute.contrib.fping
+#
+
+# Full path to fping. (string value)
+#fping_path=/usr/sbin/fping
+
+
+#
+# Options defined in nova.api.openstack.compute.contrib.os_tenant_networks
+#
+
+# Enables or disables quota checking for tenant networks
+# (boolean value)
+#enable_network_quota=false
+
+# Control for checking for default networks (string value)
+#use_neutron_default_nets=False
+
+# Default tenant id when creating neutron networks (string
+# value)
+#neutron_default_tenant_id=default
+
+
+#
+# Options defined in nova.api.openstack.compute.extensions
+#
+
+# osapi compute extension to load (multi valued)
+#osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions
+
+
+#
+# Options defined in nova.api.openstack.compute.plugins.v3.hide_server_addresses
+#
+
+# List of instance states that should hide network info (list
+# value)
+#osapi_hide_server_address_states=building
+
+
+#
+# Options defined in nova.api.openstack.compute.servers
+#
+
+# Enables returning of the instance password by the relevant
+# server API calls such as create, rebuild or rescue. If the
+# hypervisor does not support password injection then the
+# password returned will not be correct (boolean value)
+#enable_instance_password=true
+
+
+#
+# Options defined in nova.api.sizelimit
+#
+
+# The maximum body size per each osapi request(bytes) (integer
+# value)
+#osapi_max_request_body_size=114688
+
+
+#
+# Options defined in nova.cert.rpcapi
+#
+
+# The topic cert nodes listen on (string value)
+#cert_topic=cert
+
+
+#
+# Options defined in nova.cloudpipe.pipelib
+#
+
+# Image ID used when starting up a cloudpipe vpn server
+# (string value)
+#vpn_image_id=0
+
+# Flavor for vpn instances (string value)
+#vpn_flavor=m1.tiny
+
+# Template for cloudpipe instance boot script (string value)
+#boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template
+
+# Network to push into openvpn config (string value)
+#dmz_net=10.0.0.0
+
+# Netmask to push into openvpn config (string value)
+#dmz_mask=255.255.255.0
+
+# Suffix to add to project name for vpn key and secgroups
+# (string value)
+#vpn_key_suffix=-vpn
+
+
+#
+# Options defined in nova.cmd.novnc
+#
+
+# Record sessions to FILE.[session_number] (boolean value)
+#record=false
+
+# Become a daemon (background process) (boolean value)
+#daemon=false
+
+# Disallow non-encrypted connections (boolean value)
+#ssl_only=false
+
+# Source is ipv6 (boolean value)
+#source_is_ipv6=false
+
+# SSL certificate file (string value)
+#cert=self.pem
+
+# SSL key file (if separate from cert) (string value)
+#key=<None>
+
+# Run webserver on same port. Serve files from DIR. (string
+# value)
+#web=/usr/share/spice-html5
+
+
+#
+# Options defined in nova.cmd.novncproxy
+#
+
+# Host on which to listen for incoming requests (string value)
+#novncproxy_host=0.0.0.0
+
+# Port on which to listen for incoming requests (integer
+# value)
+#novncproxy_port=6080
+
+
+#
+# Options defined in nova.compute.api
+#
+
+# Allow destination machine to match source for resize. Useful
+# when testing in single-host environments. (boolean value)
+#allow_resize_to_same_host=false
+
+# Allow migrate machine to the same host. Useful when testing
+# in single-host environments. (boolean value)
+#allow_migrate_to_same_host=false
+
+# Availability zone to use when user doesn't specify one
+# (string value)
+#default_schedule_zone=<None>
+
+# These are image properties which a snapshot should not
+# inherit from an instance (list value)
+#non_inheritable_image_properties=cache_in_nova,bittorrent
+
+# Kernel image that indicates not to use a kernel, but to use
+# a raw disk image instead (string value)
+#null_kernel=nokernel
+
+# When creating multiple instances with a single request using
+# the os-multiple-create API extension, this template will be
+# used to build the display name for each instance. The
+# benefit is that the instances end up with different
+# hostnames. To restore legacy behavior of every instance
+# having the same name, set this option to "%(name)s". Valid
+# keys for the template are: name, uuid, count. (string value)
+#multi_instance_display_name_template=%(name)s-%(uuid)s
+
+# Maximum number of devices that will result in a local image
+# being created on the hypervisor node. Setting this to 0
+# means nova will allow only boot from volume. A negative
+# number means unlimited. (integer value)
+#max_local_block_devices=3
+
+
+#
+# Options defined in nova.compute.flavors
+#
+
+# Default flavor to use for the EC2 API only. The Nova API
+# does not support a default flavor. (string value)
+#default_flavor=m1.small
+
+
+#
+# Options defined in nova.compute.manager
+#
+
+# Console proxy host to use to connect to instances on this
+# host. (string value)
+#console_host=nova
+
+# Name of network to use to set access IPs for instances
+# (string value)
+#default_access_ip_network_name=<None>
+
+# Whether to batch up the application of IPTables rules during
+# a host restart and apply all at the end of the init phase
+# (boolean value)
+#defer_iptables_apply=false
+
+# Where instances are stored on disk (string value)
+#instances_path=$state_path/instances
+
+# Generate periodic compute.instance.exists notifications
+# (boolean value)
+instance_usage_audit=True
+
+# Number of 1 second retries needed in live_migration (integer
+# value)
+#live_migration_retry_count=30
+
+# Whether to start guests that were running before the host
+# rebooted (boolean value)
+#resume_guests_state_on_host_boot=false
+
+# Number of times to retry network allocation on failures
+# (integer value)
+#network_allocate_retries=0
+
+# Number of times to retry block device allocation on failures
+# (integer value)
+#block_device_allocate_retries=60
+
+# The number of times to attempt to reap an instance's files.
+# (integer value)
+#maximum_instance_delete_attempts=5
+
+# Interval to pull network bandwidth usage info. Not supported
+# on all hypervisors. Set to -1 to disable. Setting this to 0
+# will disable, but this will change in the K release to mean
+# "run at the default rate". (integer value)
+#bandwidth_poll_interval=600
+
+# Interval to sync power states between the database and the
+# hypervisor. Set to -1 to disable. Setting this to 0 will
+# disable, but this will change in Juno to mean "run at the
+# default rate". (integer value)
+#sync_power_state_interval=600
+
+# Number of seconds between instance info_cache self healing
+# updates (integer value)
+#heal_instance_info_cache_interval=60
+
+# Interval in seconds for reclaiming deleted instances
+# (integer value)
+#reclaim_instance_interval=0
+
+# Interval in seconds for gathering volume usages (integer
+# value)
+#volume_usage_poll_interval=0
+
+# Interval in seconds for polling shelved instances to
+# offload. Set to -1 to disable. Setting this to 0 will
+# disable, but this will change in Juno to mean "run at the
+# default rate". (integer value)
+#shelved_poll_interval=3600
+
+# Time in seconds before a shelved instance is eligible for
+# removal from a host. -1 never offload, 0 offload when
+# shelved (integer value)
+#shelved_offload_time=0
+
+# Interval in seconds for retrying failed instance file
+# deletes (integer value)
+#instance_delete_interval=300
+
+# Waiting time interval (seconds) between block device
+# allocation retries on failures (integer value)
+#block_device_allocate_retries_interval=3
+
+# Action to take if a running deleted instance is
+# detected. Valid options are 'noop', 'log', 'shutdown', or
+# 'reap'. Set to 'noop' to take no action. (string value)
+#running_deleted_instance_action=reap
+
+# Number of seconds to wait between runs of the cleanup task.
+# (integer value)
+#running_deleted_instance_poll_interval=1800
+
+# Number of seconds after being deleted when a running
+# instance should be considered eligible for cleanup. (integer
+# value)
+#running_deleted_instance_timeout=0
+
+# Automatically hard reboot an instance if it has been stuck
+# in a rebooting state longer than N seconds. Set to 0 to
+# disable. (integer value)
+#reboot_timeout=0
+
+# Amount of time in seconds an instance can be in BUILD before
+# going into ERROR status. Set to 0 to disable. (integer value)
+#instance_build_timeout=0
+
+# Automatically unrescue an instance after N seconds. Set to 0
+# to disable. (integer value)
+#rescue_timeout=0
+
+# Automatically confirm resizes after N seconds. Set to 0 to
+# disable. (integer value)
+#resize_confirm_window=0
+
+# Total amount of time to wait in seconds for an instance to
+# perform a clean shutdown. (integer value)
+#shutdown_timeout=60
+
+
+#
+# Options defined in nova.compute.monitors
+#
+
+# Monitor classes available to the compute which may be
+# specified more than once. (multi valued)
+#compute_available_monitors=nova.compute.monitors.all_monitors
+
+# A list of monitors that can be used for getting compute
+# metrics. (list value)
+#compute_monitors=
+
+
+#
+# Options defined in nova.compute.resource_tracker
+#
+
+# Amount of disk in MB to reserve for the host (integer value)
+#reserved_host_disk_mb=0
+
+# Amount of memory in MB to reserve for the host (integer
+# value)
+reserved_host_memory_mb={{ RESERVED_HOST_MEMORY_MB }}
+
+# Class that will manage stats for the local compute host
+# (string value)
+#compute_stats_class=nova.compute.stats.Stats
+
+# The names of the extra resources to track. (list value)
+#compute_resources=vcpu
+
+
+#
+# Options defined in nova.compute.rpcapi
+#
+
+# The topic compute nodes listen on (string value)
+#compute_topic=compute
+
+
+#
+# Options defined in nova.conductor.tasks.live_migrate
+#
+
+# Number of times to retry live-migration before failing. If
+# == -1, try until out of hosts. If == 0, only try once, no
+# retries. (integer value)
+#migrate_max_retries=-1
+
+
+#
+# Options defined in nova.console.manager
+#
+
+# Driver to use for the console proxy (string value)
+#console_driver=nova.console.xvp.XVPConsoleProxy
+
+# Stub calls to compute worker for tests (boolean value)
+#stub_compute=false
+
+# Publicly visible name for this console host (string value)
+#console_public_hostname=nova
+
+
+#
+# Options defined in nova.console.rpcapi
+#
+
+# The topic console proxy nodes listen on (string value)
+#console_topic=console
+
+
+#
+# Options defined in nova.console.vmrc
+#
+
+# DEPRECATED. Port for VMware VMRC connections (integer value)
+#console_vmrc_port=443
+
+# DEPRECATED. Number of retries for retrieving VMRC
+# information (integer value)
+#console_vmrc_error_retries=10
+
+
+#
+# Options defined in nova.console.xvp
+#
+
+# XVP conf template (string value)
+#console_xvp_conf_template=$pybasedir/nova/console/xvp.conf.template
+
+# Generated XVP conf file (string value)
+#console_xvp_conf=/etc/xvp.conf
+
+# XVP master process pid file (string value)
+#console_xvp_pid=/var/run/xvp.pid
+
+# XVP log file (string value)
+#console_xvp_log=/var/log/xvp.log
+
+# Port for XVP to multiplex VNC connections on (integer value)
+#console_xvp_multiplex_port=5900
+
+
+#
+# Options defined in nova.consoleauth
+#
+
+# The topic console auth proxy nodes listen on (string value)
+#consoleauth_topic=consoleauth
+
+
+#
+# Options defined in nova.consoleauth.manager
+#
+
+# How many seconds before deleting tokens (integer value)
+#console_token_ttl=600
+
+
+#
+# Options defined in nova.db.api
+#
+
+# Services to be added to the available pool on create
+# (boolean value)
+#enable_new_services=true
+
+# Template string to be used to generate instance names
+# (string value)
+instance_name_template=instance-%08x
+
+# Template string to be used to generate snapshot names
+# (string value)
+snapshot_name_template=snapshot-%s
+
+
+#
+# Options defined in nova.db.base
+#
+
+# The driver to use for database access (string value)
+#db_driver=nova.db
+
+
+#
+# Options defined in nova.db.sqlalchemy.api
+#
+
+# When set, compute API will consider duplicate hostnames
+# invalid within the specified scope, regardless of case.
+# Should be empty, "project" or "global". (string value)
+#osapi_compute_unique_server_name_scope=
+
+
+#
+# Options defined in nova.image.s3
+#
+
+# Parent directory for tempdir used for image decryption
+# (string value)
+#image_decryption_dir=/tmp
+
+# Hostname or IP for OpenStack to use when accessing the S3
+# api (string value)
+#s3_host=$my_ip
+
+# Port used when accessing the S3 api (integer value)
+#s3_port=3333
+
+# Access key to use for S3 server for images (string value)
+#s3_access_key=notchecked
+
+# Secret key to use for S3 server for images (string value)
+#s3_secret_key=notchecked
+
+# Whether to use SSL when talking to S3 (boolean value)
+#s3_use_ssl=false
+
+# Whether to affix the tenant id to the access key when
+# downloading from S3 (boolean value)
+#s3_affix_tenant=false
+
+
+#
+# Options defined in nova.ipv6.api
+#
+
+# Backend to use for IPv6 generation (string value)
+#ipv6_backend=rfc2462
+
+
+#
+# Options defined in nova.network
+#
+
+# The full class name of the network API class to use (string
+# value)
+network_api_class=nova.network.neutronv2.api.API
+
+
+#
+# Options defined in nova.network.driver
+#
+
+# Driver to use for network creation (string value)
+#network_driver=nova.network.linux_net
+
+
+#
+# Options defined in nova.network.floating_ips
+#
+
+# Default pool for floating IPs (string value)
+#default_floating_pool=nova
+
+# Autoassigning floating IP to VM (boolean value)
+#auto_assign_floating_ip=false
+
+# Full class name for the DNS Manager for floating IPs (string
+# value)
+#floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
+
+# Full class name for the DNS Manager for instance IPs (string
+# value)
+#instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
+
+# Full class name for the DNS Zone for instance IPs (string
+# value)
+#instance_dns_domain=
+
+
+#
+# Options defined in nova.network.ldapdns
+#
+
+# URL for LDAP server which will store DNS entries (string
+# value)
+#ldap_dns_url=ldap://ldap.example.com:389
+
+# User for LDAP DNS (string value)
+#ldap_dns_user=uid=admin,ou=people,dc=example,dc=org
+
+# Password for LDAP DNS (string value)
+#ldap_dns_password=password
+
+# Hostmaster for LDAP DNS driver Statement of Authority
+# (string value)
+#ldap_dns_soa_hostmaster=hostmaster@example.org
+
+# DNS Servers for LDAP DNS driver (multi valued)
+#ldap_dns_servers=dns.example.org
+
+# Base DN for DNS entries in LDAP (string value)
+#ldap_dns_base_dn=ou=hosts,dc=example,dc=org
+
+# Refresh interval (in seconds) for LDAP DNS driver Statement
+# of Authority (string value)
+#ldap_dns_soa_refresh=1800
+
+# Retry interval (in seconds) for LDAP DNS driver Statement of
+# Authority (string value)
+#ldap_dns_soa_retry=3600
+
+# Expiry interval (in seconds) for LDAP DNS driver Statement
+# of Authority (string value)
+#ldap_dns_soa_expiry=86400
+
+# Minimum interval (in seconds) for LDAP DNS driver Statement
+# of Authority (string value)
+#ldap_dns_soa_minimum=7200
+
+
+#
+# Options defined in nova.network.linux_net
+#
+
+# Location of flagfiles for dhcpbridge (multi valued)
+#dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf
+
+# Location to keep network config files (string value)
+#networks_path=$state_path/networks
+
+# Interface for public IP addresses (string value)
+#public_interface=eth0
+
+# Location of nova-dhcpbridge (string value)
+#dhcpbridge=$bindir/nova-dhcpbridge
+
+# Public IP of network host (string value)
+#routing_source_ip=$my_ip
+
+# Lifetime of a DHCP lease in seconds (integer value)
+#dhcp_lease_time=86400
+
+# If set, uses specific DNS server for dnsmasq. Can be
+# specified multiple times. (multi valued)
+#dns_server=
+
+# If set, uses the dns1 and dns2 from the network ref. as dns
+# servers. (boolean value)
+#use_network_dns_servers=false
+
+# A list of dmz range that should be accepted (list value)
+#dmz_cidr=
+
+# Traffic to this range will always be snatted to the fallback
+# ip, even if it would normally be bridged out of the node.
+# Can be specified multiple times. (multi valued)
+#force_snat_range=
+
+# Override the default dnsmasq settings with this file (string
+# value)
+#dnsmasq_config_file=
+
+# Driver used to create ethernet devices. (string value)
+linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
+
+# Name of Open vSwitch bridge used with linuxnet (string
+# value)
+#linuxnet_ovs_integration_bridge=br-int
+
+# Send gratuitous ARPs for HA setup (boolean value)
+#send_arp_for_ha=false
+
+# Send this many gratuitous ARPs for HA setup (integer value)
+#send_arp_for_ha_count=3
+
+# Use single default gateway. Only first nic of vm will get
+# default gateway from dhcp server (boolean value)
+#use_single_default_gateway=false
+
+# An interface that bridges can forward to. If this is set to
+# all then all traffic will be forwarded. Can be specified
+# multiple times. (multi valued)
+#forward_bridge_interface=all
+
+# The IP address for the metadata API server (string value)
+#metadata_host=$my_ip
+
+# The port for the metadata API port (integer value)
+#metadata_port=8775
+
+# Regular expression to match iptables rule that should always
+# be on the top. (string value)
+#iptables_top_regex=
+
+# Regular expression to match iptables rule that should always
+# be on the bottom. (string value)
+#iptables_bottom_regex=
+
+# The table that iptables should jump to when a packet is to be
+# dropped. (string value)
+#iptables_drop_action=DROP
+
+# Amount of time, in seconds, that ovs_vsctl should wait for a
+# response from the database. 0 is to wait forever. (integer
+# value)
+#ovs_vsctl_timeout=120
+
+# If passed, use fake network devices and addresses (boolean
+# value)
+#fake_network=false
+
+
+#
+# Options defined in nova.network.manager
+#
+
+# Bridge for simple network instances (string value)
+#flat_network_bridge=<None>
+
+# DNS server for simple network (string value)
+#flat_network_dns=8.8.4.4
+
+# Whether to attempt to inject network setup into guest
+# (boolean value)
+#flat_injected=false
+
+# FlatDhcp will bridge into this interface if set (string
+# value)
+#flat_interface=<None>
+
+# First VLAN for private networks (integer value)
+#vlan_start=100
+
+# VLANs will bridge into this interface if set (string value)
+#vlan_interface=<None>
+
+# Number of networks to support (integer value)
+#num_networks=1
+
+# Public IP for the cloudpipe VPN servers (string value)
+#vpn_ip=$my_ip
+
+# First Vpn port for private networks (integer value)
+#vpn_start=1000
+
+# Number of addresses in each private subnet (integer value)
+#network_size=256
+
+# Fixed IPv6 address block (string value)
+#fixed_range_v6=fd00::/48
+
+# Default IPv4 gateway (string value)
+#gateway=<None>
+
+# Default IPv6 gateway (string value)
+#gateway_v6=<None>
+
+# Number of addresses reserved for vpn clients (integer value)
+#cnt_vpn_clients=0
+
+# Seconds after which a deallocated IP is disassociated
+# (integer value)
+#fixed_ip_disassociate_timeout=600
+
+# Number of attempts to create unique mac address (integer
+# value)
+#create_unique_mac_address_attempts=5
+
+# If True, skip using the queue and make local calls (boolean
+# value)
+#fake_call=false
+
+# If True, unused gateway devices (VLAN and bridge) are
+# deleted in VLAN network mode with multi hosted networks
+# (boolean value)
+#teardown_unused_network_gateway=false
+
+# If True, send a dhcp release on instance termination
+# (boolean value)
+#force_dhcp_release=true
+
+# If True, when a DNS entry must be updated, it sends a fanout
+# cast to all network hosts to update their DNS entries in
+# multi host mode (boolean value)
+#update_dns_entries=false
+
+# Number of seconds to wait between runs of updates to DNS
+# entries. (integer value)
+#dns_update_periodic_interval=-1
+
+# Domain to use for building the hostnames (string value)
+#dhcp_domain=novalocal
+
+# Indicates underlying L3 management library (string value)
+#l3_lib=nova.network.l3.LinuxNetL3
+
+
+#
+# Options defined in nova.network.rpcapi
+#
+
+# The topic network nodes listen on (string value)
+#network_topic=network
+
+# Default value for multi_host in networks. Also, if set, some
+# rpc network calls will be sent directly to host. (boolean
+# value)
+#multi_host=false
+
+
+#
+# Options defined in nova.network.security_group.openstack_driver
+#
+
+# The full class name of the security API class (string value)
+security_group_api=neutron
+
+
+#
+# Options defined in nova.objects.network
+#
+
+# DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE
+# NETWORK. If True in multi_host mode, all compute hosts share
+# the same dhcp address. The same IP address used for DHCP
+# will be added on each nova-network node which is only
+# visible to the vms on the same host. (boolean value)
+#share_dhcp_address=false
+
+# DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE
+# NETWORK. MTU setting for network interface. (integer value)
+#network_device_mtu=<None>
+
+
+#
+# Options defined in nova.objectstore.s3server
+#
+
+# Path to S3 buckets (string value)
+#buckets_path=$state_path/buckets
+
+# IP address for S3 API to listen (string value)
+#s3_listen=0.0.0.0
+
+# Port for S3 API to listen (integer value)
+#s3_listen_port=3333
+
+
+#
+# Options defined in nova.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in nova.openstack.common.lockutils
+#
+
+# Enables or disables inter-process locks. (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. (string value)
+lock_path=/var/lock/nova
+
+
+#
+# Options defined in nova.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error. (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context. (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context.
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format.
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
+
+# Enables or disables publication of error events. (boolean
+# value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log
+# message. (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log
+# message. (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of a logging configuration file. This file is
+# appended to any existing logging configuration files. For
+# details about logging configuration files, see the Python
+# logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s . (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=<None>
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and will change in J to honor RFC5424. (boolean
+# value)
+use_syslog=True
+
+# (Optional) Enables or disables syslog rfc5424 format for
+# logging. If enabled, prefixes the MSG part of the syslog
+# message with APP-NAME (RFC5424). The format without the APP-
+# NAME is deprecated in I, and will be removed in J. (boolean
+# value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in nova.openstack.common.memorycache
+#
+
+# Memcached servers or None for in process cache. (list value)
+#memcached_servers=<None>
+
+
+#
+# Options defined in nova.openstack.common.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+
+#
+# Options defined in nova.openstack.common.policy
+#
+
+# The JSON file that defines policies. (string value)
+#policy_file=policy.json
+
+# Default rule. Enforced when a requested rule is not found.
+# (string value)
+#policy_default_rule=default
+
+
+#
+# Options defined in nova.pci.pci_request
+#
+
+# An alias for a PCI passthrough device requirement. This
+# allows users to specify the alias in the extra_spec for a
+# flavor, without needing to repeat all the PCI property
+# requirements. For example: pci_alias = { "name":
+# "QuicAssist", "product_id": "0443", "vendor_id": "8086",
+# "device_type": "ACCEL" } defines an alias for the Intel
+# QuickAssist card. (multi valued)
+#pci_alias=
+
+
+#
+# Options defined in nova.pci.pci_whitelist
+#
+
+# White list of PCI devices available to VMs. For example:
+# pci_passthrough_whitelist = [{"vendor_id": "8086",
+# "product_id": "0443"}] (multi valued)
+#pci_passthrough_whitelist=
+
+
+#
+# Options defined in nova.scheduler.driver
+#
+
+# The scheduler host manager class to use (string value)
+scheduler_host_manager={{ SCHEDULER_HOST_MANAGER }}
+
+
+#
+# Options defined in nova.scheduler.filter_scheduler
+#
+
+# New instances will be scheduled on a host chosen randomly
+# from a subset of the N best hosts. This property defines the
+# subset size that a host is chosen from. A value of 1 chooses
+# the first host returned by the weighing functions. This
+# value must be at least 1. Any value less than 1 will be
+# ignored, and 1 will be used instead (integer value)
+#scheduler_host_subset_size=1
+
+
+#
+# Options defined in nova.scheduler.filters.aggregate_image_properties_isolation
+#
+
+# Force the filter to consider only keys matching the given
+# namespace. (string value)
+#aggregate_image_properties_isolation_namespace=<None>
+
+# The separator used between the namespace and keys (string
+# value)
+#aggregate_image_properties_isolation_separator=.
+
+
+#
+# Options defined in nova.scheduler.filters.core_filter
+#
+
+# Virtual CPU to physical CPU allocation ratio which affects
+# all CPU filters. This configuration specifies a global ratio
+# for CoreFilter. For AggregateCoreFilter, it will fall back
+# to this configuration value if no per-aggregate setting
+# found. (floating point value)
+#cpu_allocation_ratio=16.0
+
+
+#
+# Options defined in nova.scheduler.filters.disk_filter
+#
+
+# Virtual disk to physical disk allocation ratio (floating
+# point value)
+#disk_allocation_ratio=1.0
+
+
+#
+# Options defined in nova.scheduler.filters.io_ops_filter
+#
+
+# Tells filters to ignore hosts that have this many or more
+# instances currently in build, resize, snapshot, migrate,
+# rescue or unshelve task states (integer value)
+#max_io_ops_per_host=8
+
+
+#
+# Options defined in nova.scheduler.filters.isolated_hosts_filter
+#
+
+# Images to run on isolated host (list value)
+#isolated_images=
+
+# Host reserved for specific images (list value)
+#isolated_hosts=
+
+# Whether to force isolated hosts to run only isolated images
+# (boolean value)
+#restrict_isolated_hosts_to_isolated_images=true
+
+
+#
+# Options defined in nova.scheduler.filters.num_instances_filter
+#
+
+# Ignore hosts that have too many instances (integer value)
+#max_instances_per_host=50
+
+
+#
+# Options defined in nova.scheduler.filters.ram_filter
+#
+
+# Virtual ram to physical ram allocation ratio which affects
+# all ram filters. This configuration specifies a global ratio
+# for RamFilter. For AggregateRamFilter, it will fall back to
+# this configuration value if no per-aggregate setting found.
+# (floating point value)
+ram_allocation_ratio={{ RAM_ALLOCATION_RATIO }}
+
+
+#
+# Options defined in nova.scheduler.host_manager
+#
+
+# Filter classes available to the scheduler which may be
+# specified more than once. An entry of
+# "nova.scheduler.filters.standard_filters" maps to all
+# filters included with nova. (multi valued)
+#scheduler_available_filters=nova.scheduler.filters.all_filters
+
+# Which filter class names to use for filtering hosts when not
+# specified in the request. (list value)
+scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
+
+# Which weight class names to use for weighing hosts (list
+# value)
+#scheduler_weight_classes=nova.scheduler.weights.all_weighers
+
+
+#
+# Options defined in nova.scheduler.ironic_host_manager
+#
+
+# Which filter class names to use for filtering baremetal
+# hosts when not specified in the request. (list value)
+#baremetal_scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter
+
+# Flag to decide whether to use
+# baremetal_scheduler_default_filters or not. (boolean value)
+#scheduler_use_baremetal_filters=false
+
+
+#
+# Options defined in nova.scheduler.manager
+#
+
+# Default driver to use for the scheduler (string value)
+scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
+
+# How often (in seconds) to run periodic tasks in the
+# scheduler driver of your choice. Please note this is likely
+# to interact with the value of service_down_time, but exactly
+# how they interact will depend on your choice of scheduler
+# driver. (integer value)
+#scheduler_driver_task_period=60
+
+
+#
+# Options defined in nova.scheduler.rpcapi
+#
+
+# The topic scheduler nodes listen on (string value)
+#scheduler_topic=scheduler
+
+
+#
+# Options defined in nova.scheduler.scheduler_options
+#
+
+# Absolute path to scheduler configuration JSON file. (string
+# value)
+#scheduler_json_config_location=
+
+
+#
+# Options defined in nova.scheduler.utils
+#
+
+# Maximum number of attempts to schedule an instance (integer
+# value)
+#scheduler_max_attempts=3
+
+
+#
+# Options defined in nova.scheduler.weights.ram
+#
+
+# Multiplier used for weighing ram. Negative numbers mean to
+# stack vs spread. (floating point value)
+#ram_weight_multiplier=1.0
+
+
+#
+# Options defined in nova.servicegroup.api
+#
+
+# The driver for servicegroup service (valid options are: db,
+# zk, mc) (string value)
+#servicegroup_driver=db
+
+
+#
+# Options defined in nova.virt.configdrive
+#
+
+# Config drive format. One of iso9660 (default) or vfat
+# (string value)
+#config_drive_format=iso9660
+
+# DEPRECATED (not needed any more): Where to put temporary
+# files associated with config drive creation (string value)
+#config_drive_tempdir=<None>
+
+# Set to force injection to take place on a config drive (if
+# set, valid options are: always) (string value)
+#force_config_drive=<None>
+
+# Name and optionally path of the tool used for ISO image
+# creation (string value)
+#mkisofs_cmd=genisoimage
+
+
+#
+# Options defined in nova.virt.disk.api
+#
+
+# Name of the mkfs commands for ephemeral device. The format
+# is <os_type>=<mkfs command> (multi valued)
+#virt_mkfs=
+
+# Attempt to resize the filesystem by accessing the image over
+# a block device. This is done by the host and may not be
+# necessary if the image contains a recent version of cloud-
+# init. Possible mechanisms require the nbd driver (for qcow
+# and raw), or loop (for raw). (boolean value)
+#resize_fs_using_block_device=false
+
+
+#
+# Options defined in nova.virt.disk.mount.nbd
+#
+
+# Amount of time, in seconds, to wait for NBD device start up.
+# (integer value)
+#timeout_nbd=10
+
+
+#
+# Options defined in nova.virt.driver
+#
+
+# Driver to use for controlling virtualization. Options
+# include: libvirt.LibvirtDriver, xenapi.XenAPIDriver,
+# fake.FakeDriver, baremetal.BareMetalDriver,
+# vmwareapi.VMwareVCDriver, hyperv.HyperVDriver (string value)
+compute_driver={{ COMPUTE_DRIVER }}
+
+# The default format an ephemeral_volume will be formatted
+# with on creation. (string value)
+#default_ephemeral_format=<None>
+
+# VM image preallocation mode: "none" => no storage
+# provisioning is done up front, "space" => storage is fully
+# allocated at instance start (string value)
+#preallocate_images=none
+
+# Whether to use cow images (boolean value)
+#use_cow_images=true
+
+# Fail instance boot if vif plugging fails (boolean value)
+#vif_plugging_is_fatal=true
+
+# Number of seconds to wait for neutron vif plugging events to
+# arrive before continuing or failing (see
+# vif_plugging_is_fatal). If this is set to zero and
+# vif_plugging_is_fatal is False, events should not be
+# expected to arrive at all. (integer value)
+#vif_plugging_timeout=300
+
+
+#
+# Options defined in nova.virt.firewall
+#
+
+# Firewall driver (defaults to hypervisor specific iptables
+# driver) (string value)
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+
+# Whether to allow network traffic from same network (boolean
+# value)
+#allow_same_net_traffic=true
+
+
+#
+# Options defined in nova.virt.hardware
+#
+
+# Defines which pcpus instance vcpus can use. For
+# example, "4-12,^8,15" (string value)
+#vcpu_pin_set=<None>
+
+
+#
+# Options defined in nova.virt.imagecache
+#
+
+# Number of seconds to wait between runs of the image cache
+# manager. Set to -1 to disable. Setting this to 0 will
+# disable, but this will change in the K release to mean "run
+# at the default rate". (integer value)
+#image_cache_manager_interval=2400
+
+# Where cached images are stored under $instances_path. This
+# is NOT the full path - just a folder name. For per-compute-
+# host cached images, set to _base_$my_ip (string value)
+#image_cache_subdirectory_name=_base
+
+# Should unused base images be removed? (boolean value)
+#remove_unused_base_images=true
+
+# Unused unresized base images younger than this will not be
+# removed (integer value)
+#remove_unused_original_minimum_age_seconds=86400
+
+
+#
+# Options defined in nova.virt.images
+#
+
+# Force backing images to raw format (boolean value)
+#force_raw_images=true
+
+
+#
+# Options defined in nova.virt.netutils
+#
+
+# Template file for injected network (string value)
+#injected_network_template=$pybasedir/nova/virt/interfaces.template
+
+
+#
+# Options defined in nova.vnc
+#
+
+# Location of VNC console proxy, in the form
+# "http://127.0.0.1:6080/vnc_auto.html" (string value)
+novncproxy_base_url=http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:6080/vnc_auto.html
+
+# Location of nova xvp VNC console proxy, in the form
+# "http://127.0.0.1:6081/console" (string value)
+#xvpvncproxy_base_url=http://127.0.0.1:6081/console
+
+# IP address on which instance vncservers should listen
+# (string value)
+vncserver_listen=0.0.0.0
+
+# The address to which proxy clients (like nova-xvpvncproxy)
+# should connect (string value)
+vncserver_proxyclient_address={{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# Enable VNC related features (boolean value)
+vnc_enabled=true
+
+# Keymap for VNC (string value)
+vnc_keymap=en-us
+
+
+#
+# Options defined in nova.vnc.xvp_proxy
+#
+
+# Port that the XCP VNC proxy should bind to (integer value)
+#xvpvncproxy_port=6081
+
+# Address that the XCP VNC proxy should bind to (string value)
+#xvpvncproxy_host=0.0.0.0
+
+
+#
+# Options defined in nova.volume
+#
+
+# The full class name of the volume API class to use (string
+# value)
+#volume_api_class=nova.volume.cinder.API
+
+
+[baremetal]
+
+#
+# Options defined in nova.virt.baremetal.db.api
+#
+
+# The backend to use for bare-metal database (string value)
+#db_backend=sqlalchemy
+
+
+#
+# Options defined in nova.virt.baremetal.db.sqlalchemy.session
+#
+
+# The SQLAlchemy connection string used to connect to the
+# bare-metal database (string value)
+#sql_connection=sqlite:///$state_path/baremetal_nova.sqlite
+
+
+#
+# Options defined in nova.virt.baremetal.driver
+#
+
+# Baremetal VIF driver. (string value)
+#vif_driver=nova.virt.baremetal.vif_driver.BareMetalVIFDriver
+
+# Baremetal volume driver. (string value)
+#volume_driver=nova.virt.baremetal.volume_driver.LibvirtVolumeDriver
+
+# A list of additional capabilities corresponding to
+# flavor_extra_specs for this compute host to advertise. Valid
+# entries are name=value pairs. For example, "key1:val1,
+# key2:val2" (list value)
+#flavor_extra_specs=
+
+# Baremetal driver back-end (pxe or tilera) (string value)
+#driver=nova.virt.baremetal.pxe.PXE
+
+# Baremetal power management method (string value)
+#power_manager=nova.virt.baremetal.ipmi.IPMI
+
+# Baremetal compute node's tftp root path (string value)
+#tftp_root=/tftpboot
+
+
+#
+# Options defined in nova.virt.baremetal.ipmi
+#
+
+# Path to baremetal terminal program (string value)
+#terminal=shellinaboxd
+
+# Path to baremetal terminal SSL cert (PEM) (string value)
+#terminal_cert_dir=<None>
+
+# Path to the directory that stores pidfiles of baremetal_terminal
+# (string value)
+#terminal_pid_dir=$state_path/baremetal/console
+
+# Maximum number of retries for IPMI operations (integer
+# value)
+#ipmi_power_retry=10
+
+
+#
+# Options defined in nova.virt.baremetal.pxe
+#
+
+# Default kernel image ID used in deployment phase (string
+# value)
+#deploy_kernel=<None>
+
+# Default ramdisk image ID used in deployment phase (string
+# value)
+#deploy_ramdisk=<None>
+
+# Template file for injected network config (string value)
+#net_config_template=$pybasedir/nova/virt/baremetal/net-dhcp.ubuntu.template
+
+# Additional append parameters for baremetal PXE boot (string
+# value)
+#pxe_append_params=nofb nomodeset vga=normal
+
+# Template file for PXE configuration (string value)
+#pxe_config_template=$pybasedir/nova/virt/baremetal/pxe_config.template
+
+# If True, enable file injection for network info, files and
+# admin password (boolean value)
+#use_file_injection=false
+
+# Timeout for PXE deployments. Default: 0 (unlimited) (integer
+# value)
+#pxe_deploy_timeout=0
+
+# If set, pass the network configuration details to the
+# initramfs via cmdline. (boolean value)
+#pxe_network_config=false
+
+# This gets passed to Neutron as the bootfile dhcp parameter.
+# (string value)
+#pxe_bootfile_name=pxelinux.0
+
+
+#
+# Options defined in nova.virt.baremetal.tilera_pdu
+#
+
+# IP address of tilera pdu (string value)
+#tile_pdu_ip=10.0.100.1
+
+# Management script for tilera pdu (string value)
+#tile_pdu_mgr=/tftpboot/pdu_mgr
+
+# Power status of tilera PDU is OFF (integer value)
+#tile_pdu_off=2
+
+# Power status of tilera PDU is ON (integer value)
+#tile_pdu_on=1
+
+# Power status of tilera PDU (integer value)
+#tile_pdu_status=9
+
+# Wait time in seconds before checking the result of tilera
+# power operations (integer value)
+#tile_power_wait=9
+
+
+#
+# Options defined in nova.virt.baremetal.virtual_power_driver
+#
+
+# IP or name of the virtual power host (string value)
+#virtual_power_ssh_host=
+
+# Port to use for ssh to virtual power host (integer value)
+#virtual_power_ssh_port=22
+
+# Base command to use for virtual power (vbox, virsh) (string
+# value)
+#virtual_power_type=virsh
+
+# User to execute virtual power commands as (string value)
+#virtual_power_host_user=
+
+# Password for virtual power host_user (string value)
+#virtual_power_host_pass=
+
+# The ssh key for virtual power host_user (string value)
+#virtual_power_host_key=<None>
+
+
+#
+# Options defined in nova.virt.baremetal.volume_driver
+#
+
+# Do not set this outside dev/test environments. If a node does
+# not have a fixed PXE IP address, volumes are exported with a
+# globally open ACL (boolean value)
+#use_unsafe_iscsi=false
+
+# The iSCSI IQN prefix used in baremetal volume connections.
+# (string value)
+#iscsi_iqn_prefix=iqn.2010-10.org.openstack.baremetal
+
+
+[cells]
+
+#
+# Options defined in nova.cells.manager
+#
+
+# Cells communication driver to use (string value)
+#driver=nova.cells.rpc_driver.CellsRPCDriver
+
+# Number of seconds after an instance was updated or deleted
+# to continue to update cells (integer value)
+#instance_updated_at_threshold=3600
+
+# Number of instances to update per periodic task run (integer
+# value)
+#instance_update_num_instances=1
+
+
+#
+# Options defined in nova.cells.messaging
+#
+
+# Maximum number of hops for cells routing. (integer value)
+#max_hop_count=10
+
+# Cells scheduler to use (string value)
+#scheduler=nova.cells.scheduler.CellsScheduler
+
+
+#
+# Options defined in nova.cells.opts
+#
+
+# Enable cell functionality (boolean value)
+#enable=false
+
+# The topic cells nodes listen on (string value)
+#topic=cells
+
+# Manager for cells (string value)
+#manager=nova.cells.manager.CellsManager
+
+# Name of this cell (string value)
+#name=nova
+
+# Key/Multi-value list with the capabilities of the cell (list
+# value)
+#capabilities=hypervisor=xenserver;kvm,os=linux;windows
+
+# Seconds to wait for response from a call to a cell. (integer
+# value)
+#call_timeout=60
+
+# Percentage of cell capacity to hold in reserve. Affects both
+# memory and disk utilization (floating point value)
+#reserve_percent=10.0
+
+# Type of cell: api or compute (string value)
+#cell_type=compute
+
+# Number of seconds after which a lack of capability and
+# capacity updates signals that the child cell is to be
+# treated as mute. (integer value)
+#mute_child_interval=300
+
+# Seconds between bandwidth updates for cells. (integer value)
+#bandwidth_update_interval=600
+
+
+#
+# Options defined in nova.cells.rpc_driver
+#
+
+# Base queue name to use when communicating between cells.
+# Various topics by message type will be appended to this.
+# (string value)
+#rpc_driver_queue_base=cells.intercell
+
+
+#
+# Options defined in nova.cells.scheduler
+#
+
+# Filter classes the cells scheduler should use. An entry of
+# "nova.cells.filters.all_filters" maps to all cells filters
+# included with nova. (list value)
+#scheduler_filter_classes=nova.cells.filters.all_filters
+
+# Weigher classes the cells scheduler should use. An entry of
+# "nova.cells.weights.all_weighers" maps to all cell weighers
+# included with nova. (list value)
+#scheduler_weight_classes=nova.cells.weights.all_weighers
+
+# How many retries when no cells are available. (integer
+# value)
+#scheduler_retries=10
+
+# How often to retry in seconds when no cells are available.
+# (integer value)
+#scheduler_retry_delay=2
+
+
+#
+# Options defined in nova.cells.state
+#
+
+# Interval, in seconds, for getting fresh cell information
+# from the database. (integer value)
+#db_check_interval=60
+
+# Configuration file from which to read cells configuration.
+# If given, overrides reading cells from the database. (string
+# value)
+#cells_config=<None>
+
+
+#
+# Options defined in nova.cells.weights.mute_child
+#
+
+# Multiplier used to weigh mute children. (The value should be
+# negative.) (floating point value)
+#mute_weight_multiplier=-10.0
+
+# Weight value assigned to mute children. (The value should be
+# positive.) (floating point value)
+#mute_weight_value=1000.0
+
+
+#
+# Options defined in nova.cells.weights.ram_by_instance_type
+#
+
+# Multiplier used for weighing ram. Negative numbers mean to
+# stack vs spread. (floating point value)
+#ram_weight_multiplier=10.0
+
+
+#
+# Options defined in nova.cells.weights.weight_offset
+#
+
+# Multiplier applied by the offset weigher. (floating point
+# value)
+#offset_weight_multiplier=1.0
+
+
+[cinder]
+
+#
+# Options defined in nova.volume.cinder
+#
+
+# Info to match when looking for cinder in the service
+# catalog. Format is colon-separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+# Deprecated group/name - [DEFAULT]/cinder_catalog_info
+#catalog_info=volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder
+# endpoint e.g. http://localhost:8776/v1/%(project_id)s
+# (string value)
+# Deprecated group/name - [DEFAULT]/cinder_endpoint_template
+#endpoint_template=<None>
+
+# Region name of this node (string value)
+# Deprecated group/name - [DEFAULT]/os_region_name
+#os_region_name=<None>
+
+# Location of ca certificates file to use for cinder client
+# requests. (string value)
+# Deprecated group/name - [DEFAULT]/cinder_ca_certificates_file
+#ca_certificates_file=<None>
+
+# Number of cinderclient retries on failed http calls (integer
+# value)
+# Deprecated group/name - [DEFAULT]/cinder_http_retries
+#http_retries=3
+
+# HTTP inactivity timeout (in seconds) (integer value)
+# Deprecated group/name - [DEFAULT]/cinder_http_timeout
+#http_timeout=<None>
+
+# Allow performing insecure SSL requests to cinder (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/cinder_api_insecure
+#api_insecure=false
+
+# Allow attach between instance and volume in different
+# availability zones. (boolean value)
+# Deprecated group/name - [DEFAULT]/cinder_cross_az_attach
+#cross_az_attach=true
+
+
+[conductor]
+
+#
+# Options defined in nova.conductor.api
+#
+
+# Perform nova-conductor operations locally (boolean value)
+use_local=true
+
+# The topic on which conductor nodes listen (string value)
+#topic=conductor
+
+# Full class name for the Manager for conductor (string value)
+#manager=nova.conductor.manager.ConductorManager
+
+# Number of workers for OpenStack Conductor service. The
+# default will be the number of CPUs available. (integer
+# value)
+#workers=<None>
+
+
+[ephemeral_storage_encryption]
+
+#
+# Options defined in nova.compute.api
+#
+
+# Whether to encrypt ephemeral storage (boolean value)
+#enabled=false
+
+# The cipher and mode to be used to encrypt ephemeral storage.
+# Which ciphers are available depends on kernel
+# support. See /proc/crypto for the list of available options.
+# (string value)
+#cipher=aes-xts-plain64
+
+# The bit length of the encryption key to be used to encrypt
+# ephemeral storage (in XTS mode only half of the bits are
+# used for encryption key) (integer value)
+#key_size=512
+
+
+[glance]
+
+#
+# Options defined in nova.image.glance
+#
+
+# Default glance hostname or IP address (string value)
+# Deprecated group/name - [DEFAULT]/glance_host
+host={{ CONTROLLER_HOST_ADDRESS }}
+
+# Default glance port (integer value)
+# Deprecated group/name - [DEFAULT]/glance_port
+port=9292
+
+# Default protocol to use when connecting to glance. Set to
+# https for SSL. (string value)
+# Deprecated group/name - [DEFAULT]/glance_protocol
+protocol=http
+
+# A list of the glance api servers available to nova. Prefix
+# with https:// for ssl-based glance api servers.
+# ([hostname|ip]:port) (list value)
+# Deprecated group/name - [DEFAULT]/glance_api_servers
+api_servers=$host:$port
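+# With the host and port values above this resolves to, for example,
+# {{ CONTROLLER_HOST_ADDRESS }}:9292.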
+
+# Allow performing insecure SSL (https) requests to glance
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/glance_api_insecure
+#api_insecure=false
+
+# Number of retries when downloading an image from glance
+# (integer value)
+# Deprecated group/name - [DEFAULT]/glance_num_retries
+#num_retries=0
+
+# A list of URL schemes that can be downloaded directly via the
+# direct_url. Currently supported schemes: [file]. (list
+# value)
+#allowed_direct_url_schemes=
+
+
+[hyperv]
+
+#
+# Options defined in nova.virt.hyperv.pathutils
+#
+
+# The name of a Windows share mapped to the
+# "instances_path" dir and used by the resize feature to copy
+# files to the target host. If left blank, an administrative
+# share will be used, looking for the same "instances_path"
+# used locally (string value)
+#instances_path_share=
+
+
+#
+# Options defined in nova.virt.hyperv.utilsfactory
+#
+
+# Force V1 WMI utility classes (boolean value)
+#force_hyperv_utils_v1=false
+
+# Force V1 volume utility class (boolean value)
+#force_volumeutils_v1=false
+
+
+#
+# Options defined in nova.virt.hyperv.vif
+#
+
+# External virtual switch Name, if not provided, the first
+# external virtual switch is used (string value)
+#vswitch_name=<None>
+
+
+#
+# Options defined in nova.virt.hyperv.vmops
+#
+
+# Required for live migration among hosts with different CPU
+# features (boolean value)
+#limit_cpu_features=false
+
+# Sets the admin password in the config drive image (boolean
+# value)
+#config_drive_inject_password=false
+
+# Path of qemu-img command which is used to convert between
+# different image types (string value)
+#qemu_img_cmd=qemu-img.exe
+
+# Attaches the Config Drive image as a cdrom drive instead of
+# a disk drive (boolean value)
+#config_drive_cdrom=false
+
+# Enables metrics collections for an instance by using
+# Hyper-V's metric APIs. Collected data can be retrieved by
+# other apps and services, e.g.: Ceilometer. Requires Hyper-V
+# / Windows Server 2012 and above (boolean value)
+#enable_instance_metrics_collection=false
+
+# Enables dynamic memory allocation (ballooning) when set to a
+# value greater than 1. The value expresses the ratio between
+# the total RAM assigned to an instance and its startup RAM
+# amount. For example a ratio of 2.0 for an instance with
+# 1024MB of RAM implies 512MB of RAM allocated at startup
+# (floating point value)
+#dynamic_memory_ratio=1.0
+
+# Number of seconds to wait for instance to shut down after
+# soft reboot request is made. We fall back to hard reboot if
+# instance does not shutdown within this window. (integer
+# value)
+#wait_soft_reboot_seconds=60
+
+
+#
+# Options defined in nova.virt.hyperv.volumeops
+#
+
+# The number of times to retry to attach a volume (integer
+# value)
+#volume_attach_retry_count=10
+
+# Interval between volume attachment attempts, in seconds
+# (integer value)
+#volume_attach_retry_interval=5
+
+# The number of times to retry checking for a disk mounted via
+# iSCSI. (integer value)
+#mounted_disk_query_retry_count=10
+
+# Interval between checks for a mounted iSCSI disk, in
+# seconds. (integer value)
+#mounted_disk_query_retry_interval=5
+
+
+[image_file_url]
+
+#
+# Options defined in nova.image.download.file
+#
+
+# List of file systems that are configured in this file in the
+# image_file_url:<list entry name> sections (list value)
+#filesystems=
+
+
+[ironic]
+
+#
+# Options defined in nova.virt.ironic.driver
+#
+
+# Version of Ironic API service endpoint. (integer value)
+#api_version=1
+
+# URL for Ironic API endpoint. (string value)
+api_endpoint=http://{{ CONTROLLER_HOST_ADDRESS }}:6385/v1
+
+# Ironic keystone admin name (string value)
+admin_username={{ IRONIC_SERVICE_USER }}
+
+# Ironic keystone admin password. (string value)
+admin_password={{ IRONIC_SERVICE_PASSWORD }}
+
+# Ironic keystone auth token. (string value)
+#admin_auth_token=<None>
+
+# Keystone public API endpoint. (string value)
+admin_url=http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+# Log level override for ironicclient. Set this in order to
+# override the global "default_log_levels", "verbose", and
+# "debug" settings. (string value)
+#client_log_level=<None>
+
+# Ironic keystone tenant name. (string value)
+admin_tenant_name=service
+
+# How many retries when a request does conflict. (integer
+# value)
+#api_max_retries=60
+
+# How often to retry in seconds when a request does conflict
+# (integer value)
+#api_retry_interval=2
+
+
+[keymgr]
+
+#
+# Options defined in nova.keymgr
+#
+
+# The full class name of the key manager API class (string
+# value)
+#api_class=nova.keymgr.conf_key_mgr.ConfKeyManager
+
+
+#
+# Options defined in nova.keymgr.conf_key_mgr
+#
+
+# Fixed key returned by key manager, specified in hex (string
+# value)
+#fixed_key=<None>
+
+
+[keystone_authtoken]
+
+#
+# Options defined in keystonemiddleware.auth_token
+#
+
+# Prefix to prepend at the beginning of the path. Deprecated,
+# use identity_uri. (string value)
+#auth_admin_prefix=
+
+# Host providing the admin Identity API endpoint. Deprecated,
+# use identity_uri. (string value)
+#auth_host=127.0.0.1
+
+# Port of the admin Identity API endpoint. Deprecated, use
+# identity_uri. (integer value)
+auth_port=35357
+
+# Protocol of the admin Identity API endpoint (http or https).
+# Deprecated, use identity_uri. (string value)
+auth_protocol=http
+
+# Complete public Identity API endpoint (string value)
+auth_uri=http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+
+# Complete admin Identity API endpoint. This should specify
+# the unversioned root endpoint e.g. https://localhost:35357/
+# (string value)
+identity_uri=http://{{ CONTROLLER_HOST_ADDRESS }}:35357
+
+# API version of the admin Identity API endpoint (string
+# value)
+auth_version=v2.0
+
+# Do not handle authorization requests within the middleware,
+# but delegate the authorization decision to downstream WSGI
+# components (boolean value)
+#delay_auth_decision=false
+
+# Request timeout value for communicating with Identity API
+# server. (integer value)
+#http_connect_timeout=<None>
+
+# How many times to retry reconnecting when communicating with
+# the Identity API Server. (integer value)
+#http_request_max_retries=3
+
+# This option is deprecated and may be removed in a future
+# release. Single shared secret with the Keystone
+# configuration used for bootstrapping a Keystone
+# installation, or otherwise bypassing the normal
+# authentication process. This option should not be used, use
+# `admin_user` and `admin_password` instead. (string value)
+#admin_token=<None>
+
+# Keystone account username (string value)
+admin_user={{ NOVA_SERVICE_USER }}
+
+# Keystone account password (string value)
+admin_password={{ NOVA_SERVICE_PASSWORD }}
+
+# Keystone service account tenant name to validate user tokens
+# (string value)
+admin_tenant_name=service
+
+# Env key for the swift cache (string value)
+#cache=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#certfile=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#keyfile=<None>
+
+# A PEM encoded Certificate Authority to use when verifying
+# HTTPs connections. Defaults to system CAs. (string value)
+#cafile=<None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure=false
+
+# Directory used to cache files related to PKI tokens (string
+# value)
+#signing_dir=<None>
+
+# Optionally specify a list of memcached server(s) to use for
+# caching. If left undefined, tokens will instead be cached
+# in-process. (list value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers=<None>
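+# Illustrative value only, assuming a memcached instance on the
+# standard port:
+# memcached_servers=127.0.0.1:11211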
+
+# In order to prevent excessive effort spent validating
+# tokens, the middleware caches previously-seen tokens for a
+# configurable duration (in seconds). Set to -1 to disable
+# caching completely. (integer value)
+#token_cache_time=300
+
+# Determines the frequency at which the list of revoked tokens
+# is retrieved from the Identity service (in seconds). A high
+# number of revocation events combined with a low cache
+# duration may significantly reduce performance. (integer
+# value)
+#revocation_cache_time=10
+
+# (optional) if defined, indicate whether token data should be
+# authenticated or authenticated and encrypted. Acceptable
+# values are MAC or ENCRYPT. If MAC, token data is
+# authenticated (with HMAC) in the cache. If ENCRYPT, token
+# data is encrypted and authenticated in the cache. If the
+# value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+#memcache_security_strategy=<None>
+
+# (optional, mandatory if memcache_security_strategy is
+# defined) this string is used for key derivation. (string
+# value)
+#memcache_secret_key=<None>
+
+# (optional) number of seconds memcached server is considered
+# dead before it is tried again. (integer value)
+#memcache_pool_dead_retry=300
+
+# (optional) max total number of open connections to every
+# memcached server. (integer value)
+#memcache_pool_maxsize=10
+
+# (optional) socket timeout in seconds for communicating with
+# a memcache server. (integer value)
+#memcache_pool_socket_timeout=3
+
+# (optional) number of seconds a connection to memcached is
+# held unused in the pool before it is closed. (integer value)
+#memcache_pool_unused_timeout=60
+
+# (optional) number of seconds that an operation will wait to
+# get a memcache client connection from the pool. (integer
+# value)
+#memcache_pool_conn_get_timeout=10
+
+# (optional) use the advanced (eventlet safe) memcache client
+# pool. The advanced pool will only work under python 2.x.
+# (boolean value)
+#memcache_use_advanced_pool=false
+
+# (optional) indicate whether to set the X-Service-Catalog
+# header. If False, middleware will not ask for service
+# catalog on token validation and will not set the X-Service-
+# Catalog header. (boolean value)
+#include_service_catalog=true
+
+# Used to control the use and type of token binding. Can be
+# set to: "disabled" to not check token binding. "permissive"
+# (default) to validate binding information if the bind type
+# is of a form known to the server and ignore it if not.
+# "strict" like "permissive" but if the bind type is unknown
+# the token will be rejected. "required" to require any form
+# of token binding. Finally, the name of a
+# binding method that must be present in tokens. (string
+# value)
+#enforce_token_bind=permissive
+
+# If true, the revocation list will be checked for cached
+# tokens. This requires that PKI tokens are configured on the
+# Keystone server. (boolean value)
+#check_revocations_for_cached=false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a
+# single algorithm or multiple. The algorithms are those
+# supported by Python standard hashlib.new(). The hashes will
+# be tried in the order given, so put the preferred one first
+# for performance. The result of the first hash will be stored
+# in the cache. This will typically be set to multiple values
+# only while migrating from a less secure algorithm to a more
+# secure one. Once all the old tokens are expired this option
+# should be set to a single value for better performance.
+# (list value)
+#hash_algorithms=md5
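+# Illustrative value only: while migrating to a stronger algorithm
+# the preferred hash is listed first, e.g.
+# hash_algorithms=sha256,md5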
+
+
+[libvirt]
+
+#
+# Options defined in nova.virt.libvirt.driver
+#
+
+# Rescue ami image. This will not be used if an image id is
+# provided by the user. (string value)
+#rescue_image_id=<None>
+
+# Rescue aki image (string value)
+#rescue_kernel_id=<None>
+
+# Rescue ari image (string value)
+#rescue_ramdisk_id=<None>
+
+# Libvirt domain type (valid options are: kvm, lxc, qemu, uml,
+# xen) (string value)
+virt_type={{ NOVA_VIRT_TYPE }}
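+# For illustration only: the template variable above would normally
+# expand to kvm on hosts with hardware virtualization support, or
+# qemu otherwise (both are valid options listed above), e.g.
+# virt_type=kvm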
+
+# Override the default libvirt URI (which is dependent on
+# virt_type) (string value)
+#connection_uri=
+
+# Inject the admin password at boot time, without an agent.
+# (boolean value)
+#inject_password=false
+
+# Inject the ssh public key at boot time (boolean value)
+#inject_key=false
+
+# The partition to inject to: -2 => disable, -1 => inspect
+# (libguestfs only), 0 => not partitioned, >0 => partition
+# number (integer value)
+#inject_partition=-2
+
+# Sync virtual and real mouse cursors in Windows VMs (boolean
+# value)
+#use_usb_tablet=true
+
+# Migration target URI (any included "%s" is replaced with the
+# migration target hostname) (string value)
+#live_migration_uri=qemu+tcp://%s/system
+
+# Migration flags to be set for live migration (string value)
+#live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED
+
+# Migration flags to be set for block migration (string value)
+#block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, VIR_MIGRATE_NON_SHARED_INC
+
+# Maximum bandwidth to be used during migration, in Mbps
+# (integer value)
+#live_migration_bandwidth=0
+
+# Snapshot image format (valid options are: raw, qcow2, vmdk,
+# vdi). Defaults to same as source image (string value)
+#snapshot_image_format=<None>
+
+# DEPRECATED. Libvirt handlers for remote volumes. This option
+# is deprecated and will be removed in the Kilo release. (list
+# value)
+#volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver,aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver,glusterfs=nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver,fibre_channel=nova.virt.libvirt.volume.LibvirtFibreChannelVolumeDriver,scality=nova.virt.libvirt.volume.LibvirtScalityVolumeDriver
+
+# Override the default disk prefix for the devices attached to
+# a server, which is dependent on virt_type. (valid options
+# are: sd, xvd, uvd, vd) (string value)
+#disk_prefix=<None>
+
+# Number of seconds to wait for instance to shut down after
+# soft reboot request is made. We fall back to hard reboot if
+# instance does not shutdown within this window. (integer
+# value)
+#wait_soft_reboot_seconds=120
+
+# Set to "host-model" to clone the host CPU feature flags; to
+# "host-passthrough" to use the host CPU model exactly; to
+# "custom" to use a named CPU model; to "none" to not set any
+# CPU model. If virt_type="kvm|qemu", it will default to
+# "host-model", otherwise it will default to "none" (string
+# value)
+#cpu_mode=<None>
+
+# Set to a named libvirt CPU model (see names listed in
+# /usr/share/libvirt/cpu_map.xml). Only has effect if
+# cpu_mode="custom" and virt_type="kvm|qemu" (string value)
+#cpu_model=<None>
+
+# Location where libvirt driver will store snapshots before
+# uploading them to image service (string value)
+#snapshots_directory=$instances_path/snapshots
+
+# Location where the Xen hvmloader is kept (string value)
+#xen_hvmloader_path=/usr/lib/xen/boot/hvmloader
+
+# Specific cachemodes to use for different disk types, e.g.:
+# file=directsync,block=none (list value)
+#disk_cachemodes=
+
+# A path to a device that will be used as source of entropy on
+# the host. Permitted options are: /dev/random or /dev/hwrng
+# (string value)
+#rng_dev_path=<None>
+
+# For qemu or KVM guests, set this option to specify a default
+# machine type per host architecture. You can find a list of
+# supported machine types in your environment by checking the
+# output of the "virsh capabilities"command. The format of the
+# value for this config option is host-arch=machine-type. For
+# example: x86_64=machinetype1,armv7l=machinetype2 (list
+# value)
+#hw_machine_type=<None>
+
+# The data source used to populate the host "serial" UUID
+# exposed to guest in the virtual BIOS. Permitted options are
+# "hardware", "os", "none" or "auto" (default). (string value)
+#sysinfo_serial=auto
+
+# The period, in seconds, for memory usage statistics. A zero
+# or negative value disables memory usage statistics.
+# (integer value)
+#mem_stats_period_seconds=10
+
+# List of uid targets and ranges. Syntax is guest-uid:host-
+# uid:count. Maximum of 5 allowed. (list value)
+#uid_maps=
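+# Illustrative value only, mapping guest uid 0 to host uid 1000 for
+# a single id, following the guest-uid:host-uid:count syntax above:
+# uid_maps=0:1000:1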
+
+# List of gid targets and ranges. Syntax is guest-gid:host-
+# gid:count. Maximum of 5 allowed. (list value)
+#gid_maps=
+
+
+#
+# Options defined in nova.virt.libvirt.imagebackend
+#
+
+# VM Images format. Acceptable values are: raw, qcow2, lvm,
+# rbd, default. If default is specified, then use_cow_images
+# flag is used instead of this one. (string value)
+#images_type=default
+
+# LVM Volume Group that is used for VM images, when you
+# specify images_type=lvm. (string value)
+#images_volume_group=<None>
+
+# Create sparse logical volumes (with virtualsize) if this
+# flag is set to True. (boolean value)
+#sparse_logical_volumes=false
+
+# Method used to wipe old volumes (valid options are: none,
+# zero, shred) (string value)
+#volume_clear=zero
+
+# Size in MiB to wipe at start of old volumes. 0 => all
+# (integer value)
+#volume_clear_size=0
+
+# The RADOS pool in which rbd volumes are stored (string
+# value)
+#images_rbd_pool=rbd
+
+# Path to the ceph configuration file to use (string value)
+#images_rbd_ceph_conf=
+
+# Discard option for nova managed disks (valid options are:
+# ignore, unmap). Requires libvirt 1.0.6, QEMU 1.5 (raw format)
+# or QEMU 1.6 (qcow2 format) (string value)
+#hw_disk_discard=<None>
+
+
+#
+# Options defined in nova.virt.libvirt.imagecache
+#
+
+# Allows image information files to be stored in non-standard
+# locations (string value)
+#image_info_filename_pattern=$instances_path/$image_cache_subdirectory_name/%(image)s.info
+
+# Should unused kernel images be removed? This is only safe to
+# enable if all compute nodes have been updated to support
+# this option. This will be enabled by default in future.
+# (boolean value)
+#remove_unused_kernels=false
+
+# Unused resized base images younger than this will not be
+# removed (integer value)
+#remove_unused_resized_minimum_age_seconds=3600
+
+# Write a checksum for files in _base to disk (boolean value)
+#checksum_base_images=false
+
+# How frequently to checksum base images (integer value)
+#checksum_interval_seconds=3600
+
+
+#
+# Options defined in nova.virt.libvirt.utils
+#
+
+# Compress snapshot images when possible. This currently
+# applies exclusively to qcow2 images (boolean value)
+#snapshot_compression=false
+
+
+#
+# Options defined in nova.virt.libvirt.vif
+#
+
+# Use virtio for bridge interfaces with KVM/QEMU (boolean
+# value)
+#use_virtio_for_bridges=true
+
+
+#
+# Options defined in nova.virt.libvirt.volume
+#
+
+# Number of times to rescan iSCSI target to find volume
+# (integer value)
+#num_iscsi_scan_tries=5
+
+# Number of times to rescan iSER target to find volume
+# (integer value)
+#num_iser_scan_tries=5
+
+# The RADOS client name for accessing rbd volumes (string
+# value)
+#rbd_user=<None>
+
+# The libvirt UUID of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=<None>
+
+# Directory where the NFS volume is mounted on the compute
+# node (string value)
+#nfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the NFS client. See the nfs man page
+# for details (string value)
+#nfs_mount_options=<None>
+
+# Number of times to rediscover AoE target to find volume
+# (integer value)
+#num_aoe_discover_tries=3
+
+# Directory where the glusterfs volume is mounted on the
+# compute node (string value)
+#glusterfs_mount_point_base=$state_path/mnt
+
+# Use multipath connection of the iSCSI volume (boolean value)
+#iscsi_use_multipath=false
+
+# Use multipath connection of the iSER volume (boolean value)
+#iser_use_multipath=false
+
+# Path or URL to Scality SOFS configuration file (string
+# value)
+#scality_sofs_config=<None>
+
+# Base dir where Scality SOFS shall be mounted (string value)
+#scality_sofs_mount_point=$state_path/scality
+
+# Protocols listed here will be accessed directly from QEMU.
+# Currently supported protocols: [gluster] (list value)
+#qemu_allowed_storage_drivers=
+
+
+[matchmaker_redis]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[metrics]
+
+#
+# Options defined in nova.scheduler.weights.metrics
+#
+
+# Multiplier used for weighing metrics. (floating point value)
+#weight_multiplier=1.0
+
+# How the metrics are going to be weighed. This should be in
+# the form of "<name1>=<ratio1>, <name2>=<ratio2>, ...", where
+# <nameX> is one of the metrics to be weighed, and <ratioX> is
+# the corresponding ratio. So for "name1=1.0, name2=-1.0", the
+# final weight would be name1.value * 1.0 + name2.value *
+# -1.0. (list value)
+#weight_setting=
+
+# How to treat unavailable metrics. If set to True, a metric
+# that is not available for a host raises an exception, so it
+# is recommended to use the scheduler filter MetricFilter to
+# filter out those hosts. If set to False, the unavailable
+# metric is treated as a negative factor in the weighing
+# process, and the returned value is set by the option
+# weight_of_unavailable. (boolean value)
+#required=true
+
+# The final weight value to be returned if required is set to
+# False and any one of the metrics set by weight_setting is
+# unavailable. (floating point value)
+#weight_of_unavailable=-10000.0
+
+
+[neutron]
+
+#
+# Options defined in nova.api.metadata.handler
+#
+
+# Set flag to indicate Neutron will proxy metadata requests
+# and resolve instance ids. (boolean value)
+# Deprecated group/name - [DEFAULT]/service_neutron_metadata_proxy
+service_metadata_proxy=True
+
+# Shared secret to validate proxied Neutron metadata requests
+# (string value)
+# Deprecated group/name - [DEFAULT]/neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret={{ METADATA_PROXY_SHARED_SECRET }}
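+# This value is assumed to match the shared secret configured for the
+# Neutron metadata agent elsewhere in this deployment; the two must
+# agree for metadata requests to validate.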
+
+
+#
+# Options defined in nova.network.neutronv2.api
+#
+
+# URL for connecting to neutron (string value)
+# Deprecated group/name - [DEFAULT]/neutron_url
+url=http://{{ CONTROLLER_HOST_ADDRESS }}:9696
+
+# Timeout value for connecting to neutron in seconds (integer
+# value)
+# Deprecated group/name - [DEFAULT]/neutron_url_timeout
+#url_timeout=30
+
+# User id for connecting to neutron in admin context (string
+# value)
+#admin_user_id=<None>
+
+# Username for connecting to neutron in admin context (string
+# value)
+# Deprecated group/name - [DEFAULT]/neutron_admin_username
+admin_username={{ NEUTRON_SERVICE_USER }}
+
+# Password for connecting to neutron in admin context (string
+# value)
+# Deprecated group/name - [DEFAULT]/neutron_admin_password
+admin_password={{ NEUTRON_SERVICE_PASSWORD }}
+
+# Tenant id for connecting to neutron in admin context (string
+# value)
+# Deprecated group/name - [DEFAULT]/neutron_admin_tenant_id
+#admin_tenant_id=<None>
+
+# Tenant name for connecting to neutron in admin context. This
+# option will be ignored if neutron_admin_tenant_id is set.
+# Note that with Keystone V3 tenant names are only unique
+# within a domain. (string value)
+# Deprecated group/name - [DEFAULT]/neutron_admin_tenant_name
+admin_tenant_name=service
+
+# Region name for connecting to neutron in admin context
+# (string value)
+# Deprecated group/name - [DEFAULT]/neutron_region_name
+#region_name=<None>
+
+# Authorization URL for connecting to neutron in admin context
+# (string value)
+# Deprecated group/name - [DEFAULT]/neutron_admin_auth_url
+admin_auth_url=http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+# If set, ignore any SSL validation issues (boolean value)
+# Deprecated group/name - [DEFAULT]/neutron_api_insecure
+#api_insecure=false
+
+# Authorization strategy for connecting to neutron in admin
+# context (string value)
+# Deprecated group/name - [DEFAULT]/neutron_auth_strategy
+auth_strategy=keystone
+
+# Name of Integration Bridge used by Open vSwitch (string
+# value)
+# Deprecated group/name - [DEFAULT]/neutron_ovs_bridge
+#ovs_bridge=br-int
+
+# Number of seconds before querying neutron for extensions
+# (integer value)
+# Deprecated group/name - [DEFAULT]/neutron_extension_sync_interval
+#extension_sync_interval=600
+
+# Location of CA certificates file to use for neutron client
+# requests. (string value)
+# Deprecated group/name - [DEFAULT]/neutron_ca_certificates_file
+#ca_certificates_file=<None>
+
+# Allow an instance to have multiple vNICs attached to the
+# same Neutron network. (boolean value)
+#allow_duplicate_networks=false
+
+
+[osapi_v3]
+
+#
+# Options defined in nova.api.openstack
+#
+
+# Whether the V3 API is enabled or not (boolean value)
+#enabled=false
+
+# A list of v3 API extensions to never load. Specify the
+# extension aliases here. (list value)
+#extensions_blacklist=
+
+# If the list is not empty then a v3 API extension will only
+# be loaded if it exists in this list. Specify the extension
+# aliases here. (list value)
+#extensions_whitelist=
+
+
+[rdp]
+
+#
+# Options defined in nova.rdp
+#
+
+# Location of RDP html5 console proxy, in the form
+# "http://127.0.0.1:6083/" (string value)
+#html5_proxy_base_url=http://127.0.0.1:6083/
+
+# Enable RDP related features (boolean value)
+#enabled=false
+
+
+[serial_console]
+
+#
+# Options defined in nova.cmd.serialproxy
+#
+
+# Host on which to listen for incoming requests (string value)
+serialproxy_host=127.0.0.1
+
+# Port on which to listen for incoming requests (integer
+# value)
+#serialproxy_port=6083
+
+
+#
+# Options defined in nova.console.serial
+#
+
+# Enable serial console related features (boolean value)
+enabled=false
+
+# Range of TCP ports to use for serial ports on compute hosts
+# (string value)
+#port_range=10000:20000
+
+# Location of serial console proxy. (string value)
+#base_url=ws://127.0.0.1:6083/
+
+# IP address on which instance serial console should listen
+# (string value)
+#listen=127.0.0.1
+
+# The address to which proxy clients (like nova-serialproxy)
+# should connect (string value)
+#proxyclient_address=127.0.0.1
+
+
+[spice]
+
+#
+# Options defined in nova.cmd.spicehtml5proxy
+#
+
+# Host on which to listen for incoming requests (string value)
+# Deprecated group/name - [DEFAULT]/spicehtml5proxy_host
+#html5proxy_host=0.0.0.0
+
+# Port on which to listen for incoming requests (integer
+# value)
+# Deprecated group/name - [DEFAULT]/spicehtml5proxy_port
+#html5proxy_port=6082
+
+
+#
+# Options defined in nova.spice
+#
+
+# Location of spice HTML5 console proxy, in the form
+# "http://127.0.0.1:6082/spice_auto.html" (string value)
+#html5proxy_base_url=http://127.0.0.1:6082/spice_auto.html
+
+# IP address on which instance spice server should listen
+# (string value)
+#server_listen=127.0.0.1
+
+# The address to which proxy clients (like nova-
+# spicehtml5proxy) should connect (string value)
+#server_proxyclient_address=127.0.0.1
+
+# Enable spice related features (boolean value)
+enabled=false
+
+# Enable spice guest agent support (boolean value)
+#agent_enabled=true
+
+# Keymap for spice (string value)
+#keymap=en-us
+
+
+[ssl]
+
+#
+# Options defined in nova.openstack.common.sslutils
+#
+
+# CA certificate file to use to verify connecting clients.
+# (string value)
+#ca_file=<None>
+
+# Certificate file to use when starting the server securely.
+# (string value)
+#cert_file=<None>
+
+# Private key file to use when starting the server securely.
+# (string value)
+#key_file=<None>
+
+
+[trusted_computing]
+
+#
+# Options defined in nova.scheduler.filters.trusted_filter
+#
+
+# Attestation server HTTP (string value)
+#attestation_server=<None>
+
+# Attestation server Cert file for Identity verification
+# (string value)
+#attestation_server_ca_file=<None>
+
+# Attestation server port (string value)
+#attestation_port=8443
+
+# Attestation web API URL (string value)
+#attestation_api_url=/OpenAttestationWebServices/V1.0
+
+# Attestation authorization blob - must change (string value)
+#attestation_auth_blob=<None>
+
+# Attestation status cache valid period length (integer value)
+#attestation_auth_timeout=60
+
+# Disable SSL cert verification for Attestation service
+# (boolean value)
+#attestation_insecure_ssl=false
+
+
+[upgrade_levels]
+
+#
+# Options defined in nova.baserpc
+#
+
+# Set a version cap for messages sent to the base api in any
+# service (string value)
+#baseapi=<None>
+
+
+#
+# Options defined in nova.cells.rpc_driver
+#
+
+# Set a version cap for messages sent between cells services
+# (string value)
+#intercell=<None>
+
+
+#
+# Options defined in nova.cells.rpcapi
+#
+
+# Set a version cap for messages sent to local cells services
+# (string value)
+#cells=<None>
+
+
+#
+# Options defined in nova.cert.rpcapi
+#
+
+# Set a version cap for messages sent to cert services (string
+# value)
+#cert=<None>
+
+
+#
+# Options defined in nova.compute.rpcapi
+#
+
+# Set a version cap for messages sent to compute services. If
+# you plan to do a live upgrade from havana to icehouse, you
+# should set this option to "icehouse-compat" before beginning
+# the live upgrade procedure. (string value)
+#compute=<None>
+
+
+#
+# Options defined in nova.conductor.rpcapi
+#
+
+# Set a version cap for messages sent to conductor services
+# (string value)
+#conductor=<None>
+
+
+#
+# Options defined in nova.console.rpcapi
+#
+
+# Set a version cap for messages sent to console services
+# (string value)
+#console=<None>
+
+
+#
+# Options defined in nova.consoleauth.rpcapi
+#
+
+# Set a version cap for messages sent to consoleauth services
+# (string value)
+#consoleauth=<None>
+
+
+#
+# Options defined in nova.network.rpcapi
+#
+
+# Set a version cap for messages sent to network services
+# (string value)
+#network=<None>
+
+
+#
+# Options defined in nova.scheduler.rpcapi
+#
+
+# Set a version cap for messages sent to scheduler services
+# (string value)
+#scheduler=<None>
+
+
+[vmware]
+
+#
+# Options defined in nova.virt.vmwareapi.driver
+#
+
+# Hostname or IP address for connection to VMware VC host.
+# (string value)
+#host_ip=<None>
+
+# Port for connection to VMware VC host. (integer value)
+#host_port=443
+
+# Username for connection to VMware VC host. (string value)
+#host_username=<None>
+
+# Password for connection to VMware VC host. (string value)
+#host_password=<None>
+
+# Name of a VMware Cluster ComputeResource. (multi valued)
+#cluster_name=<None>
+
+# Regex to match the name of a datastore. (string value)
+#datastore_regex=<None>
+
+# The interval used for polling of remote tasks. (floating
+# point value)
+#task_poll_interval=0.5
+
+# The number of times we retry on failures, e.g., socket
+# error, etc. (integer value)
+#api_retry_count=10
+
+# VNC starting port (integer value)
+#vnc_port=5900
+
+# Total number of VNC ports (integer value)
+#vnc_port_total=10000
+
+# Whether to use linked clone (boolean value)
+#use_linked_clone=true
+
+# Optional VIM Service WSDL location, e.g.
+# http://<server>/vimService.wsdl. Optional override of the
+# default location for bug workarounds (string value)
+#wsdl_location=<None>
+
+
+#
+# Options defined in nova.virt.vmwareapi.vif
+#
+
+# Physical ethernet adapter name for vlan networking (string
+# value)
+#vlan_interface=vmnic0
+
+# Name of Integration Bridge (string value)
+#integration_bridge=br-int
+
+
+#
+# Options defined in nova.virt.vmwareapi.vim_util
+#
+
+# The maximum number of ObjectContent data objects that should
+# be returned in a single result. A positive value will cause
+# the operation to suspend the retrieval when the count of
+# objects reaches the specified maximum. The server may still
+# limit the count to something less than the configured value.
+# Any remaining objects may be retrieved with additional
+# requests. (integer value)
+#maximum_objects=100
+
+
+[xenserver]
+
+#
+# Options defined in nova.virt.xenapi.agent
+#
+
+# Number of seconds to wait for agent reply (integer value)
+#agent_timeout=30
+
+# Number of seconds to wait for agent to be fully operational
+# (integer value)
+#agent_version_timeout=300
+
+# Number of seconds to wait for agent reply to resetnetwork
+# request (integer value)
+#agent_resetnetwork_timeout=60
+
+# Specifies the path in which the XenAPI guest agent should be
+# located. If the agent is present, network configuration is
+# not injected into the image. Used if
+# compute_driver=xenapi.XenAPIDriver and flat_injected=True
+# (string value)
+#agent_path=usr/sbin/xe-update-networking
+
+# Disables the use of the XenAPI agent in any image regardless
+# of what image properties are present. (boolean value)
+#disable_agent=false
+
+# Determines if the XenAPI agent should be used when the image
+# used does not contain a hint to declare if the agent is
+# present or not. The hint is a glance property
+# "xenapi_use_agent" that has the value "True" or "False".
+# Note that waiting for the agent when it is not present will
+# significantly increase server boot times. (boolean value)
+#use_agent_default=false
+
+
+#
+# Options defined in nova.virt.xenapi.client.session
+#
+
+# Timeout in seconds for XenAPI login. (integer value)
+#login_timeout=10
+
+# Maximum number of concurrent XenAPI connections. Used only
+# if compute_driver=xenapi.XenAPIDriver (integer value)
+#connection_concurrent=5
+
+
+#
+# Options defined in nova.virt.xenapi.driver
+#
+
+# URL for connection to XenServer/Xen Cloud Platform. A
+# special value of unix://local can be used to connect to the
+# local unix socket. Required if
+# compute_driver=xenapi.XenAPIDriver (string value)
+#connection_url=<None>
+
+# Username for connection to XenServer/Xen Cloud Platform.
+# Used only if compute_driver=xenapi.XenAPIDriver (string
+# value)
+#connection_username=root
+
+# Password for connection to XenServer/Xen Cloud Platform.
+# Used only if compute_driver=xenapi.XenAPIDriver (string
+# value)
+#connection_password=<None>
+
+# The interval used for polling of coalescing vhds. Used only
+# if compute_driver=xenapi.XenAPIDriver (floating point value)
+#vhd_coalesce_poll_interval=5.0
+
+# Ensure compute service is running on host XenAPI connects
+# to. (boolean value)
+#check_host=true
+
+# Max number of times to poll for VHD to coalesce. Used only
+# if compute_driver=xenapi.XenAPIDriver (integer value)
+#vhd_coalesce_max_attempts=20
+
+# Base path to the storage repository (string value)
+#sr_base_path=/var/run/sr-mount
+
+# The iSCSI Target Host (string value)
+#target_host=<None>
+
+# The iSCSI Target Port, default is port 3260 (string value)
+#target_port=3260
+
+# IQN Prefix (string value)
+#iqn_prefix=iqn.2010-10.org.openstack
+
+# Used to enable the remapping of VBD dev (Works around an
+# issue in Ubuntu Maverick) (boolean value)
+#remap_vbd_dev=false
+
+# Specify prefix to remap VBD dev to (ex. /dev/xvdb ->
+# /dev/sdb) (string value)
+#remap_vbd_dev_prefix=sd
+
+
+#
+# Options defined in nova.virt.xenapi.image.bittorrent
+#
+
+# Base URL for torrent files. (string value)
+#torrent_base_url=<None>
+
+# Probability that peer will become a seeder. (1.0 = 100%)
+# (floating point value)
+#torrent_seed_chance=1.0
+
+# Number of seconds after downloading an image via BitTorrent
+# that it should be seeded for other peers. (integer value)
+#torrent_seed_duration=3600
+
+# Cached torrent files not accessed within this number of
+# seconds can be reaped (integer value)
+#torrent_max_last_accessed=86400
+
+# Beginning of port range to listen on (integer value)
+#torrent_listen_port_start=6881
+
+# End of port range to listen on (integer value)
+#torrent_listen_port_end=6891
+
+# Number of seconds a download can remain at the same progress
+# percentage w/o being considered a stall (integer value)
+#torrent_download_stall_cutoff=600
+
+# Maximum number of seeder processes to run concurrently
+# within a given dom0. (-1 = no limit) (integer value)
+#torrent_max_seeder_processes_per_host=1
+
+
+#
+# Options defined in nova.virt.xenapi.pool
+#
+
+# Whether to use join-force, for hosts with different CPUs (boolean value)
+#use_join_force=true
+
+
+#
+# Options defined in nova.virt.xenapi.vif
+#
+
+# Name of Integration Bridge used by Open vSwitch (string
+# value)
+#ovs_integration_bridge=xapi1
+
+
+#
+# Options defined in nova.virt.xenapi.vm_utils
+#
+
+# Cache glance images locally. `all` will cache all images,
+# `some` will only cache images that have the image_property
+# `cache_in_nova=True`, and `none` turns off caching entirely
+# (string value)
+#cache_images=all
+
+# Compression level for images, e.g., 9 for gzip -9. Range is
+# 1-9, 9 being most compressed but most CPU intensive on dom0.
+# (integer value)
+#image_compression_level=<None>
+
+# Default OS type (string value)
+#default_os_type=linux
+
+# Time to wait for a block device to be created (integer
+# value)
+#block_device_creation_timeout=10
+
+# Maximum size in bytes of kernel or ramdisk images (integer
+# value)
+#max_kernel_ramdisk_size=16777216
+
+# Filter for finding the SR to be used to install guest
+# instances on. To use the Local Storage in default
+# XenServer/XCP installations set this flag to
+# other-config:i18n-key=local-storage. To select an SR with
+# different matching criteria, you could set it to other-
+# config:my_favorite_sr=true. On the other hand, to fall back
+# on the Default SR, as displayed by XenCenter, set this flag
+# to: default-sr:true (string value)
+#sr_matching_filter=default-sr:true
+
+# Whether to use sparse_copy for copying data on a resize down
+# (False will use standard dd). This speeds up resizes down
+# considerably since large runs of zeros won't have to be
+# rsynced (boolean value)
+#sparse_copy=true
+
+# Maximum number of retries to unplug VBD (integer value)
+#num_vbd_unplug_retries=10
+
+# Whether or not to download images via Bit Torrent
+# (all|some|none). (string value)
+#torrent_images=none
+
+# Name of network to use for booting iPXE ISOs (string value)
+#ipxe_network_name=<None>
+
+# URL to the iPXE boot menu (string value)
+#ipxe_boot_menu_url=<None>
+
+# Name and optionally path of the tool used for ISO image
+# creation (string value)
+#ipxe_mkisofs_cmd=mkisofs
+
+
+#
+# Options defined in nova.virt.xenapi.vmops
+#
+
+# Number of seconds to wait for instance to go to running
+# state (integer value)
+#running_timeout=60
+
+# The XenAPI VIF driver using XenServer Network APIs. (string
+# value)
+#vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver
+
+# Dom0 plugin driver used to handle image uploads. (string
+# value)
+#image_upload_handler=nova.virt.xenapi.image.glance.GlanceStore
+
+
+#
+# Options defined in nova.virt.xenapi.volume_utils
+#
+
+# Number of seconds to wait for an SR to settle if the VDI
+# does not exist when first introduced (integer value)
+#introduce_vdi_retry_wait=20
+
+
+[zookeeper]
+
+#
+# Options defined in nova.servicegroup.drivers.zk
+#
+
+# The ZooKeeper addresses for servicegroup service in the
+# format of host1:port,host2:port,host3:port (string value)
+#address=<None>
+
+# The recv_timeout parameter for the zk session (integer
+# value)
+#recv_timeout=4000
+
+# The prefix used in ZooKeeper to store ephemeral nodes
+# (string value)
+#sg_prefix=/servicegroups
+
+# Number of seconds to wait until retrying to join the session
+# (integer value)
+#sg_retry_interval=5
+
+[database]
+
+# The SQLAlchemy connection string to use to connect to the
+# database. (string value)
+connection=postgresql://{{ NOVA_DB_USER }}:{{ NOVA_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/nova
diff --git a/openstack/usr/share/openstack/nova/policy.json b/openstack/usr/share/openstack/nova/policy.json
new file mode 100644
index 00000000..cc5b8ea4
--- /dev/null
+++ b/openstack/usr/share/openstack/nova/policy.json
@@ -0,0 +1,324 @@
+{
+ "context_is_admin": "role:admin",
+ "admin_or_owner": "is_admin:True or project_id:%(project_id)s",
+ "default": "rule:admin_or_owner",
+
+ "cells_scheduler_filter:TargetCellFilter": "is_admin:True",
+
+ "compute:create": "",
+ "compute:create:attach_network": "",
+ "compute:create:attach_volume": "",
+ "compute:create:forced_host": "is_admin:True",
+ "compute:get_all": "",
+ "compute:get_all_tenants": "",
+ "compute:start": "rule:admin_or_owner",
+ "compute:stop": "rule:admin_or_owner",
+ "compute:unlock_override": "rule:admin_api",
+
+ "compute:shelve": "",
+ "compute:shelve_offload": "",
+ "compute:unshelve": "",
+
+ "compute:volume_snapshot_create": "",
+ "compute:volume_snapshot_delete": "",
+
+ "admin_api": "is_admin:True",
+ "compute:v3:servers:start": "rule:admin_or_owner",
+ "compute:v3:servers:stop": "rule:admin_or_owner",
+ "compute_extension:v3:os-access-ips:discoverable": "",
+ "compute_extension:v3:os-access-ips": "",
+ "compute_extension:accounts": "rule:admin_api",
+ "compute_extension:admin_actions": "rule:admin_api",
+ "compute_extension:admin_actions:pause": "rule:admin_or_owner",
+ "compute_extension:admin_actions:unpause": "rule:admin_or_owner",
+ "compute_extension:admin_actions:suspend": "rule:admin_or_owner",
+ "compute_extension:admin_actions:resume": "rule:admin_or_owner",
+ "compute_extension:admin_actions:lock": "rule:admin_or_owner",
+ "compute_extension:admin_actions:unlock": "rule:admin_or_owner",
+ "compute_extension:admin_actions:resetNetwork": "rule:admin_api",
+ "compute_extension:admin_actions:injectNetworkInfo": "rule:admin_api",
+ "compute_extension:admin_actions:createBackup": "rule:admin_or_owner",
+ "compute_extension:admin_actions:migrateLive": "rule:admin_api",
+ "compute_extension:admin_actions:resetState": "rule:admin_api",
+ "compute_extension:admin_actions:migrate": "rule:admin_api",
+ "compute_extension:v3:os-admin-actions": "rule:admin_api",
+ "compute_extension:v3:os-admin-actions:discoverable": "",
+ "compute_extension:v3:os-admin-actions:reset_network": "rule:admin_api",
+ "compute_extension:v3:os-admin-actions:inject_network_info": "rule:admin_api",
+ "compute_extension:v3:os-admin-actions:reset_state": "rule:admin_api",
+ "compute_extension:v3:os-admin-password": "",
+ "compute_extension:v3:os-admin-password:discoverable": "",
+ "compute_extension:aggregates": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:discoverable": "",
+ "compute_extension:v3:os-aggregates:index": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:create": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:show": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:update": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:delete": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:add_host": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:remove_host": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:set_metadata": "rule:admin_api",
+ "compute_extension:agents": "rule:admin_api",
+ "compute_extension:v3:os-agents": "rule:admin_api",
+ "compute_extension:v3:os-agents:discoverable": "",
+ "compute_extension:attach_interfaces": "",
+ "compute_extension:v3:os-attach-interfaces": "",
+ "compute_extension:v3:os-attach-interfaces:discoverable": "",
+ "compute_extension:baremetal_nodes": "rule:admin_api",
+ "compute_extension:cells": "rule:admin_api",
+ "compute_extension:v3:os-cells": "rule:admin_api",
+ "compute_extension:v3:os-cells:discoverable": "",
+ "compute_extension:certificates": "",
+ "compute_extension:v3:os-certificates:create": "",
+ "compute_extension:v3:os-certificates:show": "",
+ "compute_extension:v3:os-certificates:discoverable": "",
+ "compute_extension:cloudpipe": "rule:admin_api",
+ "compute_extension:cloudpipe_update": "rule:admin_api",
+ "compute_extension:console_output": "",
+ "compute_extension:v3:consoles:discoverable": "",
+ "compute_extension:v3:os-console-output:discoverable": "",
+ "compute_extension:v3:os-console-output": "",
+ "compute_extension:consoles": "",
+ "compute_extension:v3:os-remote-consoles": "",
+ "compute_extension:v3:os-remote-consoles:discoverable": "",
+ "compute_extension:createserverext": "",
+ "compute_extension:v3:os-create-backup:discoverable": "",
+ "compute_extension:v3:os-create-backup": "rule:admin_or_owner",
+ "compute_extension:deferred_delete": "",
+ "compute_extension:v3:os-deferred-delete": "",
+ "compute_extension:v3:os-deferred-delete:discoverable": "",
+ "compute_extension:disk_config": "",
+ "compute_extension:evacuate": "rule:admin_api",
+ "compute_extension:v3:os-evacuate": "rule:admin_api",
+ "compute_extension:v3:os-evacuate:discoverable": "",
+ "compute_extension:extended_server_attributes": "rule:admin_api",
+ "compute_extension:v3:os-extended-server-attributes": "rule:admin_api",
+ "compute_extension:v3:os-extended-server-attributes:discoverable": "",
+ "compute_extension:extended_status": "",
+ "compute_extension:v3:os-extended-status": "",
+ "compute_extension:v3:os-extended-status:discoverable": "",
+ "compute_extension:extended_availability_zone": "",
+ "compute_extension:v3:os-extended-availability-zone": "",
+ "compute_extension:v3:os-extended-availability-zone:discoverable": "",
+ "compute_extension:extended_ips": "",
+ "compute_extension:extended_ips_mac": "",
+ "compute_extension:extended_vif_net": "",
+ "compute_extension:v3:extension_info:discoverable": "",
+ "compute_extension:extended_volumes": "",
+ "compute_extension:v3:os-extended-volumes": "",
+ "compute_extension:v3:os-extended-volumes:swap": "",
+ "compute_extension:v3:os-extended-volumes:discoverable": "",
+ "compute_extension:v3:os-extended-volumes:attach": "",
+ "compute_extension:v3:os-extended-volumes:detach": "",
+ "compute_extension:fixed_ips": "rule:admin_api",
+ "compute_extension:flavor_access": "",
+ "compute_extension:flavor_access:addTenantAccess": "rule:admin_api",
+ "compute_extension:flavor_access:removeTenantAccess": "rule:admin_api",
+ "compute_extension:v3:flavor-access": "",
+ "compute_extension:v3:flavor-access:discoverable": "",
+ "compute_extension:v3:flavor-access:remove_tenant_access": "rule:admin_api",
+ "compute_extension:v3:flavor-access:add_tenant_access": "rule:admin_api",
+ "compute_extension:flavor_disabled": "",
+ "compute_extension:flavor_rxtx": "",
+ "compute_extension:v3:os-flavor-rxtx": "",
+ "compute_extension:v3:os-flavor-rxtx:discoverable": "",
+ "compute_extension:flavor_swap": "",
+ "compute_extension:flavorextradata": "",
+ "compute_extension:flavorextraspecs:index": "",
+ "compute_extension:flavorextraspecs:show": "",
+ "compute_extension:flavorextraspecs:create": "rule:admin_api",
+ "compute_extension:flavorextraspecs:update": "rule:admin_api",
+ "compute_extension:flavorextraspecs:delete": "rule:admin_api",
+ "compute_extension:v3:flavors:discoverable": "",
+ "compute_extension:v3:flavor-extra-specs:discoverable": "",
+ "compute_extension:v3:flavor-extra-specs:index": "",
+ "compute_extension:v3:flavor-extra-specs:show": "",
+ "compute_extension:v3:flavor-extra-specs:create": "rule:admin_api",
+ "compute_extension:v3:flavor-extra-specs:update": "rule:admin_api",
+ "compute_extension:v3:flavor-extra-specs:delete": "rule:admin_api",
+ "compute_extension:flavormanage": "rule:admin_api",
+ "compute_extension:v3:flavor-manage": "rule:admin_api",
+ "compute_extension:floating_ip_dns": "",
+ "compute_extension:floating_ip_pools": "",
+ "compute_extension:floating_ips": "",
+ "compute_extension:floating_ips_bulk": "rule:admin_api",
+ "compute_extension:fping": "",
+ "compute_extension:fping:all_tenants": "rule:admin_api",
+ "compute_extension:hide_server_addresses": "is_admin:False",
+ "compute_extension:v3:os-hide-server-addresses": "is_admin:False",
+ "compute_extension:v3:os-hide-server-addresses:discoverable": "",
+ "compute_extension:hosts": "rule:admin_api",
+ "compute_extension:v3:os-hosts": "rule:admin_api",
+ "compute_extension:v3:os-hosts:discoverable": "",
+ "compute_extension:hypervisors": "rule:admin_api",
+ "compute_extension:v3:os-hypervisors": "rule:admin_api",
+ "compute_extension:v3:os-hypervisors:discoverable": "",
+ "compute_extension:image_size": "",
+ "compute_extension:instance_actions": "",
+ "compute_extension:v3:os-server-actions": "",
+ "compute_extension:v3:os-server-actions:discoverable": "",
+ "compute_extension:instance_actions:events": "rule:admin_api",
+ "compute_extension:v3:os-server-actions:events": "rule:admin_api",
+ "compute_extension:instance_usage_audit_log": "rule:admin_api",
+ "compute_extension:v3:ips:discoverable": "",
+ "compute_extension:keypairs": "",
+ "compute_extension:keypairs:index": "",
+ "compute_extension:keypairs:show": "",
+ "compute_extension:keypairs:create": "",
+ "compute_extension:keypairs:delete": "",
+ "compute_extension:v3:keypairs:discoverable": "",
+ "compute_extension:v3:keypairs": "",
+ "compute_extension:v3:keypairs:index": "",
+ "compute_extension:v3:keypairs:show": "",
+ "compute_extension:v3:keypairs:create": "",
+ "compute_extension:v3:keypairs:delete": "",
+ "compute_extension:v3:os-lock-server:discoverable": "",
+ "compute_extension:v3:os-lock-server:lock": "rule:admin_or_owner",
+ "compute_extension:v3:os-lock-server:unlock": "rule:admin_or_owner",
+ "compute_extension:v3:os-migrate-server:discoverable": "",
+ "compute_extension:v3:os-migrate-server:migrate": "rule:admin_api",
+ "compute_extension:v3:os-migrate-server:migrate_live": "rule:admin_api",
+ "compute_extension:multinic": "",
+ "compute_extension:v3:os-multinic": "",
+ "compute_extension:v3:os-multinic:discoverable": "",
+ "compute_extension:networks": "rule:admin_api",
+ "compute_extension:networks:view": "",
+ "compute_extension:networks_associate": "rule:admin_api",
+ "compute_extension:v3:os-pause-server:discoverable": "",
+ "compute_extension:v3:os-pause-server:pause": "rule:admin_or_owner",
+ "compute_extension:v3:os-pause-server:unpause": "rule:admin_or_owner",
+ "compute_extension:v3:os-pci:pci_servers": "",
+ "compute_extension:v3:os-pci:discoverable": "",
+ "compute_extension:v3:os-pci:index": "rule:admin_api",
+ "compute_extension:v3:os-pci:detail": "rule:admin_api",
+ "compute_extension:v3:os-pci:show": "rule:admin_api",
+ "compute_extension:quotas:show": "",
+ "compute_extension:quotas:update": "rule:admin_api",
+ "compute_extension:quotas:delete": "rule:admin_api",
+ "compute_extension:v3:os-quota-sets:discoverable": "",
+ "compute_extension:v3:os-quota-sets:show": "",
+ "compute_extension:v3:os-quota-sets:update": "rule:admin_api",
+ "compute_extension:v3:os-quota-sets:delete": "rule:admin_api",
+ "compute_extension:v3:os-quota-sets:detail": "rule:admin_api",
+ "compute_extension:quota_classes": "",
+ "compute_extension:rescue": "",
+ "compute_extension:v3:os-rescue": "",
+ "compute_extension:v3:os-rescue:discoverable": "",
+ "compute_extension:v3:os-scheduler-hints:discoverable": "",
+ "compute_extension:security_group_default_rules": "rule:admin_api",
+ "compute_extension:security_groups": "",
+ "compute_extension:v3:os-security-groups": "",
+ "compute_extension:v3:os-security-groups:discoverable": "",
+ "compute_extension:server_diagnostics": "rule:admin_api",
+ "compute_extension:v3:os-server-diagnostics": "rule:admin_api",
+ "compute_extension:v3:os-server-diagnostics:discoverable": "",
+ "compute_extension:server_groups": "",
+ "compute_extension:server_password": "",
+ "compute_extension:v3:os-server-password": "",
+ "compute_extension:v3:os-server-password:discoverable": "",
+ "compute_extension:server_usage": "",
+ "compute_extension:v3:os-server-usage": "",
+ "compute_extension:v3:os-server-usage:discoverable": "",
+ "compute_extension:services": "rule:admin_api",
+ "compute_extension:v3:os-services": "rule:admin_api",
+ "compute_extension:v3:os-services:discoverable": "",
+ "compute_extension:v3:server-metadata:discoverable": "",
+ "compute_extension:v3:servers:discoverable": "",
+ "compute_extension:shelve": "",
+ "compute_extension:shelveOffload": "rule:admin_api",
+ "compute_extension:v3:os-shelve:shelve": "",
+ "compute_extension:v3:os-shelve:shelve:discoverable": "",
+ "compute_extension:v3:os-shelve:shelve_offload": "rule:admin_api",
+ "compute_extension:simple_tenant_usage:show": "rule:admin_or_owner",
+ "compute_extension:v3:os-suspend-server:discoverable": "",
+ "compute_extension:v3:os-suspend-server:suspend": "rule:admin_or_owner",
+ "compute_extension:v3:os-suspend-server:resume": "rule:admin_or_owner",
+ "compute_extension:simple_tenant_usage:list": "rule:admin_api",
+ "compute_extension:unshelve": "",
+ "compute_extension:v3:os-shelve:unshelve": "",
+ "compute_extension:users": "rule:admin_api",
+ "compute_extension:v3:os-user-data:discoverable": "",
+ "compute_extension:virtual_interfaces": "",
+ "compute_extension:virtual_storage_arrays": "",
+ "compute_extension:volumes": "",
+ "compute_extension:volume_attachments:index": "",
+ "compute_extension:volume_attachments:show": "",
+ "compute_extension:volume_attachments:create": "",
+ "compute_extension:volume_attachments:update": "",
+ "compute_extension:volume_attachments:delete": "",
+ "compute_extension:volumetypes": "",
+ "compute_extension:availability_zone:list": "",
+ "compute_extension:v3:os-availability-zone:list": "",
+ "compute_extension:v3:os-availability-zone:discoverable": "",
+ "compute_extension:availability_zone:detail": "rule:admin_api",
+ "compute_extension:v3:os-availability-zone:detail": "rule:admin_api",
+ "compute_extension:used_limits_for_admin": "rule:admin_api",
+ "compute_extension:migrations:index": "rule:admin_api",
+ "compute_extension:v3:os-migrations:index": "rule:admin_api",
+ "compute_extension:v3:os-migrations:discoverable": "",
+ "compute_extension:os-assisted-volume-snapshots:create": "rule:admin_api",
+ "compute_extension:os-assisted-volume-snapshots:delete": "rule:admin_api",
+ "compute_extension:console_auth_tokens": "rule:admin_api",
+ "compute_extension:v3:os-console-auth-tokens": "rule:admin_api",
+ "compute_extension:os-server-external-events:create": "rule:admin_api",
+ "compute_extension:v3:os-server-external-events:create": "rule:admin_api",
+
+ "volume:create": "",
+ "volume:get_all": "",
+ "volume:get_volume_metadata": "",
+ "volume:get_snapshot": "",
+ "volume:get_all_snapshots": "",
+
+
+ "volume_extension:types_manage": "rule:admin_api",
+ "volume_extension:types_extra_specs": "rule:admin_api",
+ "volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
+ "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
+ "volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
+
+
+ "network:get_all": "",
+ "network:get": "",
+ "network:create": "",
+ "network:delete": "",
+ "network:associate": "",
+ "network:disassociate": "",
+ "network:get_vifs_by_instance": "",
+ "network:allocate_for_instance": "",
+ "network:deallocate_for_instance": "",
+ "network:validate_networks": "",
+ "network:get_instance_uuids_by_ip_filter": "",
+ "network:get_instance_id_by_floating_address": "",
+ "network:setup_networks_on_host": "",
+ "network:get_backdoor_port": "",
+
+ "network:get_floating_ip": "",
+ "network:get_floating_ip_pools": "",
+ "network:get_floating_ip_by_address": "",
+ "network:get_floating_ips_by_project": "",
+ "network:get_floating_ips_by_fixed_address": "",
+ "network:allocate_floating_ip": "",
+ "network:deallocate_floating_ip": "",
+ "network:associate_floating_ip": "",
+ "network:disassociate_floating_ip": "",
+ "network:release_floating_ip": "",
+ "network:migrate_instance_start": "",
+ "network:migrate_instance_finish": "",
+
+ "network:get_fixed_ip": "",
+ "network:get_fixed_ip_by_address": "",
+ "network:add_fixed_ip_to_instance": "",
+ "network:remove_fixed_ip_from_instance": "",
+ "network:add_network_to_project": "",
+ "network:get_instance_nw_info": "",
+
+ "network:get_dns_domains": "",
+ "network:add_dns_entry": "",
+ "network:modify_dns_entry": "",
+ "network:delete_dns_entry": "",
+ "network:get_dns_entries_by_address": "",
+ "network:get_dns_entries_by_name": "",
+ "network:create_private_dns_domain": "",
+ "network:create_public_dns_domain": "",
+ "network:delete_dns_domain": ""
+}
diff --git a/openstack/usr/share/openstack/openvswitch.yml b/openstack/usr/share/openstack/openvswitch.yml
new file mode 100644
index 00000000..47257f7f
--- /dev/null
+++ b/openstack/usr/share/openstack/openvswitch.yml
@@ -0,0 +1,38 @@
+---
+- hosts: localhost
+ tasks:
+
+ - name: Create openvswitch directories
+ file: path={{ item }} state=directory
+ with_items:
+ - /etc/openvswitch
+ - /var/run/openvswitch
+
+ - shell: >
+ ovsdb-tool create /etc/openvswitch/conf.db /usr/share/openvswitch/vswitch.ovsschema
+ creates=/etc/openvswitch/conf.db
+
+ # We enable openvswitch-db-server in a separate task so that we can tell
+ # when this playbook runs for the first time: the task below reports
+ # 'changed' only when it actually enables the unit.
+ #
+ # We need to know this so that the database is only initialised once.
+ - name: Enable openvswitch database service
+ service: name={{ item }} enabled=yes
+ with_items:
+ - openvswitch-db-server.service
+ register: openvswitch_db_enable
+
+ - name: Start openvswitch database service
+ service: name={{ item }} state=started
+ with_items:
+ - openvswitch-db-server.service
+
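+ # ovs-vsctl is run with --no-wait because ovs-vswitchd (started by
+ # openvswitch.service in the last task) is not running yet; without it
+ # the init call would block waiting for the switch daemon.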
+ - name: Initialise openvswitch database
+ shell: ovs-vsctl --no-wait init
+ when: openvswitch_db_enable|changed
+
+ - name: Enable and start Open vSwitch service
+ service: name={{ item }} enabled=yes state=started
+ with_items:
+ - openvswitch.service
diff --git a/openstack/usr/share/openstack/postgres.yml b/openstack/usr/share/openstack/postgres.yml
new file mode 100644
index 00000000..5ff9355e
--- /dev/null
+++ b/openstack/usr/share/openstack/postgres.yml
@@ -0,0 +1,48 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/postgres.conf"
+ tasks:
+
+ - name: Create postgres user
+ user:
+ name: postgres
+ comment: PostgreSQL Server
+ shell: /sbin/nologin
+ home: /var/lib/pgsql
+
+ - name: Create the postgres directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: postgres
+ group: postgres
+ with_items:
+ - /var/run/postgresql
+ - /var/lib/pgsql/data
+
+ - name: Initialise postgres database
+ command: pg_ctl -D /var/lib/pgsql/data initdb
+ args:
+ creates: /var/lib/pgsql/data/base
+ sudo: yes
+ sudo_user: postgres
+
+ - name: Add the configuration needed for postgres for Openstack
+ template:
+ src: /usr/share/openstack/postgres/{{ item }}
+ dest: /var/lib/pgsql/data/{{ item }}
+ owner: postgres
+ group: postgres
+ mode: 0600
+ with_items:
+ - postgresql.conf
+ - pg_hba.conf
+
+ - name: Enable and start postgres services
+ service:
+ name: "{{ item }}"
+ enabled: yes
+ state: started
+ with_items:
+ - postgres-server
diff --git a/openstack/usr/share/openstack/postgres/pg_hba.conf b/openstack/usr/share/openstack/postgres/pg_hba.conf
new file mode 100644
index 00000000..78186924
--- /dev/null
+++ b/openstack/usr/share/openstack/postgres/pg_hba.conf
@@ -0,0 +1,5 @@
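+# Fields: connection type, database, user, client address (omitted for
+# "local" connections), authentication method.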
+local all all trust
+host all all 127.0.0.0/8 trust
+host all all ::1/128 trust
+host all all {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}/32 trust
+host all all 0.0.0.0/0 md5
diff --git a/openstack/usr/share/openstack/postgres/postgresql.conf b/openstack/usr/share/openstack/postgres/postgresql.conf
new file mode 100644
index 00000000..74153385
--- /dev/null
+++ b/openstack/usr/share/openstack/postgres/postgresql.conf
@@ -0,0 +1,11 @@
+listen_addresses = '{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}'
+max_connections = 100
+shared_buffers = 128MB
+log_timezone = 'UTC'
+datestyle = 'iso, mdy'
+timezone = 'UTC'
+lc_messages = 'C'
+lc_monetary = 'C'
+lc_numeric = 'C'
+lc_time = 'C'
+default_text_search_config = 'pg_catalog.english'
diff --git a/openstack/usr/share/openstack/rabbitmq/rabbitmq-env.conf b/openstack/usr/share/openstack/rabbitmq/rabbitmq-env.conf
new file mode 100644
index 00000000..d4c58dae
--- /dev/null
+++ b/openstack/usr/share/openstack/rabbitmq/rabbitmq-env.conf
@@ -0,0 +1,3 @@
+# Configure the port on which the rabbitmq-server node will listen.
+NODE_PORT={{ RABBITMQ_PORT }}
+CONFIG_FILE=/etc/rabbitmq/rabbitmq
diff --git a/openstack/usr/share/openstack/rabbitmq/rabbitmq.config b/openstack/usr/share/openstack/rabbitmq/rabbitmq.config
new file mode 100644
index 00000000..9b93881e
--- /dev/null
+++ b/openstack/usr/share/openstack/rabbitmq/rabbitmq.config
@@ -0,0 +1,9 @@
+%% -*- Rabbit configuration for Openstack in Baserock
+[
+ {rabbit,
+ [
+ {default_user, <<"{{ RABBITMQ_USER }}">>},
+ {default_pass, <<"{{ RABBITMQ_PASSWORD }}">>},
+ {tcp_listeners, [{{ RABBITMQ_PORT }}]}
+ ]}
+].
diff --git a/openstack/usr/share/openstack/swift-controller.yml b/openstack/usr/share/openstack/swift-controller.yml
new file mode 100644
index 00000000..690de087
--- /dev/null
+++ b/openstack/usr/share/openstack/swift-controller.yml
@@ -0,0 +1,52 @@
+---
+- hosts: localhost
+ vars_files:
+ - swift-controller-vars.yml
+ vars:
+ - ring_name_port_map:
+ account:
+ port: 6002
+ container:
+ port: 6001
+ object:
+ port: 6000
+ remote_user: root
+ tasks:
+ - user:
+ name: swift
+ comment: Swift user
+
+ - file:
+ path: /etc/swift
+ owner: swift
+ group: swift
+ state: directory
+
+ - template:
+ src: /usr/share/swift/etc/swift/proxy-server.j2
+ dest: /etc/swift/proxy-server.conf
+ mode: 0644
+ owner: swift
+ group: swift
+
+ - keystone_user:
+ user: swift
+ password: "{{ SWIFT_ADMIN_PASSWORD }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - keystone_user:
+ role: admin
+ user: swift
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - keystone_service:
+ name: swift
+ type: object-store
+ description: OpenStack Object Storage
+ publicurl: http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:8080/v1/AUTH_%(tenant_id)s
+ internalurl: http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:8080/v1/AUTH_%(tenant_id)s
+ adminurl: http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:8080
+ region: regionOne
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
diff --git a/openstack/usr/share/swift/etc/rsyncd.j2 b/openstack/usr/share/swift/etc/rsyncd.j2
new file mode 100644
index 00000000..c0657665
--- /dev/null
+++ b/openstack/usr/share/swift/etc/rsyncd.j2
@@ -0,0 +1,23 @@
+uid = swift
+gid = swift
+log file = /var/log/rsyncd.log
+pid file = /var/run/rsyncd.pid
+address = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+[account]
+max connections = 2
+path = /srv/node/
+read only = false
+lock file = /var/lock/account.lock
+
+[container]
+max connections = 2
+path = /srv/node/
+read only = false
+lock file = /var/lock/container.lock
+
+[object]
+max connections = 2
+path = /srv/node/
+read only = false
+lock file = /var/lock/object.lock
diff --git a/openstack/usr/share/swift/etc/swift/proxy-server.j2 b/openstack/usr/share/swift/etc/swift/proxy-server.j2
new file mode 100644
index 00000000..dda82d5a
--- /dev/null
+++ b/openstack/usr/share/swift/etc/swift/proxy-server.j2
@@ -0,0 +1,630 @@
+[DEFAULT]
+# bind_ip = 0.0.0.0
+bind_port = 8080
+# bind_timeout = 30
+# backlog = 4096
+swift_dir = /etc/swift
+user = swift
+
+# Enables exposing configuration settings via HTTP GET /info.
+# expose_info = true
+
+# Key to use for admin calls that are HMAC signed. Default is empty,
+# which will disable admin calls to /info.
+# admin_key = secret_admin_key
+#
+# Allows withholding sections from showing up in the public calls
+# to /info. You can withhold subsections by separating the dict level with a
+# ".". The following would cause the sections 'container_quotas' and 'tempurl'
+# to not be listed, and the key max_failed_deletes would be removed from
+# bulk_delete. Default is empty, allowing all registered features to be listed
+# via HTTP GET /info.
+# disallowed_sections = container_quotas, tempurl, bulk_delete.max_failed_deletes
+
+# Use an integer to override the number of pre-forked processes that will
+# accept connections. Should default to the number of effective cpu
+# cores in the system. It's worth noting that individual workers will
+# use many eventlet co-routines to service multiple concurrent requests.
+# workers = auto
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
+# Set the following two lines to enable SSL. This is for testing only.
+# cert_file = /etc/swift/proxy.crt
+# key_file = /etc/swift/proxy.key
+#
+# expiring_objects_container_divisor = 86400
+# expiring_objects_account_name = expiring_objects
+#
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_headers = false
+# log_address = /dev/log
+# The following caps the length of log lines to the value given; no limit if
+# set to 0, the default.
+# log_max_line_length = 0
+#
+# This optional suffix (default is empty) is appended to the swift transaction
+# id and allows one to easily figure out which cluster an X-Trans-Id belongs to.
+# This is very useful when one is managing more than one swift cluster.
+# trans_id_suffix =
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
+# adapted_logger
+# log_custom_handlers =
+#
+# If set, log_udp_host will override log_address
+# log_udp_host =
+# log_udp_port = 514
+#
+# You can enable StatsD logging here:
+# log_statsd_host = localhost
+# log_statsd_port = 8125
+# log_statsd_default_sample_rate = 1.0
+# log_statsd_sample_rate_factor = 1.0
+# log_statsd_metric_prefix =
+#
+# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)
+# cors_allow_origin =
+# strict_cors_mode = True
+#
+# client_timeout = 60
+# eventlet_debug = false
+
+[pipeline:main]
+#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo proxy-logging proxy-server
+pipeline = authtoken cache healthcheck keystoneauth proxy-logging proxy-server
+
+[app:proxy-server]
+use = egg:swift#proxy
+# You can override the default log routing for this app here:
+# set log_name = proxy-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_address = /dev/log
+#
+# log_handoffs = true
+# recheck_account_existence = 60
+# recheck_container_existence = 60
+# object_chunk_size = 65536
+# client_chunk_size = 65536
+#
+# How long the proxy server will wait on responses from the a/c/o servers.
+# node_timeout = 10
+#
+# How long the proxy server will wait for an initial response and to read a
+# chunk of data from the object servers while serving GET / HEAD requests.
+# Timeouts from these requests can be recovered from so setting this to
+# something lower than node_timeout would provide quicker error recovery
+# while allowing for a longer timeout for non-recoverable requests (PUTs).
+# Defaults to node_timeout, should be overridden if node_timeout is set to a
+# high number to prevent client timeouts from firing before the proxy server
+# has a chance to retry.
+# recoverable_node_timeout = node_timeout
+#
+# conn_timeout = 0.5
+#
+# How long to wait for requests to finish after a quorum has been established.
+# post_quorum_timeout = 0.5
+#
+# How long without an error before a node's error count is reset. This will
+# also be how long before a node is reenabled after suppression is triggered.
+# error_suppression_interval = 60
+#
+# How many errors can accumulate before a node is temporarily ignored.
+# error_suppression_limit = 10
+#
+# If set to 'true' any authorized user may create and delete accounts; if
+# 'false' no one, even authorized, can.
+allow_account_management = true
+#
+# Set object_post_as_copy = false to turn on fast posts where only the metadata
+# changes are stored anew and the original data file is kept in place. This
+# makes for quicker posts; but since the container metadata isn't updated in
+# this mode, features like container sync won't be able to sync posts.
+# object_post_as_copy = true
+#
+# If set to 'true' authorized accounts that do not yet exist within the Swift
+# cluster will be automatically created.
+account_autocreate = true
+#
+# If set to a positive value, trying to create a container when the account
+# already has at least this maximum containers will result in a 403 Forbidden.
+# Note: This is a soft limit, meaning a user might exceed the cap for
+# recheck_account_existence before the 403s kick in.
+# max_containers_per_account = 0
+#
+# This is a comma separated list of account hashes that ignore the
+# max_containers_per_account cap.
+# max_containers_whitelist =
+#
+# Comma separated list of Host headers to which the proxy will deny requests.
+# deny_host_headers =
+#
+# Prefix used when automatically creating accounts.
+# auto_create_account_prefix = .
+#
+# Depth of the proxy put queue.
+# put_queue_depth = 10
+#
+# Storage nodes can be chosen at random (shuffle), by using timing
+# measurements (timing), or by using an explicit match (affinity).
+# Using timing measurements may allow for lower overall latency, while
+# using affinity allows for finer control. In both the timing and
+# affinity cases, equally-sorting nodes are still randomly chosen to
+# spread load.
+# The valid values for sorting_method are "affinity", "shuffle", and "timing".
+# sorting_method = shuffle
+#
+# If the "timing" sorting_method is used, the timings will only be valid for
+# the number of seconds configured by timing_expiry.
+# timing_expiry = 300
+#
+# The maximum time (seconds) that a large object connection is allowed to last.
+# max_large_object_get_time = 86400
+#
+# Set to the number of nodes to contact for a normal request. You can use
+# '* replicas' at the end to have it use the number given times the number of
+# replicas for the ring being used for the request.
+# request_node_count = 2 * replicas
+#
+# Which backend servers to prefer on reads. Format is r<N> for region
+# N or r<N>z<M> for region N, zone M. The value after the equals is
+# the priority; lower numbers are higher priority.
+#
+# Example: first read from region 1 zone 1, then region 1 zone 2, then
+# anything in region 2, then everything else:
+# read_affinity = r1z1=100, r1z2=200, r2=300
+# Default is empty, meaning no preference.
+# read_affinity =
+#
+# Which backend servers to prefer on writes. Format is r<N> for region
+# N or r<N>z<M> for region N, zone M. If this is set, then when
+# handling an object PUT request, some number (see setting
+# write_affinity_node_count) of local backend servers will be tried
+# before any nonlocal ones.
+#
+# Example: try to write to regions 1 and 2 before writing to any other
+# nodes:
+# write_affinity = r1, r2
+# Default is empty, meaning no preference.
+# write_affinity =
+#
+# The number of local (as governed by the write_affinity setting)
+# nodes to attempt to contact first, before any non-local ones. You
+# can use '* replicas' at the end to have it use the number given
+# times the number of replicas for the ring being used for the
+# request.
+# write_affinity_node_count = 2 * replicas
+#
+# These are the headers whose values will only be shown to swift_owners. The
+# exact definition of a swift_owner is up to the auth system in use, but
+# usually indicates administrative responsibilities.
+# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-account-access-control
+
+[filter:tempauth]
+use = egg:swift#tempauth
+# You can override the default log routing for this filter here:
+# set log_name = tempauth
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# The reseller prefix will verify a token begins with this prefix before even
+# attempting to validate it. Also, with authorization, only Swift storage
+# accounts with this prefix will be authorized by this middleware. Useful if
+# multiple auth systems are in use for one Swift cluster.
+# reseller_prefix = AUTH
+#
+# The auth prefix will cause requests beginning with this prefix to be routed
+# to the auth subsystem, for granting tokens, etc.
+# auth_prefix = /auth/
+# token_life = 86400
+#
+# This allows middleware higher in the WSGI pipeline to override auth
+# processing, useful for middleware such as tempurl and formpost. If you know
+# you're not going to use such middleware and you want a bit of extra security,
+# you can set this to false.
+# allow_overrides = true
+#
+# This specifies what scheme to return with storage urls:
+# http, https, or default (chooses based on what the server is running as)
+# This can be useful with an SSL load balancer in front of a non-SSL server.
+# storage_url_scheme = default
+#
+# Lastly, you need to list all the accounts/users you want here. The format is:
+# user_<account>_<user> = <key> [group] [group] [...] [storage_url]
+# or if you want underscores in <account> or <user>, you can base64 encode them
+# (with no equal signs) and use this format:
+# user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
+# There are special groups of:
+# .reseller_admin = can do anything to any account for this auth
+# .admin = can do anything within the account
+# If neither of these groups are specified, the user can only access containers
+# that have been explicitly allowed for them by a .admin or .reseller_admin.
+# The trailing optional storage_url allows you to specify an alternate url to
+# hand back to the user upon authentication. If not specified, this defaults to
+# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
+# to what the requester would need to use to reach this host.
+# Here are example entries, required for running the tests:
+user_admin_admin = admin .admin .reseller_admin
+user_test_tester = testing .admin
+user_test2_tester2 = testing2 .admin
+user_test_tester3 = testing3
+
+# To enable Keystone authentication you need to have the auth token
+# middleware first to be configured. Here is an example below, please
+# refer to the keystone's documentation for details about the
+# different settings.
+#
+# You'll need to have as well the keystoneauth middleware enabled
+# and have it in your main pipeline so instead of having tempauth in
+# there you can change it to: authtoken keystoneauth
+#
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+# auth_host = keystonehost
+# auth_port = 35357
+# auth_protocol = http
+# auth_uri = http://keystonehost:5000/
+#auth_uri = http://controller:5000/v2.0
+auth_uri = http://127.0.0.1:5000/v2.0
+identity_uri = http://127.0.0.1:35357
+admin_tenant_name = service
+admin_user = swift
+admin_password = {{ SWIFT_ADMIN_PASSWORD }}
+delay_auth_decision = 1
+# cache = swift.cache
+# include_service_catalog = False
+#
+[filter:keystoneauth]
+use = egg:swift#keystoneauth
+# Operator roles are the roles whose users are allowed to manage a
+# tenant, create containers and give ACLs to others.
+# operator_roles = admin, swiftoperator
+operator_roles = admin, _member_
+# The reseller admin role has the ability to create and delete accounts
+# reseller_admin_role = ResellerAdmin
+# For backwards compatibility, keystoneauth will match names in cross-tenant
+# access control lists (ACLs) when both the requesting user and the tenant
+# are in the default domain, i.e. the domain to which existing tenants are
+# migrated. The default_domain_id value configured here should be the same as
+# the value used during migration of tenants to keystone domains.
+# default_domain_id = default
+# For a new installation, or an installation in which keystone projects may
+# move between domains, you should disable backwards compatible name matching
+# in ACLs by setting allow_names_in_acls to false:
+# allow_names_in_acls = true
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+# An optional filesystem path, which if present, will cause the healthcheck
+# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE".
+# This facility may be used to temporarily remove a Swift node from a load
+# balancer pool during maintenance or upgrade (remove the file to allow the
+# node back into the load balancer pool).
+# disable_path =
+
+[filter:cache]
+use = egg:swift#memcache
+# You can override the default log routing for this filter here:
+# set log_name = cache
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# If not set here, the value for memcache_servers will be read from
+# memcache.conf (see memcache.conf-sample) or lacking that file, it will
+# default to the value below. You can specify multiple servers separated with
+# commas, as in: 10.1.2.3:11211,10.1.2.4:11211
+memcache_servers = 127.0.0.1:11211
+#
+# Sets how memcache values are serialized and deserialized:
+# 0 = older, insecure pickle serialization
+# 1 = json serialization but pickles can still be read (still insecure)
+# 2 = json serialization only (secure and the default)
+# If not set here, the value for memcache_serialization_support will be read
+# from /etc/swift/memcache.conf (see memcache.conf-sample).
+# To avoid an instant full cache flush, existing installations should
+# upgrade with 0, then set to 1 and reload, then after some time (24 hours)
+# set to 2 and reload.
+# In the future, the ability to use pickle serialization will be removed.
+# memcache_serialization_support = 2
+#
+# Sets the maximum number of connections to each memcached server per worker
+# memcache_max_connections = 2
+
+[filter:ratelimit]
+use = egg:swift#ratelimit
+# You can override the default log routing for this filter here:
+# set log_name = ratelimit
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# clock_accuracy should represent how accurate the proxy servers' system clocks
+# are with each other. 1000 means that all the proxies' clocks are accurate to
+# each other within 1 millisecond. No ratelimit should be higher than the
+# clock accuracy.
+# clock_accuracy = 1000
+#
+# max_sleep_time_seconds = 60
+#
+# log_sleep_time_seconds of 0 means disabled
+# log_sleep_time_seconds = 0
+#
+# allows for slow rates (e.g. running up to 5 seconds behind) to catch up.
+# rate_buffer_seconds = 5
+#
+# account_ratelimit of 0 means disabled
+# account_ratelimit = 0
+
+# these are comma separated lists of account names
+# account_whitelist = a,b
+# account_blacklist = c,d
+
+# With container_ratelimit_x = r, write requests to containers of size x are
+# limited to r requests per second. The container rate will be linearly
+# interpolated from the values given. With the values below, a container of
+# size 5 will get a rate of 75.
+# container_ratelimit_0 = 100
+# container_ratelimit_10 = 50
+# container_ratelimit_50 = 20
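+# e.g. with the values above a container of size 5 is interpolated between
+# container_ratelimit_0 = 100 and container_ratelimit_10 = 50:
+# 100 + (50 - 100) * (5 - 0) / (10 - 0) = 75 write requests per second.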
+
+# Similarly to the above container-level write limits, the following will limit
+# container GET (listing) requests.
+# container_listing_ratelimit_0 = 100
+# container_listing_ratelimit_10 = 50
+# container_listing_ratelimit_50 = 20
+
+[filter:domain_remap]
+use = egg:swift#domain_remap
+# You can override the default log routing for this filter here:
+# set log_name = domain_remap
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# storage_domain = example.com
+# path_root = v1
+# reseller_prefixes = AUTH
+
+[filter:catch_errors]
+use = egg:swift#catch_errors
+# You can override the default log routing for this filter here:
+# set log_name = catch_errors
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+
+[filter:cname_lookup]
+# Note: this middleware requires python-dnspython
+use = egg:swift#cname_lookup
+# You can override the default log routing for this filter here:
+# set log_name = cname_lookup
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# Specify the storage_domain that matches your cloud; multiple domains
+# can be specified, separated by a comma
+# storage_domain = example.com
+#
+# lookup_depth = 1
+
+# Note: Put staticweb just after your auth filter(s) in the pipeline
+[filter:staticweb]
+use = egg:swift#staticweb
+
+# Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline
+[filter:tempurl]
+use = egg:swift#tempurl
+# The methods allowed with Temp URLs.
+# methods = GET HEAD PUT POST DELETE
+#
+# The headers to remove from incoming requests. Simply a whitespace delimited
+# list of header names and names can optionally end with '*' to indicate a
+# prefix match. incoming_allow_headers is a list of exceptions to these
+# removals.
+# incoming_remove_headers = x-timestamp
+#
+# The headers allowed as exceptions to incoming_remove_headers. Simply a
+# whitespace delimited list of header names and names can optionally end with
+# '*' to indicate a prefix match.
+# incoming_allow_headers =
+#
+# The headers to remove from outgoing responses. Simply a whitespace delimited
+# list of header names and names can optionally end with '*' to indicate a
+# prefix match. outgoing_allow_headers is a list of exceptions to these
+# removals.
+# outgoing_remove_headers = x-object-meta-*
+#
+# The headers allowed as exceptions to outgoing_remove_headers. Simply a
+# whitespace delimited list of header names and names can optionally end with
+# '*' to indicate a prefix match.
+# outgoing_allow_headers = x-object-meta-public-*
+
+# Note: Put formpost just before your auth filter(s) in the pipeline
+[filter:formpost]
+use = egg:swift#formpost
+
+# Note: Just needs to be placed before the proxy-server in the pipeline.
+[filter:name_check]
+use = egg:swift#name_check
+# forbidden_chars = '"`<>
+# maximum_length = 255
+# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$
+
+[filter:list-endpoints]
+use = egg:swift#list_endpoints
+# list_endpoints_path = /endpoints/
+
+[filter:proxy-logging]
+use = egg:swift#proxy_logging
+# If not set, logging directives from [DEFAULT] without "access_" will be used
+# access_log_name = swift
+# access_log_facility = LOG_LOCAL0
+# access_log_level = INFO
+# access_log_address = /dev/log
+#
+# If set, access_log_udp_host will override access_log_address
+# access_log_udp_host =
+# access_log_udp_port = 514
+#
+# You can use log_statsd_* from [DEFAULT] or override them here:
+# access_log_statsd_host = localhost
+# access_log_statsd_port = 8125
+# access_log_statsd_default_sample_rate = 1.0
+# access_log_statsd_sample_rate_factor = 1.0
+# access_log_statsd_metric_prefix =
+# access_log_headers = false
+#
+# If access_log_headers is True and access_log_headers_only is set, only
+# these headers are logged. Multiple headers can be defined as a comma separated
+# list like this: access_log_headers_only = Host, X-Object-Meta-Mtime
+# access_log_headers_only =
+#
+# By default, the X-Auth-Token is logged. To obscure the value,
+# set reveal_sensitive_prefix to the number of characters to log.
+# For example, if set to 12, only the first 12 characters of the
+# token appear in the log. An unauthorized access of the log file
+# won't allow unauthorized usage of the token. However, the first
+# 12 or so characters are unique enough that you can trace/debug
+# token usage. Set to 0 to suppress the token completely (replaced
+# by '...' in the log).
+# Note: reveal_sensitive_prefix will not affect the value
+# logged with access_log_headers=True.
+# reveal_sensitive_prefix = 16
+#
+# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
+# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
+# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
+#
+# Note: The double proxy-logging in the pipeline is not a mistake. The
+# left-most proxy-logging is there to log requests that were handled in
+# middleware and never made it through to the right-most middleware (and
+# proxy server). Double logging is prevented for normal requests. See
+# proxy-logging docs.
+
+# Note: Put before both ratelimit and auth in the pipeline.
+[filter:bulk]
+use = egg:swift#bulk
+# max_containers_per_extraction = 10000
+# max_failed_extractions = 1000
+# max_deletes_per_request = 10000
+# max_failed_deletes = 1000
+
+# In order to keep a connection active during a potentially long bulk request,
+# Swift may return whitespace prepended to the actual response body. This
+# whitespace will be yielded no more than every yield_frequency seconds.
+# yield_frequency = 10
+
+# Note: The following parameter is used during a bulk delete of objects and
+# their container. This would frequently fail because it is very likely
+# that not all replicated objects have been deleted by the time the middleware
+# gets a successful response. The number of retries can be configured, and the
+# number of seconds to wait between each retry will be 1.5**retry
+
+# delete_container_retry_count = 0
+
+# Note: Put after auth in the pipeline.
+[filter:container-quotas]
+use = egg:swift#container_quotas
+
+# Note: Put after auth and staticweb in the pipeline.
+[filter:slo]
+use = egg:swift#slo
+# max_manifest_segments = 1000
+# max_manifest_size = 2097152
+# min_segment_size = 1048576
+# Start rate-limiting SLO segment serving after the Nth segment of a
+# segmented object.
+# rate_limit_after_segment = 10
+#
+# Once segment rate-limiting kicks in for an object, limit segments served
+# to N per second. 0 means no rate-limiting.
+# rate_limit_segments_per_sec = 0
+#
+# Time limit on GET requests (seconds)
+# max_get_time = 86400
+
+# Note: Put after auth and staticweb in the pipeline.
+# If you don't put it in the pipeline, it will be inserted for you.
+[filter:dlo]
+use = egg:swift#dlo
+# Start rate-limiting DLO segment serving after the Nth segment of a
+# segmented object.
+# rate_limit_after_segment = 10
+#
+# Once segment rate-limiting kicks in for an object, limit segments served
+# to N per second. 0 means no rate-limiting.
+# rate_limit_segments_per_sec = 1
+#
+# Time limit on GET requests (seconds)
+# max_get_time = 86400
+
+[filter:account-quotas]
+use = egg:swift#account_quotas
+
+[filter:gatekeeper]
+use = egg:swift#gatekeeper
+# You can override the default log routing for this filter here:
+# set log_name = gatekeeper
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+
+[filter:container_sync]
+use = egg:swift#container_sync
+# Set this to false if you want to disallow any full url values to be set for
+# any new X-Container-Sync-To headers. This will keep any new full urls from
+# coming in, but won't change any existing values already in the cluster.
+# Updating those will have to be done manually, as the true realm
+# endpoint cannot always be guessed.
+# allow_full_urls = true
+# Set this to specify this cluster's //realm/cluster as "current" in /info
+# current = //REALM/CLUSTER
+
+# Note: Put it at the beginning of the pipeline to profile all middleware. But
+# it is safer to put this after catch_errors, gatekeeper and healthcheck.
+[filter:xprofile]
+use = egg:swift#xprofile
+# This option enables you to switch profilers, which should inherit from the
+# python standard profiler. Currently the supported values are 'cProfile',
+# 'eventlet.green.profile', etc.
+# profile_module = eventlet.green.profile
+#
+# This prefix will be used to combine process ID and timestamp to name the
+# profile data file. Make sure the executing user has permission to write
+# into this path (missing path segments will be created, if necessary).
+# If you enable profiling in more than one type of daemon, you must override
+# it with a unique value like: /var/log/swift/profile/proxy.profile
+# log_filename_prefix = /tmp/log/swift/profile/default.profile
+#
+# The profile data will be dumped to local disk based on the above naming rule
+# at this interval.
+# dump_interval = 5.0
+#
+# Be careful, this option will enable the profiler to dump data into files with
+# timestamps, which means lots of files will pile up in the directory.
+# dump_timestamp = false
+#
+# This is the path of the URL to access the mini web UI.
+# path = /__profile__
+#
+# Clear the data when the wsgi server shuts down.
+# flush_at_shutdown = false
+#
+# unwind the iterator of applications
+# unwind = false
diff --git a/pxeboot.check b/pxeboot.check
new file mode 100755
index 00000000..611708a9
--- /dev/null
+++ b/pxeboot.check
@@ -0,0 +1,86 @@
+#!/usr/bin/python
+
+import itertools
+import os
+import subprocess
+import sys
+flatten = itertools.chain.from_iterable
+
+def powerset(iterable):
+ "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
+ s = list(iterable)
+ return flatten(itertools.combinations(s, r) for r in range(len(s)+1))
+
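+# Each entry pairs a PXEBOOT_MODE value with the set of environment variables
+# that must be set for that mode to be usable.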
+valid_option_sets = frozenset((
+ ('spawn-novlan', frozenset(('PXEBOOT_DEPLOYER_INTERFACE',))),
+ ('spawn-vlan', frozenset(('PXEBOOT_DEPLOYER_INTERFACE', 'PXEBOOT_VLAN'))),
+ ('existing-dhcp', frozenset(('PXEBOOT_DEPLOYER_INTERFACE',
+ 'PXEBOOT_CONFIG_TFTP_ADDRESS'))),
+ ('existing-server', frozenset(('PXEBOOT_CONFIG_TFTP_ADDRESS',
+ 'PXEBOOT_ROOTFS_RSYNC_ADDRESS'))),
+))
+valid_modes = frozenset(mode for mode, opt_set in valid_option_sets)
+
+
+def compute_matches(env):
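+ '''Return the option sets whose required variables are all present in env.'''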
+ complete_matches = set()
+ for mode, opt_set in valid_option_sets:
+ if all(k in env for k in opt_set):
+ complete_matches.add(opt_set)
+ return complete_matches
+
+complete_matches = compute_matches(os.environ)
+
+def word_separate_options(options):
+ assert options
+ s = options.pop(-1)
+ if options:
+ s = '%s and %s' % (', '.join(options), s)
+ return s
+
+
+valid_options = frozenset(flatten(opt_set for (mode, opt_set)
+ in valid_option_sets))
+matched_options = frozenset(o for o in valid_options
+ if o in os.environ)
+if not complete_matches:
+ addable_sets = frozenset(frozenset(opt_set) - matched_options
+ for (mode, opt_set) in valid_option_sets
+ if frozenset(opt_set) - matched_options)
+ print('Please provide %s' % ' or '.join(
+ word_separate_options(list(opt_set))
+ for opt_set in addable_sets if opt_set))
+ sys.exit(1)
+elif len(complete_matches) > 1:
+ removable_sets = frozenset(matched_options - frozenset(opts) for opts in
+ powerset(matched_options)
+ if len(compute_matches(opts)) == 1)
+ print('WARNING: Following options might not be needed: %s' % ' or '.join(
+ word_separate_options(list(opt_set))
+ for opt_set in removable_sets if opt_set))
+
+if 'PXEBOOT_MODE' in os.environ:
+ mode = os.environ['PXEBOOT_MODE']
+else:
+ try:
+ mode, = (mode for (mode, opt_set) in valid_option_sets
+ if all(o in os.environ for o in opt_set))
+
+ except ValueError as e:
+ print ('More than one candidate for PXEBOOT_MODE, please '
+ 'set a value for it. Type `morph help pxeboot.write` for '
+ 'more info')
+ sys.exit(1)
+
+if mode not in valid_modes:
+ print('%s is not a valid PXEBOOT_MODE' % mode)
+ sys.exit(1)
+
+if mode != 'existing-server':
+ with open(os.devnull, 'w') as devnull:
+ if subprocess.call(['systemctl', 'is-active', 'nfs-server'],
+ stdout=devnull) != 0:
+ print ('ERROR: nfs-server.service is not running and is needed '
+ 'for this deployment. Please run `systemctl start nfs-server` '
+ 'and try `morph deploy` again.')
+ sys.exit(1)
diff --git a/pxeboot.write b/pxeboot.write
new file mode 100644
index 00000000..3a12ebcc
--- /dev/null
+++ b/pxeboot.write
@@ -0,0 +1,755 @@
+#!/usr/bin/env python
+
+
+import collections
+import contextlib
+import errno
+import itertools
+import logging
+import os
+import select
+import signal
+import shutil
+import socket
+import string
+import StringIO
+import subprocess
+import sys
+import tempfile
+import textwrap
+import urlparse
+
+import cliapp
+
+import morphlib
+
+
+def _int_to_quad_dot(i):
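+ # e.g. 0xc0a80001 -> '192.168.0.1'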
+ return '.'.join((
+ str(i >> 24 & 0xff),
+ str(i >> 16 & 0xff),
+ str(i >> 8 & 0xff),
+ str(i & 0xff)))
+
+
+def _quad_dot_to_int(s):
+ i = 0
+ for octet in s.split('.'):
+ i <<= 8
+ i += int(octet, 10)
+ return i
+
+
+def _netmask_to_prefixlen(mask):
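+ # e.g. the mask for 255.255.252.0 (0xfffffc00) -> 22; raises ValueError
+ # if the set bits of the mask are not contiguous.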
+ bs = '{:032b}'.format(mask)
+ prefix = bs.rstrip('0')
+ if '0' in prefix:
+ raise ValueError('abnormal netmask: %s' %
+ _int_to_quad_dot(mask))
+ return len(prefix)
+
+
+def _get_routes():
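+ '''Return (destination, prefixlen) pairs parsed from /proc/net/route.'''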
+ routes = []
+ with open('/proc/net/route', 'r') as f:
+ for line in list(f)[1:]:
+ fields = line.split()
+ destination, flags, mask = fields[1], fields[3], fields[7]
+ flags = int(flags, 16)
+ if flags & 2:
+ # RTF_GATEWAY is set: the route goes via a gateway (e.g. the default
+ # route) rather than a directly attached subnet, so ignore it
+ continue
+ destination = socket.ntohl(int(destination, 16))
+ mask = socket.ntohl(int(mask, 16))
+ prefixlen = _netmask_to_prefixlen(mask)
+ routes.append((destination, prefixlen))
+ return routes
+
+
+class IPRange(object):
+ def __init__(self, prefix, prefixlen):
+ self.prefixlen = prefixlen
+ mask = (1 << prefixlen) - 1
+ self.mask = mask << (32 - prefixlen)
+ self.prefix = prefix & self.mask
+ @property
+ def bitstring(self):
+ return ('{:08b}' * 4).format(
+ self.prefix >> 24 & 0xff,
+ self.prefix >> 16 & 0xff,
+ self.prefix >> 8 & 0xff,
+ self.prefix & 0xff
+ )[:self.prefixlen]
+ def startswith(self, other_range):
+ return self.bitstring.startswith(other_range.bitstring)
+
+
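+# Pick a subnet with room for at least four addresses inside one of
+# valid_ranges that does not collide with any range in invalid_ranges.
+# Returns (prefix, prefixlen), or None if every valid range is already full.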
+def find_subnet(valid_ranges, invalid_ranges):
+ for vr in valid_ranges:
+ known_subnets = set(ir for ir in invalid_ranges if ir.startswith(vr))
+ prefixlens = set(r.prefixlen for r in known_subnets)
+ prefixlens.add(32 - 2) # need at least 4 addresses in subnet
+ prefixlen = min(prefixlens)
+ if prefixlen <= vr.prefixlen:
+ # valid subnet is full, move on to next
+ continue
+ subnetlen = prefixlen - vr.prefixlen
+ for prefix in (subnetid + vr.prefix
+ for subnetid in xrange(1 << subnetlen)):
+ if any(subnet.prefix == prefix for subnet in known_subnets):
+ continue
+ return prefix, prefixlen
+
+
+def _normalise_macaddr(macaddr):
+ '''pxelinux.0 wants the mac address to be lowercase and - separated'''
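+ # e.g. '52:54:00:12:34:56' -> '52-54-00-12-34-56'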
+ digits = (c for c in macaddr.lower() if c in string.hexdigits)
+ nibble_pairs = grouper(digits, 2)
+ return '-'.join(''.join(byte) for byte in nibble_pairs)
+
+
+@contextlib.contextmanager
+def executor(target_pid):
+ 'Kills a process if its parent dies'
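+ # The forked helper blocks reading from a pipe whose write end is held
+ # only by the parent: if the parent exits without writing 'Q', the read
+ # returns EOF and the helper SIGKILLs the target process; a 'Q' written
+ # on normal completion tells the helper to exit without killing anything.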
+ read_fd, write_fd = os.pipe()
+ helper_pid = os.fork()
+ if helper_pid == 0:
+ try:
+ os.close(write_fd)
+ while True:
+ rlist, _, _ = select.select([read_fd], [], [])
+ if read_fd in rlist:
+ d = os.read(read_fd, 1)
+ if not d:
+ os.kill(target_pid, signal.SIGKILL)
+ if d in ('', 'Q'):
+ os._exit(0)
+ else:
+ os._exit(1)
+ except BaseException as e:
+ import traceback
+ traceback.print_exc()
+ os._exit(1)
+ os.close(read_fd)
+ yield
+ os.write(write_fd, 'Q')
+ os.close(write_fd)
+
+
+def grouper(iterable, n, fillvalue=None):
+ "Collect data into fixed-length chunks or blocks"
+ # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
+ args = [iter(iterable)] * n
+ return itertools.izip_longest(*args, fillvalue=fillvalue)
+
+
+class PXEBoot(morphlib.writeexts.WriteExtension):
+ @contextlib.contextmanager
+ def _vlan(self, interface, vlan):
+ viface = '%s.%s' % (interface, vlan)
+ self.status(msg='Creating vlan %(viface)s', viface=viface)
+ subprocess.check_call(['vconfig', 'add', interface, str(vlan)])
+ try:
+ yield viface
+ finally:
+ self.status(msg='Destroying vlan %(viface)s', viface=viface)
+ subprocess.call(['vconfig', 'rem', viface])
+
+ @contextlib.contextmanager
+ def _static_ip(self, iface):
+ valid_ranges = set((
+ IPRange(_quad_dot_to_int('192.168.0.0'), 16),
+ IPRange(_quad_dot_to_int('172.16.0.0'), 12),
+ IPRange(_quad_dot_to_int('10.0.0.0'), 8),
+ ))
+ invalid_ranges = set(IPRange(prefix, prefixlen)
+ for (prefix, prefixlen) in _get_routes())
+ prefix, prefixlen = find_subnet(valid_ranges, invalid_ranges)
+ netaddr = prefix
+ dhcp_server_ip = netaddr + 1
+ client_ip = netaddr + 2
+ broadcast_ip = prefix | ((1 << (32 - prefixlen)) - 1)
+ self.status(msg='Assigning ip address %(ip)s/%(prefixlen)d to '
+ 'iface %(iface)s',
+ ip=_int_to_quad_dot(dhcp_server_ip), prefixlen=prefixlen,
+ iface=iface)
+ subprocess.check_call(['ip', 'addr', 'add',
+ '{}/{}'.format(_int_to_quad_dot(dhcp_server_ip),
+ prefixlen),
+ 'broadcast', _int_to_quad_dot(broadcast_ip),
+ 'scope', 'global',
+ 'dev', iface])
+ try:
+ yield (dhcp_server_ip, client_ip, broadcast_ip)
+ finally:
+ self.status(msg='Removing ip addresses from iface %(iface)s',
+ iface=iface)
+ subprocess.call(['ip', 'addr', 'flush', 'dev', iface])
+
+ @contextlib.contextmanager
+ def _up_interface(self, iface):
+ self.status(msg='Bringing interface %(iface)s up', iface=iface)
+ subprocess.check_call(['ip', 'link', 'set', iface, 'up'])
+ try:
+ yield
+ finally:
+ self.status(msg='Bringing interface %(iface)s down', iface=iface)
+ subprocess.call(['ip', 'link', 'set', iface, 'down'])
+
+ @contextlib.contextmanager
+ def static_ip(self, interface):
+ with self._static_ip(iface=interface) as (host_ip, client_ip,
+ broadcast_ip), \
+ self._up_interface(iface=interface):
+ yield (_int_to_quad_dot(host_ip),
+ _int_to_quad_dot(client_ip),
+ _int_to_quad_dot(broadcast_ip))
+
+ @contextlib.contextmanager
+ def vlan(self, interface, vlan):
+ with self._vlan(interface=interface, vlan=vlan) as viface, \
+ self.static_ip(interface=viface) \
+ as (host_ip, client_ip, broadcast_ip):
+ yield host_ip, client_ip, broadcast_ip
+
+ @contextlib.contextmanager
+ def _tempdir(self):
+ td = tempfile.mkdtemp()
+ print 'Created tempdir:', td
+ try:
+ yield td
+ finally:
+ shutil.rmtree(td, ignore_errors=True)
+
+ @contextlib.contextmanager
+ def _remote_tempdir(self, hostname, template):
+ persist = os.environ.get('PXE_INSTALLER') in ('no', 'False')
+ td = cliapp.ssh_runcmd(hostname, ['mktemp', '-d', template]).strip()
+ try:
+ yield td
+ finally:
+ if not persist:
+ cliapp.ssh_runcmd(hostname, ['find', td, '-delete'])
+
+ def _serve_tftpd(self, sock, host, port, interface, tftproot):
+ self.settings.progname = 'tftp server'
+ self._set_process_name()
+ while True:
+ logging.debug('tftpd waiting for connections')
+ # recvfrom with MSG_PEEK is how you accept UDP connections
+ _, peer = sock.recvfrom(0, socket.MSG_PEEK)
+ conn = sock
+ logging.debug('Connecting socket to peer: ' + repr(peer))
+ conn.connect(peer)
+ # The existing socket is now only serving that peer, so we need to
+ # bind a new UDP socket to the wildcard address, which needs the
+ # port to be in REUSEADDR mode.
+ conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ logging.debug('Binding replacement socket to ' + repr((host, port)))
+ sock.bind((host, port))
+
+ logging.debug('tftpd server handing connection to tftpd')
+ tftpd_serve = ['tftpd', '-rl', tftproot]
+ ret = subprocess.call(args=tftpd_serve, stdin=conn,
+ stdout=conn, stderr=None, close_fds=True)
+ # It's handy to turn off REUSEADDR after the rebinding,
+ # so we can protect against future bind attempts on this port.
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 0)
+ logging.debug('tftpd exited %d' % ret)
+ os._exit(0)
+
+ @contextlib.contextmanager
+ def _spawned_tftp_server(self, tftproot, host_ip, interface, tftp_port=0):
+ # inetd-style launchers tend to bind UDP ports with SO_REUSEADDR,
+ # because they need multiple sockets bound to the same port: one for
+ # receiving all connection attempts on that port, and one for each
+ # concurrent connection with a peer.
+ # This makes detecting whether there's a tftpd running difficult, so
+ # we'll instead use an ephemeral port and configure the PXE boot to
+ # use that tftp server for the kernel.
+ s = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
+ s.bind((host_ip, tftp_port))
+ host, port = s.getsockname()
+ self.status(msg='Bound listen socket to %(host)s, %(port)s',
+ host=host, port=port)
+ pid = os.fork()
+ if pid == 0:
+ try:
+ self._serve_tftpd(sock=s, host=host, port=port,
+ interface=interface, tftproot=tftproot)
+ except BaseException as e:
+ import traceback
+ traceback.print_exc()
+ os._exit(1)
+ s.close()
+ with executor(pid):
+ try:
+ yield port
+ finally:
+ self.status(msg='Killing tftpd listener pid=%(pid)d',
+ pid=pid)
+ os.kill(pid, signal.SIGKILL)
+
+ @contextlib.contextmanager
+ def tftp_server(self, host_ip, interface, tftp_port=0):
+ with self._tempdir() as tftproot, \
+ self._spawned_tftp_server(tftproot=tftproot, host_ip=host_ip,
+ interface=interface,
+ tftp_port=tftp_port) as tftp_port:
+ self.status(msg='Serving tftp root %(tftproot)s, on port %(port)d',
+ port=tftp_port, tftproot=tftproot)
+ yield tftp_port, tftproot
+
+ @contextlib.contextmanager
+ def _local_copy(self, src, dst):
+ self.status(msg='Installing %(src)s to %(dst)s',
+ src=src, dst=dst)
+ shutil.copy2(src=src, dst=dst)
+ try:
+ yield
+ finally:
+ self.status(msg='Removing %(dst)s', dst=dst)
+ os.unlink(dst)
+
+ @contextlib.contextmanager
+ def _local_symlink(self, src, dst):
+ os.symlink(src, dst)
+ try:
+ yield
+ finally:
+ os.unlink(dst)
+
+ def local_pxelinux(self, tftproot):
+ return self._local_copy('/usr/share/syslinux/pxelinux.0',
+ os.path.join(tftproot, 'pxelinux.0'))
+
+ def local_kernel(self, rootfs, tftproot):
+ return self._local_copy(os.path.join(rootfs, 'boot/vmlinuz'),
+ os.path.join(tftproot, 'kernel'))
+
+ @contextlib.contextmanager
+ def _remote_copy(self, hostname, src, dst):
+ persist = os.environ.get('PXE_INSTALLER') in ('no', 'False')
+ with open(src, 'r') as f:
+ cliapp.ssh_runcmd(hostname,
+ ['install', '-D', '-m644', '/proc/self/fd/0',
+ dst], stdin=f, stdout=None, stderr=None)
+ try:
+ yield
+ finally:
+ if not persist:
+ cliapp.ssh_runcmd(hostname, ['rm', dst])
+
+ @contextlib.contextmanager
+ def _remote_symlink(self, hostname, src, dst):
+ persist = os.environ.get('PXE_INSTALLER') in ('no', 'False')
+ cliapp.ssh_runcmd(hostname,
+ ['ln', '-s', '-f', src, dst],
+ stdin=None, stdout=None, stderr=None)
+ try:
+ yield
+ finally:
+ if not persist:
+ cliapp.ssh_runcmd(hostname, ['rm', '-f', dst])
+
+ @contextlib.contextmanager
+ def remote_kernel(self, rootfs, tftp_url, macaddr):
+ for name in ('vmlinuz', 'zImage', 'uImage'):
+ kernel_path = os.path.join(rootfs, 'boot', name)
+ if os.path.exists(kernel_path):
+ break
+ else:
+ raise cliapp.AppException('Failed to locate kernel')
+ url = urlparse.urlsplit(tftp_url)
+ basename = '{}-kernel'.format(_normalise_macaddr(macaddr))
+ target_path = os.path.join(url.path, basename)
+ with self._remote_copy(hostname=url.hostname, src=kernel_path,
+ dst=target_path):
+ yield basename
+
+ @contextlib.contextmanager
+ def remote_fdt(self, rootfs, tftp_url, macaddr):
+ fdt_rel_path = os.environ.get('DTB_PATH', '')
+        if fdt_rel_path == '':
+            yield
+            return
+        fdt_abs_path = os.path.join(rootfs, fdt_rel_path)
+        if not os.path.exists(fdt_abs_path):
+            raise cliapp.AppException('Failed to locate Flattened Device Tree')
+ url = urlparse.urlsplit(tftp_url)
+ basename = '{}-fdt'.format(_normalise_macaddr(macaddr))
+ target_path = os.path.join(url.path, basename)
+ with self._remote_copy(hostname=url.hostname, src=fdt_abs_path,
+ dst=target_path):
+ yield basename
+
+ @contextlib.contextmanager
+ def local_nfsroot(self, rootfs, target_ip):
+ nfsroot = target_ip + ':' + rootfs
+ self.status(msg='Exporting %(nfsroot)s as local nfsroot',
+ nfsroot=nfsroot)
+ cliapp.runcmd(['exportfs', '-o', 'ro,insecure,no_root_squash',
+ nfsroot])
+ try:
+ yield
+ finally:
+ self.status(msg='Removing %(nfsroot)s from local nfsroots',
+ nfsroot=nfsroot)
+ cliapp.runcmd(['exportfs', '-u', nfsroot])
+
+ @contextlib.contextmanager
+ def remote_nfsroot(self, rootfs, rsync_url, macaddr):
+ url = urlparse.urlsplit(rsync_url)
+ template = os.path.join(url.path,
+ _normalise_macaddr(macaddr) + '.XXXXXXXXXX')
+ with self._remote_tempdir(hostname=url.hostname, template=template) \
+ as tempdir:
+ nfsroot = urlparse.urlunsplit((url.scheme, url.netloc, tempdir,
+ url.query, url.fragment))
+ cliapp.runcmd(['rsync', '-asSPH', '--delete', rootfs, nfsroot],
+ stdin=None, stdout=open(os.devnull, 'w'),
+ stderr=None)
+ yield os.path.join(os.path.basename(tempdir),
+ os.path.basename(rootfs))
+
+ @staticmethod
+ def _write_pxe_config(fh, kernel_tftp_url, rootfs_nfs_url, device=None,
+ fdt_subpath=None, extra_args=''):
+
+ if device is None:
+ ip_cfg = "ip=dhcp"
+ else:
+ ip_cfg = "ip=:::::{device}:dhcp::".format(device=device)
+
+ fh.write(textwrap.dedent('''\
+ DEFAULT default
+ LABEL default
+ LINUX {kernel_url}
+ APPEND root=/dev/nfs {ip_cfg} nfsroot={rootfs_nfs_url} {extra_args}
+ ''').format(kernel_url=kernel_tftp_url, ip_cfg=ip_cfg,
+ rootfs_nfs_url=rootfs_nfs_url, extra_args=extra_args))
+ if fdt_subpath is not None:
+ fh.write("FDT {}\n".format(fdt_subpath))
+ fh.flush()
+
+ @contextlib.contextmanager
+ def local_pxeboot_config(self, tftproot, macaddr, ip, tftp_port,
+ nfsroot_dir, device=None):
+ kernel_tftp_url = 'tftp://{}:{}/kernel'.format(ip, tftp_port)
+ rootfs_nfs_url = '{}:{}'.format(ip, nfsroot_dir)
+ pxe_cfg_filename = _normalise_macaddr(macaddr)
+ pxe_cfg_path = os.path.join(tftproot, 'pxelinux.cfg', pxe_cfg_filename)
+ os.makedirs(os.path.dirname(pxe_cfg_path))
+ with open(pxe_cfg_path, 'w') as f:
+ self._write_pxe_config(fh=f, kernel_tftp_url=kernel_tftp_url,
+ rootfs_nfs_url=rootfs_nfs_url,
+ device=device,
+ extra_args=os.environ.get('KERNEL_ARGS',''))
+
+ try:
+ with self._local_symlink(
+ src=pxe_cfg_filename,
+ dst=os.path.join(tftproot,
+ 'pxelinux.cfg',
+ '01-' + pxe_cfg_filename)):
+ yield
+ finally:
+ os.unlink(pxe_cfg_path)
+
+ @contextlib.contextmanager
+ def remote_pxeboot_config(self, tftproot, kernel_tftproot, kernel_subpath,
+ fdt_subpath, rootfs_nfsroot, rootfs_subpath,
+ macaddr):
+ rootfs_nfs_url = '{}/{}'.format(rootfs_nfsroot, rootfs_subpath)
+ url = urlparse.urlsplit(kernel_tftproot)
+ kernel_tftp_url = '{}:{}'.format(url.netloc, kernel_subpath)
+ pxe_cfg_filename = _normalise_macaddr(macaddr)
+ url = urlparse.urlsplit(tftproot)
+ inst_cfg_path = os.path.join(url.path, 'pxelinux.cfg')
+ with tempfile.NamedTemporaryFile() as f:
+ self._write_pxe_config(
+ fh=f, kernel_tftp_url=kernel_tftp_url,
+ fdt_subpath=fdt_subpath,
+ rootfs_nfs_url=rootfs_nfs_url,
+ extra_args=os.environ.get('KERNEL_ARGS',''))
+ with self._remote_copy(
+ hostname=url.hostname, src=f.name,
+ dst=os.path.join(inst_cfg_path,
+ pxe_cfg_filename)), \
+ self._remote_symlink(
+ hostname=url.hostname,
+ src=pxe_cfg_filename,
+ dst=os.path.join(inst_cfg_path,
+ '01-' + pxe_cfg_filename)):
+ yield
+
+ @contextlib.contextmanager
+ def dhcp_server(self, interface, host_ip, target_ip, broadcast_ip):
+ with self._tempdir() as td:
+ leases_path = os.path.join(td, 'leases')
+ config_path = os.path.join(td, 'config')
+ stdout_path = os.path.join(td, 'stdout')
+ stderr_path = os.path.join(td, 'stderr')
+ pidfile_path = os.path.join(td, 'pid')
+ with open(config_path, 'w') as f:
+ f.write(textwrap.dedent('''\
+ start {target_ip}
+ end {target_ip}
+ interface {interface}
+ max_leases 1
+ lease_file {leases_path}
+ pidfile {pidfile_path}
+ boot_file pxelinux.0
+ option dns {host_ip}
+ option broadcast {broadcast_ip}
+ ''').format(**locals()))
+ with open(stdout_path, 'w') as stdout, \
+ open(stderr_path, 'w') as stderr:
+ sp = subprocess.Popen(['udhcpd', '-f', config_path], cwd=td,
+ stdin=open(os.devnull), stdout=stdout,
+ stderr=stderr)
+ try:
+ with executor(sp.pid):
+ yield
+ finally:
+ sp.terminate()
+
+ def get_interface_ip(self, interface):
+ ip_addresses = []
+ info = cliapp.runcmd(['ip', '-o', '-f', 'inet',
+ 'addr', 'show', interface]).rstrip('\n')
+ if info:
+ tokens = collections.deque(info.split()[1:])
+ ifname = tokens.popleft()
+ while tokens:
+ tok = tokens.popleft()
+ if tok == 'inet':
+ address = tokens.popleft()
+ address, netmask = address.split('/')
+ ip_addresses.append(address)
+ elif tok == 'brd':
+ tokens.popleft() # not interested in broadcast address
+ elif tok == 'scope':
+ tokens.popleft() # not interested in scope tag
+ else:
+ continue
+ if not ip_addresses:
+ raise cliapp.AppException('Interface %s has no addresses'
+ % interface)
+ if len(ip_addresses) > 1:
+ warnings.warn('Interface %s has multiple addresses, '
+ 'using first (%s)' % (interface, ip_addresses[0]))
+ return ip_addresses[0]
+
+ def ipmi_set_target_vlan(self):
+ if any(env_var.startswith('IPMI_') for env_var in os.environ):
+ # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST and PXEBOOT_VLAN
+ default = textwrap.dedent('''\
+ ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\
+ lan set 1 vlan id "$PXEBOOT_VLAN"
+ ''')
+ else:
+ default = textwrap.dedent('''\
+ while true; do
+ echo Please set the target\\'s vlan to $PXEBOOT_VLAN, \\
+ then enter \\"vlanned\\"
+ read
+ if [ "$REPLY" = vlanned ]; then
+ break
+ fi
+ done
+ ''')
+ command = os.environ.get('PXEBOOT_SET_VLAN_COMMAND', default)
+ subprocess.check_call(['sh', '-euc', command, '-'])
+
+ def ipmi_pxe_reboot_target(self):
+ if any(env_var.startswith('IPMI_') for env_var in os.environ):
+ # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST and PXEBOOT_VLAN
+ default = textwrap.dedent('''\
+ set -- ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST"
+ "$@" chassis bootdev pxe
+ "$@" chassis power reset
+ ''')
+ else:
+ default = textwrap.dedent('''\
+ while true; do
+ echo Please reboot the target in PXE mode, then\\
+ enter \\"pxe-booted\\"
+ read
+ if [ "$REPLY" = pxe-booted ]; then
+ break
+ fi
+ done
+ ''')
+ command = os.environ.get('PXEBOOT_PXE_REBOOT_COMMAND', default)
+ subprocess.check_call(['sh', '-euc', command, '-'])
+
+ def wait_for_target_to_install(self):
+ command = os.environ.get(
+ 'PXEBOOT_WAIT_INSTALL_COMMAND',
+ textwrap.dedent('''\
+ while true; do
+ echo Please wait for the system to install, then \\
+ enter \\"installed\\"
+ read
+ if [ "$REPLY" = installed ]; then
+ break
+ fi
+ done
+ '''))
+ subprocess.check_call(['sh', '-euc', command, '-'])
+
+ def ipmi_unset_target_vlan(self):
+ if any(env_var.startswith('IPMI_') for env_var in os.environ):
+ # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST
+ default = textwrap.dedent('''\
+ ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\
+ lan set 1 vlan id off
+ ''')
+ else:
+ default = textwrap.dedent('''\
+ while true; do
+ echo Please reset the target\\'s vlan, \\
+ then enter \\"unvlanned\\"
+ read
+ if [ "$REPLY" = unvlanned ]; then
+ break
+ fi
+ done
+ ''')
+ command = os.environ.get('PXEBOOT_UNSET_VLAN_COMMAND', default)
+ subprocess.check_call(['sh', '-euc', command, '-'])
+
+ def ipmi_reboot_target(self):
+ if any(env_var.startswith('IPMI_') for env_var in os.environ):
+ # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST
+ default = textwrap.dedent('''\
+ ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\
+ chassis power reset
+ ''')
+ else:
+ default = textwrap.dedent('''\
+ while true; do
+ echo Please reboot the target, then\\
+ enter \\"rebooted\\"
+ read
+ if [ "$REPLY" = rebooted ]; then
+ break
+ fi
+ done
+ ''')
+ command = os.environ.get('PXEBOOT_REBOOT_COMMAND', default)
+ subprocess.check_call(['sh', '-euc', command, '-'])
+
+ def process_args(self, (temp_root, macaddr)):
+ interface = os.environ.get('PXEBOOT_DEPLOYER_INTERFACE', None)
+ target_interface = os.environ.get('PXEBOOT_TARGET_INTERFACE', None)
+ vlan = os.environ.get('PXEBOOT_VLAN')
+ if vlan is not None: vlan = int(vlan)
+ mode = os.environ.get('PXEBOOT_MODE')
+ if mode is None:
+ if interface:
+ if vlan is not None:
+ mode = 'spawn-vlan'
+ else:
+ if 'PXEBOOT_CONFIG_TFTP_ADDRESS' in os.environ:
+ mode = 'existing-dhcp'
+ else:
+ mode = 'spawn-novlan'
+ else:
+ mode = 'existing-server'
+ assert mode in ('spawn-vlan', 'spawn-novlan', 'existing-dhcp',
+ 'existing-server')
+ if mode == 'spawn-vlan':
+ with self.vlan(interface=interface, vlan=vlan) \
+ as (host_ip, target_ip, broadcast_ip), \
+ self.tftp_server(host_ip=host_ip, interface=interface) \
+ as (tftp_port, tftproot), \
+ self.local_pxelinux(tftproot=tftproot), \
+ self.local_kernel(rootfs=temp_root, tftproot=tftproot), \
+ self.local_nfsroot(rootfs=temp_root, target_ip=target_ip), \
+ self.local_pxeboot_config(tftproot=tftproot, macaddr=macaddr,
+ device=target_interface,
+ ip=host_ip, tftp_port=tftp_port,
+ nfsroot_dir=temp_root), \
+ self.dhcp_server(interface=interface, host_ip=host_ip,
+ target_ip=target_ip,
+ broadcast_ip=broadcast_ip):
+ self.ipmi_set_target_vlan()
+ self.ipmi_pxe_reboot_target()
+ self.wait_for_target_to_install()
+ self.ipmi_unset_target_vlan()
+ self.ipmi_reboot_target()
+ elif mode == 'spawn-novlan':
+ with self.static_ip(interface=interface) as (host_ip, target_ip,
+ broadcast_ip), \
+ self.tftp_server(host_ip=host_ip, interface=interface,
+ tftp_port=69) \
+ as (tftp_port, tftproot), \
+ self.local_pxelinux(tftproot=tftproot), \
+ self.local_kernel(rootfs=temp_root, tftproot=tftproot), \
+ self.local_nfsroot(rootfs=temp_root, target_ip=target_ip), \
+ self.local_pxeboot_config(tftproot=tftproot, macaddr=macaddr,
+ device=target_interface,
+ ip=host_ip, tftp_port=tftp_port,
+ nfsroot_dir=temp_root), \
+ self.dhcp_server(interface=interface, host_ip=host_ip,
+ target_ip=target_ip,
+ broadcast_ip=broadcast_ip):
+ self.ipmi_pxe_reboot_target()
+ self.wait_for_target_to_install()
+ self.ipmi_reboot_target()
+ elif mode == 'existing-dhcp':
+ ip = self.get_interface_ip(interface)
+ config_tftpaddr = os.environ['PXEBOOT_CONFIG_TFTP_ADDRESS']
+            with self.tftp_server(host_ip=ip, interface=interface,
+                                  tftp_port=69) \
+                as (tftp_port, tftproot), \
+                self.local_kernel(rootfs=temp_root, tftproot=tftproot), \
+                self.local_nfsroot(rootfs=temp_root, target_ip=''):
+ kernel_tftproot = 'tftp://{}:{}/'.format(ip, tftp_port)
+ rootfs_nfsroot = '{}:{}'.format(ip, temp_root)
+ with self.remote_pxeboot_config(
+ tftproot=config_tftpaddr,
+ kernel_tftproot=kernel_tftproot,
+                        kernel_subpath='kernel',
+                        fdt_subpath=None,
+                        rootfs_nfsroot=rootfs_nfsroot,
+ rootfs_subpath='',
+ macaddr=macaddr):
+ self.ipmi_pxe_reboot_target()
+ self.wait_for_target_to_install()
+ self.ipmi_reboot_target()
+ elif mode == 'existing-server':
+            config_tftpaddr = os.environ['PXEBOOT_CONFIG_TFTP_ADDRESS']
+ kernel_tftpaddr = os.environ.get('PXEBOOT_KERNEL_TFTP_ADDRESS',
+ config_tftpaddr)
+ url = urlparse.urlsplit(kernel_tftpaddr)
+ kernel_tftproot = os.environ.get('PXEBOOT_KERNEL_TFTP_ROOT',
+ 'tftp://%s/%s' % (url.hostname,
+ url.path))
+ rootfs_rsync = os.environ['PXEBOOT_ROOTFS_RSYNC_ADDRESS']
+ url = urlparse.urlsplit(rootfs_rsync)
+ nfsroot = os.environ.get('PXEBOOT_ROOTFS_NFSROOT',
+ '%s:%s' % (url.hostname, url.path))
+ with self.remote_kernel(rootfs=temp_root, tftp_url=kernel_tftpaddr,
+ macaddr=macaddr) as kernel_subpath, \
+ self.remote_fdt(rootfs=temp_root, tftp_url=kernel_tftpaddr,
+ macaddr=macaddr) as fdt_subpath, \
+ self.remote_nfsroot(rootfs=temp_root, rsync_url=rootfs_rsync, \
+ macaddr=macaddr) as rootfs_subpath, \
+ self.remote_pxeboot_config(tftproot=config_tftpaddr,
+ kernel_tftproot=kernel_tftproot,
+ kernel_subpath=kernel_subpath,
+ fdt_subpath=fdt_subpath,
+ rootfs_nfsroot=nfsroot,
+ rootfs_subpath=rootfs_subpath,
+ macaddr=macaddr):
+ persist = os.environ.get('PXE_INSTALLER') in ('no', 'False')
+ if not persist:
+ self.ipmi_pxe_reboot_target()
+ self.wait_for_target_to_install()
+ self.ipmi_reboot_target()
+ else:
+            raise cliapp.AppException('Invalid PXEBOOT_MODE: %s' % mode)
+
+PXEBoot().run()
diff --git a/pxeboot.write.help b/pxeboot.write.help
new file mode 100644
index 00000000..7cb78bce
--- /dev/null
+++ b/pxeboot.write.help
@@ -0,0 +1,166 @@
+help: >
+ pxeboot.write extension.
+
+
+ This write extension will serve your generated system over NFS to
+ the target system.
+
+ In all modes `location` is the mac address of the interface that
+ the target will PXE boot from. This is used so that the target will
+ load the configuration file appropriate to it.
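+
+  The examples in the sections below use the `morph deploy`
+  command-line override syntax to set these variables; the deployment
+  label `pxe-system`, the cluster file name and all addresses shown
+  are invented for illustration only. The same variables can equally
+  be set in the cluster morphology itself. For example:
+
+      morph deploy cluster.morph \
+          pxe-system.location=52:54:00:12:34:56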
+
+
+ # `PXEBOOT_MODE`
+
+
+ It has 4 modes, which can be specified with PXEBOOT_MODE, or inferred
+ from which parameters are passed:
+
+
+ ## spawn-vlan
+
+
+ Specify PXEBOOT_DEPLOYER_INTERFACE and PXEBOOT_VLAN to configure
+ the target to pxeboot on a vlan and spawn a dhcp, nfs and tftp
+ server. This is potentially the fastest, since it doesn't need to
+ copy data to other servers.
+
+ This will create a vlan interface for the interface specified in
+ PXEBOOT_DEPLOYER_INTERFACE and spawn a dhcp server which serves
+ pxelinux.0, a configuration file and a kernel image from itself.
+
+ The configuration file informs the target to boot with a kernel
+ command-line that uses an NFS root served from the deployment host.
+
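+
+  A minimal sketch of a spawn-vlan deployment (the interface name and
+  vlan number are invented):
+
+      morph deploy cluster.morph \
+          pxe-system.PXEBOOT_DEPLOYER_INTERFACE=eth1 \
+          pxe-system.PXEBOOT_VLAN=42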
+
+ ## spawn-novlan
+
+
+  Specify PXEBOOT_DEPLOYER_INTERFACE without PXEBOOT_VLAN to configure
+  the target as in `spawn-vlan`, but without creating the vlan
+  interface.
+
+ This assumes that you have exclusive access to the interface, such
+ as if you're plugged in to the device directly, or your interface
+ is vlanned by your infrastructure team.
+
+ This is required if you are serving from a VM and bridging it to the
+ correct network via macvtap. For this to work, you need to macvtap
+ bridge to a pre-vlanned interface on your host machine.
+
+
+ ## existing-dhcp
+
+
+ Specify PXEBOOT_DEPLOYER_INTERFACE and PXEBOOT_CONFIG_TFTP_ADDRESS
+ to put config on an existing tftp server, already configured by the
+ dhcp server.
+
+ This spawns a tftp server and configures the local nfs server, but
+ doesn't spawn a dhcp server. This is useful if you have already got a
+ dhcp server that serves PXE images.
+
+ PXEBOOT_CONFIG_TFTP_ADDRESS is a URL in the form `sftp://$HOST/$PATH`.
+ The configuration file is copied to `$PATH/pxelinux.cfg/` on the
+ target identified by `$HOST`.
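+
+  A sketch of an existing-dhcp deployment (the hostname and path are
+  invented):
+
+      morph deploy cluster.morph \
+          pxe-system.PXEBOOT_DEPLOYER_INTERFACE=eth0 \
+          pxe-system.PXEBOOT_CONFIG_TFTP_ADDRESS=sftp://dhcp.example.com/srv/tftp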
+
+
+ ## existing-server
+
+
+  Specify at least PXEBOOT_CONFIG_TFTP_ADDRESS and
+  PXEBOOT_ROOTFS_RSYNC_ADDRESS to use existing servers, to which the
+  config, kernels and the rootfs are copied.
+
+  Configuration is copied to the target as in `existing-dhcp`.
+
+ Specify PXEBOOT_KERNEL_TFTP_ADDRESS if the tftp server that the
+ kernel must be downloaded from is different to that of the pxelinux
+ configuration file.
+
+  PXEBOOT_ROOTFS_RSYNC_ADDRESS is an rsync URL describing where to
+  copy nfsroots to, so that they can be exported by the NFS server.
+
+ Specify PXEBOOT_ROOTFS_NFSROOT if the nfsroot appears as a different
+ address from the target's perspective.
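+
+  A sketch of an existing-server deployment (all hostnames and paths
+  are invented):
+
+      morph deploy cluster.morph \
+          pxe-system.PXEBOOT_CONFIG_TFTP_ADDRESS=sftp://tftp.example.com/srv/tftp \
+          pxe-system.PXEBOOT_ROOTFS_RSYNC_ADDRESS=rsync://nfs.example.com/srv/nfsroots \
+          pxe-system.PXEBOOT_ROOTFS_NFSROOT=nfs.example.com:/srv/nfsroots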
+
+
+ # IPMI commands
+
+
+ After the PXE boot has been set up, the target needs to be rebooted
+ in PXE mode.
+
+ If the target is IPMI enabled, you can set `IPMI_USER`, `IPMI_HOST`
+ and `IPMI_PASSWORD` to make it reboot the target into netboot mode
+ automatically.
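+
+  For example (the credentials and address are invented):
+
+      morph deploy cluster.morph \
+          pxe-system.IPMI_USER=admin \
+          pxe-system.IPMI_PASSWORD=secret \
+          pxe-system.IPMI_HOST=192.0.2.10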
+
+ If they are not specified, then instructions will be displayed, and
+ `pxeboot.write` will wait for you to finish.
+
+ If there are command-line automation tools for rebooting the target
+ in netboot mode, then appropriate commands can be defined in the
+ following variables.
+
+
+ ## PXEBOOT_PXE_REBOOT_COMMAND
+
+
+ This command will be used to reboot the target device with its boot
+ device set to PXE boot.
+
+
+ ## PXEBOOT_REBOOT_COMMAND
+
+
+ This command will be used to reboot the target device in its default
+ boot mode.
+
+
+ ## PXEBOOT_WAIT_INSTALL_COMMAND
+
+
+ If it is possible for the target to notify you that it has finished
+ installing, you can put a command in here to wait for the event.
+
+
+ # Misc
+
+
+ ## KERNEL_ARGS
+
+
+ Additional kernel command line options. Note that the following
+ options
+
+      root=/dev/nfs ip=dhcp nfsroot=$NFSROOT
+
+ are implicitly added by the extension.
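+
+  For example, to get a serial console (the console device shown is
+  illustrative):
+
+      morph deploy cluster.morph \
+          pxe-system.KERNEL_ARGS="console=ttyS0,115200"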
+
+
+ ## DTB_PATH
+
+
+ Location in the deployed root filesystem of the Flattened Device
+ Tree blob (FDT) to use.
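+
+  The path is relative to the root of the deployed filesystem, for
+  example (an invented path):
+
+      morph deploy cluster.morph \
+          pxe-system.DTB_PATH=boot/devicetree.dtb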
+
+
+ ## PXE_INSTALLER
+
+
+  If set to `no`, `False` or any other YAML value for false, the
+  remotely installed rootfs, kernel, bootloader config file and
+  device tree blob (if specified) will not be removed after the
+  deployment finishes. This variable is only meaningful in the
+  `existing-server` mode.
+
+
+ ## PXEBOOT_TARGET_INTERFACE
+
+  Name of the interface on the target to pxeboot from. Some targets
+  with more than one interface try to fetch the rootfs over a
+  different interface than the one from which the pxeboot server is
+  reachable. Setting this variable adds the device name to the kernel
+  arguments so that the right interface is used.
+
+  Note that the name of this interface is the kernel's default name,
+  usually ethX, and is non-deterministic.
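+
+  For example (the interface name is invented):
+
+      morph deploy cluster.morph \
+          pxe-system.PXEBOOT_TARGET_INTERFACE=eth1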
diff --git a/scripts/cycle.sh b/scripts/cycle.sh
new file mode 100755
index 00000000..c0e2aa67
--- /dev/null
+++ b/scripts/cycle.sh
@@ -0,0 +1,61 @@
+#!/bin/sh
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+usage() {
+ echo "Usage: cycle.sh some-system some-cluster [newversion]"
+ echo
+ echo "This builds and deploys the current checked out version of"
+ echo "some-system, applying it as a self-upgrade to the system you"
+ echo "are working in, using configuration from some-cluster."
+ echo "The upgrade is labelled TEST by default, or [newversion] if"
+ echo "specified, and is set to be the default for next boot."
+}
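+
+# Example invocation (the system and cluster paths are illustrative):
+#
+#   scripts/cycle.sh systems/devel-system-x86_64-generic.morph \
+#       clusters/upgrade-devel.morph V2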
+
+if [ -z "$1" ] || [ -z "$2" ] || [ ! -z "$4" ] ; then
+ usage
+ exit 1
+fi
+
+newversion=TEST
+if [ ! -z "$3" ] ; then
+ newversion=$3
+ if (echo "$newversion" | grep ' ' > /dev/null 2>&1) ; then
+ echo 'Version label must not contain spaces.'
+ exit 1
+ fi
+fi
+
+if system-version-manager get-running | grep -q "^$newversion$"; then
+ echo "You are currently running the $newversion system."
+ echo "Maybe you want to boot into a different system version?"
+ exit 1
+fi
+
+set -e
+set -v
+
+runningversion=`system-version-manager get-running`
+system-version-manager set-default $runningversion
+if system-version-manager list | grep -q "^$newversion$"; then
+ system-version-manager remove $newversion
+fi
+
+morph gc
+morph build "$1"
+
+sed -i "s|^- morph: .*$|- morph: $1|" "$2"
+morph deploy --upgrade "$2" self.HOSTNAME=$(hostname) self.VERSION_LABEL=$newversion
+system-version-manager list
diff --git a/scripts/licensecheck.pl b/scripts/licensecheck.pl
new file mode 100644
index 00000000..5b6d0d33
--- /dev/null
+++ b/scripts/licensecheck.pl
@@ -0,0 +1,604 @@
+#!/usr/bin/perl
+# This script was originally based on the script of the same name from
+# the KDE SDK (by dfaure@kde.org)
+#
+# This version is
+# Copyright (C) 2007, 2008 Adam D. Barratt
+# Copyright (C) 2012 Francesco Poli
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <https://www.gnu.org/licenses/>.
+
+=head1 NAME
+
+licensecheck - simple license checker for source files
+
+=head1 SYNOPSIS
+
+B<licensecheck> B<--help>|B<--version>
+
+B<licensecheck> [B<--no-conf>] [B<--verbose>] [B<--copyright>]
+[B<-l>|B<--lines=>I<N>] [B<-i>|B<--ignore=>I<regex>] [B<-c>|B<--check=>I<regex>]
+[B<-m>|B<--machine>] [B<-r>|B<--recursive>]
+I<list of files and directories to check>
+
+=head1 DESCRIPTION
+
+B<licensecheck> attempts to determine the license that applies to each file
+passed to it, by searching the start of the file for text belonging to
+various licenses.
+
+If any of the arguments passed are directories, B<licensecheck> will add
+the files contained within to the list of files to process.
+
+=head1 OPTIONS
+
+=over 4
+
+=item B<--verbose>, B<--no-verbose>
+
+Specify whether to output the text being processed from each file before
+the corresponding license information.
+
+Default is to be quiet.
+
+=item B<-l=>I<N>, B<--lines=>I<N>
+
+Specify the number of lines of each file's header which should be parsed
+for license information. (Default is 60).
+
+=item B<-i=>I<regex>, B<--ignore=>I<regex>
+
+When processing the list of files and directories, the regular
+expression specified by this option will be used to indicate those which
+should not be considered (e.g. backup files, VCS metadata).
+
+=item B<-r>, B<--recursive>
+
+Specify that the contents of directories should be added
+recursively.
+
+=item B<-c=>I<regex>, B<--check=>I<regex>
+
+Specify a pattern against which filenames will be matched in order to
+decide which files to check the license of.
+
+The default includes common source files.
+
+=item B<--copyright>
+
+Also display copyright text found within the file
+
+=item B<-m>, B<--machine>
+
+Display the information in a machine readable way, i.e. in the form
+<file><tab><license>[<tab><copyright>] so that it can be easily sorted
+and/or filtered, e.g. with the B<awk> and B<sort> commands.
+Note that using the B<--verbose> option will kill the readability.
+
+=item B<--no-conf>, B<--noconf>
+
+Do not read any configuration files. This can only be used as the first
+option given on the command line.
+
+=back
+
+=head1 CONFIGURATION VARIABLES
+
+The two configuration files F</etc/devscripts.conf> and
+F<~/.devscripts> are sourced by a shell in that order to set
+configuration variables. Command line options can be used to override
+configuration file settings. Environment variable settings are
+ignored for this purpose. The currently recognised variables are:
+
+=over 4
+
+=item B<LICENSECHECK_VERBOSE>
+
+If this is set to I<yes>, then it is the same as the B<--verbose> command
+line parameter being used. The default is I<no>.
+
+=item B<LICENSECHECK_PARSELINES>
+
+If this is set to a positive number then the specified number of lines
+at the start of each file will be read whilst attempting to determine
+the license(s) in use. This is equivalent to the B<--lines> command line
+option.
+
+=back
+
+=head1 LICENSE
+
+This code is copyright by Adam D. Barratt <I<adam@adam-barratt.org.uk>>,
+all rights reserved; based on a script of the same name from the KDE
+SDK, which is copyright by <I<dfaure@kde.org>>.
+This program comes with ABSOLUTELY NO WARRANTY.
+You are free to redistribute this code under the terms of the GNU
+General Public License, version 2 or later.
+
+=head1 AUTHOR
+
+Adam D. Barratt <adam@adam-barratt.org.uk>
+
+=cut
+
+use strict;
+use warnings;
+use Getopt::Long qw(:config gnu_getopt);
+use File::Basename;
+
+my $progname = basename($0);
+
+# From dpkg-source
+my $default_ignore_regex = '
+# Ignore general backup files
+(?:^|/).*~$|
+# Ignore emacs recovery files
+(?:^|/)\.#.*$|
+# Ignore vi swap files
+(?:^|/)\..*\.swp$|
+# Ignore baz-style junk files or directories
+(?:^|/),,.*(?:$|/.*$)|
+# File-names that should be ignored (never directories)
+(?:^|/)(?:DEADJOE|\.cvsignore|\.arch-inventory|\.bzrignore|\.gitignore)$|
+# File or directory names that should be ignored
+(?:^|/)(?:CVS|RCS|\.pc|\.deps|\{arch\}|\.arch-ids|\.svn|\.hg|_darcs|\.git|
+\.shelf|_MTN|\.bzr(?:\.backup|tags)?)(?:$|/.*$)
+';
+
+# Take out comments and newlines
+$default_ignore_regex =~ s/^#.*$//mg;
+$default_ignore_regex =~ s/\n//sg;
+
+my $default_check_regex = '\.(c(c|pp|xx)?|h(h|pp|xx)?|f(77|90)?|go|p(l|m)|xs|sh|php|py(|x)|rb|java|js|vala|el|sc(i|e)|cs|pas|inc|dtd|xsl|mod|m|tex|mli?|(c|l)?hs)$';
+
+my $modified_conf_msg;
+
+my %OPT=(
+ verbose => '',
+ lines => '',
+ noconf => '',
+ ignore => '',
+ check => '',
+ recursive => 0,
+ copyright => 0,
+ machine => 0,
+);
+
+my $def_lines = 60;
+
+# Read configuration files and then command line
+# This is boilerplate
+
+if (@ARGV and $ARGV[0] =~ /^--no-?conf$/) {
+ $modified_conf_msg = " (no configuration files read)";
+ shift;
+} else {
+ my @config_files = ('/etc/devscripts.conf', '~/.devscripts');
+ my %config_vars = (
+ 'LICENSECHECK_VERBOSE' => 'no',
+ 'LICENSECHECK_PARSELINES' => $def_lines,
+ );
+ my %config_default = %config_vars;
+
+ my $shell_cmd;
+ # Set defaults
+ foreach my $var (keys %config_vars) {
+ $shell_cmd .= qq[$var="$config_vars{$var}";\n];
+ }
+ $shell_cmd .= 'for file in ' . join(" ", @config_files) . "; do\n";
+ $shell_cmd .= '[ -f $file ] && . $file; done;' . "\n";
+ # Read back values
+ foreach my $var (keys %config_vars) { $shell_cmd .= "echo \$$var;\n" }
+ my $shell_out = `/bin/bash -c '$shell_cmd'`;
+ @config_vars{keys %config_vars} = split /\n/, $shell_out, -1;
+
+ # Check validity
+ $config_vars{'LICENSECHECK_VERBOSE'} =~ /^(yes|no)$/
+ or $config_vars{'LICENSECHECK_VERBOSE'} = 'no';
+ $config_vars{'LICENSECHECK_PARSELINES'} =~ /^[1-9][0-9]*$/
+ or $config_vars{'LICENSECHECK_PARSELINES'} = $def_lines;
+
+ foreach my $var (sort keys %config_vars) {
+ if ($config_vars{$var} ne $config_default{$var}) {
+ $modified_conf_msg .= " $var=$config_vars{$var}\n";
+ }
+ }
+ $modified_conf_msg ||= " (none)\n";
+ chomp $modified_conf_msg;
+
+ $OPT{'verbose'} = $config_vars{'LICENSECHECK_VERBOSE'} eq 'yes' ? 1 : 0;
+ $OPT{'lines'} = $config_vars{'LICENSECHECK_PARSELINES'};
+}
+
+GetOptions(\%OPT,
+ "help|h",
+ "check|c=s",
+ "copyright",
+ "ignore|i=s",
+ "lines|l=i",
+ "machine|m",
+ "noconf|no-conf",
+ "recursive|r",
+ "verbose!",
+ "version|v",
+) or die "Usage: $progname [options] filelist\nRun $progname --help for more details\n";
+
+$OPT{'lines'} = $def_lines if $OPT{'lines'} !~ /^[1-9][0-9]*$/;
+$OPT{'ignore'} = $default_ignore_regex if ! length $OPT{'ignore'};
+$OPT{'check'} = $default_check_regex if ! length $OPT{'check'};
+
+if ($OPT{'noconf'}) {
+ fatal("--no-conf is only acceptable as the first command-line option!");
+}
+if ($OPT{'help'}) { help(); exit 0; }
+if ($OPT{'version'}) { version(); exit 0; }
+
+die "Usage: $progname [options] filelist\nRun $progname --help for more details\n" unless @ARGV;
+
+$OPT{'lines'} = $def_lines if not defined $OPT{'lines'};
+
+my @files = ();
+my @find_args = ();
+my $files_count = @ARGV;
+
+push @find_args, qw(-maxdepth 1) unless $OPT{'recursive'};
+push @find_args, qw(-follow -type f -print);
+
+while (@ARGV) {
+ my $file = shift @ARGV;
+
+ if (-d $file) {
+ open my $FIND, '-|', 'find', $file, @find_args
+ or die "$progname: couldn't exec find: $!\n";
+
+ while (<$FIND>) {
+ chomp;
+ next unless m%$OPT{'check'}%;
+ # Skip empty files
+ next if (-z $_);
+ push @files, $_ unless m%$OPT{'ignore'}%;
+ }
+ close $FIND;
+ } else {
+ next unless ($files_count == 1) or $file =~ m%$OPT{'check'}%;
+ push @files, $file unless $file =~ m%$OPT{'ignore'}%;
+ }
+}
+
+while (@files) {
+ my $file = shift @files;
+ my $content = '';
+ my $copyright_match;
+ my $copyright = '';
+ my $license = '';
+ my %copyrights;
+
+ open (my $F, '<' ,$file) or die "Unable to access $file\n";
+ while (<$F>) {
+ last if ($. > $OPT{'lines'});
+ $content .= $_;
+ $copyright_match = parse_copyright($_);
+ if ($copyright_match) {
+ $copyrights{lc("$copyright_match")} = "$copyright_match";
+ }
+ }
+ close($F);
+
+ $copyright = join(" / ", reverse sort values %copyrights);
+
+ print qq(----- $file header -----\n$content----- end header -----\n\n)
+ if $OPT{'verbose'};
+
+ $license = parselicense(clean_comments($content));
+
+ if ($OPT{'machine'}) {
+ print "$file\t$license";
+ print "\t" . ($copyright or "*No copyright*") if $OPT{'copyright'};
+ print "\n";
+ } else {
+ print "$file: ";
+ print "*No copyright* " unless $copyright;
+ print $license . "\n";
+ print " [Copyright: " . $copyright . "]\n"
+ if $copyright and $OPT{'copyright'};
+ print "\n" if $OPT{'copyright'};
+ }
+}
+
+sub parse_copyright {
+ my $copyright = '';
+ my $match;
+
+ my $copyright_indicator_regex = '
+ (?:copyright # The full word
+ |copr\. # Legally-valid abbreviation
+ |\x{00a9} # Unicode character COPYRIGHT SIGN
+ |\xc2\xa9 # Unicode copyright sign encoded in iso8859
+ |\(c\) # Legally-null representation of sign
+ )';
+ my $copyright_disindicator_regex = '
+ \b(?:info(?:rmation)? # Discussing copyright information
+ |(notice|statement|claim|string)s? # Discussing the notice
+ |and|or|is|in|to # Part of a sentence
+ |(holder|owner)s? # Part of a sentence
+ |ownership # Part of a sentence
+ )\b';
+ my $copyright_predisindicator_regex = '(
+ ^[#]define\s+.*\(c\) # #define foo(c) -- not copyright
+ )';
+
+ if ( ! m%$copyright_predisindicator_regex%ix) {
+
+ if (m%$copyright_indicator_regex(?::\s*|\s+)(\S.*)$%ix) {
+ $match = $1;
+
+ # Ignore lines matching "see foo for copyright information" etc.
+ if ($match !~ m%^\s*$copyright_disindicator_regex%ix) {
+ # De-cruft
+ $match =~ s/([,.])?\s*$//;
+ $match =~ s/$copyright_indicator_regex//igx;
+ $match =~ s/^\s+//;
+ $match =~ s/\s{2,}/ /g;
+ $match =~ s/\\@/@/g;
+ $copyright = $match;
+ }
+ }
+ }
+
+ return $copyright;
+}
+
+sub clean_comments {
+ local $_ = shift or return q{};
+
+ # Remove generic comments: look for 4 or more lines beginning with
+ # regular comment pattern and trim it. Fall back to old algorithm
+ # if no such pattern found.
+ my @matches = m/^\s*([^a-zA-Z0-9\s]{1,3})\s\w/mg;
+ if (@matches >= 4) {
+ my $comment_re = qr/\s*[\Q$matches[0]\E]{1,3}\s*/;
+ s/^$comment_re//mg;
+ }
+
+ # Remove Fortran comments
+ s/^[cC] //gm;
+ tr/\t\r\n/ /;
+
+ # Remove C / C++ comments
+ s#(\*/|/[/*])##g;
+ tr% A-Za-z.,@;0-9\(\)/-%%cd;
+ tr/ //s;
+
+ return $_;
+}
+
+sub help {
+ print <<"EOF";
+Usage: $progname [options] filename [filename ...]
+Valid options are:
+ --help, -h Display this message
+ --version, -v Display version and copyright info
+ --no-conf, --noconf Don't read devscripts config files; must be
+ the first option given
+ --verbose Display the header of each file before its
+ license information
+ --lines, -l Specify how many lines of the file header
+ should be parsed for license information
+ (Default: $def_lines)
+ --check, -c Specify a pattern indicating which files should
+ be checked
+ (Default: '$default_check_regex')
+ --machine, -m Display in a machine readable way (good for awk)
+ --recursive, -r Add the contents of directories recursively
+ --copyright Also display the file's copyright
+ --ignore, -i Specify that files / directories matching the
+ regular expression should be ignored when
+ checking files
+ (Default: '$default_ignore_regex')
+
+Default settings modified by devscripts configuration files:
+$modified_conf_msg
+EOF
+}
+
+sub version {
+ print <<"EOF";
+This is $progname, from the Debian devscripts package, version ###VERSION###
+Copyright (C) 2007, 2008 by Adam D. Barratt <adam\@adam-barratt.org.uk>; based
+on a script of the same name from the KDE SDK by <dfaure\@kde.org>.
+
+This program comes with ABSOLUTELY NO WARRANTY.
+You are free to redistribute this code under the terms of the
+GNU General Public License, version 2, or (at your option) any
+later version.
+EOF
+}
+
+sub parselicense {
+ my ($licensetext) = @_;
+
+ my $gplver = "";
+ my $extrainfo = "";
+ my $license = "";
+
+ if ($licensetext =~ /version ([^, ]+?)[.,]? (?:\(?only\)?.? )?(?:of the GNU (Affero )?(Lesser |Library )?General Public License )?(as )?published by the Free Software Foundation/i or
+ $licensetext =~ /GNU (?:Affero )?(?:Lesser |Library )?General Public License (?:as )?published by the Free Software Foundation[;,] version ([^, ]+?)[.,]? /i) {
+
+ $gplver = " (v$1)";
+ } elsif ($licensetext =~ /GNU (?:Affero )?(?:Lesser |Library )?General Public License, version (\d+(?:\.\d+)?)[ \.]/) {
+ $gplver = " (v$1)";
+ } elsif ($licensetext =~ /either version ([^ ]+)(?: of the License)?, or \(at your option\) any later version/) {
+ $gplver = " (v$1 or later)";
+ } elsif ($licensetext =~ /either version ([^ ]+)(?: of the License)?, or \(at your option\) version (\d(?:[\.-]\d+)*)/) {
+ $gplver = " (v$1 or v$2)";
+ }
+
+ if ($licensetext =~ /(?:675 Mass Ave|59 Temple Place|51 Franklin Steet|02139|02111-1307)/i) {
+ $extrainfo = " (with incorrect FSF address)$extrainfo";
+ }
+
+ if ($licensetext =~ /permission (?:is (also granted|given))? to link (the code of )?this program with (any edition of )?(Qt|the Qt library)/i) {
+ $extrainfo = " (with Qt exception)$extrainfo"
+ }
+
+ if ($licensetext =~ /(All changes made in this file will be lost|DO NOT (EDIT|delete this file)|Generated (automatically|by|from)|generated.*file)/i) {
+ $license = "GENERATED FILE";
+ }
+
+ if ($licensetext =~ /((is free software.? )?you can redistribute (it|them) and\/or modify (it|them)|is licensed) under the terms of (version [^ ]+ of )?the (GNU (Library |Lesser )General Public License|LGPL)/i) {
+ $license = "LGPL$gplver$extrainfo $license";
+ }
+
+ if ($licensetext =~ /is free software.? you can redistribute (it|them) and\/or modify (it|them) under the terms of the (GNU Affero General Public License|AGPL)/i) {
+ $license = "AGPL$gplver$extrainfo $license";
+ }
+
+ if ($licensetext =~ /(is free software.? )?you (can|may) redistribute (it|them) and\/or modify (it|them) under the terms of (?:version [^ ]+ (?:\(?only\)? )?of )?the GNU General Public License/i) {
+ $license = "GPL$gplver$extrainfo $license";
+ }
+
+ if ($licensetext =~ /is distributed under the terms of the GNU General Public License,/
+ and length $gplver) {
+ $license = "GPL$gplver$extrainfo $license";
+ }
+
+ if ($licensetext =~ /is distributed.*terms.*GPL/) {
+ $license = "GPL (unversioned/unknown version) $license";
+ }
+
+ if ($licensetext =~ /This file is part of the .*Qt GUI Toolkit. This file may be distributed under the terms of the Q Public License as defined/) {
+ $license = "QPL (part of Qt) $license";
+ } elsif ($licensetext =~ /may (be distributed|redistribute it) under the terms of the Q Public License/) {
+ $license = "QPL $license";
+ }
+
+ if ($licensetext =~ /opensource\.org\/licenses\/mit-license\.php/) {
+ $license = "MIT/X11 (BSD like) $license";
+ } elsif ($licensetext =~ /Permission is hereby granted, free of charge, to any person obtaining a copy of this software and(\/or)? associated documentation files \(the (Software|Materials)\), to deal in the (Software|Materials)/) {
+ $license = "MIT/X11 (BSD like) $license";
+ } elsif ($licensetext =~ /Permission is hereby granted, without written agreement and without license or royalty fees, to use, copy, modify, and distribute this software and its documentation for any purpose/) {
+ $license = "MIT/X11 (BSD like) $license";
+ }
+
+ if ($licensetext =~ /Permission to use, copy, modify, and(\/or)? distribute this software for any purpose with or without fee is hereby granted, provided.*copyright notice.*permission notice.*all copies/) {
+ $license = "ISC $license";
+ }
+
+ if ($licensetext =~ /THIS SOFTWARE IS PROVIDED .*AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY/) {
+ if ($licensetext =~ /All advertising materials mentioning features or use of this software must display the following acknowledge?ment.*This product includes software developed by/i) {
+ $license = "BSD (4 clause) $license";
+ } elsif ($licensetext =~ /(The name(?:\(s\))? .*? may not|Neither the (names? .*?|authors?) nor the names of( (its|their|other|any))? contributors may) be used to endorse or promote products derived from this software/i) {
+ $license = "BSD (3 clause) $license";
+ } elsif ($licensetext =~ /Redistributions of source code must retain the above copyright notice/i) {
+ $license = "BSD (2 clause) $license";
+ } else {
+ $license = "BSD $license";
+ }
+ }
+
+ if ($licensetext =~ /Mozilla Public License,? (Version|v\.) (\d+(?:\.\d+)?)/) {
+ $license = "MPL (v$2) $license";
+ }
+
+ if ($licensetext =~ /Released under the terms of the Artistic License ([^ ]+)/) {
+ $license = "Artistic (v$1) $license";
+ }
+
+ if ($licensetext =~ /is free software under the Artistic [Ll]icense/) {
+ $license = "Artistic $license";
+ }
+
+ if ($licensetext =~ /This program is free software; you can redistribute it and\/or modify it under the same terms as Perl itself/) {
+ $license = "Perl $license";
+ }
+
+ if ($licensetext =~ /under the Apache License, Version ([^ ]+)/) {
+ $license = "Apache (v$1) $license";
+ }
+
+ if ($licensetext =~ /(THE BEER-WARE LICENSE)/i) {
+ $license = "Beerware $license";
+ }
+
+ if ($licensetext =~ /This source file is subject to version ([^ ]+) of the PHP license/) {
+ $license = "PHP (v$1) $license";
+ }
+
+ if ($licensetext =~ /under the terms of the CeCILL /) {
+ $license = "CeCILL $license";
+ }
+
+ if ($licensetext =~ /under the terms of the CeCILL-([^ ]+) /) {
+ $license = "CeCILL-$1 $license";
+ }
+
+ if ($licensetext =~ /under the SGI Free Software License B/) {
+ $license = "SGI Free Software License B $license";
+ }
+
+ if ($licensetext =~ /is in the public domain/i) {
+ $license = "Public domain $license";
+ }
+
+ if ($licensetext =~ /terms of the Common Development and Distribution License(, Version ([^(]+))? \(the License\)/) {
+ $license = "CDDL " . ($1 ? "(v$2) " : '') . $license;
+ }
+
+ if ($licensetext =~ /Microsoft Permissive License \(Ms-PL\)/) {
+ $license = "Ms-PL $license";
+ }
+
+ if ($licensetext =~ /Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license \(the \"Software\"\)/ or
+ $licensetext =~ /Boost Software License([ ,-]+Version ([^ ]+)?(\.))/i) {
+ $license = "BSL " . ($1 ? "(v$2) " : '') . $license;
+ }
+
+ if ($licensetext =~ /PYTHON SOFTWARE FOUNDATION LICENSE (VERSION ([^ ]+))/i) {
+ $license = "PSF " . ($1 ? "(v$2) " : '') . $license;
+ }
+
+ if ($licensetext =~ /The origin of this software must not be misrepresented.*Altered source versions must be plainly marked as such.*This notice may not be removed or altered from any source distribution/ or
+ $licensetext =~ /see copyright notice in zlib\.h/) {
+ $license = "zlib/libpng $license";
+ } elsif ($licensetext =~ /This code is released under the libpng license/) {
+ $license = "libpng $license";
+ }
+
+ if ($licensetext =~ /Do What The Fuck You Want To Public License, Version ([^, ]+)/i) {
+ $license = "WTFPL (v$1) $license";
+ }
+
+ if ($licensetext =~ /Do what The Fuck You Want To Public License/i) {
+ $license = "WTFPL $license";
+ }
+
+ if ($licensetext =~ /(License WTFPL|Under (the|a) WTFPL)/i) {
+ $license = "WTFPL $license";
+ }
+
+ $license = "UNKNOWN" if (!length($license));
+
+ # Remove trailing spaces.
+ $license =~ s/\s+$//;
+
+ return $license;
+}
+
+sub fatal {
+ my ($pack,$file,$line);
+ ($pack,$file,$line) = caller();
+ (my $msg = "$progname: fatal error at line $line:\n@_\n") =~ tr/\0//d;
+ $msg =~ s/\n\n$/\n/;
+ die $msg;
+}
diff --git a/scripts/licensecheck.sh b/scripts/licensecheck.sh
new file mode 100755
index 00000000..a57b2f76
--- /dev/null
+++ b/scripts/licensecheck.sh
@@ -0,0 +1,101 @@
+#!/bin/sh
+
+# Copyright (C) 2013 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+usage() {
+ echo "Usage: license-check your-system"
+ echo
+ echo "This checks license info for all the chunks in your-system"
+ echo "It's re-runnable, and does morph edit to get each chunk."
+ echo "The process can take a while."
+}
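+
+# Example invocation, run from the top level of the definitions
+# checkout (the system path is illustrative):
+#
+#   scripts/licensecheck.sh systems/devel-system-x86_64-generic.morph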
+
+
+if [ -z "$1" ]; then
+ usage
+ exit 1
+fi
+
+workspace="$PWD"/../../..
+system="$1"
+
+gplv3_chunks="\
+autoconf \
+automake \
+bash \
+binutils \
+bison \
+ccache \
+cmake \
+flex \
+gawk \
+gcc \
+gdbm \
+gettext \
+gperf \
+groff \
+libtool \
+m4 \
+make \
+nano \
+patch \
+rsync \
+texinfo-tarball"
+
+gplv3_repos=""
+
+
+for f in strata/*.morph; do
+ cp "$f" "$f.bak"
+done
+
+
+strata=`grep "morph.*: *" "$system" | cut -d: -f2-`
+for stratum in $strata; do
+ chunks=`grep -E -- "-? +name.*: *" "$stratum" | cut -d: -f2-`
+ for chunk in $chunks; do
+ if ! (echo $gplv3_chunks | grep -wq "$chunk"); then
+ morph edit $chunk 1>&2
+ else
+ repo=`grep "name.*: *$chunk" "$stratum" -A1 | \
+ tail -n1 | cut -d: -f3-`
+ gplv3_repos="$gplv3_repos $repo"
+ fi
+ done
+done
+
+
+repos=`for stratum in $strata; do
+ grep "repo.*: *" "$stratum" | cut -d: -f3-
+ done | sort -u`
+
+
+for repo in $repos; do
+ if ! (echo $gplv3_repos | grep -wq "$repo") && \
+ [ -d "$workspace/upstream/$repo" ] ; then
+ echo "$repo"
+ perl scripts/licensecheck.pl -r "$workspace/upstream/$repo" | \
+ cut -d: -f2- | sort -u
+ echo
+ fi
+done
+
+
+for f in strata/*.morph.bak; do
+ mv "$f" "${f%.bak}"
+done
diff --git a/scripts/organize-morphologies.py b/scripts/organize-morphologies.py
new file mode 100755
index 00000000..abc8c739
--- /dev/null
+++ b/scripts/organize-morphologies.py
@@ -0,0 +1,266 @@
+#!/usr/bin/env python
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import json
+import morphlib
+import os
+import subprocess
+import sys
+import urllib
+import urllib2
+import urlparse
+import yaml
+import re
+import errno
+
+''' organize-morphologies.py:
+Tool for organizing morphologies in definitions.
+
+This script will move:
+ - cluster morphologies into clusters directory
+ - system morphologies into systems directory
+ - stratum morphologies into strata directory
+
+This script will download the chunk morphologies for every stratum
+and place them into the strata/stratum_which_the_chunk_belongs_to
+directory.
+
+It also updates any morphology fields which point to a morphology
+that has been moved.
+'''
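+
+# NOTE: this script takes no arguments and is expected to be run from
+# inside a Morph system branch checkout of definitions.git, since it
+# calls morphlib.sysbranchdir.open_from_within('.') below:
+#
+#     python scripts/organize-morphologies.py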
+
+
+# NOTE: The following reimplements part of morphlib's remote repo cache stuff
+def parse_repo_alias(repo):
+ domain, path = repo.split(':')
+ if domain == 'baserock':
+ repo = 'ssh://git@git.baserock.org/baserock/%s' % path
+ elif domain == 'upstream':
+ repo = 'ssh://git@git.baserock.org/delta/%s' % path
+ else:
+ raise Exception("I don't know how to parse the repo-alias \"%s\"" % repo)
+ return repo
+
+def make_request(path):
+ server_url = 'http://git.baserock.org:8080/'
+ url = urlparse.urljoin(server_url, '/1.0/%s' % path)
+ handle = urllib2.urlopen(url)
+ return handle.read()
+
+def quote(*args):
+ return tuple(urllib.quote(string) for string in args)
+
+def cat_file(repo, ref, filename):
+ return make_request('files?repo=%s&ref=%s&filename=%s' %
+ quote(repo, ref, filename))
+
+# NOTE: This function reimplement part of morphlib's loader
+def sanitise_morphology_path(morph_field, morph_kind, belongs_to='None'):
+    '''Return the path of a morphology given its name or morph field.
+
+    The path depends on the morphology's kind and, for chunks, on the
+    stratum it belongs to.
+    '''
+    # Dictionary mapping each morphology kind to its directory in
+    # definitions.git
+ morph_dir = { 'chunk': 'chunks', 'stratum': 'strata',
+ 'system':'systems', 'cluster': 'clusters'}
+    # For chunk morphologies we need to know which stratum the chunk
+    # belongs to.
+ if morph_kind == 'chunk':
+ if belongs_to == '':
+            raise morphlib.Error('Chunk morphologies need the stratum name '
+                                 'to create the path. Please add the stratum '
+                                 'to which this morphology belongs')
+        # Get the name of the chunk, which we assume is the last
+        # component of the morph field
+ if '/' in morph_field:
+ morph_field = os.path.basename(morph_field)
+
+ # Add the stratum name to the chunk name
+ morph_field = os.path.join(belongs_to, morph_field)
+
+ # Reset the kind to stratum because chunk contains stratum
+ # name in its path.
+ morph_kind = 'stratum'
+
+ # Add the morphology path to the morph field.
+ if not morph_field.startswith(morph_dir[morph_kind]):
+ morph_field = os.path.join(morph_dir[morph_kind], morph_field)
+
+    # Add the .morph suffix if the morphology lacks it.
+ if not morph_field.endswith('.morph'):
+ morph_field = morph_field + '.morph'
+
+ return morph_field
+
+def create_directory(name, path):
+ directory = os.path.join(path, name)
+ try:
+ os.makedirs(directory)
+ except OSError as err:
+ if err.errno != errno.EEXIST:
+ raise err
+ else:
+ pass
+ return directory
+
+def move_file(morph, directory, path, loader):
+ if not morph.filename.startswith(directory):
+ filename = os.path.basename(morph.filename)
+ new_location = os.path.join(path, filename)
+ print '\nMoving %s into %s' % (filename, new_location)
+ subprocess.call(['git', 'mv', morph.filename, new_location])
+ morph.filename = new_location
+ loader.unset_defaults(morph)
+ loader.save_to_file(morph.filename, morph)
+
+def load_and_fix_chunk(chunk_str, loader, name):
+ try:
+ chunk_morph = loader.load_from_string(chunk_str)
+ except morphlib.morphloader.InvalidFieldError as err:
+ if "comments" in str(err):
+            # This error is caused by old morphologies which contain
+            # the field "comments" instead of "description". Replacing
+            # the "comments" field with "description" allows the
+            # morphology to pass the parse_morphology_text check and be
+            # written back to a file.
+ fixed_chunk = loader.parse_morphology_text(chunk_str, name)
+ fixed_chunk['description'] = fixed_chunk.pop('comments')
+ print "WARNING: Invalid 'comments' field in " \
+ "%s corrected to 'description'" % name
+ chunk_morph = load_and_fix_chunk(str(fixed_chunk), loader, name)
+ elif "buildsystem" in str(err):
+            # This error is caused by a typo in some morphologies,
+            # which have a field "buildsystem" instead of
+            # "build-system".
+ fixed_chunk = loader.parse_morphology_text(chunk_str, name)
+ fixed_chunk['build-system'] = fixed_chunk.pop('buildsystem')
+            print "WARNING: Invalid 'buildsystem' field in %s " \
+ "corrected to 'build-system'" % name
+ chunk_morph = load_and_fix_chunk(str(fixed_chunk), loader, name)
+ else:
+ print "ERROR: %s in chunk %s" %(err, name)
+ raise err
+ except morphlib.morphloader.MorphologyNotYamlError as err:
+ print "WARNING: %s in chunk %s is not valid YAML, " \
+ "attempting to fix..." %(err, name)
+        # This error is caused by old morphologies written in JSON
+        # which contain '\t' characters. Loading such a morphology with
+        # load_from_string fails in parse_morphology_text. Removing
+        # these characters lets load_from_string load the morphology
+        # and translate it into correct YAML.
+ fixed_chunk = chunk_str.replace('\t','')
+ print "INFO: %s successfully fixed" % name
+ chunk_morph = load_and_fix_chunk(fixed_chunk, loader, name)
+ return chunk_morph
+
+def move_clusters(morphs, path, loader):
+ kind = 'system'
+ directory = 'clusters'
+ # Move cluster morphologies to clusters folder fixing their dependent
+ # morphologies which are systems.
+ full_path = create_directory(directory, path)
+ for morph in morphs:
+ all_systems = morph['systems'][:]
+ for system in morph['systems']:
+ all_systems.extend(system.get('subsystems', []))
+ # Add the correct path to the morph fields for systems and subsystems
+ for field in all_systems:
+ field['morph'] = sanitise_morphology_path(field['morph'], kind)
+ move_file(morph, directory, full_path, loader)
+
+def move_systems(morphs, path, loader):
+ kind = 'stratum'
+ directory = 'systems'
+ # Move system morphologies to systems folder fixing their dependent
+ # morphologies which are strata.
+ full_path = create_directory(directory, path)
+ for morph in morphs:
+ # Add name field and the correct path to the stratum on the morph
+ # fields in strata.
+ for field in morph['strata']:
+ field['name'] = os.path.basename(field['morph'])
+ field['morph'] = sanitise_morphology_path(field['morph'], kind)
+ move_file(morph, directory, full_path, loader)
+
+def download_chunks(morph, loader):
+ # Download chunks morphologies defined on the stratum and
+ # add them to the directory tree.
+ for chunk in morph['chunks']:
+ name = chunk['name'] + '.morph'
+ try:
+ chunk['morph'] = sanitise_morphology_path(chunk['morph'], 'chunk', morph['name'])
+ except KeyError as err:
+ if 'morph' in str(err):
+ chunk['morph'] = sanitise_morphology_path(chunk['name'], 'chunk', morph['name'])
+ else:
+ raise err
+ ref = chunk['ref']
+ repo = parse_repo_alias(chunk['repo'])
+        try:
+            print "\nDownloading %s from %s into %s" %(name, repo, chunk['morph'])
+            chunk_str = cat_file(repo, ref, name)
+        except urllib2.HTTPError as err:
+            # If there is no morphology in the repository we assume that
+            # the build system will be autodetected at build time, so we
+            # don't have to create a new morphology unless that fallback
+            # autodetection is disabled.
+            if err.code == 404:
+                print 'INFO: Morph will fall back to build-time ' \
+                      'autodetection for %s' %(name)
+                # Remove morph field from autodetected chunks
+                del chunk['morph']
+            else:
+                raise err
+        else:
+            # Only write the morphology out if it was actually downloaded.
+            loaded_chunk = load_and_fix_chunk(chunk_str, loader, name)
+            loader.unset_defaults(loaded_chunk)
+            loader.save_to_file(chunk['morph'], loaded_chunk)
+
+def move_strata(morphs, path, loader):
+ # Create strata directory
+ strata_dir = 'strata/'
+ strata_path = create_directory(strata_dir, path)
+ for morph in morphs:
+ # Create stratum directory where downloading its chunks.
+ stratum_path = strata_path + morph['name']
+ stratum_dir = create_directory(stratum_path, path)
+
+ # Download chunks which belongs to the stratum
+ download_chunks(morph, loader)
+
+ # Add to build-depends the correct path to the dependent stratum morphologies.
+ for build_depends in morph['build-depends']:
+ build_depends['morph'] = sanitise_morphology_path(build_depends['morph'], 'stratum')
+ # Move stratum morphologies to strata
+ move_file(morph, strata_dir, strata_path, loader)
+
+def main():
+ # Load all morphologies in the definitions repo
+ sb = morphlib.sysbranchdir.open_from_within('.')
+ loader = morphlib.morphloader.MorphologyLoader()
+ morphs = [m for m in sb.load_all_morphologies(loader)]
+
+    # Classify the morphologies according to their kind field
+ morphologies = { kind: [m for m in morphs if m['kind'] == kind]
+ for kind in ('chunk', 'stratum', 'system', 'cluster') }
+
+ for kind, morphs in morphologies.iteritems():
+ print 'There are: %d %s.\n' %(len(morphs), kind)
+
+ # Get the path from definitions repo
+ definitions_repo = sb.get_git_directory_name(sb.root_repository_url)
+
+ # Move the morphologies to its directories
+ move_clusters(morphologies['cluster'], definitions_repo, loader)
+    # Move the morphologies to their directories
+ move_strata(morphologies['stratum'], definitions_repo, loader)
+
+main()
diff --git a/scripts/release-build b/scripts/release-build
new file mode 100755
index 00000000..5525e9e9
--- /dev/null
+++ b/scripts/release-build
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+import cliapp
+import morphlib
+import os
+import subprocess
+import sys
+import time
+
+
+class Build(object):
+ '''A single distbuild instance.'''
+
+ def __init__(self, name, arch, app):
+ self.system_name = name
+ self.controller = app.controllers[arch]
+ self.command = [
+ 'morph', 'distbuild-morphology',
+ '--controller-initiator-address=%s' % self.controller,
+ 'baserock:baserock/definitions', app.ref, self.system_name]
+
+ def start(self):
+ self.process = subprocess.Popen(self.command)
+
+ def completed(self):
+ return (self.process.poll() is not None)
+
+
+class ReleaseApp(cliapp.Application):
+
+ '''Cliapp app that handles distbuilding and deploying a cluster.'''
+
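+    # Example invocation (the cluster path is illustrative; cliapp reads
+    # extra settings from files given with --config, for instance
+    # scripts/release-build.test.conf):
+    #
+    #   scripts/release-build --config scripts/release-build.test.conf \
+    #       clusters/release.morph
+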
+ def add_settings(self):
+ self.settings.string_list(['controllers'],
+                                  'a list of arch:controller pairs giving '
+                                  'the distbuild controller for each '
+                                  'architecture')
+
+ self.settings.string(['trove-host'],
+ 'hostname of Trove instance')
+
+ self.settings.string(['artifact-cache-server'],
+ 'server to fetch artifacts from', default=None)
+
+ self.settings.string(['release-number'],
+ 'Baserock version of the systems being built',
+ default='yy.ww')
+
+ def process_args(self, args):
+ '''Process the command line'''
+ self.controllers = {}
+ controllers_list = self.settings['controllers']
+ for item in controllers_list:
+ arch, controller = item.split(':')
+ self.controllers[arch] = controller
+
+ self.ref = cliapp.runcmd(['git', 'rev-parse', 'HEAD']).strip()
+
+ sb = morphlib.sysbranchdir.open_from_within('.')
+ definitions = sb.get_git_directory_name(sb.root_repository_url)
+ defs_repo = morphlib.gitdir.GitDirectory(definitions)
+ self.loader = morphlib.morphloader.MorphologyLoader()
+ self.finder = morphlib.morphologyfinder.MorphologyFinder(defs_repo)
+
+ cluster_name = args[0]
+ cluster, cluster_path = self.load_morphology(cluster_name)
+
+ builds = self.prepare_builds(cluster)
+ if not os.path.exists('builds'):
+ os.mkdir('builds')
+ os.chdir('builds')
+ for build in builds:
+ build.start()
+
+ while not all(build.completed() for build in builds):
+ time.sleep(1)
+
+ fail = False
+ for build in builds:
+ if build.process.returncode != 0:
+ fail = True
+ sys.stderr.write(
+ 'Building failed for %s\n' % build.system_name)
+ if fail:
+ raise cliapp.AppException('Building of systems failed')
+
+ os.chdir('..')
+ if not os.path.exists('release'):
+ os.mkdir('release')
+ self.deploy_images(cluster, cluster_path)
+
+ def load_morphology(self, name, kind=None):
+ path = morphlib.util.sanitise_morphology_path(name)
+ morph = self.loader.load_from_string(
+ self.finder.read_morphology(path))
+ if kind:
+ assert morph['kind'] == kind
+ return morph, path
+
+ def iterate_systems(self, system_list):
+ for system in system_list:
+ yield system['morph']
+ if 'subsystems' in system:
+ for subsystem in self.iterate_systems(system['subsystems']):
+ yield subsystem
+
+ def prepare_builds(self, cluster):
+ '''Prepare a list of builds'''
+ systems = set(self.iterate_systems(cluster['systems']))
+ builds = []
+ for system_name in systems:
+ system, _ = self.load_morphology(system_name)
+ if system['arch'] in self.controllers:
+ builds.append(Build(system_name, system['arch'], self))
+ return builds
+
+ def deploy_images(self, cluster, cluster_path):
+ version_label = 'baserock-%s' % self.settings['release-number']
+ outputs = {}
+
+ for system in cluster['systems']:
+ morphology_name = system['morph']
+ morphology = self.load_morphology(morphology_name)[0]
+ if morphology['arch'] not in self.controllers:
+ continue
+
+ for deployment_name, deployment_info in system['deploy'].iteritems():
+ # The release.morph cluster must specify the file's basename (name
+ # and extension). This script knows the name, but it cannot find out
+ # the appropriate file extension without second-guessing the
+ # behaviour of the write extensions.
+ basename = deployment_info['location']
+
+ if '/' in basename or basename.startswith(version_label):
+ raise cliapp.AppException(
+ 'In %s: system %s.location should be just the base name, '
+ 'e.g. "%s.img"' % (cluster_path, deployment_name, deployment_name))
+
+ filename = os.path.join('release', '%s-%s' % (version_label, basename))
+ if os.path.exists(filename):
+ self.output.write('Reusing existing deployment of %s\n' % filename)
+ else:
+ self.output.write('Creating %s from release.morph\n' % filename)
+ self.deploy_single_image(cluster_path, deployment_name, filename, version_label)
+
+ def deploy_single_image(self, cluster_path, name, location, version_label):
+ deploy_command = [
+ 'morph', 'deploy', cluster_path, name,
+ '--trove-host=%s' % self.settings['trove-host']]
+ artifact_server = self.settings['artifact-cache-server']
+ if artifact_server is not None:
+ deploy_command.append('--artifact-cache-server=%s' % artifact_server)
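+ # Per-deployment settings are passed to 'morph deploy' on the command
+ # line as <deployment-name>.<KEY>=<value> arguments.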
+ deploy_command.extend((
+ '%s.location=%s' % (name, location),
+ '%s.VERSION_LABEL=%s' % (name, version_label)
+ ))
+
+ cliapp.runcmd(deploy_command, stdout=sys.stdout)
+
+
+ReleaseApp().run()
diff --git a/scripts/release-build.test.conf b/scripts/release-build.test.conf
new file mode 100644
index 00000000..50083352
--- /dev/null
+++ b/scripts/release-build.test.conf
@@ -0,0 +1,6 @@
+[config]
+trove-host = ct-mcr-1.ducie.codethink.co.uk
+controllers = x86_64:ct-mcr-1-distbuild-x86-64-majikthise-controller.dyn.ducie.codethink.co.uk,
+ x86_32:ct-mcr-1-distbuild-x86-32-majikthise-controller.dyn.ducie.codethink.co.uk,
+ armv7lhf:ct-mcr-1-distbuild-armv7lhf-jetson.dyn.ducie.codethink.co.uk
+release-number = 14.29
diff --git a/scripts/release-test b/scripts/release-test
new file mode 100755
index 00000000..a1611721
--- /dev/null
+++ b/scripts/release-test
@@ -0,0 +1,401 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Codethink Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+'''release-test
+
+This script deploys the set of systems in the cluster morphology it is
+instructed to read, to test that they work correctly.
+
+'''
+
+import cliapp
+import os
+import pipes
+import shlex
+import shutil
+import socket
+import tempfile
+import time
+import uuid
+
+import morphlib
+
+
+class MorphologyHelper(object):
+
+ def __init__(self):
+ self.sb = sb = morphlib.sysbranchdir.open_from_within('.')
+ defs_repo_path = sb.get_git_directory_name(sb.root_repository_url)
+ self.defs_repo = morphlib.gitdir.GitDirectory(defs_repo_path)
+ self.loader = morphlib.morphloader.MorphologyLoader()
+ self.finder = morphlib.morphologyfinder.MorphologyFinder(self.defs_repo)
+
+ def load_morphology(self, path):
+ text = self.finder.read_morphology(path)
+ return self.loader.load_from_string(text)
+
+ @classmethod
+ def iterate_systems(cls, systems_list):
+ for system in systems_list:
+ yield morphlib.util.sanitise_morphology_path(system['morph'])
+ if 'subsystems' in system:
+ for subsystem in cls.iterate_systems(system['subsystems']):
+ yield subsystem
+
+ def iterate_cluster_deployments(cls, cluster_morph):
+ for system in cluster_morph['systems']:
+ path = morphlib.util.sanitise_morphology_path(system['morph'])
+ defaults = system.get('deploy-defaults', {})
+ for name, options in system['deploy'].iteritems():
+ config = dict(defaults)
+ config.update(options)
+ yield path, name, config
+
+ def load_cluster_systems(self, cluster_morph):
+ for system_path in set(self.iterate_systems(cluster_morph['systems'])):
+ system_morph = self.load_morphology(system_path)
+ yield system_path, system_morph
+
+
+class TimeoutError(cliapp.AppException):
+
+ """Error to be raised when a connection waits too long"""
+
+ def __init__(self, msg):
+ super(TimeoutError, self).__init__(msg)
+
+
+class VMHost(object):
+
+ def __init__(self, user, address, disk_path):
+ self.user = user
+ self.address = address
+ self.disk_path = disk_path
+
+ @property
+ def ssh_host(self):
+ return '{user}@{address}'.format(user=self.user, address=self.address)
+
+ def runcmd(self, *args, **kwargs):
+ cliapp.ssh_runcmd(self.ssh_host, *args, **kwargs)
+
+ def virsh(self, *args, **kwargs):
+ self.runcmd(['virsh', '-c', 'qemu:///system'] + list(args), **kwargs)
+
+
+class DeployedSystemInstance(object):
+
+ def __init__(self, deployment, config, host_machine, vm_id, rootfs_path):
+ self.deployment = deployment
+ self.config = config
+ # TODO: Stop assuming test machine can DHCP and be assigned its
+ # hostname in the deployer's resolve search path.
+ self.ip_address = self.config['HOSTNAME']
+ self.host_machine = host_machine
+ self.vm_id = vm_id
+ self.rootfs_path = rootfs_path
+
+ @property
+ def ssh_host(self):
+ # TODO: Stop assuming we ssh into test instances as root
+ return 'root@{host}'.format(host=self.ip_address)
+
+ def runcmd(self, argv, chdir='.', **kwargs):
+ ssh_cmd = ['ssh', '-o', 'StrictHostKeyChecking=no',
+ '-o', 'UserKnownHostsFile=/dev/null', self.ssh_host]
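+ # Wrap argv in a small 'sh -c' script so the remote command runs in the
+ # requested directory; pipes.quote keeps each argument intact across the
+ # extra shell level that ssh introduces.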
+ cmd = ['sh', '-c', 'cd "$1" && shift && exec "$@"', '-', chdir]
+ cmd += argv
+ ssh_cmd.append(' '.join(map(pipes.quote, cmd)))
+ return cliapp.runcmd(ssh_cmd, **kwargs)
+
+ def _wait_for_dhcp(self, timeout):
+ '''Block until given hostname resolves successfully.
+
+ Raises TimeoutError if the hostname has not appeared in 'timeout'
+ seconds.
+
+ '''
+ start_time = time.time()
+ while True:
+ try:
+ socket.gethostbyname(self.ip_address)
+ return
+ except socket.gaierror:
+ pass
+ if time.time() > start_time + timeout:
+ raise TimeoutError("Host %s did not appear after %i seconds" %
+ (self.ip_address, timeout))
+ time.sleep(0.5)
+
+ def _wait_for_ssh(self, timeout):
+ """Wait until the deployed VM is responding via SSH"""
+ start_time = time.time()
+ while True:
+ try:
+ self.runcmd(['true'], stdin=None, stdout=None, stderr=None)
+ return
+ except cliapp.AppException:
+ # TODO: Stop assuming the ssh part of the command is what failed
+ if time.time() > start_time + timeout:
+ raise TimeoutError("%s sshd did not start after %i seconds"
+ % (self.ip_address, timeout))
+ time.sleep(0.5)
+
+ def wait_until_online(self, timeout=10):
+ self._wait_for_dhcp(timeout)
+ self._wait_for_ssh(timeout)
+
+ def delete(self):
+ # Stop and remove VM
+ try:
+ self.host_machine.virsh('destroy', self.vm_id)
+ except cliapp.AppException as e:
+ # TODO: Stop assuming that destroy failed because it wasn't running
+ pass
+ try:
+ self.host_machine.virsh('undefine', self.vm_id, '--remove-all-storage')
+ except cliapp.AppException as e:
+ # TODO: Stop assuming that undefine failed because it was
+ # already removed
+ pass
+
+
+class Deployment(object):
+
+ def __init__(self, cluster_path, name, deployment_config, host_machine):
+ self.cluster_path = cluster_path
+ self.name = name
+ self.deployment_config = deployment_config
+ self.host_machine = host_machine
+
+ @staticmethod
+ def _ssh_host_key_exists(hostname):
+ """Check if an ssh host key exists in known_hosts"""
+ if not os.path.exists('/root/.ssh/known_hosts'):
+ return False
+ with open('/root/.ssh/known_hosts', 'r') as known_hosts:
+ return any(line.startswith(hostname) for line in known_hosts)
+
+ def _update_known_hosts(self):
+ if not self._ssh_host_key_exists(self.host_machine.address):
+ with open('/root/.ssh/known_hosts', 'a') as known_hosts:
+ cliapp.runcmd(['ssh-keyscan', self.host_machine.address],
+ stdout=known_hosts)
+
+ @staticmethod
+ def _generate_sshkey_config(tempdir, config):
+ manifest = os.path.join(tempdir, 'manifest')
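+ # Manifest lines are '[overwrite] MODE UID GID PATH', the format read by
+ # the install-files configure extension that INSTALL_FILES points at;
+ # 'overwrite' replaces any authorized_keys already present in the image.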
+ with open(manifest, 'w') as f:
+ f.write('0040700 0 0 /root/.ssh\n')
+ f.write('overwrite 0100600 0 0 /root/.ssh/authorized_keys\n')
+ authkeys = os.path.join(tempdir, 'root', '.ssh', 'authorized_keys')
+ os.makedirs(os.path.dirname(authkeys))
+ with open(authkeys, 'w') as auth_f:
+ with open('/root/.ssh/id_rsa.pub', 'r') as key_f:
+ shutil.copyfileobj(key_f, auth_f)
+
+ install_files = shlex.split(config.get('INSTALL_FILES', ''))
+ install_files.append(manifest)
+ yield 'INSTALL_FILES', ' '.join(pipes.quote(f) for f in install_files)
+
+ def deploy(self):
+ self._update_known_hosts()
+
+ hostname = str(uuid.uuid4())
+ vm_id = hostname
+ image_base = self.host_machine.disk_path
+ rootpath = '{image_base}/{hostname}.img'.format(image_base=image_base,
+ hostname=hostname)
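+ # The kvm write extension expects a deployment location of the form
+ # kvm+ssh://HOST/GUEST_NAME/PATH_TO_DISK_IMAGE.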
+ loc = 'kvm+ssh://{ssh_host}/{id}/{path}'.format(
+ ssh_host=self.host_machine.ssh_host, id=vm_id, path=rootpath)
+
+ options = {
+ 'type': 'kvm',
+ 'location': loc,
+ 'AUTOSTART': 'True',
+ 'HOSTNAME': hostname,
+ 'DISK_SIZE': '20G',
+ 'RAM_SIZE': '2G',
+ 'VERSION_LABEL': 'release-test',
+ }
+
+ tempdir = tempfile.mkdtemp()
+ try:
+ options.update(
+ self._generate_sshkey_config(tempdir,
+ self.deployment_config))
+
+ args = ['morph', 'deploy', self.cluster_path, self.name]
+ for k, v in options.iteritems():
+ args.append('%s.%s=%s' % (self.name, k, v))
+ cliapp.runcmd(args, stdin=None, stdout=None, stderr=None)
+
+ config = dict(self.deployment_config)
+ config.update(options)
+
+ return DeployedSystemInstance(self, config, self.host_machine,
+ vm_id, rootpath)
+ finally:
+ shutil.rmtree(tempdir)
+
+
+class ReleaseApp(cliapp.Application):
+
+ """Cliapp application which handles automatic builds and tests"""
+
+ def add_settings(self):
+ """Add the command line options needed"""
+ group_main = 'Program Options'
+ self.settings.string_list(['deployment-host'],
+ 'ARCH:USER@HOST:PATH that VMs can be deployed to',
+ default=None,
+ group=group_main)
+ self.settings.string(['trove-host'],
+ 'Address of Trove for test systems to build from',
+ default=None,
+ group=group_main)
+ self.settings.string(['trove-id'],
+ 'ID of Trove for test systems to build from',
+ default=None,
+ group=group_main)
+ self.settings.string(['build-ref-prefix'],
+ 'Prefix of build branches for test systems',
+ default=None,
+ group=group_main)
+
+ @staticmethod
+ def _run_tests(instance, system_path, system_morph,
+ (trove_host, trove_id, build_ref_prefix),
+ morph_helper, systems):
+ instance.wait_until_online()
+
+ tests = []
+ def baserock_build_test(instance):
+ instance.runcmd(['git', 'config', '--global', 'user.name',
+ 'Test Instance of %s' % instance.deployment.name])
+ instance.runcmd(['git', 'config', '--global', 'user.email',
+ 'ci-test@%s' % instance.config['HOSTNAME']])
+ instance.runcmd(['mkdir', '-p', '/src/ws', '/src/cache',
+ '/src/tmp'])
+ def morph_cmd(*args, **kwargs):
+ # TODO: decide whether to use cached artifacts or not by
+ # adding --artifact-cache-server= --cache-server=
+ argv = ['morph', '--log=/src/morph.log', '--cachedir=/src/cache',
+ '--tempdir=/src/tmp', '--log-max=100M',
+ '--trove-host', trove_host, '--trove-id', trove_id,
+ '--build-ref-prefix', build_ref_prefix]
+ argv.extend(args)
+ instance.runcmd(argv, **kwargs)
+
+ repo = morph_helper.sb.root_repository_url
+ ref = morph_helper.defs_repo.HEAD
+ sha1 = morph_helper.defs_repo.resolve_ref_to_commit(ref)
+ morph_cmd('init', '/src/ws')
+ chdir = '/src/ws'
+
+ morph_cmd('checkout', repo, ref, chdir=chdir)
+ # TODO: Add a morph subcommand that gives the path to the root repository.
+ repo_path = os.path.relpath(
+ morph_helper.sb.get_git_directory_name(repo),
+ morph_helper.sb.root_directory)
+ chdir = os.path.join(chdir, ref, repo_path)
+
+ instance.runcmd(['git', 'reset', '--hard', sha1], chdir=chdir)
+ print 'Building test systems for {sys}'.format(sys=system_path)
+ for to_build_path, to_build_morph in systems.iteritems():
+ if to_build_morph['arch'] == system_morph['arch']:
+ print 'Test building {path}'.format(path=to_build_path)
+ morph_cmd('build', to_build_path, chdir=chdir,
+ stdin=None, stdout=None, stderr=None)
+ print 'Finished Building test systems'
+
+ def python_smoke_test(instance):
+ instance.runcmd(['python', '-c', 'print "Hello World"'])
+
+ # TODO: Come up with a better way of determining which tests to run
+ if 'devel' in system_path:
+ tests.append(baserock_build_test)
+ else:
+ tests.append(python_smoke_test)
+
+ for test in tests:
+ test(instance)
+
+ def deploy_and_test_systems(self, cluster_path,
+ deployment_hosts, build_test_config):
+ """Run the deployments and tests"""
+
+ version = 'release-test'
+
+ morph_helper = MorphologyHelper()
+ cluster_morph = morph_helper.load_morphology(cluster_path)
+ systems = dict(morph_helper.load_cluster_systems(cluster_morph))
+
+ for system_path, deployment_name, deployment_config in \
+ morph_helper.iterate_cluster_deployments(cluster_morph):
+
+ system_morph = systems[system_path]
+ # We can only test systems in KVM that have a BSP
+ if not any('bsp' in si['morph'] for si in system_morph['strata']):
+ continue
+
+ # We can only test systems in KVM that we have a host for
+ if system_morph['arch'] not in deployment_hosts:
+ continue
+ host_machine = deployment_hosts[system_morph['arch']]
+ deployment = Deployment(cluster_path, deployment_name,
+ deployment_config, host_machine)
+
+ instance = deployment.deploy()
+ try:
+ self._run_tests(instance, system_path, system_morph,
+ build_test_config, morph_helper, systems)
+ finally:
+ instance.delete()
+
+ def process_args(self, args):
+ """Process the command line args and kick off the builds/tests"""
+ if self.settings['build-ref-prefix'] is None:
+ self.settings['build-ref-prefix'] = (
+ os.path.join(self.settings['trove-id'], 'builds'))
+ for setting in ('deployment-host', 'trove-host',
+ 'trove-id', 'build-ref-prefix'):
+ self.settings.require(setting)
+
+ deployment_hosts = {}
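+ # Each --deployment-host entry is parsed as ARCH:USER@HOST:PATH; an
+ # empty user falls back to root below.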
+ for host_config in self.settings['deployment-host']:
+ arch, address = host_config.split(':', 1)
+ user, address = address.split('@', 1)
+ address, disk_path = address.split(':', 1)
+ if user == '':
+ user = 'root'
+ # TODO: Don't assume root is the user with deploy access
+ deployment_hosts[arch] = VMHost(user, address, disk_path)
+
+ build_test_config = (self.settings['trove-host'],
+ self.settings['trove-id'],
+ self.settings['build-ref-prefix'])
+
+ if len(args) != 1:
+ raise cliapp.AppException('Usage: release-test CLUSTER')
+ cluster_path = morphlib.util.sanitise_morphology_path(args[0])
+ self.deploy_and_test_systems(cluster_path, deployment_hosts,
+ build_test_config)
+
+
+if __name__ == '__main__':
+ ReleaseApp().run()
diff --git a/scripts/release-test-os b/scripts/release-test-os
new file mode 100755
index 00000000..a886300e
--- /dev/null
+++ b/scripts/release-test-os
@@ -0,0 +1,526 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Codethink Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+'''release-test-os

+This script deploys the set of systems in the cluster morphology it is
+instructed to read to an OpenStack cloud, to test that they work correctly.
+
+'''
+
+import cliapp
+import os
+import pipes
+import shlex
+import shutil
+import socket
+import tempfile
+import time
+import uuid
+
+import morphlib
+
+
+class NovaList:
+ def __init__(self):
+ self.output = []
+ self.lines = []
+ self.instance = []
+
+ def update(self):
+ self.output = cliapp.runcmd(['nova', 'list'])
+ self.lines = self.output.split('\n')
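+ # 'nova list' prints an ASCII table; drop the three header lines and the
+ # trailing border and blank line so only instance rows remain.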
+ self.lines = self.lines[3:-2]
+
+ def get_nova_details_for_instance(self, name):
+ self.update()
+
+ for line in self.lines:
+ entries = line.split('|')
+ stripped_line = [entry.strip() for entry in entries]
+ if stripped_line.count(name) == 1:
+ self.instance = stripped_line
+
+ def get_nova_state_for_instance(self, name):
+ self.get_nova_details_for_instance(name)
+ if not self.instance:
+ return
+ return self.instance[3]
+
+ def get_nova_ip_for_instance(self, name):
+ self.get_nova_details_for_instance(name)
+ if not self.instance:
+ return
+
+ if self.get_nova_state_for_instance(name) != 'ACTIVE':
+ return
+
+ return self.instance[6]
+
+ def get_nova_ip_for_instance_timeout(self, name, timeout=120):
+ start_time = time.time()
+
+ while self.get_nova_state_for_instance(name) != 'ACTIVE':
+
+ if time.time() > start_time + timeout:
+ print "%s not ACTIVE after %i seconds" % (name, timeout)
+ return
+
+ time.sleep(1)
+
+ ip_addr = self.get_nova_ip_for_instance(name)
+ if not ip_addr:
+ return
+
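+ # The Networks column looks like 'netname=10.0.0.2, 172.16.1.5'; keep
+ # only the first address after the '='.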
+ if ip_addr.count('=') == 0:
+ return
+
+ ip_addr = ip_addr[ip_addr.find('=') + 1:]
+
+ if ip_addr.count(',') == 0:
+ return ip_addr
+
+ return ip_addr[:ip_addr.find(',')]
+
+
+
+class MorphologyHelper(object):
+
+ def __init__(self):
+ self.sb = sb = morphlib.sysbranchdir.open_from_within('.')
+ defs_repo_path = sb.get_git_directory_name(sb.root_repository_url)
+ self.defs_repo = morphlib.gitdir.GitDirectory(defs_repo_path)
+ self.loader = morphlib.morphloader.MorphologyLoader()
+ self.finder = morphlib.morphologyfinder.MorphologyFinder(self.defs_repo)
+
+ def load_morphology(self, path):
+ text = self.finder.read_morphology(path)
+ return self.loader.load_from_string(text)
+
+ @classmethod
+ def iterate_systems(cls, systems_list):
+ for system in systems_list:
+ yield morphlib.util.sanitise_morphology_path(system['morph'])
+ if 'subsystems' in system:
+ for subsystem in cls.iterate_systems(system['subsystems']):
+ yield subsystem
+
+ def iterate_cluster_deployments(cls, cluster_morph):
+ for system in cluster_morph['systems']:
+ path = morphlib.util.sanitise_morphology_path(system['morph'])
+ defaults = system.get('deploy-defaults', {})
+ for name, options in system['deploy'].iteritems():
+ config = dict(defaults)
+ config.update(options)
+ yield path, name, config
+
+ def load_cluster_systems(self, cluster_morph):
+ for system_path in set(self.iterate_systems(cluster_morph['systems'])):
+ system_morph = self.load_morphology(system_path)
+ yield system_path, system_morph
+
+
+class TimeoutError(cliapp.AppException):
+
+ """Error to be raised when a connection waits too long"""
+
+ def __init__(self, msg):
+ super(TimeoutError, self).__init__(msg)
+
+
+class VMHost(object):
+
+ def __init__(self, user, address, disk_path):
+ self.user = user
+ self.address = address
+ self.disk_path = disk_path
+
+ @property
+ def ssh_host(self):
+ return '{user}@{address}'.format(user=self.user, address=self.address)
+
+ def runcmd(self, *args, **kwargs):
+ cliapp.ssh_runcmd(self.ssh_host, *args, **kwargs)
+
+
+class DeployedSystemInstance(object):
+
+ def __init__(self, deployment, config, host_machine, vm_id, rootfs_path,
+ ip_addr, hostname):
+ self.deployment = deployment
+ self.config = config
+ self.ip_address = ip_addr
+ self.host_machine = host_machine
+ self.vm_id = vm_id
+ self.rootfs_path = rootfs_path
+ self.hostname = hostname
+
+ @property
+ def ssh_host(self):
+ # TODO: Stop assuming we ssh into test instances as root
+ return 'root@{host}'.format(host=self.ip_address)
+
+ def runcmd(self, argv, chdir='.', **kwargs):
+ ssh_cmd = ['ssh', '-o', 'StrictHostKeyChecking=no',
+ '-o', 'UserKnownHostsFile=/dev/null', self.ssh_host]
+ cmd = ['sh', '-c', 'cd "$1" && shift && exec "$@"', '-', chdir]
+ cmd += argv
+ ssh_cmd.append(' '.join(map(pipes.quote, cmd)))
+ return cliapp.runcmd(ssh_cmd, **kwargs)
+
+ def _wait_for_dhcp(self, timeout):
+ '''Block until given hostname resolves successfully.
+
+ Raises TimeoutError if the hostname has not appeared in 'timeout'
+ seconds.
+
+ '''
+ start_time = time.time()
+ while True:
+ try:
+ socket.gethostbyname(self.ip_address)
+ return
+ except socket.gaierror:
+ pass
+ if time.time() > start_time + timeout:
+ raise TimeoutError("Host %s did not appear after %i seconds" %
+ (self.ip_address, timeout))
+ time.sleep(0.5)
+
+ def _wait_for_ssh(self, timeout):
+ """Wait until the deployed VM is responding via SSH"""
+ start_time = time.time()
+ while True:
+ try:
+ self.runcmd(['true'], stdin=None, stdout=None, stderr=None)
+ return
+ except cliapp.AppException:
+ # TODO: Stop assuming the ssh part of the command is what failed
+ if time.time() > start_time + timeout:
+ raise TimeoutError("%s sshd did not start after %i seconds"
+ % (self.ip_address, timeout))
+ time.sleep(0.5)
+
+ def _wait_for_cloud_init(self, timeout):
+ """Wait until cloud init has resized the disc"""
+ start_time = time.time()
+ while True:
+ try:
+ out = self.runcmd(['sh', '-c',
+ 'test -e "$1" && echo exists || echo does not exist',
+ '-',
+ '/root/cloud-init-finished'])
+ except:
+ import traceback
+ traceback.print_exc()
+ raise
+ if out.strip() == 'exists':
+ return
+ if time.time() > start_time + timeout:
+ raise TimeoutError("Disc size not increased after %i seconds"
+ % (timeout))
+ time.sleep(3)
+
+ def wait_until_online(self, timeout=120):
+ self._wait_for_dhcp(timeout)
+ self._wait_for_ssh(timeout)
+ self._wait_for_cloud_init(timeout)
+ print "Test system %s ready to run tests." % (self.hostname)
+
+ def delete(self):
+ # Stop and remove VM
+ print "Deleting %s test instance" % (self.hostname)
+ try:
+ cliapp.runcmd(['nova', 'delete', self.hostname])
+ except cliapp.AppException as e:
+ # TODO: Stop assuming that delete failed because the instance
+ # wasn't running
+ print "- Failed"
+ pass
+ print "Deleting %s test disc image" % (self.hostname)
+ try:
+ cliapp.runcmd(['nova', 'image-delete', self.hostname])
+ except cliapp.AppException as e:
+ # TODO: Stop assuming that image-delete failed because it was
+ # already removed
+ print "- Failed"
+ pass
+
+
+class Deployment(object):
+
+ def __init__(self, cluster_path, name, deployment_config,
+ host_machine, net_id):
+ self.cluster_path = cluster_path
+ self.name = name
+ self.deployment_config = deployment_config
+ self.host_machine = host_machine
+ self.net_id = net_id
+
+ @staticmethod
+ def _ssh_host_key_exists(hostname):
+ """Check if an ssh host key exists in known_hosts"""
+ if not os.path.exists('/root/.ssh/known_hosts'):
+ return False
+ with open('/root/.ssh/known_hosts', 'r') as known_hosts:
+ return any(line.startswith(hostname) for line in known_hosts)
+
+ def _update_known_hosts(self):
+ if not self._ssh_host_key_exists(self.host_machine.address):
+ with open('/root/.ssh/known_hosts', 'a') as known_hosts:
+ cliapp.runcmd(['ssh-keyscan', self.host_machine.address],
+ stdout=known_hosts)
+
+ @staticmethod
+ def _generate_sshkey_config(tempdir, config):
+ manifest = os.path.join(tempdir, 'manifest')
+ with open(manifest, 'w') as f:
+ f.write('0040700 0 0 /root/.ssh\n')
+ f.write('overwrite 0100600 0 0 /root/.ssh/authorized_keys\n')
+ authkeys = os.path.join(tempdir, 'root', '.ssh', 'authorized_keys')
+ os.makedirs(os.path.dirname(authkeys))
+ with open(authkeys, 'w') as auth_f:
+ with open('/root/.ssh/id_rsa.pub', 'r') as key_f:
+ shutil.copyfileobj(key_f, auth_f)
+
+ install_files = shlex.split(config.get('INSTALL_FILES', ''))
+ install_files.append(manifest)
+ yield 'INSTALL_FILES', ' '.join(pipes.quote(f) for f in install_files)
+
+ def deploy(self):
+ self._update_known_hosts()
+
+ hostname = str(uuid.uuid4())
+ vm_id = hostname
+ image_base = self.host_machine.disk_path
+ rootpath = '{image_base}/{hostname}.img'.format(image_base=image_base,
+ hostname=hostname)
+ loc = 'http://{ssh_host}:5000/v2.0'.format(
+ ssh_host=self.host_machine.ssh_host, id=vm_id, path=rootpath)
+
+ options = {
+ 'type': 'openstack',
+ 'location': loc,
+ 'HOSTNAME': hostname,
+ 'DISK_SIZE': '5G',
+ 'RAM_SIZE': '2G',
+ 'VERSION_LABEL': 'release-test',
+ 'OPENSTACK_USER': os.environ['OS_USERNAME'],
+ 'OPENSTACK_TENANT': os.environ['OS_TENANT_NAME'],
+ 'OPENSTACK_PASSWORD': os.environ['OS_PASSWORD'],
+ 'OPENSTACK_IMAGENAME': hostname,
+ 'CLOUD_INIT': 'yes',
+ 'KERNEL_ARGS': 'console=tty0 console=ttyS0',
+ }
+
+ tempdir = tempfile.mkdtemp()
+ try:
+ options.update(
+ self._generate_sshkey_config(tempdir,
+ self.deployment_config))
+
+ # Deploy the image to openstack
+ args = ['morph', 'deploy', self.cluster_path, self.name]
+ for k, v in options.iteritems():
+ args.append('%s.%s=%s' % (self.name, k, v))
+ cliapp.runcmd(args, stdin=None, stdout=None, stderr=None)
+
+ config = dict(self.deployment_config)
+ config.update(options)
+
+ # Boot an instance from the image
+ args = ['nova', 'boot',
+ '--flavor', 'm1.medium',
+ '--image', hostname,
+ '--user-data', '/usr/lib/mason/os-init-script',
+ '--nic', "net-id=%s" % (self.net_id),
+ hostname]
+ output = cliapp.runcmd(args)
+
+ # Print nova boot output, with adminPass line removed
+ output_lines = output.split('\n')
+ for line in output_lines:
+ if line.find('adminPass') != -1:
+ password_line = line
+ output_lines.remove(password_line)
+ output = '\n'.join(output_lines)
+ print output
+
+ # Get ip address from nova list
+ nl = NovaList()
+ ip_addr = nl.get_nova_ip_for_instance_timeout(hostname)
+ print "IP address for instance %s: %s" % (hostname, ip_addr)
+
+ return DeployedSystemInstance(self, config, self.host_machine,
+ vm_id, rootpath, ip_addr, hostname)
+ finally:
+ shutil.rmtree(tempdir)
+
+
+class ReleaseApp(cliapp.Application):
+
+ """Cliapp application which handles automatic builds and tests"""
+
+ def add_settings(self):
+ """Add the command line options needed"""
+ group_main = 'Program Options'
+ self.settings.string_list(['deployment-host'],
+ 'ARCH:USER@HOST:PATH that VMs can be deployed to',
+ default=None,
+ group=group_main)
+ self.settings.string(['trove-host'],
+ 'Address of Trove for test systems to build from',
+ default=None,
+ group=group_main)
+ self.settings.string(['trove-id'],
+ 'ID of Trove for test systems to build from',
+ default=None,
+ group=group_main)
+ self.settings.string(['build-ref-prefix'],
+ 'Prefix of build branches for test systems',
+ default=None,
+ group=group_main)
+ self.settings.string(['net-id'],
+ 'Openstack network ID',
+ default=None,
+ group=group_main)
+
+ @staticmethod
+ def _run_tests(instance, system_path, system_morph,
+ (trove_host, trove_id, build_ref_prefix),
+ morph_helper, systems):
+ instance.wait_until_online()
+
+ tests = []
+ def baserock_build_test(instance):
+ instance.runcmd(['git', 'config', '--global', 'user.name',
+ 'Test Instance of %s' % instance.deployment.name])
+ instance.runcmd(['git', 'config', '--global', 'user.email',
+ 'ci-test@%s' % instance.config['HOSTNAME']])
+ instance.runcmd(['mkdir', '-p', '/src/ws', '/src/cache',
+ '/src/tmp'])
+ def morph_cmd(*args, **kwargs):
+ # TODO: decide whether to use cached artifacts or not by
+ # adding --artifact-cache-server= --cache-server=
+ argv = ['morph', '--log=/src/morph.log', '--cachedir=/src/cache',
+ '--tempdir=/src/tmp', '--log-max=100M',
+ '--trove-host', trove_host, '--trove-id', trove_id,
+ '--build-ref-prefix', build_ref_prefix]
+ argv.extend(args)
+ instance.runcmd(argv, **kwargs)
+
+ repo = morph_helper.sb.root_repository_url
+ ref = morph_helper.defs_repo.HEAD
+ sha1 = morph_helper.defs_repo.resolve_ref_to_commit(ref)
+ morph_cmd('init', '/src/ws')
+ chdir = '/src/ws'
+
+ morph_cmd('checkout', repo, ref, chdir=chdir)
+ # TODO: Add a morph subcommand that gives the path to the root repository.
+ repo_path = os.path.relpath(
+ morph_helper.sb.get_git_directory_name(repo),
+ morph_helper.sb.root_directory)
+ chdir = os.path.join(chdir, ref, repo_path)
+
+ instance.runcmd(['git', 'reset', '--hard', sha1], chdir=chdir)
+ print 'Building test systems for {sys}'.format(sys=system_path)
+ for to_build_path, to_build_morph in systems.iteritems():
+ if to_build_morph['arch'] == system_morph['arch']:
+ print 'Test building {path}'.format(path=to_build_path)
+ morph_cmd('build', to_build_path, chdir=chdir,
+ stdin=None, stdout=None, stderr=None)
+ print 'Finished Building test systems'
+
+ def python_smoke_test(instance):
+ instance.runcmd(['python', '-c', 'print "Hello World"'])
+
+ # TODO: Come up with a better way of determining which tests to run
+ if 'devel' in system_path:
+ tests.append(baserock_build_test)
+ else:
+ tests.append(python_smoke_test)
+
+ for test in tests:
+ test(instance)
+
+ def deploy_and_test_systems(self, cluster_path,
+ deployment_hosts, build_test_config,
+ net_id):
+ """Run the deployments and tests"""
+
+ version = 'release-test'
+
+ morph_helper = MorphologyHelper()
+ cluster_morph = morph_helper.load_morphology(cluster_path)
+ systems = dict(morph_helper.load_cluster_systems(cluster_morph))
+
+ for system_path, deployment_name, deployment_config in \
+ morph_helper.iterate_cluster_deployments(cluster_morph):
+
+ system_morph = systems[system_path]
+ # We can only test systems in KVM that have a BSP
+ if not any('bsp' in si['morph'] for si in system_morph['strata']):
+ continue
+
+ # We can only test systems in KVM that we have a host for
+ if system_morph['arch'] not in deployment_hosts:
+ continue
+ host_machine = deployment_hosts[system_morph['arch']]
+ deployment = Deployment(cluster_path, deployment_name,
+ deployment_config, host_machine,
+ net_id)
+
+ instance = deployment.deploy()
+ try:
+ self._run_tests(instance, system_path, system_morph,
+ build_test_config, morph_helper, systems)
+ finally:
+ instance.delete()
+
+ def process_args(self, args):
+ """Process the command line args and kick off the builds/tests"""
+ if self.settings['build-ref-prefix'] is None:
+ self.settings['build-ref-prefix'] = (
+ os.path.join(self.settings['trove-id'], 'builds'))
+ for setting in ('deployment-host', 'trove-host',
+ 'trove-id', 'build-ref-prefix', 'net-id'):
+ self.settings.require(setting)
+
+ deployment_hosts = {}
+ for host_config in self.settings['deployment-host']:
+ arch, address = host_config.split(':', 1)
+ user, address = address.split('@', 1)
+ address, disk_path = address.split(':', 1)
+ if user == '':
+ user = 'root'
+ # TODO: Don't assume root is the user with deploy access
+ deployment_hosts[arch] = VMHost(user, address, disk_path)
+
+ build_test_config = (self.settings['trove-host'],
+ self.settings['trove-id'],
+ self.settings['build-ref-prefix'])
+
+ if len(args) != 1:
+ raise cliapp.AppException('Usage: release-test CLUSTER')
+ cluster_path = morphlib.util.sanitise_morphology_path(args[0])
+ self.deploy_and_test_systems(cluster_path, deployment_hosts,
+ build_test_config,
+ self.settings['net-id'])
+
+
+if __name__ == '__main__':
+ ReleaseApp().run()
diff --git a/scripts/release-upload b/scripts/release-upload
new file mode 100755
index 00000000..273f9ed5
--- /dev/null
+++ b/scripts/release-upload
@@ -0,0 +1,473 @@
+#!/usr/bin/python
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+'''Upload and publish Baserock binaries for a release.
+
+This utility is used for the Baserock release process. See
+http://wiki.baserock.org/guides/release-process/ for details on the
+release process.
+
+This utility uploads two sets of binaries:
+
+* The build artifacts (built chunks and strata) used to construct the
+ systems being released. The systems are found in `release.morph`, and
+ the artifacts come from the Trove used to prepare the release. They get
+ uploaded to a public Trove (by default git.baserock.org); if the build
+ Trove and the public Trove are the same, nothing happens.
+
+* The released system images (disk images, tar archives, etc)
+ specified in `release.morph` get uploaded to a download server (by
+ default download.baserock.org).
+
+'''
+
+
+import json
+import logging
+import os
+import pwd
+import shutil
+import sys
+import urllib
+import urllib2
+import urlparse
+
+import cliapp
+import yaml
+
+import morphlib
+
+class ReleaseUploader(cliapp.Application):
+
+ def add_settings(self):
+ group = 'Release upload settings'
+
+ local_username = self.get_local_username()
+
+ self.settings.string(
+ ['build-trove-host'],
+ 'get build artifacts from Trove at ADDRESS',
+ metavar='ADDRESS',
+ group=group)
+
+ self.settings.string(
+ ['public-trove-host'],
+ 'publish build artifacts on Trove at ADDRESS',
+ metavar='ADDRESS',
+ default='git.baserock.org',
+ group=group)
+
+ self.settings.string(
+ ['public-trove-username'],
+ 'log into public trove as USER',
+ metavar='USER',
+ default=local_username,
+ group=group)
+
+ self.settings.string(
+ ['public-trove-artifact-dir'],
+ 'put published artifacts into DIR',
+ metavar='DIR',
+ default='/home/cache/artifacts',
+ group=group)
+
+ self.settings.string(
+ ['release-artifact-dir'],
+ 'get release artifacts from DIR (all files from there)',
+ metavar='DIR',
+ default='.',
+ group=group)
+
+ self.settings.string(
+ ['download-server-address'],
+ 'publish release artifacts on server at ADDRESS',
+ metavar='ADDRESS',
+ default='download.baserock.org',
+ group=group)
+
+ self.settings.string(
+ ['download-server-username'],
+ 'log into download server as USER',
+ metavar='USER',
+ default=local_username,
+ group=group)
+
+ self.settings.string(
+ ['download-server-private-dir'],
+ 'use DIR as the temporary location for uploaded release '
+ 'artifacts',
+ metavar='DIR',
+ default='/srv/download.baserock.org/baserock/.publish-temp',
+ group=group)
+
+ self.settings.string(
+ ['download-server-public-dir'],
+ 'put published release artifacts in DIR',
+ metavar='DIR',
+ default='/srv/download.baserock.org/baserock',
+ group=group)
+
+ self.settings.string(
+ ['local-build-artifacts-dir'],
+ 'keep build artifacts to be uploaded temporarily in DIR',
+ metavar='DIR',
+ default='build-artifacts',
+ group=group)
+
+ self.settings.string(
+ ['morph-cmd'],
+ 'run FILE to invoke morph',
+ metavar='FILE',
+ default='morph',
+ group=group)
+
+ self.settings.string_list(
+ ['arch'],
+ 'Upload files from morphologies of ARCH',
+ metavar='ARCH',
+ default=[],
+ group=group)
+
+ self.settings.boolean(
+ ['upload-build-artifacts'],
+ 'upload build artifacts?',
+ default=True)
+
+ self.settings.boolean(
+ ['upload-release-artifacts'],
+ 'upload release artifacts (disk images etc)?',
+ default=True)
+
+ def get_local_username(self):
+ uid = os.getuid()
+ return pwd.getpwuid(uid)[0]
+
+ def process_args(self, args):
+ if len(args) != 1:
+ raise cliapp.AppException('Usage: release-upload CLUSTER')
+ cluster_morphology_path = args[0]
+ self.status(msg='Uploading and publishing Baserock release')
+
+ if self.settings['upload-build-artifacts']:
+ self.publish_build_artifacts(cluster_morphology_path)
+ else:
+ self.status(
+ msg='Not uploading build artifacts '
+ '(upload-build-artifacts set to false)')
+
+ if self.settings['upload-release-artifacts']:
+ self.publish_release_artifacts()
+ else:
+ self.status(
+ msg='Not uploading release artifacts '
+ '(upload-release-artifacts set to false)')
+
+ def publish_build_artifacts(self, cluster_morphology_path):
+ publisher = BuildArtifactPublisher(self.settings, self.status)
+ publisher.publish_build_artifacts(cluster_morphology_path)
+ self.status(msg='Build artifacts have been published')
+
+ def publish_release_artifacts(self):
+ publisher = ReleaseArtifactPublisher(self.settings, self.status)
+ publisher.publish_release_artifacts()
+ self.status(msg='Release artifacts have been published')
+
+ def status(self, msg, **kwargs):
+ formatted = msg.format(**kwargs)
+ logging.info(formatted)
+ sys.stdout.write(formatted + '\n')
+ sys.stdout.flush()
+
+
+class BuildArtifactPublisher(object):
+
+ '''Publish build artifacts related to the release.'''
+
+ def __init__(self, settings, status):
+ self.settings = settings
+ self.status = status
+
+ def publish_build_artifacts(self, cluster_path):
+ artifact_basenames = self.list_build_artifacts_for_release(cluster_path)
+ self.status(
+ msg='Found {count} build artifact files in release',
+ count=len(artifact_basenames))
+
+ to_be_uploaded = self.filter_away_build_artifacts_on_public_trove(
+ artifact_basenames)
+
+ logging.debug('List of artifact basenames to upload (excluding those already uploaded):')
+ for i, basename in enumerate(to_be_uploaded):
+ logging.debug(' {0}: {1}'.format(i, basename))
+ logging.debug('End of artifact list (to_be_uploaded)')
+
+ self.status(
+ msg='Need to fetch locally, then upload {count} build artifacts',
+ count=len(to_be_uploaded))
+
+ self.upload_build_artifacts_to_public_trove(to_be_uploaded)
+
+ def list_build_artifacts_for_release(self, cluster_morphology_path):
+ self.status(msg='Find build artifacts included in release')
+
+ # FIXME: These are hardcoded for simplicity. It would be possible to
+ # deduce them automatically from the workspace, but that can happen
+ # later.
+ repo = 'file://%s' % os.path.abspath('.')
+ ref = 'HEAD'
+
+ argv = [self.settings['morph-cmd'], 'list-artifacts', '--quiet', repo, ref]
+ argv += self.find_system_morphologies(cluster_morphology_path)
+ output = cliapp.runcmd(argv)
+ basenames = output.splitlines()
+ logging.debug('List of build artifacts in release:')
+ for basename in basenames:
+ logging.debug(' {0}'.format(basename))
+ logging.debug('End of list of build artifacts in release')
+
+ return basenames
+
+ def find_system_morphologies(self, cluster_morphology_path):
+ cluster = self.load_cluster_morphology(cluster_morphology_path)
+ system_dicts = self.find_systems_in_parsed_cluster_morphology(cluster)
+ if self.settings['arch']:
+ system_dicts = self.choose_systems_for_wanted_architectures(
+ system_dicts, self.settings['arch'])
+ return [sd['morph'] for sd in system_dicts]
+
+ def load_cluster_morphology(self, pathname):
+ with open(pathname) as f:
+ return yaml.load(f)
+
+ def find_systems_in_parsed_cluster_morphology(self, cluster):
+ return cluster['systems']
+
+ def choose_systems_for_wanted_architectures(self, system_dicts, archs):
+ return [
+ sd
+ for sd in system_dicts
+ if self.system_is_for_wanted_arch(sd, archs)]
+
+ def system_is_for_wanted_arch(self, system_dict, archs):
+ morph = self.load_system_morphology(system_dict)
+ return morph['arch'] in archs
+
+ def load_system_morphology(self, system_dict):
+ pathname = morphlib.util.sanitise_morphology_path(system_dict['morph'])
+ return self.load_morphology_from_named_file(pathname)
+
+ def load_morphology_from_named_file(self, pathname):
+ finder = self.get_morphology_finder_for_root_repository()
+ morphology_text = finder.read_morphology(pathname)
+ loader = morphlib.morphloader.MorphologyLoader()
+ return loader.load_from_string(morphology_text)
+
+ def get_morphology_finder_for_root_repository(self):
+ sb = morphlib.sysbranchdir.open_from_within('.')
+ definitions = sb.get_git_directory_name(sb.root_repository_url)
+ definitions_repo = morphlib.gitdir.GitDirectory(definitions)
+ return morphlib.morphologyfinder.MorphologyFinder(definitions_repo)
+
+ def filter_away_build_artifacts_on_public_trove(self, basenames):
+ result = []
+ logging.debug('Filtering away already existing artifacts:')
+ for basename, exists in self.query_public_trove_for_artifacts(basenames):
+ logging.debug(' {0}: {1}'.format(basename, exists))
+ if not exists:
+ result.append(basename)
+ logging.debug('End of filtering away')
+ return result
+
+ def query_public_trove_for_artifacts(self, basenames):
+ host = self.settings['public-trove-host']
+
+ # FIXME: This could use
+ # contextlib.closing(urllib2.urlopen(url, data=data)) instead
+ # of explicit closing.
+ url = 'http://{host}:8080/1.0/artifacts'.format(host=host)
+ data = json.dumps(basenames)
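+ # The Trove's cache server takes a JSON list of artifact basenames in
+ # the POST body and is expected to reply with a JSON object mapping each
+ # basename to whether it already exists on the server.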
+ f = urllib2.urlopen(url, data=data)
+ obj = json.load(f)
+ f.close()
+ return obj.items()
+
+ def upload_build_artifacts_to_public_trove(self, basenames):
+ self.download_artifacts_locally(basenames)
+ self.upload_artifacts_to_public_trove(basenames)
+
+ def download_artifacts_locally(self, basenames):
+ dirname = self.settings['local-build-artifacts-dir']
+ self.create_directory_if_missing(dirname)
+ for i, basename in enumerate(basenames):
+ url = self.construct_artifact_url(basename)
+ pathname = os.path.join(dirname, basename)
+ if not os.path.exists(pathname):
+ self.status(
+ msg='Downloading {i}/{total} {basename}',
+ basename=repr(basename), i=i, total=len(basenames))
+ self.download_from_url(url, dirname, pathname)
+
+ def create_directory_if_missing(self, dirname):
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+
+ def construct_artifact_url(self, basename):
+ scheme = 'http'
+ netloc = '{host}:8080'.format(host=self.settings['build-trove-host'])
+ path = '/1.0/artifacts'
+ query = 'filename={0}'.format(urllib.quote_plus(basename))
+ fragment = ''
+ components = (scheme, netloc, path, query, fragment)
+ return urlparse.urlunsplit(components)
+
+ def download_from_url(self, url, dirname, pathname):
+ logging.info(
+ 'Downloading {url} to {pathname}'.format(
+ url=url, pathname=pathname))
+ with open(pathname, 'wb') as output:
+ try:
+ incoming = urllib2.urlopen(url)
+ shutil.copyfileobj(incoming, output)
+ incoming.close()
+ except urllib2.HTTPError as e:
+ if pathname.endswith('.meta'):
+ return
+ self.status(
+ msg="ERROR: Can't download {url}: {explanation}",
+ url=url,
+ explanation=str(e))
+ os.remove(pathname)
+ raise
+
+ def upload_artifacts_to_public_trove(self, basenames):
+ self.status(
+ msg='Upload build artifacts to {trove}',
+ trove=self.settings['public-trove-host'])
+ rsync_files_to_server(
+ self.settings['local-build-artifacts-dir'],
+ basenames,
+ self.settings['public-trove-username'],
+ self.settings['public-trove-host'],
+ self.settings['public-trove-artifact-dir'])
+ set_permissions_on_server(
+ self.settings['public-trove-username'],
+ self.settings['public-trove-host'],
+ self.settings['public-trove-artifact-dir'],
+ basenames)
+
+class ReleaseArtifactPublisher(object):
+
+ '''Publish release artifacts for a release.'''
+
+ def __init__(self, settings, status):
+ self.settings = settings
+ self.status = status
+
+ def publish_release_artifacts(self):
+ files = self.list_release_artifacts()
+ if files:
+ self.upload_release_artifacts_to_private_dir(files)
+ self.move_release_artifacts_to_public_dir(files)
+ self.create_symlinks_to_new_release_artifacts(files)
+
+ def list_release_artifacts(self):
+ self.status(msg='Find release artifacts to publish')
+ return os.listdir(self.settings['release-artifact-dir'])
+
+ def upload_release_artifacts_to_private_dir(self, files):
+ self.status(msg='Upload release artifacts to private directory')
+ path = self.settings['download-server-private-dir']
+ self.create_directory_on_download_server(path)
+ self.rsync_files_to_download_server(files, path)
+
+ def create_directory_on_download_server(self, path):
+ user = self.settings['download-server-username']
+ host = self.settings['download-server-address']
+ self.status(msg='Create {host}:{path}', host=host, path=path)
+ target = '{user}@{host}'.format(user=user, host=host)
+ cliapp.ssh_runcmd(target, ['mkdir', '-p', path])
+
+ def rsync_files_to_download_server(self, files, path):
+ self.status(msg='Upload release artifacts to download server')
+ rsync_files_to_server(
+ self.settings['release-artifact-dir'],
+ files,
+ self.settings['download-server-username'],
+ self.settings['download-server-address'],
+ path)
+ set_permissions_on_server(
+ self.settings['download-server-username'],
+ self.settings['download-server-address'],
+ path,
+ files)
+
+ def move_release_artifacts_to_public_dir(self, files):
+ self.status(msg='Move release artifacts to public directory')
+ private_dir = self.settings['download-server-private-dir']
+ public_dir = self.settings['download-server-public-dir']
+ self.create_directory_on_download_server(public_dir)
+
+ # Move just the contents of the private dir, not the dir
+ # itself (-mindepth). Avoid overwriting existing files (mv
+ # -n).
+ argv = ['find', private_dir, '-mindepth', '1',
+ '-exec', 'mv', '-n', '{}', public_dir + '/.', ';']
+
+ target = '{user}@{host}'.format(
+ user=self.settings['download-server-username'],
+ host=self.settings['download-server-address'])
+ cliapp.ssh_runcmd(target, argv)
+
+ def create_symlinks_to_new_release_artifacts(self, files):
+ self.status(msg='FIXME: Create symlinks to new release artifacts')
+
+
+def rsync_files_to_server(
+ source_dir, source_filenames, user, host, target_dir):
+
+ if not source_filenames:
+ return
+
+ argv = [
+ 'rsync',
+ '-a',
+ '--progress',
+ '--partial',
+ '--human-readable',
+ '--sparse',
+ '--protect-args',
+ '-0',
+ '--files-from=-',
+ source_dir,
+ '{user}@{host}:{path}'.format(user=user, host=host, path=target_dir),
+ ]
+
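+ # --files-from=- together with -0 makes rsync read a NUL-separated list
+ # of the files to copy from stdin, which feed_stdin supplies below.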
+ files_list = '\0'.join(filename for filename in source_filenames)
+ cliapp.runcmd(argv, feed_stdin=files_list, stdout=None, stderr=None)
+
+
+def set_permissions_on_server(user, host, target_dir, filenames):
+ # If we have no files, we can't form a valid command to run on the server
+ if not filenames:
+ return
+ target = '{user}@{host}'.format(user=user, host=host)
+ argv = ['xargs', '-0', 'chmod', '0644']
+ files_list = ''.join(
+ '{0}\0'.format(os.path.join(target_dir, filename)) for filename in filenames)
+ cliapp.ssh_runcmd(target, argv, feed_stdin=files_list, stdout=None, stderr=None)
+
+
+ReleaseUploader(description=__doc__).run()
diff --git a/scripts/release-upload.test.conf b/scripts/release-upload.test.conf
new file mode 100644
index 00000000..13227983
--- /dev/null
+++ b/scripts/release-upload.test.conf
@@ -0,0 +1,10 @@
+[config]
+download-server-address = localhost
+download-server-private-dir = /tmp/private
+download-server-public-dir = /tmp/public
+build-trove-host = ct-mcr-1.ducie.codethink.co.uk
+public-trove-host = localhost
+public-trove-username = root
+public-trove-artifact-dir = /tmp/artifacts
+release-artifact-dir = t.release-files
+morph-cmd = /home/root/git-morph
diff --git a/sdk.write b/sdk.write
new file mode 100755
index 00000000..8d3d2a63
--- /dev/null
+++ b/sdk.write
@@ -0,0 +1,284 @@
+#!/bin/sh
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# =*= License: GPL-2 =*=
+
+set -eu
+
+die(){
+ echo "$@" >&2
+ exit 1
+}
+
+shellescape(){
+ echo "'$(echo "$1" | sed -e "s/'/'\\''/g")'"
+}
+
+########################## END OF COMMON HEADER ###############################
+#
+# The above lines, as well as being part of this script, are copied into the
+# self-installing SDK blob's header script, as a means of re-using content.
+#
+
+help(){
+ cat <<EOF
+sdk.write: Write extension for making an SDK installer.
+
+Description:
+ This is a write extension for producing a self-installing SDK blob
+ from a configured system.
+
+ It generates a shell script header and appends the rootfs as a tarball,
+ which the header later extracts, performing various configuration steps
+ to make it usable as a relocatable toolchain.
+
+ This is similar to what the shar and makeself programs do, but we
+ need custom setup, so shar isn't appropriate, and makeself's api is
+ insufficiently flexible for our requirements.
+
+ The toolchain relocation is handled by sedding every text file in the
+ SDK directory, and using the patchelf from inside the SDK to change
+ every ELF binary in the toolchain to use the linker and libraries from
+ inside the SDK.
+
+ The ELF patching is required so that the SDK can work independently
+ of the versions of libraries installed on the host system.
+
+Location: Path to create the script at
+
+ENV VARS:
+ PREFIX (optional) The prefix the toolchain is built with;
+ defaults to /usr
+ TARGET (mandatory) The GNU triplet the toolchain was built with
+EOF
+}
+
+ROOTDIR="$1"
+OUTPUT_SCRIPT="$2"
+PREFIX=${PREFIX-/usr}
+
+find_patchelf(){
+ # Look for patchelf in the usual places
+ for binpath in /bin "$PREFIX/bin"; do
+ if [ -x "$ROOTDIR$binpath/patchelf" ]; then
+ echo "$binpath/patchelf"
+ return
+ fi
+ done
+ die "patchelf not found in rootfs"
+}
+
+read_elf_interpreter(){
+ # Use readelf and sed to find the interpreter a binary uses. This is
+ # required since we can't yet guarantee that the deploying system
+ # contains patchelf.
+ readelf --wide --program-headers "$1" |
+ sed -nr -f /proc/self/fd/3 3<<'EOF'
+/\s+INTERP/{
+ n # linker is on line after INTERP line
+ s/^\s*\[Requesting program interpreter: (.*)]$/\1/
+ p # in -n mode, so need to print our text
+}
+EOF
+}
+
+find_lib_paths(){
+ local found_first=false
+ for libpath in "$PREFIX/lib32" "$PREFIX/lib64" "$PREFIX/lib" \
+ /lib32 /lib64 /lib; do
+ if [ -e "$ROOTDIR$libpath" ]; then
+ if "$found_first"; then
+ printf ":%s" "$libpath"
+ else
+ printf "%s" "$libpath"
+ found_first=true
+ fi
+ fi
+ done
+}
+
+# Create script with common header
+header_end=$(grep -En -m1 -e '^#+ END OF COMMON HEADER #+$' "$0" | cut -d: -f1)
+head -n "$header_end" "$0" | install -m 755 -D /dev/stdin "$OUTPUT_SCRIPT"
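+# (grep -n prints 'LINE:match' and cut keeps the line number, so head copies
+# everything up to and including the END OF COMMON HEADER marker.)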
+
+# Determine any config
+PATCHELF="$(find_patchelf)"
+RTLD="$(read_elf_interpreter "$ROOTDIR$PATCHELF")"
+LIB_PATH="${LIB_PATH-$(find_lib_paths)}"
+
+# Append deploy-time config to header
+cat >>"$OUTPUT_SCRIPT" <<EOF
+#################### START OF DEPLOY TIME CONFIGURATION #######################
+
+TARGET=$(shellescape "$TARGET")
+PREFIX=$(shellescape "$PREFIX")
+PATCHELF=$(shellescape "$PATCHELF")
+RTLD=$(shellescape "$RTLD")
+LIB_PATH=$(shellescape "$LIB_PATH")
+
+##################### END OF DEPLOY TIME CONFIGURATION ########################
+EOF
+
+# Append deployment script
+cat >>"$OUTPUT_SCRIPT" <<'EOF'
+########################### START OF HEADER SCRIPT ############################
+
+usage(){
+ cat <<USAGE
+usage: $0 TOOLCHAIN_PATH
+USAGE
+}
+
+if [ "$#" != 1 ]; then
+ echo TOOLCHAIN_PATH not given >&2
+ usage >&2
+ exit 1
+fi
+
+TOOLCHAIN_PATH="$(readlink -f "$1")"
+
+sedescape(){
+ # Escape the passed in string so it can be safely interpolated into
+ # a sed expression as a literal value.
+ echo "$1" | sed -e 's/[\/&]/\\&/g'
+}
+
+prepend_to_path_elements(){
+ # Prepend $1 to every entry in the : separated list specified as $2.
+ local prefix="$1"
+ (
+ # Split path into components
+ IFS=:
+ set -- $2
+ # Print path back out with new prefix
+ printf %s "$prefix/$1"
+ shift
+ for arg in "$@"; do
+ printf ":%s" "$prefix/$arg"
+ done
+ )
+}
+
+extract_rootfs(){
+ # Extract the bzipped tarball at the end of the script passed as $1
+ # to the path specified as $2
+ local selfextractor="$1"
+ local target="$2"
+ local script_end="$(($(\
+ grep -aEn -m1 -e '^#+ END OF HEADER SCRIPT #+$' "$selfextractor" |
+ cut -d: -f1) + 1 ))"
+ mkdir -p "$target"
+ tail -n +"$script_end" "$selfextractor" | tar -xj -C "$target" .
+}
+
+amend_text_file_paths(){
+ # Replace all instances of $3 with $4 in the directory specified by $1
+ # excluding the subdirectory $2
+ local root="$1"
+ local inner_sysroot="$2"
+ local old_prefix="$3"
+ local new_prefix="$4"
+ find "$root" \( -path "$inner_sysroot" -prune \) -o -type f \
+ -exec sh -c 'file "$1" | grep -q text' - {} \; \
+ -exec sed -i -e \
+ "s/$(sedescape "$old_prefix")/$(sedescape "$new_prefix")/g" {} +
+}
+
+filter_patchelf_errors(){
+ # Filter out warnings from patchelf that are acceptable
+ # The warning that it's making a file bigger is just noise
+ # The warning about not being an ELF executable just means we got a
+ # false positive from file that it was an ELF binary
+ # Failing to find .interp is because for convenience, we set the
+ # interpreter in the same command as setting the rpath, even though
+ # we give it both executables and libraries.
+ grep -v -e 'warning: working around a Linux kernel bug' \
+ -e 'not an ELF executable' \
+ -e 'cannot find section .interp'
+}
+
+patch_elves(){
+ # Set the interpreter and library paths of ELF binaries in $1,
+ # except for the $2 subdirectory, using the patchelf command in the
+ # toolchain specified as $3, so that it uses the linker specified
+ # as $4 as the interpreter, and the runtime path specified by $5.
+ #
+ # The patchelf inside the toolchain is used to ensure that it works
+ # independently of the availability of patchelf on the host.
+ #
+ # This is possible by invoking the linker directly and specifying
+ # --linker-path as the RPATH we want to set the binaries to use.
+ local root="$1"
+ local inner_sysroot="$2"
+ local patchelf="$3"
+ local linker="$4"
+ local lib_path="$5"
+ find "$root" \( -path "$inner_sysroot" -prune \) -o -type f \
+ -type f -perm +u=x \
+ -exec sh -c 'file "$1" | grep -q "ELF"' - {} \; \
+ -exec "$linker" --library-path "$lib_path" \
+ "$patchelf" --set-interpreter "$linker" \
+ --set-rpath "$lib_path" {} \; 2>&1 \
+ | filter_patchelf_errors
+}
+
+generate_environment_setup(){
+ local target="$1"
+ install -m 644 -D /dev/stdin "$target" <<ENVSETUP
+export PATH=$(shellescape "$TOOLCHAIN_PATH/usr/bin"):"\$PATH"
+export TARGET_PREFIX=$(shellescape "$TARGET"-)
+export CC=$(shellescape "$TARGET-gcc")
+export CXX=$(shellescape "$TARGET-g++")
+export CPP=$(shellescape "$TARGET-gcc -E")
+export AS=$(shellescape "$TARGET-as")
+export LD=$(shellescape "$TARGET-ld")
+export STRIP=$(shellescape "$TARGET-strip")
+export RANLIB=$(shellescape "$TARGET-ranlib")
+export OBJCOPY=$(shellescape "$TARGET-objcopy")
+export OBJDUMP=$(shellescape "$TARGET-objdump")
+export AR=$(shellescape "$TARGET-ar")
+export NM=$(shellescape "$TARGET-nm")
+ENVSETUP
+}
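+# The generated environment-setup file is intended to be sourced, e.g.
+#   . /path/to/environment-setup-$TARGET
+# before the cross toolchain is used.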
+
+SYSROOT="$TOOLCHAIN_PATH$PREFIX/$TARGET/sys-root"
+PATCHELF="$TOOLCHAIN_PATH$PATCHELF"
+RTLD="$TOOLCHAIN_PATH$RTLD"
+OLD_PREFIX="$PREFIX"
+NEW_PREFIX="$TOOLCHAIN_PATH/$PREFIX"
+RPATH="$(prepend_to_path_elements "$TOOLCHAIN_PATH" "$LIB_PATH")"
+ENV_SETUP_FILE="$(dirname "$TOOLCHAIN_PATH")/environment-setup-$TARGET"
+
+echo Writing environment setup script to "$ENV_SETUP_FILE"
+generate_environment_setup "$ENV_SETUP_FILE"
+
+echo Extracting rootfs
+extract_rootfs "$0" "$TOOLCHAIN_PATH"
+
+echo "Relocating prefix references of $OLD_PREFIX to $NEW_PREFIX in" \
+ "the toolchain's textual config files."
+amend_text_file_paths "$TOOLCHAIN_PATH" "$SYSROOT" "$OLD_PREFIX" "$NEW_PREFIX"
+
+echo "Patching ELF binary files' interpreter and runtime library paths" \
+ "to refer to libraries within the toolchain"
+patch_elves "$TOOLCHAIN_PATH" "$SYSROOT" "$PATCHELF" "$RTLD" "$RPATH"
+
+exit
+############################ END OF HEADER SCRIPT #############################
+EOF
+
+# Append rootfs as tarball
+tar -C "$1" -cj >>"$OUTPUT_SCRIPT" .
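+
+# Rough usage sketch for the finished self-extracting script (illustrative
+# only; the output file name, TOOLCHAIN_PATH and TARGET come from the
+# surrounding deployment configuration, not shown here):
+#
+#   sh <output-script>               # unpacks and relocates the toolchain
+#                                    # under TOOLCHAIN_PATH
+#   . <dirname of TOOLCHAIN_PATH>/environment-setup-<TARGET>
+#   "$CC" --version                  # cross tools are now on PATH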
diff --git a/strata/NetworkManager-common.morph b/strata/NetworkManager-common.morph
new file mode 100644
index 00000000..93b94dff
--- /dev/null
+++ b/strata/NetworkManager-common.morph
@@ -0,0 +1,23 @@
+name: NetworkManager-common
+kind: stratum
+build-depends:
+- morph: strata/audio-bluetooth.morph
+- morph: strata/network-security.morph
+- morph: strata/connectivity.morph
+chunks:
+- name: libndp
+ repo: upstream:libndp
+ ref: f3a3a63d5b5abced8f75731d7b995606933c6e33
+ unpetrify-ref: v1.4
+- name: readline6
+ repo: upstream:readline
+ ref: 7628b745a813aac53586b640da056a975f1c443e
+ unpetrify-ref: readline-6.3
+- name: NetworkManager
+ morph: strata/NetworkManager-common/NetworkManager.morph
+ repo: upstream:NetworkManager
+ ref: acdaf78a068b6c65ba799a7098b867953db4801c
+ unpetrify-ref: 1.0.0
+ build-depends:
+ - libndp
+ - readline6
diff --git a/strata/NetworkManager-common/NetworkManager.morph b/strata/NetworkManager-common/NetworkManager.morph
new file mode 100644
index 00000000..553c979b
--- /dev/null
+++ b/strata/NetworkManager-common/NetworkManager.morph
@@ -0,0 +1,5 @@
+name: NetworkManager
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./autogen.sh --prefix="$PREFIX" --with-session-tracking=systemd --disable-ppp --enable-more-warnings=yes
diff --git a/strata/ansible.morph b/strata/ansible.morph
new file mode 100644
index 00000000..43ebc2bd
--- /dev/null
+++ b/strata/ansible.morph
@@ -0,0 +1,17 @@
+name: ansible
+kind: stratum
+description: A stratum with ansible and its dependencies
+build-depends:
+- morph: strata/core.morph
+- morph: strata/python-common.morph
+chunks:
+- name: ansible
+ morph: strata/ansible/ansible.morph
+ repo: upstream:ansible
+ ref: 0a7124541247cc613352054f4bc0c3e116e0b657
+ unpetrify-ref: baserock/v1.8.4
+- name: openstack-ansible-modules
+ morph: strata/ansible/openstack-ansible-modules.morph
+ repo: upstream:openstack/openstack-ansible-modules
+ ref: 451dcdeadfd35615a867d5e59a684e4aa82959ae
+ unpetrify-ref: master
diff --git a/strata/ansible/ansible.morph b/strata/ansible/ansible.morph
new file mode 100644
index 00000000..dbc6ec4b
--- /dev/null
+++ b/strata/ansible/ansible.morph
@@ -0,0 +1,9 @@
+name: ansible
+kind: chunk
+build-system: python-distutils
+post-install-commands:
+- |
+ install -D -m644 /proc/self/fd/0 "$DESTDIR/etc/ansible/ansible.cfg" <<EOF
+ [defaults]
+ log_path=/var/log/ansible
+ EOF
diff --git a/strata/ansible/openstack-ansible-modules.morph b/strata/ansible/openstack-ansible-modules.morph
new file mode 100644
index 00000000..d8e885e6
--- /dev/null
+++ b/strata/ansible/openstack-ansible-modules.morph
@@ -0,0 +1,5 @@
+name: openstack-ansible-modules
+kind: chunk
+install-commands:
+- mkdir -p "$DESTDIR$PREFIX"/share/ansible/ansible-openstack-modules
+- cp -r * "$DESTDIR$PREFIX"/share/ansible/ansible-openstack-modules
diff --git a/strata/apache-httpd-server.morph b/strata/apache-httpd-server.morph
new file mode 100644
index 00000000..7a403535
--- /dev/null
+++ b/strata/apache-httpd-server.morph
@@ -0,0 +1,39 @@
+name: apache-httpd-server
+kind: stratum
+description: Apache HTTP web server and some related utilities
+build-depends:
+- morph: strata/tools.morph
+- morph: strata/pcre-utils.morph
+- morph: strata/python-core.morph
+chunks:
+- name: apr
+ morph: strata/apache-httpd-server/apr.morph
+ repo: upstream:apache/apr
+ ref: 3c818c6d7351f0130282d212a69035642f5fecad
+ unpetrify-ref: trunk
+- name: httpd
+ morph: strata/apache-httpd-server/httpd-server.morph
+ repo: upstream:apache/httpd
+ ref: 6d8e0b2fd95268fcba96326ba9dce7bb8f712c19
+ unpetrify-ref: 2.4.10
+ build-depends:
+ - apr
+- name: psutil
+ repo: upstream:python-packages/psutil
+ ref: 2bc8555f0428af81c2d067aa76168ed7bc5e0179
+ unpetrify-ref: release-2.2.0
+- name: mod_wsgi-metrics
+ repo: upstream:python-packages/mod_wsgi-metrics
+ ref: ec715eb593255229f9f45f3f323edbb845c691d8
+ unpetrify-ref: 1.1.1
+ build-depends:
+ - psutil
+- name: mod_wsgi
+ morph: strata/apache-httpd-server/mod_wsgi.morph
+ repo: upstream:python-packages/mod_wsgi
+ ref: 1be8b37aaf07d8cb3083da99d6fda12375221b07
+ unpetrify-ref: 4.4.5
+ build-depends:
+ - apr
+ - httpd
+ - mod_wsgi-metrics
diff --git a/strata/apache-httpd-server/apr.morph b/strata/apache-httpd-server/apr.morph
new file mode 100644
index 00000000..aaba1954
--- /dev/null
+++ b/strata/apache-httpd-server/apr.morph
@@ -0,0 +1,22 @@
+name: apr
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./buildconf
+- |
+ ./configure --prefix="$PREFIX" \
+ --disable-static \
+ --enable-threads \
+ --with-installbuilddir="$PREFIX"/share/apr-1/build
+post-install-commands:
+# Install files required for apr-util and httpd
+- |
+ for file in find_apr.m4 apr_common.m4 install.sh gen-build.py get-version.sh \
+ config.guess config.sub; do
+ cp build/"$file" "$DESTDIR$PREFIX"/share/apr-1/build/"$file";
+ done
+# Create a symlink in the build directory to the include directory (this is needed for httpd)
+- ln -sf /usr/include/apr-2 "$DESTDIR$PREFIX"/share/apr-1/build/
+# Copy apr headers needed for mod_wsgi
+- cp include/private/apr_support.h "$DESTDIR$PREFIX"/include/apr-2
+- cp include/arch/unix/apr_private.h "$DESTDIR$PREFIX"/include/apr-2
diff --git a/strata/apache-httpd-server/httpd-server.morph b/strata/apache-httpd-server/httpd-server.morph
new file mode 100644
index 00000000..41f0c36b
--- /dev/null
+++ b/strata/apache-httpd-server/httpd-server.morph
@@ -0,0 +1,67 @@
+name: httpd-server
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./buildconf --with-apr=/usr/share/apr-1
+# We override --libexecdir with the same value that --enable-layout=RPM
+# ought to provide, because otherwise, when mod_wsgi calls apxs, the
+# variable is empty and the build fails as follows.
+#
+#/usr/share/apr-1/build/libtool --silent --mode=link gcc -std=gnu99
+# -o src/server/mod_wsgi.la -rpath # -module -avoid-version
+# src/server/wsgi_validate.lo src/server/wsgi_stream.lo
+# src/server/wsgi_server.lo src/server/wsgi_restrict.lo
+# src/server/wsgi_metrics.lo src/server/wsgi_logger.lo
+# src/server/wsgi_interp.lo src/server/wsgi_daemon.lo
+# src/server/wsgi_convert.lo src/server/wsgi_buckets.lo
+# src/server/wsgi_apache.lo src/server/mod_wsgi.lo
+# -L/usr/lib -L/usr/lib/python2.7/config -lpython2.7 -lpthread -ldl -lutil -lm
+# libtool: error: only absolute run-paths are allowed
+# apxs:Error: Command failed with rc=65536
+#
+- |
+ ./configure --prefix="$PREFIX" \
+ --libexecdir="$PREFIX"/lib/httpd/modules \
+ --enable-layout=RPM \
+ --enable-mpms-shared=all \
+ --with-apr="$PREFIX"/bin/apr-2-config \
+ --enable-suexec --with-suexec \
+ --with-suexec-caller=apache \
+ --with-suexec-docroot=/var/www \
+ --with-suexec-logfile=/var/log/httpd/suexec.log \
+ --with-suexec-bin="$PREFIX"/sbin/suexec \
+    --with-suexec-uidmin=500 --with-suexec-gidmin=100 \
+ --with-suexec-userdir=public_html \
+ --enable-pie \
+ --with-pcre \
+ --enable-mods-shared=all \
+ --enable-ssl --with-ssl \
+ --enable-case-filter --enable-case-filter-in \
+    --enable-cgid --enable-cgi \
+ --enable-so
+post-install-commands:
+# Add perl interpreter path to apxs script, required for loading mod_wsgi.
+- |
+ sed -i 's|#!/replace/with/path/to/perl/interpreter -w|#!/usr/bin/perl -w|g' \
+ "$DESTDIR$PREFIX"/bin/apxs
+# Add conf.d directory where apache will look for other configurations to load.
+- mkdir -p "$DESTDIR"/etc/httpd/conf.d
+- echo 'Include /etc/httpd/conf.d/*.conf' >> "$DESTDIR"/etc/httpd/conf/httpd.conf
+####################################################################################
+# This chunk should add an apache user and an apache group, create the apache home
+# directory and move suexec to /usr/lib/httpd/suexec at system-integration time.
+# This is not possible because linux-user-chroot drops all capabilities for security,
+# so it does not allow changing the owners of directories or files.
+# For now you should instead create a script which includes the following commands:
+# - mkdir -p /var/www
+# - groupadd -r apache
+# - |
+# useradd -c "Apache Server" -d /var/www -g apache \
+# -s /bin/false apache
+#
+# - mkdir -p /usr/lib/httpd
+# - mv -v /usr/bin/suexec /usr/lib/httpd/suexec
+# - chgrp apache /usr/lib/httpd/suexec
+# - chmod 4754 /usr/lib/httpd/suexec
+# - chown -R apache:apache /var/www
+####################################################################################
diff --git a/strata/apache-httpd-server/mod_wsgi.morph b/strata/apache-httpd-server/mod_wsgi.morph
new file mode 100644
index 00000000..86004ad3
--- /dev/null
+++ b/strata/apache-httpd-server/mod_wsgi.morph
@@ -0,0 +1,18 @@
+name: mod_wsgi
+kind: chunk
+build-system: autotools
+configure-commands:
+- |
+ ./configure --prefix="$PREFIX" \
+ --with-apxs=/usr/bin/apxs
+post-install-commands:
+- |
+ install -D -m 644 /proc/self/fd/0 <<'EOF' "$DESTDIR"/etc/httpd/conf.d/wsgi.conf
+ # NOTE: mod_wsgi can not coexist in the same apache process as
+ # mod_wsgi_python3. Only load if mod_wsgi_python3 is not
+ # already loaded.
+
+ <IfModule !wsgi_module>
+ LoadModule wsgi_module /usr/lib/httpd/modules/mod_wsgi.so
+ </IfModule>
+ EOF
diff --git a/strata/armv7lhf-cross-toolchain.morph b/strata/armv7lhf-cross-toolchain.morph
new file mode 100644
index 00000000..ffc4a9dd
--- /dev/null
+++ b/strata/armv7lhf-cross-toolchain.morph
@@ -0,0 +1,54 @@
+name: armv7lhf-cross-toolchain
+kind: stratum
+description: Stratum for building an armv7lhf cross compiler for your native system.
+build-depends:
+- morph: strata/build-essential.morph
+chunks:
+- name: armv7lhf-cross-binutils
+ morph: strata/armv7lhf-cross-toolchain/armv7lhf-cross-binutils.morph
+ repo: upstream:binutils-redhat
+ ref: b1d3b01332ae49a60ff5d6bf53d3a5b1805769c8
+ unpetrify-ref: baserock/build-essential
+
+- name: armv7lhf-cross-linux-api-headers
+ morph: strata/armv7lhf-cross-toolchain/armv7lhf-cross-linux-api-headers.morph
+ repo: upstream:linux
+ ref: df2e1b9168a7ab5dd8149e38b5ac70cdef86d1fa
+ unpetrify-ref: baserock/v3.8
+
+- name: armv7lhf-cross-gcc-nolibc
+ morph: strata/armv7lhf-cross-toolchain/armv7lhf-cross-gcc-nolibc.morph
+ repo: upstream:gcc-tarball
+ ref: b3c9b176c1f10ebeff5700eb3760e9511f23fa06
+ unpetrify-ref: baserock/build-essential
+ build-depends:
+ - armv7lhf-cross-binutils
+
+- name: armv7lhf-cross-glibc
+ morph: strata/armv7lhf-cross-toolchain/armv7lhf-cross-glibc.morph
+ repo: upstream:glibc
+ ref: 4e42b5b8f89f0e288e68be7ad70f9525aebc2cff
+ unpetrify-ref: baserock/glibc-2.21
+ build-depends:
+ - armv7lhf-cross-binutils
+ - armv7lhf-cross-gcc-nolibc
+ - armv7lhf-cross-linux-api-headers
+
+- name: armv7lhf-cross-gcc
+ morph: strata/armv7lhf-cross-toolchain/armv7lhf-cross-gcc.morph
+ repo: upstream:gcc-tarball
+ ref: b3c9b176c1f10ebeff5700eb3760e9511f23fa06
+ unpetrify-ref: baserock/build-essential
+ build-depends:
+ - armv7lhf-cross-binutils
+ - armv7lhf-cross-glibc
+
+- name: armv7lhf-cross-libstdc++
+ morph: strata/armv7lhf-cross-toolchain/armv7lhf-cross-libstdc++.morph
+ repo: upstream:gcc-tarball
+ ref: b3c9b176c1f10ebeff5700eb3760e9511f23fa06
+ unpetrify-ref: baserock/build-essential
+ build-depends:
+ - armv7lhf-cross-binutils
+ - armv7lhf-cross-gcc
+ - armv7lhf-cross-glibc
diff --git a/strata/armv7lhf-cross-toolchain/armv7lhf-cross-binutils.morph b/strata/armv7lhf-cross-toolchain/armv7lhf-cross-binutils.morph
new file mode 100644
index 00000000..8e842e41
--- /dev/null
+++ b/strata/armv7lhf-cross-toolchain/armv7lhf-cross-binutils.morph
@@ -0,0 +1,24 @@
+name: armv7lhf-cross-binutils
+kind: chunk
+build-system: autotools
+
+configure-commands:
+- |
+ # The TARGET used is the final triplet we expect, rather than that
+ # of the libc-less GCC we build, since the binutils only needs to
+ # be built once.
+ # This requires extra effort to get the stage 1 GCC to use these
+ # tools, but saves having an extra binutils build.
+ export TARGET=armv7lhf-baserock-linux-gnueabi
+
+ # 1. Binutils gets installed in the default sysroot path of
+ # $PREFIX/$TARGET/{bin,lib}, with the actual sysroot at
+ # $PREFIX/$TARGET/sys-root and its tools also available as
+ # $PREFIX/bin/$TARGET-$tool
+ # 2. The '=' makes the path we give relative to the sysroot.
+ ./configure --prefix="$PREFIX" --disable-nls --disable-werror \
+ --build=$(sh config.guess) \
+ --host=$(sh config.guess) \
+ --target="$TARGET" \
+ `# [1]` --with-sysroot \
+ `# [2]` --with-lib-path="=$PREFIX/lib"
diff --git a/strata/armv7lhf-cross-toolchain/armv7lhf-cross-gcc-nolibc.morph b/strata/armv7lhf-cross-toolchain/armv7lhf-cross-gcc-nolibc.morph
new file mode 100644
index 00000000..b5a8c338
--- /dev/null
+++ b/strata/armv7lhf-cross-toolchain/armv7lhf-cross-gcc-nolibc.morph
@@ -0,0 +1,74 @@
+name: armv7lhf-cross-gcc-nolibc
+kind: chunk
+
+configure-commands:
+- mkdir o
+
+# Configure flag notes:
+# 1. Standard flags. See gcc.morph.
+# 2. See stage1-gcc.morph.
+# 3. Our binutils is for the final $TARGET, rather than the intermediate
+#    target our GCC is being built for, so we need to set
+#    with-build-time-tools so that it finds our binutils at build time,
+#    and with-as and with-ld so that our temporary GCC uses the
+#    appropriate tools when it compiles our libc, rather than trying to
+#    use $TARGET_STAGE1-as when producing binaries.
+# 4. Disable stuff that doesn't work when building a cross compiler
+# without an existing libc, and generally try to keep this build as
+# simple as possible.
+- |
+ export MORPH_ARCH=armv7lhf
+ export TARGET=armv7lhf-baserock-linux-gnueabi
+ export TARGET_STAGE1=armv7lhf-none-linux-gnueabi
+
+ case "$MORPH_ARCH" in
+ armv7lhf) ARCH_FLAGS="--with-arch=armv7-a \
+ --with-cpu=cortex-a9 \
+ --with-tune=cortex-a9 \
+ --with-fpu=vfpv3-d16 \
+ --with-float=hard" ;;
+ armv7*) ARCH_FLAGS="--with-arch=armv7-a" ;;
+ esac
+
+ cd o && ../configure \
+ $ARCH_FLAGS \
+ --build=$(sh ../config.guess) \
+ --host=$(sh ../config.guess) \
+ --target="$TARGET_STAGE1" \
+ --prefix="$PREFIX" \
+ `# [1]` --libdir="$PREFIX/lib" \
+ `# [2]` --with-sysroot=/nonexistentdir \
+ --with-newlib \
+ `# [2]` --with-local-prefix="$PREFIX" \
+ `# [2]` --with-native-system-header-dir="$PREFIX/include" \
+ `# [3]` --with-build-time-tools="$PREFIX/$TARGET/bin" \
+ `# [3]` --with-as="$PREFIX/$TARGET/bin/as" \
+ `# [3]` --with-ld="$PREFIX/$TARGET/bin/ld" \
+ --without-headers \
+ --disable-nls \
+ --disable-shared \
+ `# [4]` --disable-multilib \
+ `# [4]` --disable-decimal-float \
+ `# [4]` --disable-threads \
+ `# [4]` --disable-libatomic \
+ `# [4]` --disable-libgomp \
+ `# [4]` --disable-libitm \
+ `# [4]` --disable-libquadmath \
+ `# [4]` --disable-libsanitizer \
+ `# [4]` --disable-libssp \
+ `# [4]` --disable-libvtv \
+ `# [4]` --disable-libcilkrts \
+ `# [4]` --disable-libstdc++-v3 \
+ --enable-languages=c,c++
+
+build-commands:
+- |
+ # See stage1-gcc.morph.
+ case "$MORPH_ARCH" in
+ armv7*) sed -i "s/--host=none/--host=armv7a/" o/Makefile
+ sed -i "s/--target=none/--target=armv7a/" o/Makefile ;;
+ esac
+ cd o && make
+
+install-commands:
+- cd o && make DESTDIR="$DESTDIR" install
diff --git a/strata/armv7lhf-cross-toolchain/armv7lhf-cross-gcc.morph b/strata/armv7lhf-cross-toolchain/armv7lhf-cross-gcc.morph
new file mode 100644
index 00000000..140a29b1
--- /dev/null
+++ b/strata/armv7lhf-cross-toolchain/armv7lhf-cross-gcc.morph
@@ -0,0 +1,48 @@
+name: armv7lhf-cross-gcc
+kind: chunk
+
+configure-commands:
+- mkdir o
+
+# Configure flag notes:
+# 1. Use the default sysroot path to install to and locate headers
+# 2. Avoid having more than one copy of ZLib in use on the system
+# 3. Standard flags. See gcc.morph.
+- |
+ export MORPH_ARCH=armv7lhf
+ export TARGET=armv7lhf-baserock-linux-gnueabi
+
+ case "$MORPH_ARCH" in
+ armv7lhf) ARCH_FLAGS="--with-arch=armv7-a \
+ --with-cpu=cortex-a9 \
+ --with-tune=cortex-a9 \
+ --with-fpu=vfpv3-d16 \
+ --with-float=hard" ;;
+ armv7*) ARCH_FLAGS="--with-arch=armv7-a" ;;
+ esac
+
+ cd o && ../configure \
+ $ARCH_FLAGS \
+ --build=$(sh ../config.guess) \
+ --host=$(sh ../config.guess) \
+ --target="$TARGET" \
+ --prefix="$PREFIX" \
+ `# [1]` --with-sysroot \
+ --disable-bootstrap \
+ `# [2]` --with-system-zlib \
+ `# [3]` --libdir="$PREFIX/lib" \
+ `# [3]` --disable-multilib \
+ `# [3]` --enable-languages=c,c++,fortran
+
+build-commands:
+- cd o && make
+
+install-commands:
+- cd o && make DESTDIR="$DESTDIR" install
+- ln -s gcc "$DESTDIR/$PREFIX/bin/cc"
+- |
+ export TARGET=armv7lhf-baserock-linux-gnueabi
+ for fortran_alias in f77 f90 f95; do
+ ln -s "$TARGET-gfortran" \
+ "$DESTDIR/$PREFIX/bin/$TARGET-$fortran_alias"
+ done
diff --git a/strata/armv7lhf-cross-toolchain/armv7lhf-cross-glibc.morph b/strata/armv7lhf-cross-toolchain/armv7lhf-cross-glibc.morph
new file mode 100644
index 00000000..2a692c38
--- /dev/null
+++ b/strata/armv7lhf-cross-toolchain/armv7lhf-cross-glibc.morph
@@ -0,0 +1,51 @@
+name: armv7lhf-cross-glibc
+kind: chunk
+
+configure-commands:
+- mkdir o
+
+# Configure flag notes:
+# 1. Location of linux-api-headers, needed since glibc doesn't
+# support being given a sysroot.
+# 2. Location of binutils, since we build binutils for the final
+#    target triplet, but our gcc for the stage 1 target triplet.
+# 3. Normal flags. See strata/build-essential/glibc.morph.
+# 4. Force configuration values of certain things that can't be detected
+# in a cross-compile.
+- |
+ export MORPH_ARCH=armv7lhf
+ export TARGET=armv7lhf-baserock-linux-gnueabi
+ export TARGET_STAGE1=armv7lhf-none-linux-gnueabi
+
+ export CFLAGS="-O2 $CFLAGS"
+
+ # glibc looks for $TARGET-gcc, but not $TARGET_STAGE1-gcc even
+  # though --build is specified, so we have to override which gcc to
+ # use here.
+ export CC="$TARGET_STAGE1-gcc"
+
+ case "$MORPH_ARCH" in
+ armv7*)
+ ARCH_FLAGS="--without-fp" ;;
+ esac
+
+ cd o && ../configure \
+ $ARCH_FLAGS \
+ --build=$TARGET_STAGE1 --host=$TARGET \
+ --prefix="$PREFIX" \
+ `# [1]` --with-headers="$PREFIX/$TARGET/sys-root/$PREFIX/include" \
+ `# [2]` --with-binutils="$PREFIX/$TARGET/bin" \
+ `# [3]` --disable-profile --enable-kernel=2.6.25 \
+ `# [4]` libc_cv_c_cleanup=yes libc_cv_ctors_header=yes \
+ libc_cv_forced_unwind=yes libc_cv_ssp=no
+
+build-commands:
+- cd o && make localtime=UTC
+
+install-commands:
+- |
+ # glibc doesn't help with sysroots, so we need to spell out the
+ # full path ourselves.
+ export TARGET=armv7lhf-baserock-linux-gnueabi
+ cd o && make install_root="$DESTDIR/$PREFIX/$TARGET/sys-root/" \
+ localtime=UTC install
diff --git a/strata/armv7lhf-cross-toolchain/armv7lhf-cross-libstdc++.morph b/strata/armv7lhf-cross-toolchain/armv7lhf-cross-libstdc++.morph
new file mode 100644
index 00000000..7743bb5b
--- /dev/null
+++ b/strata/armv7lhf-cross-toolchain/armv7lhf-cross-libstdc++.morph
@@ -0,0 +1,32 @@
+name: armv7lhf-cross-libstdc++
+kind: chunk
+configure-commands:
+- mkdir o
+
+# Configure flag notes:
+# 1. Use the default sysroot path to install to and locate headers
+# 2. From LFS: the header location of C++ needs to be explicitly given
+# as we are running the configure script from the top-level
+# directory.
+- |
+ export MORPH_ARCH=armv7lhf
+ export TARGET=armv7lhf-baserock-linux-gnueabi
+
+ # -fPIC must be given, otherwise it will not be possible to create
+ # shared libraries linked to libstdc++
+ export CPPFLAGS="-fPIC"
+ cd o && ../libstdc++-v3/configure \
+ --build=$(sh ../config.guess) \
+ --host=$(sh ../config.guess) \
+ --target="$TARGET" \
+ --prefix="$PREFIX" \
+ --disable-nls \
+ --disable-shared \
+ --disable-multilib \
+ `# [1]` --with-sysroot
+
+build-commands:
+- cd o && make
+
+install-commands:
+- cd o && make DESTDIR="$DESTDIR" install
diff --git a/strata/armv7lhf-cross-toolchain/armv7lhf-cross-linux-api-headers.morph b/strata/armv7lhf-cross-toolchain/armv7lhf-cross-linux-api-headers.morph
new file mode 100644
index 00000000..c2654419
--- /dev/null
+++ b/strata/armv7lhf-cross-toolchain/armv7lhf-cross-linux-api-headers.morph
@@ -0,0 +1,10 @@
+name: armv7lhf-cross-linux-api-headers
+kind: chunk
+install-commands:
+- ARCH=arm make INSTALL_HDR_PATH=dest headers_install
+- |
+  # Copy headers to the sysroot, which is where the glibc build will
+  # find them and install its own headers and libraries.
+ export TARGET=armv7lhf-baserock-linux-gnueabi
+ install -d "$DESTDIR$PREFIX/$TARGET/sys-root/$PREFIX/include"
+ cp -r dest/include/* "$DESTDIR/$PREFIX/$TARGET/sys-root/$PREFIX/include"
diff --git a/strata/audio-bluetooth.morph b/strata/audio-bluetooth.morph
new file mode 100644
index 00000000..00a614c9
--- /dev/null
+++ b/strata/audio-bluetooth.morph
@@ -0,0 +1,92 @@
+name: audio-bluetooth
+kind: stratum
+description: Components required for audio and bluetooth.
+build-depends:
+- morph: strata/foundation.morph
+chunks:
+- name: libatomic_ops
+ repo: upstream:libatomic_ops
+ ref: 0a58f5d63969c10d2141af89cb7a53df786909ab
+ unpetrify-ref: baserock/morph
+- name: alsa-lib
+ repo: upstream:alsa-lib
+ ref: 9cfd1faa27511edbb8bebbab85030125f33590ae
+ unpetrify-ref: v1.0.28
+- name: alsa-utils
+ morph: strata/audio-bluetooth/alsa-utils.morph
+ repo: upstream:alsa-utils
+ ref: bbf137f66b2c8ce36db73f59ee69d5c443436524
+ unpetrify-ref: v1.0.28
+ build-depends:
+ - alsa-lib
+- name: libical
+ morph: strata/audio-bluetooth/libical.morph
+ repo: upstream:libical
+ ref: 52568782ae136e1ec4ddf3eb6da7b0f3fbd3f1b3
+ unpetrify-ref: baserock/morph
+- name: bluez
+ morph: strata/audio-bluetooth/bluez.morph
+ repo: upstream:bluez
+ ref: a18201b5321e8b41d2ee7c7bc7285d976fc433e6
+ unpetrify-ref: baserock/v5.17-patched
+ build-depends:
+ - libical
+ - alsa-lib
+- name: bluez-tools
+ morph: strata/audio-bluetooth/bluez-tools.morph
+ repo: upstream:bluez-tools
+ ref: 7350787e96a6ecf2cc5d4afddb3321ccad9fa461
+ unpetrify-ref: baserock/morph
+- name: obexd
+ repo: upstream:obexd
+ ref: 4225280022c206762c91beee47c62d05e1baeacc
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - bluez
+ - libical
+- name: speex
+ repo: upstream:speex
+ ref: e5dc987a403426ee3d0d81768afffa0fe74e20fa
+ unpetrify-ref: baserock/morph
+- name: nohands
+ morph: strata/audio-bluetooth/nohands.morph
+ repo: upstream:nohands
+ ref: 20ec5cc245fd5e74d9a0fc21497d1ba4164f2a3a
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - bluez
+ - speex
+- name: mobile-broadband-provider-info
+ repo: upstream:mobile-broadband-provider-info
+ ref: 4ed19e11c2975105b71b956440acdb25d46a347d
+ unpetrify-ref: baserock/morph
+- name: ofono
+ morph: strata/audio-bluetooth/ofono.morph
+ repo: upstream:ofono
+ ref: d05b718cc0b0d367227fbfbf52e60fc5462cc549
+ unpetrify-ref: "1.15"
+ build-depends:
+ - mobile-broadband-provider-info
+ - bluez
+- name: json-c
+ morph: strata/audio-bluetooth/json-c.morph
+ repo: upstream:json-c
+ ref: e8bd6865d7a671e9f75ece05dfe86a19ba610581
+ unpetrify-ref: baserock/json-c-0.12-20140410-plus-patch
+- name: libsndfile
+ morph: strata/audio-bluetooth/libsndfile.morph
+ repo: upstream:libsndfile
+ ref: 6c05c615c95ffef7a8a5a707cde0bb52bdd74244
+ unpetrify-ref: baserock/morph
+- name: pulseaudio
+ morph: strata/audio-bluetooth/pulseaudio.morph
+ repo: upstream:pulseaudio
+ ref: 53ad8aa7caa33caac52e35f71253e29d5a15f6e7
+ unpetrify-ref: v6.0
+ build-depends:
+ - libsndfile
+ - libatomic_ops
+ - speex
+ - json-c
+ - alsa-lib
+ - bluez
diff --git a/strata/audio-bluetooth/alsa-utils.morph b/strata/audio-bluetooth/alsa-utils.morph
new file mode 100644
index 00000000..a6a1e041
--- /dev/null
+++ b/strata/audio-bluetooth/alsa-utils.morph
@@ -0,0 +1,7 @@
+name: alsa-utils
+kind: chunk
+description: ALSA utilities
+build-system: autotools
+configure-commands:
+- autoreconf -ifv
+- ./configure --prefix="$PREFIX" --disable-xmlto
diff --git a/strata/audio-bluetooth/bluez-tools.morph b/strata/audio-bluetooth/bluez-tools.morph
new file mode 100644
index 00000000..80e47ef5
--- /dev/null
+++ b/strata/audio-bluetooth/bluez-tools.morph
@@ -0,0 +1,10 @@
+name: bluez-tools
+kind: chunk
+description: A set of tools to manage Bluetooth devices for Linux
+build-system: autotools
+configure-commands:
+- sed -i -e 's/-lreadline/-lreadline -lncurses/g' configure.ac
+- ./autogen.sh
+- ./configure --prefix="$PREFIX" --disable-obex
+build-commands:
+- LDFLAGS="$LDFLAGS -lncurses" make
diff --git a/strata/audio-bluetooth/bluez.morph b/strata/audio-bluetooth/bluez.morph
new file mode 100644
index 00000000..1395b98d
--- /dev/null
+++ b/strata/audio-bluetooth/bluez.morph
@@ -0,0 +1,12 @@
+name: bluez
+kind: chunk
+description: Bluetooth protocol stack for Linux
+build-system: autotools
+configure-commands:
+- autoreconf -ivf
+- ./configure --prefix="$PREFIX" --enable-tools --enable-test --enable-alsa --enable-library
+ --with-telephony=ofono --sysconfdir=/etc --localstatedir=/var --libexecdir=/lib
+install-commands:
+- make install
+- mkdir -p "$DESTDIR/etc/systemd/system/bluetooth.target.wants"
+- ln -s /lib/systemd/system/bluetooth.service "$DESTDIR/etc/systemd/system/bluetooth.target.wants/bluetooth.service"
diff --git a/strata/audio-bluetooth/json-c.morph b/strata/audio-bluetooth/json-c.morph
new file mode 100644
index 00000000..c3d9243c
--- /dev/null
+++ b/strata/audio-bluetooth/json-c.morph
@@ -0,0 +1,7 @@
+name: json-c
+kind: chunk
+description: JSON C Library
+build-system: autotools
+install-commands:
+- make DESTDIR="$DESTDIR" install
+- install -m755 json_object_iterator.h "$DESTDIR/usr/include/json"
diff --git a/strata/audio-bluetooth/libical.morph b/strata/audio-bluetooth/libical.morph
new file mode 100644
index 00000000..1f772fd9
--- /dev/null
+++ b/strata/audio-bluetooth/libical.morph
@@ -0,0 +1,6 @@
+name: libical
+kind: chunk
+build-system: autotools
+configure-commands:
+- autoreconf -ivf
+- ./configure --prefix="$PREFIX"
diff --git a/strata/audio-bluetooth/libsndfile.morph b/strata/audio-bluetooth/libsndfile.morph
new file mode 100644
index 00000000..8d6c2fe0
--- /dev/null
+++ b/strata/audio-bluetooth/libsndfile.morph
@@ -0,0 +1,5 @@
+name: libsndfile
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./configure --prefix="$PREFIX" --disable-external-libs
diff --git a/strata/audio-bluetooth/nohands.morph b/strata/audio-bluetooth/nohands.morph
new file mode 100644
index 00000000..ebe249c0
--- /dev/null
+++ b/strata/audio-bluetooth/nohands.morph
@@ -0,0 +1,5 @@
+name: nohands
+kind: chunk
+build-system: autotools
+build-commands:
+- make CXXFLAGS="-fpermissive -lpthread"
diff --git a/strata/audio-bluetooth/ofono.morph b/strata/audio-bluetooth/ofono.morph
new file mode 100644
index 00000000..a775b0c1
--- /dev/null
+++ b/strata/audio-bluetooth/ofono.morph
@@ -0,0 +1,11 @@
+name: ofono
+kind: chunk
+description: ofono Open Source Telephony
+build-system: autotools
+configure-commands:
+- autoreconf -ivf
+- ./configure --prefix="$PREFIX" --sysconfdir=/etc --localstatedir=/var
+install-commands:
+- make install
+- mkdir -p "$DESTDIR/etc/systemd/system/multi-user.target.wants"
+- ln -s /lib/systemd/system/ofono.service "$DESTDIR/etc/systemd/system/multi-user.target.wants/ofono.service"
diff --git a/strata/audio-bluetooth/pulseaudio.morph b/strata/audio-bluetooth/pulseaudio.morph
new file mode 100644
index 00000000..fa183fcb
--- /dev/null
+++ b/strata/audio-bluetooth/pulseaudio.morph
@@ -0,0 +1,11 @@
+name: pulseaudio
+kind: chunk
+description: PulseAudio System
+build-system: autotools
+configure-commands:
+- ./autogen.sh
+- ./configure --prefix="$PREFIX" --localstatedir=/var --sysconfdir=/etc --with-database=simple --with-systemduserunitdir=/lib/systemd/system
+install-commands:
+- make DESTDIR="$DESTDIR" install
+- mkdir -p "$DESTDIR/etc/systemd/system/multi-user.target.wants"
+- ln -s /lib/systemd/system/pulseaudio.service "$DESTDIR/etc/systemd/system/multi-user.target.wants/pulseaudio.service"
diff --git a/strata/baserock-import.morph b/strata/baserock-import.morph
new file mode 100644
index 00000000..5623886e
--- /dev/null
+++ b/strata/baserock-import.morph
@@ -0,0 +1,21 @@
+name: baserock-import
+kind: stratum
+description: Tools for importing software into Baserock definitions.
+build-depends:
+- morph: strata/python-common.morph
+- morph: strata/morph-utils.morph
+chunks:
+- name: ansicolor
+ repo: upstream:python-packages/ansicolor
+ ref: 9d3adbaff35bb6cde1d63414a8cc72f572c4d0ef
+ unpetrify-ref: 0.2.4
+- name: requests-cache
+ repo: upstream:python-packages/requests-cache
+ ref: a1b5e65ccd715f1e8434f18f27913715340d6aaf
+ unpetrify-ref: 0.4.6
+- name: baserock-import
+ repo: baserock:baserock/import
+ ref: 461979515ca30cd8e5acdec4cdb5ca3adeb6a9e7
+ unpetrify-ref: master
+ build-depends:
+ - ansicolor
diff --git a/strata/bsp-armv5l-openbmc-aspeed.morph b/strata/bsp-armv5l-openbmc-aspeed.morph
new file mode 100644
index 00000000..83bb74ef
--- /dev/null
+++ b/strata/bsp-armv5l-openbmc-aspeed.morph
@@ -0,0 +1,19 @@
+name: bsp-armv5l-openbmc-aspeed
+kind: stratum
+description:
+- |
+ The set of platform specific components required for booting
+  armv5l-openbmc-aspeed based systems
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: linux-armv5l-openbmc-aspeed-wedge
+ morph: strata/bsp-armv5l-openbmc-aspeed/linux-armv5l-openbmc-aspeed.morph
+ repo: upstream:linux-stable
+ ref: 5cbce86c2115075b8054e4dba8cdf328aa6fa5b6
+ unpetrify-ref: baserock/v2.6.28.9/openbmc_aspeed_wedge
+- name: u-boot@aspeed
+ morph: strata/bsp-armv5l-openbmc-aspeed/u-boot@aspeed.morph
+ repo: upstream:u-boot
+ ref: af962087db0b88f4073f8f5a6184cb7f011d224d
+ unpetrify-ref: baserock/v2013.07+aspeed_wedge
diff --git a/strata/bsp-armv5l-openbmc-aspeed/linux-armv5l-openbmc-aspeed.morph b/strata/bsp-armv5l-openbmc-aspeed/linux-armv5l-openbmc-aspeed.morph
new file mode 100644
index 00000000..c6e38c63
--- /dev/null
+++ b/strata/bsp-armv5l-openbmc-aspeed/linux-armv5l-openbmc-aspeed.morph
@@ -0,0 +1,9 @@
+name: linux-armv5l-openbmc-aspeed
+kind: chunk
+configure-commands:
+- make ast2400_defconfig
+build-commands:
+- make $MAKEFLAGS
+install-commands:
+- mkdir -p "$DESTDIR"/boot
+- cp arch/arm/boot/Image "$DESTDIR/boot/vmlinux"
diff --git a/strata/bsp-armv5l-openbmc-aspeed/u-boot@aspeed.morph b/strata/bsp-armv5l-openbmc-aspeed/u-boot@aspeed.morph
new file mode 100644
index 00000000..1ded05c5
--- /dev/null
+++ b/strata/bsp-armv5l-openbmc-aspeed/u-boot@aspeed.morph
@@ -0,0 +1,14 @@
+name: u-boot@aspeed
+kind: chunk
+configure-commands:
+- make ast2400_spi_config
+build-commands:
+- make
+- make tools
+install-commands:
+- mkdir -p "$DESTDIR$PREFIX/bin"
+- install -m 755 tools/img2brec.sh "$DESTDIR$PREFIX/bin/"
+- install -m 755 tools/jtagconsole "$DESTDIR$PREFIX/bin/"
+- install -m 755 tools/netconsole "$DESTDIR$PREFIX/bin/"
+- install -m 755 tools/mkenvimage "$DESTDIR$PREFIX/bin/"
+- install -m 755 tools/mkimage "$DESTDIR$PREFIX/bin/"
diff --git a/strata/bsp-armv7-highbank.morph b/strata/bsp-armv7-highbank.morph
new file mode 100644
index 00000000..9ac97e6c
--- /dev/null
+++ b/strata/bsp-armv7-highbank.morph
@@ -0,0 +1,17 @@
+name: bsp-armv7-highbank
+kind: stratum
+description: The platform dependent components required to boot an ARM highbank board.
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: linux-armv7-highbank
+ morph: strata/bsp-armv7-highbank/linux-armv7-highbank.morph
+ repo: upstream:linux-stable
+
+ # Linux 3.16 and later do not boot on Calxeda. They fail just before
+ # starting 'init' with this sort of message:
+ #
+ # Unhandled fault: imprecise external abort (0xc06) at 0xb6fea878
+
+ ref: f35b5e46feabab668a44df5b33f3558629f94dfc
+ unpetrify-ref: v3.15.10
diff --git a/strata/bsp-armv7-highbank/linux-armv7-highbank.morph b/strata/bsp-armv7-highbank/linux-armv7-highbank.morph
new file mode 100644
index 00000000..e904ff45
--- /dev/null
+++ b/strata/bsp-armv7-highbank/linux-armv7-highbank.morph
@@ -0,0 +1,52 @@
+name: linux-armv7-highbank
+kind: chunk
+configure-commands:
+- make multi_v7_defconfig
+- scripts/config -e AUTOFS4_FS
+- scripts/config -e BLK_DEV_LOOP
+- scripts/config -e CGROUPS
+- scripts/config -e CONFIG_POSIX_MQUEUE
+- scripts/config -e CPU_IDLE
+- scripts/config -e EXT4_FS
+- scripts/config -e FUSE_FS
+#- scripts/config -e OVERLAY_FS # Activate when we build Linux >= 3.18
+- scripts/config -e EXT4_USE_FOR_EXT23
+- scripts/config -e PACKET
+- scripts/config -e UNIX
+- scripts/config -e INET
+- scripts/config -e IP_MULTICAST
+- scripts/config -e IP_PNP
+- scripts/config -e IP_PNP_DHCP
+- scripts/config -e IP_PNP_BOOTP
+- scripts/config -e MSDOS_FS
+- scripts/config -e NETWORK_FILESYSTEMS
+- scripts/config -e NET_KEY
+- scripts/config -e NFS_FS
+- scripts/config -e NFS_V3
+- scripts/config -e ROOT_NFS
+- scripts/config -e LOCKD
+- scripts/config -e LOCKD_V4
+- scripts/config -e NFS_COMMON
+- scripts/config -e SUNRPC
+- scripts/config -e SYSVIPC
+- scripts/config -e BTRFS_FS
+- scripts/config -e DEVTMPFS
+- scripts/config -e DEVTMPFS_MOUNT
+- scripts/config -e CONFIG_FHANDLE
+- scripts/config -e DEVPTS_MULTIPLE_INSTANCES
+- scripts/config --set-str UEVENT_HELPER_PATH ''
+- scripts/config -e DM_SNAPSHOT
+- scripts/config -e IKCONFIG
+- scripts/config -e IKCONFIG_PROC
+- scripts/config -e TMPFS
+- scripts/config -e TMPFS_POSIX_ACL
+- scripts/config -e VFAT_FS
+- scripts/config -e VLAN_8021Q
+- scripts/config -e BRIDGE_VLAN_FILTERING
+- scripts/config -e VETH
+- yes '' | make oldconfig
+build-commands:
+- make zImage $MAKEFLAGS
+install-commands:
+- mkdir -p "$DESTDIR"/boot
+- cp arch/arm/boot/zImage "$DESTDIR"/boot/zImage
diff --git a/strata/bsp-armv7-versatile.morph b/strata/bsp-armv7-versatile.morph
new file mode 100644
index 00000000..470f6c56
--- /dev/null
+++ b/strata/bsp-armv7-versatile.morph
@@ -0,0 +1,12 @@
+name: bsp-armv7-versatile
+kind: stratum
+description: The platform dependent components required to boot an ARM versatile development
+ board.
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: linux-armv7-versatile
+ morph: strata/bsp-armv7-versatile/linux-armv7-versatile.morph
+ repo: upstream:linux-stable
+ ref: 660613d1a4e94144490850b6c3d350331860fac4
+ unpetrify-ref: v3.19.2
diff --git a/strata/bsp-armv7-versatile/linux-armv7-versatile.morph b/strata/bsp-armv7-versatile/linux-armv7-versatile.morph
new file mode 100644
index 00000000..ed0194b8
--- /dev/null
+++ b/strata/bsp-armv7-versatile/linux-armv7-versatile.morph
@@ -0,0 +1,31 @@
+name: linux-armv7-versatile
+kind: chunk
+configure-commands:
+- make ARCH=arm vexpress_defconfig
+- scripts/config -e BTRFS_FS
+- scripts/config -e BTRFS_FS_POSIX_ACL
+- scripts/config -d BTRFS_FS_CHECK_INTEGRITY
+- scripts/config -e DEVTMPFS
+- scripts/config -e DEVTMPFS_MOUNT
+- scripts/config -e FUSE_FS
+- scripts/config -e OVERLAY_FS
+- scripts/config -e CONFIG_FHANDLE
+- scripts/config -e DEVPTS_MULTIPLE_INSTANCES
+- scripts/config -e CGROUPS
+- scripts/config -e AUTOFS4_FS
+- scripts/config -e BLK_DEV_LOOP
+- scripts/config --set-str UEVENT_HELPER_PATH ''
+- scripts/config -e DM_SNAPSHOT
+- scripts/config -e IKCONFIG
+- scripts/config -e IKCONFIG_PROC
+- scripts/config -e SECCOMP
+- scripts/config -d DEBUG_STACK_TRACE
+- scripts/config -e FB_VESA
+- scripts/config -e VLAN_8021Q
+- scripts/config -e BRIDGE_VLAN_FILTERING
+- yes '' | make oldconfig
+build-commands:
+- make ARCH=arm zImage $MAKEFLAGS
+install-commands:
+- mkdir -p "$DESTDIR"/boot
+- cp arch/arm/boot/zImage "$DESTDIR"/boot/zImage
diff --git a/strata/bsp-armv7b-highbank.morph b/strata/bsp-armv7b-highbank.morph
new file mode 100644
index 00000000..397b9fcb
--- /dev/null
+++ b/strata/bsp-armv7b-highbank.morph
@@ -0,0 +1,17 @@
+name: bsp-armv7b-highbank
+kind: stratum
+description: The platform dependent components required to boot an ARM highbank board.
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: linux-armv7b-highbank
+ morph: strata/bsp-armv7b-highbank/linux-armv7b-highbank.morph
+ repo: upstream:linux-stable
+
+ # Linux 3.16 and later do not boot on Calxeda. They fail just before
+ # starting 'init' with this sort of message:
+ #
+ # Unhandled fault: imprecise external abort (0xc06) at 0xb6fea878
+
+ ref: f35b5e46feabab668a44df5b33f3558629f94dfc
+ unpetrify-ref: v3.15.10
diff --git a/strata/bsp-armv7b-highbank/linux-armv7b-highbank.morph b/strata/bsp-armv7b-highbank/linux-armv7b-highbank.morph
new file mode 100644
index 00000000..1b86d320
--- /dev/null
+++ b/strata/bsp-armv7b-highbank/linux-armv7b-highbank.morph
@@ -0,0 +1,53 @@
+name: linux-armv7b-highbank
+kind: chunk
+configure-commands:
+- make multi_v7_defconfig
+- scripts/config -e CPU_BIG_ENDIAN
+- scripts/config -e CPU_ENDIAN_BE8
+- scripts/config -e CPU_BE8_BOOT_LE
+- scripts/config -e PACKET
+- scripts/config -e UNIX
+- scripts/config -e NET_KEY
+- scripts/config -e INET
+- scripts/config -e IP_MULTICAST
+- scripts/config -e IP_PNP
+- scripts/config -e IP_PNP_DHCP
+- scripts/config -e IP_PNP_BOOTP
+- scripts/config --set-str UEVENT_HELPER_PATH ''
+- scripts/config -e DM_SNAPSHOT
+- scripts/config -e DEVTMPFS
+- scripts/config -e DEVTMPFS_MOUNT
+- scripts/config -e CONFIG_FHANDLE
+- scripts/config -e DEVPTS_MULTIPLE_INSTANCES
+- scripts/config -e BLK_DEV_LOOP
+- scripts/config -e BTRFS_FS
+- scripts/config -e TMPFS
+- scripts/config -e FUSE_FS
+#- scripts/config -e OVERLAY_FS # Activate when we build Linux >= 3.18
+- scripts/config -e IKCONFIG
+- scripts/config -e IKCONFIG_PROC
+- scripts/config -e CGROUPS
+- scripts/config -e AUTOFS4_FS
+- scripts/config -e NETWORK_FILESYSTEMS
+- scripts/config -e NFS_FS
+- scripts/config -e NFS_V3
+- scripts/config -e ROOT_NFS
+- scripts/config -e LOCKD
+- scripts/config -e LOCKD_V4
+- scripts/config -e NFS_COMMON
+- scripts/config -e SUNRPC
+- scripts/config -e SYSVIPC
+- scripts/config -e CONFIG_POSIX_MQUEUE
+- scripts/config -e CPU_IDLE
+- scripts/config -e EXT4_FS
+- scripts/config -e EXT4_USE_FOR_EXT23
+- scripts/config -e MSDOS_FS
+- scripts/config -e VFAT_FS
+- scripts/config -e VLAN_8021Q
+- scripts/config -e BRIDGE_VLAN_FILTERING
+- yes '' | make oldconfig
+build-commands:
+- make zImage $MAKEFLAGS
+install-commands:
+- mkdir -p "$DESTDIR"/boot
+- cp arch/arm/boot/zImage "$DESTDIR"/boot/zImage
diff --git a/strata/bsp-armv7b-vexpress-tc2.morph b/strata/bsp-armv7b-vexpress-tc2.morph
new file mode 100644
index 00000000..ea67c386
--- /dev/null
+++ b/strata/bsp-armv7b-vexpress-tc2.morph
@@ -0,0 +1,11 @@
+name: bsp-armv7b-vexpress-tc2
+kind: stratum
+description: The platform dependent components required to boot a Versatile Express TC2 board
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: linux-armv7b-vexpress-tc2
+ morph: strata/bsp-armv7b-vexpress-tc2/linux-armv7b-vexpress-tc2.morph
+ repo: upstream:linux-stable
+ ref: 660613d1a4e94144490850b6c3d350331860fac4
+ unpetrify-ref: v3.19.2
diff --git a/strata/bsp-armv7b-vexpress-tc2/linux-armv7b-vexpress-tc2.morph b/strata/bsp-armv7b-vexpress-tc2/linux-armv7b-vexpress-tc2.morph
new file mode 100644
index 00000000..fd6fab15
--- /dev/null
+++ b/strata/bsp-armv7b-vexpress-tc2/linux-armv7b-vexpress-tc2.morph
@@ -0,0 +1,57 @@
+name: linux-armv7b-vexpress-tc2
+kind: chunk
+configure-commands:
+- make vexpress_defconfig
+- scripts/config -e CPU_BIG_ENDIAN
+- scripts/config -e CPU_ENDIAN_BE8
+- scripts/config -e CPU_BE8_BOOT_LE
+- scripts/config -e PACKET
+- scripts/config -e UNIX
+- scripts/config -e NET_KEY
+- scripts/config -e INET
+- scripts/config -e IP_MULTICAST
+- scripts/config -e IP_PNP
+- scripts/config -e IP_PNP_DHCP
+- scripts/config -e IP_PNP_BOOTP
+- scripts/config --set-str UEVENT_HELPER_PATH ''
+- scripts/config -e DM_SNAPSHOT
+- scripts/config -e DEVTMPFS
+- scripts/config -e DEVTMPFS_MOUNT
+- scripts/config -e BLK_DEV_LOOP
+- scripts/config -e BTRFS_FS
+- scripts/config -e TMPFS
+- scripts/config -e FUSE_FS
+- scripts/config -e OVERLAY_FS
+- scripts/config -e IKCONFIG
+- scripts/config -e IKCONFIG_PROC
+- scripts/config -e CGROUPS
+- scripts/config -e CONFIG_FHANDLE
+- scripts/config -e DEVPTS_MULTIPLE_INSTANCES
+- scripts/config -e AUTOFS4_FS
+- scripts/config -e NETWORK_FILESYSTEMS
+- scripts/config -e NFS_FS
+- scripts/config -e NFS_V3
+- scripts/config -e ROOT_NFS
+- scripts/config -e LOCKD
+- scripts/config -e LOCKD_V4
+- scripts/config -e NFS_COMMON
+- scripts/config -e SUNRPC
+- scripts/config -e SYSVIPC
+- scripts/config -e CONFIG_POSIX_MQUEUE
+- scripts/config -e CPU_IDLE
+- scripts/config -e EXT4_FS
+- scripts/config -e EXT4_USE_FOR_EXT23
+- scripts/config -e MSDOS_FS
+- scripts/config -e VFAT_FS
+- scripts/config -e VLAN_8021Q
+- scripts/config -e BRIDGE_VLAN_FILTERING
+- scripts/config -e VETH
+- yes '' | make oldconfig
+build-commands:
+- make zImage vexpress-v2p-ca15_a7.dtb
+- |
+ cat arch/arm/boot/zImage arch/arm/boot/dts/vexpress-v2p-ca15_a7.dtb |
+ dd conv=sync bs=4 of=kernel.bin
+install-commands:
+- mkdir -p "$DESTDIR"/boot
+- cp kernel.bin "$DESTDIR"/boot/zImage
diff --git a/strata/bsp-armv8b64-generic.morph b/strata/bsp-armv8b64-generic.morph
new file mode 100644
index 00000000..a4d6645b
--- /dev/null
+++ b/strata/bsp-armv8b64-generic.morph
@@ -0,0 +1,16 @@
+name: bsp-armv8b64-generic
+kind: stratum
+description:
+- |
+ The set of platform specific components required for booting generic ARMv8
+ systems in big-endian mode, such as the 64-bit APM XGene ARM based m400
+ Moonshot cartridge.
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: linux-armv8b64-generic
+ morph: strata/bsp-armv8b64-generic/linux-armv8b64-generic.morph
+ repo: upstream:linux
+ ref: cd1397b1f6e429b82e94565b0af93c6bd44e4271
+ unpetrify-ref: baserock/bjdooks/m400-be3
+ build-depends: []
diff --git a/strata/bsp-armv8b64-generic/linux-armv8b64-generic.morph b/strata/bsp-armv8b64-generic/linux-armv8b64-generic.morph
new file mode 100644
index 00000000..9362eb6e
--- /dev/null
+++ b/strata/bsp-armv8b64-generic/linux-armv8b64-generic.morph
@@ -0,0 +1,278 @@
+name: linux-armv8b64-generic
+kind: chunk
+configure-commands:
+- make ARCH=arm64 defconfig
+- scripts/config -e PACKET
+- scripts/config -e SATA_AHCI
+- scripts/config -e ATA_GENERIC
+- scripts/config -e HAVE_IDE
+- scripts/config -d BLK_DEV_IDE_SATA
+- scripts/config -e BTRFS_FS
+- scripts/config -e BTRFS_FS_POSIX_ACL
+- scripts/config -e DEVTMPFS
+- scripts/config -e DEVTMPFS_MOUNT
+- scripts/config -e CONFIG_FHANDLE
+- scripts/config -e DEVPTS_MULTIPLE_INSTANCES
+- scripts/config -e CGROUPS
+- scripts/config -e AUTOFS4_FS
+- scripts/config --set-str UEVENT_HELPER_PATH ''
+- scripts/config -e EXT2_FS
+- scripts/config -e EXT2_FS_XATTR
+- scripts/config -e EXT2_FS_POSIX_ACL
+- scripts/config -e EXT2_FS_SECURITY
+- scripts/config -e EXT2_FS_XIP
+- scripts/config -e EXT3_FS
+- scripts/config -d EXT3_DEFAULTS_TO_ORDERED
+- scripts/config -e EXT3_FS_XATTR
+- scripts/config -e EXT3_FS_POSIX_ACL
+- scripts/config -e EXT3_FS_SECURITY
+- scripts/config -e EXT4_FS
+- scripts/config -e EXT4_FS_XATTR
+- scripts/config -e EXT4_FS_POSIX_ACL
+- scripts/config -e EXT4_FS_SECURITY
+- scripts/config -d EXT4_DEBUG
+- scripts/config -e XFS_FS
+- scripts/config -e LIBCRC32C
+- scripts/config -e FUSE_FS
+- scripts/config -e OVERLAY_FS
+- scripts/config -d JBD_DEBUG
+- scripts/config -d JBD2_DEBUG
+- scripts/config -e BLK_DEV_LOOP
+- scripts/config -d BLK_DEV_CRYPTOLOOP
+- scripts/config -e CEPH_FS
+- scripts/config --set-val BLK_DEV_LOOP_MIN_COUNT 8
+- scripts/config -e DM_SNAPSHOT
+- scripts/config -e VIRTIO
+- scripts/config -e VIRTIO_RING
+- scripts/config -e VIRTIO_PCI
+- scripts/config -e VIRTIO_BALLOON
+- scripts/config -e VIRTIO_BLK
+- scripts/config -e VIRTIO_NET
+- scripts/config -e VIRTIO_CONSOLE
+- scripts/config -e HW_RANDOM_VIRTIO
+- scripts/config -e 9P_FS
+- scripts/config -e 9P_FSCACHE
+- scripts/config -e 9P_FS_POSIX_ACL
+- scripts/config -e NET_9P
+- scripts/config -e NET_9P_VIRTIO
+- scripts/config -e R8169
+- scripts/config -e 8139TOO
+- scripts/config -e 8139CP
+- scripts/config -e IKCONFIG
+- scripts/config -e IKCONFIG_PROC
+- scripts/config -e SECCOMP
+- scripts/config -d DEBUG_STACK_TRACE
+- scripts/config -e NFSD
+- scripts/config -e NFSD_V3
+- scripts/config -e KVM
+- scripts/config -e TUN
+- scripts/config -e BRIDGE
+- scripts/config -e VHOST_NET
+- scripts/config -e NF_NAT
+- scripts/config -e IP_NF_NAT
+- scripts/config -e IP_NF_TARGET_MASQUERADE
+- scripts/config -e FB_VESA
+- scripts/config -e HOTPLUG_PCI
+- scripts/config -e HOTPLUG_PCI_ACPI
+- scripts/config -e VLAN_8021Q
+- scripts/config -e BRIDGE_VLAN_FILTERING
+- scripts/config -e IPV6
+- scripts/config -e BLK_DEV_NBD
+- scripts/config -e BRIDGE_NF_EBTABLES
+- scripts/config -e NETFILTER
+- scripts/config -e NETFILTER_ADVANCED
+- scripts/config -e NETFILTER_XT_MATCH_ADDRTYPE
+- scripts/config -e OPENVSWITCH
+- scripts/config -e OPENVSWITCH_VXLAN
+- scripts/config -e CONFIG_OPENVSWITCH_GRE
+- scripts/config -e NET_CLS_BASIC
+- scripts/config -e NET_SCH_INGRESS
+- scripts/config -e NET_ACT_POLICE
+- scripts/config -e NET_IPGRE_DEMUX
+- scripts/config -e NET_SCH_HTB
+- scripts/config -e NET_SCH_HFSC
+- scripts/config -e VXLAN
+- scripts/config -e VETH
+- scripts/config -e IP_NF_MATCH_AH
+- scripts/config -e IP_NF_MATCH_ECN
+- scripts/config -e IP_NF_MATCH_RPFILTER
+- scripts/config -e IP_NF_MATCH_TTL
+- scripts/config -e IP_NF_TARGET_SYNPROXY
+- scripts/config -e IP_NF_TARGET_NETMAP
+- scripts/config -e IP_NF_TARGET_REDIRECT
+- scripts/config -e IP_NF_TARGET_CLUSTERIP
+- scripts/config -e IP_NF_TARGET_ECN
+- scripts/config -e IP_NF_TARGET_TTL
+- scripts/config -e IP_NF_RAW
+- scripts/config -e IP_NF_SECURITY
+- scripts/config -e IP_NF_ARPTABLES
+- scripts/config -e NETFILTER_NETLINK_QUEUE
+- scripts/config -e SCSI_NETLINK
+- scripts/config -e NETFILTER_XT_TARGET_MARK
+- scripts/config -e NETFILTER_XT_SET
+- scripts/config -e IP_SET
+- scripts/config -e IP_SET_BITMAP_IP
+- scripts/config -e IP_SET_BITMAP_IPMAC
+- scripts/config -e IP_SET_BITMAP_PORT
+- scripts/config -e IP_SET_HASH_IP
+- scripts/config -e IP_SET_HASH_IPMARK
+- scripts/config -e IP_SET_HASH_IPPORT
+- scripts/config -e IP_SET_HASH_IPPORTIP
+- scripts/config -e IP_SET_HASH_IPPORTNET
+- scripts/config -e IP_SET_HASH_MAC
+- scripts/config -e IP_SET_HASH_NETPORTNET
+- scripts/config -e IP_SET_HASH_NET
+- scripts/config -e IP_SET_HASH_NETNET
+- scripts/config -e IP_SET_HASH_NETPORT
+- scripts/config -e IP_SET_HASH_NETIFACE
+- scripts/config -e IP_SET_LIST_SET
+- scripts/config -e NF_CONNTRACK_TIMEOUT
+- scripts/config -e NF_CONNTRACK_TIMESTAMP
+- scripts/config -e NF_CONNTRACK_EVENTS
+- scripts/config -e NF_CONNTRACK_LABELS
+- scripts/config -e NETFILTER_NETLINK_ACCT
+- scripts/config -e NETFILTER_NETLINK_QUEUE_CT
+- scripts/config -e NF_CT_PROTO_DCCP
+- scripts/config -e NF_CT_PROTO_GRE
+- scripts/config -e NF_CT_PROTO_SCTP
+- scripts/config -e NF_CT_PROTO_UDPLITE
+- scripts/config -e NF_CT_NETLINK_TIMEOUT
+- scripts/config -e NF_CT_NETLINK_HELPER
+- scripts/config -e NF_CONNTRACK_AMANDA
+- scripts/config -e NF_CONNTRACK_H323
+- scripts/config -e NF_CONNTRACK_BROADCAST
+- scripts/config -e NF_CONNTRACK_NETBIOS_NS
+- scripts/config -e NF_CONNTRACK_SNMP
+- scripts/config -e NF_CONNTRACK_PPTP
+- scripts/config -e NF_CONNTRACK_SANE
+- scripts/config -e NF_CONNTRACK_TFTP
+- scripts/config -e NF_LOG_COMMON
+- scripts/config -e NF_NAT_PROTO_DCCP
+- scripts/config -e NF_NAT_PROTO_UDPLITE
+- scripts/config -e NF_NAT_PROTO_SCTP
+- scripts/config -e NF_NAT_AMANDA
+- scripts/config -e NF_NAT_TFTP
+- scripts/config -e NF_TABLES
+- scripts/config -e NF_TABLES_INET
+- scripts/config -e NFT_COMPAT
+- scripts/config -e NFT_EXTHDR
+- scripts/config -e NFT_META
+- scripts/config -e NFT_CT
+- scripts/config -e NFT_LIMIT
+- scripts/config -e NFT_NAT
+- scripts/config -e NFT_QUEUE
+- scripts/config -e NFT_REJECT
+- scripts/config -e NFT_REJECT_INET
+- scripts/config -e NFT_RBTREE
+- scripts/config -e NFT_HASH
+- scripts/config -e NFT_COUNTER
+- scripts/config -e NFT_LOG
+- scripts/config -e NFT_MASQ
+- scripts/config -e NETFILTER_XT_CONNMARK
+- scripts/config -e NETFILTER_XT_TARGET_AUDIT
+- scripts/config -e NETFILTER_XT_TARGET_CHECKSUM
+- scripts/config -e NETFILTER_XT_TARGET_CLASSIFY
+- scripts/config -e NETFILTER_XT_TARGET_CT
+- scripts/config -e NETFILTER_XT_TARGET_DSCP
+- scripts/config -e NETFILTER_XT_TARGET_HMARK
+- scripts/config -e NETFILTER_XT_TARGET_LED
+- scripts/config -e NETFILTER_XT_TARGET_LOG
+- scripts/config -e NETFILTER_XT_TARGET_NFQUEUE
+- scripts/config -e NETFILTER_XT_TARGET_RATEEST
+- scripts/config -e NETFILTER_XT_TARGET_TPROXY
+- scripts/config -e NETFILTER_XT_TARGET_TCPOPTSTRIP
+- scripts/config -e NETFILTER_XT_TARGET_TEE
+- scripts/config -e NETFILTER_XT_TARGET_TRACE
+- scripts/config -e NETFILTER_XT_TARGET_IDLETIMER
+- scripts/config -e NETFILTER_XT_MATCH_BPF
+- scripts/config -e NETFILTER_XT_MATCH_CLUSTER
+- scripts/config -e NETFILTER_XT_MATCH_COMMENT
+- scripts/config -e NETFILTER_XT_MATCH_CONNBYTES
+- scripts/config -e NETFILTER_XT_MATCH_CONNLABEL
+- scripts/config -e NETFILTER_XT_MATCH_CONNLIMIT
+- scripts/config -e NETFILTER_XT_MATCH_CPU
+- scripts/config -e NETFILTER_XT_MATCH_DCCP
+- scripts/config -e NETFILTER_XT_MATCH_DEVGROUP
+- scripts/config -e NETFILTER_XT_MATCH_DSCP
+- scripts/config -e NETFILTER_XT_MATCH_ESP
+- scripts/config -e NETFILTER_XT_MATCH_HASHLIMIT
+- scripts/config -e NETFILTER_XT_MATCH_HELPER
+- scripts/config -e NETFILTER_XT_MATCH_IPCOMP
+- scripts/config -e NETFILTER_XT_MATCH_IPRANGE
+- scripts/config -e NETFILTER_XT_MATCH_IPVS
+- scripts/config -e NETFILTER_XT_MATCH_L2TP
+- scripts/config -e NETFILTER_XT_MATCH_LENGTH
+- scripts/config -e NETFILTER_XT_MATCH_LIMIT
+- scripts/config -e NETFILTER_XT_MATCH_MAC
+- scripts/config -e NETFILTER_XT_MATCH_MULTIPORT
+- scripts/config -e NETFILTER_XT_MATCH_NFACCT
+- scripts/config -e NETFILTER_XT_MATCH_OSF
+- scripts/config -e NETFILTER_XT_MATCH_OWNER
+- scripts/config -e NETFILTER_XT_MATCH_CGROUP
+- scripts/config -e NETFILTER_XT_MATCH_PHYSDEV
+- scripts/config -e NETFILTER_XT_MATCH_PKTTYPE
+- scripts/config -e NETFILTER_XT_MATCH_QUOTA
+- scripts/config -e NETFILTER_XT_MATCH_RATEEST
+- scripts/config -e NETFILTER_XT_MATCH_REALM
+- scripts/config -e NETFILTER_XT_MATCH_RECENT
+- scripts/config -e NETFILTER_XT_MATCH_SCTP
+- scripts/config -e NETFILTER_XT_MATCH_SOCKET
+- scripts/config -e NETFILTER_XT_MATCH_STATISTIC
+- scripts/config -e NETFILTER_XT_MATCH_STRING
+- scripts/config -e NETFILTER_XT_MATCH_TCPMSS
+- scripts/config -e NETFILTER_XT_MATCH_TIME
+- scripts/config -e NETFILTER_XT_MATCH_U32
+- scripts/config -e IP_VS
+- scripts/config -e BRIDGE_NETFILTER
+- scripts/config -e CRYPTO_CRC32C
+- scripts/config -e CONFIGFS_FS
+- scripts/config -e EXPERT
+- scripts/config -e TARGET_CORE
+- scripts/config -e ISCSI_TARGET
+- scripts/config -e TCM_IBLOCK
+- scripts/config -e TCM_FILEIO
+- scripts/config -e TCM_PSCSI
+- scripts/config -e TCM_USER
+- scripts/config -e CONFIG_UIO
+- scripts/config -e LOOPBACK_TARGET
+- scripts/config -e TCM_FC
+- scripts/config -e LIBFC
+- scripts/config -e SCSI_FC_ATTRS
+- scripts/config -e SCSI_ISCSI_ATTRS
+- scripts/config -e ISCSI_TCP
+- scripts/config -e SCSI_LOWLEVEL
+# Required for M400
+- scripts/config -e SATA_AHCI_PLATFORM
+- scripts/config -e AHCI_XGENE
+- scripts/config -e GENERIC_PHY
+- scripts/config -e PHY_XGENE
+- scripts/config -e NET_VENDOR_MELLANOX
+- scripts/config -e MLX4_EN
+- scripts/config -e MLX4_EN_DCB
+- scripts/config -e MLX4_EN_VXLAN
+- scripts/config -e MLX4_CORE
+- scripts/config -e MLX5_CORE
+- scripts/config -e NET_IP_TUNNEL
+- scripts/config -e VXLAN
+- scripts/config -e UIO
+- scripts/config -e UIO_PDRV_GENIRQ
+- scripts/config -e GPIO_DWAPB
+- scripts/config -e KEYBOARD_GPIO
+- scripts/config -e INPUT_POLLDEV
+- scripts/config -e KEYBOARD_GPIO_POLLED
+- scripts/config -d DMA_CMA
+- scripts/config -e POWER_RESET
+- scripts/config -e POWER_RESET_GPIO
+- scripts/config -e POWER_RESET_SYSCON
+- scripts/config -e POWER_AVS
+# Big endian kernel
+- scripts/config -e CPU_BIG_ENDIAN
+- yes '' | make ARCH=arm64 oldconfig
+build-commands:
+# Only generate the Image here; the uImage is generated at deployment
+# time, since mkimage may not be available on all build systems
+- make vmlinux dtbs $MAKEFLAGS
+install-commands:
+- mkdir -p "$DESTDIR"/boot
+- make install dtbs_install INSTALL_PATH="$DESTDIR/boot"
+- cp arch/arm64/boot/Image "$DESTDIR/boot/vmlinux"
diff --git a/strata/bsp-armv8l64-generic.morph b/strata/bsp-armv8l64-generic.morph
new file mode 100644
index 00000000..a5edb9db
--- /dev/null
+++ b/strata/bsp-armv8l64-generic.morph
@@ -0,0 +1,15 @@
+name: bsp-armv8l64-generic
+kind: stratum
+description:
+- |
+ The set of platform specific components required for booting
+ armv8l64-based systems, like the 64-bit APM XGene ARM based m400
+ Moonshot cartridge.
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: linux-armv8l64-generic
+ morph: strata/bsp-armv8l64-generic/linux-armv8l64-generic.morph
+ repo: upstream:linux
+ ref: 5f06398ae6a04f414932243de38b5cf3d264ff84
+ unpetrify-ref: baserock/apm-xgene-m400-moonshot-cartridge
diff --git a/strata/bsp-armv8l64-generic/linux-armv8l64-generic.morph b/strata/bsp-armv8l64-generic/linux-armv8l64-generic.morph
new file mode 100644
index 00000000..dd54b4e4
--- /dev/null
+++ b/strata/bsp-armv8l64-generic/linux-armv8l64-generic.morph
@@ -0,0 +1,276 @@
+name: linux-armv8l64-generic
+kind: chunk
+configure-commands:
+- make ARCH=arm64 defconfig
+- scripts/config -e PACKET
+- scripts/config -e SATA_AHCI
+- scripts/config -e ATA_GENERIC
+- scripts/config -e HAVE_IDE
+- scripts/config -d BLK_DEV_IDE_SATA
+- scripts/config -e BTRFS_FS
+- scripts/config -e BTRFS_FS_POSIX_ACL
+- scripts/config -e DEVTMPFS
+- scripts/config -e DEVTMPFS_MOUNT
+- scripts/config -e CONFIG_FHANDLE
+- scripts/config -e DEVPTS_MULTIPLE_INSTANCES
+- scripts/config -e CGROUPS
+- scripts/config -e AUTOFS4_FS
+- scripts/config --set-str UEVENT_HELPER_PATH ''
+- scripts/config -e EXT2_FS
+- scripts/config -e EXT2_FS_XATTR
+- scripts/config -e EXT2_FS_POSIX_ACL
+- scripts/config -e EXT2_FS_SECURITY
+- scripts/config -e EXT2_FS_XIP
+- scripts/config -e EXT3_FS
+- scripts/config -d EXT3_DEFAULTS_TO_ORDERED
+- scripts/config -e EXT3_FS_XATTR
+- scripts/config -e EXT3_FS_POSIX_ACL
+- scripts/config -e EXT3_FS_SECURITY
+- scripts/config -e EXT4_FS
+- scripts/config -e EXT4_FS_XATTR
+- scripts/config -e EXT4_FS_POSIX_ACL
+- scripts/config -e EXT4_FS_SECURITY
+- scripts/config -d EXT4_DEBUG
+- scripts/config -e FUSE_FS
+- scripts/config -e OVERLAY_FS
+- scripts/config -e XFS_FS
+- scripts/config -e LIBCRC32C
+- scripts/config -d JBD_DEBUG
+- scripts/config -d JBD2_DEBUG
+- scripts/config -e BLK_DEV_LOOP
+- scripts/config -d BLK_DEV_CRYPTOLOOP
+- scripts/config -e CEPH_FS
+- scripts/config --set-val BLK_DEV_LOOP_MIN_COUNT 8
+- scripts/config -e DM_SNAPSHOT
+- scripts/config -e VIRTIO
+- scripts/config -e VIRTIO_RING
+- scripts/config -e VIRTIO_PCI
+- scripts/config -e VIRTIO_BALLOON
+- scripts/config -e VIRTIO_BLK
+- scripts/config -e VIRTIO_NET
+- scripts/config -e VIRTIO_CONSOLE
+- scripts/config -e HW_RANDOM_VIRTIO
+- scripts/config -e 9P_FS
+- scripts/config -e 9P_FSCACHE
+- scripts/config -e 9P_FS_POSIX_ACL
+- scripts/config -e NET_9P
+- scripts/config -e NET_9P_VIRTIO
+- scripts/config -e R8169
+- scripts/config -e 8139TOO
+- scripts/config -e 8139CP
+- scripts/config -e IKCONFIG
+- scripts/config -e IKCONFIG_PROC
+- scripts/config -e SECCOMP
+- scripts/config -d DEBUG_STACK_TRACE
+- scripts/config -e NFSD
+- scripts/config -e NFSD_V3
+- scripts/config -e KVM
+- scripts/config -e TUN
+- scripts/config -e BRIDGE
+- scripts/config -e VHOST_NET
+- scripts/config -e NF_NAT
+- scripts/config -e IP_NF_NAT
+- scripts/config -e IP_NF_TARGET_MASQUERADE
+- scripts/config -e FB_VESA
+- scripts/config -e HOTPLUG_PCI
+- scripts/config -e HOTPLUG_PCI_ACPI
+- scripts/config -e VLAN_8021Q
+- scripts/config -e BRIDGE_VLAN_FILTERING
+- scripts/config -e IPV6
+- scripts/config -e BLK_DEV_NBD
+- scripts/config -e BRIDGE_NF_EBTABLES
+- scripts/config -e NETFILTER
+- scripts/config -e NETFILTER_ADVANCED
+- scripts/config -e NETFILTER_XT_MATCH_ADDRTYPE
+- scripts/config -e OPENVSWITCH
+- scripts/config -e OPENVSWITCH_VXLAN
+- scripts/config -e CONFIG_OPENVSWITCH_GRE
+- scripts/config -e NET_CLS_BASIC
+- scripts/config -e NET_SCH_INGRESS
+- scripts/config -e NET_ACT_POLICE
+- scripts/config -e NET_IPGRE_DEMUX
+- scripts/config -e NET_SCH_HTB
+- scripts/config -e NET_SCH_HFSC
+- scripts/config -e VXLAN
+- scripts/config -e VETH
+- scripts/config -e IP_NF_MATCH_AH
+- scripts/config -e IP_NF_MATCH_ECN
+- scripts/config -e IP_NF_MATCH_RPFILTER
+- scripts/config -e IP_NF_MATCH_TTL
+- scripts/config -e IP_NF_TARGET_SYNPROXY
+- scripts/config -e IP_NF_TARGET_NETMAP
+- scripts/config -e IP_NF_TARGET_REDIRECT
+- scripts/config -e IP_NF_TARGET_CLUSTERIP
+- scripts/config -e IP_NF_TARGET_ECN
+- scripts/config -e IP_NF_TARGET_TTL
+- scripts/config -e IP_NF_RAW
+- scripts/config -e IP_NF_SECURITY
+- scripts/config -e IP_NF_ARPTABLES
+- scripts/config -e NETFILTER_NETLINK_QUEUE
+- scripts/config -e SCSI_NETLINK
+- scripts/config -e NETFILTER_XT_TARGET_MARK
+- scripts/config -e NETFILTER_XT_SET
+- scripts/config -e IP_SET
+- scripts/config -e IP_SET_BITMAP_IP
+- scripts/config -e IP_SET_BITMAP_IPMAC
+- scripts/config -e IP_SET_BITMAP_PORT
+- scripts/config -e IP_SET_HASH_IP
+- scripts/config -e IP_SET_HASH_IPMARK
+- scripts/config -e IP_SET_HASH_IPPORT
+- scripts/config -e IP_SET_HASH_IPPORTIP
+- scripts/config -e IP_SET_HASH_IPPORTNET
+- scripts/config -e IP_SET_HASH_MAC
+- scripts/config -e IP_SET_HASH_NETPORTNET
+- scripts/config -e IP_SET_HASH_NET
+- scripts/config -e IP_SET_HASH_NETNET
+- scripts/config -e IP_SET_HASH_NETPORT
+- scripts/config -e IP_SET_HASH_NETIFACE
+- scripts/config -e IP_SET_LIST_SET
+- scripts/config -e NF_CONNTRACK_TIMEOUT
+- scripts/config -e NF_CONNTRACK_TIMESTAMP
+- scripts/config -e NF_CONNTRACK_EVENTS
+- scripts/config -e NF_CONNTRACK_LABELS
+- scripts/config -e NETFILTER_NETLINK_ACCT
+- scripts/config -e NETFILTER_NETLINK_QUEUE_CT
+- scripts/config -e NF_CT_PROTO_DCCP
+- scripts/config -e NF_CT_PROTO_GRE
+- scripts/config -e NF_CT_PROTO_SCTP
+- scripts/config -e NF_CT_PROTO_UDPLITE
+- scripts/config -e NF_CT_NETLINK_TIMEOUT
+- scripts/config -e NF_CT_NETLINK_HELPER
+- scripts/config -e NF_CONNTRACK_AMANDA
+- scripts/config -e NF_CONNTRACK_H323
+- scripts/config -e NF_CONNTRACK_BROADCAST
+- scripts/config -e NF_CONNTRACK_NETBIOS_NS
+- scripts/config -e NF_CONNTRACK_SNMP
+- scripts/config -e NF_CONNTRACK_PPTP
+- scripts/config -e NF_CONNTRACK_SANE
+- scripts/config -e NF_CONNTRACK_TFTP
+- scripts/config -e NF_LOG_COMMON
+- scripts/config -e NF_NAT_PROTO_DCCP
+- scripts/config -e NF_NAT_PROTO_UDPLITE
+- scripts/config -e NF_NAT_PROTO_SCTP
+- scripts/config -e NF_NAT_AMANDA
+- scripts/config -e NF_NAT_TFTP
+- scripts/config -e NF_TABLES
+- scripts/config -e NF_TABLES_INET
+- scripts/config -e NFT_COMPAT
+- scripts/config -e NFT_EXTHDR
+- scripts/config -e NFT_META
+- scripts/config -e NFT_CT
+- scripts/config -e NFT_LIMIT
+- scripts/config -e NFT_NAT
+- scripts/config -e NFT_QUEUE
+- scripts/config -e NFT_REJECT
+- scripts/config -e NFT_REJECT_INET
+- scripts/config -e NFT_RBTREE
+- scripts/config -e NFT_HASH
+- scripts/config -e NFT_COUNTER
+- scripts/config -e NFT_LOG
+- scripts/config -e NFT_MASQ
+- scripts/config -e NETFILTER_XT_CONNMARK
+- scripts/config -e NETFILTER_XT_TARGET_AUDIT
+- scripts/config -e NETFILTER_XT_TARGET_CHECKSUM
+- scripts/config -e NETFILTER_XT_TARGET_CLASSIFY
+- scripts/config -e NETFILTER_XT_TARGET_CT
+- scripts/config -e NETFILTER_XT_TARGET_DSCP
+- scripts/config -e NETFILTER_XT_TARGET_HMARK
+- scripts/config -e NETFILTER_XT_TARGET_LED
+- scripts/config -e NETFILTER_XT_TARGET_LOG
+- scripts/config -e NETFILTER_XT_TARGET_NFQUEUE
+- scripts/config -e NETFILTER_XT_TARGET_RATEEST
+- scripts/config -e NETFILTER_XT_TARGET_TPROXY
+- scripts/config -e NETFILTER_XT_TARGET_TCPOPTSTRIP
+- scripts/config -e NETFILTER_XT_TARGET_TEE
+- scripts/config -e NETFILTER_XT_TARGET_TRACE
+- scripts/config -e NETFILTER_XT_TARGET_IDLETIMER
+- scripts/config -e NETFILTER_XT_MATCH_BPF
+- scripts/config -e NETFILTER_XT_MATCH_CLUSTER
+- scripts/config -e NETFILTER_XT_MATCH_COMMENT
+- scripts/config -e NETFILTER_XT_MATCH_CONNBYTES
+- scripts/config -e NETFILTER_XT_MATCH_CONNLABEL
+- scripts/config -e NETFILTER_XT_MATCH_CONNLIMIT
+- scripts/config -e NETFILTER_XT_MATCH_CPU
+- scripts/config -e NETFILTER_XT_MATCH_DCCP
+- scripts/config -e NETFILTER_XT_MATCH_DEVGROUP
+- scripts/config -e NETFILTER_XT_MATCH_DSCP
+- scripts/config -e NETFILTER_XT_MATCH_ESP
+- scripts/config -e NETFILTER_XT_MATCH_HASHLIMIT
+- scripts/config -e NETFILTER_XT_MATCH_HELPER
+- scripts/config -e NETFILTER_XT_MATCH_IPCOMP
+- scripts/config -e NETFILTER_XT_MATCH_IPRANGE
+- scripts/config -e NETFILTER_XT_MATCH_IPVS
+- scripts/config -e NETFILTER_XT_MATCH_L2TP
+- scripts/config -e NETFILTER_XT_MATCH_LENGTH
+- scripts/config -e NETFILTER_XT_MATCH_LIMIT
+- scripts/config -e NETFILTER_XT_MATCH_MAC
+- scripts/config -e NETFILTER_XT_MATCH_MULTIPORT
+- scripts/config -e NETFILTER_XT_MATCH_NFACCT
+- scripts/config -e NETFILTER_XT_MATCH_OSF
+- scripts/config -e NETFILTER_XT_MATCH_OWNER
+- scripts/config -e NETFILTER_XT_MATCH_CGROUP
+- scripts/config -e NETFILTER_XT_MATCH_PHYSDEV
+- scripts/config -e NETFILTER_XT_MATCH_PKTTYPE
+- scripts/config -e NETFILTER_XT_MATCH_QUOTA
+- scripts/config -e NETFILTER_XT_MATCH_RATEEST
+- scripts/config -e NETFILTER_XT_MATCH_REALM
+- scripts/config -e NETFILTER_XT_MATCH_RECENT
+- scripts/config -e NETFILTER_XT_MATCH_SCTP
+- scripts/config -e NETFILTER_XT_MATCH_SOCKET
+- scripts/config -e NETFILTER_XT_MATCH_STATISTIC
+- scripts/config -e NETFILTER_XT_MATCH_STRING
+- scripts/config -e NETFILTER_XT_MATCH_TCPMSS
+- scripts/config -e NETFILTER_XT_MATCH_TIME
+- scripts/config -e NETFILTER_XT_MATCH_U32
+- scripts/config -e IP_VS
+- scripts/config -e BRIDGE_NETFILTER
+- scripts/config -e CRYPTO_CRC32C
+- scripts/config -e CONFIGFS_FS
+- scripts/config -e EXPERT
+- scripts/config -e TARGET_CORE
+- scripts/config -e ISCSI_TARGET
+- scripts/config -e TCM_IBLOCK
+- scripts/config -e TCM_FILEIO
+- scripts/config -e TCM_PSCSI
+- scripts/config -e TCM_USER
+- scripts/config -e CONFIG_UIO
+- scripts/config -e LOOPBACK_TARGET
+- scripts/config -e TCM_FC
+- scripts/config -e LIBFC
+- scripts/config -e SCSI_FC_ATTRS
+- scripts/config -e SCSI_ISCSI_ATTRS
+- scripts/config -e ISCSI_TCP
+- scripts/config -e SCSI_LOWLEVEL
+# Required for M400
+- scripts/config -e SATA_AHCI_PLATFORM
+- scripts/config -e AHCI_XGENE
+- scripts/config -e GENERIC_PHY
+- scripts/config -e PHY_XGENE
+- scripts/config -e NET_VENDOR_MELLANOX
+- scripts/config -e MLX4_EN
+- scripts/config -e MLX4_EN_DCB
+- scripts/config -e MLX4_EN_VXLAN
+- scripts/config -e MLX4_CORE
+- scripts/config -e MLX5_CORE
+- scripts/config -e NET_IP_TUNNEL
+- scripts/config -e VXLAN
+- scripts/config -e UIO
+- scripts/config -e UIO_PDRV_GENIRQ
+- scripts/config -e GPIO_DWAPB
+- scripts/config -e KEYBOARD_GPIO
+- scripts/config -e INPUT_POLLDEV
+- scripts/config -e KEYBOARD_GPIO_POLLED
+- scripts/config -d DMA_CMA
+- scripts/config -e POWER_RESET
+- scripts/config -e POWER_RESET_GPIO
+- scripts/config -e POWER_RESET_SYSCON
+- scripts/config -e POWER_AVS
+- yes '' | make ARCH=arm64 oldconfig
+build-commands:
+# Only generate Image here; uImage is generated at deployment
+# because mkimage may not be available on build systems (see the sketch after this file).
+- make vmlinux dtbs $MAKEFLAGS
+install-commands:
+- mkdir -p "$DESTDIR"/boot
+- make install dtbs_install INSTALL_PATH="$DESTDIR/boot"
+- cp arch/arm64/boot/Image "$DESTDIR/boot/vmlinux"
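The comment in build-commands above defers uImage generation to deployment. A minimal sketch of that deployment-time step, assuming mkimage (from a u-boot chunk) is present on the deployed system; the load/entry address 0x80080000 and the output path are assumptions for illustration only:

    # Hypothetical deployment-time command, not part of this patch.
    # Wraps the kernel image installed above (/boot/vmlinux) for U-Boot.
    mkimage -A arm64 -O linux -T kernel -C none \
            -a 0x80080000 -e 0x80080000 \
            -n 'linux-armv8l64-generic' \
            -d /boot/vmlinux /boot/uImage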
diff --git a/strata/bsp-jetson.morph b/strata/bsp-jetson.morph
new file mode 100644
index 00000000..48d7218c
--- /dev/null
+++ b/strata/bsp-jetson.morph
@@ -0,0 +1,43 @@
+name: bsp-jetson
+kind: stratum
+description: |
+ Platform-specific chunks for NVIDIA Jetson TK1
+
+ Support for the Tegra platform is still being developed. A very recent
+ kernel with some patches is currently required to get a fully
+ accelerated graphics stack on this platform. Note that SATA and
+ networking will not work unless the bootloader on your board has been
+ flashed with the version of U-Boot built in this stratum. Also, the
+ CPUs on the Jetson run at a low clock speed with this version of Linux.
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: device-tree-compiler
+ morph: strata/bsp-jetson/device-tree-compiler.morph
+ repo: upstream:device-tree-compiler
+ ref: 302fca9f4c283e1994cf0a5a9ce1cf43ca15e6d2
+ unpetrify-ref: v1.4.1
+- name: u-boot@jetson
+ morph: strata/bsp-jetson/u-boot@jetson.morph
+ repo: upstream:u-boot
+ ref: f33cdaa4c3da4a8fd35aa2f9a3172f31cc887b35
+ unpetrify-ref: v2015.04
+ build-depends:
+ - device-tree-compiler
+- name: linux-jetson-tk1
+ morph: strata/bsp-jetson/linux-jetson-tk1.morph
+ repo: upstream:linux
+ ref: a6df05fd37874251833bd6f18404cd2efaa62603
+ unpetrify-ref: baserock/tegra/4.0-with-cpufreq-gpu
+- name: nouveau-drm
+ morph: strata/bsp-jetson/nouveau-drm.morph
+ repo: upstream:nouveau
+ ref: 73de0a7758eb447d6157d2ed79c84d1a4e6ca09b
+ unpetrify-ref: baserock/tegra/4.0
+ build-depends:
+ - linux-jetson-tk1
+- name: linux-firmware-jetson
+ morph: strata/bsp-jetson/linux-firmware-jetson.morph
+ repo: upstream:linux-firmware
+ ref: ff2afc9d9649cab2a1f79b8d4eeb0cc0100a2f85
+ unpetrify-ref: baserock/tegra/4.0
diff --git a/strata/bsp-jetson/bsp-support.morph b/strata/bsp-jetson/bsp-support.morph
new file mode 100644
index 00000000..5adb02e4
--- /dev/null
+++ b/strata/bsp-jetson/bsp-support.morph
@@ -0,0 +1,6 @@
+name: bsp-support
+kind: chunk
+install-commands:
+- install -o 0 -g 0 -m 644 -D nv-ondemand-cpufreq-governor.service "$DESTDIR/usr/lib/systemd/system/nv-ondemand-cpufreq-governor.service"
+- install -d "$DESTDIR/usr/lib/systemd/system/sysinit.target.wants"
+- ln -s /usr/lib/systemd/system/nv-ondemand-cpufreq-governor.service "$DESTDIR/usr/lib/systemd/system/sysinit.target.wants/nv-ondemand-cpufreq-governor.service"
diff --git a/strata/bsp-jetson/device-tree-compiler.morph b/strata/bsp-jetson/device-tree-compiler.morph
new file mode 100644
index 00000000..8abfafc8
--- /dev/null
+++ b/strata/bsp-jetson/device-tree-compiler.morph
@@ -0,0 +1,6 @@
+name: device-tree-compiler
+kind: chunk
+build-commands:
+- make all
+install-commands:
+- make install DESTDIR="$DESTDIR" PREFIX="$PREFIX"
diff --git a/strata/bsp-jetson/linux-firmware-jetson.morph b/strata/bsp-jetson/linux-firmware-jetson.morph
new file mode 100644
index 00000000..e24e7ec7
--- /dev/null
+++ b/strata/bsp-jetson/linux-firmware-jetson.morph
@@ -0,0 +1,15 @@
+name: linux-firmware-jetson
+kind: chunk
+install-commands:
+- install -d "$DESTDIR/lib/firmware/nvidia/tegra124"
+- install -d "$DESTDIR/lib/firmware/nouveau"
+- install -m644 nouveau/nvea_fuc409c "$DESTDIR/lib/firmware/nouveau"
+- install -m644 nouveau/nvea_fuc409d "$DESTDIR/lib/firmware/nouveau"
+- install -m644 nouveau/nvea_fuc41ac "$DESTDIR/lib/firmware/nouveau"
+- install -m644 nouveau/nvea_fuc41ad "$DESTDIR/lib/firmware/nouveau"
+- install -m644 nouveau/nvea_bundle "$DESTDIR/lib/firmware/nouveau"
+- install -m644 nouveau/nvea_method "$DESTDIR/lib/firmware/nouveau"
+- install -m644 nouveau/nvea_sw_ctx "$DESTDIR/lib/firmware/nouveau"
+- install -m644 nouveau/nvea_sw_nonctx "$DESTDIR/lib/firmware/nouveau"
+- install -m644 nvidia/tegra124/xusb.bin "$DESTDIR/lib/firmware/nvidia/tegra124"
+- install -m644 LICENCE.nvidia "$DESTDIR/lib/firmware"
diff --git a/strata/bsp-jetson/linux-jetson-tk1.morph b/strata/bsp-jetson/linux-jetson-tk1.morph
new file mode 100644
index 00000000..784a39c4
--- /dev/null
+++ b/strata/bsp-jetson/linux-jetson-tk1.morph
@@ -0,0 +1,248 @@
+name: linux-jetson-tk1
+kind: chunk
+configure-commands:
+- make ARCH=arm tegra_defconfig
+- scripts/config -d KERNEL_LZO
+- scripts/config -e KERNEL_GZIP
+- scripts/config -e NAMESPACES
+- scripts/config -e PACKET
+- scripts/config -e SATA_AHCI
+- scripts/config -e ATA_GENERIC
+- scripts/config -e HAVE_IDE
+- scripts/config -d BLK_DEV_IDE_SATA
+- scripts/config -e BTRFS_FS
+- scripts/config -e BTRFS_FS_POSIX_ACL
+- scripts/config -e DEVTMPFS
+- scripts/config -e DEVTMPFS_MOUNT
+- scripts/config -e CONFIG_FHANDLE
+- scripts/config -e DEVPTS_MULTIPLE_INSTANCES
+- scripts/config -e CGROUPS
+- scripts/config -e AUTOFS4_FS
+- scripts/config --set-str UEVENT_HELPER_PATH ''
+- scripts/config -e EXT2_FS
+- scripts/config -e EXT2_FS_XATTR
+- scripts/config -e EXT2_FS_POSIX_ACL
+- scripts/config -e EXT2_FS_SECURITY
+- scripts/config -e EXT2_FS_XIP
+- scripts/config -e EXT3_FS
+- scripts/config -d EXT3_DEFAULTS_TO_ORDERED
+- scripts/config -e EXT3_FS_XATTR
+- scripts/config -e EXT3_FS_POSIX_ACL
+- scripts/config -e EXT3_FS_SECURITY
+- scripts/config -e EXT4_FS
+- scripts/config -e EXT4_FS_XATTR
+- scripts/config -e EXT4_FS_POSIX_ACL
+- scripts/config -e EXT4_FS_SECURITY
+- scripts/config -d EXT4_DEBUG
+- scripts/config -e FUSE_FS
+- scripts/config -e OVERLAY_FS
+- scripts/config -e XFS_FS
+- scripts/config -e LIBCRC32C
+- scripts/config -d JBD_DEBUG
+- scripts/config -d JBD2_DEBUG
+- scripts/config -e BLK_DEV_LOOP
+- scripts/config -d BLK_DEV_CRYPTOLOOP
+- scripts/config --set-val BLK_DEV_LOOP_MIN_COUNT 8
+- scripts/config -e DM_SNAPSHOT
+- scripts/config -e IKCONFIG
+- scripts/config -e IKCONFIG_PROC
+- scripts/config -e SECCOMP
+- scripts/config -d DEBUG_STACK_TRACE
+- scripts/config -e NFSD
+- scripts/config -e NFSD_V3
+- scripts/config -e DRM_TEGRA_STAGING
+- scripts/config -m DRM_NOUVEAU
+- scripts/config -e VLAN_8021Q
+- scripts/config -e BRIDGE_VLAN_FILTERING
+- scripts/config -e BLK_DEV_NBD
+- scripts/config -e BRIDGE_NF_EBTABLES
+- scripts/config -e NETFILTER
+- scripts/config -e NETFILTER_ADVANCED
+- scripts/config -e NETFILTER_XT_MATCH_ADDRTYPE
+- scripts/config -e OPENVSWITCH
+- scripts/config -e OPENVSWITCH_VXLAN
+- scripts/config -e CONFIG_OPENVSWITCH_GRE
+- scripts/config -e NET_CLS_BASIC
+- scripts/config -e NET_SCH_INGRESS
+- scripts/config -e NET_ACT_POLICE
+- scripts/config -e NET_IPGRE_DEMUX
+- scripts/config -e NET_SCH_HTB
+- scripts/config -e NET_SCH_HFSC
+- scripts/config -e VXLAN
+- scripts/config -e VETH
+- scripts/config -e IP_NF_MATCH_AH
+- scripts/config -e IP_NF_MATCH_ECN
+- scripts/config -e IP_NF_MATCH_RPFILTER
+- scripts/config -e IP_NF_MATCH_TTL
+- scripts/config -e IP_NF_TARGET_SYNPROXY
+- scripts/config -e IP_NF_TARGET_NETMAP
+- scripts/config -e IP_NF_TARGET_REDIRECT
+- scripts/config -e IP_NF_TARGET_CLUSTERIP
+- scripts/config -e IP_NF_TARGET_ECN
+- scripts/config -e IP_NF_TARGET_TTL
+- scripts/config -e IP_NF_RAW
+- scripts/config -e IP_NF_SECURITY
+- scripts/config -e IP_NF_ARPTABLES
+- scripts/config -e NETFILTER_NETLINK_QUEUE
+- scripts/config -e SCSI_NETLINK
+- scripts/config -e NETFILTER_XT_TARGET_MARK
+- scripts/config -e NETFILTER_XT_SET
+- scripts/config -e IP_SET
+- scripts/config -e IP_SET_BITMAP_IP
+- scripts/config -e IP_SET_BITMAP_IPMAC
+- scripts/config -e IP_SET_BITMAP_PORT
+- scripts/config -e IP_SET_HASH_IP
+- scripts/config -e IP_SET_HASH_IPMARK
+- scripts/config -e IP_SET_HASH_IPPORT
+- scripts/config -e IP_SET_HASH_IPPORTIP
+- scripts/config -e IP_SET_HASH_IPPORTNET
+- scripts/config -e IP_SET_HASH_MAC
+- scripts/config -e IP_SET_HASH_NETPORTNET
+- scripts/config -e IP_SET_HASH_NET
+- scripts/config -e IP_SET_HASH_NETNET
+- scripts/config -e IP_SET_HASH_NETPORT
+- scripts/config -e IP_SET_HASH_NETIFACE
+- scripts/config -e IP_SET_LIST_SET
+- scripts/config -e NF_CONNTRACK_TIMEOUT
+- scripts/config -e NF_CONNTRACK_TIMESTAMP
+- scripts/config -e NF_CONNTRACK_EVENTS
+- scripts/config -e NF_CONNTRACK_LABELS
+- scripts/config -e NETFILTER_NETLINK_ACCT
+- scripts/config -e NETFILTER_NETLINK_QUEUE_CT
+- scripts/config -e NF_CT_PROTO_DCCP
+- scripts/config -e NF_CT_PROTO_GRE
+- scripts/config -e NF_CT_PROTO_SCTP
+- scripts/config -e NF_CT_PROTO_UDPLITE
+- scripts/config -e NF_CT_NETLINK_TIMEOUT
+- scripts/config -e NF_CT_NETLINK_HELPER
+- scripts/config -e NF_CONNTRACK_AMANDA
+- scripts/config -e NF_CONNTRACK_H323
+- scripts/config -e NF_CONNTRACK_BROADCAST
+- scripts/config -e NF_CONNTRACK_NETBIOS_NS
+- scripts/config -e NF_CONNTRACK_SNMP
+- scripts/config -e NF_CONNTRACK_PPTP
+- scripts/config -e NF_CONNTRACK_SANE
+- scripts/config -e NF_CONNTRACK_TFTP
+- scripts/config -e NF_LOG_COMMON
+- scripts/config -e NF_NAT_PROTO_DCCP
+- scripts/config -e NF_NAT_PROTO_UDPLITE
+- scripts/config -e NF_NAT_PROTO_SCTP
+- scripts/config -e NF_NAT_AMANDA
+- scripts/config -e NF_NAT_TFTP
+- scripts/config -e NF_TABLES
+- scripts/config -e NF_TABLES_INET
+- scripts/config -e NFT_COMPAT
+- scripts/config -e NFT_EXTHDR
+- scripts/config -e NFT_META
+- scripts/config -e NFT_CT
+- scripts/config -e NFT_LIMIT
+- scripts/config -e NFT_NAT
+- scripts/config -e NFT_QUEUE
+- scripts/config -e NFT_REJECT
+- scripts/config -e NFT_REJECT_INET
+- scripts/config -e NFT_RBTREE
+- scripts/config -e NFT_HASH
+- scripts/config -e NFT_COUNTER
+- scripts/config -e NFT_LOG
+- scripts/config -e NFT_MASQ
+- scripts/config -e NETFILTER_XT_CONNMARK
+- scripts/config -e NETFILTER_XT_TARGET_AUDIT
+- scripts/config -e NETFILTER_XT_TARGET_CHECKSUM
+- scripts/config -e NETFILTER_XT_TARGET_CLASSIFY
+- scripts/config -e NETFILTER_XT_TARGET_CT
+- scripts/config -e NETFILTER_XT_TARGET_DSCP
+- scripts/config -e NETFILTER_XT_TARGET_HMARK
+- scripts/config -e NETFILTER_XT_TARGET_LED
+- scripts/config -e NETFILTER_XT_TARGET_LOG
+- scripts/config -e NETFILTER_XT_TARGET_NFQUEUE
+- scripts/config -e NETFILTER_XT_TARGET_RATEEST
+- scripts/config -e NETFILTER_XT_TARGET_TPROXY
+- scripts/config -e NETFILTER_XT_TARGET_TCPOPTSTRIP
+- scripts/config -e NETFILTER_XT_TARGET_TEE
+- scripts/config -e NETFILTER_XT_TARGET_TRACE
+- scripts/config -e NETFILTER_XT_TARGET_IDLETIMER
+- scripts/config -e NETFILTER_XT_MATCH_BPF
+- scripts/config -e NETFILTER_XT_MATCH_CLUSTER
+- scripts/config -e NETFILTER_XT_MATCH_COMMENT
+- scripts/config -e NETFILTER_XT_MATCH_CONNBYTES
+- scripts/config -e NETFILTER_XT_MATCH_CONNLABEL
+- scripts/config -e NETFILTER_XT_MATCH_CONNLIMIT
+- scripts/config -e NETFILTER_XT_MATCH_CPU
+- scripts/config -e NETFILTER_XT_MATCH_DCCP
+- scripts/config -e NETFILTER_XT_MATCH_DEVGROUP
+- scripts/config -e NETFILTER_XT_MATCH_DSCP
+- scripts/config -e NETFILTER_XT_MATCH_ESP
+- scripts/config -e NETFILTER_XT_MATCH_HASHLIMIT
+- scripts/config -e NETFILTER_XT_MATCH_HELPER
+- scripts/config -e NETFILTER_XT_MATCH_IPCOMP
+- scripts/config -e NETFILTER_XT_MATCH_IPRANGE
+- scripts/config -e NETFILTER_XT_MATCH_IPVS
+- scripts/config -e NETFILTER_XT_MATCH_L2TP
+- scripts/config -e NETFILTER_XT_MATCH_LENGTH
+- scripts/config -e NETFILTER_XT_MATCH_LIMIT
+- scripts/config -e NETFILTER_XT_MATCH_MAC
+- scripts/config -e NETFILTER_XT_MATCH_MULTIPORT
+- scripts/config -e NETFILTER_XT_MATCH_NFACCT
+- scripts/config -e NETFILTER_XT_MATCH_OSF
+- scripts/config -e NETFILTER_XT_MATCH_OWNER
+- scripts/config -e NETFILTER_XT_MATCH_CGROUP
+- scripts/config -e NETFILTER_XT_MATCH_PHYSDEV
+- scripts/config -e NETFILTER_XT_MATCH_PKTTYPE
+- scripts/config -e NETFILTER_XT_MATCH_QUOTA
+- scripts/config -e NETFILTER_XT_MATCH_RATEEST
+- scripts/config -e NETFILTER_XT_MATCH_REALM
+- scripts/config -e NETFILTER_XT_MATCH_RECENT
+- scripts/config -e NETFILTER_XT_MATCH_SCTP
+- scripts/config -e NETFILTER_XT_MATCH_SOCKET
+- scripts/config -e NETFILTER_XT_MATCH_STATISTIC
+- scripts/config -e NETFILTER_XT_MATCH_STRING
+- scripts/config -e NETFILTER_XT_MATCH_TCPMSS
+- scripts/config -e NETFILTER_XT_MATCH_TIME
+- scripts/config -e NETFILTER_XT_MATCH_U32
+- scripts/config -e IP_VS
+- scripts/config -e BRIDGE_NETFILTER
+- scripts/config -e CRYPTO_CRC32C
+- scripts/config -e CONFIGFS_FS
+- scripts/config -e EXPERT
+- scripts/config -e TARGET_CORE
+- scripts/config -e ISCSI_TARGET
+- scripts/config -e TCM_IBLOCK
+- scripts/config -e TCM_FILEIO
+- scripts/config -e TCM_PSCSI
+- scripts/config -e TCM_USER
+- scripts/config -e CONFIG_UIO
+- scripts/config -e LOOPBACK_TARGET
+- scripts/config -e TCM_FC
+- scripts/config -e LIBFC
+- scripts/config -e SCSI_FC_ATTRS
+- scripts/config -e SCSI_ISCSI_ATTRS
+- scripts/config -e ISCSI_TCP
+- scripts/config -e SCSI_LOWLEVEL
+- scripts/config -m DRM_NOUVEAU
+- scripts/config -e NOUVEAU_PLATFORM_DRIVER
+- scripts/config -e DRM_TEGRA_STAGING
+- scripts/config --set-val NOUVEAU_DEBUG 5
+- scripts/config --set-val NOUVEAU_DEBUG_DEFAULT 3
+- yes '' | make ARCH=arm oldconfig
+build-commands:
+- make $MAKEFLAGS ARCH=arm LOADADDR=0x80200000 zImage dtbs
+install-commands:
+- mkdir -p "$DESTDIR"/boot
+- cp arch/arm/boot/zImage "$DESTDIR"/boot/zImage
+- cp arch/arm/boot/dts/tegra124-jetson-tk1.dtb "$DESTDIR"/boot/.
+- make modules
+- make INSTALL_MOD_PATH="$DESTDIR" modules_install
+- install -d "$DESTDIR$PREFIX/src/linux"
+- |
+ (
+ printf 'Makefile\0'
+ printf 'Module.symvers\0'
+ find arch/arm -maxdepth 1 -name 'Makefile*' -print0
+ find arch/arm \( -name 'module.lds' -o -name 'Kbuild.platforms' -o -name 'Platform' \) -print0
+ find arch/arm \( -type d -a \( -name include -o -name scripts \) \) -o \
+ \! -type d -a \( -path '*include/*' -o -path '*scripts/*' \) -print0
+ find include -name 'asm*' -prune -o -print0
+ find include/asm-generic -print0
+ find include/uapi -print0
+ find scripts -print0
+ ) | cpio -0pumd "$DESTDIR$PREFIX/src/linux"
diff --git a/strata/bsp-jetson/nouveau-drm.morph b/strata/bsp-jetson/nouveau-drm.morph
new file mode 100644
index 00000000..33221a70
--- /dev/null
+++ b/strata/bsp-jetson/nouveau-drm.morph
@@ -0,0 +1,12 @@
+name: nouveau-drm
+kind: chunk
+build-commands:
+ - sed -e 's/.*android\/sync.*/#ifdef CONFIG_SYNC\n&\n#endif/' -i drm/nouveau/nouveau_fence.c
+ - cd drm/nouveau && make ARCH=arm M=$(pwd) -C /usr/src/linux/ modules
+install-commands:
+ - cd drm/nouveau && make ARCH=arm M="$(pwd)" -C /usr/src/linux/ INSTALL_MOD_PATH="$DESTDIR" modules_install
+system-integration:
+ nouveau-drm-misc:
+ 00-earlyconf:
+ - (cd /lib/modules && for version in *; do rm "$version/kernel/drivers/gpu/drm/nouveau/nouveau.ko"; done)
+ - (cd /lib/modules && for version in *; do depmod -a "$version"; done)
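The system-integration commands above drop the in-tree nouveau.ko and re-run depmod so that module lookups resolve to the out-of-tree build installed by this chunk. A hedged way to confirm that on a deployed system (not part of the patch; external modules are installed under extra/ by default):

    # Should print a path under .../extra/ rather than .../kernel/drivers/gpu/drm/nouveau/
    modinfo -F filename nouveau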
diff --git a/strata/bsp-jetson/u-boot@jetson.morph b/strata/bsp-jetson/u-boot@jetson.morph
new file mode 100644
index 00000000..533f253a
--- /dev/null
+++ b/strata/bsp-jetson/u-boot@jetson.morph
@@ -0,0 +1,20 @@
+name: u-boot@jetson
+kind: chunk
+configure-commands:
+- make ARCH=arm jetson-tk1_config
+build-commands:
+- make ARCH=arm CROSS_COMPILE=/usr/bin/
+- make ARCH=arm CROSS_COMPILE=/usr/bin/ tools
+install-commands:
+- mkdir -p "$DESTDIR$PREFIX/bin"
+- mkdir -p "$DESTDIR/boot"
+- mkdir -p "$DESTDIR/boot/u-boot/"
+- install -m 755 u-boot-dtb-tegra.bin "$DESTDIR/boot/u-boot.bin"
+- install -m 755 u-boot-nodtb-tegra.bin "$DESTDIR/boot/u-boot/u-boot-nodtb-tegra.bin"
+- install -m 755 u-boot.dtb "$DESTDIR/boot/u-boot/u-boot.dtb"
+- size -A u-boot | grep -w .bss | tr -s ' ' | cut -d ' ' -f 2 > "$DESTDIR/boot/u-boot/u-boot-bss-size"
+- install -m 755 tools/img2brec.sh "$DESTDIR$PREFIX/bin/."
+- install -m 755 tools/jtagconsole "$DESTDIR$PREFIX/bin/."
+- install -m 755 tools/netconsole "$DESTDIR$PREFIX/bin/."
+- install -m 755 tools/mkenvimage "$DESTDIR$PREFIX/bin/."
+- install -m 755 tools/mkimage "$DESTDIR$PREFIX/bin/."
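The size/grep/cut pipeline above records the size of U-Boot's .bss section for later use during deployment. A worked illustration of what it extracts, with made-up numbers:

    # Given `size -A u-boot` output containing a line such as
    #   .bss            345678   2283748896
    # the pipeline writes "345678" (bytes) to /boot/u-boot/u-boot-bss-size.
    size -A u-boot | grep -w .bss | tr -s ' ' | cut -d ' ' -f 2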
diff --git a/strata/bsp-ppc64-generic.morph b/strata/bsp-ppc64-generic.morph
new file mode 100644
index 00000000..ea606a75
--- /dev/null
+++ b/strata/bsp-ppc64-generic.morph
@@ -0,0 +1,11 @@
+name: bsp-ppc64-generic
+kind: stratum
+description: The platform-dependent components required to boot a ppc64 board.
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: linux-ppc64
+ morph: strata/bsp-ppc64-generic/linux-ppc64.morph
+ repo: upstream:linux
+ ref: 39a8804455fb23f09157341d3ba7db6d7ae6ee76
+ unpetrify-ref: v4.0
diff --git a/strata/bsp-ppc64-generic/linux-ppc64.morph b/strata/bsp-ppc64-generic/linux-ppc64.morph
new file mode 100644
index 00000000..d2ef0747
--- /dev/null
+++ b/strata/bsp-ppc64-generic/linux-ppc64.morph
@@ -0,0 +1,226 @@
+name: linux-ppc64
+kind: chunk
+configure-commands:
+- make ARCH=powerpc ppc64_defconfig
+- scripts/config -e NAMESPACES
+- scripts/config -e PACKET
+- scripts/config -e SATA_AHCI
+- scripts/config -e ATA_GENERIC
+- scripts/config -e HAVE_IDE
+- scripts/config -d BLK_DEV_IDE_SATA
+- scripts/config -e BTRFS_FS
+- scripts/config -e BTRFS_FS_POSIX_ACL
+- scripts/config -e DEVTMPFS
+- scripts/config -e DEVTMPFS_MOUNT
+- scripts/config -e CONFIG_FHANDLE
+- scripts/config -e DEVPTS_MULTIPLE_INSTANCES
+- scripts/config -e CGROUPS
+- scripts/config -e AUTOFS4_FS
+- scripts/config --set-str UEVENT_HELPER_PATH ''
+- scripts/config -e EXT2_FS
+- scripts/config -e EXT2_FS_XATTR
+- scripts/config -e EXT2_FS_POSIX_ACL
+- scripts/config -e EXT2_FS_SECURITY
+- scripts/config -e EXT2_FS_XIP
+- scripts/config -e EXT3_FS
+- scripts/config -d EXT3_DEFAULTS_TO_ORDERED
+- scripts/config -e EXT3_FS_XATTR
+- scripts/config -e EXT3_FS_POSIX_ACL
+- scripts/config -e EXT3_FS_SECURITY
+- scripts/config -e EXT4_FS
+- scripts/config -e EXT4_FS_XATTR
+- scripts/config -e EXT4_FS_POSIX_ACL
+- scripts/config -e EXT4_FS_SECURITY
+- scripts/config -d EXT4_DEBUG
+- scripts/config -e FUSE_FS
+- scripts/config -e OVERLAY_FS
+- scripts/config -e XFS_FS
+- scripts/config -e LIBCRC32C
+- scripts/config -d JBD_DEBUG
+- scripts/config -d JBD2_DEBUG
+- scripts/config -e BLK_DEV_LOOP
+- scripts/config -d BLK_DEV_CRYPTOLOOP
+- scripts/config --set-val BLK_DEV_LOOP_MIN_COUNT 8
+- scripts/config -e DM_SNAPSHOT
+- scripts/config -e IKCONFIG
+- scripts/config -e IKCONFIG_PROC
+- scripts/config -e SECCOMP
+- scripts/config -d DEBUG_STACK_TRACE
+- scripts/config -e NFSD
+- scripts/config -e NFSD_V3
+- scripts/config -e IP_PNP
+- scripts/config -e IP_PNP_DHCP
+- scripts/config -e NFS_FS
+- scripts/config -e ROOT_NFS
+- scripts/config -e PCI
+- scripts/config -e TIGON3
+- scripts/config -e BLK_DEV_NBD
+- scripts/config -e BRIDGE_NF_EBTABLES
+- scripts/config -e NETFILTER
+- scripts/config -e NETFILTER_ADVANCED
+- scripts/config -e NETFILTER_XT_MATCH_ADDRTYPE
+- scripts/config -e OPENVSWITCH
+- scripts/config -e OPENVSWITCH_VXLAN
+- scripts/config -e CONFIG_OPENVSWITCH_GRE
+- scripts/config -e NET_CLS_BASIC
+- scripts/config -e NET_SCH_INGRESS
+- scripts/config -e NET_ACT_POLICE
+- scripts/config -e NET_IPGRE_DEMUX
+- scripts/config -e NET_SCH_HTB
+- scripts/config -e NET_SCH_HFSC
+- scripts/config -e VXLAN
+- scripts/config -e VETH
+- scripts/config -e IP_NF_MATCH_AH
+- scripts/config -e IP_NF_MATCH_ECN
+- scripts/config -e IP_NF_MATCH_RPFILTER
+- scripts/config -e IP_NF_MATCH_TTL
+- scripts/config -e IP_NF_TARGET_SYNPROXY
+- scripts/config -e IP_NF_TARGET_NETMAP
+- scripts/config -e IP_NF_TARGET_REDIRECT
+- scripts/config -e IP_NF_TARGET_CLUSTERIP
+- scripts/config -e IP_NF_TARGET_ECN
+- scripts/config -e IP_NF_TARGET_TTL
+- scripts/config -e IP_NF_RAW
+- scripts/config -e IP_NF_SECURITY
+- scripts/config -e IP_NF_ARPTABLES
+- scripts/config -e NETFILTER_NETLINK_QUEUE
+- scripts/config -e SCSI_NETLINK
+- scripts/config -e NETFILTER_XT_TARGET_MARK
+- scripts/config -e NETFILTER_XT_SET
+- scripts/config -e IP_SET
+- scripts/config -e IP_SET_BITMAP_IP
+- scripts/config -e IP_SET_BITMAP_IPMAC
+- scripts/config -e IP_SET_BITMAP_PORT
+- scripts/config -e IP_SET_HASH_IP
+- scripts/config -e IP_SET_HASH_IPMARK
+- scripts/config -e IP_SET_HASH_IPPORT
+- scripts/config -e IP_SET_HASH_IPPORTIP
+- scripts/config -e IP_SET_HASH_IPPORTNET
+- scripts/config -e IP_SET_HASH_MAC
+- scripts/config -e IP_SET_HASH_NETPORTNET
+- scripts/config -e IP_SET_HASH_NET
+- scripts/config -e IP_SET_HASH_NETNET
+- scripts/config -e IP_SET_HASH_NETPORT
+- scripts/config -e IP_SET_HASH_NETIFACE
+- scripts/config -e IP_SET_LIST_SET
+- scripts/config -e NF_CONNTRACK_TIMEOUT
+- scripts/config -e NF_CONNTRACK_TIMESTAMP
+- scripts/config -e NF_CONNTRACK_EVENTS
+- scripts/config -e NF_CONNTRACK_LABELS
+- scripts/config -e NETFILTER_NETLINK_ACCT
+- scripts/config -e NETFILTER_NETLINK_QUEUE_CT
+- scripts/config -e NF_CT_PROTO_DCCP
+- scripts/config -e NF_CT_PROTO_GRE
+- scripts/config -e NF_CT_PROTO_SCTP
+- scripts/config -e NF_CT_PROTO_UDPLITE
+- scripts/config -e NF_CT_NETLINK_TIMEOUT
+- scripts/config -e NF_CT_NETLINK_HELPER
+- scripts/config -e NF_CONNTRACK_AMANDA
+- scripts/config -e NF_CONNTRACK_H323
+- scripts/config -e NF_CONNTRACK_BROADCAST
+- scripts/config -e NF_CONNTRACK_NETBIOS_NS
+- scripts/config -e NF_CONNTRACK_SNMP
+- scripts/config -e NF_CONNTRACK_PPTP
+- scripts/config -e NF_CONNTRACK_SANE
+- scripts/config -e NF_CONNTRACK_TFTP
+- scripts/config -e NF_LOG_COMMON
+- scripts/config -e NF_NAT_PROTO_DCCP
+- scripts/config -e NF_NAT_PROTO_UDPLITE
+- scripts/config -e NF_NAT_PROTO_SCTP
+- scripts/config -e NF_NAT_AMANDA
+- scripts/config -e NF_NAT_TFTP
+- scripts/config -e NF_TABLES
+- scripts/config -e NF_TABLES_INET
+- scripts/config -e NFT_COMPAT
+- scripts/config -e NFT_EXTHDR
+- scripts/config -e NFT_META
+- scripts/config -e NFT_CT
+- scripts/config -e NFT_LIMIT
+- scripts/config -e NFT_NAT
+- scripts/config -e NFT_QUEUE
+- scripts/config -e NFT_REJECT
+- scripts/config -e NFT_REJECT_INET
+- scripts/config -e NFT_RBTREE
+- scripts/config -e NFT_HASH
+- scripts/config -e NFT_COUNTER
+- scripts/config -e NFT_LOG
+- scripts/config -e NFT_MASQ
+- scripts/config -e NETFILTER_XT_CONNMARK
+- scripts/config -e NETFILTER_XT_TARGET_AUDIT
+- scripts/config -e NETFILTER_XT_TARGET_CHECKSUM
+- scripts/config -e NETFILTER_XT_TARGET_CLASSIFY
+- scripts/config -e NETFILTER_XT_TARGET_CT
+- scripts/config -e NETFILTER_XT_TARGET_DSCP
+- scripts/config -e NETFILTER_XT_TARGET_HMARK
+- scripts/config -e NETFILTER_XT_TARGET_LED
+- scripts/config -e NETFILTER_XT_TARGET_LOG
+- scripts/config -e NETFILTER_XT_TARGET_NFQUEUE
+- scripts/config -e NETFILTER_XT_TARGET_RATEEST
+- scripts/config -e NETFILTER_XT_TARGET_TPROXY
+- scripts/config -e NETFILTER_XT_TARGET_TCPOPTSTRIP
+- scripts/config -e NETFILTER_XT_TARGET_TEE
+- scripts/config -e NETFILTER_XT_TARGET_TRACE
+- scripts/config -e NETFILTER_XT_TARGET_IDLETIMER
+- scripts/config -e NETFILTER_XT_MATCH_BPF
+- scripts/config -e NETFILTER_XT_MATCH_CLUSTER
+- scripts/config -e NETFILTER_XT_MATCH_COMMENT
+- scripts/config -e NETFILTER_XT_MATCH_CONNBYTES
+- scripts/config -e NETFILTER_XT_MATCH_CONNLABEL
+- scripts/config -e NETFILTER_XT_MATCH_CONNLIMIT
+- scripts/config -e NETFILTER_XT_MATCH_CPU
+- scripts/config -e NETFILTER_XT_MATCH_DCCP
+- scripts/config -e NETFILTER_XT_MATCH_DEVGROUP
+- scripts/config -e NETFILTER_XT_MATCH_DSCP
+- scripts/config -e NETFILTER_XT_MATCH_ESP
+- scripts/config -e NETFILTER_XT_MATCH_HASHLIMIT
+- scripts/config -e NETFILTER_XT_MATCH_HELPER
+- scripts/config -e NETFILTER_XT_MATCH_IPCOMP
+- scripts/config -e NETFILTER_XT_MATCH_IPRANGE
+- scripts/config -e NETFILTER_XT_MATCH_IPVS
+- scripts/config -e NETFILTER_XT_MATCH_L2TP
+- scripts/config -e NETFILTER_XT_MATCH_LENGTH
+- scripts/config -e NETFILTER_XT_MATCH_LIMIT
+- scripts/config -e NETFILTER_XT_MATCH_MAC
+- scripts/config -e NETFILTER_XT_MATCH_MULTIPORT
+- scripts/config -e NETFILTER_XT_MATCH_NFACCT
+- scripts/config -e NETFILTER_XT_MATCH_OSF
+- scripts/config -e NETFILTER_XT_MATCH_OWNER
+- scripts/config -e NETFILTER_XT_MATCH_CGROUP
+- scripts/config -e NETFILTER_XT_MATCH_PHYSDEV
+- scripts/config -e NETFILTER_XT_MATCH_PKTTYPE
+- scripts/config -e NETFILTER_XT_MATCH_QUOTA
+- scripts/config -e NETFILTER_XT_MATCH_RATEEST
+- scripts/config -e NETFILTER_XT_MATCH_REALM
+- scripts/config -e NETFILTER_XT_MATCH_RECENT
+- scripts/config -e NETFILTER_XT_MATCH_SCTP
+- scripts/config -e NETFILTER_XT_MATCH_SOCKET
+- scripts/config -e NETFILTER_XT_MATCH_STATISTIC
+- scripts/config -e NETFILTER_XT_MATCH_STRING
+- scripts/config -e NETFILTER_XT_MATCH_TCPMSS
+- scripts/config -e NETFILTER_XT_MATCH_TIME
+- scripts/config -e NETFILTER_XT_MATCH_U32
+- scripts/config -e IP_VS
+- scripts/config -e BRIDGE_NETFILTER
+- scripts/config -e CRYPTO_CRC32C
+- scripts/config -e CONFIGFS_FS
+- scripts/config -e EXPERT
+- scripts/config -e TARGET_CORE
+- scripts/config -e ISCSI_TARGET
+- scripts/config -e TCM_IBLOCK
+- scripts/config -e TCM_FILEIO
+- scripts/config -e TCM_PSCSI
+- scripts/config -e TCM_USER
+- scripts/config -e CONFIG_UIO
+- scripts/config -e LOOPBACK_TARGET
+- scripts/config -e TCM_FC
+- scripts/config -e LIBFC
+- scripts/config -e SCSI_FC_ATTRS
+- scripts/config -e SCSI_ISCSI_ATTRS
+- scripts/config -e ISCSI_TCP
+- scripts/config -e SCSI_LOWLEVEL
+- yes '' | make oldconfig
+build-commands:
+- make $MAKEFLAGS ARCH=powerpc zImage
+install-commands:
+- mkdir -p "$DESTDIR"/boot
+- cp arch/powerpc/boot/zImage "$DESTDIR"/boot/zImage
diff --git a/strata/bsp-wandboard.morph b/strata/bsp-wandboard.morph
new file mode 100644
index 00000000..ba704b1b
--- /dev/null
+++ b/strata/bsp-wandboard.morph
@@ -0,0 +1,18 @@
+name: bsp-wandboard
+kind: stratum
+description: The platform-dependent components required to boot a Wandboard ARM board.
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: u-boot@wandboard
+ morph: strata/bsp-wandboard/u-boot@wandboard.morph
+ repo: upstream:u-boot
+ ref: fe57382d04b46c37f34cf8d3b3ad876554fd12bf
+ unpetrify-ref: baserock/morph
+- name: linux-armv7-wandboard
+ morph: strata/bsp-wandboard/linux-armv7-wandboard.morph
+ repo: upstream:linux-stable
+ ref: 660613d1a4e94144490850b6c3d350331860fac4
+ unpetrify-ref: v3.19.2
+ build-depends:
+ - u-boot@wandboard
diff --git a/strata/bsp-wandboard/linux-armv7-wandboard.morph b/strata/bsp-wandboard/linux-armv7-wandboard.morph
new file mode 100644
index 00000000..8137198e
--- /dev/null
+++ b/strata/bsp-wandboard/linux-armv7-wandboard.morph
@@ -0,0 +1,70 @@
+name: linux-armv7-wandboard
+kind: chunk
+configure-commands:
+- make ARCH=arm imx_v6_v7_defconfig
+- scripts/config -d KERNEL_LZO
+- scripts/config -e KERNEL_GZIP
+- scripts/config -e NAMESPACES
+- scripts/config -e PACKET
+- scripts/config -e SATA_AHCI
+- scripts/config -e ATA_GENERIC
+- scripts/config -e HAVE_IDE
+- scripts/config -d BLK_DEV_IDE_SATA
+- scripts/config -e BTRFS_FS
+- scripts/config -e BTRFS_FS_POSIX_ACL
+- scripts/config -e DEVTMPFS
+- scripts/config -e DEVTMPFS_MOUNT
+- scripts/config -e CONFIG_FHANDLE
+- scripts/config -e DEVPTS_MULTIPLE_INSTANCES
+- scripts/config -e CGROUPS
+- scripts/config -e AUTOFS4_FS
+- scripts/config --set-str UEVENT_HELPER_PATH ''
+- scripts/config -e EXT2_FS
+- scripts/config -e EXT2_FS_XATTR
+- scripts/config -e EXT2_FS_POSIX_ACL
+- scripts/config -e EXT2_FS_SECURITY
+- scripts/config -e EXT2_FS_XIP
+- scripts/config -e EXT3_FS
+- scripts/config -d EXT3_DEFAULTS_TO_ORDERED
+- scripts/config -e EXT3_FS_XATTR
+- scripts/config -e EXT3_FS_POSIX_ACL
+- scripts/config -e EXT3_FS_SECURITY
+- scripts/config -e EXT4_FS
+- scripts/config -e EXT4_FS_XATTR
+- scripts/config -e EXT4_FS_POSIX_ACL
+- scripts/config -e EXT4_FS_SECURITY
+- scripts/config -d EXT4_DEBUG
+- scripts/config -e FUSE_FS
+- scripts/config -e OVERLAY_FS
+- scripts/config -d JBD_DEBUG
+- scripts/config -d JBD2_DEBUG
+- scripts/config -e BLK_DEV_LOOP
+- scripts/config -d BLK_DEV_CRYPTOLOOP
+- scripts/config --set-val BLK_DEV_LOOP_MIN_COUNT 8
+- scripts/config -e DM_SNAPSHOT
+- scripts/config -e IKCONFIG
+- scripts/config -e IKCONFIG_PROC
+- scripts/config -e SECCOMP
+- scripts/config -d DEBUG_STACK_TRACE
+- scripts/config -e NFSD
+- scripts/config -e NFSD_V3
+- scripts/config -e VLAN_8021Q
+- scripts/config -e BRIDGE_VLAN_FILTERING
+- scripts/config -e VETH
+- yes '' | make oldconfig
+build-commands:
+- make $MAKEFLAGS ARCH=arm LOADADDR=0x10800000 uImage dtbs
+- |
+ cat >boot.script <<'EOF'
+ setenv image_addr "0x12000000"
+ setenv bootargs "console=ttymxc0,115200 root=/dev/nfs rw ip=dhcp nfsroot=${fileserver}:${nfsroot},v3,tcp"
+ dhcp ${image_addr} ${fileserver}:${tfpboot}/uImage
+ bootm ${image_addr}
+ EOF
+- mkimage -A arm -T script -C none -n "Wandboard TFTP Boot Script" -d boot.script
+ boot.scr
+install-commands:
+- mkdir -p "$DESTDIR"/boot
+- cp arch/arm/boot/uImage "$DESTDIR"/boot/uImage
+- cp boot.scr "$DESTDIR"/boot/boot.scr
+- cp arch/arm/boot/dts/imx6*-wandboard.dtb "$DESTDIR"/boot/.
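The boot.scr produced above is a U-Boot script image for TFTP/NFS booting. A hedged sketch of how it would typically be fetched and run from the U-Boot prompt on the board; the addresses and environment values are assumptions for illustration, and the variable names follow the script above:

    # Hypothetical U-Boot commands, not part of this patch.
    setenv fileserver 192.168.0.1
    setenv tfpboot wandboard
    setenv nfsroot /srv/nfs/wandboard
    dhcp 0x10800000 ${fileserver}:${tfpboot}/boot.scr
    source 0x10800000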
diff --git a/strata/bsp-wandboard/u-boot@wandboard.morph b/strata/bsp-wandboard/u-boot@wandboard.morph
new file mode 100644
index 00000000..8a51ee7c
--- /dev/null
+++ b/strata/bsp-wandboard/u-boot@wandboard.morph
@@ -0,0 +1,11 @@
+name: u-boot@wandboard
+kind: chunk
+build-commands:
+- make tools
+install-commands:
+- mkdir -p "$DESTDIR$PREFIX/bin"
+- install -m 755 tools/img2brec.sh "$DESTDIR$PREFIX/bin/."
+- install -m 755 tools/jtagconsole "$DESTDIR$PREFIX/bin/."
+- install -m 755 tools/netconsole "$DESTDIR$PREFIX/bin/."
+- install -m 755 tools/mkenvimage "$DESTDIR$PREFIX/bin/."
+- install -m 755 tools/mkimage "$DESTDIR$PREFIX/bin/."
diff --git a/strata/bsp-x86_32-generic.morph b/strata/bsp-x86_32-generic.morph
new file mode 100644
index 00000000..41003643
--- /dev/null
+++ b/strata/bsp-x86_32-generic.morph
@@ -0,0 +1,24 @@
+name: bsp-x86_32-generic
+kind: stratum
+description: The set of platform-specific components required for booting a 32-bit
+ x86-based system.
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: linux-x86-32-generic
+ morph: strata/bsp-x86_32-generic/linux-x86-32-generic.morph
+ repo: upstream:linux
+ ref: 39a8804455fb23f09157341d3ba7db6d7ae6ee76
+ unpetrify-ref: v4.0
+- name: nasm
+ morph: strata/bsp-x86_32-generic/nasm.morph
+ repo: upstream:nasm
+ ref: 78bdad3d14fb875d5f2062957e326ba2a9e4ccb0
+ unpetrify-ref: baserock/morph
+- name: syslinux
+ morph: strata/bsp-x86_32-generic/syslinux.morph
+ repo: upstream:syslinux
+ ref: 2aab8555987b547b617cbb887e61083fece01541
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - nasm
diff --git a/strata/bsp-x86_32-generic/linux-x86-32-generic.morph b/strata/bsp-x86_32-generic/linux-x86-32-generic.morph
new file mode 100644
index 00000000..208eb506
--- /dev/null
+++ b/strata/bsp-x86_32-generic/linux-x86-32-generic.morph
@@ -0,0 +1,287 @@
+name: linux-x86-32-generic
+kind: chunk
+products:
+- artifact: linux-x86-32-generic-devel
+ include:
+ - (usr/)?src/linux/.*
+- artifact: linux-x86-32-generic-bins
+ include:
+ - boot/.*
+- artifact: linux-x86-32-generic-libs
+ include:
+ - lib/.*
+configure-commands:
+- make defconfig
+- scripts/config -e PACKET
+- scripts/config -e SATA_AHCI
+- scripts/config -e ATA_GENERIC
+- scripts/config -e HAVE_IDE
+- scripts/config -d BLK_DEV_IDE_SATA
+- scripts/config -e BTRFS_FS
+- scripts/config -e BTRFS_FS_POSIX_ACL
+- scripts/config -e DEVTMPFS
+- scripts/config -e DEVTMPFS_MOUNT
+- scripts/config -e CONFIG_FHANDLE
+- scripts/config -e DEVPTS_MULTIPLE_INSTANCES
+- scripts/config -e CGROUPS
+- scripts/config -e AUTOFS4_FS
+- scripts/config --set-str UEVENT_HELPER_PATH ''
+- scripts/config -e EXT2_FS
+- scripts/config -e EXT2_FS_XATTR
+- scripts/config -e EXT2_FS_POSIX_ACL
+- scripts/config -e EXT2_FS_SECURITY
+- scripts/config -e EXT2_FS_XIP
+- scripts/config -e EXT3_FS
+- scripts/config -d EXT3_DEFAULTS_TO_ORDERED
+- scripts/config -e EXT3_FS_XATTR
+- scripts/config -e EXT3_FS_POSIX_ACL
+- scripts/config -e EXT3_FS_SECURITY
+- scripts/config -e EXT4_FS
+- scripts/config -e EXT4_FS_XATTR
+- scripts/config -e EXT4_FS_POSIX_ACL
+- scripts/config -e EXT4_FS_SECURITY
+- scripts/config -d EXT4_DEBUG
+- scripts/config -e FUSE_FS
+- scripts/config -e OVERLAY_FS
+- scripts/config -e XFS_FS
+- scripts/config -e LIBCRC32C
+- scripts/config -d JBD_DEBUG
+- scripts/config -d JBD2_DEBUG
+- scripts/config -e BLK_DEV_LOOP
+- scripts/config -d BLK_DEV_CRYPTOLOOP
+- scripts/config -e CEPH_FS
+- scripts/config --set-val BLK_DEV_LOOP_MIN_COUNT 8
+- scripts/config -e DM_SNAPSHOT
+- scripts/config -e VIRTIO
+- scripts/config -e VIRTIO_RING
+- scripts/config -e VIRTIO_PCI
+- scripts/config -e VIRTIO_BALLOON
+- scripts/config -e VIRTIO_BLK
+- scripts/config -e VIRTIO_NET
+- scripts/config -e VIRTIO_CONSOLE
+- scripts/config -e HW_RANDOM_VIRTIO
+- scripts/config -e 9P_FS
+- scripts/config -e 9P_FSCACHE
+- scripts/config -e 9P_FS_POSIX_ACL
+- scripts/config -e NET_9P
+- scripts/config -e NET_9P_VIRTIO
+- scripts/config -e R8169
+- scripts/config -e 8139TOO
+- scripts/config -e 8139CP
+- scripts/config -e ATL1
+- scripts/config -e ATL1C
+- scripts/config -e ATL1E
+- scripts/config -e E100
+- scripts/config -e E1000
+- scripts/config -e E1000E
+- scripts/config -e USB_USBNET
+- scripts/config -e USB_NET_AX8817X
+- scripts/config -e USB_NET_AX88179_178A
+- scripts/config -e USB_NET_DM9601
+- scripts/config -e IKCONFIG
+- scripts/config -e IKCONFIG_PROC
+- scripts/config -e SECCOMP
+- scripts/config -d DEBUG_STACK_TRACE
+- scripts/config -e NFSD
+- scripts/config -e NFSD_V3
+- scripts/config -e KVM
+- scripts/config -e TUN
+- scripts/config -e BRIDGE
+- scripts/config -e VHOST_NET
+- scripts/config -e NF_NAT
+- scripts/config -e IP_NF_NAT
+- scripts/config -e IP_NF_TARGET_MASQUERADE
+- scripts/config -e FB_VESA
+- scripts/config -e HOTPLUG_PCI
+- scripts/config -e HOTPLUG_PCI_ACPI
+- scripts/config -e VLAN_8021Q
+- scripts/config -e BRIDGE_VLAN_FILTERING
+- scripts/config -e BLK_DEV_NBD
+- scripts/config -e BRIDGE_NF_EBTABLES
+- scripts/config -e NETFILTER
+- scripts/config -e NETFILTER_ADVANCED
+- scripts/config -e NETFILTER_XT_MATCH_ADDRTYPE
+- scripts/config -e OPENVSWITCH
+- scripts/config -e OPENVSWITCH_VXLAN
+- scripts/config -e CONFIG_OPENVSWITCH_GRE
+- scripts/config -e NET_CLS_BASIC
+- scripts/config -e NET_SCH_INGRESS
+- scripts/config -e NET_ACT_POLICE
+- scripts/config -e NET_IPGRE_DEMUX
+- scripts/config -e NET_SCH_HTB
+- scripts/config -e NET_SCH_HFSC
+- scripts/config -e VXLAN
+- scripts/config -e VETH
+- scripts/config -e IP_NF_MATCH_AH
+- scripts/config -e IP_NF_MATCH_ECN
+- scripts/config -e IP_NF_MATCH_RPFILTER
+- scripts/config -e IP_NF_MATCH_TTL
+- scripts/config -e IP_NF_TARGET_SYNPROXY
+- scripts/config -e IP_NF_TARGET_NETMAP
+- scripts/config -e IP_NF_TARGET_REDIRECT
+- scripts/config -e IP_NF_TARGET_CLUSTERIP
+- scripts/config -e IP_NF_TARGET_ECN
+- scripts/config -e IP_NF_TARGET_TTL
+- scripts/config -e IP_NF_RAW
+- scripts/config -e IP_NF_SECURITY
+- scripts/config -e IP_NF_ARPTABLES
+- scripts/config -e KVM_INTEL
+- scripts/config -e NETFILTER_NETLINK_QUEUE
+- scripts/config -e SCSI_NETLINK
+- scripts/config -e NETFILTER_XT_TARGET_MARK
+- scripts/config -e NETFILTER_XT_SET
+- scripts/config -e IP_SET
+- scripts/config -e IP_SET_BITMAP_IP
+- scripts/config -e IP_SET_BITMAP_IPMAC
+- scripts/config -e IP_SET_BITMAP_PORT
+- scripts/config -e IP_SET_HASH_IP
+- scripts/config -e IP_SET_HASH_IPMARK
+- scripts/config -e IP_SET_HASH_IPPORT
+- scripts/config -e IP_SET_HASH_IPPORTIP
+- scripts/config -e IP_SET_HASH_IPPORTNET
+- scripts/config -e IP_SET_HASH_MAC
+- scripts/config -e IP_SET_HASH_NETPORTNET
+- scripts/config -e IP_SET_HASH_NET
+- scripts/config -e IP_SET_HASH_NETNET
+- scripts/config -e IP_SET_HASH_NETPORT
+- scripts/config -e IP_SET_HASH_NETIFACE
+- scripts/config -e IP_SET_LIST_SET
+- scripts/config -e NF_CONNTRACK_TIMEOUT
+- scripts/config -e NF_CONNTRACK_TIMESTAMP
+- scripts/config -e NF_CONNTRACK_EVENTS
+- scripts/config -e NF_CONNTRACK_LABELS
+- scripts/config -e NETFILTER_NETLINK_ACCT
+- scripts/config -e NETFILTER_NETLINK_QUEUE_CT
+- scripts/config -e NF_CT_PROTO_DCCP
+- scripts/config -e NF_CT_PROTO_GRE
+- scripts/config -e NF_CT_PROTO_SCTP
+- scripts/config -e NF_CT_PROTO_UDPLITE
+- scripts/config -e NF_CT_NETLINK_TIMEOUT
+- scripts/config -e NF_CT_NETLINK_HELPER
+- scripts/config -e NF_CONNTRACK_AMANDA
+- scripts/config -e NF_CONNTRACK_H323
+- scripts/config -e NF_CONNTRACK_BROADCAST
+- scripts/config -e NF_CONNTRACK_NETBIOS_NS
+- scripts/config -e NF_CONNTRACK_SNMP
+- scripts/config -e NF_CONNTRACK_PPTP
+- scripts/config -e NF_CONNTRACK_SANE
+- scripts/config -e NF_CONNTRACK_TFTP
+- scripts/config -e NF_LOG_COMMON
+- scripts/config -e NF_NAT_PROTO_DCCP
+- scripts/config -e NF_NAT_PROTO_UDPLITE
+- scripts/config -e NF_NAT_PROTO_SCTP
+- scripts/config -e NF_NAT_AMANDA
+- scripts/config -e NF_NAT_TFTP
+- scripts/config -e NF_TABLES
+- scripts/config -e NF_TABLES_INET
+- scripts/config -e NFT_COMPAT
+- scripts/config -e NFT_EXTHDR
+- scripts/config -e NFT_META
+- scripts/config -e NFT_CT
+- scripts/config -e NFT_LIMIT
+- scripts/config -e NFT_NAT
+- scripts/config -e NFT_QUEUE
+- scripts/config -e NFT_REJECT
+- scripts/config -e NFT_REJECT_INET
+- scripts/config -e NFT_RBTREE
+- scripts/config -e NFT_HASH
+- scripts/config -e NFT_COUNTER
+- scripts/config -e NFT_LOG
+- scripts/config -e NFT_MASQ
+- scripts/config -e NETFILTER_XT_CONNMARK
+- scripts/config -e NETFILTER_XT_TARGET_AUDIT
+- scripts/config -e NETFILTER_XT_TARGET_CHECKSUM
+- scripts/config -e NETFILTER_XT_TARGET_CLASSIFY
+- scripts/config -e NETFILTER_XT_TARGET_CT
+- scripts/config -e NETFILTER_XT_TARGET_DSCP
+- scripts/config -e NETFILTER_XT_TARGET_HMARK
+- scripts/config -e NETFILTER_XT_TARGET_LED
+- scripts/config -e NETFILTER_XT_TARGET_LOG
+- scripts/config -e NETFILTER_XT_TARGET_NFQUEUE
+- scripts/config -e NETFILTER_XT_TARGET_RATEEST
+- scripts/config -e NETFILTER_XT_TARGET_TPROXY
+- scripts/config -e NETFILTER_XT_TARGET_TCPOPTSTRIP
+- scripts/config -e NETFILTER_XT_TARGET_TEE
+- scripts/config -e NETFILTER_XT_TARGET_TRACE
+- scripts/config -e NETFILTER_XT_TARGET_IDLETIMER
+- scripts/config -e NETFILTER_XT_MATCH_BPF
+- scripts/config -e NETFILTER_XT_MATCH_CLUSTER
+- scripts/config -e NETFILTER_XT_MATCH_COMMENT
+- scripts/config -e NETFILTER_XT_MATCH_CONNBYTES
+- scripts/config -e NETFILTER_XT_MATCH_CONNLABEL
+- scripts/config -e NETFILTER_XT_MATCH_CONNLIMIT
+- scripts/config -e NETFILTER_XT_MATCH_CPU
+- scripts/config -e NETFILTER_XT_MATCH_DCCP
+- scripts/config -e NETFILTER_XT_MATCH_DEVGROUP
+- scripts/config -e NETFILTER_XT_MATCH_DSCP
+- scripts/config -e NETFILTER_XT_MATCH_ESP
+- scripts/config -e NETFILTER_XT_MATCH_HASHLIMIT
+- scripts/config -e NETFILTER_XT_MATCH_HELPER
+- scripts/config -e NETFILTER_XT_MATCH_IPCOMP
+- scripts/config -e NETFILTER_XT_MATCH_IPRANGE
+- scripts/config -e NETFILTER_XT_MATCH_IPVS
+- scripts/config -e NETFILTER_XT_MATCH_L2TP
+- scripts/config -e NETFILTER_XT_MATCH_LENGTH
+- scripts/config -e NETFILTER_XT_MATCH_LIMIT
+- scripts/config -e NETFILTER_XT_MATCH_MAC
+- scripts/config -e NETFILTER_XT_MATCH_MULTIPORT
+- scripts/config -e NETFILTER_XT_MATCH_NFACCT
+- scripts/config -e NETFILTER_XT_MATCH_OSF
+- scripts/config -e NETFILTER_XT_MATCH_OWNER
+- scripts/config -e NETFILTER_XT_MATCH_CGROUP
+- scripts/config -e NETFILTER_XT_MATCH_PHYSDEV
+- scripts/config -e NETFILTER_XT_MATCH_PKTTYPE
+- scripts/config -e NETFILTER_XT_MATCH_QUOTA
+- scripts/config -e NETFILTER_XT_MATCH_RATEEST
+- scripts/config -e NETFILTER_XT_MATCH_REALM
+- scripts/config -e NETFILTER_XT_MATCH_RECENT
+- scripts/config -e NETFILTER_XT_MATCH_SCTP
+- scripts/config -e NETFILTER_XT_MATCH_SOCKET
+- scripts/config -e NETFILTER_XT_MATCH_STATISTIC
+- scripts/config -e NETFILTER_XT_MATCH_STRING
+- scripts/config -e NETFILTER_XT_MATCH_TCPMSS
+- scripts/config -e NETFILTER_XT_MATCH_TIME
+- scripts/config -e NETFILTER_XT_MATCH_U32
+- scripts/config -e IP_VS
+- scripts/config -e BRIDGE_NETFILTER
+- scripts/config -e CRYPTO_CRC32C
+- scripts/config -e CONFIGFS_FS
+- scripts/config -e EXPERT
+- scripts/config -e TARGET_CORE
+- scripts/config -e ISCSI_TARGET
+- scripts/config -e TCM_IBLOCK
+- scripts/config -e TCM_FILEIO
+- scripts/config -e TCM_PSCSI
+- scripts/config -e TCM_USER
+- scripts/config -e CONFIG_UIO
+- scripts/config -e LOOPBACK_TARGET
+- scripts/config -e TCM_FC
+- scripts/config -e LIBFC
+- scripts/config -e SCSI_FC_ATTRS
+- scripts/config -e SCSI_ISCSI_ATTRS
+- scripts/config -e ISCSI_TCP
+- scripts/config -e SCSI_LOWLEVEL
+- scripts/config -e SCSI_VIRTIO
+- scripts/config -e HYPERVISOR_GUEST
+- scripts/config -e PARAVIRT
+- yes '' | make oldconfig
+build-commands:
+- make $MAKEFLAGS
+install-commands:
+- mkdir -p "$DESTDIR"/boot
+- make INSTALL_PATH="$DESTDIR"/boot install
+- make INSTALL_MOD_PATH="$DESTDIR" modules_install
+- install -d "$DESTDIR$PREFIX/src/linux"
+- |
+ (
+ printf 'Makefile\0'
+ printf 'Module.symvers\0'
+ find arch/x86 -maxdepth 1 -name 'Makefile*' -print0
+ find arch/x86 \( -name 'module.lds' -o -name 'Kbuild.platforms' -o -name 'Platform' \) -print0
+ find arch/x86 \( -type d -a \( -name include -o -name scripts \) \) -o \
+ \! -type d -a \( -path '*include/*' -o -path '*scripts/*' \) -print0
+ find include -name 'asm*' -prune -o -print0
+ find include/asm-generic -print0
+ find include/uapi -print0
+ find scripts -print0
+ ) | cpio -0pumd "$DESTDIR$PREFIX/src/linux"
diff --git a/strata/bsp-x86_32-generic/nasm.morph b/strata/bsp-x86_32-generic/nasm.morph
new file mode 100644
index 00000000..0ab1a629
--- /dev/null
+++ b/strata/bsp-x86_32-generic/nasm.morph
@@ -0,0 +1,5 @@
+name: nasm
+kind: chunk
+build-system: autotools
+install-commands:
+- make INSTALLROOT="$DESTDIR" install
diff --git a/strata/bsp-x86_32-generic/syslinux.morph b/strata/bsp-x86_32-generic/syslinux.morph
new file mode 100644
index 00000000..4570865b
--- /dev/null
+++ b/strata/bsp-x86_32-generic/syslinux.morph
@@ -0,0 +1,12 @@
+name: syslinux
+kind: chunk
+products:
+- artifact: syslinux-devel
+ include:
+ - (usr/)?share/syslinux/com32.*
+build-commands:
+- make clean
+- make NO_WERROR=1
+- make NO_WERROR=1 installer
+install-commands:
+- make INSTALLROOT="$DESTDIR" install
diff --git a/strata/bsp-x86_64-generic.morph b/strata/bsp-x86_64-generic.morph
new file mode 100644
index 00000000..799af9d2
--- /dev/null
+++ b/strata/bsp-x86_64-generic.morph
@@ -0,0 +1,24 @@
+name: bsp-x86_64-generic
+kind: stratum
+description: The set of platform-specific components required for booting a 64-bit
+ x86-based system.
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: linux-x86-64-generic
+ morph: strata/bsp-x86_64-generic/linux-x86-64-generic.morph
+ repo: upstream:linux
+ ref: 39a8804455fb23f09157341d3ba7db6d7ae6ee76
+ unpetrify-ref: v4.0
+- name: nasm
+ morph: strata/bsp-x86_64-generic/nasm.morph
+ repo: upstream:nasm
+ ref: 78bdad3d14fb875d5f2062957e326ba2a9e4ccb0
+ unpetrify-ref: baserock/morph
+- name: syslinux
+ morph: strata/bsp-x86_64-generic/syslinux.morph
+ repo: upstream:syslinux
+ ref: 2aab8555987b547b617cbb887e61083fece01541
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - nasm
diff --git a/strata/bsp-x86_64-generic/linux-x86-64-generic.morph b/strata/bsp-x86_64-generic/linux-x86-64-generic.morph
new file mode 100644
index 00000000..bc8ac7d1
--- /dev/null
+++ b/strata/bsp-x86_64-generic/linux-x86-64-generic.morph
@@ -0,0 +1,287 @@
+name: linux-x86-64-generic
+kind: chunk
+products:
+- artifact: linux-x86-64-generic-devel
+ include:
+ - (usr/)?src/linux/.*
+- artifact: linux-x86-64-generic-bins
+ include:
+ - boot/.*
+- artifact: linux-x86-64-generic-libs
+ include:
+ - lib/.*
+configure-commands:
+- make defconfig
+- scripts/config -e PACKET
+- scripts/config -e SATA_AHCI
+- scripts/config -e ATA_GENERIC
+- scripts/config -e HAVE_IDE
+- scripts/config -d BLK_DEV_IDE_SATA
+- scripts/config -e BTRFS_FS
+- scripts/config -e BTRFS_FS_POSIX_ACL
+- scripts/config -e DEVTMPFS
+- scripts/config -e DEVTMPFS_MOUNT
+- scripts/config -e CONFIG_FHANDLE
+- scripts/config -e DEVPTS_MULTIPLE_INSTANCES
+- scripts/config -e CGROUPS
+- scripts/config -e AUTOFS4_FS
+- scripts/config --set-str UEVENT_HELPER_PATH ''
+- scripts/config -e EXT2_FS
+- scripts/config -e EXT2_FS_XATTR
+- scripts/config -e EXT2_FS_POSIX_ACL
+- scripts/config -e EXT2_FS_SECURITY
+- scripts/config -e EXT2_FS_XIP
+- scripts/config -e EXT3_FS
+- scripts/config -d EXT3_DEFAULTS_TO_ORDERED
+- scripts/config -e EXT3_FS_XATTR
+- scripts/config -e EXT3_FS_POSIX_ACL
+- scripts/config -e EXT3_FS_SECURITY
+- scripts/config -e EXT4_FS
+- scripts/config -e EXT4_FS_XATTR
+- scripts/config -e EXT4_FS_POSIX_ACL
+- scripts/config -e EXT4_FS_SECURITY
+- scripts/config -d EXT4_DEBUG
+- scripts/config -e FUSE_FS
+- scripts/config -e OVERLAY_FS
+- scripts/config -e XFS_FS
+- scripts/config -e LIBCRC32C
+- scripts/config -d JBD_DEBUG
+- scripts/config -d JBD2_DEBUG
+- scripts/config -e BLK_DEV_LOOP
+- scripts/config -d BLK_DEV_CRYPTOLOOP
+- scripts/config -e CEPH_FS
+- scripts/config --set-val BLK_DEV_LOOP_MIN_COUNT 8
+- scripts/config -e DM_SNAPSHOT
+- scripts/config -e VIRTIO
+- scripts/config -e VIRTIO_RING
+- scripts/config -e VIRTIO_PCI
+- scripts/config -e VIRTIO_BALLOON
+- scripts/config -e VIRTIO_BLK
+- scripts/config -e VIRTIO_NET
+- scripts/config -e VIRTIO_CONSOLE
+- scripts/config -e HW_RANDOM_VIRTIO
+- scripts/config -e 9P_FS
+- scripts/config -e 9P_FSCACHE
+- scripts/config -e 9P_FS_POSIX_ACL
+- scripts/config -e NET_9P
+- scripts/config -e NET_9P_VIRTIO
+- scripts/config -e R8169
+- scripts/config -e 8139TOO
+- scripts/config -e 8139CP
+- scripts/config -e ATL1
+- scripts/config -e ATL1C
+- scripts/config -e ATL1E
+- scripts/config -e E100
+- scripts/config -e E1000
+- scripts/config -e E1000E
+- scripts/config -e USB_USBNET
+- scripts/config -e USB_NET_AX8817X
+- scripts/config -e USB_NET_AX88179_178A
+- scripts/config -e USB_NET_DM9601
+- scripts/config -e IKCONFIG
+- scripts/config -e IKCONFIG_PROC
+- scripts/config -e SECCOMP
+- scripts/config -d DEBUG_STACK_TRACE
+- scripts/config -e NFSD
+- scripts/config -e NFSD_V3
+- scripts/config -e KVM
+- scripts/config -e TUN
+- scripts/config -e BRIDGE
+- scripts/config -e VHOST_NET
+- scripts/config -e NF_NAT
+- scripts/config -e IP_NF_NAT
+- scripts/config -e IP_NF_TARGET_MASQUERADE
+- scripts/config -e FB_VESA
+- scripts/config -e HOTPLUG_PCI
+- scripts/config -e HOTPLUG_PCI_ACPI
+- scripts/config -e VLAN_8021Q
+- scripts/config -e BRIDGE_VLAN_FILTERING
+- scripts/config -e BLK_DEV_NBD
+- scripts/config -e BRIDGE_NF_EBTABLES
+- scripts/config -e NETFILTER
+- scripts/config -e NETFILTER_ADVANCED
+- scripts/config -e NETFILTER_XT_MATCH_ADDRTYPE
+- scripts/config -e OPENVSWITCH
+- scripts/config -e OPENVSWITCH_VXLAN
+- scripts/config -e CONFIG_OPENVSWITCH_GRE
+- scripts/config -e NET_CLS_BASIC
+- scripts/config -e NET_SCH_INGRESS
+- scripts/config -e NET_ACT_POLICE
+- scripts/config -e NET_IPGRE_DEMUX
+- scripts/config -e NET_SCH_HTB
+- scripts/config -e NET_SCH_HFSC
+- scripts/config -e VXLAN
+- scripts/config -e VETH
+- scripts/config -e IP_NF_MATCH_AH
+- scripts/config -e IP_NF_MATCH_ECN
+- scripts/config -e IP_NF_MATCH_RPFILTER
+- scripts/config -e IP_NF_MATCH_TTL
+- scripts/config -e IP_NF_TARGET_SYNPROXY
+- scripts/config -e IP_NF_TARGET_NETMAP
+- scripts/config -e IP_NF_TARGET_REDIRECT
+- scripts/config -e IP_NF_TARGET_CLUSTERIP
+- scripts/config -e IP_NF_TARGET_ECN
+- scripts/config -e IP_NF_TARGET_TTL
+- scripts/config -e IP_NF_RAW
+- scripts/config -e IP_NF_SECURITY
+- scripts/config -e IP_NF_ARPTABLES
+- scripts/config -e KVM_INTEL
+- scripts/config -e NETFILTER_NETLINK_QUEUE
+- scripts/config -e SCSI_NETLINK
+- scripts/config -e NETFILTER_XT_TARGET_MARK
+- scripts/config -e NETFILTER_XT_SET
+- scripts/config -e IP_SET
+- scripts/config -e IP_SET_BITMAP_IP
+- scripts/config -e IP_SET_BITMAP_IPMAC
+- scripts/config -e IP_SET_BITMAP_PORT
+- scripts/config -e IP_SET_HASH_IP
+- scripts/config -e IP_SET_HASH_IPMARK
+- scripts/config -e IP_SET_HASH_IPPORT
+- scripts/config -e IP_SET_HASH_IPPORTIP
+- scripts/config -e IP_SET_HASH_IPPORTNET
+- scripts/config -e IP_SET_HASH_MAC
+- scripts/config -e IP_SET_HASH_NETPORTNET
+- scripts/config -e IP_SET_HASH_NET
+- scripts/config -e IP_SET_HASH_NETNET
+- scripts/config -e IP_SET_HASH_NETPORT
+- scripts/config -e IP_SET_HASH_NETIFACE
+- scripts/config -e IP_SET_LIST_SET
+- scripts/config -e NF_CONNTRACK_TIMEOUT
+- scripts/config -e NF_CONNTRACK_TIMESTAMP
+- scripts/config -e NF_CONNTRACK_EVENTS
+- scripts/config -e NF_CONNTRACK_LABELS
+- scripts/config -e NETFILTER_NETLINK_ACCT
+- scripts/config -e NETFILTER_NETLINK_QUEUE_CT
+- scripts/config -e NF_CT_PROTO_DCCP
+- scripts/config -e NF_CT_PROTO_GRE
+- scripts/config -e NF_CT_PROTO_SCTP
+- scripts/config -e NF_CT_PROTO_UDPLITE
+- scripts/config -e NF_CT_NETLINK_TIMEOUT
+- scripts/config -e NF_CT_NETLINK_HELPER
+- scripts/config -e NF_CONNTRACK_AMANDA
+- scripts/config -e NF_CONNTRACK_H323
+- scripts/config -e NF_CONNTRACK_BROADCAST
+- scripts/config -e NF_CONNTRACK_NETBIOS_NS
+- scripts/config -e NF_CONNTRACK_SNMP
+- scripts/config -e NF_CONNTRACK_PPTP
+- scripts/config -e NF_CONNTRACK_SANE
+- scripts/config -e NF_CONNTRACK_TFTP
+- scripts/config -e NF_LOG_COMMON
+- scripts/config -e NF_NAT_PROTO_DCCP
+- scripts/config -e NF_NAT_PROTO_UDPLITE
+- scripts/config -e NF_NAT_PROTO_SCTP
+- scripts/config -e NF_NAT_AMANDA
+- scripts/config -e NF_NAT_TFTP
+- scripts/config -e NF_TABLES
+- scripts/config -e NF_TABLES_INET
+- scripts/config -e NFT_COMPAT
+- scripts/config -e NFT_EXTHDR
+- scripts/config -e NFT_META
+- scripts/config -e NFT_CT
+- scripts/config -e NFT_LIMIT
+- scripts/config -e NFT_NAT
+- scripts/config -e NFT_QUEUE
+- scripts/config -e NFT_REJECT
+- scripts/config -e NFT_REJECT_INET
+- scripts/config -e NFT_RBTREE
+- scripts/config -e NFT_HASH
+- scripts/config -e NFT_COUNTER
+- scripts/config -e NFT_LOG
+- scripts/config -e NFT_MASQ
+- scripts/config -e NETFILTER_XT_CONNMARK
+- scripts/config -e NETFILTER_XT_TARGET_AUDIT
+- scripts/config -e NETFILTER_XT_TARGET_CHECKSUM
+- scripts/config -e NETFILTER_XT_TARGET_CLASSIFY
+- scripts/config -e NETFILTER_XT_TARGET_CT
+- scripts/config -e NETFILTER_XT_TARGET_DSCP
+- scripts/config -e NETFILTER_XT_TARGET_HMARK
+- scripts/config -e NETFILTER_XT_TARGET_LED
+- scripts/config -e NETFILTER_XT_TARGET_LOG
+- scripts/config -e NETFILTER_XT_TARGET_NFQUEUE
+- scripts/config -e NETFILTER_XT_TARGET_RATEEST
+- scripts/config -e NETFILTER_XT_TARGET_TPROXY
+- scripts/config -e NETFILTER_XT_TARGET_TCPOPTSTRIP
+- scripts/config -e NETFILTER_XT_TARGET_TEE
+- scripts/config -e NETFILTER_XT_TARGET_TRACE
+- scripts/config -e NETFILTER_XT_TARGET_IDLETIMER
+- scripts/config -e NETFILTER_XT_MATCH_BPF
+- scripts/config -e NETFILTER_XT_MATCH_CLUSTER
+- scripts/config -e NETFILTER_XT_MATCH_COMMENT
+- scripts/config -e NETFILTER_XT_MATCH_CONNBYTES
+- scripts/config -e NETFILTER_XT_MATCH_CONNLABEL
+- scripts/config -e NETFILTER_XT_MATCH_CONNLIMIT
+- scripts/config -e NETFILTER_XT_MATCH_CPU
+- scripts/config -e NETFILTER_XT_MATCH_DCCP
+- scripts/config -e NETFILTER_XT_MATCH_DEVGROUP
+- scripts/config -e NETFILTER_XT_MATCH_DSCP
+- scripts/config -e NETFILTER_XT_MATCH_ESP
+- scripts/config -e NETFILTER_XT_MATCH_HASHLIMIT
+- scripts/config -e NETFILTER_XT_MATCH_HELPER
+- scripts/config -e NETFILTER_XT_MATCH_IPCOMP
+- scripts/config -e NETFILTER_XT_MATCH_IPRANGE
+- scripts/config -e NETFILTER_XT_MATCH_IPVS
+- scripts/config -e NETFILTER_XT_MATCH_L2TP
+- scripts/config -e NETFILTER_XT_MATCH_LENGTH
+- scripts/config -e NETFILTER_XT_MATCH_LIMIT
+- scripts/config -e NETFILTER_XT_MATCH_MAC
+- scripts/config -e NETFILTER_XT_MATCH_MULTIPORT
+- scripts/config -e NETFILTER_XT_MATCH_NFACCT
+- scripts/config -e NETFILTER_XT_MATCH_OSF
+- scripts/config -e NETFILTER_XT_MATCH_OWNER
+- scripts/config -e NETFILTER_XT_MATCH_CGROUP
+- scripts/config -e NETFILTER_XT_MATCH_PHYSDEV
+- scripts/config -e NETFILTER_XT_MATCH_PKTTYPE
+- scripts/config -e NETFILTER_XT_MATCH_QUOTA
+- scripts/config -e NETFILTER_XT_MATCH_RATEEST
+- scripts/config -e NETFILTER_XT_MATCH_REALM
+- scripts/config -e NETFILTER_XT_MATCH_RECENT
+- scripts/config -e NETFILTER_XT_MATCH_SCTP
+- scripts/config -e NETFILTER_XT_MATCH_SOCKET
+- scripts/config -e NETFILTER_XT_MATCH_STATISTIC
+- scripts/config -e NETFILTER_XT_MATCH_STRING
+- scripts/config -e NETFILTER_XT_MATCH_TCPMSS
+- scripts/config -e NETFILTER_XT_MATCH_TIME
+- scripts/config -e NETFILTER_XT_MATCH_U32
+- scripts/config -e IP_VS
+- scripts/config -e BRIDGE_NETFILTER
+- scripts/config -e CRYPTO_CRC32C
+- scripts/config -e CONFIGFS_FS
+- scripts/config -e EXPERT
+- scripts/config -e TARGET_CORE
+- scripts/config -e ISCSI_TARGET
+- scripts/config -e TCM_IBLOCK
+- scripts/config -e TCM_FILEIO
+- scripts/config -e TCM_PSCSI
+- scripts/config -e TCM_USER
+- scripts/config -e CONFIG_UIO
+- scripts/config -e LOOPBACK_TARGET
+- scripts/config -e TCM_FC
+- scripts/config -e LIBFC
+- scripts/config -e SCSI_FC_ATTRS
+- scripts/config -e SCSI_ISCSI_ATTRS
+- scripts/config -e ISCSI_TCP
+- scripts/config -e SCSI_LOWLEVEL
+- scripts/config -e SCSI_VIRTIO
+- scripts/config -e HYPERVISOR_GUEST
+- scripts/config -e PARAVIRT
+- yes '' | make oldconfig
+build-commands:
+- make $MAKEFLAGS
+install-commands:
+- mkdir -p "$DESTDIR"/boot
+- make INSTALL_PATH="$DESTDIR"/boot install
+- make INSTALL_MOD_PATH="$DESTDIR" modules_install
+- install -d "$DESTDIR$PREFIX/src/linux"
+- |
+ (
+ printf 'Makefile\0'
+ printf 'Module.symvers\0'
+ find arch/x86 -maxdepth 1 -name 'Makefile*' -print0
+ find arch/x86 \( -name 'module.lds' -o -name 'Kbuild.platforms' -o -name 'Platform' \) -print0
+ find arch/x86 \( -type d -a \( -name include -o -name scripts \) \) -o \
+ \! -type d -a \( -path '*include/*' -o -path '*scripts/*' \) -print0
+ find include -name 'asm*' -prune -o -print0
+ find include/asm-generic -print0
+ find include/uapi -print0
+ find scripts -print0
+ ) | cpio -0pumd "$DESTDIR$PREFIX/src/linux"
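The find/cpio pipeline above is a generic pass-through copy of a NUL-separated file list into the target tree. A minimal standalone sketch of the same technique, using hypothetical paths (README, docs/, /tmp/copy-dest) rather than the kernel tree:

    # copy an explicit, NUL-separated file list into /tmp/copy-dest
    ( printf 'README\0'; find docs -type f -print0 ) |
        cpio -0pumd /tmp/copy-dest
    # -0 NUL-separated input, -p pass-through (copy) mode, -u overwrite,
    # -m preserve mtimes, -d create leading directories under the target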
diff --git a/strata/bsp-x86_64-generic/nasm.morph b/strata/bsp-x86_64-generic/nasm.morph
new file mode 100644
index 00000000..0ab1a629
--- /dev/null
+++ b/strata/bsp-x86_64-generic/nasm.morph
@@ -0,0 +1,5 @@
+name: nasm
+kind: chunk
+build-system: autotools
+install-commands:
+- make INSTALLROOT="$DESTDIR" install
diff --git a/strata/bsp-x86_64-generic/syslinux.morph b/strata/bsp-x86_64-generic/syslinux.morph
new file mode 100644
index 00000000..4570865b
--- /dev/null
+++ b/strata/bsp-x86_64-generic/syslinux.morph
@@ -0,0 +1,12 @@
+name: syslinux
+kind: chunk
+products:
+- artifact: syslinux-devel
+ include:
+ - (usr/)?share/syslinux/com32.*
+build-commands:
+- make clean
+- make NO_WERROR=1
+- make NO_WERROR=1 installer
+install-commands:
+- make INSTALLROOT="$DESTDIR" install
diff --git a/strata/bsp-x86_both-tools.morph b/strata/bsp-x86_both-tools.morph
new file mode 100644
index 00000000..f7212054
--- /dev/null
+++ b/strata/bsp-x86_both-tools.morph
@@ -0,0 +1,19 @@
+name: bsp-x86_both-tools
+kind: stratum
+description: The set of platform specific components required for configuring a bootable
+ x86 based system.
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: nasm
+ morph: strata/bsp-x86_both-tools/nasm.morph
+ repo: upstream:nasm
+ ref: 78bdad3d14fb875d5f2062957e326ba2a9e4ccb0
+ unpetrify-ref: baserock/morph
+- name: syslinux
+ morph: strata/bsp-x86_both-tools/syslinux.morph
+ repo: upstream:syslinux
+ ref: d715b39c0801ecea5e52f9029cea7c76320f93cf
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - nasm
diff --git a/strata/bsp-x86_both-tools/nasm.morph b/strata/bsp-x86_both-tools/nasm.morph
new file mode 100644
index 00000000..0ab1a629
--- /dev/null
+++ b/strata/bsp-x86_both-tools/nasm.morph
@@ -0,0 +1,5 @@
+name: nasm
+kind: chunk
+build-system: autotools
+install-commands:
+- make INSTALLROOT="$DESTDIR" install
diff --git a/strata/bsp-x86_both-tools/syslinux.morph b/strata/bsp-x86_both-tools/syslinux.morph
new file mode 100644
index 00000000..4570865b
--- /dev/null
+++ b/strata/bsp-x86_both-tools/syslinux.morph
@@ -0,0 +1,12 @@
+name: syslinux
+kind: chunk
+products:
+- artifact: syslinux-devel
+ include:
+ - (usr/)?share/syslinux/com32.*
+build-commands:
+- make clean
+- make NO_WERROR=1
+- make NO_WERROR=1 installer
+install-commands:
+- make INSTALLROOT="$DESTDIR" install
diff --git a/strata/build-essential.morph b/strata/build-essential.morph
new file mode 100644
index 00000000..4a03fe35
--- /dev/null
+++ b/strata/build-essential.morph
@@ -0,0 +1,411 @@
+name: build-essential
+kind: stratum
+description: |
+ Toolchain stratum
+
+ Stage 1: build a minimal cross compiler with the host's tools.
+
+ Starting with a cross compiler ensures that (a) nothing from the host
+ can leak into the build-essential artifacts, and (b) cross-compiling
+ build-essential is fully tested and supported, since we always use the
+ cross code paths.
+
+ Stage 2: cross-build the whole of build-essential, using the host's tools
+ but the cross-compiler toolchain.
+
+ Stage 2 GCC outputs code for the same 'bootstrap' machine as stage 1 GCC,
+ but because stage 2 GCC is also built to *run* on the bootstrap machine
+ it can only execute inside the stage 3 chroot (due to being built against
+ a libc with a non-standard prefix).
+
+ Stage 3: build the whole of build-essential again, this time using a
+ staging area containing only the output of stage 2. The result of this
+ build is fully reproducible.
+
+  We do a switch-a-roo between stage 2 and 3: stage 2 chunks are all built
+  to run on a host *-bootstrap-* while stage 3 chunks are native-built for
+  a *-baserock-* machine. This works because the cross build was all for
+  show (and cleanliness) and the binaries actually still run on the host.
+
+ After build-essential is built we do another trick. See
+ stage2-fhs-dirs.morph for details. Basically, /bin is a symlink to
+ /tools/bin during stage 2 but in stage 3 it becomes a real directory
+ again.
+
+ PLEASE KEEP THE REFS IN 'armv7lhf-cross-toolchain' STRATUM UP TO DATE WITH
+ THIS ONE!
+
+ Please note that the chunk 'build-depends' field is treated differently in
+ this stratum to how other strata in definitions.git use it. Other strata
+ rely on the fact that dependencies of a dependency (called 'transitive
+ dependencies') are implicitly included in the staging area. Within
+ build-essential, some chunks list transitive dependencies explicitly to
+ make clearer what is going on.
+products:
+- artifact: build-essential-minimal
+ include:
+ - fhs-dirs-.*
+ - busybox-.*
+ - glibc-nss
+chunks:
+- name: stage1-binutils
+ morph: strata/build-essential/stage1-binutils.morph
+ repo: upstream:binutils-redhat
+ ref: b1d3b01332ae49a60ff5d6bf53d3a5b1805769c8
+ unpetrify-ref: baserock/build-essential
+ build-mode: bootstrap
+ prefix: /tools
+
+- name: stage1-gcc
+ morph: strata/build-essential/stage1-gcc.morph
+ repo: upstream:gcc-tarball
+ ref: b3c9b176c1f10ebeff5700eb3760e9511f23fa06
+ unpetrify-ref: baserock/build-essential
+ build-depends:
+ - stage1-binutils
+ build-mode: bootstrap
+ prefix: /tools
+
+- name: stage2-linux-api-headers
+ morph: strata/build-essential/stage2-linux-api-headers.morph
+ repo: upstream:linux
+ ref: 39a8804455fb23f09157341d3ba7db6d7ae6ee76
+ unpetrify-ref: v4.0
+ build-depends:
+ - stage1-binutils
+ - stage1-gcc
+ build-mode: bootstrap
+ prefix: /tools
+
+- name: stage2-glibc
+ morph: strata/build-essential/stage2-glibc.morph
+ repo: upstream:glibc
+ ref: 4e42b5b8f89f0e288e68be7ad70f9525aebc2cff
+ unpetrify-ref: glibc-2.21
+ build-depends:
+ - stage1-binutils
+ - stage1-gcc
+ - stage2-linux-api-headers
+ build-mode: bootstrap
+ prefix: /tools
+
+- name: stage2-libstdc++
+ morph: strata/build-essential/stage2-libstdc++.morph
+ repo: upstream:gcc-tarball
+ ref: b3c9b176c1f10ebeff5700eb3760e9511f23fa06
+ unpetrify-ref: baserock/build-essential
+ build-depends:
+ - stage1-binutils
+ - stage1-gcc
+ - stage2-linux-api-headers
+ - stage2-glibc
+ build-mode: bootstrap
+ prefix: /tools
+
+- name: stage2-binutils
+ morph: strata/build-essential/stage2-binutils.morph
+ repo: upstream:binutils-redhat
+ ref: b1d3b01332ae49a60ff5d6bf53d3a5b1805769c8
+ unpetrify-ref: baserock/build-essential
+ build-depends:
+ - stage1-binutils
+ - stage1-gcc
+ - stage2-linux-api-headers
+ - stage2-glibc
+ build-mode: bootstrap
+ prefix: /tools
+
+- name: stage2-gcc-fixed-headers
+ morph: strata/build-essential/stage2-gcc-fixed-headers.morph
+ repo: upstream:gcc-tarball
+ ref: b3c9b176c1f10ebeff5700eb3760e9511f23fa06
+ unpetrify-ref: baserock/build-essential
+ build-depends:
+ - stage1-binutils
+ - stage1-gcc
+ - stage2-linux-api-headers
+ - stage2-glibc
+ build-mode: bootstrap
+ prefix: /tools
+
+- name: stage2-gcc
+ morph: strata/build-essential/stage2-gcc.morph
+ repo: upstream:gcc-tarball
+ ref: b3c9b176c1f10ebeff5700eb3760e9511f23fa06
+ unpetrify-ref: baserock/build-essential
+ build-depends:
+ - stage1-binutils
+ - stage1-gcc
+ - stage2-linux-api-headers
+ - stage2-glibc
+ - stage2-gcc-fixed-headers
+ - stage2-libstdc++
+ build-mode: bootstrap
+ prefix: /tools
+
+- name: stage2-busybox
+ morph: strata/build-essential/stage2-busybox.morph
+ repo: upstream:busybox
+ ref: 1ecfe811fe2f70380170ef7d820e8150054e88ca
+ unpetrify-ref: 1_23_1
+ build-depends:
+ - stage1-binutils
+ - stage1-gcc
+ - stage2-linux-api-headers
+ - stage2-glibc
+ build-mode: bootstrap
+ prefix: /tools
+
+- name: stage2-fake-bash
+ morph: strata/build-essential/stage2-fake-bash.morph
+ repo: upstream:bash
+ ref: 3590145af6f1c9fa321dff231f69ae696e7e740b
+ unpetrify-ref: baserock/bash-4.3-patch-27
+ build-mode: bootstrap
+ prefix: /tools
+
+- name: stage2-fhs-dirs
+ morph: strata/build-essential/stage2-fhs-dirs.morph
+ repo: baserock:baserock/fhs-dirs
+ ref: 1218cbd38a4f4e9da75af6f7fae946ca92666afb
+ unpetrify-ref: master
+ build-mode: bootstrap
+ prefix: /tools
+
+- name: stage2-gawk
+ morph: strata/build-essential/stage2-gawk.morph
+ repo: upstream:gawk
+ ref: 925f9363c4b0a5bb9375298afcdcf404efb32587
+ unpetrify-ref: gawk-4.1-stable
+ build-depends:
+ - stage1-binutils
+ - stage1-gcc
+ - stage2-linux-api-headers
+ - stage2-glibc
+ build-mode: bootstrap
+ prefix: /tools
+
+- name: stage2-make
+ morph: strata/build-essential/stage2-make.morph
+ repo: upstream:make-tarball
+ ref: f75919b038da8a28388a911303fb86ed7a70ea2c
+ unpetrify-ref: make-4.1
+ build-depends:
+ - stage1-binutils
+ - stage1-gcc
+ - stage2-linux-api-headers
+ - stage2-glibc
+ build-mode: bootstrap
+ prefix: /tools
+
+- name: stage2-reset-specs
+ morph: strata/build-essential/stage2-reset-specs.morph
+ repo: upstream:glibc
+ ref: 4e42b5b8f89f0e288e68be7ad70f9525aebc2cff
+ unpetrify-ref: glibc-2.21
+ build-depends:
+ - stage1-binutils
+ - stage1-gcc
+ - stage2-linux-api-headers
+ - stage2-glibc
+ build-mode: bootstrap
+ prefix: /tools
+
+- name: fhs-dirs
+ morph: strata/build-essential/fhs-dirs.morph
+ repo: baserock:baserock/fhs-dirs
+ ref: 1218cbd38a4f4e9da75af6f7fae946ca92666afb
+ unpetrify-ref: master
+ build-depends:
+ - stage2-binutils
+ - stage2-busybox
+ - stage2-glibc
+ - stage2-fhs-dirs
+ - stage2-gawk
+ - stage2-gcc
+ - stage2-linux-api-headers
+ - stage2-make
+ - stage2-reset-specs
+
+- name: linux-api-headers
+ morph: strata/build-essential/linux-api-headers.morph
+ repo: upstream:linux
+ ref: 39a8804455fb23f09157341d3ba7db6d7ae6ee76
+ unpetrify-ref: v4.0
+ build-depends:
+ - stage2-binutils
+ - stage2-busybox
+ - stage2-glibc
+ - stage2-fhs-dirs
+ - stage2-gawk
+ - stage2-gcc
+ - stage2-linux-api-headers
+ - stage2-make
+ - stage2-reset-specs
+
+- name: glibc
+ morph: strata/build-essential/glibc.morph
+ repo: upstream:glibc
+ ref: 4e42b5b8f89f0e288e68be7ad70f9525aebc2cff
+ unpetrify-ref: glibc-2.21
+ build-depends:
+ - stage2-binutils
+ - stage2-busybox
+ - stage2-fake-bash
+ - stage2-glibc
+ - stage2-fhs-dirs
+ - stage2-gawk
+ - stage2-gcc
+ - stage2-linux-api-headers
+ - stage2-make
+ - stage2-reset-specs
+ - linux-api-headers
+ artifacts:
+ glibc-gconv: build-essential-runtime
+ glibc-libs: build-essential-minimal
+ glibc-nss: build-essential-runtime
+
+- name: zlib
+ morph: strata/build-essential/zlib.morph
+ repo: upstream:zlib
+ ref: db333af7e9b90a23fd7f9cd8dc128123b34bf698
+ unpetrify-ref: baserock/build-essential
+ build-depends:
+ - stage2-binutils
+ - stage2-busybox
+ - stage2-glibc
+ - stage2-fhs-dirs
+ - stage2-gawk
+ - stage2-gcc
+ - stage2-linux-api-headers
+ - stage2-make
+ - stage2-reset-specs
+ - glibc
+ artifacts:
+ zlib-libs: build-essential-minimal
+
+- name: binutils
+ morph: strata/build-essential/binutils.morph
+ repo: upstream:binutils-redhat
+ ref: b1d3b01332ae49a60ff5d6bf53d3a5b1805769c8
+ unpetrify-ref: baserock/build-essential
+ build-depends:
+ - stage2-binutils
+ - stage2-busybox
+ - stage2-glibc
+ - stage2-fhs-dirs
+ - stage2-gawk
+ - stage2-gcc
+ - stage2-linux-api-headers
+ - stage2-make
+ - stage2-reset-specs
+ - glibc
+ - zlib
+
+- name: busybox
+ morph: strata/build-essential/busybox.morph
+ repo: upstream:busybox
+ ref: 1ecfe811fe2f70380170ef7d820e8150054e88ca
+ unpetrify-ref: 1_23_1
+ build-depends:
+ - stage2-binutils
+ - stage2-busybox
+ - stage2-glibc
+ - stage2-fhs-dirs
+ - stage2-gawk
+ - stage2-gcc
+ - stage2-linux-api-headers
+ - stage2-make
+ - stage2-reset-specs
+ - glibc
+
+- name: gawk
+ morph: strata/build-essential/gawk.morph
+ repo: upstream:gawk
+ ref: dc5af665700d9b04fdf9c18930526d28eef5d5d9
+ unpetrify-ref: gawk-4.1.1
+ build-depends:
+ - stage2-binutils
+ - stage2-busybox
+ - stage2-glibc
+ - stage2-fhs-dirs
+ - stage2-gawk
+ - stage2-gcc
+ - stage2-linux-api-headers
+ - stage2-make
+ - stage2-reset-specs
+ - glibc
+
+- name: m4-tarball
+ morph: strata/build-essential/m4-tarball.morph
+ repo: upstream:m4-tarball
+ ref: 23c11479b3ad787adc7a651ee0c4347839e47723
+ unpetrify-ref: m4-1.4.17
+ build-depends:
+ - stage2-binutils
+ - stage2-busybox
+ - stage2-glibc
+ - stage2-fhs-dirs
+ - stage2-gawk
+ - stage2-gcc
+ - stage2-linux-api-headers
+ - stage2-make
+ - stage2-reset-specs
+ - glibc
+
+- name: gcc
+ morph: strata/build-essential/gcc.morph
+ repo: upstream:gcc-tarball
+ ref: b3c9b176c1f10ebeff5700eb3760e9511f23fa06
+ unpetrify-ref: baserock/build-essential
+ build-depends:
+ - stage2-binutils
+ - stage2-busybox
+ - stage2-glibc
+ - stage2-fhs-dirs
+ - stage2-gawk
+ - stage2-gcc
+ - stage2-linux-api-headers
+ - stage2-make
+ - stage2-reset-specs
+ - glibc
+ - zlib
+ - m4-tarball
+ - linux-api-headers
+ artifacts:
+ gcc-libs: build-essential-minimal
+
+- name: make
+ morph: strata/build-essential/make.morph
+ repo: upstream:make-tarball
+ ref: f75919b038da8a28388a911303fb86ed7a70ea2c
+ unpetrify-ref: make-4.1
+ build-depends:
+ - stage2-binutils
+ - stage2-busybox
+ - stage2-glibc
+ - stage2-fhs-dirs
+ - stage2-gawk
+ - stage2-gcc
+ - stage2-linux-api-headers
+ - stage2-make
+ - stage2-reset-specs
+ - glibc
+
+- name: ccache
+ morph: strata/build-essential/ccache.morph
+ repo: upstream:ccache
+ ref: 567631456f0899cdf0c382f898d38aadc8901d32
+ unpetrify-ref: baserock/build-essential
+ build-depends:
+ - stage2-binutils
+ - stage2-busybox
+ - stage2-glibc
+ - stage2-fhs-dirs
+ - stage2-gawk
+ - stage2-gcc
+ - stage2-linux-api-headers
+ - stage2-make
+ - stage2-reset-specs
+ - glibc
+ - zlib
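The description's point that stage 2 binaries can only execute inside the stage 3 chroot can be checked on a built artifact; a sketch, assuming an x86_64 build and an unpacked stage 2 tree under a hypothetical $STAGE2 directory:

    readelf -l "$STAGE2/tools/bin/make" | grep 'program interpreter'
    #   [Requesting program interpreter: /tools/lib/ld-linux-x86-64.so.2]
    # the requested loader only exists under /tools inside the staging
    # chroot, so the binary will not start on an ordinary host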
diff --git a/strata/build-essential/binutils.morph b/strata/build-essential/binutils.morph
new file mode 100644
index 00000000..bab52672
--- /dev/null
+++ b/strata/build-essential/binutils.morph
@@ -0,0 +1,8 @@
+name: binutils
+kind: chunk
+build-system: autotools
+
+configure-commands:
+- |
+ ./configure --prefix="$PREFIX" --disable-nls --disable-werror \
+ --with-system-zlib
diff --git a/strata/build-essential/busybox.morph b/strata/build-essential/busybox.morph
new file mode 100644
index 00000000..b5418bc0
--- /dev/null
+++ b/strata/build-essential/busybox.morph
@@ -0,0 +1,81 @@
+name: busybox
+kind: chunk
+
+configure-commands:
+# Busybox's default config has everything enabled.
+- make defconfig
+
+- sed -e 's|.*UDHCPC_DEFAULT_SCRIPT.*|CONFIG_UDHCPC_DEFAULT_SCRIPT="'"$PREFIX"/share/udhcpc/default.script'"|' -i .config
+- sed -e 's|.*IFUPDOWN_IFSTATE_PATH.*|CONFIG_IFUPDOWN_IFSTATE_PATH="/run/ifstate"|' -i .config
+
+# Avoid dividing applets between $PREFIX/[s]bin and $PREFIX/usr/[s]bin.
+- '[ "$PREFIX" = /usr ] || sed -e ''s/.*INSTALL_NO_USR.*/CONFIG_INSTALL_NO_USR=y/'' -i .config'
+
+# We have GAWK, but in GENIVI baseline we want to get rid of it
+# - sed -e 's/CONFIG_AWK=y.*/# CONFIG_AWK is not set/' -i .config
+
+# Depends on stuff that was removed since eglibc 2.14.
+- sed -e 's/CONFIG_INETD=y.*/# CONFIG_INETD is not set/' -i .config
+
+# Busybox Patch is incompatible enough with GNU Patch that it can't be
+# used for GNULib projects built from Git.
+- sed -e 's/CONFIG_PATCH=y.*/# CONFIG_PATCH is not set/' -i .config
+
+# None of this is needed because we have kmod; and it actually breaks the
+# Linux build because depmod isn't compatible enough with util-linux's.
+- sed -e 's/CONFIG_DEPMOD=y.*/# CONFIG_DEPMOD is not set/' -i .config
+- sed -e 's/CONFIG_INSMOD=y.*/# CONFIG_INSMOD is not set/' -i .config
+- sed -e 's/CONFIG_MODPROBE=y.*/# CONFIG_MODPROBE is not set/' -i .config
+- sed -e 's/CONFIG_MODPROBE_SMALL=y.*/# CONFIG_MODPROBE_SMALL is not set/' -i .config
+- sed -e 's/CONFIG_LSMOD=y.*/# CONFIG_LSMOD is not set/' -i .config
+- sed -e 's/CONFIG_RMMOD=y.*/# CONFIG_RMMOD is not set/' -i .config
+
+# General features that we don't need.
+- sed -e 's/CONFIG_FEATURE_MOUNT_CIFS=y.*/# CONFIG_FEATURE_MOUNT_CIFS is not set/' -i .config
+- sed -e 's/CONFIG_FEATURE_EXTRA_QUIET=y.*/# CONFIG_FEATURE_EXTRA_QUIET is not set/' -i .config
+- sed -e 's/CONFIG_FEATURE_INIT_COREDUMPS=y.*/# CONFIG_FEATURE_INIT_COREDUMPS is not set/' -i .config
+- sed -e 's/CONFIG_FEATURE_INIT_SCTTY=y.*/# CONFIG_FEATURE_INIT_SCTTY is not set/' -i .config
+- sed -e 's/CONFIG_FEATURE_INIT_SYSLOG=y.*/# CONFIG_FEATURE_INIT_SYSLOG is not set/' -i .config
+- sed -e 's/CONFIG_FEATURE_INITRD=y.*/# CONFIG_FEATURE_INITRD is not set/' -i .config
+- sed -e 's/CONFIG_FEATURE_MINIX2=y.*/# CONFIG_FEATURE_MINIX2 is not set/' -i .config
+- sed -e 's/CONFIG_FSCK_MINIX=y.*/# CONFIG_FSCK_MINIX is not set/' -i .config
+- sed -e 's/CONFIG_LOSETUP=y.*/# CONFIG_LOSETUP is not set/' -i .config
+- sed -e 's/CONFIG_LSUSB=y.*/# CONFIG_LSUSB is not set/' -i .config
+- sed -e 's/CONFIG_LSPCI=y.*/# CONFIG_LSPCI is not set/' -i .config
+- sed -e 's/CONFIG_LZMA=y.*/# CONFIG_LZMA is not set/' -i .config
+- sed -e 's/CONFIG_MKFS_EXT2=y.*/# CONFIG_MKFS_EXT2 is not set/' -i .config
+- sed -e 's/CONFIG_MKFS_MINIX=y.*/# CONFIG_MKFS_MINIX is not set/' -i .config
+- sed -e 's/CONFIG_STRINGS=y.*/# CONFIG_STRINGS is not set/' -i .config
+- sed -e 's/CONFIG_UNLZMA=y.*/# CONFIG_UNLZMA is not set/' -i .config
+- sed -e 's/CONFIG_UNXZ=y.*/# CONFIG_UNXZ is not set/' -i .config
+- sed -e 's/CONFIG_XZ=y.*/# CONFIG_XZ is not set/' -i .config
+
+# Now turn on some little bits we do need
+- sed -e 's/# CONFIG_BBCONFIG is not set/CONFIG_BBCONFIG=y/' -i .config
+- sed -e 's/# CONFIG_FEATURE_COMPRESS_BBCONFIG is not set/CONFIG_FEATURE_COMPRESS_BBCONFIG=y/' -i .config
+- sed -e 's/# CONFIG_FEATURE_MOUNT_HELPERS is not set/CONFIG_FEATURE_MOUNT_HELPERS=y/' -i .config
+
+
+build-commands:
+- make
+
+install-commands:
+- |
+ if [ "$PREFIX" = /usr ]; then PREFIX=; fi &&
+ make CONFIG_PREFIX="$DESTDIR$PREFIX" install &&
+ chmod 6755 "$DESTDIR$PREFIX"/bin/busybox
+
+# Set up man environment variables
+- mkdir -p "$DESTDIR"/etc
+- |
+ cat << EOF > "$DESTDIR/etc/man.conf"
+ # This file is used by man to provide a manpath for those without one by
+ # examining their PATH environment variable.
+ #
+ # Lines beginning with `#' are comments and are ignored. Any combination of
+ # tabs or spaces may be used as `whitespace' separators.
+
+ MANDATORY_MANPATH /usr/man
+ MANDATORY_MANPATH /usr/share/man
+ MANDATORY_MANPATH /usr/local/share/man
+ EOF
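The .config editing above relies on Kconfig's convention that an option is stored either as 'CONFIG_FOO=y' or as the comment '# CONFIG_FOO is not set'. A throwaway sketch of the disable/enable sed pattern, run on a scratch file rather than a real busybox tree:

    printf 'CONFIG_PATCH=y\n# CONFIG_BBCONFIG is not set\n' > .config.test
    sed -e 's/CONFIG_PATCH=y.*/# CONFIG_PATCH is not set/' -i .config.test
    sed -e 's/# CONFIG_BBCONFIG is not set/CONFIG_BBCONFIG=y/' -i .config.test
    cat .config.test
    #   # CONFIG_PATCH is not set
    #   CONFIG_BBCONFIG=y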
diff --git a/strata/build-essential/ccache.morph b/strata/build-essential/ccache.morph
new file mode 100644
index 00000000..383eee8a
--- /dev/null
+++ b/strata/build-essential/ccache.morph
@@ -0,0 +1,12 @@
+name: ccache
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./configure --prefix="$PREFIX"
+install-commands:
+- make DESTDIR="$DESTDIR" install
+- mkdir -p "$DESTDIR/$PREFIX/lib/ccache"
+- for cc in gcc cc g++ c++; do ln -sf "$PREFIX/bin/ccache" "$DESTDIR/$PREFIX/lib/ccache/$cc";
+ done
+- for cc in gcc cc g++ c++; do ln -sf "$PREFIX/bin/ccache" "$DESTDIR/$PREFIX/lib/ccache/$TARGET-$cc";
+ done
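The symlink farm installed above is the usual ccache masquerade setup: a directory of compiler-named links to ccache that is placed ahead of the real compilers in PATH. A sketch of how a build would pick it up, with hypothetical paths:

    export PATH=/usr/lib/ccache:$PATH
    command -v gcc     # /usr/lib/ccache/gcc, a symlink to ccache
    gcc -c hello.c     # goes through ccache, which execs the real gcc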
diff --git a/strata/build-essential/fhs-dirs.morph b/strata/build-essential/fhs-dirs.morph
new file mode 100644
index 00000000..f4a1b1e9
--- /dev/null
+++ b/strata/build-essential/fhs-dirs.morph
@@ -0,0 +1,49 @@
+name: fhs-dirs
+kind: chunk
+description: create the FHS 2.3 directory hierarchy and the usual files
+install-commands:
+- sh ./create-fhs-dirs "$DESTDIR"
+- install -m 644 passwd "$DESTDIR/etc/passwd"
+- install -m 600 shadow "$DESTDIR/etc/shadow"
+- install -m 644 interfaces "$DESTDIR/etc/network/interfaces"
+- install -m 644 group "$DESTDIR/etc/group"
+- install -m 644 issue "$DESTDIR/etc/issue"
+- install -m 644 services "$DESTDIR/etc/services"
+- install -m 644 protocols "$DESTDIR/etc/protocols"
+- echo baserock > "$DESTDIR"/etc/hostname
+devices:
+- type: c
+ filename: /dev/console
+ gid: 0
+ major: 5
+ minor: 1
+ permissions: '0600'
+ uid: 0
+- type: c
+ filename: /dev/full
+ gid: 0
+ major: 1
+ minor: 7
+ permissions: '0666'
+ uid: 0
+- type: c
+ filename: /dev/null
+ gid: 0
+ major: 1
+ minor: 3
+ permissions: '0666'
+ uid: 0
+- type: c
+ filename: /dev/urandom
+ gid: 0
+ major: 1
+ minor: 9
+ permissions: '0666'
+ uid: 0
+- type: c
+ filename: /dev/zero
+ gid: 0
+ major: 1
+ minor: 5
+ permissions: '0666'
+ uid: 0
diff --git a/strata/build-essential/gawk.morph b/strata/build-essential/gawk.morph
new file mode 100644
index 00000000..1ad871c6
--- /dev/null
+++ b/strata/build-essential/gawk.morph
@@ -0,0 +1,5 @@
+name: gawk
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./configure --prefix="$PREFIX" --disable-nls
diff --git a/strata/build-essential/gcc.morph b/strata/build-essential/gcc.morph
new file mode 100644
index 00000000..a9d25ac1
--- /dev/null
+++ b/strata/build-essential/gcc.morph
@@ -0,0 +1,64 @@
+name: gcc
+kind: chunk
+
+products:
+- artifact: gcc-libs
+ include:
+ - (usr/)lib/lib.*\.so(\.\d+)*$
+- artifact: gcc-doc
+ include:
+ - (usr/)?share/doc/.*
+ - (usr/)?share/man/.*
+ - (usr/)?share/info/.*
+- artifact: gcc-devel
+  # devel includes everything which isn't documentation or libs, since
+  # everything else gcc produces is required for compiling.
+  # This is the -devel artifact instead of -misc, since it goes in -devel
+  # stratum artifacts by default.
+ include: [ .* ]
+
+configure-commands:
+- mkdir o
+
+# Configure flag notes:
+# 1. An attempt to stop anything going in $PREFIX/lib64 (which doesn't
+# fully work; we will need to hobble the multilib configuration in
+# config/i386/t-linux64 if we really want to kill /lib64).
+# 2. Avoid having more than one copy of ZLib in use on the system
+# 3. Multilib does not make sense in Baserock.
+- |
+ case "$MORPH_ARCH" in
+ armv7lhf) ARCH_FLAGS="--with-arch=armv7-a \
+ --with-cpu=cortex-a9 \
+ --with-tune=cortex-a9 \
+ --with-fpu=vfpv3-d16 \
+ --with-float=hard" ;;
+ armv7*) ARCH_FLAGS="--with-arch=armv7-a" ;;
+ esac
+
+ cd o && ../configure \
+ $ARCH_FLAGS \
+ --prefix="$PREFIX" \
+ `# [1]` --libdir=$PREFIX/lib \
+ --disable-bootstrap \
+ `# [2]` --with-system-zlib \
+ `# [3]` --disable-multilib \
+ --enable-languages=c,c++,fortran
+
+build-commands:
+- |
+ case "$MORPH_ARCH" in
+ armv5*) sed -i "s/--host=none/--host=armv5/" o/Makefile
+ sed -i "s/--target=none/--target=armv5/" o/Makefile ;;
+ armv7*) sed -i "s/--host=none/--host=armv7a/" o/Makefile
+ sed -i "s/--target=none/--target=armv7a/" o/Makefile ;;
+ esac
+ cd o && make
+
+install-commands:
+- cd o && make DESTDIR="$DESTDIR" install
+- ln -s gcc "$DESTDIR/$PREFIX/bin/cc"
+- >
+ for fortran_alias in f77 f90 f95; do
+ ln -s gfortran "$DESTDIR/$PREFIX/bin/$fortran_alias"
+ done
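The `# [N]` tokens embedded in the configure invocation above are backquote command substitutions whose only content is a comment, so they expand to nothing; this is what lets a long continued command carry per-line annotations. A minimal sketch:

    echo one \
         `# an inline annotation: the substitution runs only a comment` \
         two
    # prints: one two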
diff --git a/strata/build-essential/glibc.morph b/strata/build-essential/glibc.morph
new file mode 100644
index 00000000..f820547d
--- /dev/null
+++ b/strata/build-essential/glibc.morph
@@ -0,0 +1,98 @@
+name: glibc
+kind: chunk
+products:
+- artifact: glibc-nss
+ include:
+ - etc/nsswitch.conf
+ - (usr/)?lib/libnss.*
+- artifact: glibc-gconv
+ include:
+ - (usr/)?lib/gconv/.*
+- artifact: glibc-libs
+ include:
+ - sbin/ldconfig
+ - lib(32|64)?/ld-.*
+ - (usr/)?lib(exec)?/pt_chown
+- artifact: glibc-bins
+ include:
+ - (usr/)?s?bin/.*
+ - (usr/)?libexec/getconf/.*
+ - (usr/)?lib/libSegFault\.so(\.\d+)*$
+- artifact: glibc-libs
+ include:
+ # This is processed after bins, so bins can take libSegFault.so
+ - (usr/)?lib(32|64)?/lib[^/]*\.so(\.\d+)*$
+ - etc/ld.so.conf
+ - etc/ld.so.conf.d
+- artifact: glibc-devel
+ include:
+ - (usr/)?include/.*
+ - (usr/)?lib(32|64)?/lib.*\.a
+ - (usr/)?lib(32|64)?/lib.*\.la
+ - (usr/)?(lib(32|64)?|share)/pkgconfig/.*\.pc
+ - (usr/)?lib(32|64)?/.*\.o
+- artifact: glibc-locale
+ include:
+ - (usr/)?share/locale/.*
+ - (usr/)?share/i18n/.*
+ - (usr/)?share/zoneinfo/.*
+- artifact: glibc-misc
+ include:
+ - .*
+configure-commands:
+- mkdir o
+
+- |
+ case "$MORPH_ARCH" in
+ armv7*)
+ ARCH_FLAGS="--without-fp" ;;
+ esac
+
+ # We override the PATH here to remove /tools/bin from it.
+ # Thanks to this glibc finds bash in /bin/bash through the /bin
+  # symlink. This is important because glibc rewrites the bash path in
+  # the shebang of some scripts, and those scripts will be broken if
+  # they point to bash in /tools/bin/bash.
+ export PATH="/usr/bin:/sbin:/bin";
+ export CFLAGS="-O2 $CFLAGS";
+ cd o && ../configure \
+ $ARCH_FLAGS \
+ --prefix="$PREFIX" \
+ --disable-profile \
+ --enable-kernel=3.0.0 \
+ --without-cvs \
+ --without-selinux \
+ --enable-obsolete-rpc
+
+build-commands:
+- cd o && make localtime=UTC
+
+install-commands:
+- cd o && make install_root="$DESTDIR" localtime=UTC install
+- mkdir -p "$DESTDIR/etc"
+- mkdir -p "$DESTDIR/etc/ld.so.conf.d"
+- |
+ cat <<EOF > nsswitch.conf
+ passwd: compat
+ group: compat
+ shadow: compat
+
+ hosts: files myhostname mdns4_minimal [NOTFOUND=return] dns mdns4
+ networks: files
+
+ protocols: db files
+ services: db files
+ ethers: db files
+ rpc: db files
+
+ netgroup: nis
+ EOF
+- install -m 644 -o root -g root nsswitch.conf "$DESTDIR/etc/nsswitch.conf"
+- |
+ cat <<EOF > ld.so.conf
+ /lib
+ /usr/lib
+ /usr/local/lib
+ include /etc/ld.so.conf.d/*.conf
+ EOF
+- install -m 644 -o root -g root ld.so.conf "$DESTDIR/etc/ld.so.conf"
diff --git a/strata/build-essential/linux-api-headers.morph b/strata/build-essential/linux-api-headers.morph
new file mode 100644
index 00000000..62aa22b8
--- /dev/null
+++ b/strata/build-essential/linux-api-headers.morph
@@ -0,0 +1,24 @@
+name: linux-api-headers
+kind: chunk
+install-commands:
+- |
+ case "$MORPH_ARCH" in
+ armv5l)
+ ARCH="arm" ;;
+ armv7b|armv7l|armv7lhf)
+ ARCH="arm" ;;
+ armv8l64|armv8b64)
+ ARCH="arm64" ;;
+ x86_32)
+ ARCH="i386" ;;
+ x86_64)
+ ARCH="x86_64" ;;
+ ppc64)
+ ARCH="powerpc" ;;
+ *)
+ echo "Error: unsupported Morph architecture: $MORPH_ARCH" >&2
+ exit 1
+ esac
+ ARCH=$ARCH make INSTALL_HDR_PATH=dest headers_install
+- install -d "$DESTDIR${PREFIX-/usr}/include"
+- cp -r dest/include/* "$DESTDIR/${PREFIX-/usr}/include"
diff --git a/strata/build-essential/m4-tarball.morph b/strata/build-essential/m4-tarball.morph
new file mode 100644
index 00000000..eb16726a
--- /dev/null
+++ b/strata/build-essential/m4-tarball.morph
@@ -0,0 +1,5 @@
+name: m4-tarball
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./configure --prefix="$PREFIX" --disable-gcc-warnings
diff --git a/strata/build-essential/make.morph b/strata/build-essential/make.morph
new file mode 100644
index 00000000..1259b342
--- /dev/null
+++ b/strata/build-essential/make.morph
@@ -0,0 +1,5 @@
+name: make
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./configure --prefix="$PREFIX" --disable-nls
diff --git a/strata/build-essential/stage1-binutils.morph b/strata/build-essential/stage1-binutils.morph
new file mode 100644
index 00000000..69a79d14
--- /dev/null
+++ b/strata/build-essential/stage1-binutils.morph
@@ -0,0 +1,23 @@
+name: stage1-binutils
+kind: chunk
+build-system: autotools
+
+configure-commands:
+# We set the sysroot location dynamically at runtime by passing
+# `--sysroot` to GCC, so we need to build a linker with sysroot support.
+# We set it to a non-existent directory as a safety net to avoid looking
+# at the host dirs in case we forget to set the sysroot. Setting the
+# lib path is vital to avoid the tools we build linking to the libraries
+# on the host system; the '=' makes the path we give relative to the
+# sysroot, which we can then set at runtime by passing -Wl,--sysroot to
+# GCC. Although nothing should be installed on /lib64, we configure the
+# linker to look at that directory as well to make things more robust
+# (currently GCC installs libraries to this directory at least on
+# x86_64).
+
+- |
+ ./configure --prefix="$PREFIX" --disable-nls --disable-werror \
+ --build=$(sh config.guess) \
+ --host=$(sh config.guess) \
+ --target=$TARGET_STAGE1 \
+ --with-sysroot=/nonexistentdir --with-lib-path="=$PREFIX/lib:=$PREFIX/lib64"
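An illustrative invocation of the resulting cross linker, showing how the '=' paths are resolved once a sysroot is supplied (hypothetical staging path, and it presumes a libc has already been staged there):

    $TARGET_STAGE1-gcc -Wl,--sysroot=/path/to/staging -o hello hello.c
    # ld searches /path/to/staging/tools/lib and .../tools/lib64,
    # never the host's /lib or /usr/lib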
diff --git a/strata/build-essential/stage1-gcc.morph b/strata/build-essential/stage1-gcc.morph
new file mode 100644
index 00000000..f76b0cc6
--- /dev/null
+++ b/strata/build-essential/stage1-gcc.morph
@@ -0,0 +1,80 @@
+name: stage1-gcc
+kind: chunk
+
+configure-commands:
+# Workaround from LFS due to GCC not detecting stack protection correctly
+- sed -i '/k prot/agcc_cv_libc_provides_ssp=yes' gcc/configure
+
+- mkdir o
+
+# Configure flag notes:
+# 1. See gcc.morph.
+# 2. Although we will be setting a sysroot at runtime, giving a
+# temporary one at configuration time seems necessary so that
+# `--with-native-system-header-dir` produces effect and
+# /tools/include is in the include path for the newly built GCC. We
+# set it by default to a non-existent directory to avoid GCC looking
+# at the host dirs, in case we forget to give it at runtime.
+# 3. Disable searching /usr/local/include for headers
+# 4. The pass 1 compiler needs to find the libraries we build in pass
+# 2. Include path must be set explicility, because it defaults to
+# $SYSROOT/usr/include rather than $SYSROOT/include.
+# 5. Disable stuff that doesn't work when building a cross compiler
+# without an existing libc, and generally try to keep this build as
+# simple as possible.
+- |
+ case "$MORPH_ARCH" in
+ armv7lhf) ARCH_FLAGS="--with-arch=armv7-a \
+ --with-cpu=cortex-a9 \
+ --with-tune=cortex-a9 \
+ --with-fpu=vfpv3-d16 \
+ --with-float=hard" ;;
+ armv7*) ARCH_FLAGS="--with-arch=armv7-a" ;;
+ esac
+
+ cd o && ../configure \
+ $ARCH_FLAGS \
+ --build=$(sh ../config.guess) \
+ --host=$(sh ../config.guess) \
+ --target=$TARGET_STAGE1 \
+ --prefix="$PREFIX" \
+ `# [1]` --libdir="$PREFIX/lib" \
+ `# [2]` --with-sysroot=/nonexistentdir \
+ --with-newlib \
+ `# [3]` --with-local-prefix="$PREFIX" \
+ `# [4]` --with-native-system-header-dir="$PREFIX/include" \
+ --without-headers \
+ --disable-nls \
+ --disable-shared \
+ --disable-multilib \
+ `# [5]` --disable-decimal-float \
+ `# [5]` --disable-threads \
+ `# [5]` --disable-libatomic \
+ `# [5]` --disable-libgomp \
+ `# [5]` --disable-libitm \
+ `# [5]` --disable-libquadmath \
+ `# [5]` --disable-libsanitizer \
+ `# [5]` --disable-libssp \
+ `# [5]` --disable-libvtv \
+ `# [5]` --disable-libcilkrts \
+ `# [5]` --disable-libstdc++-v3 \
+ --enable-languages=c,c++
+
+build-commands:
+- |
+ # GCC is not passing the correct host/target flags to GMP's configure
+ # script, which causes it to not use the machine-dependent code for
+ # the platform and use the generic one instead. However, the generic
+ # code results on an undefined reference to `__gmpn_invert_limb' in
+ # ARMv7. Fix the invocation of GMP's configure script so that GMP can
+ # use the machine-dependent code.
+ case "$MORPH_ARCH" in
+ armv5*) sed -i "s/--host=none/--host=armv5/" o/Makefile
+ sed -i "s/--target=none/--target=armv5/" o/Makefile ;;
+ armv7*) sed -i "s/--host=none/--host=armv7a/" o/Makefile
+ sed -i "s/--target=none/--target=armv7a/" o/Makefile ;;
+ esac
+ cd o && make
+
+install-commands:
+- cd o && make DESTDIR="$DESTDIR" install
diff --git a/strata/build-essential/stage2-binutils.morph b/strata/build-essential/stage2-binutils.morph
new file mode 100644
index 00000000..1c64fb29
--- /dev/null
+++ b/strata/build-essential/stage2-binutils.morph
@@ -0,0 +1,21 @@
+name: stage2-binutils
+kind: chunk
+build-system: autotools
+
+configure-commands:
+- |
+ export STAGE2_SYSROOT="$(dirname $(pwd))"
+ export CXX=false
+ # binutils has its own embedded libtool, which is old and strips out
+ # `--sysroot`. Work around by modifying the compiler command to
+ # include the sysroot flag
+ export CC="$TARGET_STAGE1-gcc --sysroot=$STAGE2_SYSROOT"
+ ./configure --prefix="$PREFIX" --disable-nls --disable-werror \
+ --build=$(sh config.guess) \
+ --host=$TARGET_STAGE1 \
+ --target=$TARGET_STAGE1
+
+build-commands:
+- |
+ export STAGE2_SYSROOT="$(dirname $(pwd))"
+ make
diff --git a/strata/build-essential/stage2-busybox.morph b/strata/build-essential/stage2-busybox.morph
new file mode 100644
index 00000000..98e4bf33
--- /dev/null
+++ b/strata/build-essential/stage2-busybox.morph
@@ -0,0 +1,72 @@
+name: stage2-busybox
+kind: chunk
+
+configure-commands:
+# Explicitly setting HOSTCC is required because we have a 'gcc' earlier in
+# the PATH supplied by the stage2-gcc chunk, which can't execute outside of
+# the stage 3 staging area.
+- make HOSTCC="/usr/bin/gcc" CROSS_COMPILE=$TARGET_STAGE1- defconfig
+
+# Avoid dividing applets between $PREFIX/[s]bin and $PREFIX/usr/[s]bin.
+- '[ "$PREFIX" = /usr ] || sed -e ''s/.*INSTALL_NO_USR.*/CONFIG_INSTALL_NO_USR=y/'' -i .config'
+
+# We have GAWK.
+- sed -e 's/CONFIG_AWK=y.*/# CONFIG_AWK is not set/' -i .config
+
+# Depends on stuff that was removed since eglibc 2.14.
+- sed -e 's/CONFIG_INETD=y.*/# CONFIG_INETD is not set/' -i .config
+
+# Busybox Patch is incompatible enough with GNU Patch that it can't be
+# used for GNULib projects built from Git.
+- sed -e 's/CONFIG_PATCH=y.*/# CONFIG_PATCH is not set/' -i .config
+
+# None of this is needed because we have kmod; and it actually breaks the
+# Linux build because depmod isn't compatible enough with util-linux's.
+- sed -e 's/CONFIG_DEPMOD=y.*/# CONFIG_DEPMOD is not set/' -i .config
+- sed -e 's/CONFIG_INSMOD=y.*/# CONFIG_INSMOD is not set/' -i .config
+- sed -e 's/CONFIG_MODPROBE=y.*/# CONFIG_MODPROBE is not set/' -i .config
+- sed -e 's/CONFIG_MODPROBE_SMALL=y.*/# CONFIG_MODPROBE_SMALL is not set/' -i .config
+- sed -e 's/CONFIG_LSMOD=y.*/# CONFIG_LSMOD is not set/' -i .config
+- sed -e 's/CONFIG_RMMOD=y.*/# CONFIG_RMMOD is not set/' -i .config
+
+# General features that we don't need.
+- sed -e 's/CONFIG_FEATURE_MOUNT_CIFS=y.*/# CONFIG_FEATURE_MOUNT_CIFS is not set/' -i .config
+- sed -e 's/CONFIG_FEATURE_EXTRA_QUIET=y.*/# CONFIG_FEATURE_EXTRA_QUIET is not set/' -i .config
+- sed -e 's/CONFIG_FEATURE_INIT_COREDUMPS=y.*/# CONFIG_FEATURE_INIT_COREDUMPS is not set/' -i .config
+- sed -e 's/CONFIG_FEATURE_INIT_SCTTY=y.*/# CONFIG_FEATURE_INIT_SCTTY is not set/' -i .config
+- sed -e 's/CONFIG_FEATURE_INIT_SYSLOG=y.*/# CONFIG_FEATURE_INIT_SYSLOG is not set/' -i .config
+- sed -e 's/CONFIG_FEATURE_INITRD=y.*/# CONFIG_FEATURE_INITRD is not set/' -i .config
+- sed -e 's/CONFIG_FEATURE_USE_INITTAB=y.*/# CONFIG_FEATURE_USE_INITTAB is not set/' -i .config
+- sed -e 's/CONFIG_FEATURE_MINIX2=y.*/# CONFIG_FEATURE_MINIX2 is not set/' -i .config
+- sed -e 's/CONFIG_FSCK_MINIX=y.*/# CONFIG_FSCK_MINIX is not set/' -i .config
+- sed -e 's/CONFIG_HALT=y.*/# CONFIG_HALT is not set/' -i .config
+- sed -e 's/CONFIG_INIT=y.*/# CONFIG_INIT is not set/' -i .config
+- sed -e 's/CONFIG_INIT_TERMINAL_TYPE=y.*/CONFIG_INIT_TERMINAL_TYPE=""/' -i .config
+- sed -e 's/CONFIG_LOSETUP=y.*/# CONFIG_LOSETUP is not set/' -i .config
+- sed -e 's/CONFIG_LSUSB=y.*/# CONFIG_LSUSB is not set/' -i .config
+- sed -e 's/CONFIG_LZMA=y.*/# CONFIG_LZMA is not set/' -i .config
+- sed -e 's/CONFIG_MKFS_EXT2=y.*/# CONFIG_MKFS_EXT2 is not set/' -i .config
+- sed -e 's/CONFIG_MKFS_MINIX=y.*/# CONFIG_MKFS_MINIX is not set/' -i .config
+- sed -e 's/CONFIG_RUNLEVEL=y.*/# CONFIG_RUNLEVEL is not set/' -i .config
+- sed -e 's/CONFIG_STRINGS=y.*/# CONFIG_STRINGS is not set/' -i .config
+- sed -e 's/CONFIG_UNLZMA=y.*/# CONFIG_UNLZMA is not set/' -i .config
+- sed -e 's/CONFIG_UNXZ=y.*/# CONFIG_UNXZ is not set/' -i .config
+- sed -e 's/CONFIG_XZ=y.*/# CONFIG_XZ is not set/' -i .config
+
+build-commands:
+- |
+ export STAGE2_SYSROOT="$(dirname $(pwd))"
+ export CPPFLAGS="--sysroot=$STAGE2_SYSROOT"
+ export LDFLAGS="--sysroot=$STAGE2_SYSROOT"
+ make HOSTCC="/usr/bin/gcc" CROSS_COMPILE=$TARGET_STAGE1-
+
+install-commands:
+# We expect to be built with a non-standard prefix in stage 2 (i.e. not
+# /usr). The install will break if prefix is set to /usr.
+- |
+ export STAGE2_SYSROOT="$(dirname $(pwd))"
+ export CPPFLAGS="--sysroot=$STAGE2_SYSROOT"
+ export LDFLAGS="--sysroot=$STAGE2_SYSROOT"
+ make CONFIG_PREFIX="$DESTDIR$PREFIX" \
+ HOSTCC="/usr/bin/gcc" CROSS_COMPILE=$TARGET_STAGE1- install &&
+ chmod 6755 "$DESTDIR$PREFIX"/bin/busybox
diff --git a/strata/build-essential/stage2-fake-bash.morph b/strata/build-essential/stage2-fake-bash.morph
new file mode 100644
index 00000000..021fd366
--- /dev/null
+++ b/strata/build-essential/stage2-fake-bash.morph
@@ -0,0 +1,4 @@
+name: stage2-fake-bash
+kind: chunk
+install-commands:
+- printf '#!/bin/sh\nexec /bin/sh "$@"\n' | install -D /proc/self/fd/0 -m 755 "$DESTDIR$PREFIX/bin/bash"
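The install line above reads the piped script through /proc/self/fd/0, so the wrapper is created, given mode 755 and its parent directory in one step. An equivalent sketch on a scratch path:

    printf '#!/bin/sh\nexec /bin/sh "$@"\n' |
        install -D -m 755 /proc/self/fd/0 /tmp/fake-bin/bash
    /tmp/fake-bin/bash -c 'echo ok'   # runs via /bin/sh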
diff --git a/strata/build-essential/stage2-fhs-dirs.morph b/strata/build-essential/stage2-fhs-dirs.morph
new file mode 100644
index 00000000..64080217
--- /dev/null
+++ b/strata/build-essential/stage2-fhs-dirs.morph
@@ -0,0 +1,51 @@
+name: stage2-fhs-dirs
+kind: chunk
+description: create the FHS 2.3 directory hierarchy and the usual files
+install-commands:
+- sh ./create-fhs-dirs "$DESTDIR"
+- rmdir "$DESTDIR/bin"
+- ln -s "$PREFIX/bin" "$DESTDIR/bin"
+- install -m 644 passwd "$DESTDIR/etc/passwd"
+- install -m 600 shadow "$DESTDIR/etc/shadow"
+- install -m 644 interfaces "$DESTDIR/etc/network/interfaces"
+- install -m 644 group "$DESTDIR/etc/group"
+- install -m 644 issue "$DESTDIR/etc/issue"
+- install -m 644 services "$DESTDIR/etc/services"
+- install -m 644 protocols "$DESTDIR/etc/protocols"
+- echo baserock > "$DESTDIR"/etc/hostname
+devices:
+- type: c
+ filename: /dev/console
+ gid: 0
+ major: 5
+ minor: 1
+ permissions: '0600'
+ uid: 0
+- type: c
+ filename: /dev/full
+ gid: 0
+ major: 1
+ minor: 7
+ permissions: '0666'
+ uid: 0
+- type: c
+ filename: /dev/null
+ gid: 0
+ major: 1
+ minor: 3
+ permissions: '0666'
+ uid: 0
+- type: c
+ filename: /dev/urandom
+ gid: 0
+ major: 1
+ minor: 9
+ permissions: '0666'
+ uid: 0
+- type: c
+ filename: /dev/zero
+ gid: 0
+ major: 1
+ minor: 5
+ permissions: '0666'
+ uid: 0
diff --git a/strata/build-essential/stage2-gawk.morph b/strata/build-essential/stage2-gawk.morph
new file mode 100644
index 00000000..15c838b6
--- /dev/null
+++ b/strata/build-essential/stage2-gawk.morph
@@ -0,0 +1,9 @@
+name: stage2-gawk
+kind: chunk
+build-system: autotools
+configure-commands:
+- STAGE2_SYSROOT="$(dirname $(pwd))" CPPFLAGS="--sysroot=$STAGE2_SYSROOT" CXX=false
+ LDFLAGS="--sysroot=$STAGE2_SYSROOT" ./configure --prefix="$PREFIX" --disable-nls
+ --build=$(sh config.guess) --host=$TARGET_STAGE1
+build-commands:
+- STAGE2_SYSROOT="$(dirname $(pwd))" make
diff --git a/strata/build-essential/stage2-gcc-fixed-headers.morph b/strata/build-essential/stage2-gcc-fixed-headers.morph
new file mode 100644
index 00000000..10794872
--- /dev/null
+++ b/strata/build-essential/stage2-gcc-fixed-headers.morph
@@ -0,0 +1,19 @@
+name: stage2-gcc-fixed-headers
+kind: chunk
+install-commands:
+# Stage 1 GCC's fixincludes process created a limits.h before there was
+# a real limits.h available for the target. This step (taken from
+# Linux From Scratch) creates a better one so that stage 2 GCC can compile.
+#
+# THIS IS A FRAGILE HACK! We need to replace the headers. The only way to
+# overwrite files in a staging area is to install a new chunk.
+# This is undesired behaviour in the long term, as we want to never
+# have overlaps, so this functionality may go away.
+- |
+ libgcc_dir=$(dirname $($TARGET_STAGE1-gcc -print-libgcc-file-name))
+ sysroot="$(dirname "$(pwd)")"
+ target_libgcc_dir="${libgcc_dir#$sysroot}"
+ mkdir -p "$DESTDIR/$target_libgcc_dir/include-fixed"
+ cat "gcc/limitx.h" "gcc/glimits.h" "gcc/limity.h" \
+ >"$DESTDIR/$target_libgcc_dir/include-fixed/limits.h"
+
diff --git a/strata/build-essential/stage2-gcc.morph b/strata/build-essential/stage2-gcc.morph
new file mode 100644
index 00000000..cd600e18
--- /dev/null
+++ b/strata/build-essential/stage2-gcc.morph
@@ -0,0 +1,82 @@
+name: stage2-gcc
+kind: chunk
+
+configure-commands:
+- mkdir o
+
+# In other projects we specify the sysroot location using CPPFLAGS.
+# Here, that breaks because GCC compiles stuff for the *build* machine,
+# too ... and this requires using the host's compiler, which cannot use
+# the same set of CPPFLAGS as the target. If we specify the sysroot
+# using CC instead then we don't interfere, because we are only
+# specifying the *host* C compiler.
+#
+# Configure flag notes:
+# 1. It's vital that this compiler runs in the bootstrap machine, and
+# targets the same machine (TARGET_STAGE1) so that the stage 1 GCC
+# is used instead of the compiler of the build machine.
+# 2. See gcc.morph.
+# 3. Disable searching /usr/local/include for headers
+# 4. This flag causes the correct --sysroot flag to be passed when
+# calling stage 1 GCC.
+- |
+ case "$MORPH_ARCH" in
+ armv7lhf) ARCH_FLAGS="--with-arch=armv7-a \
+ --with-cpu=cortex-a9 \
+ --with-tune=cortex-a9 \
+ --with-fpu=vfpv3-d16 \
+ --with-float=hard" ;;
+ armv7*) ARCH_FLAGS="--with-arch=armv7-a" ;;
+ esac
+ export STAGE2_SYSROOT="$(dirname $(pwd))"
+ export CC="$TARGET_STAGE1-gcc --sysroot=$STAGE2_SYSROOT"
+ export CXX="$TARGET_STAGE1-g++ --sysroot=$STAGE2_SYSROOT"
+ export AR="$TARGET_STAGE1-ar"
+ export RANLIB="$TARGET_STAGE1-ranlib"
+ cd o && ../configure \
+ $ARCH_FLAGS \
+ --build=$(sh ../config.guess) \
+ `# [1]` --host=$TARGET_STAGE1 \
+ `# [1]` --target=$TARGET_STAGE1 \
+ --prefix="$PREFIX" \
+ `# [2]` --libdir=$PREFIX/lib \
+ `# [3]` --with-local-prefix=$PREFIX \
+ `# [4]` --with-build-sysroot="$STAGE2_SYSROOT" \
+ --disable-bootstrap \
+ --disable-nls \
+ --disable-multilib \
+ --disable-libgomp \
+ --disable-libstdcxx-pch \
+ --enable-languages=c,c++
+
+build-commands:
+- |
+ case "$MORPH_ARCH" in
+ armv5*) sed -i "s/--host=none/--host=armv5/" o/Makefile
+ sed -i "s/--target=none/--target=armv5/" o/Makefile ;;
+ armv7*) sed -i "s/--host=none/--host=armv7a/" o/Makefile
+ sed -i "s/--target=none/--target=armv7a/" o/Makefile ;;
+ esac
+ export STAGE2_SYSROOT="$(dirname $(pwd))"
+ cd o && make
+
+install-commands:
+- cd o && make DESTDIR="$DESTDIR" install
+
+# Stage 3 builds need to link against this file in the location that
+# it will be in the final system, so we make a temporary link now.
+#
+# On x86_64 GCC resolutely installs its libraries into lib64. To fix this
+# would require hobbling the MULTILIB_OSDIRNAMES field in
+# gcc/config/i386/t-linux64 and this might break things, so for now we
+# tolerate the inconsistency.
+- |
+ if [ "$(echo $TARGET | cut -c -6)" = "x86_64" ]; then
+ libdir=lib64
+ else
+ libdir=lib
+ fi
+
+ install -d "$DESTDIR/lib"
+ ln -s "$PREFIX/$libdir/libgcc_s.so" "$DESTDIR/lib/"
+ ln -s "$PREFIX/$libdir/libgcc_s.so.1" "$DESTDIR/lib/"
diff --git a/strata/build-essential/stage2-glibc.morph b/strata/build-essential/stage2-glibc.morph
new file mode 100644
index 00000000..f44d0ebb
--- /dev/null
+++ b/strata/build-essential/stage2-glibc.morph
@@ -0,0 +1,103 @@
+name: stage2-glibc
+kind: chunk
+
+configure-commands:
+- mkdir o
+
+# Configure flag notes:
+# 1. Avoid installing to PREFIX/lib64 on x86_64.
+# 2. Location of linux-api-headers.
+# 3. Normal flags. See glibc.morph.
+# 4. Force configuration values of certain things that can't be detected
+# in a cross-compile.
+- |
+ case "$MORPH_ARCH" in
+ armv7*)
+ ARCH_FLAGS="--without-fp" ;;
+ esac
+
+ export CFLAGS="-O2 $CFLAGS"; export CXX=false; \
+ cd o && ../configure \
+ $ARCH_FLAGS \
+ --build=$(../scripts/config.guess) --host=$TARGET_STAGE1 \
+ --prefix="$PREFIX" \
+ `# [1]` --libdir="$PREFIX/lib" \
+ `# [2]` --with-headers="$(pwd)/../../$PREFIX/include" \
+ `# [3]` --disable-profile --enable-kernel=2.6.25 \
+ `# [4]` libc_cv_c_cleanup=yes libc_cv_ctors_header=yes \
+ libc_cv_forced_unwind=yes libc_cv_ssp=no
+
+build-commands:
+- cd o && make localtime=UTC
+
+install-commands:
+- cd o && make install_root="$DESTDIR" localtime=UTC install
+- mkdir -p "$DESTDIR/etc"
+- mkdir -p "$DESTDIR/etc/ld.so.conf.d"
+- |
+ cat <<EOF > ld.so.conf
+ /lib
+ /usr/lib
+ /usr/local/lib
+ include /etc/ld.so.conf.d/*.conf
+ EOF
+- install -m 644 -o root -g root ld.so.conf "$DESTDIR/etc/ld.so.conf"
+- |
+ # Fix up GCC to handle the sysroot containing glibc being in a different
+ # location for each chunk build.
+ #
+ # For headers, it's enough to pass -B in the CPPFLAGS. This would work for
+ # the startup files (crt*.o) as well, except where libtool is involved (in
+ # which case it strips -B out of your LDFLAGS before calling GCC). We get
+ # around this by making GCC locate them relative to the environment variable
+ # STAGE2_SYSROOT, which we can then set along with CPPFLAGS in each stage 2
+ # chunk build.
+ #
+ # We also force the use of the program loader at PREFIX/lib/ld.so instead
+ # of its usual home in /lib or /lib64, which is necessary for the output of
+ # stage 2 to work as a chroot when building stage 3.
+
+ sysroot="$(dirname "$(pwd)")"
+ specs_dir="$(dirname $($TARGET_STAGE1-gcc --print-libgcc-file-name))"
+ target_specs_dir="$DESTDIR/${specs_dir#$sysroot}"
+ mkdir -p "$target_specs_dir"
+
+ $TARGET_STAGE1-gcc -dumpspecs |
+ sed -e "s@[gMS]\?crt[1in].o%s@%:getenv(STAGE2_SYSROOT $PREFIX/lib/&)@g" \
+ -e "s@/lib\(64\)\?/ld@$PREFIX/lib/ld@g" \
+ > "$target_specs_dir/specs-for-sysroot"
+
+ # NASTY HACK #
+ # We create a symlink to the actual specs here, so that later the
+ # symlink can be replaced with a dangling link.
+ #
+ # This is necessary as we need to have gcc use its internal specs,
+  # which can differ from the specs generated by `gcc -dumpspecs`.
+ #
+ # The dangling symlink will not make it onto the final system, just
+ # like all other bootstrap only components.
+ ln -s specs-for-sysroot "$target_specs_dir/specs"
+
+# Install a symlink for the program interpreter (ld.so) so that binaries
+# built in stage 3 before the stage 3 glibc is built can use it.
+# FIXME: get a better way of finding the name of the loader. The lib64
+# path is hardcoded into glibc in the file
+# sysdeps/unix/sysv/linux/configure.
+- install -d $DESTDIR/lib
+- |
+ cpu=$(echo $TARGET | cut -d '-' -f 1)
+ case "$cpu" in
+ x86_64)
+ install -d "$DESTDIR/lib64"
+ ln -s "$PREFIX/lib/ld-linux-x86-64.so.2" \
+ "$DESTDIR/lib64/ld-linux-x86-64.so.2" ;;
+ ppc64)
+ install -d "$DESTDIR/lib64"
+ ln -s "$PREFIX/lib/ld64.so.1" \
+ "$DESTDIR/lib64/ld64.so.1" ;;
+ *)
+ loader=$(basename $(ls "$DESTDIR$PREFIX"/lib/ld-linux*))
+ [ -z $loader ] && loader=$(basename $(ls "$DESTDIR$PREFIX"/lib/ld.so*))
+ [ -z $loader ] && ( echo "Bug in stage2-glibc ld.so symlinks" ; exit 1 )
+ ln -s "$PREFIX/lib/$loader" "$DESTDIR/lib/$loader"
+ esac
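The later stage 2 chunks in this same diff (stage2-gawk, stage2-make, stage2-busybox, stage2-gcc) consume the edited specs through environment variables; a condensed sketch of that pattern:

    export STAGE2_SYSROOT="$(dirname "$(pwd)")"
    export CPPFLAGS="--sysroot=$STAGE2_SYSROOT"
    export LDFLAGS="--sysroot=$STAGE2_SYSROOT"
    # headers come from --sysroot; crt*.o are found via the installed
    # specs, whose %:getenv(STAGE2_SYSROOT ...) calls expand relative to
    # whatever STAGE2_SYSROOT points at when the link runs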
diff --git a/strata/build-essential/stage2-libstdc++.morph b/strata/build-essential/stage2-libstdc++.morph
new file mode 100644
index 00000000..3d89ad85
--- /dev/null
+++ b/strata/build-essential/stage2-libstdc++.morph
@@ -0,0 +1,36 @@
+name: stage2-libstdc++
+kind: chunk
+configure-commands:
+- mkdir o
+
+# Configure flag notes:
+# 1. The thread C++ library cannot be built, as the thread C library
+#    was not built in stage1-gcc.
+# 2. Prevents the installation of precompiled include files, which are
+# not needed at this stage.
+# 3. From LFS: the header location of C++ needs to be explicitly given
+# as we are running the configure script from the top-level
+# directory.
+- |
+ export STAGE2_SYSROOT="$(dirname $(pwd))"
+ # -fPIC must be given, otherwise it will not be possible to create
+ # shared libraries linked to libstdc++
+ export CPPFLAGS="--sysroot=$STAGE2_SYSROOT -fPIC"
+ export LDFLAGS="--sysroot=$STAGE2_SYSROOT"
+ cd o && ../libstdc++-v3/configure \
+ --build=$(sh ../config.guess) \
+ --host="$TARGET_STAGE1" \
+ --target="$TARGET_STAGE1" \
+ --prefix="$PREFIX" \
+ --disable-nls \
+ --disable-shared \
+ --disable-multilib \
+ `# [1]` --disable-libstdcxx-threads \
+ `# [2]` --disable-libstdcxx-pch \
+ `# [3]` --with-gxx-include-dir=/tools/"$TARGET_STAGE1"/include/c++/4.9.2
+
+build-commands:
+- cd o && make
+
+install-commands:
+- cd o && make DESTDIR="$DESTDIR" install
diff --git a/strata/build-essential/stage2-linux-api-headers.morph b/strata/build-essential/stage2-linux-api-headers.morph
new file mode 100644
index 00000000..1da94ee9
--- /dev/null
+++ b/strata/build-essential/stage2-linux-api-headers.morph
@@ -0,0 +1,24 @@
+name: stage2-linux-api-headers
+kind: chunk
+install-commands:
+- |
+ case "$MORPH_ARCH" in
+ armv5l)
+ ARCH="arm" ;;
+ armv7b|armv7l|armv7lhf)
+ ARCH="arm" ;;
+ armv8l64|armv8b64)
+ ARCH="arm64" ;;
+ x86_32)
+ ARCH="i386" ;;
+ x86_64)
+ ARCH="x86_64" ;;
+ ppc64)
+ ARCH="powerpc" ;;
+ *)
+ echo "Error: unsupported Morph architecture: $MORPH_ARCH" >&2
+ exit 1
+ esac
+ ARCH=$ARCH make INSTALL_HDR_PATH=dest headers_install
+- install -d "$DESTDIR${PREFIX-/usr}/include"
+- cp -r dest/include/* "$DESTDIR/${PREFIX-/usr}/include"
diff --git a/strata/build-essential/stage2-make.morph b/strata/build-essential/stage2-make.morph
new file mode 100644
index 00000000..0b05581c
--- /dev/null
+++ b/strata/build-essential/stage2-make.morph
@@ -0,0 +1,9 @@
+name: stage2-make
+kind: chunk
+build-system: autotools
+configure-commands:
+- STAGE2_SYSROOT="$(dirname $(pwd))" CPPFLAGS="--sysroot=$STAGE2_SYSROOT" CXX=false
+ LDFLAGS="--sysroot=$STAGE2_SYSROOT" ./configure --prefix="$PREFIX" --disable-nls
+ --build=$(sh config/config.guess) --host=$TARGET_STAGE1
+build-commands:
+- STAGE2_SYSROOT="$(dirname $(pwd))" make
diff --git a/strata/build-essential/stage2-reset-specs.morph b/strata/build-essential/stage2-reset-specs.morph
new file mode 100644
index 00000000..8892f67c
--- /dev/null
+++ b/strata/build-essential/stage2-reset-specs.morph
@@ -0,0 +1,21 @@
+name: stage2-reset-specs
+kind: chunk
+
+# Nasty hack to get around being unable to reliably add configuration to gcc
+# other than by modifying its specs, while Baserock's rootfs protection
+# prevents the specs from being modified in place before builds.
+# The limitation is overcome by installing files as part of a chunk, which
+# overwrites previous files.
+# New specs were added for the bootstrap builds, but after stage2 we start
+# having chrooted builds, so the old specs need to be replaced.
+# Unfortunately we can't just replace the specs with the ones gcc produces,
+# since gcc behaves differently without specs than with the specs it produces!
+# So we use a **NASTY HACK** to replace the specs symlink with one that
+# points to a file that doesn't exist.
+install-commands:
+- |
+ STAGE2_SYSROOT="$(dirname "$(pwd)")"
+ specs_dir="$(dirname $($TARGET_STAGE1-gcc -print-libgcc-file-name))"
+ target_specs_dir="$DESTDIR/${specs_dir#$STAGE2_SYSROOT}"
+ mkdir -p "$target_specs_dir"
+ ln -s "temporary specs removed by baserock bootstrap" "$target_specs_dir/specs"
diff --git a/strata/build-essential/zlib.morph b/strata/build-essential/zlib.morph
new file mode 100644
index 00000000..a69eebf3
--- /dev/null
+++ b/strata/build-essential/zlib.morph
@@ -0,0 +1,9 @@
+name: zlib
+kind: chunk
+max-jobs: 1
+configure-commands:
+- ./configure --prefix="$PREFIX"
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/c2man/c2man.morph b/strata/c2man/c2man.morph
new file mode 100644
index 00000000..93390d2d
--- /dev/null
+++ b/strata/c2man/c2man.morph
@@ -0,0 +1,11 @@
+name: c2man
+kind: chunk
+configure-commands:
+- ./Configure -d -e
+build-commands:
+- make LEX=flex
+install-commands:
+- mkdir -p $DESTDIR$PREFIX/bin
+- mkdir -p $DESTDIR$PREFIX/lib
+- mkdir -p $DESTDIR$PREFIX/man
+- make install bin=$DESTDIR$PREFIX/bin privlib=$DESTDIR$PREFIX/lib/c2man mansrc=$DESTDIR$PREFIX/man
diff --git a/strata/ceph-service.morph b/strata/ceph-service.morph
new file mode 100644
index 00000000..4ab9d432
--- /dev/null
+++ b/strata/ceph-service.morph
@@ -0,0 +1,59 @@
+name: ceph-service
+kind: stratum
+build-depends:
+- morph: strata/foundation.morph
+- morph: strata/network-security.morph
+chunks:
+- name: libaio
+ morph: strata/ceph-service/libaio.morph
+ repo: upstream:libaio
+ ref: 08f50baec0e7731116d3f665c6155d7829edf5d7
+ unpetrify-ref: libaio-0.3.110-1
+- name: keyutils
+ morph: strata/ceph-service/keyutils.morph
+ repo: upstream:keyutils
+ ref: 9209a0c8fd63afc59f644e078b40cec531409c30
+ unpetrify-ref: v1.5.9
+- name: libunwind
+ repo: upstream:libunwind
+ ref: d7322f0f64dab715c4feb5f08edef5660d8719c0
+ unpetrify-ref: v1.1
+- name: gperftools
+ repo: upstream:gperftools
+ ref: 846b775dfadb77901202ae7ddbac30ad1de7df01
+ unpetrify-ref: gperftools-2.2
+ build-depends:
+ - libunwind
+- name: snappy
+ repo: upstream:snappy-git
+ ref: 1ff9be9b8fafc8528ca9e055646f5932aa5db9c4
+ unpetrify-ref: master
+- name: leveldb
+ morph: strata/ceph-service/leveldb.morph
+ repo: upstream:leveldb
+ ref: e353fbc7ea81f12a5694991b708f8f45343594b1
+ unpetrify-ref: v1.17
+ build-depends:
+ - snappy
+ - gperftools
+- name: libeditline
+ repo: upstream:libeditline-tarball
+ ref: 7503ff5f8610734521f40e276b59b3b6291830e7
+ unpetrify-ref: baserock/morph
+- name: boost
+ morph: strata/ceph-service/boost.morph
+ repo: upstream:boost-tarball
+ ref: ed232fdd34968697a68783b3195b1da4226915b5
+ unpetrify-ref: boost_1_57_0
+- name: ceph
+ morph: strata/ceph-service/ceph.morph
+ repo: upstream:ceph
+ ref: f4f12a634b0a92938d54d77910134dbbcdf864e6
+ unpetrify-ref: v0.94.1.1
+ build-depends:
+ - libaio
+ - gperftools
+ - leveldb
+ - libeditline
+ - keyutils
+ - boost
diff --git a/strata/ceph-service/boost.morph b/strata/ceph-service/boost.morph
new file mode 100644
index 00000000..cc954076
--- /dev/null
+++ b/strata/ceph-service/boost.morph
@@ -0,0 +1,8 @@
+name: boost
+kind: chunk
+configure-commands:
+- ./bootstrap.sh
+build-commands:
+- ./b2
+install-commands:
+- ./b2 install --prefix="$DESTDIR$PREFIX"
diff --git a/strata/ceph-service/ceph.morph b/strata/ceph-service/ceph.morph
new file mode 100644
index 00000000..a6b06759
--- /dev/null
+++ b/strata/ceph-service/ceph.morph
@@ -0,0 +1,25 @@
+name: ceph
+kind: chunk
+build-system: autotools
+configure-commands:
+- NOCONFIGURE=1 ./autogen.sh
+- ./configure --with-nss --prefix="$PREFIX" --sysconfdir=/etc --without-fuse --without-libatomic-ops --without-libxfs
+build-commands:
+- make
+install-commands:
+- make install
+- install -D -m 755 src/init-ceph.in "$DESTDIR"/etc/init.d/ceph-SysV
+- install -D -m 755 systemd/ceph "$DESTDIR"/etc/systemd/system/ceph
+- install -D -m 644 systemd/ceph-mon@.service "$DESTDIR"/etc/systemd/system/ceph-mon@.service
+- install -D -m 644 systemd/ceph-osd@.service "$DESTDIR"/etc/systemd/system/ceph-osd@.service
+- install -D -m 644 systemd/ceph-mds@.service "$DESTDIR"/etc/systemd/system/ceph-mds@.service
+- install -D -m 644 systemd/ceph.target "$DESTDIR"/etc/systemd/system/ceph.target
+- install -d "$DESTDIR"/etc/ceph
+- install -D -d "$DESTDIR"/var/lib/ceph
+- install -d "$DESTDIR"/var/lib/ceph/mon
+- install -d "$DESTDIR"/var/lib/ceph/osd
+- install -d "$DESTDIR"/var/lib/ceph/mds
+- install -d "$DESTDIR"/var/lib/ceph/tmp
+- install -d "$DESTDIR"/var/lib/ceph/bootstrap-mds
+- install -d "$DESTDIR"/var/lib/ceph/bootstrap-osd
+- install -D -d "$DESTDIR"/var/log/ceph
diff --git a/strata/ceph-service/keyutils.morph b/strata/ceph-service/keyutils.morph
new file mode 100644
index 00000000..4d47e265
--- /dev/null
+++ b/strata/ceph-service/keyutils.morph
@@ -0,0 +1,7 @@
+name: keyutils
+kind: chunk
+build-commands:
+- make clean
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" LIBDIR="$PREFIX/lib" USRLIBDIR="$PREFIX/lib" install
diff --git a/strata/ceph-service/leveldb.morph b/strata/ceph-service/leveldb.morph
new file mode 100644
index 00000000..bff2b87a
--- /dev/null
+++ b/strata/ceph-service/leveldb.morph
@@ -0,0 +1,9 @@
+name: leveldb
+kind: chunk
+build-commands:
+- make
+install-commands:
+- mkdir -p "$DESTDIR$PREFIX"/lib
+- mkdir -p "$DESTDIR$PREFIX"/include
+- cp --preserve=links libleveldb.* "$DESTDIR$PREFIX"/lib
+- cp -r include/leveldb "$DESTDIR$PREFIX"/include
diff --git a/strata/ceph-service/libaio.morph b/strata/ceph-service/libaio.morph
new file mode 100644
index 00000000..62c24d37
--- /dev/null
+++ b/strata/ceph-service/libaio.morph
@@ -0,0 +1,7 @@
+name: libaio
+kind: chunk
+build-commands:
+- make clean
+- make
+install-commands:
+- make prefix="$DESTDIR$PREFIX" install
diff --git a/strata/chef.morph b/strata/chef.morph
new file mode 100644
index 00000000..69b495aa
--- /dev/null
+++ b/strata/chef.morph
@@ -0,0 +1,187 @@
+name: chef
+kind: stratum
+description: Autogenerated by Baserock import tool
+build-depends:
+- morph: strata/ruby.morph
+- morph: strata/core.morph
+chunks:
+- name: yajl
+ morph: strata/chef/yajl.morph
+ repo: upstream:yajl
+ ref: a0ecdde0c042b9256170f2f8890dd9451a4240aa
+ unpetrify-ref: 2.1.0
+- name: libyajl2-1.0.1
+ morph: strata/chef/libyajl2-1.0.1.morph
+ repo: upstream:ruby-gems/libyajl2-gem
+ ref: 98aef032f536d13775bc7b3b69a25ebac9bdee0a
+ unpetrify-ref: 1.0.1
+ build-depends:
+ - yajl
+- name: chef-12.0.0.alpha.0
+ morph: strata/chef/chef-master.morph
+ repo: upstream:ruby-gems/chef
+ ref: 9841bc9c6271c6d9add3aff0c2e11239cfb129ca
+ unpetrify-ref: 12.0.3
+- name: chef-zero-2.2
+ morph: strata/chef/chef-zero-2.2.morph
+ repo: upstream:ruby-gems/chef-zero
+ ref: 231c3abd052e606820297a97e4bc32bdab656a02
+ unpetrify-ref: v2.2
+- name: coderay-1.1.0.rc1
+ morph: strata/chef/coderay-1.1.0.morph
+ repo: upstream:ruby-gems/coderay
+ ref: a48037b85a12228431b32103786456f36beb355f
+ unpetrify-ref: v1.1.0
+- name: erubis-master
+ morph: strata/chef/erubis-master.morph
+ repo: upstream:ruby-gems/erubis
+ ref: 14d3eab57fbc361312c8f3af350cbf9a5bafce17
+ unpetrify-ref: master
+- name: ffi-1.9.3
+ morph: strata/chef/ffi-1.9.3.morph
+ repo: upstream:ruby-gems/ffi
+ ref: d982b7049336106c04f7721045dc5613b16d3545
+ unpetrify-ref: 1.9.3
+- name: ffi-yajl-1.0.2
+ morph: strata/chef/ffi-yajl-master.morph
+ repo: upstream:ruby-gems/ffi-yajl
+ ref: 3a4bc4259fd67af0ff4a8c1d3d71cfbaed9c112f
+ unpetrify-ref: master
+ build-depends:
+ - libyajl2-1.0.1
+- name: hashie-2.1.2
+ morph: strata/chef/hashie-2.1.2.morph
+ repo: upstream:ruby-gems/hashie
+ ref: 95b97fbff2cac643d56ec718cb708665500682e5
+ unpetrify-ref: v2.1.2
+- name: highline-1.6.21
+ morph: strata/chef/highline-master.morph
+ repo: upstream:ruby-gems/highline
+ ref: 51de22e436e6d45696759d673d7b9ceba16cae39
+ unpetrify-ref: master
+- name: hoe-master
+ morph: strata/chef/hoe-master.morph
+ repo: upstream:ruby-gems/hoe
+ ref: d94b26b4687be0a24d04b7cb582753fbec33d7e4
+ unpetrify-ref: master
+- name: diff-lcs-1.2.5
+ morph: strata/chef/diff-lcs-1.2.5.morph
+ repo: upstream:diff-lcs
+ ref: d53e92242b9dd6745e56a0ff4ba15d2f62052b91
+ unpetrify-ref: v1.2.5
+ build-depends:
+ - hoe-master
+- name: ipaddress-master
+ morph: strata/chef/ipaddress-master.morph
+ repo: upstream:ruby-gems/ipaddress
+ ref: dae93ad0e4fb9a5d547a15dae0c3f2417078c845
+ unpetrify-ref: master
+- name: json-1.8.1
+ morph: strata/chef/json-1.8.1.morph
+ repo: upstream:ruby-gems/json
+ ref: 92a96dea2b24b9c68856004d69491f46aedd0925
+ unpetrify-ref: v1.8.1
+- name: method_source-0.8.1
+ morph: strata/chef/method_source-0.8.2.morph
+ repo: upstream:ruby-gems/method_source
+ ref: 1b1f8323a7c25f29331fe32511f50697e5405dbd
+ unpetrify-ref: v0.8.2
+- name: mime-types-1.25.1
+ morph: strata/chef/mime-types-1.25.1.morph
+ repo: upstream:ruby-gems/mime-types
+ ref: 6be836f59a041893cfc1c25668b3aa3552a7e334
+ unpetrify-ref: v1.25.1
+ build-depends:
+ - hoe-master
+- name: mixlib-authentication-1.3.0
+ morph: strata/chef/mixlib-authentication-1.3.0.morph
+ repo: upstream:ruby-gems/mixlib-authentication
+ ref: db24a56c6f5b99114998a50942220a7023060229
+ unpetrify-ref: 1.3.0
+- name: mixlib-cli-1.5.0
+ morph: strata/chef/mixlib-cli-1.5.0.morph
+ repo: upstream:ruby-gems/mixlib-cli
+ ref: b3b3c12141b5380ec61945770690fc1ae31d92b0
+ unpetrify-ref: 1.5.0
+- name: mixlib-config-2.1.0
+ morph: strata/chef/mixlib-config-2.1.0.morph
+ repo: upstream:ruby-gems/mixlib-config
+ ref: c5e2dee2beb5fdd17442ff92e520f2ef01d17ee5
+ unpetrify-ref: v2.1.0
+- name: mixlib-log-1.6.0
+ morph: strata/chef/mixlib-log-master.morph
+ repo: upstream:ruby-gems/mixlib-log
+ ref: 50ec55964ce19d3a8a14050be9a23c4b8990e2f0
+ unpetrify-ref: master
+- name: mixlib-shellout-1.4.0
+ morph: strata/chef/mixlib-shellout-1.4.0.morph
+ repo: upstream:ruby-gems/mixlib-shellout
+ ref: a04ce6db22edf0575c50e18ae2db09adced7dedc
+ unpetrify-ref: 1.4.0
+- name: net-dhcp-1.2.1
+ morph: strata/chef/net-dhcp-1.2.1.morph
+ repo: upstream:net-dhcp-ruby
+ ref: b644922a08aa09e2ce75f8f9f9fa1f0b8cecb2e9
+ unpetrify-ref: v1.2.1
+- name: net-ssh-2.9.1
+ morph: strata/chef/net-ssh-2.9.1.morph
+ repo: upstream:ruby-gems/net-ssh
+ ref: 9f8607984d8e904f211cc5edb39ab2a2ca94008e
+ unpetrify-ref: v2.9.1
+- name: net-ssh-gateway-1.2.0
+ morph: strata/chef/net-ssh-gateway-1.2.0.morph
+ repo: upstream:ruby-gems/net-ssh-gateway
+ ref: 1de7611a7f7cedbe7a4c6cf3798c88d00637582d
+ unpetrify-ref: v1.2.0
+- name: net-ssh-multi-1.2.0
+ morph: strata/chef/net-ssh-multi-1.2.0.morph
+ repo: upstream:ruby-gems/net-ssh-multi
+ ref: b659f2884b2c9abdbe3bbf3c844937a0799ed5ac
+ unpetrify-ref: v1.2.0
+- name: ohai-7.4.0.dev
+ morph: strata/chef/ohai-master.morph
+ repo: upstream:ruby-gems/ohai
+ ref: 0bf2ed32744445a253082910ee4e07b2b38023a7
+ unpetrify-ref: master
+- name: plist-master
+ morph: strata/chef/plist-master.morph
+ repo: upstream:ruby-gems/plist
+ ref: 12eb82d283cab148183c37c07e3f75a309969dec
+ unpetrify-ref: master
+- name: pry-0.10.1
+ morph: strata/chef/pry-master.morph
+ repo: upstream:ruby-gems/pry
+ ref: 6d5eb0831b50ec729d2dc3356255b49535535e37
+ unpetrify-ref: master
+- name: rack-1.5.2
+ morph: strata/chef/rack-1.5.2.morph
+ repo: upstream:ruby-gems/rack
+ ref: ac590d055c936bb9a618e955a690dc836c625211
+ unpetrify-ref: 1.5.2
+- name: slop-3.6.0
+ morph: strata/chef/slop-3.6.0.morph
+ repo: upstream:ruby-gems/slop
+ ref: c3f84e7e794004f9ae6958c13ef3dd3038c2c0eb
+ unpetrify-ref: v3.6.0
+- name: systemu-2.6.4
+ morph: strata/chef/systemu-master.morph
+ repo: upstream:ruby-gems/systemu
+ ref: 35340f1e91941af47988b1b9d77705493b96d3db
+ unpetrify-ref: master
+- name: wmi-lite-1.0.0
+ morph: strata/chef/wmi-lite-1.0.0.morph
+ repo: upstream:ruby-gems/wmi-lite
+ ref: 9377836dc0a5487474038ec727f02f9b33facfa6
+ unpetrify-ref: 1.0.0
+- name: libpopt
+ morph: strata/chef/libpopt.morph
+ repo: upstream:libpopt
+ ref: c224abf28f4ff9bbf292908324359cb5905addf8
+ unpetrify-ref: master
+- name: sgdisk
+ morph: strata/chef/sgdisk.morph
+ repo: upstream:sgdisk
+ ref: a920398fa393f9d6301b32b191bc01e086ab8bc8
+ unpetrify-ref: master
+ build-depends:
+ - libpopt
diff --git a/strata/chef/chef-master.morph b/strata/chef/chef-master.morph
new file mode 100644
index 00000000..61c12ca0
--- /dev/null
+++ b/strata/chef/chef-master.morph
@@ -0,0 +1,20 @@
+name: chef-12.0.0.alpha.0
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: chef-12.0.0.alpha.0-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build chef.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./chef-12.0.0.alpha.0.gem
+- mkdir -p "$DESTDIR"/etc
+- |
+ cat << EOF > "$DESTDIR/etc/lsb-release"
+ DISTRIB_ID=Baserock
+ DISTRIB_CODENAME=baserock
+ DISTRIB_DESCRIPTION="Baserock"
+ EOF
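All of the rubygems.to_chunk morphologies added in this stratum follow the
same staged-install pattern shown above: build the gem from its gemspec, then
install it under DESTDIR at the path Ruby reports as its gem home. A minimal
stand-alone sketch of that pattern (the gem name, version and staging paths
here are illustrative assumptions, not taken from the commit):

    # Illustrative values; in a Baserock build these are set by morph.
    DESTDIR=/tmp/staging
    PREFIX=/usr
    # Build the gem from its gemspec in the source tree.
    gem build example.gemspec
    # Stage it into DESTDIR under Ruby's gem home
    # ("gem environment home" prints e.g. /usr/lib/ruby/gems/2.0.0).
    mkdir -p "$DESTDIR/$(gem environment home)"
    gem install --install-dir "$DESTDIR/$(gem environment home)" \
      --bindir "$DESTDIR/$PREFIX/bin" \
      --ignore-dependencies --local ./example-1.0.0.gem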
diff --git a/strata/chef/chef-zero-2.2.morph b/strata/chef/chef-zero-2.2.morph
new file mode 100644
index 00000000..11407ae3
--- /dev/null
+++ b/strata/chef/chef-zero-2.2.morph
@@ -0,0 +1,13 @@
+name: chef-zero-2.2
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: chef-zero-2.2-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build chef-zero.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./chef-zero-2.2.gem
diff --git a/strata/chef/coderay-1.1.0.morph b/strata/chef/coderay-1.1.0.morph
new file mode 100644
index 00000000..a380b911
--- /dev/null
+++ b/strata/chef/coderay-1.1.0.morph
@@ -0,0 +1,13 @@
+name: coderay-1.1.0.rc1
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: coderay-1.1.0.rc1-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build coderay.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./coderay-1.1.0.rc1.gem
diff --git a/strata/chef/diff-lcs-1.2.5.morph b/strata/chef/diff-lcs-1.2.5.morph
new file mode 100644
index 00000000..3585467f
--- /dev/null
+++ b/strata/chef/diff-lcs-1.2.5.morph
@@ -0,0 +1,13 @@
+name: diff-lcs-1.2.5
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: diff-lcs-1.2.5-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build diff-lcs.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./diff-lcs-1.2.5.gem
diff --git a/strata/chef/erubis-master.morph b/strata/chef/erubis-master.morph
new file mode 100644
index 00000000..f15e0968
--- /dev/null
+++ b/strata/chef/erubis-master.morph
@@ -0,0 +1,19 @@
+---
+name: erubis-master
+kind: chunk
+build-system: manual
+products:
+- artifact: erubis-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+configure-commands:
+# Manually do what the 'rook' build system would appear to do, if it worked.
+# Values taken from 'Rookbook.props'.
+- find -type f -exec sed -e 's/\$Release\$/2.7.0/g' -i \{} \;
+- find -type f -exec sed -e 's/\$Copyright\$/copyright(c) 2006-2011 kuwata-lab.com all rights reserved./g' -i \{} \;
+build-commands:
+- gem build erubis.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./erubis-2.7.0.gem
diff --git a/strata/chef/ffi-1.9.3.morph b/strata/chef/ffi-1.9.3.morph
new file mode 100644
index 00000000..672f0bd7
--- /dev/null
+++ b/strata/chef/ffi-1.9.3.morph
@@ -0,0 +1,13 @@
+name: ffi-1.9.3
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: ffi-1.9.3-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build ffi.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./ffi-1.9.3.gem
diff --git a/strata/chef/ffi-yajl-master.morph b/strata/chef/ffi-yajl-master.morph
new file mode 100644
index 00000000..5d3c0792
--- /dev/null
+++ b/strata/chef/ffi-yajl-master.morph
@@ -0,0 +1,13 @@
+name: ffi-yajl-1.0.2
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: ffi-yajl-1.0.2-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build ffi-yajl.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./ffi-yajl-1.0.2.gem
diff --git a/strata/chef/hashie-2.1.2.morph b/strata/chef/hashie-2.1.2.morph
new file mode 100644
index 00000000..a4fb46f6
--- /dev/null
+++ b/strata/chef/hashie-2.1.2.morph
@@ -0,0 +1,13 @@
+name: hashie-2.1.2
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: hashie-2.1.2-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build hashie.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./hashie-2.1.2.gem
diff --git a/strata/chef/highline-master.morph b/strata/chef/highline-master.morph
new file mode 100644
index 00000000..5615f6b2
--- /dev/null
+++ b/strata/chef/highline-master.morph
@@ -0,0 +1,13 @@
+name: highline-1.6.21
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: highline-1.6.21-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build highline.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./highline-1.6.21.gem
diff --git a/strata/chef/hoe-master.morph b/strata/chef/hoe-master.morph
new file mode 100644
index 00000000..9fe7ff8e
--- /dev/null
+++ b/strata/chef/hoe-master.morph
@@ -0,0 +1,14 @@
+---
+name: hoe-master
+kind: chunk
+build-system: manual
+products:
+- artifact: hoe-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- rake gem
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./pkg/hoe-3.12.0.gem
diff --git a/strata/chef/ipaddress-master.morph b/strata/chef/ipaddress-master.morph
new file mode 100644
index 00000000..fdaa5de6
--- /dev/null
+++ b/strata/chef/ipaddress-master.morph
@@ -0,0 +1,13 @@
+name: ipaddress-0.8.0
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: ipaddress-0.8.0-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build ipaddress.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./ipaddress-0.8.0.gem
diff --git a/strata/chef/json-1.8.1.morph b/strata/chef/json-1.8.1.morph
new file mode 100644
index 00000000..cf25abca
--- /dev/null
+++ b/strata/chef/json-1.8.1.morph
@@ -0,0 +1,13 @@
+name: json-1.8.1
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: json-1.8.1-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build json.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./json-1.8.1.gem
diff --git a/strata/chef/libpopt.morph b/strata/chef/libpopt.morph
new file mode 100644
index 00000000..6adde30c
--- /dev/null
+++ b/strata/chef/libpopt.morph
@@ -0,0 +1,6 @@
+name: libpopt
+kind: chunk
+build-system: autotools
+configure-commands:
+- autoreconf -if
+- ./configure
diff --git a/strata/chef/libyajl2-1.0.1.morph b/strata/chef/libyajl2-1.0.1.morph
new file mode 100644
index 00000000..d85d1567
--- /dev/null
+++ b/strata/chef/libyajl2-1.0.1.morph
@@ -0,0 +1,13 @@
+name: libyajl2-1.0.1
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: libyajl2-1.0.1-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- USE_SYSTEM_LIBYAJL2=yes gem build libyajl2.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- USE_SYSTEM_LIBYAJL2=yes gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./libyajl2-1.0.1.gem
diff --git a/strata/chef/method_source-0.8.2.morph b/strata/chef/method_source-0.8.2.morph
new file mode 100644
index 00000000..5ba20da6
--- /dev/null
+++ b/strata/chef/method_source-0.8.2.morph
@@ -0,0 +1,13 @@
+name: method_source-0.8.1
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: method_source-0.8.1-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build method_source.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./method_source-0.8.1.gem
diff --git a/strata/chef/mime-types-1.25.1.morph b/strata/chef/mime-types-1.25.1.morph
new file mode 100644
index 00000000..453c42d5
--- /dev/null
+++ b/strata/chef/mime-types-1.25.1.morph
@@ -0,0 +1,13 @@
+name: mime-types-1.25.1
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: mime-types-1.25.1-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build mime-types.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./mime-types-1.25.1.gem
diff --git a/strata/chef/mixlib-authentication-1.3.0.morph b/strata/chef/mixlib-authentication-1.3.0.morph
new file mode 100644
index 00000000..8792ff2c
--- /dev/null
+++ b/strata/chef/mixlib-authentication-1.3.0.morph
@@ -0,0 +1,13 @@
+name: mixlib-authentication-1.3.0
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: mixlib-authentication-1.3.0-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build mixlib-authentication.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./mixlib-authentication-1.3.0.gem
diff --git a/strata/chef/mixlib-cli-1.5.0.morph b/strata/chef/mixlib-cli-1.5.0.morph
new file mode 100644
index 00000000..84f3a4b7
--- /dev/null
+++ b/strata/chef/mixlib-cli-1.5.0.morph
@@ -0,0 +1,13 @@
+name: mixlib-cli-1.5.0
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: mixlib-cli-1.5.0-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build mixlib-cli.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./mixlib-cli-1.5.0.gem
diff --git a/strata/chef/mixlib-config-2.1.0.morph b/strata/chef/mixlib-config-2.1.0.morph
new file mode 100644
index 00000000..cc6077a5
--- /dev/null
+++ b/strata/chef/mixlib-config-2.1.0.morph
@@ -0,0 +1,13 @@
+name: mixlib-config-2.1.0
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: mixlib-config-2.1.0-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build mixlib-config.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./mixlib-config-2.1.0.gem
diff --git a/strata/chef/mixlib-log-master.morph b/strata/chef/mixlib-log-master.morph
new file mode 100644
index 00000000..ddd956c1
--- /dev/null
+++ b/strata/chef/mixlib-log-master.morph
@@ -0,0 +1,13 @@
+name: mixlib-log-1.6.0
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: mixlib-log-1.6.0-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build mixlib-log.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./mixlib-log-1.6.0.gem
diff --git a/strata/chef/mixlib-shellout-1.4.0.morph b/strata/chef/mixlib-shellout-1.4.0.morph
new file mode 100644
index 00000000..7087c1c6
--- /dev/null
+++ b/strata/chef/mixlib-shellout-1.4.0.morph
@@ -0,0 +1,13 @@
+name: mixlib-shellout-1.4.0
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: mixlib-shellout-1.4.0-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build mixlib-shellout.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./mixlib-shellout-1.4.0.gem
diff --git a/strata/chef/net-dhcp-1.2.1.morph b/strata/chef/net-dhcp-1.2.1.morph
new file mode 100644
index 00000000..88eb4963
--- /dev/null
+++ b/strata/chef/net-dhcp-1.2.1.morph
@@ -0,0 +1,13 @@
+name: net-dhcp-1.2.1
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: net-dhcp-1.2.1-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build net-dhcp.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./net-dhcp-1.2.1.gem
diff --git a/strata/chef/net-ssh-2.9.1.morph b/strata/chef/net-ssh-2.9.1.morph
new file mode 100644
index 00000000..c06495d5
--- /dev/null
+++ b/strata/chef/net-ssh-2.9.1.morph
@@ -0,0 +1,15 @@
+name: net-ssh-2.9.1
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: net-ssh-2.9.1-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+configure-commands:
+- sed -e '/cert_chain\s*=/d' -e '/signing_key\s*=/d' -i net-ssh.gemspec
+build-commands:
+- gem build net-ssh.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./net-ssh-2.9.1.gem
diff --git a/strata/chef/net-ssh-gateway-1.2.0.morph b/strata/chef/net-ssh-gateway-1.2.0.morph
new file mode 100644
index 00000000..c0425002
--- /dev/null
+++ b/strata/chef/net-ssh-gateway-1.2.0.morph
@@ -0,0 +1,15 @@
+name: net-ssh-gateway-1.2.0
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: net-ssh-gateway-1.2.0-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+configure-commands:
+- sed -e '/cert_chain\s*=/d' -e '/signing_key\s*=/d' -i net-ssh-gateway.gemspec
+build-commands:
+- gem build net-ssh-gateway.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./net-ssh-gateway-1.2.0.gem
diff --git a/strata/chef/net-ssh-multi-1.2.0.morph b/strata/chef/net-ssh-multi-1.2.0.morph
new file mode 100644
index 00000000..fc433a37
--- /dev/null
+++ b/strata/chef/net-ssh-multi-1.2.0.morph
@@ -0,0 +1,15 @@
+name: net-ssh-multi-1.2.0
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: net-ssh-multi-1.2.0-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+configure-commands:
+- sed -e '/cert_chain\s*=/d' -e '/signing_key\s*=/d' -i net-ssh-multi.gemspec
+build-commands:
+- gem build net-ssh-multi.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./net-ssh-multi-1.2.0.gem
diff --git a/strata/chef/ohai-master.morph b/strata/chef/ohai-master.morph
new file mode 100644
index 00000000..f47dda35
--- /dev/null
+++ b/strata/chef/ohai-master.morph
@@ -0,0 +1,13 @@
+name: ohai-7.4.0.dev
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: ohai-7.4.0.dev-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build ohai.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./ohai-7.4.0.dev.gem
diff --git a/strata/chef/plist-master.morph b/strata/chef/plist-master.morph
new file mode 100644
index 00000000..bef39f7d
--- /dev/null
+++ b/strata/chef/plist-master.morph
@@ -0,0 +1,14 @@
+---
+name: plist-master
+kind: chunk
+build-system: manual
+products:
+- artifact: plist-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- rake gem
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./pkg/plist-3.1.0.gem
diff --git a/strata/chef/pry-master.morph b/strata/chef/pry-master.morph
new file mode 100644
index 00000000..07ebca7a
--- /dev/null
+++ b/strata/chef/pry-master.morph
@@ -0,0 +1,13 @@
+name: pry-0.10.1
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: pry-0.10.1-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build pry.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./pry-0.10.1.gem
diff --git a/strata/chef/rack-1.5.2.morph b/strata/chef/rack-1.5.2.morph
new file mode 100644
index 00000000..50fd960c
--- /dev/null
+++ b/strata/chef/rack-1.5.2.morph
@@ -0,0 +1,13 @@
+name: rack-1.5.2
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: rack-1.5.2-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build rack.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./rack-1.5.2.gem
diff --git a/strata/chef/sgdisk.morph b/strata/chef/sgdisk.morph
new file mode 100644
index 00000000..e792f9d9
--- /dev/null
+++ b/strata/chef/sgdisk.morph
@@ -0,0 +1,7 @@
+name: sgdisk
+kind: chunk
+build-commands:
+- make
+install-commands:
+- install -D -m 0755 -o root -g root sgdisk "$DESTDIR$PREFIX/bin/sgdisk"
+
diff --git a/strata/chef/slop-3.6.0.morph b/strata/chef/slop-3.6.0.morph
new file mode 100644
index 00000000..32101078
--- /dev/null
+++ b/strata/chef/slop-3.6.0.morph
@@ -0,0 +1,13 @@
+name: slop-3.6.0
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: slop-3.6.0-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build slop.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./slop-3.6.0.gem
diff --git a/strata/chef/systemu-master.morph b/strata/chef/systemu-master.morph
new file mode 100644
index 00000000..3cc0a5e2
--- /dev/null
+++ b/strata/chef/systemu-master.morph
@@ -0,0 +1,13 @@
+name: systemu-2.6.4
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: systemu-2.6.4-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build systemu.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./systemu-2.6.4.gem
diff --git a/strata/chef/wmi-lite-1.0.0.morph b/strata/chef/wmi-lite-1.0.0.morph
new file mode 100644
index 00000000..bb4b4127
--- /dev/null
+++ b/strata/chef/wmi-lite-1.0.0.morph
@@ -0,0 +1,13 @@
+name: wmi-lite-1.0.0
+kind: chunk
+description: Automatically generated by rubygems.to_chunk
+products:
+- artifact: wmi-lite-1.0.0-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- gem build wmi-lite.gemspec
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./wmi-lite-1.0.0.gem
diff --git a/strata/chef/yajl.morph b/strata/chef/yajl.morph
new file mode 100644
index 00000000..9dbc8dbf
--- /dev/null
+++ b/strata/chef/yajl.morph
@@ -0,0 +1,6 @@
+name: yajl
+kind: chunk
+description: YAJL JSON parsing library
+build-system: cmake
+max-jobs: 1
+
diff --git a/strata/cloudinit-support.morph b/strata/cloudinit-support.morph
new file mode 100644
index 00000000..9c89c7e4
--- /dev/null
+++ b/strata/cloudinit-support.morph
@@ -0,0 +1,20 @@
+name: cloudinit-support
+kind: stratum
+description: A stratum with cloud-init, so that a system can run in an OpenStack cloud.
+build-depends:
+- morph: strata/build-essential.morph
+- morph: strata/python-core.morph
+- morph: strata/python-common.morph
+- morph: strata/foundation.morph
+chunks:
+- name: python-cheetah
+ repo: upstream:python-cheetah
+ ref: 831aa6b99d9b4fb012ee644d8e80e0bc0eb6d6ed
+ unpetrify-ref: baserock/morph
+- name: cloud-init
+ morph: strata/cloudinit-support/cloud-init.morph
+ repo: upstream:cloud-init
+ ref: 130d51acc5b0becd64e7007f9dfe41a6e022eaec
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - python-cheetah
diff --git a/strata/cloudinit-support/cloud-init.morph b/strata/cloudinit-support/cloud-init.morph
new file mode 100644
index 00000000..5933dbf8
--- /dev/null
+++ b/strata/cloudinit-support/cloud-init.morph
@@ -0,0 +1,6 @@
+name: cloud-init
+kind: chunk
+build-system: python-distutils
+post-install-commands:
+- mkdir -p "$DESTDIR"/lib/systemd/system
+- cp systemd/* "$DESTDIR"/lib/systemd/system/
diff --git a/strata/connectivity.morph b/strata/connectivity.morph
new file mode 100644
index 00000000..dbd4a263
--- /dev/null
+++ b/strata/connectivity.morph
@@ -0,0 +1,20 @@
+name: connectivity
+kind: stratum
+build-depends:
+- morph: strata/foundation.morph
+chunks:
+- name: libnl
+ repo: upstream:libnl
+ ref: a2c4bd8f094a7247903578860a9c42049991860b
+ unpetrify-ref: baserock/morph
+- name: wpa_supplicant
+ morph: strata/connectivity/wpa_supplicant.morph
+ repo: upstream:hostap
+ ref: c4a58c968044c2002706b1a69c089c4d63728e77
+ unpetrify-ref: baserock/hostap_2_3
+ build-depends:
+ - libnl
+- name: iptables
+ repo: upstream:iptables
+ ref: 482c6d3731e2681cb4baae835c294840300197e6
+ unpetrify-ref: v1.4.21
diff --git a/strata/connectivity/wpa_supplicant.morph b/strata/connectivity/wpa_supplicant.morph
new file mode 100644
index 00000000..86a51608
--- /dev/null
+++ b/strata/connectivity/wpa_supplicant.morph
@@ -0,0 +1,8 @@
+name: wpa_supplicant
+kind: chunk
+configure-commands:
+- sed -i 's/^CONFIG_READLINE=y$//' wpa_supplicant/.config
+build-commands:
+- make -C wpa_supplicant
+install-commands:
+- make -C wpa_supplicant DESTDIR="$DESTDIR" install
diff --git a/strata/connman-common.morph b/strata/connman-common.morph
new file mode 100644
index 00000000..a22b7e90
--- /dev/null
+++ b/strata/connman-common.morph
@@ -0,0 +1,10 @@
+name: connman-common
+kind: stratum
+build-depends:
+- morph: strata/connectivity.morph
+chunks:
+- name: connman
+ morph: strata/connman-common/connman.morph
+ repo: upstream:connman
+ ref: 9951ba7a0353cfc884e96833c64e58c1bcae3f44
+ unpetrify-ref: baserock/1.24
diff --git a/strata/connman-common/connman.morph b/strata/connman-common/connman.morph
new file mode 100644
index 00000000..e468bc49
--- /dev/null
+++ b/strata/connman-common/connman.morph
@@ -0,0 +1,6 @@
+name: connman
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./bootstrap
+- ./configure --prefix="$PREFIX" --disable-wispr --disable-client LIBS=-lncurses
diff --git a/strata/core.morph b/strata/core.morph
new file mode 100644
index 00000000..6aa73f73
--- /dev/null
+++ b/strata/core.morph
@@ -0,0 +1,387 @@
+name: core
+kind: stratum
+description: Core components of a Baserock base system that are build tools required
+ for the BSP and Foundation strata, but a step above build-essential
+build-depends:
+- morph: strata/build-essential.morph
+chunks:
+- name: sqlite3
+ repo: upstream:sqlite3
+ ref: 24adc227bc29cd17e39df097fbca389c7724cd14
+ unpetrify-ref: sqlite-autoconf-3080801
+- name: cmake
+ morph: strata/core/cmake.morph
+ repo: upstream:cmake
+ ref: 732d8a467ab8a295b1bbf580f865980f8a1a9b29
+ unpetrify-ref: v3.2.1
+- name: gdbm
+ morph: strata/core/gdbm.morph
+ repo: upstream:gdbm-tarball
+ ref: e5faeaaf75ecfb705a9b643b3e4cb881ebb69f48
+ unpetrify-ref: gdbm-1.11
+- name: mini-utils
+ morph: strata/core/mini-utils.morph
+ repo: baserock:baserock/mini-utils
+ ref: 5293265b29bbf468ab9d7f3302b19dbc81d0f8b8
+ unpetrify-ref: master
+- name: ncurses
+ morph: strata/core/ncurses.morph
+ repo: upstream:ncurses
+ ref: f67398afa6a76fbc902cc0cea963d5151fa2a953
+ unpetrify-ref: ncurses-5.9-20150131
+- name: perl
+ morph: strata/core/perl.morph
+ repo: upstream:perl
+ ref: 78f9a3c880f11578fb9ff9c7d5f7e34b602d2ad1
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - gdbm
+- name: texinfo-tarball
+ morph: strata/core/texinfo-tarball.morph
+ repo: upstream:texinfo-tarball
+ ref: 6a55f074b1f67d02149c13931eb1df9f0e539b15
+ unpetrify-ref: texinfo-5.2
+ build-depends:
+ - ncurses
+ - perl
+- name: autoconf-tarball
+ morph: strata/core/autoconf-tarball.morph
+ repo: upstream:autoconf-tarball
+ ref: 55d1d2c339bc5f935f6d8d702e98b7bd5d968e9d
+ unpetrify-ref: autoconf-2.69
+ build-depends:
+ - perl
+- name: automake
+ morph: strata/core/automake.morph
+ repo: upstream:automake
+ ref: db43dd473361d90d8b00362cfef5bac8e722000d
+ unpetrify-ref: v1.15
+ build-depends:
+ - autoconf-tarball
+ - perl
+ - texinfo-tarball
+- name: libtool-tarball
+ morph: strata/core/libtool-tarball.morph
+ repo: upstream:libtool-tarball
+ ref: c026ca36e37d2643623a75d0d3e9e451023139f3
+ unpetrify-ref: libtool-2.4.6
+- name: libexpat
+ morph: strata/core/libexpat.morph
+ repo: upstream:libexpat
+ ref: 7cfc09db3e258129ab05811f2f9e351746ddab9f
+ unpetrify-ref: R_2_1_0
+ build-depends:
+ - autoconf-tarball
+ - automake
+ - libtool-tarball
+- name: gettext-tarball
+ morph: strata/core/gettext-tarball.morph
+ repo: upstream:gettext-tarball
+ ref: 482840e61f86ca321838a91e902c41d40c098bbb
+ unpetrify-ref: gettext-0.19.4
+ build-depends:
+ - libexpat
+- name: bash
+ morph: strata/core/bash.morph
+ repo: upstream:bash
+ ref: 3590145af6f1c9fa321dff231f69ae696e7e740b
+ unpetrify-ref: baserock/bash-4.3-patch-27
+- name: flex
+ morph: strata/core/flex.morph
+ repo: upstream:flex
+ ref: de10f98e8a2dc2a021796811490d0f30c3cd90bf
+ unpetrify-ref: baserock/build-essential
+ build-depends:
+ - automake
+ - gettext-tarball
+- name: openssl-new
+ morph: strata/core/openssl-new.morph
+ repo: upstream:openssl-new
+ ref: 506c1068801fdeef5cb00f2053854bf56150fb6d
+ unpetrify-ref: OpenSSL_1_0_1m
+ build-depends:
+ - perl
+- name: bzip2
+ morph: strata/core/bzip2.morph
+ repo: upstream:bzip2
+ ref: 8deafa68e52b230018c0318dc7959ff9af3ad2a5
+ unpetrify-ref: baserock/morph
+- name: readline
+ morph: strata/core/readline.morph
+ repo: upstream:readline
+ ref: 518937ab89be812ccd45e9b8c1ce4ad721d35ef6
+ unpetrify-ref: baserock/genivi/baseline
+ build-depends:
+ - automake
+- name: cpython
+ morph: strata/core/cpython.morph
+ repo: upstream:cpython
+ ref: d0188e98ecf02e007d85fc34944f8be8f91b7e94
+ unpetrify-ref: v2.7.9
+ build-depends:
+ - openssl-new
+ - bzip2
+ - sqlite3
+ - ncurses
+ - readline
+- name: cython
+ repo: upstream:cython
+ ref: 4dd8e762fa51d01775506fbbc102c45dbcea065d
+ unpetrify-ref: 0.22
+ build-depends:
+ - cpython
+- name: gtk-doc-stub
+ repo: upstream:gtk-doc-stub
+ ref: 58ec0d8593541ef7ae522ce42ebec6f98536c4e0
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - bash
+- name: xz
+ repo: upstream:xz
+ ref: a0cd05ee71d330b79ead6eb9222e1b24e1559d3a
+ unpetrify-ref: v5.2.0
+ build-depends:
+ - autoconf-tarball
+ - automake
+ - gettext-tarball
+ - libtool-tarball
+- name: libxml2
+ repo: upstream:libxml2
+ ref: ee8f1d4cda8dc1a6f2c515fe234f7bc89cdc9f80
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - autoconf-tarball
+ - automake
+ - cpython
+ - libtool-tarball
+ - xz
+- name: ca-certificates
+ morph: strata/core/ca-certificates.morph
+ repo: upstream:ca-certificates
+ ref: e9b06b26d9e57444e74a5cb6beca3f12726fc3c6
+ unpetrify-ref: baserock/debian/20140325
+ build-depends:
+ - automake
+ - cpython
+- name: curl
+ morph: strata/core/curl.morph
+ repo: upstream:curl
+ ref: 202aa9f7758636730299b86715d924f54468a908
+ unpetrify-ref: curl-7_38_0
+ build-depends:
+ - autoconf-tarball
+ - automake
+ - libtool-tarball
+ - openssl-new
+ - ca-certificates
+- name: XML-Parser
+ repo: upstream:XML-Parser
+ ref: e1a3ec157140a699e3020836475a0df622f70f1b
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - libexpat
+ - perl
+- name: git
+ morph: strata/core/git.morph
+ repo: upstream:git
+ ref: 9874fca7122563e28d699a911404fc49d2a24f1c
+ unpetrify-ref: v2.3.0
+ build-depends:
+ - autoconf-tarball
+ - cpython
+ - curl
+ - gettext-tarball
+ - libexpat
+ - openssl-new
+- name: help2man
+ repo: upstream:help2man
+ ref: 83bab7e2e8e24a380266a9a247c029c49b0de666
+ unpetrify-ref: baserock/v1.46.5
+ build-depends:
+ - autoconf-tarball
+ - automake
+- name: bison
+ morph: strata/core/bison.morph
+ repo: upstream:bison
+ ref: 2ab6d1daaccf32fc4314e4b2fe44da977f11a308
+ unpetrify-ref: baserock/v3.0.2
+ build-depends:
+ - autoconf-tarball
+ - automake
+ - bash
+ - flex
+ - gettext-tarball
+ - git
+ - help2man
+ - mini-utils
+- name: gperf
+ morph: strata/core/gperf.morph
+ repo: upstream:gperf
+ ref: 5094e4a539adf845111013f82c2c4fcaec637983
+ unpetrify-ref: baserock/morph
+- name: intltool
+ repo: upstream:intltool
+ ref: 12f04c88be9ff8a578d8fd6990ee2448c66dc5f4
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - XML-Parser
+ - autoconf-tarball
+ - automake
+ - perl
+- name: pkg-config
+ morph: strata/core/pkg-config.morph
+ repo: upstream:pkg-config
+ ref: 74ceac54ef6f9247c00f08eecd8cca811a3c5934
+ unpetrify-ref: pkg-config-0.28
+ build-depends:
+ - autoconf-tarball
+ - automake
+ - libtool-tarball
+- name: attr
+ morph: strata/core/attr.morph
+ repo: upstream:attr
+ ref: 4b005410f865895d4dcd56e2c135278a7a315877
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - autoconf-tarball
+ - automake
+ - gettext-tarball
+ - libtool-tarball
+- name: acl
+ morph: strata/core/acl.morph
+ repo: upstream:acl
+ ref: f13e09bd54fd4a501c4952f002ed2752bdd9f93b
+ unpetrify-ref: v2.2.52
+ build-depends:
+ - autoconf-tarball
+ - automake
+ - gettext-tarball
+ - libtool-tarball
+ - attr
+- name: linux-pam
+ morph: strata/core/linux-pam.morph
+ repo: upstream:linux-pam
+ ref: b1521c97e73b10469f7b34c0571d51c647eca83c
+ unpetrify-ref: Linux-PAM-1.1.8
+ build-depends:
+ - autoconf-tarball
+ - automake
+ - gettext-tarball
+ - libtool-tarball
+ - pkg-config
+ - flex
+ - attr
+ - acl
+- name: libcap2
+ morph: strata/core/libcap2.morph
+ repo: upstream:libcap2
+ ref: 4f7cca1bc9c2a274edb39d351b65747010d3ba7b
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - autoconf-tarball
+ - automake
+ - gettext-tarball
+ - libtool-tarball
+ - pkg-config
+ - attr
+ - acl
+ - linux-pam
+- name: shadow
+ morph: strata/core/shadow.morph
+ repo: upstream:shadow
+ ref: 4f5000a45963c2cc2a403ad23e459f20296b29c2
+ unpetrify-ref: baserock/4.2
+ build-depends:
+ - autoconf-tarball
+ - automake
+ - gettext-tarball
+ - libtool-tarball
+ - bison
+ - attr
+ - acl
+ - linux-pam
+ - libcap2
+- name: util-linux
+ morph: strata/core/util-linux.morph
+ repo: upstream:util-linux
+ ref: 34760e62e0d5a25262a6aa801b2f1df61216363f
+ unpetrify-ref: v2.26.1
+ build-depends:
+ - autoconf-tarball
+ - automake
+ - gettext-tarball
+ - git
+ - libtool-tarball
+ - pkg-config
+ - linux-pam
+ - shadow
+- name: bc
+ repo: upstream:bc-tarball
+ ref: 0956d119432ff6a2e85bae1fa336df799cad70b0
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - flex
+ - texinfo-tarball
+- name: patch
+ morph: strata/core/patch.morph
+ repo: upstream:patch
+ ref: 3bbb26c928a147cfcf0756f1cc0a1307e5cc663f
+ unpetrify-ref: baserock/v2.7.1
+ build-depends:
+ - shadow
+- name: libxslt
+ repo: upstream:libxslt
+ ref: 73e08bf7c36a9145d38f51d37e66529b873c011a
+ unpetrify-ref: master
+ build-depends:
+ - autoconf-tarball
+ - libtool-tarball
+ - automake
+ - libxml2
+- name: gnome-common
+ repo: upstream:gnome-common
+ ref: 5d61b55e8bea32fe2b52e21682ee4b3719b290c5
+ unpetrify-ref: 3.14.0
+ build-depends:
+ - autoconf-tarball
+ - automake
+- name: libffi
+ morph: strata/core/libffi.morph
+ repo: upstream:libffi
+ ref: 77d4586cc47e8f4c02278afbc220145bba0d442b
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - autoconf-tarball
+ - automake
+- name: glib
+ repo: upstream:glib
+ ref: 966ffb16f6bd54cb3d928a55d00d0eda73592094
+ unpetrify-ref: 2.44.0
+ build-depends:
+ - autoconf-tarball
+ - automake
+ - cpython
+ - gettext-tarball
+ - libffi
+ - pkg-config
+- name: gobject-introspection
+ repo: upstream:gobject-introspection
+ ref: c0243e116c5e261c262dded9f4d7726a770c5a19
+ unpetrify-ref: GOBJECT_INTROSPECTION_1_44_0
+ build-depends:
+ - autoconf-tarball
+ - automake
+ - bison
+ - cpython
+ - flex
+ - glib
+ - libffi
+ - pkg-config
+- name: e2fsprogs
+ morph: strata/core/e2fsprogs.morph
+ repo: upstream:e2fsprogs
+ ref: 6a3741ad293031447b95b88431eafa77401a8987
+ unpetrify-ref: v1.42.12
+ build-depends:
+ - pkg-config
+ - util-linux
diff --git a/strata/core/acl.morph b/strata/core/acl.morph
new file mode 100644
index 00000000..23f08c1f
--- /dev/null
+++ b/strata/core/acl.morph
@@ -0,0 +1,6 @@
+name: acl
+kind: chunk
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install-lib install-dev
diff --git a/strata/core/attr.morph b/strata/core/attr.morph
new file mode 100644
index 00000000..46d0b9c0
--- /dev/null
+++ b/strata/core/attr.morph
@@ -0,0 +1,18 @@
+name: attr
+kind: chunk
+build-system: autotools
+configure-commands:
+- make configure
+- |
+ ./configure --prefix="$PREFIX" \
+ --exec-prefix="$PREFIX" \
+ --sbindir="$PREFIX"/sbin \
+ --bindir="$PREFIX"/bin \
+ --libdir="$PREFIX"/lib \
+ --libexecdir="$PREFIX"/lib \
+ --enable-lib64=yes \
+ --includedir="$PREFIX"/include \
+ --mandir="$PREFIX"/share/man \
+ --datadir="$PREFIX"/share
+install-commands:
+- make DESTDIR="$DESTDIR" install-lib install-dev
diff --git a/strata/core/autoconf-tarball.morph b/strata/core/autoconf-tarball.morph
new file mode 100644
index 00000000..fbdc5768
--- /dev/null
+++ b/strata/core/autoconf-tarball.morph
@@ -0,0 +1,5 @@
+name: autoconf-tarball
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./configure --prefix="$PREFIX"
diff --git a/strata/core/automake.morph b/strata/core/automake.morph
new file mode 100644
index 00000000..282535d6
--- /dev/null
+++ b/strata/core/automake.morph
@@ -0,0 +1,8 @@
+name: automake
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./bootstrap.sh
+- ./configure --prefix="$PREFIX"
+build-commands:
+- unset DESTDIR; make
diff --git a/strata/core/bash.morph b/strata/core/bash.morph
new file mode 100644
index 00000000..5d0b8f03
--- /dev/null
+++ b/strata/core/bash.morph
@@ -0,0 +1,37 @@
+name: bash
+kind: chunk
+build-system: autotools
+configure-commands:
+- |
+ export CFLAGS="${CFLAGS} -DSYS_BASHRC='\"/etc/bash.bashrc\"'";
+ ./configure --prefix="$PREFIX" \
+ --bindir=/bin \
+ --without-bash-malloc \
+ --with-installed-readline \
+ --enable-readline
+post-install-commands:
+- |
+ cat <<'EOF' >> bash.bashrc
+ #
+ # /etc/bash.bashrc
+ #
+
+ # If not running interactively, don't do anything
+ [[ $- != *i* ]] && return
+
+ # Configure prompt for different terminals
+ case ${TERM} in
+ xterm*|rxvt*|Eterm|aterm|kterm|gnome*)
+ PROMPT_COMMAND=${PROMPT_COMMAND:+$PROMPT_COMMAND; }'printf "\033]0;%s@%s:%s\007" "${USER}" "${HOSTNAME%%.*}" "${PWD/#$HOME/\~}"'
+
+ ;;
+ screen)
+ PROMPT_COMMAND=${PROMPT_COMMAND:+$PROMPT_COMMAND; }'printf "\033_%s@%s:%s\033\\" "${USER}" "${HOSTNAME%%.*}" "${PWD/#$HOME/\~}"'
+ ;;
+ esac
+
+ # Enable bash completion if present
+ [ -r /usr/share/bash-completion/bash_completion ] && . /usr/share/bash-completion/bash_completion
+ EOF
+- mkdir -p "$DESTDIR"/etc
+- install -m 644 bash.bashrc "$DESTDIR"/etc/bash.bashrc
diff --git a/strata/core/bison.morph b/strata/core/bison.morph
new file mode 100644
index 00000000..7c7fffd9
--- /dev/null
+++ b/strata/core/bison.morph
@@ -0,0 +1,5 @@
+name: bison
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- bash bootstrap --skip-po
diff --git a/strata/core/bzip2.morph b/strata/core/bzip2.morph
new file mode 100644
index 00000000..f4bdb794
--- /dev/null
+++ b/strata/core/bzip2.morph
@@ -0,0 +1,10 @@
+name: bzip2
+kind: chunk
+configure-commands:
+- sed -i -rf bzip2.morph-makefix.sed Makefile
+build-commands:
+- make -f Makefile-libbz2_so
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" PREFIX="$PREFIX" install
+- install libbz2.so* "$DESTDIR$PREFIX/lib"
diff --git a/strata/core/ca-certificates.morph b/strata/core/ca-certificates.morph
new file mode 100644
index 00000000..b7bd9e5a
--- /dev/null
+++ b/strata/core/ca-certificates.morph
@@ -0,0 +1,22 @@
+name: ca-certificates
+kind: chunk
+build-system: autotools
+configure-commands: []
+pre-install-commands:
+- mkdir -p "$DESTDIR"/usr/share/ca-certificates
+- mkdir -p "$DESTDIR"/usr/sbin
+post-install-commands:
+- mkdir "$DESTDIR"/etc
+- |
+ cd "$DESTDIR"/usr/share/ca-certificates
+ find * -type f > "$DESTDIR"/etc/ca-certificates.conf
+- |
+ export CERTSCONF="$DESTDIR/etc/ca-certificates.conf"
+ export CERTSDIR="$DESTDIR/usr/share/ca-certificates"
+ export ETCCERTSDIR="$DESTDIR/etc/ssl/certs"
+ mkdir -p "$ETCCERTSDIR"
+ ./sbin/update-ca-certificates
+system-integration:
+ ca-certificates-misc:
+ 00-update-ca-certs:
+ - update-ca-certificates
diff --git a/strata/core/cmake.morph b/strata/core/cmake.morph
new file mode 100644
index 00000000..1d310ef4
--- /dev/null
+++ b/strata/core/cmake.morph
@@ -0,0 +1,8 @@
+name: cmake
+kind: chunk
+configure-commands:
+- ./bootstrap --prefix="$PREFIX" --docdir=/share/doc/cmake --mandir=/share/man
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/core/cpython.morph b/strata/core/cpython.morph
new file mode 100644
index 00000000..6c9e4b3d
--- /dev/null
+++ b/strata/core/cpython.morph
@@ -0,0 +1,7 @@
+name: cpython
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./configure --prefix="$PREFIX" --enable-shared
+post-install-commands:
+- test -x "$DESTDIR"/"$PREFIX"/bin/python2 || ln -s python2.7 "$DESTDIR"/"$PREFIX"/bin/python2
diff --git a/strata/core/curl.morph b/strata/core/curl.morph
new file mode 100644
index 00000000..e245688c
--- /dev/null
+++ b/strata/core/curl.morph
@@ -0,0 +1,9 @@
+name: curl
+kind: chunk
+configure-commands:
+- ./buildconf
+- ./configure --disable-manual --prefix="$PREFIX"
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/core/e2fsprogs.morph b/strata/core/e2fsprogs.morph
new file mode 100644
index 00000000..55ac486c
--- /dev/null
+++ b/strata/core/e2fsprogs.morph
@@ -0,0 +1,22 @@
+name: e2fsprogs
+kind: chunk
+build-system: autotools
+configure-commands:
+# Configure with the usual paths for binaries and config,
+# but also disable whatever we can that is already provided by util-linux.
+# Disabling e2fsprogs' libblkid does not work, but we don't have to
+# install it.
+- |
+ ./configure --prefix="$PREFIX" --sysconfdir=/etc \
+ --disable-libuuid --disable-uuidd --disable-libblkid
+install-commands:
+# e2fsprogs also includes tools that are provided by util-linux, so we need
+# to exclude them. Removing them from DESTDIR after installing causes
+# problems, so we install to a temporary directory and prune them there.
+- |
+ td="$(mktemp -d)"
+ make DESTDIR="$td" install
+ make DESTDIR="$td" install-libs
+ find "$td" \( -name blkid -o -name findfs -o \) -delete
+ find "$td" \( -name blkid.8 -o -name findfs.8 \) -delete
+ mv "$td"/* "$DESTDIR"
diff --git a/strata/core/flex.morph b/strata/core/flex.morph
new file mode 100644
index 00000000..615bf79e
--- /dev/null
+++ b/strata/core/flex.morph
@@ -0,0 +1,14 @@
+name: flex
+kind: chunk
+pre-configure-commands:
+- autoreconf -ivf
+configure-commands:
+- ./configure --prefix="$PREFIX"
+build-commands:
+- make dist_doc_DATA=
+install-commands:
+- make dist_doc_DATA= DESTDIR="$DESTDIR" install
+- ln -s libfl.a "$DESTDIR$PREFIX/lib/libl.a"
+- echo "#!$PREFIX/bin/sh" >"$DESTDIR$PREFIX/bin/lex"
+- echo 'exec '"$PREFIX"'/bin/flex -l "$@"' >>"$DESTDIR$PREFIX/bin/lex"
+- chmod 755 "$DESTDIR$PREFIX/bin/lex"
diff --git a/strata/core/gdbm.morph b/strata/core/gdbm.morph
new file mode 100644
index 00000000..6bec33e0
--- /dev/null
+++ b/strata/core/gdbm.morph
@@ -0,0 +1,8 @@
+name: gdbm
+kind: chunk
+configure-commands:
+- ./configure --prefix="$PREFIX" --enable-libgdbm-compat --disable-nls
+build-commands:
+- make MANS= INFO_DEPS=
+install-commands:
+- make MANS= INFO_DEPS= DESTDIR="$DESTDIR" install
diff --git a/strata/core/gettext-tarball.morph b/strata/core/gettext-tarball.morph
new file mode 100644
index 00000000..4983b484
--- /dev/null
+++ b/strata/core/gettext-tarball.morph
@@ -0,0 +1,5 @@
+name: gettext-tarball
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./configure --prefix="$PREFIX"
diff --git a/strata/core/git.morph b/strata/core/git.morph
new file mode 100644
index 00000000..765f4a5c
--- /dev/null
+++ b/strata/core/git.morph
@@ -0,0 +1,7 @@
+name: git
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- make configure
+build-commands:
+- make all
diff --git a/strata/core/gperf.morph b/strata/core/gperf.morph
new file mode 100644
index 00000000..e4f598c5
--- /dev/null
+++ b/strata/core/gperf.morph
@@ -0,0 +1,3 @@
+name: gperf
+kind: chunk
+build-system: autotools
diff --git a/strata/core/libcap2.morph b/strata/core/libcap2.morph
new file mode 100644
index 00000000..3e4f205e
--- /dev/null
+++ b/strata/core/libcap2.morph
@@ -0,0 +1,6 @@
+name: libcap2
+kind: chunk
+build-commands:
+- make prefix="$PREFIX"
+install-commands:
+- make prefix="$PREFIX" DESTDIR="$DESTDIR" RAISE_SETFCAP=no install lib=lib
diff --git a/strata/core/libexpat.morph b/strata/core/libexpat.morph
new file mode 100644
index 00000000..05285c0d
--- /dev/null
+++ b/strata/core/libexpat.morph
@@ -0,0 +1,6 @@
+name: libexpat
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- ./buildconf.sh
+- automake --add-missing --copy || true
diff --git a/strata/core/libffi.morph b/strata/core/libffi.morph
new file mode 100644
index 00000000..174477af
--- /dev/null
+++ b/strata/core/libffi.morph
@@ -0,0 +1,8 @@
+name: libffi
+kind: chunk
+build-system: autotools
+install-commands:
+- make DESTDIR="$DESTDIR"/ install
+- mkdir -p "$DESTDIR"/usr/include
+- cp "$TARGET"/include/ffi.h "$DESTDIR"/usr/include/
+- cp "$TARGET"/include/ffitarget.h "$DESTDIR"/usr/include/
diff --git a/strata/core/libtool-tarball.morph b/strata/core/libtool-tarball.morph
new file mode 100644
index 00000000..65a3edbc
--- /dev/null
+++ b/strata/core/libtool-tarball.morph
@@ -0,0 +1,5 @@
+name: libtool-tarball
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./configure --prefix="$PREFIX"
diff --git a/strata/core/linux-pam.morph b/strata/core/linux-pam.morph
new file mode 100644
index 00000000..71ad98e9
--- /dev/null
+++ b/strata/core/linux-pam.morph
@@ -0,0 +1,9 @@
+name: linux-pam
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- autoreconf -ivf
+configure-commands:
+# libdir has to be specified or it'll go into lib64. It has to be /lib because
+# systemd installs its pam library into /lib/security.
+- ./configure --prefix="$PREFIX" --libdir="/lib"
diff --git a/strata/core/mini-utils.morph b/strata/core/mini-utils.morph
new file mode 100644
index 00000000..dc915b30
--- /dev/null
+++ b/strata/core/mini-utils.morph
@@ -0,0 +1,6 @@
+name: mini-utils
+kind: chunk
+build-system: autotools
+configure-commands: []
+install-commands:
+- make PREFIX="$PREFIX" DESTDIR="$DESTDIR" install
diff --git a/strata/core/ncurses.morph b/strata/core/ncurses.morph
new file mode 100644
index 00000000..42acb481
--- /dev/null
+++ b/strata/core/ncurses.morph
@@ -0,0 +1,45 @@
+name: ncurses
+kind: chunk
+configure-commands:
+- LDCONFIG=true ./configure --with-shared --without-debug --enable-widec
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
+post-install-commands:
+- |
+  # LFS recommends some alterations for software that looks for
+  # libcurses instead of libncurses.
+  # Keeping this in one shell block rather than separate morph commands
+  # means fewer strings have to be escaped and comments are possible.
+
+ # some software expects to find libcurses in /lib
+ mkdir -p "$DESTDIR/lib"
+ mv "$DESTDIR"/"$PREFIX"/lib/libncursesw.so.5* "$DESTDIR"/lib
+ ln -sf ../../lib/libncursesw.so.5 "$DESTDIR"/"$PREFIX"/lib/libncursesw.so
+
+ # some linker tricks for libraries that look for non-wide character
+ # versions of ncurses
+ for lib in ncurses form panel menu; do
+ # remove non-wide shared object
+ rm -f "$DESTDIR/${PREFIX}/lib/lib${lib}.so"
+ # use a linker script to find the wide character variant
+ echo "INPUT(-l${lib}w)" >"$DESTDIR/${PREFIX}/lib/lib${lib}.so"
+ # symlink the non-shared library
+ ln -sf "lib${lib}w.a" "$DESTDIR/${PREFIX}/lib/lib${lib}.a"
+ done
+ ln -sf libncurses++w.a "$DESTDIR/${PREFIX}/lib/libncurses++.a"
+
+ # redirect software that looks for libcurses
+ rm -f "$DESTDIR/${PREFIX}/lib/libcursesw.so"
+ echo "INPUT(-lncursesw)" >"$DESTDIR/${PREFIX}/lib/libcursesw.so"
+ ln -sf libncurses.so "$DESTDIR/${PREFIX}/lib/libcurses.so"
+ ln -sf libncursesw.a "$DESTDIR/${PREFIX}/lib/libcursesw.a"
+ ln -sf libncurses.a "$DESTDIR/${PREFIX}/lib/libcurses.a"
+
+ # install documentation
+ mkdir -p "$DESTDIR/${PREFIX}/share/doc/ncurses"
+ cp -R doc/* "$DESTDIR/${PREFIX}/share/doc/ncurses"
+
+ # remove 'clear' and 'reset' executables, busybox provides them
+ rm -f "$DESTDIR/${PREFIX}/bin/clear" "$DESTDIR/${PREFIX}/bin/reset"
diff --git a/strata/core/openssl-new.morph b/strata/core/openssl-new.morph
new file mode 100644
index 00000000..2ed14ff2
--- /dev/null
+++ b/strata/core/openssl-new.morph
@@ -0,0 +1,15 @@
+name: openssl-new
+kind: chunk
+max-jobs: 1
+configure-commands:
+- sed -i -e 's,^LIBNAMES=\\(.*\\) padlock \\(.*\\),LIBNAMES=\\1 \\2,g' engines/Makefile
+- |
+ if [ "$(uname -m)" = "ppc64" ]; then
+ sh ./Configure linux-ppc64 --openssldir=/etc/ssl --prefix="${PREFIX-/usr}" --libdir=lib shared
+ else
+ ./config --openssldir=/etc/ssl --prefix="${PREFIX-/usr}" --libdir=lib shared
+ fi
+build-commands:
+- make
+install-commands:
+- make INSTALL_PREFIX="$DESTDIR" install_sw
diff --git a/strata/core/patch.morph b/strata/core/patch.morph
new file mode 100644
index 00000000..b7f146ef
--- /dev/null
+++ b/strata/core/patch.morph
@@ -0,0 +1,5 @@
+name: patch
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- ./bootstrap --skip-po
diff --git a/strata/core/perl.morph b/strata/core/perl.morph
new file mode 100644
index 00000000..ce086aa7
--- /dev/null
+++ b/strata/core/perl.morph
@@ -0,0 +1,16 @@
+name: perl
+kind: chunk
+max-jobs: 1
+configure-commands:
+- sh Configure -des -Dprefix="$PREFIX" -Darchlib="$PREFIX/lib/perl" -Dprivlib="$PREFIX/share/perl"
+ -Dbin="$PREFIX/bin" -Dscriptdir="$PREFIX/bin" -Dman1dir="$PREFIX/share/man/man1"
+ -Dman3dir="$PREFIX/share/man/man3" -Dsiteprefix="$PREFIX" -Dsitearch="$PREFIX/lib/perl"
+ -Dsitelib="$PREFIX/share/perl" -Dsitebin="$PREFIX/bin" -Dsitescript="$PREFIX/bin"
+ -Dsiteman1dir="$PREFIX/share/man/man1" -Dsiteman3dir="$PREFIX/share/man/man3" -Dvendorprefix="$PREFIX"
+ -Dvendorarch="$PREFIX/lib/perl" -Dvendorlib="$PREFIX/share/perl" -Dvendorbin="$PREFIX/bin"
+ -Dvendorscript="$PREFIX/bin" -Dvendorman1dir="$PREFIX/share/man/man1" -Dvendorman3dir="$PREFIX/share/man/man3"
+ -Dpager="$PREFIX/bin/less -isR" -Duseshrplib
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/core/pkg-config.morph b/strata/core/pkg-config.morph
new file mode 100644
index 00000000..ed35a2ac
--- /dev/null
+++ b/strata/core/pkg-config.morph
@@ -0,0 +1,5 @@
+name: pkg-config
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./autogen.sh --prefix="$PREFIX" --with-internal-glib
diff --git a/strata/core/python-setuptools.morph b/strata/core/python-setuptools.morph
new file mode 100644
index 00000000..fa436c60
--- /dev/null
+++ b/strata/core/python-setuptools.morph
@@ -0,0 +1,6 @@
+name: python-setuptools
+kind: chunk
+build-commands:
+- python setup.py build
+install-commands:
+- python setup.py install --prefix=/usr --root "$DESTDIR"
diff --git a/strata/core/readline.morph b/strata/core/readline.morph
new file mode 100644
index 00000000..511e124c
--- /dev/null
+++ b/strata/core/readline.morph
@@ -0,0 +1,6 @@
+name: readline
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- cp /usr/share/automake*/config.guess support
+- cp /usr/share/automake*/config.sub support
diff --git a/strata/core/shadow.morph b/strata/core/shadow.morph
new file mode 100644
index 00000000..c8715a7d
--- /dev/null
+++ b/strata/core/shadow.morph
@@ -0,0 +1,53 @@
+name: shadow
+kind: chunk
+build-system: autotools
+configure-commands:
+# Installing to /bin so that they overwrite busybox login.
+- |
+ ./autogen.sh --with-selinux=no \
+ --sysconfdir=/etc \
+ --with-libpam=yes \
+ --prefix="$PREFIX" \
+ --bindir=/bin
+post-install-commands:
+# Disable things that are handled by PAM instead.
+- |
+ for OPTION in FAIL_DELAY \
+ FAILLOG_ENAB \
+ LASTLOG_ENAB \
+ MAIL_CHECK_ENAB \
+ OBSCURE_CHECKS_ENAB \
+ PORTTIME_CHECKS_ENAB \
+ QUOTAS_ENAB \
+ CONSOLE MOTD_FILE \
+ FTMP_FILE \
+ NOLOGINS_FILE \
+ ENV_HZ \
+ PASS_MIN_LEN \
+ SU_WHEEL_ONLY \
+ CRACKLIB_DICTPATH \
+ PASS_CHANGE_TRIES \
+ PASS_ALWAYS_WARN \
+ CHFN_AUTH \
+ ENVIRON_FILE
+ do
+ sed -i -e "s/^${OPTION}.*/# & #This option is handled by PAM instead./" \
+ "$DESTDIR/etc/login.defs"
+ done
+# ENCRYPT_METHOD is handled specially with PAM: it will use the default as
+# provided in login.defs, but it may be overridden in the pam.d config.
+# We do not currently override this though, and it's better to guard ourselves
+# against accidentally reducing password security by forgetting to include the
+# algorithm as an argument to the PAM module, so ENCRYPT_METHOD is configured
+# here, rather than in PAM.
+- |
+ if grep -q '[\s#]ENCRYPT_METHOD' "$DESTDIR/etc/login.defs"; then
+ sed -i -e '/^[\s#]*ENCRYPT_METHOD /s/.*/ENCRYPT_METHOD SHA512/g' "$DESTDIR/etc/login.defs"
+ else
+ echo 'ENCRYPT_METHOD SHA512' >>"$DESTDIR/etc/login.defs"
+ fi
+
+# The default pam.d config files have pam_selinux.so as a requirement, even
+# when shadow is configured '--with-selinux=no'. We change this default config
+# to make this requirement optional.
+- sed -i -e 's/\(.*\)required\(.*pam_selinux.so.*\)/\1optional\2/' "$DESTDIR"/etc/pam.d/*
diff --git a/strata/core/texinfo-tarball.morph b/strata/core/texinfo-tarball.morph
new file mode 100644
index 00000000..5f784ba2
--- /dev/null
+++ b/strata/core/texinfo-tarball.morph
@@ -0,0 +1,9 @@
+name: texinfo-tarball
+kind: chunk
+configure-commands:
+- ./configure --prefix="$PREFIX"
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
+- make DESTDIR="$DESTDIR" TEXMF="$PREFIX/share/texmf" install-tex
diff --git a/strata/core/util-linux.morph b/strata/core/util-linux.morph
new file mode 100644
index 00000000..eebba6f6
--- /dev/null
+++ b/strata/core/util-linux.morph
@@ -0,0 +1,10 @@
+name: util-linux
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./autogen.sh
+# Installing to /bin so that they overwrite busybox login.
+- |
+ ./configure --prefix="$PREFIX" \
+ --disable-use-tty-group \
+ --bindir=/bin
diff --git a/strata/coreutils-common.morph b/strata/coreutils-common.morph
new file mode 100644
index 00000000..2ad2d866
--- /dev/null
+++ b/strata/coreutils-common.morph
@@ -0,0 +1,40 @@
+name: coreutils-common
+kind: stratum
+description: |
+  We need to split this out so we can build things using coreutils without
+  having it in the final systems (for example GENIVI ones, which do not
+  accept GPLv3 code).
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: coreutils
+ morph: strata/coreutils-common/coreutils.morph
+ repo: upstream:coreutils
+ ref: 9df9643842e4b4d8ece710fe6105f32fa38a0d22
+ unpetrify-ref: baserock/8.23
+- name: sed
+ morph: strata/coreutils-common/sed.morph
+ repo: upstream:sed
+ ref: ed4b1d7c04f92b330b940d4f0d02cd51d2473ce9
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - coreutils
+- name: diff
+ morph: strata/coreutils-common/diff.morph
+ repo: upstream:diffutils
+ ref: bd86eb1a1e87ab85a7c8dbb658fa829ce69c252e
+ unpetrify-ref: baserock/v3.3
+ build-depends:
+ - coreutils
+- name: tar
+ morph: strata/coreutils-common/tar.morph
+ repo: upstream:tar
+ ref: 9a58d148c26c220cb1b163c71e7a51a2e41f6b37
+ unpetrify-ref: baserock/release_1_28
+ build-depends:
+ - coreutils
+- name: findutils
+ morph: strata/coreutils-common/findutils.morph
+ repo: upstream:findutils
+ ref: ee6b25541336f9d74624ca814538ddc8c67f0732
+ unpetrify-ref: baserock/v4.5.14
diff --git a/strata/coreutils-common/coreutils.morph b/strata/coreutils-common/coreutils.morph
new file mode 100644
index 00000000..19542d79
--- /dev/null
+++ b/strata/coreutils-common/coreutils.morph
@@ -0,0 +1,16 @@
+name: coreutils
+kind: chunk
+build-system: autotools
+configure-commands:
+- sed -i -e '/^buildreq="/,/^"/{/rsync/d}' bootstrap.conf
+- bash bootstrap --skip-po --no-git --gnulib-srcdir=gnulib
+- FORCE_UNSAFE_CONFIGURE=1 ./configure --prefix="$PREFIX" --disable-nls
+build-commands:
+- make WERROR_CFLAGS=
+install-commands:
+- make INSTALL_PROGRAM=install DESTDIR="$DESTDIR" install
+# This hack is for supporting the "fdatasync" argument of the "dd" command,
+# which busybox "dd" does not support.
+# This hack should disappear when we merge /usr/bin and /bin.
+- mkdir -p "$DESTDIR"/bin/
+- ln -sf ../usr/bin/dd "$DESTDIR"/bin/
diff --git a/strata/coreutils-common/diff.morph b/strata/coreutils-common/diff.morph
new file mode 100644
index 00000000..346f8635
--- /dev/null
+++ b/strata/coreutils-common/diff.morph
@@ -0,0 +1,7 @@
+name: diff
+kind: chunk
+build-system: autotools
+configure-commands:
+- sed -i -e '/^buildreq="/,/^"/{/rsync/d}' bootstrap.conf
+- ./bootstrap --skip-po
+- ./configure --prefix="$PREFIX" --disable-gcc-warnings
diff --git a/strata/coreutils-common/findutils.morph b/strata/coreutils-common/findutils.morph
new file mode 100644
index 00000000..c9ba5ab6
--- /dev/null
+++ b/strata/coreutils-common/findutils.morph
@@ -0,0 +1,5 @@
+name: findutils
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- ./import-gnulib.sh -d gnulib
diff --git a/strata/coreutils-common/sed.morph b/strata/coreutils-common/sed.morph
new file mode 100644
index 00000000..92f6fa51
--- /dev/null
+++ b/strata/coreutils-common/sed.morph
@@ -0,0 +1,12 @@
+name: sed
+kind: chunk
+build-system: autotools
+configure-commands:
+- sed -i -e '/^buildreq="/,/^"/{/rsync/d}' bootstrap.conf
+- ./bootstrap --skip-po
+- ./configure --prefix="$PREFIX" --disable-gcc-warnings
+post-install-commands:
+# This hack is to use GNU sed instead of the busybox one.
+# This will disappear when we merge /usr/bin and /bin.
+- mkdir -p "$DESTDIR"/bin/
+- ln -sf ../usr/bin/sed "$DESTDIR"/bin/
diff --git a/strata/coreutils-common/tar.morph b/strata/coreutils-common/tar.morph
new file mode 100644
index 00000000..17d6a597
--- /dev/null
+++ b/strata/coreutils-common/tar.morph
@@ -0,0 +1,18 @@
+name: tar
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- rm .gitmodules
+- bash bootstrap --skip-po --gnulib-srcdir="$(pwd)/gnulib" --paxutils-srcdir="$(pwd)/paxutils"
+configure-commands:
+# Configure flag notes:
+# 1. Needed to run configure as root
+# 2. Disable some warnings that cause the build to fail
+# 3. To use GNU tar instead of the busybox one.
+#    This will disappear when we merge /usr/bin and /bin
+
+- |
+ `# [1]` env FORCE_UNSAFE_CONFIGURE=1 \
+ ./configure --prefix="$PREFIX" \
+ `# [2]` --disable-gcc-warnings \
+ `# [3]` --bindir=/bin
diff --git a/strata/cross-bootstrap.morph b/strata/cross-bootstrap.morph
new file mode 100644
index 00000000..ffed2e66
--- /dev/null
+++ b/strata/cross-bootstrap.morph
@@ -0,0 +1,28 @@
+name: cross-bootstrap
+kind: stratum
+description: The minimal development tools to build a baserock devel system
+build-depends:
+- morph: strata/core.morph
+- morph: strata/python-core.morph
+chunks:
+- name: groff
+ morph: strata/cross-bootstrap/groff.morph
+ repo: upstream:groff-git
+ ref: 16305a24e67966ace06d55e2a0b98cc0e3127a93
+ unpetrify-ref: 1.22.3
+- name: openssh
+ morph: strata/cross-bootstrap/openssh.morph
+ repo: upstream:openssh-git
+ ref: 782fe9e725243eeb5ad6ab9a1783b5d6bedfe0d7
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - groff
+- name: linux-user-chroot
+ repo: upstream:linux-user-chroot
+ ref: d25cc110f69e6e71a95b4ac532dcfc5423d4a16b
+ unpetrify-ref: baserock/morph
+- name: rsync
+ morph: strata/cross-bootstrap/rsync.morph
+ repo: upstream:rsync
+ ref: 7cb0de6326c915a72253fd103dae93308031ec3f
+ unpetrify-ref: v3.1.1
diff --git a/strata/cross-bootstrap/groff.morph b/strata/cross-bootstrap/groff.morph
new file mode 100644
index 00000000..8380a9cc
--- /dev/null
+++ b/strata/cross-bootstrap/groff.morph
@@ -0,0 +1,13 @@
+name: groff
+kind: chunk
+max-jobs: 1
+configure-commands:
+- PAGE=A4 ./configure --prefix="$PREFIX"
+build-commands:
+# hack to avoid a netpbm dependency
+- touch doc/gnu.eps
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
+- ln -s eqn "$DESTDIR$PREFIX/bin/geqn"
+- ln -s tbl "$DESTDIR$PREFIX/bin/gtbl"
diff --git a/strata/cross-bootstrap/openssh.morph b/strata/cross-bootstrap/openssh.morph
new file mode 100644
index 00000000..6c759cb0
--- /dev/null
+++ b/strata/cross-bootstrap/openssh.morph
@@ -0,0 +1,28 @@
+name: openssh
+kind: chunk
+configure-commands:
+- autoreconf -if
+- |
+ OPENSSL_SEEDS_ITSELF=yes ./configure \
+ --prefix="$PREFIX" --sysconfdir=/etc/ssh \
+ --datadir="$PREFIX/share/sshd" \
+ --libexecdir="$PREFIX/lib/openssh" \
+ --with-privsep-path=/var/lib/sshd
+build-commands:
+- make
+- |
+ sed -e "s|@prefix@|$PREFIX|g" \
+ -e 's|@STARTUP_SCRIPT_SHELL@|/bin/sh|g' \
+ -e 's|@sysconfdir@|/etc/ssh|g' \
+ -e 's|@COMMENT_OUT_ECC@||g' \
+ sshd-keygen.in >sshd-keygen
+- sed -e "s|@prefix@|$PREFIX|g" opensshd.service.in >opensshd.service
+install-commands:
+- make DESTDIR="$DESTDIR" install
+- mkdir -p "$DESTDIR/$PREFIX/sbin"
+- chmod go= "$DESTDIR"/var/lib/sshd
+- install -m 744 sshd-keygen "$DESTDIR/$PREFIX/sbin/sshd-keygen"
+- install -m 755 contrib/ssh-copy-id "$DESTDIR/$PREFIX/bin/ssh-copy-id"
+- mkdir -p "$DESTDIR/lib/systemd/system/multi-user.target.wants"
+- install -m 644 opensshd.service "$DESTDIR/lib/systemd/system/opensshd.service"
+- ln -s ../opensshd.service "$DESTDIR/lib/systemd/system/multi-user.target.wants/opensshd.service"
diff --git a/strata/cross-bootstrap/rsync.morph b/strata/cross-bootstrap/rsync.morph
new file mode 100644
index 00000000..9a92878d
--- /dev/null
+++ b/strata/cross-bootstrap/rsync.morph
@@ -0,0 +1,6 @@
+name: rsync
+kind: chunk
+build-system: autotools
+build-commands:
+- make proto
+- make
diff --git a/strata/cross-tools.morph b/strata/cross-tools.morph
new file mode 100644
index 00000000..36a82808
--- /dev/null
+++ b/strata/cross-tools.morph
@@ -0,0 +1,10 @@
+name: cross-tools
+kind: stratum
+description: Tools used together with cross compilers
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: patchelf
+ repo: upstream:patchelf
+ ref: 21a85cc1c63cf3ef060ece59cdd82455e2884703
+ unpetrify-ref: baserock/morph
diff --git a/strata/cups.morph b/strata/cups.morph
new file mode 100644
index 00000000..b10bcd6c
--- /dev/null
+++ b/strata/cups.morph
@@ -0,0 +1,11 @@
+name: cups
+kind: stratum
+description: CUPS printing system
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: cups
+ morph: strata/cups/cups.morph
+ repo: upstream:cups
+ ref: f28bae1aeae3964fba4f0728263657405f63c417
+ unpetrify-ref: release-1.7.0
diff --git a/strata/cups/cups.morph b/strata/cups/cups.morph
new file mode 100644
index 00000000..a3c2edf8
--- /dev/null
+++ b/strata/cups/cups.morph
@@ -0,0 +1,12 @@
+name: cups
+kind: chunk
+build-system: manual
+configure-commands:
+- autoconf
+- ./configure --prefix="$PREFIX"
+build-commands:
+- make
+# Note: BUILDROOT is the variable recommended in INSTALL.txt for the
+# destination directory.
+install-commands:
+- make BUILDROOT="$DESTDIR" install
diff --git a/strata/cxmanage.morph b/strata/cxmanage.morph
new file mode 100644
index 00000000..326f7a59
--- /dev/null
+++ b/strata/cxmanage.morph
@@ -0,0 +1,32 @@
+name: cxmanage
+kind: stratum
+description: All the tools for Calxeda management
+build-depends:
+- morph: strata/tools.morph
+chunks:
+- name: ipmitool
+ repo: upstream:ipmitool
+ ref: 830d5a3787b265e5dde154d2f0e08b10ae9a7bcd
+ unpetrify-ref: baserock/morph
+- name: pexpect
+ morph: strata/cxmanage/pexpect.morph
+ repo: upstream:pexpect
+ ref: 7a8455f21bb45020f3594c59fc8c85cf738e147c
+ unpetrify-ref: baserock/morph
+- name: pyipmi
+ repo: upstream:pyipmi
+ ref: bb7dba84578c05ba87db3c4186a2989c89e4e519
+ unpetrify-ref: baserock/morph
+- name: tftpy
+ repo: upstream:tftpy
+ ref: f3d2fc4c62c1e64dd49adfaf8e65097eb8ea307b
+ unpetrify-ref: baserock/morph
+- name: cxmanage
+ repo: upstream:cxmanage
+ ref: 54116d3c486ed7aa49f284e4cad9e6e7c293bea6
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - ipmitool
+ - pexpect
+ - pyipmi
+ - tftpy
diff --git a/strata/cxmanage/pexpect.morph b/strata/cxmanage/pexpect.morph
new file mode 100644
index 00000000..09254350
--- /dev/null
+++ b/strata/cxmanage/pexpect.morph
@@ -0,0 +1,10 @@
+name: pexpect
+kind: chunk
+description: |
+ Pexpect is a pure Python module that accomplishes behaviour similar to Expect
+configure-commands:
+- cd pexpect && make
+build-commands:
+- cd pexpect && python setup.py build
+install-commands:
+- cd pexpect && python setup.py install --prefix "$PREFIX" --root "$DESTDIR"
diff --git a/strata/databases.morph b/strata/databases.morph
new file mode 100644
index 00000000..89aa996e
--- /dev/null
+++ b/strata/databases.morph
@@ -0,0 +1,32 @@
+name: databases
+kind: stratum
+description: some popular databases and some utilities related to databases
+build-depends:
+- morph: strata/tools.morph
+- morph: strata/nfs.morph
+- morph: strata/python-core.morph
+chunks:
+- name: postgresql
+ repo: upstream:postgresql
+ ref: d4f8dde3c1c2c90c723ab550e7f449fc75599316
+ unpetrify-ref: REL9_3_4
+- name: redis
+ morph: strata/databases/redis.morph
+ repo: upstream:redis
+ ref: 9ee59fca9c5c429185d35779c2d5db64005091b0
+ unpetrify-ref: baserock/3.0
+- name: memcached
+ morph: strata/databases/memcached.morph
+ repo: upstream:memcached
+ ref: c5530027c8ea28674358327ab8212ebaf014c848
+ unpetrify-ref: 1.4.22
+- name: psycopg2
+ repo: upstream:python-packages/psycopg2
+ ref: 00cafbe85bb82d85cbfe0f062a73c562433b5bc8
+ unpetrify-ref: 2.5.4
+ build-depends:
+ - postgresql
+- name: python-memcached
+ repo: upstream:python-packages/python-memcached
+ ref: 156ee975ff6e5c69e1d6c58b09bedf03c182c5db
+ unpetrify-ref: master
diff --git a/strata/databases/memcached.morph b/strata/databases/memcached.morph
new file mode 100644
index 00000000..e81a8be1
--- /dev/null
+++ b/strata/databases/memcached.morph
@@ -0,0 +1,21 @@
+name: memcached
+kind: chunk
+build-system: autotools
+configure-commands:
+- perl version.pl
+- touch README
+- autoreconf -fvi
+- ./configure --prefix="$PREFIX"
+install-commands:
+- make install
+- install -D -m 644 scripts/memcached.service "$DESTDIR$PREFIX"/lib/systemd/system/memcached.service
+post-install-commands:
+- mkdir -p "$DESTDIR"/etc/sysconfig
+- |
+ cat <<EOF > "$DESTDIR"/etc/sysconfig/memcached
+ PORT="11211"
+ USER="nobody"
+ MAXCONN="1024"
+ CACHESIZE="64"
+ OPTIONS=""
+ EOF
diff --git a/strata/databases/redis.morph b/strata/databases/redis.morph
new file mode 100644
index 00000000..51f602dd
--- /dev/null
+++ b/strata/databases/redis.morph
@@ -0,0 +1,6 @@
+name: redis
+kind: chunk
+build-commands:
+- make
+install-commands:
+- make PREFIX="$DESTDIR/$PREFIX" install
diff --git a/strata/devtools.morph b/strata/devtools.morph
new file mode 100644
index 00000000..5fb93c1d
--- /dev/null
+++ b/strata/devtools.morph
@@ -0,0 +1,38 @@
+name: devtools
+kind: stratum
+description: |
+  Extra development tools included in the devel system. This stratum
+  is here to help reduce unnecessary building; chunks added to this
+  stratum should not have any dependents (please don't build-depend on
+  this stratum).
+build-depends:
+- morph: strata/core.morph
+- morph: strata/python-core.morph
+chunks:
+- name: nano
+ morph: strata/devtools/nano.morph
+ repo: upstream:nano-tarball
+ ref: fb040ea36cb8e2158ccd9100600652f94ae90af1
+ unpetrify-ref: baserock/morph
+- name: vim
+ morph: strata/devtools/vim.morph
+ repo: upstream:vim
+ ref: 07c2c06799e0579e6bfb1a7d98acf38e36a98f79
+ unpetrify-ref: baserock/morph
+- name: pv
+ repo: upstream:pv
+ ref: d6ce7cfec684fa72d7a919d7b1aa817a0ca6102a
+ unpetrify-ref: baserock/morph
+- name: screen
+ morph: strata/devtools/screen.morph
+ repo: upstream:screen
+ ref: 7dd4a9e5f385c96a77e8ee5c977a1dde4c0ff467
+ unpetrify-ref: baserock/morph
+- name: less
+ repo: upstream:less
+ ref: 09a405d8f652b56944c93ebf5c673cdfe5319b04
+ unpetrify-ref: baserock/morph
+- name: git-review
+ repo: upstream:git-review
+ ref: 79262a52301c146a6b60d09a828661a83a5f5ba7
+ unpetrify-ref: master
diff --git a/strata/devtools/nano.morph b/strata/devtools/nano.morph
new file mode 100644
index 00000000..3eeee120
--- /dev/null
+++ b/strata/devtools/nano.morph
@@ -0,0 +1,16 @@
+name: nano
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./configure --prefix="$PREFIX" --sysconfdir=/etc
+post-install-commands:
+- |
+ cat <<EOF >> nanorc
+ set smooth
+ include /usr/share/nano/c.nanorc
+ include /usr/share/nano/patch.nanorc
+ include /usr/share/nano/python.nanorc
+ include /usr/share/nano/sh.nanorc
+ EOF
+- mkdir -p "$DESTDIR"/etc
+- install -m 644 nanorc "$DESTDIR"/etc/nanorc
diff --git a/strata/devtools/screen.morph b/strata/devtools/screen.morph
new file mode 100644
index 00000000..0b23c5d4
--- /dev/null
+++ b/strata/devtools/screen.morph
@@ -0,0 +1,11 @@
+name: screen
+kind: chunk
+description: A terminal multiplexer and sanity tool for many
+max-jobs: 1
+configure-commands:
+- cd ./src && sh autogen.sh
+- cd ./src && ./configure --prefix="$PREFIX"
+build-commands:
+- cd ./src && make
+install-commands:
+- cd ./src && make DESTDIR="$DESTDIR" install
diff --git a/strata/devtools/vim.morph b/strata/devtools/vim.morph
new file mode 100644
index 00000000..58e1403f
--- /dev/null
+++ b/strata/devtools/vim.morph
@@ -0,0 +1,10 @@
+name: vim
+kind: chunk
+build-system: autotools
+configure-commands:
+- echo '#define SYS_VIMRC_FILE "/etc/vimrc"' >>src/feature.h
+- ./configure --prefix="$PREFIX" --enable-multibyte
+install-commands:
+- make DESTDIR="$DESTDIR" install
+- mkdir -p "$DESTDIR"/etc
+- install -m 644 runtime/vimrc_example.vim "$DESTDIR"/etc/vimrc
diff --git a/strata/django.morph b/strata/django.morph
new file mode 100644
index 00000000..8ae63801
--- /dev/null
+++ b/strata/django.morph
@@ -0,0 +1,32 @@
+name: django
+kind: stratum
+description: Stratum with Django and its plugins and dependencies.
+build-depends:
+- morph: strata/foundation.morph
+- morph: strata/python-core.morph
+chunks:
+- name: django
+ repo: upstream:python-packages/django
+ ref: 811508b0512d3fa6b2328f8647fbf9eace68eceb
+ unpetrify-ref: 1.6.8
+- name: django-appconf
+ repo: upstream:python-packages/django-appconf
+ ref: 0c9835f95b44db1b3eb1a9409f95a3ecd63b8ff5
+ unpetrify-ref: 0.6
+- name: django-compressor
+ repo: upstream:python-packages/django-compressor
+ ref: e747dce3d7e04fe595bbfed54f9554c2725eb757
+ unpetrify-ref: 1.4
+ build-depends:
+ - django-appconf
+- name: pyscss
+ repo: upstream:python-packages/pyscss
+ ref: 85e8806e39e840c4402ee813c3e38fefaf9212b4
+ unpetrify-ref: 1.2.1
+- name: django-pyscss
+ repo: upstream:python-packages/django-pyscss
+ ref: 665270b54eea5de5aca379a78673b411ed6b1f04
+ unpetrify-ref: v1.0.6
+ build-depends:
+ - django
+ - pyscss
diff --git a/strata/enlightenment.morph b/strata/enlightenment.morph
new file mode 100644
index 00000000..d1637f48
--- /dev/null
+++ b/strata/enlightenment.morph
@@ -0,0 +1,79 @@
+name: enlightenment
+kind: stratum
+description: Enlightenment Desktop and Window Manager
+build-depends:
+- morph: strata/tools.morph
+- morph: strata/x-generic.morph
+- morph: strata/lua.morph
+- morph: strata/audio-bluetooth.morph
+- morph: strata/multimedia-gstreamer.morph
+- morph: strata/connman-common.morph
+chunks:
+- name: fribidi
+ repo: upstream:fribidi
+ ref: c9916f2ab289126a32febcc4754efc73a011fb0c
+ unpetrify-ref: baserock/morph/0.19.6
+- name: bullet3
+ morph: strata/enlightenment/bullet3.morph
+ repo: upstream:bullet3
+ ref: 940059ddb3c4476bb9860f79f35b0b1230857f54
+ unpetrify-ref: baserock/morph
+- name: efl
+ morph: strata/enlightenment/efl.morph
+ repo: upstream:enlightenment/efl
+ ref: 3013a7c5cbd728dd9bc5516ffb4bd4f505888bd4
+ unpetrify-ref: v1.12.0
+ build-depends:
+ - bullet3
+ - fribidi
+- name: elementary
+ morph: strata/enlightenment/elementary.morph
+ repo: upstream:enlightenment/elementary
+ ref: c4fbbbf984b7bd1553191c26459bce4589122ad5
+ unpetrify-ref: v1.12.0
+ build-depends:
+ - efl
+- name: evas_generic_loaders
+ morph: strata/enlightenment/evas_generic_loaders.morph
+ repo: upstream:enlightenment/evas_generic_loaders
+ ref: 5d4415ec1e64930452f7d17cbec4c28a0efa8402
+ unpetrify-ref: v1.12.0
+ build-depends:
+ - efl
+- name: enlightenment
+ morph: strata/enlightenment/enlightenment.morph
+ repo: upstream:enlightenment/enlightenment
+ ref: 55165e2576823780a1053b03c5230cc8df8957b5
+ unpetrify-ref: v0.19.1
+ build-depends:
+ - efl
+ - elementary
+- name: imlib2
+ morph: strata/enlightenment/imlib2.morph
+ repo: upstream:enlightenment/imlib2
+ ref: f2f20dc5791a175be398a17fcdc4852a79079d47
+ unpetrify-ref: baserock/1.4.6
+ build-depends:
+ - efl
+ - elementary
+ - enlightenment
+- name: libast
+ morph: strata/enlightenment/libast.morph
+ repo: upstream:enlightenment/libast
+ ref: 0dbc0a5df55474bf61ca166be40e8de2d9e3a031
+ build-depends:
+ - efl
+ - elementary
+ - enlightenment
+ - imlib2
+- name: eterm
+ morph: strata/enlightenment/eterm.morph
+ repo: upstream:enlightenment/eterm
+ ref: 097234f1f27709ff2444e303350764ea3b80b3ad
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - efl
+ - elementary
+ - enlightenment
+ - libast
+ - imlib2
diff --git a/strata/enlightenment/bullet3.morph b/strata/enlightenment/bullet3.morph
new file mode 100644
index 00000000..74cb097a
--- /dev/null
+++ b/strata/enlightenment/bullet3.morph
@@ -0,0 +1,8 @@
+name: bullet
+kind: chunk
+configure-commands:
+- cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_SHARED_LIBS=ON
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/enlightenment/efl.morph b/strata/enlightenment/efl.morph
new file mode 100644
index 00000000..b2a013ae
--- /dev/null
+++ b/strata/enlightenment/efl.morph
@@ -0,0 +1,9 @@
+name: efl
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix=/usr --with-tests=none --disable-image-loader-gif --enable-systemd
+ --disable-image-loader-tiff
+build-commands:
+- make VPATH=/usr/lib64
+install-commands:
+- make DESTDIR="$DESTDIR" VPATH=/usr/lib64 install
diff --git a/strata/enlightenment/elementary.morph b/strata/enlightenment/elementary.morph
new file mode 100644
index 00000000..3c0bda0d
--- /dev/null
+++ b/strata/enlightenment/elementary.morph
@@ -0,0 +1,8 @@
+name: elementary
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix=/usr
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/enlightenment/enlightenment.morph b/strata/enlightenment/enlightenment.morph
new file mode 100644
index 00000000..90f0c646
--- /dev/null
+++ b/strata/enlightenment/enlightenment.morph
@@ -0,0 +1,8 @@
+name: enlightenment
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix=/usr
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/enlightenment/eterm.morph b/strata/enlightenment/eterm.morph
new file mode 100644
index 00000000..9a98d193
--- /dev/null
+++ b/strata/enlightenment/eterm.morph
@@ -0,0 +1,8 @@
+name: eterm
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix=/usr
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/enlightenment/evas_generic_loaders.morph b/strata/enlightenment/evas_generic_loaders.morph
new file mode 100644
index 00000000..ef36a40b
--- /dev/null
+++ b/strata/enlightenment/evas_generic_loaders.morph
@@ -0,0 +1,8 @@
+name: evas_generic_loaders
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix=/usr
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/enlightenment/imlib2.morph b/strata/enlightenment/imlib2.morph
new file mode 100644
index 00000000..82617ad2
--- /dev/null
+++ b/strata/enlightenment/imlib2.morph
@@ -0,0 +1,8 @@
+name: imlib2
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix=/usr
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/enlightenment/libast.morph b/strata/enlightenment/libast.morph
new file mode 100644
index 00000000..c44e274c
--- /dev/null
+++ b/strata/enlightenment/libast.morph
@@ -0,0 +1,8 @@
+name: libast
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix=/usr
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/erlang.morph b/strata/erlang.morph
new file mode 100644
index 00000000..d31e78cd
--- /dev/null
+++ b/strata/erlang.morph
@@ -0,0 +1,26 @@
+name: erlang
+kind: stratum
+description: stratum for Erlang/OTP and related tools
+build-depends:
+- morph: strata/tools.morph
+chunks:
+- name: erlang
+ morph: strata/erlang/erlang.morph
+ repo: upstream:erlang
+ ref: 9417f044ee3c291c2ea343c203aebdcc40597226
+ unpetrify-ref: OTP-17.3.4
+- name: rebar
+ morph: strata/erlang/rebar.morph
+ repo: upstream:erlang-modules/rebar
+ ref: d42ed4683576d995f60e3222f076e99f0b081f79
+  unpetrify-ref: master
+ build-depends:
+ - erlang
+- name: erlang-sd_notify
+ morph: strata/erlang/erlang-sd_notify.morph
+ repo: upstream:erlang-modules/erlang-sd_notify
+ ref: 99f4689c2c18570680329f822591f95f9341ca10
+  unpetrify-ref: master
+ build-depends:
+ - erlang
+ - rebar
diff --git a/strata/erlang/erlang-sd_notify.morph b/strata/erlang/erlang-sd_notify.morph
new file mode 100644
index 00000000..dd3f66ca
--- /dev/null
+++ b/strata/erlang/erlang-sd_notify.morph
@@ -0,0 +1,8 @@
+name: erlang-sd_notify
+kind: chunk
+build-commands:
+- LDFLAGS=-lsystemd REBAR_FLAGS="--verbose 2" make
+install-commands:
+- install -D -m 644 -p ebin/sd_notify.app "$DESTDIR$PREFIX"/lib/erlang/lib/sd_notify-0.1/ebin/sd_notify.app
+- install -D -m 644 -p ebin/sd_notify.beam "$DESTDIR$PREFIX"/lib/erlang/lib/sd_notify-0.1/ebin/sd_notify.beam
+- install -D -m 755 -p priv/sd_notify_drv.so "$DESTDIR$PREFIX"/lib/erlang/lib/sd_notify-0.1/priv/sd_notify_drv.so
diff --git a/strata/erlang/erlang.morph b/strata/erlang/erlang.morph
new file mode 100644
index 00000000..feaf04f1
--- /dev/null
+++ b/strata/erlang/erlang.morph
@@ -0,0 +1,9 @@
+name: erlang
+kind: chunk
+configure-commands:
+- ./otp_build autoconf
+- ./configure --prefix=$PREFIX
+build-commands:
+- make
+install-commands:
+- make install
diff --git a/strata/erlang/rebar.morph b/strata/erlang/rebar.morph
new file mode 100644
index 00000000..fa5fd64f
--- /dev/null
+++ b/strata/erlang/rebar.morph
@@ -0,0 +1,6 @@
+name: rebar
+kind: chunk
+build-commands:
+- ./bootstrap
+install-commands:
+- install -D -m 755 rebar "$DESTDIR$PREFIX"/bin/rebar
diff --git a/strata/foundation.morph b/strata/foundation.morph
new file mode 100644
index 00000000..1be267ac
--- /dev/null
+++ b/strata/foundation.morph
@@ -0,0 +1,113 @@
+name: foundation
+kind: stratum
+description: Basic userland runtime system
+build-depends:
+- morph: strata/coreutils-common.morph
+chunks:
+- name: bash-completion
+ repo: upstream:bash-completion
+ ref: 3085c7e12179817a02a611016606391295c69942
+ unpetrify-ref: 2.1
+- name: groff
+ morph: strata/foundation/groff.morph
+ repo: upstream:groff-git
+ ref: 16305a24e67966ace06d55e2a0b98cc0e3127a93
+ unpetrify-ref: 1.22.3
+- name: kmod
+ morph: strata/foundation/kmod.morph
+ repo: upstream:kmod
+ ref: ae58de0fcb4a6528dd365e23d383bbe2eaf2d566
+ unpetrify-ref: v18
+- name: time-zone-database
+ morph: strata/foundation/time-zone-database.morph
+ repo: upstream:tz
+ ref: a0782484f101ac55c916568bc1c490d7761fc904
+ unpetrify-ref: 2015a
+- name: pciutils
+ morph: strata/foundation/pciutils.morph
+ repo: upstream:pciutils
+ ref: bae04bd0473c68a2f21b7712a627abb08fd84b29
+ unpetrify-ref: baserock/morph
+- name: dbus-pre
+ morph: strata/foundation/dbus-pre.morph
+ repo: upstream:dbus
+ ref: 92a2d95a6c4457783dbd663bab27260c328f2248
+ unpetrify-ref: baserock/genivi/dbus-1.9.16-1-g92a2d95
+- name: libgpg-error
+ morph: strata/foundation/libgpg-error.morph
+ repo: upstream:libgpg-error
+ ref: d77c33ae608d67086ea057cca5ddee99a7202f8b
+ unpetrify-ref: libgpg-error-1.19
+- name: libgcrypt
+ repo: upstream:libgcrypt
+ ref: 412eed473b557ed2172d81d76fa1e1f53c973a67
+ build-depends:
+ - libgpg-error
+- name: systemd
+ morph: strata/foundation/systemd.morph
+ repo: upstream:systemd
+ ref: 163ab2961268232e1cb49e990a8ccefe24b7649f
+ unpetrify-ref: baserock/v219-729-g163ab29-jetson_btrfs_fixes
+ build-depends:
+ - dbus-pre
+ - kmod
+ - libgcrypt
+- name: libusb
+ repo: upstream:libusb
+ ref: e11525c66c7dd2db466c8f5785ff0b37d6a99ec9
+ unpetrify-ref: v1.0.19
+ build-depends:
+ - systemd
+- name: usbutils
+ repo: upstream:usbutils
+ ref: c37f146eb2c6642c600f1b025a6d56996b0697ff
+ unpetrify-ref: baserock/v008
+ build-depends:
+ - libusb
+- name: lzo
+ morph: strata/foundation/lzo.morph
+ repo: upstream:lzo
+ ref: 5cca83e4d1b0be5825a597ab24ffa6126dc0bc95
+ unpetrify-ref: lzo-2.08
+- name: fuse
+ morph: strata/foundation/fuse.morph
+ repo: upstream:fuse
+ ref: d69e627e79862e2df4ff9ff1ddb0363c4520d8a8
+ unpetrify-ref: baserock/morph
+- name: btrfs-progs
+ morph: strata/foundation/btrfs-progs.morph
+ repo: upstream:btrfs-progs
+ ref: 19a806f575cd010734fa5793c4b3bfb49926dc62
+ unpetrify-ref: v4.0
+ build-depends:
+ - lzo
+- name: dbus
+ morph: strata/foundation/dbus.morph
+ repo: upstream:dbus
+ ref: 92a2d95a6c4457783dbd663bab27260c328f2248
+ unpetrify-ref: baserock/genivi/dbus-1.9.16-1-g92a2d95
+ build-depends:
+ - systemd
+- name: openssh
+ morph: strata/foundation/openssh.morph
+ repo: upstream:openssh-git
+ ref: 782fe9e725243eeb5ad6ab9a1783b5d6bedfe0d7
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - groff
+- name: tbdiff
+ morph: strata/foundation/tbdiff.morph
+ repo: baserock:baserock/tbdiff
+ ref: 47fb728f2432929868666afc915dbc5a64836c08
+ unpetrify-ref: master
+- name: dbus-glib
+ repo: upstream:dbus-glib
+ ref: 397e8297d433547c9bf4150ddd2b9e0b4c39628c
+ unpetrify-ref: dbus-glib_0.102
+ build-depends:
+ - dbus
+- name: rsync
+ morph: strata/foundation/rsync.morph
+ repo: upstream:rsync
+ ref: 7cb0de6326c915a72253fd103dae93308031ec3f
+ unpetrify-ref: v3.1.1
diff --git a/strata/foundation/btrfs-progs.morph b/strata/foundation/btrfs-progs.morph
new file mode 100644
index 00000000..4e724dc9
--- /dev/null
+++ b/strata/foundation/btrfs-progs.morph
@@ -0,0 +1,9 @@
+name: btrfs-progs
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- ./autogen.sh
+configure-commands:
+# The change to CFLAGS is needed to make the compilation succeed on armv5l
+- export CFLAGS="-O2 $CFLAGS";
+ ./configure --prefix="$PREFIX" --disable-documentation
diff --git a/strata/foundation/dbus-pre.morph b/strata/foundation/dbus-pre.morph
new file mode 100644
index 00000000..3ec008bf
--- /dev/null
+++ b/strata/foundation/dbus-pre.morph
@@ -0,0 +1,10 @@
+name: dbus-pre
+kind: chunk
+build-system: autotools
+configure-commands:
+- sh autogen.sh --no-configure
+- ./configure --prefix="$PREFIX" --localstatedir=/var --sysconfdir=/etc --disable-systemd
+build-commands:
+- make XMLTO_OUTPUT=
+install-commands:
+- make XMLTO_OUTPUT= DESTDIR="$DESTDIR" install
diff --git a/strata/foundation/dbus.morph b/strata/foundation/dbus.morph
new file mode 100644
index 00000000..23840ce1
--- /dev/null
+++ b/strata/foundation/dbus.morph
@@ -0,0 +1,10 @@
+name: dbus
+kind: chunk
+build-system: autotools
+configure-commands:
+- sh autogen.sh --no-configure
+- ./configure --prefix="$PREFIX" --localstatedir=/var --sysconfdir=/etc --enable-user-session
+build-commands:
+- make XMLTO_OUTPUT=
+install-commands:
+- make XMLTO_OUTPUT= DESTDIR="$DESTDIR" install
diff --git a/strata/foundation/fuse.morph b/strata/foundation/fuse.morph
new file mode 100644
index 00000000..6f7fc170
--- /dev/null
+++ b/strata/foundation/fuse.morph
@@ -0,0 +1,6 @@
+name: fuse
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./makeconf.sh
+- ./configure --prefix="$PREFIX"
diff --git a/strata/foundation/groff.morph b/strata/foundation/groff.morph
new file mode 100644
index 00000000..8380a9cc
--- /dev/null
+++ b/strata/foundation/groff.morph
@@ -0,0 +1,13 @@
+name: groff
+kind: chunk
+max-jobs: 1
+configure-commands:
+- PAGE=A4 ./configure --prefix="$PREFIX"
+build-commands:
+# hack to avoid a netpbm dependency
+- touch doc/gnu.eps
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
+- ln -s eqn "$DESTDIR$PREFIX/bin/geqn"
+- ln -s tbl "$DESTDIR$PREFIX/bin/gtbl"
diff --git a/strata/foundation/kmod.morph b/strata/foundation/kmod.morph
new file mode 100644
index 00000000..8f158e8e
--- /dev/null
+++ b/strata/foundation/kmod.morph
@@ -0,0 +1,49 @@
+name: kmod
+kind: chunk
+configure-commands:
+- rm -rf libkmod/docs
+- sed -i -e'/SUBDIRS/{s/\S*doc\S*//;s/\S*man\S*//}' Makefile.am
+- sed -i -e'/AC_CONFIG_FILES(\[/,/])/{/docs/d}' configure.ac
+- autoreconf -fiv
+- |
+ ./configure --prefix="$PREFIX" --bindir="$PREFIX"/bin \
+ --libdir="$PREFIX"/lib --sysconfdir=/etc \
+ --without-xz --with-zlib \
+ --disable-manpages --enable-python
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" pkgconfigdir="$PREFIX"/lib/pkgconfig install
+
+# WARNING about the following command:
+# if the staging area in which kmod is built is sufficiently different from the
+# staging area that kmod is included in, then these symlinks may not work.
+#
+# i.e. if /sbin isn't a symlink when this is built (fhs-dirs isn't included or
+# doesn't symlink /sbin to /usr/bin) then we get symlinks in /sbin which point
+# to ../usr/bin, but if /sbin is itself a symlink to /usr/bin, then the symlinks
+# would end up in /usr/bin, which would point to ../usr/bin, which would evaluate
+# to /usr/usr/bin.
+#
+# The alternatives to this are:
+#
+# 1. Use hardlinks instead, which has the problem of ssh-rsync upgrades duplicating
+# the binary
+#
+# 2. Use an absolute path for the symlink, which makes things interesting when
+# inspecting a chroot
+#
+# 3. Use a wrapper script to invoke kmod with a different name, which isn't
+# possible in busybox ash without making a temporary directory, which means
+# you need an extra process to hang around to clean that directory up.
+- |
+ if [ -h /sbin ]; then
+ for app in modprobe lsmod rmmod insmod modinfo depmod; do
+ ln -sf kmod "$DESTDIR$PREFIX/bin/$app"
+ done
+ else
+ install -d "$DESTDIR"/sbin
+ for app in modprobe lsmod rmmod insmod modinfo depmod; do
+ ln -sf ../"$PREFIX"/bin/kmod "$DESTDIR/sbin/$app"
+ done
+ fi
diff --git a/strata/foundation/libgpg-error.morph b/strata/foundation/libgpg-error.morph
new file mode 100644
index 00000000..bc8dcdab
--- /dev/null
+++ b/strata/foundation/libgpg-error.morph
@@ -0,0 +1,7 @@
+name: libgpg-error
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- autoreconf -fi
+configure-commands:
+- ./configure --prefix="$PREFIX" --disable-doc
diff --git a/strata/foundation/lzo.morph b/strata/foundation/lzo.morph
new file mode 100644
index 00000000..a1a7bbc7
--- /dev/null
+++ b/strata/foundation/lzo.morph
@@ -0,0 +1,5 @@
+name: lzo
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./configure --enable-shared=yes --prefix="$PREFIX"
diff --git a/strata/foundation/openssh.morph b/strata/foundation/openssh.morph
new file mode 100644
index 00000000..6c759cb0
--- /dev/null
+++ b/strata/foundation/openssh.morph
@@ -0,0 +1,28 @@
+name: openssh
+kind: chunk
+configure-commands:
+- autoreconf -if
+- |
+ OPENSSL_SEEDS_ITSELF=yes ./configure \
+ --prefix="$PREFIX" --sysconfdir=/etc/ssh \
+ --datadir="$PREFIX/share/sshd" \
+ --libexecdir="$PREFIX/lib/openssh" \
+ --with-privsep-path=/var/lib/sshd
+build-commands:
+- make
+- |
+ sed -e "s|@prefix@|$PREFIX|g" \
+ -e 's|@STARTUP_SCRIPT_SHELL@|/bin/sh|g' \
+ -e 's|@sysconfdir@|/etc/ssh|g' \
+ -e 's|@COMMENT_OUT_ECC@||g' \
+ sshd-keygen.in >sshd-keygen
+- sed -e "s|@prefix@|$PREFIX|g" opensshd.service.in >opensshd.service
+install-commands:
+- make DESTDIR="$DESTDIR" install
+- mkdir -p "$DESTDIR/$PREFIX/sbin"
+- chmod go= "$DESTDIR"/var/lib/sshd
+- install -m 744 sshd-keygen "$DESTDIR/$PREFIX/sbin/sshd-keygen"
+- install -m 755 contrib/ssh-copy-id "$DESTDIR/$PREFIX/bin/ssh-copy-id"
+- mkdir -p "$DESTDIR/lib/systemd/system/multi-user.target.wants"
+- install -m 644 opensshd.service "$DESTDIR/lib/systemd/system/opensshd.service"
+- ln -s ../opensshd.service "$DESTDIR/lib/systemd/system/multi-user.target.wants/opensshd.service"
diff --git a/strata/foundation/pciutils.morph b/strata/foundation/pciutils.morph
new file mode 100644
index 00000000..03bd4a23
--- /dev/null
+++ b/strata/foundation/pciutils.morph
@@ -0,0 +1,11 @@
+name: pciutils
+kind: chunk
+configure-commands:
+- make PREFIX="$PREFIX" ZLIB=no lib/config.mk
+- echo PREFIX="$PREFIX" >>lib/config.mk
+- echo MANDIR="$PREFIX/share/man" >>lib/config.mk # ensure manpages are always installed in share/man
+build-commands:
+- make PREFIX="$PREFIX"
+install-commands:
+- make PREFIX="$PREFIX" DESTDIR="$DESTDIR" install
+- make PREFIX="$PREFIX" DESTDIR="$DESTDIR" install-lib
diff --git a/strata/foundation/rsync.morph b/strata/foundation/rsync.morph
new file mode 100644
index 00000000..54337412
--- /dev/null
+++ b/strata/foundation/rsync.morph
@@ -0,0 +1,9 @@
+name: rsync
+kind: chunk
+build-system: autotools
+build-commands:
+- make proto
+- make
+install-commands:
+- make install
+- install -D -m 644 packaging/systemd/rsync.service "$DESTDIR$PREFIX"/lib/systemd/system/rsync.service
diff --git a/strata/foundation/systemd.morph b/strata/foundation/systemd.morph
new file mode 100644
index 00000000..5dc48e70
--- /dev/null
+++ b/strata/foundation/systemd.morph
@@ -0,0 +1,46 @@
+name: systemd
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+# systemd prematurely kills the user session. This is a known bug in systemd
+# https://bugs.freedesktop.org/show_bug.cgi?id=70593
+- |
+ cat >> units/user@.service.m4.in <<EOF
+ KillSignal=SIGCONT
+ ExecStop=/bin/kill -TERM \$MAINPID
+ EOF
+configure-commands:
+- sh autogen.sh
+# KILL is set because configure picks up /usr/bin/kill by default, which is
+# not installed on all systems.
+- |
+ KILL=/bin/kill ./configure --prefix="$PREFIX" --enable-xz --disable-manpages \
+ --disable-tests --sysconfdir=/etc --localstatedir=/var \
+ --libdir="$PREFIX/lib" --libexecdir="$PREFIX/libexec" \
+ --with-rootprefix= --with-rootlibdir=/lib
+install-commands:
+- make DESTDIR="$DESTDIR" install
+- mkdir -p "$DESTDIR"/sbin
+- ln -s /lib/systemd/systemd "$DESTDIR"/sbin/init
+- for f in telinit runlevel shutdown poweroff reboot halt; do ln -s /bin/systemctl
+ "$DESTDIR/sbin/$f"; done
+- sed -e 's|@sushell@|/bin/sh|g' units/debug-shell.service.in >"$DESTDIR/etc/systemd/system/debug-shell.service"
+- sed -r -e '/Options=/s/,?strictatime//' "$DESTDIR/lib/systemd/system/tmp.mount"
+ >"$DESTDIR/etc/systemd/system/tmp.mount"
+- touch "$DESTDIR/etc/machine-id"
+post-install-commands:
+- |
+ cat > "$DESTDIR/etc/systemd/network/10-dhcp.network" << "EOF"
+ [Match]
+ Name=e*
+
+ [Network]
+ DHCP=yes
+ EOF
+# Use the pam config systemd provides
+- cp -a "$DESTDIR/$PREFIX"/share/factory/etc/pam.d/* "$DESTDIR/etc/pam.d"
+
+# Add pam_deny.so to the default systemd-auth pam.d config file. Without
+# it, if shadow is configured to use PAM, it would be possible to log in
+# to a system with the wrong password.
+- echo 'auth requisite pam_deny.so' >> "$DESTDIR"/etc/pam.d/system-auth
diff --git a/strata/foundation/tbdiff.morph b/strata/foundation/tbdiff.morph
new file mode 100644
index 00000000..a907a109
--- /dev/null
+++ b/strata/foundation/tbdiff.morph
@@ -0,0 +1,3 @@
+name: tbdiff
+kind: chunk
+build-system: autotools
diff --git a/strata/foundation/time-zone-database.morph b/strata/foundation/time-zone-database.morph
new file mode 100644
index 00000000..c3c89bcf
--- /dev/null
+++ b/strata/foundation/time-zone-database.morph
@@ -0,0 +1,10 @@
+name: time-zone-database
+kind: chunk
+description: |
+ Time zone database.
+build-system: manual
+install-commands:
+# ETCDIR is actually where the 'tzselect', 'zdump' and 'zic' commands get
+# installed. Note that tzselect from this chunk will overlap with and override
+# the tzselect binary from GLIBC.
+- make TOPDIR="$PREFIX" ETCDIR="$PREFIX/bin" TZDIR="$PREFIX/share/zoneinfo" DESTDIR="$DESTDIR" install
diff --git a/strata/genivi.morph b/strata/genivi.morph
new file mode 100644
index 00000000..5294dc4e
--- /dev/null
+++ b/strata/genivi.morph
@@ -0,0 +1,110 @@
+name: genivi
+kind: stratum
+description: Software components designed by GENIVI, for GENIVI.
+build-depends:
+- morph: strata/foundation.morph
+- morph: strata/audio-bluetooth.morph
+chunks:
+- name: linuxquota
+ morph: strata/genivi/linuxquota.morph
+ repo: upstream:linuxquota
+ ref: 0804c89a16736533e440771dc42a15e5d0223902
+ unpetrify-ref: baserock/genivi/morph
+- name: DLT-daemon
+ morph: strata/genivi/DLT-daemon.morph
+ repo: upstream:DLT-daemon
+ ref: 8b48e73f79b4463393916e4c6696917e3dedd026
+ unpetrify-ref: v2.10.0
+- name: node-startup-controller
+ morph: strata/genivi/node-startup-controller.morph
+ repo: upstream:node-startup-controller
+ ref: b77fb1dbb280ec45525853e52a362eafd736b400
+ unpetrify-ref: baserock/systemd_v216
+ build-depends:
+ - DLT-daemon
+- name: googletest
+ morph: strata/genivi/googletest.morph
+ repo: upstream:googletest
+ ref: 5e3f7d3db4c16e2ba8b42b0bf4689f7d2abbcb08
+ unpetrify-ref: baserock/morph
+- name: googlemock
+ morph: strata/genivi/googlemock.morph
+ repo: upstream:googlemock
+ ref: 0e9998c140079046c396a0e7033bb465abae79cd
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - googletest
+- name: genivi-common-api-runtime
+ morph: strata/genivi/genivi-common-api-runtime.morph
+ repo: upstream:genivi-common-api-runtime
+ ref: 188abb5e24d6a2a7fdd7e5d150439a162621292c
+ unpetrify-ref: 2.1.6
+- name: genivi-common-api-dbus-runtime
+ morph: strata/genivi/genivi-common-api-dbus-runtime.morph
+ repo: upstream:genivi-common-api-dbus-runtime
+ ref: 3372155b32a2cf3b05e1a2a13f6f8413069de33f
+ unpetrify-ref: 2.1.6-p1
+ build-depends:
+ - genivi-common-api-runtime
+- name: audiomanager
+ repo: upstream:audiomanager
+ ref: 00caae1e41e8891d9a1bafa76028e8119f06fd8a
+ unpetrify-ref: baserock/6.0
+ build-depends:
+ - DLT-daemon
+ - googletest
+ - googlemock
+ - genivi-common-api-runtime
+ - genivi-common-api-dbus-runtime
+- name: itzam-tarball
+ morph: strata/genivi/itzam-tarball.morph
+ repo: upstream:itzam-tarball
+ ref: 4ebcf671416927b64475da7b810b6016c847bf73
+ unpetrify-ref: baserock/genivi/baseline
+- name: persistence-common-object
+ morph: strata/genivi/persistence-common-object.morph
+ repo: upstream:genivi/persistence-common-object
+ ref: dc46126214022d068729f7ccd47415dc5f86f425
+ unpetrify-ref: baserock/1.0.2
+ build-depends:
+ - itzam-tarball
+ - DLT-daemon
+- name: persistence-client-library
+ morph: strata/genivi/persistence-client-library.morph
+ repo: upstream:genivi/persistence-client-library
+ ref: 39c68df2a9ee0b089212855f64a61fbd671b1a80
+ unpetrify-ref: baserock/v1.0.0
+ build-depends:
+ - persistence-common-object
+ - DLT-daemon
+ - itzam-tarball
+- name: node-state-manager
+ repo: upstream:node-state-manager
+ ref: 30add4659e002f1df205cc36f71ef3141c10c1fb
+ unpetrify-ref: baserock/systemd_v216
+ build-depends:
+ - DLT-daemon
+ - persistence-client-library
+- name: libarchive
+ morph: strata/genivi/libarchive.morph
+ repo: upstream:libarchive
+ ref: 19f23e191f9d3e1dd2a518735046100419965804
+ unpetrify-ref: v3.1.2
+- name: persistence-administrator
+ morph: strata/genivi/persistence-administrator.morph
+ repo: upstream:genivi/persistence-administrator
+ ref: f9d62e70237c05323cbd60f1d5c7b574fbc2de43
+ unpetrify-ref: baserock/1.0.5
+ build-depends:
+ - persistence-common-object
+ - DLT-daemon
+ - libarchive
+ - node-state-manager
+- name: node-health-monitor
+ repo: upstream:genivi/node-health-monitor
+ ref: 2af264563ef1b60ba2636cbf5e8cd6a7ed07b0c0
+ unpetrify-ref: baserock/1.3.3
+ build-depends:
+ - node-state-manager
+ - persistence-client-library
+ - DLT-daemon
diff --git a/strata/genivi/DLT-daemon.morph b/strata/genivi/DLT-daemon.morph
new file mode 100644
index 00000000..e22e31e3
--- /dev/null
+++ b/strata/genivi/DLT-daemon.morph
@@ -0,0 +1,5 @@
+name: DLT-daemon
+kind: chunk
+build-system: cmake
+configure-commands:
+- cmake -DCMAKE_INSTALL_PREFIX="$PREFIX" -DWITH_SYSTEMD=ON
diff --git a/strata/genivi/genivi-common-api-dbus-runtime.morph b/strata/genivi/genivi-common-api-dbus-runtime.morph
new file mode 100644
index 00000000..8d6cb134
--- /dev/null
+++ b/strata/genivi/genivi-common-api-dbus-runtime.morph
@@ -0,0 +1,5 @@
+name: genivi-common-api-dbus-runtime
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- ln -s . build-aux
diff --git a/strata/genivi/genivi-common-api-runtime.morph b/strata/genivi/genivi-common-api-runtime.morph
new file mode 100644
index 00000000..7439331a
--- /dev/null
+++ b/strata/genivi/genivi-common-api-runtime.morph
@@ -0,0 +1,5 @@
+name: genivi-common-api-runtime
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- ln -s . build-aux
diff --git a/strata/genivi/googlemock.morph b/strata/genivi/googlemock.morph
new file mode 100644
index 00000000..b3eaa952
--- /dev/null
+++ b/strata/genivi/googlemock.morph
@@ -0,0 +1,8 @@
+name: googlemock
+kind: chunk
+build-system: autotools
+configure-commands:
+- mkdir -p gtest/m4
+- ln -s /usr/share/gtest/m4/acx_pthread.m4 gtest/m4/acx_pthread.m4
+- autoreconf -ivf
+- ./configure --prefix="$PREFIX"
diff --git a/strata/genivi/googletest.morph b/strata/genivi/googletest.morph
new file mode 100644
index 00000000..7723c721
--- /dev/null
+++ b/strata/genivi/googletest.morph
@@ -0,0 +1,7 @@
+name: googletest
+kind: chunk
+build-system: autotools
+install-commands:
+- make DESTDIR="$DESTDIR" install
+- mkdir -p $DESTDIR/usr/share/gtest/m4
+- install -m 644 m4/acx_pthread.m4 $DESTDIR/usr/share/gtest/m4
diff --git a/strata/genivi/itzam-tarball.morph b/strata/genivi/itzam-tarball.morph
new file mode 100644
index 00000000..eaf2db61
--- /dev/null
+++ b/strata/genivi/itzam-tarball.morph
@@ -0,0 +1,10 @@
+name: itzam-tarball
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- sed -i 's/GENERIC_LIBRARY_NAME=libitzam/GENERIC_LIBRARY_NAME=itzam/g' configure.ac
+- mkdir m4
+- touch NEWS README AUTHORS ChangeLog
+- autoreconf -ivf
+post-install-commands:
+- install -D -m644 libitzam.pc "$DESTDIR$PREFIX/lib/pkgconfig/libitzam.pc"
diff --git a/strata/genivi/libarchive.morph b/strata/genivi/libarchive.morph
new file mode 100644
index 00000000..bf840f58
--- /dev/null
+++ b/strata/genivi/libarchive.morph
@@ -0,0 +1,6 @@
+name: libarchive
+kind: chunk
+build-system: autotools
+configure-commands:
+- autoreconf -ivf
+- ./configure --prefix="$PREFIX" --disable-acl
diff --git a/strata/genivi/linuxquota.morph b/strata/genivi/linuxquota.morph
new file mode 100644
index 00000000..826696ad
--- /dev/null
+++ b/strata/genivi/linuxquota.morph
@@ -0,0 +1,10 @@
+name: linuxquota
+kind: chunk
+build-system: autotools
+configure-commands:
+- autoreconf -ivf
+- ./configure --prefix="$PREFIX"
+build-commands:
+- make "RPCGEN=rpcgen -Y /usr/bin"
+install-commands:
+- make ROOTDIR="$DESTDIR" install
diff --git a/strata/genivi/node-startup-controller.morph b/strata/genivi/node-startup-controller.morph
new file mode 100644
index 00000000..d3d6dc77
--- /dev/null
+++ b/strata/genivi/node-startup-controller.morph
@@ -0,0 +1,9 @@
+name: node-startup-controller
+kind: chunk
+configure-commands:
+- sh autogen.sh
+- ./configure --prefix="$PREFIX" --sysconfdir=/etc
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/genivi/persistence-administrator.morph b/strata/genivi/persistence-administrator.morph
new file mode 100644
index 00000000..f4fd07a5
--- /dev/null
+++ b/strata/genivi/persistence-administrator.morph
@@ -0,0 +1,5 @@
+name: persistence-administrator
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- chmod +x autogen.sh
diff --git a/strata/genivi/persistence-client-library.morph b/strata/genivi/persistence-client-library.morph
new file mode 100644
index 00000000..6ea6aef2
--- /dev/null
+++ b/strata/genivi/persistence-client-library.morph
@@ -0,0 +1,6 @@
+name: persistence-client-library
+kind: chunk
+build-system: autotools
+configure-commands:
+- autoreconf -ivf
+- ./configure --prefix="$PREFIX" --disable-werror
diff --git a/strata/genivi/persistence-common-object.morph b/strata/genivi/persistence-common-object.morph
new file mode 100644
index 00000000..179a6031
--- /dev/null
+++ b/strata/genivi/persistence-common-object.morph
@@ -0,0 +1,5 @@
+name: persistence-common-object
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- chmod +x autogen.sh
diff --git a/strata/graphics-common.morph b/strata/graphics-common.morph
new file mode 100644
index 00000000..c23ec829
--- /dev/null
+++ b/strata/graphics-common.morph
@@ -0,0 +1,54 @@
+name: graphics-common
+kind: stratum
+build-depends:
+- morph: strata/mesa-common.morph
+- morph: strata/x-common.morph
+chunks:
+- name: pixman
+ morph: strata/graphics-common/pixman.morph
+ repo: upstream:pixman
+ ref: 87eea99e443b389c978cf37efc52788bf03a0ee0
+ unpetrify-ref: pixman-0.32.6
+- name: freetype2
+ repo: upstream:freetype2
+ ref: ec8853cd18e1a0c275372769bdad37a79550ed66
+ unpetrify-ref: VER-2-5-3
+- name: fontconfig
+ repo: upstream:fontconfig
+ ref: 9260b7ec39c34ce68d74e16d47917290a8c3f35a
+ unpetrify-ref: 2.11.1
+ build-depends:
+ - freetype2
+- name: freefont-otf
+ morph: strata/graphics-common/freefont-otf.morph
+ repo: upstream:freefont-otf
+ ref: 75fa95a912718bb94a135d4bf6b13bb38e186ce7
+ unpetrify-ref: baserock/morph
+- name: libpng
+ repo: upstream:libpng
+ ref: 88dd30b232362b65cca374dda39096888163dd6b
+ unpetrify-ref: libpng-1.6.16-signed
+- name: libjpeg
+ repo: upstream:libjpeg
+ ref: f57ac58ac664ede6bc6e8cd9d88e0edaa366e21a
+ unpetrify-ref: baserock/morph
+- name: libtiff
+ repo: upstream:libtiff
+ ref: 2f83c385ff3d5493602308b62ca6d7707725b4fd
+ unpetrify-ref: Release-v4-0-3
+- name: cairo
+ morph: strata/graphics-common/cairo.morph
+ repo: upstream:cairo
+ ref: f6fd372a8b31a0bebbdfe36090d6ffc7bab9a2f8
+ unpetrify-ref: 1.14.0
+ build-depends:
+ - fontconfig
+ - freetype2
+ - pixman
+ - libpng
+- name: harfbuzz
+ repo: upstream:harfbuzz
+ ref: 09b5393874e56fcfd63a92d28e6c1c2ddeee0942
+ unpetrify-ref: baserock/0.9.12
+ build-depends:
+ - freetype2
diff --git a/strata/graphics-common/cairo.morph b/strata/graphics-common/cairo.morph
new file mode 100644
index 00000000..f91ac385
--- /dev/null
+++ b/strata/graphics-common/cairo.morph
@@ -0,0 +1,5 @@
+name: cairo
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./autogen.sh --prefix="$PREFIX" --enable-glesv2
diff --git a/strata/graphics-common/freefont-otf.morph b/strata/graphics-common/freefont-otf.morph
new file mode 100644
index 00000000..f021480d
--- /dev/null
+++ b/strata/graphics-common/freefont-otf.morph
@@ -0,0 +1,5 @@
+name: freefont-otf
+kind: chunk
+install-commands:
+- mkdir -p "$DESTDIR"/usr/share/fonts/opentype/freefont
+- install -p *.otf "$DESTDIR"/usr/share/fonts/opentype/freefont
diff --git a/strata/graphics-common/pixman.morph b/strata/graphics-common/pixman.morph
new file mode 100644
index 00000000..d09b3df3
--- /dev/null
+++ b/strata/graphics-common/pixman.morph
@@ -0,0 +1,5 @@
+name: pixman
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./autogen.sh --prefix="$PREFIX" --disable-arm-iwmmxt --disable-arm-iwmmxt2
diff --git a/strata/gtk-deps.morph b/strata/gtk-deps.morph
new file mode 100644
index 00000000..750ad8c2
--- /dev/null
+++ b/strata/gtk-deps.morph
@@ -0,0 +1,38 @@
+name: gtk-deps
+kind: stratum
+description: the GTK+ dependencies stratum
+build-depends:
+- morph: strata/foundation.morph
+- morph: strata/graphics-common.morph
+- morph: strata/x-common.morph
+chunks:
+- name: pango
+ morph: strata/gtk-deps/pango.morph
+ repo: upstream:pango
+ ref: e0a21abf52a0b7588b1aa3357818948816ed9103
+ unpetrify-ref: 1.36.8
+- name: shared-mime-info
+ morph: strata/gtk-deps/shared-mime-info.morph
+ repo: upstream:shared-mime-info
+ ref: c136fc2578aa8f7c0e098008da18d324033eb991
+ unpetrify-ref: Release-1-4
+- name: gdk-pixbuf
+ morph: strata/gtk-deps/gdk-pixbuf.morph
+ repo: upstream:gdk-pixbuf
+ ref: b86959aa16e9d49ec6e286bc57d36c5249578c59
+ unpetrify-ref: 2.30.8
+- name: atk
+ repo: upstream:atk
+ ref: 3f9e43b94364053fd16eb53391667d161d0dae12
+ unpetrify-ref: ATK_2_16_0
+- name: at-spi2-core
+ repo: upstream:at-spi2-core
+ ref: 96c2842088008670e72739ea7921d654487a57fb
+ unpetrify-ref: AT_SPI2_CORE_2_14_1
+- name: at-spi2-atk
+ repo: upstream:at-spi2-atk
+ ref: 2c220ff6565c4f02428a0c80267616a677e81ac9
+ unpetrify-ref: AT_SPI2_ATK_2_14_1
+ build-depends:
+ - at-spi2-core
+ - atk
diff --git a/strata/gtk-deps/gdk-pixbuf.morph b/strata/gtk-deps/gdk-pixbuf.morph
new file mode 100644
index 00000000..c89c75c8
--- /dev/null
+++ b/strata/gtk-deps/gdk-pixbuf.morph
@@ -0,0 +1,6 @@
+name: gdk-pixbuf
+kind: chunk
+build-system: autotools
+install-commands:
+- make DESTDIR=$DESTDIR install
+- LD_LIBRARY_PATH=$DESTDIR/usr/lib $DESTDIR/usr/bin/gdk-pixbuf-query-loaders > $DESTDIR/usr/lib/gdk-pixbuf-2.0/2.10.0/loaders.cache
diff --git a/strata/gtk-deps/pango.morph b/strata/gtk-deps/pango.morph
new file mode 100644
index 00000000..82b00ee3
--- /dev/null
+++ b/strata/gtk-deps/pango.morph
@@ -0,0 +1,8 @@
+name: pango
+kind: chunk
+build-system: autotools
+system-integration:
+ pango-misc:
+ 01-setup-modules:
+ - mkdir -p /usr/etc/pango/
+ - /usr/bin/pango-querymodules > /usr/etc/pango/pango.modules
diff --git a/strata/gtk-deps/shared-mime-info.morph b/strata/gtk-deps/shared-mime-info.morph
new file mode 100644
index 00000000..e155fde0
--- /dev/null
+++ b/strata/gtk-deps/shared-mime-info.morph
@@ -0,0 +1,4 @@
+name: shared-mime-info
+kind: chunk
+max-jobs: 1
+build-system: autotools
diff --git a/strata/gtk2.morph b/strata/gtk2.morph
new file mode 100644
index 00000000..dd2f30fa
--- /dev/null
+++ b/strata/gtk2.morph
@@ -0,0 +1,12 @@
+name: gtk2
+kind: stratum
+description: the gtk2 stratum
+build-depends:
+- morph: strata/gtk-deps.morph
+- morph: strata/x-generic.morph
+chunks:
+- name: gtk+
+ morph: strata/gtk2/gtk+.morph
+ repo: upstream:gtk+
+ ref: e6333a1a374591fef456f7fe73942226b5b8b388
+ unpetrify-ref: 2.24.27
diff --git a/strata/gtk2/gtk+.morph b/strata/gtk2/gtk+.morph
new file mode 100644
index 00000000..abbf2510
--- /dev/null
+++ b/strata/gtk2/gtk+.morph
@@ -0,0 +1,11 @@
+name: gtk+
+kind: chunk
+build-system: autotools
+configure-commands:
+- gdk-pixbuf-query-loaders > loader.cache
+- NOCONFIGURE=1 ./autogen.sh
+- ./configure --prefix="$PREFIX"
+build-commands:
+- GDK_PIXBUF_MODULE_FILE="$(pwd)/loader.cache" make
+install-commands:
+- GDK_PIXBUF_MODULE_FILE="$(pwd)/loader.cache" make install DESTDIR="$DESTDIR"
diff --git a/strata/gtk3.morph b/strata/gtk3.morph
new file mode 100644
index 00000000..f03417a2
--- /dev/null
+++ b/strata/gtk3.morph
@@ -0,0 +1,18 @@
+name: gtk3
+kind: stratum
+description: the gtk3 stratum
+build-depends:
+- morph: strata/gtk-deps.morph
+- morph: strata/wayland-generic.morph
+chunks:
+- name: libepoxy
+ repo: upstream:libepoxy
+ ref: 7422de5b4be7b19d789136b3bb5f932de42db27c
+ unpetrify-ref: v1.2
+- name: gtk3
+ morph: strata/gtk3/gtk3.morph
+ repo: upstream:gtk+
+ ref: a816ccd4968f1e221b92bfd1e2b2dc27703d6db5
+ unpetrify-ref: 3.16.0
+ build-depends:
+ - libepoxy
diff --git a/strata/gtk3/gtk3.morph b/strata/gtk3/gtk3.morph
new file mode 100644
index 00000000..4542dc15
--- /dev/null
+++ b/strata/gtk3/gtk3.morph
@@ -0,0 +1,11 @@
+name: gtk3
+kind: chunk
+build-system: autotools
+configure-commands:
+- gdk-pixbuf-query-loaders > loader.cache
+- NOCONFIGURE=1 ./autogen.sh
+- ./configure --prefix="$PREFIX" --enable-wayland-backend
+build-commands:
+- GDK_PIXBUF_MODULE_FILE="$(pwd)/loader.cache" make
+install-commands:
+- GDK_PIXBUF_MODULE_FILE="$(pwd)/loader.cache" make install DESTDIR="$DESTDIR"
diff --git a/strata/initramfs-utils.morph b/strata/initramfs-utils.morph
new file mode 100644
index 00000000..6df77c96
--- /dev/null
+++ b/strata/initramfs-utils.morph
@@ -0,0 +1,11 @@
+name: initramfs-utils
+kind: stratum
+description: stratum for initramfs
+build-depends:
+- morph: strata/build-essential.morph
+chunks:
+- name: initramfs-scripts
+ morph: strata/initramfs-utils/initramfs-scripts.morph
+ repo: baserock:baserock/initramfs-scripts
+ ref: 062c5d3aece2e308aa7fc03acab1b3b6dd4270b2
+ unpetrify-ref: master
diff --git a/strata/initramfs-utils/initramfs-scripts.morph b/strata/initramfs-utils/initramfs-scripts.morph
new file mode 100644
index 00000000..68ed4a7b
--- /dev/null
+++ b/strata/initramfs-utils/initramfs-scripts.morph
@@ -0,0 +1,4 @@
+name: initramfs-scripts
+kind: chunk
+install-commands:
+- install -m 755 init "$DESTDIR/init"
diff --git a/strata/input-common.morph b/strata/input-common.morph
new file mode 100644
index 00000000..b67bcffe
--- /dev/null
+++ b/strata/input-common.morph
@@ -0,0 +1,26 @@
+name: input-common
+kind: stratum
+build-depends:
+- morph: strata/foundation.morph
+- morph: strata/xorg-util-macros-common.morph
+chunks:
+- name: mtdev
+ repo: upstream:mtdev-git
+ ref: 4381b78fea54de0e775bf54952b2f95e5a06c57d
+ unpetrify-ref: v1.1.5
+- name: xkeyboard-config
+ morph: strata/input-common/xkeyboard-config.morph
+ repo: upstream:xkeyboard-config
+ ref: 26f344c93f8c6141e9233eb68088ba4fd56bc9ef
+ unpetrify-ref: xkeyboard-config-2.14
+- name: libevdev
+ repo: upstream:libevdev
+ ref: 6f03fd49fb949e46ebccb5dfb54489584896c791
+ unpetrify-ref: libevdev-1.4.2
+- name: libinput
+ repo: upstream:libinput
+ ref: 12df380698531472d495534ed356722478563707
+ unpetrify-ref: 0.15.0
+ build-depends:
+ - mtdev
+ - libevdev
diff --git a/strata/input-common/xkeyboard-config.morph b/strata/input-common/xkeyboard-config.morph
new file mode 100644
index 00000000..ad33b422
--- /dev/null
+++ b/strata/input-common/xkeyboard-config.morph
@@ -0,0 +1,7 @@
+name: xkeyboard-config
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix="$PREFIX" --disable-runtime-deps
+install-commands:
+- mkdir -p "$DESTDIR"/"$PREFIX"/share/X11/xkb/
+- make install DESTDIR="$DESTDIR"
diff --git a/strata/installer-utils.morph b/strata/installer-utils.morph
new file mode 100644
index 00000000..ddc714b6
--- /dev/null
+++ b/strata/installer-utils.morph
@@ -0,0 +1,11 @@
+name: installer-utils
+kind: stratum
+description: stratum for Baserock installer script.
+build-depends:
+- morph: strata/build-essential.morph
+chunks:
+- name: installer-scripts
+ morph: strata/installer-utils/installer-scripts.morph
+ repo: baserock:baserock/installer-scripts
+ ref: a1629ded9eb499b55b547cd8caa0ade8233b32f4
+ unpetrify-ref: master
diff --git a/strata/installer-utils/installer-scripts.morph b/strata/installer-utils/installer-scripts.morph
new file mode 100644
index 00000000..e42313a8
--- /dev/null
+++ b/strata/installer-utils/installer-scripts.morph
@@ -0,0 +1,4 @@
+name: installer-scripts
+kind: chunk
+install-commands:
+- install -D -m 755 baserock-installer "$DESTDIR/usr/lib/baserock-installer/installer"
diff --git a/strata/libdrm-common.morph b/strata/libdrm-common.morph
new file mode 100644
index 00000000..16ef2375
--- /dev/null
+++ b/strata/libdrm-common.morph
@@ -0,0 +1,16 @@
+name: libdrm-common
+kind: stratum
+build-depends:
+- morph: strata/xorg-util-macros-common.morph
+chunks:
+- name: xorg-lib-libpciaccess
+ repo: upstream:xorg-lib-libpciaccess
+ ref: f99c2e4199ce37f6f94428df504427f67c3ec543
+ unpetrify-ref: libpciaccess-0.13.3
+- name: drm
+ repo: upstream:drm
+ morph: strata/libdrm-common/drm.morph
+ ref: 5f7b6723e1bed755c230817eea0563bab5b2a8a7
+ unpetrify-ref: libdrm-2.4.60
+ build-depends:
+ - xorg-lib-libpciaccess
diff --git a/strata/libdrm-common/drm.morph b/strata/libdrm-common/drm.morph
new file mode 100644
index 00000000..a614a831
--- /dev/null
+++ b/strata/libdrm-common/drm.morph
@@ -0,0 +1,15 @@
+name: drm
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+ # Tegra requires a new coherent BO attribute, not currently upstream, so
+ # patch it here
+ - sed -i '/NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)/a#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)' include/drm/nouveau_drm.h
+ - |
+ sed -i '/info->domain |= NOUVEAU_GEM_DOMAIN_MAPPABLE;/a\
+ if (bo->flags & NOUVEAU_BO_COHERENT)\
+ info->domain |= NOUVEAU_GEM_DOMAIN_COHERENT;' nouveau/abi16.c
+ - sed -i '/NOUVEAU_BO_NOSNOOP 0x20000000/a#define NOUVEAU_BO_COHERENT 0x10000000' nouveau/nouveau.h
+configure-commands:
+ - NOCONFIGURE=1 ./autogen.sh
+ - ./configure --prefix="$PREFIX" --enable-tegra-experimental-api --enable-freedreno-experimental-api
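Note: the three sed edits in drm.morph above splice a NOUVEAU_GEM_DOMAIN_COHERENT / NOUVEAU_BO_COHERENT flag into the libdrm headers and into nouveau/abi16.c before configuration. A quick, informal check run from the drm source tree after the pre-configure step could confirm the defines landed where expected:

    # Sanity check (run in the drm source tree) that the coherent-BO patch applied.
    grep -n 'NOUVEAU_GEM_DOMAIN_COHERENT' include/drm/nouveau_drm.h
    grep -n 'NOUVEAU_BO_COHERENT' nouveau/nouveau.h nouveau/abi16.c
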
diff --git a/strata/libsoup-common.morph b/strata/libsoup-common.morph
new file mode 100644
index 00000000..ca81b6e0
--- /dev/null
+++ b/strata/libsoup-common.morph
@@ -0,0 +1,10 @@
+name: libsoup-common
+kind: stratum
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: libsoup
+ morph: strata/libsoup-common/libsoup.morph
+ repo: upstream:libsoup
+ ref: ce764489e358bad6b49418f5c8bc7b25a4b1815e
+ unpetrify-ref: baserock/morph
diff --git a/strata/libsoup-common/libsoup.morph b/strata/libsoup-common/libsoup.morph
new file mode 100644
index 00000000..0a5ebef6
--- /dev/null
+++ b/strata/libsoup-common/libsoup.morph
@@ -0,0 +1,6 @@
+name: libsoup
+kind: chunk
+build-system: autotools
+configure-commands:
+- NOCONFIGURE=1 ./autogen.sh
+- ./configure --prefix="$PREFIX" --disable-tls-check
diff --git a/strata/lighttpd-server.morph b/strata/lighttpd-server.morph
new file mode 100644
index 00000000..9aa166fd
--- /dev/null
+++ b/strata/lighttpd-server.morph
@@ -0,0 +1,12 @@
+name: lighttpd-server
+kind: stratum
+description: lighttpd web server
+build-depends:
+- morph: strata/tools.morph
+- morph: strata/pcre-utils.morph
+chunks:
+- name: lighttpd
+ morph: strata/lighttpd-server/lighttpd.morph
+ repo: upstream:lighttpd
+ ref: 12e4e21763da770034267ff0a7b660876930f789
+ unpetrify-ref: baserock/morph
diff --git a/strata/lighttpd-server/lighttpd.morph b/strata/lighttpd-server/lighttpd.morph
new file mode 100644
index 00000000..ae371c52
--- /dev/null
+++ b/strata/lighttpd-server/lighttpd.morph
@@ -0,0 +1,6 @@
+name: lighttpd
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./autogen.sh
+- ./configure --prefix="$PREFIX" --with-openssl --with-openssl-libs=/usr/lib
diff --git a/strata/llvm-common.morph b/strata/llvm-common.morph
new file mode 100644
index 00000000..1291e062
--- /dev/null
+++ b/strata/llvm-common.morph
@@ -0,0 +1,10 @@
+name: llvm-common
+kind: stratum
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: llvm
+ morph: strata/llvm-common/llvm.morph
+ repo: upstream:llvm
+ ref: a93239b7c6f0d78cb8836768c3ffbc39fb15b79f
+ unpetrify-ref: release_33
diff --git a/strata/llvm-common/llvm.morph b/strata/llvm-common/llvm.morph
new file mode 100644
index 00000000..9d280062
--- /dev/null
+++ b/strata/llvm-common/llvm.morph
@@ -0,0 +1,8 @@
+name: llvm
+kind: chunk
+description: Low Level Virtual Machine
+build-system: autotools
+configure-commands:
+- ./configure --prefix="$PREFIX" --sysconfdir=/etc --enable-shared --enable-targets=host --enable-optimized --disable-assertions
+build-commands:
+- make $MAKEFLAGS
diff --git a/strata/lorry-controller.morph b/strata/lorry-controller.morph
new file mode 100644
index 00000000..00ae5752
--- /dev/null
+++ b/strata/lorry-controller.morph
@@ -0,0 +1,16 @@
+name: lorry-controller
+kind: stratum
+description: |
+ Lorry Controller mirroring service.
+
+ This is a component of Trove, but can
+ also be used with other Git servers.
+build-depends:
+- morph: strata/python-cliapp.morph
+- morph: strata/python-wsgi.morph
+chunks:
+- name: lorry-controller
+ morph: strata/lorry-controller/lorry-controller.morph
+ repo: baserock:baserock/lorry-controller
+ ref: f3f59d1415bfdb18f17bd910b829a8d128c1562a
+ unpetrify-ref: master
diff --git a/strata/lorry-controller/lorry-controller.morph b/strata/lorry-controller/lorry-controller.morph
new file mode 100644
index 00000000..2f90a9a1
--- /dev/null
+++ b/strata/lorry-controller/lorry-controller.morph
@@ -0,0 +1,10 @@
+name: lorry-controller
+kind: chunk
+build-system: python-distutils
+post-install-commands:
+- install -d -m 0755 "$DESTDIR/etc/lighttpd"
+- install -m 0644 -D etc/lighttpd/*.conf "$DESTDIR/etc/lighttpd/."
+- |
+ TGT="$DESTDIR/usr/lib/systemd/system"
+ install -d "$TGT/multi-user.target.wants"
+ install -m 0644 units/*.service units/*.timer "$TGT/."
diff --git a/strata/lorry.morph b/strata/lorry.morph
new file mode 100644
index 00000000..b3a0c779
--- /dev/null
+++ b/strata/lorry.morph
@@ -0,0 +1,120 @@
+name: lorry
+kind: stratum
+description: |
+ Lorry and tools for working with version control systems other than Git.
+
+ Lorry is a tool for creating and maintaining Git mirrors of source code
+ repositories, which aims to support most version control systems in use
+ by software projects that are used in Baserock systems.
+
+ The commandline tools 'hg', 'bzr', 'svn', and 'cvs' are also made available
+ by this stratum.
+build-depends:
+- morph: strata/foundation.morph
+- morph: strata/python-cliapp.morph
+chunks:
+- name: bzr-tarball
+ repo: upstream:bzr-tarball
+ ref: e61c7edb4789abcd0f73c30fe719fa6fea478a52
+ unpetrify-ref: baserock/morph
+- name: python-fastimport
+ repo: upstream:python-fastimport
+ ref: 6500a5e7d82651ade9002d44e3ecc71a50302616
+ unpetrify-ref: baserock/morph
+- name: bzr-fastimport
+ repo: upstream:bzr-fastimport
+ ref: b3cda9967f857127bd4dab5eb72223a95916f5ea
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - bzr-tarball
+ - python-fastimport
+- name: cvs-tarball
+ morph: strata/lorry/cvs-tarball.morph
+ repo: upstream:cvs-tarball
+ ref: ca4cd317a75ef4349563b5a9a734561beb4a4f98
+ unpetrify-ref: baserock/morph
+- name: libapr
+ morph: strata/lorry/libapr.morph
+ repo: upstream:libapr
+ ref: 10835ec9a2d8bb9f7b867d15f29984d29744f028
+ unpetrify-ref: 1.5.1
+- name: libapr-util
+ morph: strata/lorry/libapr-util.morph
+ repo: upstream:libapr-util
+ ref: 718a507e2e33d87ab15ff80cfe80aea8ae6c7163
+ unpetrify-ref: 1.5.4
+ build-depends:
+ - libapr
+- name: perl-dbi-tarball
+ morph: strata/lorry/perl-dbi-tarball.morph
+ repo: upstream:perl-dbi-tarball
+ ref: 09e269cff811f0c1881ea0d6b7571173bab8377b
+ unpetrify-ref: baserock/morph
+- name: perl-dbd-sqlite-tarball
+ repo: upstream:perl-dbd-sqlite-tarball
+ ref: 485b97be9f2f2abf5a40923b5fd85f75714a8c02
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - perl-dbi-tarball
+- name: libserf-tarball
+ morph: strata/lorry/libserf.morph
+ repo: upstream:libserf-tarball
+ ref: 6f61a1acd01dc2ad1d2f5c1f7458702c77c69f9c
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - libapr
+ - libapr-util
+- name: swig-tarball
+ morph: strata/lorry/swig-tarball.morph
+ repo: upstream:swig-tarball
+ ref: 1f6cb46b6a4b3ebf9352fa10198b0b286f84138b
+ unpetrify-ref: baserock/morph
+- name: neon
+ morph: strata/lorry/neon.morph
+ repo: upstream:neon
+ ref: 837374e9d797e216f1de684595cefe791f67c0e4
+ unpetrify-ref: baserock/morph
+- name: subversion-tarball
+ morph: strata/lorry/subversion-tarball.morph
+ repo: upstream:subversion-tarball
+ ref: 2d02ea6f31311bf1b72f28c48d784b8d851ff2e0
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - swig-tarball
+ - libapr
+ - libapr-util
+ - libserf-tarball
+ - neon
+- name: mercurial-tarball
+ morph: strata/lorry/mercurial-tarball.morph
+ repo: upstream:mercurial-tarball
+ ref: 4b0aa73b8c69bd5b7521337809f7bc4714209a5a
+ unpetrify-ref: baserock/morph
+- name: hg-fast-export
+ morph: strata/lorry/hg-fast-export.morph
+ repo: upstream:hg-fast-export
+ ref: 09a472aa58da0417a11a22bae172785f7cb2e80f
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - mercurial-tarball
+- name: cvsps
+ morph: strata/lorry/cvsps.morph
+ repo: upstream:cvsps
+ ref: 71c6d1f5668f405a7b259a0aac0d423f6c9b4d49
+ unpetrify-ref: baserock/morph
+- name: lorry
+ morph: strata/lorry/lorry.morph
+ repo: baserock:baserock/lorry
+ ref: 4fcff82742ce3325a860c060b597ff281872c594
+ unpetrify-ref: master
+ build-depends:
+ - bzr-tarball
+ - python-fastimport
+ - bzr-fastimport
+ - perl-dbi-tarball
+ - perl-dbd-sqlite-tarball
+ - cvs-tarball
+ - cvsps
+ - subversion-tarball
+ - mercurial-tarball
+ - hg-fast-export
diff --git a/strata/lorry/cvs-tarball.morph b/strata/lorry/cvs-tarball.morph
new file mode 100644
index 00000000..5f34bc57
--- /dev/null
+++ b/strata/lorry/cvs-tarball.morph
@@ -0,0 +1,9 @@
+name: cvs-tarball
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- cp /usr/share/automake*/config.guess build-aux
+- cp /usr/share/automake*/config.sub build-aux
+configure-commands:
+- YACC='bison -y' ./configure --prefix "$PREFIX" --with-external-zlib --without-gssapi
+ --without-krb4 --disable-dependency-tracking --disable-nls --disable-rpath
diff --git a/strata/lorry/cvsps.morph b/strata/lorry/cvsps.morph
new file mode 100644
index 00000000..ff53784e
--- /dev/null
+++ b/strata/lorry/cvsps.morph
@@ -0,0 +1,6 @@
+name: cvsps
+kind: chunk
+build-commands:
+- make
+install-commands:
+- make install prefix="$DESTDIR$PREFIX"
diff --git a/strata/lorry/hg-fast-export.morph b/strata/lorry/hg-fast-export.morph
new file mode 100644
index 00000000..ef99a97a
--- /dev/null
+++ b/strata/lorry/hg-fast-export.morph
@@ -0,0 +1,10 @@
+name: hg-fast-export
+kind: chunk
+install-commands:
+- install -d "$DESTDIR/$PREFIX/bin"
+- install -m 0755 hg-fast-export.py "$DESTDIR/$PREFIX/bin/"
+- install -m 0755 hg-reset.py "$DESTDIR/$PREFIX/bin/"
+- install -m 0755 hg-fast-export.sh "$DESTDIR/$PREFIX/bin/hg-fast-export"
+- install -m 0755 hg-reset.sh "$DESTDIR/$PREFIX/bin/hg-reset"
+- install -d "$DESTDIR/$PREFIX/lib/python2.7/site-packages"
+- install -m 0644 hg2git.py "$DESTDIR/$PREFIX/lib/python2.7/site-packages/"
diff --git a/strata/lorry/libapr-util.morph b/strata/lorry/libapr-util.morph
new file mode 100644
index 00000000..26db03ba
--- /dev/null
+++ b/strata/lorry/libapr-util.morph
@@ -0,0 +1,7 @@
+name: libapr-util
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- ./buildconf --with-apr=/usr/share/apr-1
+configure-commands:
+- ./configure --prefix "$PREFIX" --with-apr=/usr/bin/apr-1-config
diff --git a/strata/lorry/libapr.morph b/strata/lorry/libapr.morph
new file mode 100644
index 00000000..57d35b84
--- /dev/null
+++ b/strata/lorry/libapr.morph
@@ -0,0 +1,21 @@
+name: libapr
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- ./buildconf
+configure-commands:
+- |
+ ./configure \
+ --prefix "$PREFIX" \
+ --disable-static \
+ --with-installbuilddir=/usr/share/apr-1/build
+post-install-commands:
+- |
+ # Install files required for apr-util
+ for file in find_apr.m4 apr_common.m4 install.sh gen-build.py \
+ get-version.sh config.guess config.sub
+ do
+ cp build/$file "$DESTDIR/$PREFIX"/share/apr-1/build/"$file"
+ done
+ # Create a symlink in the build directory to the include directory
+ ln -sf /usr/include/apr-1 "$DESTDIR/$PREFIX"/share/apr-1/build/
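Note: the post-install loop above stages APR's build helper scripts under share/apr-1/build so that dependants can regenerate their build systems against the installed APR. libapr-util.morph, shown just above in this diff, consumes them via its pre-configure command:

    # As used by strata/lorry/libapr-util.morph in this same stratum:
    ./buildconf --with-apr=/usr/share/apr-1
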
diff --git a/strata/lorry/libserf.morph b/strata/lorry/libserf.morph
new file mode 100644
index 00000000..d4bb415d
--- /dev/null
+++ b/strata/lorry/libserf.morph
@@ -0,0 +1,6 @@
+name: libserf
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- cp /usr/share/automake*/config.guess build
+- cp /usr/share/automake*/config.sub build
diff --git a/strata/lorry/lorry.morph b/strata/lorry/lorry.morph
new file mode 100644
index 00000000..6b8cb355
--- /dev/null
+++ b/strata/lorry/lorry.morph
@@ -0,0 +1,3 @@
+name: lorry
+kind: chunk
+build-system: python-distutils
diff --git a/strata/lorry/mercurial-tarball.morph b/strata/lorry/mercurial-tarball.morph
new file mode 100644
index 00000000..03264f56
--- /dev/null
+++ b/strata/lorry/mercurial-tarball.morph
@@ -0,0 +1,6 @@
+name: mercurial-tarball
+kind: chunk
+build-commands:
+- make build PREFIX="$PREFIX"
+install-commands:
+- make install-bin PREFIX="$PREFIX" DESTDIR="$DESTDIR"
diff --git a/strata/lorry/neon.morph b/strata/lorry/neon.morph
new file mode 100644
index 00000000..19f8e83f
--- /dev/null
+++ b/strata/lorry/neon.morph
@@ -0,0 +1,8 @@
+name: neon
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./autogen.sh
+- ./configure --prefix="$PREFIX" --with-ssl
+install-commands:
+- make install-lib install-headers install-config DESTDIR="$DESTDIR"
diff --git a/strata/lorry/perl-dbi-tarball.morph b/strata/lorry/perl-dbi-tarball.morph
new file mode 100644
index 00000000..9eb3a537
--- /dev/null
+++ b/strata/lorry/perl-dbi-tarball.morph
@@ -0,0 +1,5 @@
+name: perl-dbi-tarball
+kind: chunk
+build-system: cpan
+build-commands:
+- make -j1
diff --git a/strata/lorry/subversion-tarball.morph b/strata/lorry/subversion-tarball.morph
new file mode 100644
index 00000000..8fd1bc08
--- /dev/null
+++ b/strata/lorry/subversion-tarball.morph
@@ -0,0 +1,12 @@
+name: subversion-tarball
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- LIBTOOL_CONFIG=/usr/share/libtool/build-aux/ ./autogen.sh
+configure-commands:
+- python gen-make.py build.conf
+- ./configure --prefix="$PREFIX" --without-berkeley-db
+install-commands:
+- make install DESTDIR="$DESTDIR"
+- make swig-pl
+- make install-swig-pl DESTDIR="$DESTDIR"
diff --git a/strata/lorry/swig-tarball.morph b/strata/lorry/swig-tarball.morph
new file mode 100644
index 00000000..7d16d85b
--- /dev/null
+++ b/strata/lorry/swig-tarball.morph
@@ -0,0 +1,7 @@
+name: swig-tarball
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- ./autogen.sh
+configure-commands:
+- ./configure --prefix="$PREFIX" --disable-ccache
diff --git a/strata/lua.morph b/strata/lua.morph
new file mode 100644
index 00000000..dcdd8801
--- /dev/null
+++ b/strata/lua.morph
@@ -0,0 +1,16 @@
+name: lua
+kind: stratum
+description: Interpreter for the lua scripting language.
+build-depends:
+- morph: strata/tools.morph
+chunks:
+- name: lua
+ morph: strata/lua/lua.morph
+ repo: upstream:lua
+ ref: 948063437e0350d9ef1649ec3a76d0c24a5c8642
+ unpetrify-ref: baserock/5.1-morph
+- name: luajit2
+ morph: strata/lua/luajit2.morph
+ repo: upstream:luajit2
+ ref: 880ca300e8fb7b432b9d25ed377db2102e4cb63d
+ unpetrify-ref: v2.0.3
diff --git a/strata/lua/lua.morph b/strata/lua/lua.morph
new file mode 100644
index 00000000..32aeb5c1
--- /dev/null
+++ b/strata/lua/lua.morph
@@ -0,0 +1,11 @@
+name: lua
+kind: chunk
+configure-commands:
+- sed -e 's/defined(LUA_USE_READLINE)/0/g' src/luaconf.h.orig >src/luaconf.h
+- sed -i -e '/^linux:/{n;s/-lreadline//g;s/-lhistory//g;s/-lncurses//g}' src/Makefile
+build-commands:
+- make debian_linux INSTALL_TOP="$PREFIX" RPATH="$PREFIX/lib"
+install-commands:
+- make INSTALL_TOP="$DESTDIR/$PREFIX" debian_install
+- mkdir -p "$DESTDIR/$PREFIX/lib/pkgconfig"
+- cp lua5.1.pc "$DESTDIR/$PREFIX/lib/pkgconfig/"
diff --git a/strata/lua/luajit2.morph b/strata/lua/luajit2.morph
new file mode 100644
index 00000000..72f34f02
--- /dev/null
+++ b/strata/lua/luajit2.morph
@@ -0,0 +1,7 @@
+name: luajit2
+kind: chunk
+build-commands:
+- make PREFIX="$PREFIX"
+install-commands:
+- make install PREFIX="$PREFIX" DESTDIR="$DESTDIR"
+
diff --git a/strata/lvm.morph b/strata/lvm.morph
new file mode 100644
index 00000000..92a06f41
--- /dev/null
+++ b/strata/lvm.morph
@@ -0,0 +1,15 @@
+name: lvm
+kind: stratum
+description: |
+ LVM userland tools.
+
+ LVM is a logical volume manager for the Linux kernel. This stratum contains
+ the tools necessary to manage volumes with LVM.
+build-depends:
+- morph: strata/foundation.morph
+chunks:
+- name: lvm2
+ morph: strata/lvm/lvm2.morph
+ repo: upstream:lvm2
+ ref: fa01faaa4aa96de834ba7e8fbb9d9aff908571c3
+ unpetrify-ref: v2_02_115
diff --git a/strata/lvm/lvm2.morph b/strata/lvm/lvm2.morph
new file mode 100644
index 00000000..9b4a68bf
--- /dev/null
+++ b/strata/lvm/lvm2.morph
@@ -0,0 +1,31 @@
+name: lvm2
+kind: chunk
+build-system: autotools
+
+configure-commands:
+# We specify --sbindir explicitly due to a bug in .service file generation:
+# if left to the default, @sbindir@ is expanded to the literal string
+# '${exec_prefix}/sbin' in the generated .service files.
+#
+# udev rules *must* go in /lib; they'll be ignored if they go in /usr/lib.
+- >
+ ./configure --prefix="$PREFIX" \
+ --sbindir="$PREFIX"/sbin \
+ --with-udev-prefix=/ \
+ --with-systemdsystemunitdir="$PREFIX"/lib/systemd/system \
+ --enable-applib --enable-cmdlib --enable-pkgconfig --enable-lvmetad \
+ --enable-dmeventd --enable-udev_sync
+
+install-commands:
+- make DESTDIR="$DESTDIR" install
+- make DESTDIR="$DESTDIR" install_system_dirs
+- make DESTDIR="$DESTDIR" install_systemd_generators
+- make DESTDIR="$DESTDIR" install_systemd_units
+- make DESTDIR="$DESTDIR" install_tmpfiles_configuration
+
+# Use lvmetad by default. This means we don't have to use the
+# `lvm2-activation-generator` systemd generator, which is a good thing
+# because I have seen it cause systems to completely fail to boot. Fedora
+# does something similar, see:
+# http://pkgs.fedoraproject.org/cgit/lvm2.git/tree/lvm2-enable-lvmetad-by-default.patch
+- sed -e 's/use_lvmetad = 0/use_lvmetad = 1/' -i "$DESTDIR"/etc/lvm/lvm.conf
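Note: since enabling lvmetad is done with an in-place sed on the staged lvm.conf, a small check of the staging tree makes the intent explicit; $DESTDIR here is the chunk's staging directory, as in the commands above.

    # Expect "use_lvmetad = 1" after the sed in lvm2.morph above.
    grep -n 'use_lvmetad' "$DESTDIR/etc/lvm/lvm.conf"
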
diff --git a/strata/mesa-common.morph b/strata/mesa-common.morph
new file mode 100644
index 00000000..9dcdf5c1
--- /dev/null
+++ b/strata/mesa-common.morph
@@ -0,0 +1,13 @@
+name: mesa-common
+kind: stratum
+build-depends:
+- morph: strata/llvm-common.morph
+- morph: strata/libdrm-common.morph
+- morph: strata/wayland-generic.morph
+- morph: strata/python-core.morph
+chunks:
+- name: mesa
+ morph: strata/mesa-common/mesa.morph
+ repo: upstream:mesa
+ ref: 635ad274470a064100ff6aa38ede83d748ae07cf
+ unpetrify-ref: baserock/mesa-10.6.0-rc1-jetson
diff --git a/strata/mesa-common/mesa.morph b/strata/mesa-common/mesa.morph
new file mode 100644
index 00000000..2a3fe96a
--- /dev/null
+++ b/strata/mesa-common/mesa.morph
@@ -0,0 +1,22 @@
+name: mesa
+kind: chunk
+build-system: autotools
+configure-commands:
+- |
+ cpu=$(echo $TARGET | cut -d '-' -f 1)
+ case "$cpu" in
+ armv7lhf)
+ DRIDRIVERS=no
+ GALLIUMDRIVERS=nouveau,freedreno,svga,swrast,vc4
+ ;;
+ *)
+ DRIDRIVERS=yes
+ GALLIUMDRIVERS=yes
+ ;;
+ esac
+ ./autogen.sh --prefix="$PREFIX" \
+ --enable-gles2 \
+ --disable-glx \
+ --with-egl-platforms=drm,wayland \
+ --with-gallium-drivers="$GALLIUMDRIVERS" \
+ --with-dri-drivers="$DRIDRIVERS"
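Note: the configure command in mesa.morph keys its driver selection off the first component of the Morph target triplet. For illustration, with an assumed armv7lhf triplet (the exact value depends on the build target and is not given here):

    # Illustration of the triplet parsing used above; the triplet value is an assumption.
    TARGET=armv7lhf-baserock-linux-gnueabihf
    echo "$TARGET" | cut -d '-' -f 1    # prints: armv7lhf, which selects the Gallium-only driver set
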
diff --git a/strata/morph-utils.morph b/strata/morph-utils.morph
new file mode 100644
index 00000000..acc8fad9
--- /dev/null
+++ b/strata/morph-utils.morph
@@ -0,0 +1,44 @@
+name: morph-utils
+kind: stratum
+build-depends:
+- morph: strata/core.morph
+- morph: strata/ostree-core.morph
+- morph: strata/python-cliapp.morph
+- morph: strata/python-core.morph
+- morph: strata/python-pygobject.morph
+- morph: strata/python-wsgi.morph
+chunks:
+- name: python-ttystatus
+ morph: strata/morph-utils/python-ttystatus.morph
+ repo: upstream:python-ttystatus
+ ref: 47d871216cea6ce3b9d6efd70e9a0f38ab8604f0
+ unpetrify-ref: baserock/morph
+- name: git-fat
+ morph: strata/tools/git-fat.morph
+ repo: upstream:git-fat
+ ref: 208f88d0f0ef04c25e8a231979eb0083f57b1610
+ unpetrify-ref: baserock/morph
+- name: pyfilesystem
+ morph: strata/morph-utils/pyfilesystem.morph
+ repo: upstream:pyfilesystem
+ ref: 821f7db1ce3a3e1ac53fa514ddacbc2871eac0f6
+ unpetrify-ref: baserock/morph
+- name: cmdtest
+ morph: strata/morph-utils/cmdtest.morph
+ repo: upstream:cmdtest
+ ref: ac91791842c6e7e6eda3213916af413255999c7b
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - python-ttystatus
+- name: pylru
+ repo: upstream:python-packages/pylru
+ ref: 221dd259f5c34562c835611d1cf62384b9019da4
+ unpetrify-ref: master
+- name: morph
+ repo: baserock:baserock/morph
+ ref: 1da8ee6f66718de5d5dd413e188425ee4bdcfb47
+ unpetrify-ref: master
+ build-depends:
+ - cmdtest
+ - pyfilesystem
+ - pylru
diff --git a/strata/morph-utils/cmdtest.morph b/strata/morph-utils/cmdtest.morph
new file mode 100644
index 00000000..3e1c71c6
--- /dev/null
+++ b/strata/morph-utils/cmdtest.morph
@@ -0,0 +1,6 @@
+name: cmdtest
+kind: chunk
+build-commands:
+- python setup.py build
+install-commands:
+- python setup.py install --prefix=/usr --root "$DESTDIR"
diff --git a/strata/morph-utils/pyfilesystem.morph b/strata/morph-utils/pyfilesystem.morph
new file mode 100644
index 00000000..a4931dfa
--- /dev/null
+++ b/strata/morph-utils/pyfilesystem.morph
@@ -0,0 +1,6 @@
+name: pyfilesystem
+kind: chunk
+build-commands:
+- python setup.py build
+install-commands:
+- python setup.py install --prefix="$PREFIX" --root "$DESTDIR"
diff --git a/strata/morph-utils/python-ttystatus.morph b/strata/morph-utils/python-ttystatus.morph
new file mode 100644
index 00000000..e45ef7a8
--- /dev/null
+++ b/strata/morph-utils/python-ttystatus.morph
@@ -0,0 +1,6 @@
+name: python-ttystatus
+kind: chunk
+build-commands:
+- python setup.py build
+install-commands:
+- python setup.py install --prefix=/usr --root "$DESTDIR"
diff --git a/strata/mtd-utilities.morph b/strata/mtd-utilities.morph
new file mode 100644
index 00000000..67ff09a8
--- /dev/null
+++ b/strata/mtd-utilities.morph
@@ -0,0 +1,11 @@
+name: mtd-utilities
+kind: stratum
+build-depends:
+- morph: strata/foundation.morph
+- morph: strata/core.morph
+chunks:
+- name: mtd-utils
+ morph: strata/mtd-utilities/mtd-utils.morph
+ repo: upstream:mtd-utils
+ ref: 9f107132a6a073cce37434ca9cda6917dd8d866b
+ unpetrify-ref: v1.5.1
diff --git a/strata/mtd-utilities/mtd-utils.morph b/strata/mtd-utilities/mtd-utils.morph
new file mode 100644
index 00000000..59dfcc68
--- /dev/null
+++ b/strata/mtd-utilities/mtd-utils.morph
@@ -0,0 +1,6 @@
+name: mtd-utils
+kind: chunk
+build-commands:
+- make
+install-commands:
+- make install DESTDIR="$DESTDIR"
diff --git a/strata/multimedia-common.morph b/strata/multimedia-common.morph
new file mode 100644
index 00000000..69f3ccad
--- /dev/null
+++ b/strata/multimedia-common.morph
@@ -0,0 +1,16 @@
+name: multimedia-common
+kind: stratum
+description: Multimedia Libraries
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: ogg
+ repo: upstream:ogg
+ ref: 0deb6226917e32a71f15d5279d0bc76d8b97c13f
+ unpetrify-ref: master
+- name: libvorbis
+ repo: upstream:libvorbis
+ ref: 2ae58009cbc655a6031280f92fb1e7b324318ae8
+ unpetrify-ref: libvorbis-1.3.3
+ build-depends:
+ - ogg
diff --git a/strata/multimedia-gstreamer-0.10.morph b/strata/multimedia-gstreamer-0.10.morph
new file mode 100644
index 00000000..51230de2
--- /dev/null
+++ b/strata/multimedia-gstreamer-0.10.morph
@@ -0,0 +1,33 @@
+name: multimedia-gstreamer-0.10
+kind: stratum
+description: Codecs
+build-depends:
+- morph: strata/core.morph
+- morph: strata/foundation.morph
+- morph: strata/audio-bluetooth.morph
+- morph: strata/multimedia-common.morph
+chunks:
+- name: orc
+ repo: upstream:orc
+ ref: b4f7fcaf99a4d952e59f2a9fa9286d24cc4b3a5a
+ unpetrify-ref: baserock/morph
+- name: gstreamer@0.10
+ repo: upstream:gstreamer
+ ref: 1bb950008f4656f6a6153fa88a8ebb5a39fbe84f
+ unpetrify-ref: baserock/morph/0.10
+ build-depends:
+ - orc
+- name: gstreamer-plugins-base@0.10
+ repo: upstream:gstreamer-plugins-base
+ ref: 960c596309dbb983a1d733259adccc45c47006a2
+ unpetrify-ref: baserock/morph/0.10
+ build-depends:
+ - gstreamer@0.10
+- name: gstreamer-plugins-good@0.10
+ repo: upstream:gstreamer-plugins-good
+ ref: 725e80e0c6b7f8e66d9b3fcaffd283ecbd8498d3
+ unpetrify-ref: baserock/morph/0.10
+ build-depends:
+ - gstreamer@0.10
+ - gstreamer-plugins-base@0.10
+ - orc
diff --git a/strata/multimedia-gstreamer.morph b/strata/multimedia-gstreamer.morph
new file mode 100644
index 00000000..f91e5173
--- /dev/null
+++ b/strata/multimedia-gstreamer.morph
@@ -0,0 +1,40 @@
+name: multimedia-gstreamer
+kind: stratum
+description: GStreamer multimedia components
+build-depends:
+- morph: strata/audio-bluetooth.morph
+- morph: strata/multimedia-common.morph
+- morph: strata/mesa-common.morph
+chunks:
+- name: orc
+ repo: upstream:orc
+ ref: 16e053b8f2359196fd50b111f1c10b93590f5cb9
+ unpetrify-ref: orc-0.4.22
+- name: gstreamer
+ repo: upstream:gstreamer
+ ref: c61dea148ca3f14586d8eddf0b7e6ca47c164c86
+ unpetrify-ref: baserock/1.4.4+bison_fix
+ build-depends:
+ - orc
+- name: gstreamer-plugins-base
+ repo: upstream:gstreamer-plugins-base
+ ref: 3b38ad94a2d58c07c24e4647e08afa1fe4dd7d46
+ unpetrify-ref: baserock/1.4
+ build-depends:
+ - gstreamer
+- name: gstreamer-plugins-good
+ repo: upstream:gstreamer-plugins-good
+ ref: 9d48c2f7a7b63fd967de7eec72434bc876c02667
+ unpetrify-ref: baserock/1.4
+ build-depends:
+ - gstreamer
+ - gstreamer-plugins-base
+ - orc
+- name: gstreamer-plugins-bad
+ repo: upstream:gstreamer-plugins-bad
+ ref: bb2a4669ff57af90c8101c54744d3228aa060475
+ unpetrify-ref: baserock/1.4
+ build-depends:
+ - gstreamer
+ - gstreamer-plugins-base
+ - orc
diff --git a/strata/network-security.morph b/strata/network-security.morph
new file mode 100644
index 00000000..d88d2f43
--- /dev/null
+++ b/strata/network-security.morph
@@ -0,0 +1,18 @@
+name: network-security
+kind: stratum
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: nspr
+ morph: strata/network-security/nspr.morph
+ repo: upstream:nspr-hg
+ ref: a6ee84946475c1fb7624973af28163f6da247c0d
+ unpetrify-ref: baserock/morph
+- name: nss
+ morph: strata/network-security/nss.morph
+ repo: upstream:nss
+ ref: ee1c99a3c8c29f50a91ab28f2f7b7773f6355487
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - nspr
+
diff --git a/strata/network-security/nspr.morph b/strata/network-security/nspr.morph
new file mode 100644
index 00000000..27613eb4
--- /dev/null
+++ b/strata/network-security/nspr.morph
@@ -0,0 +1,12 @@
+name: nspr
+kind: chunk
+build-system: autotools
+configure-commands:
+- |
+ case "$MORPH_ARCH" in
+ x86_64|ppc64)
+ EXTRA_ARGS="--enable-64bit";;
+ *)
+ EXTRA_ARGS="";;
+ esac
+ ./configure --prefix="$PREFIX" --with-mozilla --with-pthreads $EXTRA_ARGS
diff --git a/strata/network-security/nss.morph b/strata/network-security/nss.morph
new file mode 100644
index 00000000..c83fd174
--- /dev/null
+++ b/strata/network-security/nss.morph
@@ -0,0 +1,7 @@
+name: nss
+kind: chunk
+max-jobs: 1
+build-system: autotools
+configure-commands:
+- NOCONFIGURE=1 ./autogen.sh
+- ./configure --prefix="$PREFIX" --libdir="$PREFIX/lib"
diff --git a/strata/networking-utils.morph b/strata/networking-utils.morph
new file mode 100644
index 00000000..951e2d29
--- /dev/null
+++ b/strata/networking-utils.morph
@@ -0,0 +1,52 @@
+name: networking-utils
+kind: stratum
+description: |
+ Stratum which contains utils to work with networks: create, enable, filter...
+
+ This stratum requires kernel config flags which are not enabled in the default
+ Baserock kernel found in the BSP strata. See the Openstack BSP stratum to get
+ an idea of what is needed.
+build-depends:
+- morph: strata/foundation.morph
+chunks:
+- name: iproute2
+ morph: strata/networking-utils/iproute2.morph
+ repo: upstream:iproute2
+ ref: 50231ad2a554fcb02462bed6405c970833c1baee
+ unpetrify-ref: v3.17.0
+- name: libmnl
+ repo: upstream:libmnl
+ ref: c9f19b98cd8e108617e825e071091df14f78c53a
+ unpetrify-ref: master
+- name: ipset
+ morph: strata/networking-utils/ipset.morph
+ repo: upstream:ipset
+ ref: cf25c4c50e9996af30499e5b1c3354ab53be7237
+ unpetrify-ref: v6.24
+ build-depends:
+ - libmnl
+- name: libpcap
+ morph: strata/networking-utils/libpcap.morph
+ repo: upstream:libpcap
+ ref: 098a643f817e8a9929c70dfba157e4d399398375
+ unpetrify-ref: libpcap-1.7
+- name: tcpdump
+ morph: strata/networking-utils/tcpdump.morph
+ repo: upstream:tcpdump
+ ref: 8aa5edea1b8846740514dee4158b6c707d38fc13
+ unpetrify-ref: tcpdump-4.7
+ build-depends:
+ - libpcap
+- name: libnet
+ morph: strata/networking-utils/libnet.morph
+ repo: upstream:libnet
+ ref: 05df365769597e1d64d02af931d6127762ff2658
+ unpetrify-ref: libnet-1.2
+- name: arping
+ morph: strata/networking-utils/arping.morph
+ repo: upstream:arping
+ ref: a416764b03f4d5960b61372e27d84606899bfe2c
+ unpetrify-ref: arping-2.15
+ build-depends:
+ - libpcap
+ - libnet
diff --git a/strata/networking-utils/arping.morph b/strata/networking-utils/arping.morph
new file mode 100644
index 00000000..d9f5bd30
--- /dev/null
+++ b/strata/networking-utils/arping.morph
@@ -0,0 +1,3 @@
+name: arping
+kind: chunk
+build-system: autotools
diff --git a/strata/networking-utils/iproute2.morph b/strata/networking-utils/iproute2.morph
new file mode 100644
index 00000000..7ba04db6
--- /dev/null
+++ b/strata/networking-utils/iproute2.morph
@@ -0,0 +1,10 @@
+name: iproute2
+kind: chunk
+build-commands:
+# The arpd binary depends on Berkeley DB, which is licensed by Oracle.
+# As arpd is not needed for OpenStack systems, it will not be compiled
+# or installed.
+- sed -i '/^TARGETS/s@arpd@@g' misc/Makefile
+- make
+install-commands:
+- DESTDIR="$DESTDIR" PREFIX="$PREFIX" make install
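Note: the sed in iproute2.morph above strips arpd out of the TARGETS list in misc/Makefile rather than patching the sources. Assuming a TARGETS line of roughly the usual shape (the exact contents are not reproduced here), the substitution behaves like this:

    # Illustrative only; the real TARGETS line in misc/Makefile may differ.
    echo 'TARGETS=ss nstat ifstat rtacct arpd lnstat' | sed '/^TARGETS/s@arpd@@g'
    # -> TARGETS=ss nstat ifstat rtacct  lnstat
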
diff --git a/strata/networking-utils/ipset.morph b/strata/networking-utils/ipset.morph
new file mode 100644
index 00000000..7fe8c6dc
--- /dev/null
+++ b/strata/networking-utils/ipset.morph
@@ -0,0 +1,6 @@
+name: ipset
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./autogen.sh
+- ./configure --prefix="$PREFIX" --enable-static=no --with-kmod=no
diff --git a/strata/networking-utils/libnet.morph b/strata/networking-utils/libnet.morph
new file mode 100644
index 00000000..c4f2b213
--- /dev/null
+++ b/strata/networking-utils/libnet.morph
@@ -0,0 +1,15 @@
+name: libnet
+kind: chunk
+build-system: manual
+pre-configure-commands:
+# Avoid building the docs as they require doxygen
+- sed -i 's@doc @@' libnet/Makefile.am
+- sed -i 's@doc/.*Makefile@@' libnet/configure.ac
+
+- cd libnet && ./autogen.sh
+configure-commands:
+- cd libnet && ./configure --prefix="$PREFIX" --disable-samples --enable-shared=yes --with-pic
+build-commands:
+- cd libnet && make
+install-commands:
+- cd libnet && make DESTDIR="$DESTDIR" install
diff --git a/strata/networking-utils/libpcap.morph b/strata/networking-utils/libpcap.morph
new file mode 100644
index 00000000..7c5ee932
--- /dev/null
+++ b/strata/networking-utils/libpcap.morph
@@ -0,0 +1,3 @@
+name: libpcap
+kind: chunk
+build-system: autotools
diff --git a/strata/networking-utils/tcpdump.morph b/strata/networking-utils/tcpdump.morph
new file mode 100644
index 00000000..7a974dab
--- /dev/null
+++ b/strata/networking-utils/tcpdump.morph
@@ -0,0 +1,3 @@
+name: tcpdump
+kind: chunk
+build-system: autotools
diff --git a/strata/nfs.morph b/strata/nfs.morph
new file mode 100644
index 00000000..9b9aa248
--- /dev/null
+++ b/strata/nfs.morph
@@ -0,0 +1,30 @@
+name: nfs
+kind: stratum
+description: NFS utilities
+build-depends:
+- morph: strata/foundation.morph
+chunks:
+- name: libevent
+ repo: upstream:libevent
+ ref: 0b49ae34594533daa82c06a506078de9e336a013
+ unpetrify-ref: release-2.1.5-beta
+- name: ti-rpc
+ morph: strata/nfs/ti-rpc.morph
+ repo: upstream:ti-rpc
+ ref: c5a7a19070e74115fc9c26aa6f3dfec1da220c1f
+ unpetrify-ref: baserock/master
+- name: rpcbind
+ morph: strata/nfs/rpcbind.morph
+ repo: upstream:rpcbind
+ ref: aa3ac0d86c258d3e355ae59df31a96da795ecdfa
+ unpetrify-ref: baserock/master
+ build-depends:
+ - ti-rpc
+- name: nfs-utils
+ morph: strata/nfs/nfs-utils.morph
+ repo: upstream:nfs-utils
+ ref: 0da9f20a22bb32b2da6b587b4e85dafde087c0f7
+ unpetrify-ref: baserock/master
+ build-depends:
+ - libevent
+ - ti-rpc
diff --git a/strata/nfs/nfs-utils.morph b/strata/nfs/nfs-utils.morph
new file mode 100644
index 00000000..c412e1d0
--- /dev/null
+++ b/strata/nfs/nfs-utils.morph
@@ -0,0 +1,12 @@
+name: nfs-utils
+kind: chunk
+build-system: autotools
+configure-commands:
+- NOCONFIGURE=1 ./autogen.sh
+- ./configure --prefix="$PREFIX" --disable-nfsv4 --disable-nfsv41 --disable-gss --with-rpcgen=internal --without-tcp-wrappers
+install-commands:
+- make DESTDIR="$DESTDIR" install
+- mkdir -p "$DESTDIR"/lib/systemd/system
+- install -m644 proc-fs-nfsd.mount "$DESTDIR"/lib/systemd/system
+- install -m644 nfs-mountd.service "$DESTDIR"/lib/systemd/system
+- install -m644 nfs-server.service "$DESTDIR"/lib/systemd/system
diff --git a/strata/nfs/rpcbind.morph b/strata/nfs/rpcbind.morph
new file mode 100644
index 00000000..0ca929dc
--- /dev/null
+++ b/strata/nfs/rpcbind.morph
@@ -0,0 +1,11 @@
+name: rpcbind
+kind: chunk
+build-system: autotools
+configure-commands:
+- NOCONFIGURE=1 ./autogen.sh
+- ./configure --prefix="$PREFIX" --with-rpcuser=nobody --enable-warmstarts
+install-commands:
+- make DESTDIR="$DESTDIR" install
+- mkdir -p "$DESTDIR"/lib/systemd/system
+- install -m644 rpcbind.service "$DESTDIR"/lib/systemd/system
+- install -m644 rpcbind.socket "$DESTDIR"/lib/systemd/system
diff --git a/strata/nfs/tcp-wrappers.morph b/strata/nfs/tcp-wrappers.morph
new file mode 100644
index 00000000..c68671f6
--- /dev/null
+++ b/strata/nfs/tcp-wrappers.morph
@@ -0,0 +1,9 @@
+name: tcp-wrappers
+kind: chunk
+build-commands:
+- make MAJOR=0 MINOR=7 REL=6 linux
+install-commands:
+- mkdir -p "$DESTDIR"/"$PREFIX"/include
+- install -p -m644 tcpd.h "$DESTDIR"/"$PREFIX"/include
+- mkdir -p "$DESTDIR"/"$PREFIX"/lib
+- cp -a libwrap.so* "$DESTDIR"/"$PREFIX"/lib
diff --git a/strata/nfs/ti-rpc.morph b/strata/nfs/ti-rpc.morph
new file mode 100644
index 00000000..3fb91e1f
--- /dev/null
+++ b/strata/nfs/ti-rpc.morph
@@ -0,0 +1,6 @@
+name: ti-rpc
+kind: chunk
+build-system: autotools
+configure-commands:
+- NOCONFIGURE=1 ./autogen.sh
+- ./configure --prefix="$PREFIX" --sysconfdir=/etc
diff --git a/strata/nodejs.morph b/strata/nodejs.morph
new file mode 100644
index 00000000..58dbdd0b
--- /dev/null
+++ b/strata/nodejs.morph
@@ -0,0 +1,10 @@
+name: nodejs
+kind: stratum
+description: Stratum for Node.js and related tools
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: node
+ repo: upstream:node
+ ref: cc56c62ed879ad4f93b1fdab3235c43e60f48b7e
+ unpetrify-ref: v0.10.26
diff --git a/strata/ntpd.morph b/strata/ntpd.morph
new file mode 100644
index 00000000..4c5483e5
--- /dev/null
+++ b/strata/ntpd.morph
@@ -0,0 +1,10 @@
+name: ntpd
+kind: stratum
+build-depends:
+- morph: strata/foundation.morph
+chunks:
+- name: ntpd
+ morph: strata/ntpd/ntpd.morph
+ repo: upstream:ntp
+ ref: b2ccf8dd31d1457ae9f0ae270054117179220370
+ unpetrify-ref: ntp-4.2.8p2
diff --git a/strata/ntpd/ntpd.morph b/strata/ntpd/ntpd.morph
new file mode 100644
index 00000000..49316c14
--- /dev/null
+++ b/strata/ntpd/ntpd.morph
@@ -0,0 +1,51 @@
+name: ntpd
+kind: chunk
+build-system: autotools
+configure-commands:
+ - ./configure --prefix="$PREFIX" --enable-linuxcaps
+post-install-commands:
+- |
+ cat > ntpd.service << EOF
+ [Unit]
+ Description=Network Time Service
+ After=network.target nss-lookup.target
+ Conflicts=systemd-timesyncd.service
+
+ [Service]
+ Type=forking
+ ExecStart=/usr/bin/ntpd -u ntp:ntp
+ PrivateTmp=True
+ Restart=on-failure
+
+ [Install]
+ WantedBy=multi-user.target
+ EOF
+- install -D -m 644 ntpd.service "$DESTDIR"/lib/systemd/system/ntpd.service
+- mkdir -p "$DESTDIR"/lib/systemd/system/multi-user.target.wants
+- ln -s /lib/systemd/system/ntpd.service "$DESTDIR"/lib/systemd/system/multi-user.target.wants/ntpd.service
+- |
+ cat > ntp.conf << EOF
+ # We use iburst here to reduce the potential initial delay to set the clock
+ server 0.pool.ntp.org iburst
+ server 1.pool.ntp.org iburst
+ server 2.pool.ntp.org iburst
+ server 3.pool.ntp.org iburst
+
+ # kod - notify client when packets are denied service,
+ # rather than just dropping the packets
+ #
+ # nomodify - deny queries which attempt to modify the state of the server
+ #
+ # notrap - decline to provide mode 6 control message trap service to
+ # matching hosts
+ #
+ # see ntp.conf(5) for more details
+ restrict -4 default kod notrap nomodify
+ restrict -6 default kod notrap nomodify
+ EOF
+- install -D -m 644 ntp.conf "$DESTDIR"/etc/ntp.conf
+system-integration:
+ ntpd-misc:
+ 00-add-ntpd-user:
+ - groupadd -r ntp
+ - useradd -g ntp -d /home/ntp -s /bin/false -r ntp
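Note: ntpd.morph above enables the unit statically by creating the multi-user.target.wants symlink at install time rather than via systemctl enable. A quick check on the assembled system, using the paths installed above, is:

    # Expected to print /lib/systemd/system/ntpd.service on a deployed system.
    readlink /lib/systemd/system/multi-user.target.wants/ntpd.service
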
diff --git a/strata/ocaml-language.morph b/strata/ocaml-language.morph
new file mode 100644
index 00000000..3364763d
--- /dev/null
+++ b/strata/ocaml-language.morph
@@ -0,0 +1,11 @@
+name: ocaml-language
+kind: stratum
+description: |
+ OCaml
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: ocaml
+ morph: strata/ocaml/ocaml.morph
+ repo: upstream:ocaml
+ ref: fa7961d5fada53056f38a9ae36615df26352028a
diff --git a/strata/ocaml/ocaml.morph b/strata/ocaml/ocaml.morph
new file mode 100644
index 00000000..00e36767
--- /dev/null
+++ b/strata/ocaml/ocaml.morph
@@ -0,0 +1,8 @@
+name: ocaml
+kind: chunk
+configure-commands:
+- ./configure --prefix "$PREFIX"
+build-commands:
+- make world.opt
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/openbmc.morph b/strata/openbmc.morph
new file mode 100644
index 00000000..b1309a2a
--- /dev/null
+++ b/strata/openbmc.morph
@@ -0,0 +1,22 @@
+name: openbmc
+kind: stratum
+description: OpenBMC components
+build-depends:
+- morph: strata/build-essential.morph
+- morph: strata/core.morph
+chunks:
+- name: i2c-tools
+ morph: strata/openbmc/i2c-tools.morph
+ repo: upstream:i2c-tools
+ ref: 187dd637f7789c4cb12dfe2ba13f6e01d2a3c698
+ unpetrify-ref: V3-1-1
+- name: lm_sensors
+ morph: strata/openbmc/lm_sensors.morph
+ repo: upstream:lm-sensors
+ ref: d9983967a89382f64c65db67026f85f073ef9b74
+ unpetrify-ref: master
+- name: isc-dhcp
+ repo: upstream:isc-dhcp-tarball
+ morph: strata/openbmc/isc-dhcp.morph
+ ref: f800382616186a5d30e28d8b2c51e97a9a8360f2
+ unpetrify-ref: master
diff --git a/strata/openbmc/i2c-tools.morph b/strata/openbmc/i2c-tools.morph
new file mode 100644
index 00000000..5fba3b0a
--- /dev/null
+++ b/strata/openbmc/i2c-tools.morph
@@ -0,0 +1,16 @@
+name: i2c-tools
+kind: chunk
+build-commands:
+- make -C eepromer && make
+install-commands:
+- mkdir -p "$DESTDIR$PREFIX"/bin
+- |
+ i2ctools="i2cdetect i2cdump i2cget i2cset"
+ for f in ${i2ctools}; do
+ install -m 755 tools/${f} "$DESTDIR$PREFIX"/bin/${f}
+ done
+- |
+ eepromtools="eepromer eeprom eeprog"
+ for f in ${eepromtools}; do
+ install -m 755 eepromer/${f} "$DESTDIR$PREFIX"/bin/${f}
+ done
diff --git a/strata/openbmc/isc-dhcp.morph b/strata/openbmc/isc-dhcp.morph
new file mode 100644
index 00000000..5ee2e66b
--- /dev/null
+++ b/strata/openbmc/isc-dhcp.morph
@@ -0,0 +1,3 @@
+name: isc-dhcp
+kind: chunk
+build-system: autotools
diff --git a/strata/openbmc/lm_sensors.morph b/strata/openbmc/lm_sensors.morph
new file mode 100644
index 00000000..27ee10d5
--- /dev/null
+++ b/strata/openbmc/lm_sensors.morph
@@ -0,0 +1,7 @@
+name: lm_sensors
+kind: chunk
+build-commands:
+- make user PROG_EXTRA="sensors"
+install-commands:
+- make DESTDIR="$DESTDIR" PREFIX="$PREFIX" install
+# TODO: If we need sensord too, we'll need rrdtool as a dependency and install instructions for sensord components
diff --git a/strata/openstack-clients.morph b/strata/openstack-clients.morph
new file mode 100644
index 00000000..32e0f356
--- /dev/null
+++ b/strata/openstack-clients.morph
@@ -0,0 +1,103 @@
+name: openstack-clients
+kind: stratum
+description: Stratum with all the OpenStack clients and their dependencies.
+build-depends:
+- morph: strata/openstack-common.morph
+chunks:
+- name: warlock
+ repo: upstream:warlock
+ ref: 408ccb82347aabf3dc7cf6eccbd2ed2475cb0d60
+ unpetrify-ref: 1.1.0
+- name: python-keystoneclient
+ repo: upstream:openstack/python-keystoneclient
+ ref: 79d1eec35aad874a7c08ff22c39260884a5524ba
+ unpetrify-ref: 0.11.1
+- name: python-glanceclient
+ repo: upstream:openstack/python-glanceclient
+ ref: 8a877b2752162d6a2db43d7d61d6311c4f42285b
+ unpetrify-ref: 0.14.1
+ build-depends:
+ - python-keystoneclient
+ - warlock
+- name: python-novaclient
+ repo: upstream:openstack/python-novaclient
+ ref: 5ecfdac6b34769e200ff5c4c7429c20518c5b24f
+ unpetrify-ref: 2.20.0
+ build-depends:
+ - python-keystoneclient
+- name: python-swiftclient
+ repo: upstream:openstack/python-swiftclient
+ ref: bb4d2ab59c4de9389667eeed255642f51e276f1e
+ unpetrify-ref: 2.3.1
+- name: python-troveclient
+ repo: upstream:openstack/python-troveclient
+ ref: e010a919750f07493afd42a4db867853b925bcbf
+ unpetrify-ref: 1.0.7
+- name: python-cinderclient
+ repo: upstream:openstack/python-cinderclient
+ ref: ac9b0913904c43f4bf12c8164324d6e6a55dc1ab
+ unpetrify-ref: 1.1.1
+ build-depends:
+ - python-keystoneclient
+- name: pyparsing
+ morph: strata/openstack-clients/pyparsing.morph
+ repo: upstream:python-packages/pyparsing.git
+ ref: 8062c76ab3958a570052124f17e71f3dd3ec2257
+ unpetrify-ref: pyparsing_2.0.3
+- name: cmd2
+ repo: upstream:python-packages/cmd2.git
+ ref: 07b4dc3d6991cbdc420c246e807371c97a467d1a
+ unpetrify-ref: master
+ build-depends:
+ - pyparsing
+- name: cliff
+ repo: upstream:openstack/cliff
+ ref: 42675b2d7ad93f4bba9c4216874c68b8e5834147
+ unpetrify-ref: 1.7.0
+ build-depends:
+ - cmd2
+ - pyparsing
+- name: python-neutronclient
+ repo: upstream:openstack/python-neutronclient
+ ref: fa5642f1550bc8f818c1686c40edbaf3672d356a
+ unpetrify-ref: 2.3.9
+ build-depends:
+ - cliff
+ - python-keystoneclient
+- name: python-ceilometerclient
+ repo: upstream:openstack/python-ceilometerclient
+ ref: 7316dd16b8850270db27c1298dcf5a2223f2f1e1
+ unpetrify-ref: 1.0.12
+ build-depends:
+ - python-keystoneclient
+- name: python-heatclient
+ repo: upstream:openstack/python-heatclient
+ ref: 6089d31e302d80910cc15132f99a1bf358bbb64f
+ unpetrify-ref: 0.2.12
+ build-depends:
+ - python-keystoneclient
+- name: python-designateclient
+ repo: upstream:openstack/python-designateclient
+ ref: a900b41e4d8a9eb40e1812295ba16d773e2b9618
+ unpetrify-ref: 1.1.0
+ build-depends:
+ - cliff
+ - python-keystoneclient
+- name: python-barbicanclient
+ repo: upstream:openstack/python-barbicanclient.git
+ ref: ac30643631b6bc1c40116061f8eb280a7881e422
+ unpetrify-ref: 2.2.1
+ build-depends:
+ - python-keystoneclient
+ - cliff
+- name: python-saharaclient
+ repo: upstream:openstack/python-saharaclient
+ ref: eadb40ccb62566f02cb93ec03aa232e48835dec3
+ unpetrify-ref: 0.7.6
+ build-depends:
+ - python-keystoneclient
+- name: python-ironicclient
+ morph: strata/openstack-clients/python-ironicclient.morph
+ repo: upstream:openstack/python-ironicclient
+ ref: 4390a21663de3123fac306e9860624ba7deda0e6
+ unpetrify-ref: 0.4.1
diff --git a/strata/openstack-clients/pyparsing.morph b/strata/openstack-clients/pyparsing.morph
new file mode 100644
index 00000000..60088544
--- /dev/null
+++ b/strata/openstack-clients/pyparsing.morph
@@ -0,0 +1,6 @@
+name: pyparsing
+kind: chunk
+build-commands:
+- cd src && python setup.py build
+install-commands:
+- cd src && python setup.py install --prefix "$PREFIX" --root "$DESTDIR"
diff --git a/strata/openstack-clients/python-ironicclient.morph b/strata/openstack-clients/python-ironicclient.morph
new file mode 100644
index 00000000..95be0bd8
--- /dev/null
+++ b/strata/openstack-clients/python-ironicclient.morph
@@ -0,0 +1,3 @@
+name: python-ironicclient
+kind: chunk
+build-system: python-distutils
diff --git a/strata/openstack-common.morph b/strata/openstack-common.morph
new file mode 100644
index 00000000..78c04ba6
--- /dev/null
+++ b/strata/openstack-common.morph
@@ -0,0 +1,112 @@
+name: openstack-common
+kind: stratum
+description: |
+ Stratum with the Python packages needed to build OpenStack
+ clients and services.
+build-depends:
+- morph: strata/python-common.morph
+- morph: strata/foundation.morph
+chunks:
+- name: pycparser
+ repo: upstream:python-packages/pycparser
+ ref: c926670643ebb5f88a2bf56579fc9934c82be6d2
+ unpetrify-ref: master
+- name: cffi
+ repo: upstream:python-packages/cffi
+ ref: fc53b53095d61a1ec5814c09c3bf2c7e18627fb5
+ unpetrify-ref: master
+ build-depends:
+ - pycparser
+- name: pytz
+ repo: upstream:pytz-tarball
+ ref: c414cb801776e11c769fb36c0d37b8d7a7c8712c
+ unpetrify-ref: pytz-2014.10
+- name: babel
+ repo: upstream:babel
+ ref: 246996b0ee51e49ebcd504e234d3bcdcb178996c
+ unpetrify-ref: master
+ build-depends:
+ - pytz
+- name: enum34
+ repo: upstream:python-packages/enum34.git
+ ref: f1fa1787ceaac72b9934f318ea2135d28c05dd55
+ unpetrify-ref: enum34-1.0
+- name: pyasn1
+ repo: upstream:python-packages/pyasn1
+ ref: fa8e66c5f4367a1e39b53dcddc133d084b10400f
+ unpetrify-ref: release_0_1_7_tag
+- name: cryptography
+ repo: upstream:python-packages/cryptography
+ ref: 9ac7d55150c8410a08ee3b00b89bfeac3dbd8e8f
+ unpetrify-ref: 0.7.2
+ build-depends:
+ - pyasn1
+ - enum34
+ - cffi
+- name: pyiso8601
+ repo: upstream:pyiso8601
+ ref: 9eb87a9bab114e9cc9a8d3dceb04362644d89cab
+ unpetrify-ref: 0.1.10
+- name: netaddr
+ repo: upstream:netaddr
+ ref: bbb31ed50a5338a7d1c0011fc4429d09954f9f0b
+ unpetrify-ref: release-0.7.12
+- name: stevedore
+ ref: 860bd8f8ecba38fdfda5b41a3a1dbe854d6528e2
+ unpetrify-ref: 1.0.0.0a2
+ repo: upstream:openstack/stevedore
+- name: oslo-config
+ repo: upstream:openstack/oslo-config
+ ref: 059579ac2189b94bc9e9555b2e9acfb31a83ef53
+ unpetrify-ref: 1.4.0.0a5
+ build-depends:
+ - netaddr
+ - stevedore
+- name: pyopenssl
+ repo: upstream:python-packages/pyopenssl
+ ref: 0146d447e63a737b4f05a1164d5950eff68543e7
+ unpetrify-ref: 0.14
+- name: oslo-i18n
+ repo: upstream:openstack/oslo-i18n
+ ref: 040f1d6afa733527385d2309e485bf37e9843b0e
+ unpetrify-ref: 0.4.0
+ build-depends:
+ - babel
+- name: oslo-utils
+ repo: upstream:openstack/oslo-utils
+ ref: 6a123fce93895e92004ce9d5bd8dee3c3642cf41
+ unpetrify-ref: 0.3.0
+ build-depends:
+ - babel
+ - pyiso8601
+ - oslo-i18n
+- name: futures
+ repo: upstream:python-packages/futures
+ ref: 4f7ceedb8a7742e52b0436a4160c7c44665a2597
+ unpetrify-ref: 2.2.0
+- name: oslo-serialization
+ repo: upstream:openstack/oslo-serialization.git
+ ref: 80fec894a54253d9b4c80dd8a563957966ca0b88
+ unpetrify-ref: 0.3.0
+- name: jsonschema
+ repo: upstream:jsonschema
+ ref: 35b60f390098d3306c03eee27ceec8cf8a493579
+ unpetrify-ref: v2.3.0
+- name: simplejson
+ repo: upstream:simplejson
+ ref: 54d5ff15d508c51366986cc4f77f2f287f036582
+ unpetrify-ref: v3.6.4
+- name: greenlet
+ ref: 42b65a7b67dfabd180625155cd4fcd8e51917fe2
+ unpetrify-ref: 0.4.5
+ repo: upstream:python-packages/greenlet
+- name: eventlet
+ ref: 8d2474197de4827a7bca9c33e71a82573b6fc721
+ unpetrify-ref: v0.15.2
+ repo: upstream:python-packages/eventlet
+ build-depends:
+ - greenlet
+- name: pastedeploy
+ repo: upstream:python-packages/pastedeploy
+ ref: 5ad87be617c2cc1dd31625688172e964db2756dc
+ unpetrify-ref: 1.5.2
diff --git a/strata/openstack-services.morph b/strata/openstack-services.morph
new file mode 100644
index 00000000..081b97f3
--- /dev/null
+++ b/strata/openstack-services.morph
@@ -0,0 +1,566 @@
+name: openstack-services
+kind: stratum
+description: Openstack clients and services
+build-depends:
+- morph: strata/erlang.morph
+- morph: strata/django.morph
+- morph: strata/xstatic.morph
+- morph: strata/openstack-clients.morph
+- morph: strata/test-tools.morph
+- morph: strata/python-tools.morph
+chunks:
+- name: rabbitmq-codegen
+ morph: strata/openstack-services/rabbitmq-codegen.morph
+ ref: 4dc5ccde2a0b3d638e5754b00abf94196fe9ca32
+ unpetrify-ref: rabbitmq_v3_4_1
+ repo: upstream:rabbitmq-codegen
+- name: rabbitmq-server
+ morph: strata/openstack-services/rabbitmq-server.morph
+ ref: 9afcf9b95d2c53f866e9a33ddce46562f8950ded
+ unpetrify-ref: baserock/master
+ repo: upstream:rabbitmq-server
+ build-depends:
+ - rabbitmq-codegen
+- name: oslo-rootwrap
+ repo: upstream:openstack/oslo-rootwrap
+ ref: 62d732277de5cf663172eafe6d3a6c149a9cf814
+ unpetrify-ref: master
+- name: py-amqp
+ repo: upstream:python-packages/py-amqp
+ ref: 875b10d1715def640042c7ff2f42c00a6c07eed1
+ unpetrify-ref: 1.4.6
+- name: librabbitmq
+ morph: strata/openstack-services/librabbitmq.morph
+ repo: upstream:python-packages/librabbitmq
+ ref: ecccbd2c7d92f6e189e362b26def8d980fa50e3b
+ unpetrify-ref: baserock/v1.6.1
+ build-depends:
+ - py-amqp
+- name: anyjson
+ repo: upstream:python-packages/anyjson
+ ref: 016506078e94718e1fe750eb5083ab5ba07498c8
+ unpetrify-ref: 0.3.3
+- name: kombu
+ repo: upstream:python-packages/kombu
+ ref: 6e68e54b3a846faa6f8b7e14a4a7ca51d1967bc2
+ unpetrify-ref: v3.0.23
+ build-depends:
+ - anyjson
+ - py-amqp
+ - librabbitmq
+- name: sqlalchemy
+ repo: upstream:python-packages/sqlalchemy
+ ref: ff34c480dfd2b8b7c72339d196f5477980a5124c
+ unpetrify-ref: rel_0_9_8
+- name: alembic
+ repo: upstream:python-packages/alembic
+ ref: 5f952f25caa688d89204d134ee867de7dbad917c
+ unpetrify-ref: rel_0_7_1
+ build-depends:
+ - sqlalchemy
+- name: lockfile
+ repo: upstream:python-packages/lockfile
+ ref: 777758cdf4520271370b3338b86b5c66f9b104f0
+ unpetrify-ref: master
+- name: dogpile-core
+ repo: upstream:python-packages/dogpile-core
+ ref: 3e6f0f86b4161b48898e656039154092426e5cc8
+ unpetrify-ref: rel_0_4_1
+- name: dogpile-cache
+ repo: upstream:python-packages/dogpile-cache
+ ref: 1c753914b335b4391bc5847a87b7c52ca81c2bc6
+ unpetrify-ref: rel_0_5_4
+ build-depends:
+ - dogpile-core
+- name: pyjwt
+ repo: upstream:python-packages/pyjwt
+ ref: 739efeff4c6c1fc30ab5cf5eb0ac337e8ee107d7
+ unpetrify-ref: 0.2.3
+- name: creole
+ repo: upstream:python-packages/creole
+ ref: 6390231688adb5d8678a71cd33204e9bca555d4a
+ unpetrify-ref: v1.2.1
+- name: pygments
+ repo: upstream:python-packages/pygments
+ ref: f41e8c594e76855611b3b3dfca300894bd29f1c4
+ unpetrify-ref: 2.0rc1
+- name: docutils
+ repo: upstream:python-packages/docutils
+ ref: 1976ba91eff979abc3e13e5d8cb68324833af6a0
+ unpetrify-ref: docutils-0.12
+- name: snowballstemmer
+ repo: upstream:python-packages/snowballstemmer.git
+ ref: f0d81ce1606d685029302ea073b59d9437d12569
+ unpetrify-ref: master
+- name: sphinx
+ repo: upstream:python-packages/sphinx
+ ref: e76c9771bad16e68cdb7deb18b4d13de0a718c68
+ unpetrify-ref: 1.3b1
+ build-depends:
+ - pygments
+ - docutils
+ - snowballstemmer
+- name: pystache
+ repo: upstream:python-packages/pystache.git
+ ref: cce3ebd658f28aeb7dd54561554dedf5259303dc
+ unpetrify-ref: baserock/v0.5.4
+- name: pies
+ repo: upstream:python-packages/pies.git
+ ref: 593dcce07fdb3e8d0053a863a7aba7f2ce8a6c9c
+ unpetrify-ref: 2.6.1
+- name: natsort
+ repo: upstream:python-packages/natsort.git
+ ref: ce94d418c3849e891f0508b41f5bccc4ae1d4e4d
+ unpetrify-ref: 3.5.1
+- name: isort
+ repo: upstream:python-packages/isort.git
+ ref: 9d1b03b6e1fd2f87be1c635d6159a76a131f8155
+ unpetrify-ref: 3.9.0
+ build-depends:
+ - pies
+ - natsort
+- name: pep8
+ repo: upstream:python-packages/pep8.git
+ ref: 164066c4d85f212f5f4a11699b848942c678b947
+ unpetrify-ref: 1.5.7
+- name: pyflakes
+ repo: upstream:python-packages/pyflakes.git
+ ref: 71b7b6776856912d50a0b9a85a8ea2a1dc7eb75c
+ unpetrify-ref: 0.8.1
+- name: smartypants
+ repo: upstream:python-packages/smartypants.git
+ ref: e89b6e7e119694edcd414a556c634d5ca85bff76
+ unpetrify-ref: v1.8.6
+ build-depends:
+ - isort
+ - pep8
+ - pyflakes
+- name: pycco
+ repo: upstream:python-packages/pycco
+ ref: 22e157e525760e843dba391ca85fbe9bd35fdc5b
+ unpetrify-ref: master
+ build-depends:
+ - pygments
+ - pystache
+ - smartypants
+- name: dateutil
+ repo: upstream:python-packages/dateutil
+ ref: 8c6026ba09716a4e164f5420120bfe2ebb2d9d82
+ unpetrify-ref: trunk
+- name: posix-ipc-tarball
+ repo: upstream:python-packages/posix-ipc-tarball
+ ref: f3765db04b903b3671733e07cf1541a51966dd14
+ unpetrify-ref: posix_ipc-0.9.8
+- name: paste
+ repo: upstream:python-packages/paste
+ ref: 4c177fce89fee925f0f4fbfde00ce2e1252562c0
+ unpetrify-ref: master
+- name: repoze-lru
+ repo: upstream:python-packages/repoze-lru
+ ref: 29c8281dee7fe8dae8c66c7c40ce7c058ec2ab0f
+ unpetrify-ref: 0.6
+- name: routes
+ repo: upstream:python-packages/routes
+ ref: 745a9207d9e48e309e292172543bc21075e65e09
+ unpetrify-ref: v2.1
+ build-depends:
+ - repoze-lru
+- name: passlib
+ repo: upstream:python-packages/passlib
+ ref: f407312597727a08440e41bc8e31d3b3b334c66f
+ unpetrify-ref: 1.6.2
+- name: tempita
+ repo: upstream:python-packages/tempita
+ ref: b2b67795a009e9f825cbd855d766b78a00273f10
+ unpetrify-ref: 0.5
+- name: numpy
+ repo: upstream:python-packages/numpy.git
+ ref: 65293874fb101907e1648e6b4fafd30d0aa1172a
+ unpetrify-ref: baserock/v1.9.0
+ build-depends:
+ - sphinx
+- name: websockify
+ repo: upstream:python-packages/websockify
+ ref: 2f025741f86419d4ad5702dabf5903db3dbfe77c
+ unpetrify-ref: baserock/v0.6.1-1-g2f02574
+ build-depends:
+ - numpy
+- name: httplib2
+ ref: 058a1f9448d5c27c23772796f83a596caf9188e6
+ unpetrify-ref: v0.9
+ repo: upstream:python-packages/httplib2
+- name: suds
+ repo: upstream:python-packages/suds
+ ref: badd30a87f676d632d7386b05401e6029a5df83c
+ unpetrify-ref: release-0.3.2
+- name: jsonrpclib
+ repo: upstream:python-packages/jsonrpclib
+ ref: 53c8ffcfe4dd1718086cc551dce8ac459e8abc67
+ unpetrify-ref: master
+- name: rtslib-fb
+ repo: upstream:python-packages/rtslib-fb
+ ref: 8ffb4739d596fb1aac5d62d173d1e6f80270af9d
+ unpetrify-ref: v2.1.fb52
+ morph: strata/openstack-services/rtslib-fb.morph
+- name: python-pexpect
+ repo: upstream:python-packages/pexpect
+ ref: 671417beb41c21f772687c565196fdde444b053b
+ unpetrify-ref: 3.3
+- name: ply
+ repo: upstream:python-packages/ply.git
+ ref: 6513f3537d80f483ec9c7e36d52e87f107a57f23
+ unpetrify-ref: master
+- name: jsonpath-rw
+ repo: upstream:python-packages/jsonpath-rw
+ ref: f4cfe56b8e8370116c5275ddcea970e9a68fd75d
+ unpetrify-ref: 1.2.3
+ build-depends:
+ - ply
+- name: logutils
+ repo: upstream:python-packages/logutils.git
+ ref: 095f14efbaaf838b7772bffd36a466abb9668efe
+ unpetrify-ref: 0.3.3
+- name: pecan
+ repo: upstream:python-packages/pecan
+ ref: c7f241fd6bb8a0b10e02b8b43aaf1810c312cfbf
+ unpetrify-ref: 0.8.0
+ build-depends:
+ - logutils
+- name: croniter
+ repo: upstream:python-packages/croniter
+ ref: 0c3aeac05791cb4ece9e30da29442e9cd5b22f36
+ unpetrify-ref: 0.3.5
+ build-depends:
+ - dateutil
+- name: msgpack-python
+ repo: upstream:python-packages/msgpack-python
+ ref: 61bac2f586e82313a0e618093bfed2435cd18983
+ unpetrify-ref: 0.4.2
+- name: qpid-python
+ morph: strata/openstack-services/qpid-python.morph
+ repo: upstream:python-packages/qpid-python
+ ref: 587b0febaf1996db1e483137ed6addb45580ee9e
+ unpetrify-ref: 0.30
+- name: simplegeneric-tarball
+ repo: upstream:python-packages/simplegeneric-tarball
+ ref: 601fab9b154c557dfd848d531a8969f4697d1aa2
+ unpetrify-ref: simplegeneric_0.8.1.orig
+- name: ipaddr-py
+ morph: strata/openstack-services/ipaddr-py.morph
+ ref: c813f4790d1092b1515ee18fe8270180ed3cc5cb
+ unpetrify-ref: master
+ repo: upstream:python-packages/ipaddr-py
+- name: wsme
+ repo: upstream:python-packages/wsme
+ ref: 99d7668789cc6252398222549599d1363cddb081
+ unpetrify-ref: 0.6.1
+ build-depends:
+ - ipaddr-py
+ - simplegeneric-tarball
+- name: pysnmp
+ repo: upstream:python-packages/pysnmp
+ ref: 1b377afeaf9ee96ab1d48aeebb2b7a6d65a4ac9d
+ unpetrify-ref: release_4_2_5_tag
+- name: thrift
+ morph: strata/openstack-services/thrift.morph
+ repo: upstream:thrift
+ ref: 591e20f9636c37527a70dc03598218c3468a0eff
+ unpetrify-ref: 0.9.2
+- name: retrying
+ repo: upstream:python-packages/retrying
+ ref: cab083eb5791615fadbc0c98ad77a70d64b77d0d
+ unpetrify-ref: v1.3.1
+- name: oslo-messaging
+ repo: upstream:openstack/oslo-messaging
+ ref: 6ea3b12492e86f9e8d109fc3490cc4d3a0edd8b6
+ unpetrify-ref: 1.4.0.0a5
+ build-depends:
+ - kombu
+- name: oslo-vmware
+ repo: upstream:openstack/oslo-vmware
+ ref: 047d62c4bbd01a951f77a1a4a75fb2b3d8ce23ee
+ unpetrify-ref: 0.6.0
+ build-depends:
+ - suds
+- name: taskflow
+ repo: upstream:openstack/taskflow
+ ref: 1caaecc5d6b2aa4cde4a50e31d1d993fce7a66c4
+ unpetrify-ref: 0.5.0
+- name: sqlalchemy-migrate
+ repo: upstream:python-packages/sqlalchemy-migrate
+ ref: 089663761cc15f8b3cdb874b6a76270ccdd0a412
+ unpetrify-ref: 0.9.1
+ build-depends:
+ - sqlalchemy
+ - tempita
+- name: oauthlib
+ repo: upstream:python-packages/oauthlib
+ ref: de773eefcb3c1afe54a0c12d5bf9bd214ceaf404
+ unpetrify-ref: 0.6.3
+ build-depends:
+ - docutils
+ - creole
+ - pycco
+ - sphinx
+ - pyjwt
+- name: happybase
+ repo: upstream:python-packages/happybase
+ ref: 90a4b7ef741f0ecfe5145693f64c6f7180b9ba3d
+ unpetrify-ref: 0.8
+ build-depends:
+ - thrift
+- name: osprofiler
+ repo: upstream:python-packages/osprofiler
+ ref: bbe39b517263017c9db56ae1d904d08846eacff7
+ unpetrify-ref: 0.3.0
+- name: pycadf
+ repo: upstream:python-packages/pycadf
+ ref: 52727bcea3a98e72331e748ce5f9e3a111a64cd1
+ unpetrify-ref: 0.6.0
+ build-depends:
+ - oslo-messaging
+ - posix-ipc-tarball
+- name: keystonemiddleware
+ repo: upstream:openstack/keystonemiddleware
+ ref: a7beb50b38be5c3dd4c44d68ad79d1bb206dab6b
+ unpetrify-ref: 1.2.0
+- name: oslo-db
+ repo: upstream:openstack/oslo-db
+ ref: 0bb1e236daae53a3f5b4b88761d7b19f7961ed6c
+ unpetrify-ref: 1.0.2
+ build-depends:
+ - alembic
+ - sqlalchemy
+ - sqlalchemy-migrate
+- name: glance_store
+ repo: upstream:openstack/glance_store.git
+ ref: 54f673d4133c1244e2555e7ceb3344c104a74154
+ unpetrify-ref: 0.1.8
+- name: glance
+ repo: upstream:openstack/glance
+ ref: 1db07bd8c07bbcde4bd267985c4e3961c22b990e
+ unpetrify-ref: 2014.2.3
+ build-depends:
+ - sqlalchemy
+ - anyjson
+ - routes
+ - sqlalchemy-migrate
+ - httplib2
+ - kombu
+ - keystonemiddleware
+ - wsme
+ - oslo-vmware
+ - paste
+ - oslo-db
+ - oslo-messaging
+ - retrying
+ - osprofiler
+ - glance_store
+- name: sqlparse
+ repo: upstream:python-packages/sqlparse
+ ref: 991e7348075accae6d08025212251af21e92e664
+ unpetrify-ref: 0.1.13
+- name: keystone
+ repo: upstream:openstack/keystone
+ ref: 91a33875385ca296d24f67d4ef9629a2b33bed99
+ unpetrify-ref: 2014.2.3
+ build-depends:
+ - paste
+ - routes
+ - sqlalchemy
+ - sqlalchemy-migrate
+ - passlib
+ - keystonemiddleware
+ - oslo-messaging
+ - oslo-db
+ - oauthlib
+ - dogpile-cache
+ - pycadf
+ - posix-ipc-tarball
+ - sqlparse
+- name: neutron
+ morph: strata/openstack-services/neutron.morph
+ repo: upstream:openstack/neutron
+ ref: 96b5962646510ee67b322aa82242e02e3edfaa83
+ unpetrify-ref: 2014.2.3
+ build-depends:
+ - paste
+ - routes
+ - anyjson
+ - httplib2
+ - jsonrpclib
+ - keystonemiddleware
+ - sqlalchemy
+ - alembic
+ - oslo-db
+ - oslo-messaging
+ - oslo-rootwrap
+- name: wsgiref
+ repo: upstream:python-packages/wsgiref.git
+ ref: e8360785eef259394e13b2062407edc3c2cbc1e0
+ unpetrify-ref: baserock/master
+- name: rfc3986
+ repo: upstream:python-packages/python-rfc3986.git
+ ref: 9817ec3e47bca8fba9a7cac56d785e9d644f7473
+ unpetrify-ref: rfc3986-0.2.0
+- name: urwid
+ repo: upstream:python-packages/urwid
+ ref: 838839f7a300a774240d52f943aafd6ff44b2413
+ unpetrify-ref: release-1.3.0
+- name: configshell-fb
+ repo: upstream:python-packages/configshell-fb
+ ref: 8c151ccdd75956da60b2304417c41a60a2c28231
+ unpetrify-ref: v1.1.fb17
+- name: targetcli-fb
+ repo: upstream:python-packages/targetcli
+ ref: c62610f3c2da6b4d364028c18bcc7f0d3da54477
+ unpetrify-ref: v2.1.fb39
+ build-depends:
+ - configshell-fb
+ - rtslib-fb
+- name: sysfsutils
+ repo: upstream:sysfsutils
+ ref: 237bf36e664db92f95b75067bf0f246726993254
+ unpetrify-ref: sysfsutils-2_1_0
+- name: open-iscsi
+ morph: strata/openstack-services/open-iscsi.morph
+ repo: upstream:open-iscsi
+ ref: 8da14e6f9eeeb4fd03938d40fe1126fe0d110b68
+ unpetrify-ref: master
+- name: nova
+ morph: strata/openstack-services/nova.morph
+ repo: upstream:openstack/nova
+ ref: e6452b995023e89bf6f1a1fb14f39216f83c760b
+ unpetrify-ref: 2014.2.3
+ build-depends:
+ - sqlalchemy
+ - anyjson
+ - keystonemiddleware
+ - kombu
+ - routes
+ - paste
+ - sqlalchemy-migrate
+ - suds
+ - posix-ipc-tarball
+ - websockify
+ - wsgiref
+ - oslo-db
+ - oslo-rootwrap
+ - pycadf
+ - oslo-messaging
+ - lockfile
+ - rfc3986
+ - oslo-vmware
+- name: cinder
+ morph: strata/openstack-services/cinder.morph
+ repo: upstream:openstack/cinder
+ ref: 48c792513fd342a5f82122b12e4ed2afd0de2ae7
+ unpetrify-ref: 2014.2.3
+ build-depends:
+ - anyjson
+ - keystonemiddleware
+ - kombu
+ - oslo-db
+ - oslo-messaging
+ - oslo-rootwrap
+ - osprofiler
+ - paste
+ - routes
+ - taskflow
+ - rtslib-fb
+ - sqlalchemy
+ - sqlalchemy-migrate
+ - suds
+ - wsgiref
+- name: pymemcache
+ repo: upstream:python-packages/pymemcache.git
+ ref: 3824d3b0bcdea3e8d0c08598bedfce10fd3c79e0
+ unpetrify-ref: master
+- name: sysv_ipc-tarball
+ repo: upstream:python-packages/sysv-ipc-tarball.git
+ ref: a77e3a63f004e6ee789fa05e4a5bbc333b1529f1
+ unpetrify-ref: sysv_ipc-0.6.8
+- name: tooz
+ repo: upstream:python-packages/tooz.git
+ ref: a90940a506b7c1bf52ef7d2f2ff52204fdcc6221
+ unpetrify-ref: 0.7
+ build-depends:
+ - pymemcache
+ - sysv_ipc-tarball
+ - msgpack-python
+ - retrying
+- name: ceilometer
+ repo: upstream:openstack/ceilometer
+ morph: strata/openstack-services/ceilometer.morph
+ ref: 720c2b4915caf5decd3da91ea2d6e6291316b940
+ unpetrify-ref: 2014.2.3
+ build-depends:
+ - alembic
+ - anyjson
+ - croniter
+ - happybase
+ - jsonpath-rw
+ - keystonemiddleware
+ - lockfile
+ - msgpack-python
+ - oslo-db
+ - oslo-rootwrap
+ - oslo-vmware
+ - pecan
+ - posix-ipc-tarball
+ - oslo-messaging
+ - pysnmp
+ - sqlalchemy
+ - sqlalchemy-migrate
+ - tooz
+ - wsme
+- name: django-openstack-auth
+ repo: upstream:openstack/django_openstack_auth
+ ref: e676c88a329af57d6c4f13df54f6e1e06c1f8360
+ unpetrify-ref: 1.1.8
+- name: horizon
+ morph: strata/openstack-services/horizon.morph
+ repo: upstream:openstack/horizon
+ ref: b37c1f3565e89a7fe3fef5ce76e9c26b22e3e7c4
+ unpetrify-ref: 2014.2.3
+ build-depends:
+ - django-openstack-auth
+ - lockfile
+- name: novnc
+ morph: strata/openstack-services/novnc.morph
+ repo: upstream:novnc
+ ref: 97be997f62d59c028fc45323b00e3b93fafe4eb4
+ unpetrify-ref: baserock/v0.5.1
+- name: oauth
+ repo: upstream:python-packages/oauthlib
+ ref: fd239fca84644896b1971cf24bc6213d065adb86
+ unpetrify-ref: 0.072
+- name: pyserial
+ repo: upstream:python-packages/pyserial
+ ref: bcfc1ec2fdb9a8c9c867481d7673e85fe512e667
+ unpetrify-ref: release2_7
+- name: tempest
+ morph: strata/openstack-services/tempest.morph
+ repo: upstream:openstack/tempest.git
+ ref: acba5510785258949679304f3e1a55e53b851962
+ unpetrify-ref: "2"
+- name: tftp-hpa
+ morph: strata/openstack-services/tftp-hpa.morph
+ repo: upstream:tftp-hpa
+ ref: 4faf178a509d8091b7ba1e1fa3d13bc68c5ff55f
+ unpetrify-ref: tftp-hpa-5.2
+- name: singledispatch
+ morph: strata/openstack-services/singledispatch.morph
+ repo: upstream:python-packages/singledispatch
+ ref: 92175ba65602a03086d2b1f770f45d88af93fc3e
+ unpetrify-ref: 3.4.0.3
+- name: pysendfile
+ morph: strata/openstack-services/pysendfile.morph
+ repo: upstream:python-packages/pysendfile
+ ref: 6775b2938ef74255239c8e08458369921297b311
+ unpetrify-ref: release-2.0.0
+- name: ironic
+ morph: strata/openstack-services/ironic.morph
+ repo: upstream:openstack/ironic
+ ref: 0f4d454bf2093d3d62460f88aa9288bee3286b38
+ unpetrify-ref: stable/juno
diff --git a/strata/openstack-services/ceilometer.morph b/strata/openstack-services/ceilometer.morph
new file mode 100644
index 00000000..f120ce9e
--- /dev/null
+++ b/strata/openstack-services/ceilometer.morph
@@ -0,0 +1,23 @@
+name: ceilometer
+kind: chunk
+build-system: python-distutils
+post-install-commands:
+# Install the configuration files required to run ceilometer
+- mkdir -p "$DESTDIR"/etc/ceilometer
+- install -D -m 644 etc/ceilometer/*.json "$DESTDIR"/etc/ceilometer
+- install -D -m 644 etc/ceilometer/*.yaml "$DESTDIR"/etc/ceilometer
+- install -D -m 644 etc/ceilometer/api_paste.ini "$DESTDIR"/etc/ceilometer/api_paste.ini
+# Install rootwrap.conf
+- install -D -m 640 etc/ceilometer/rootwrap.conf "$DESTDIR"/etc/ceilometer/rootwrap.conf
+# Install the rootwrap filter files in the proper location
+- mkdir -p "$DESTDIR"/etc/ceilometer/rootwrap.d
+- install -D -m 644 etc/ceilometer/rootwrap.d/* "$DESTDIR"/etc/ceilometer/rootwrap.d/
+# Add ceilometer to sudoers, controlling which commands it may run as root
+# via the OpenStack rootwrap.
+- mkdir -p "$DESTDIR"/etc/sudoers.d
+- |
+ install -D -m 0440 /proc/self/fd/0 <<'EOF' "$DESTDIR"/etc/sudoers.d/ceilometer-rootwrap
+ Defaults:ceilometer !requiretty
+
+ ceilometer ALL=(root) NOPASSWD: /usr/bin/ceilometer-rootwrap /etc/ceilometer/rootwrap.conf *
+ EOF
diff --git a/strata/openstack-services/cinder.morph b/strata/openstack-services/cinder.morph
new file mode 100644
index 00000000..cd680b09
--- /dev/null
+++ b/strata/openstack-services/cinder.morph
@@ -0,0 +1,18 @@
+name: cinder
+kind: chunk
+build-system: python-distutils
+post-install-commands:
+# Install rootwrap.conf
+- install -D -m 640 etc/cinder/rootwrap.conf "$DESTDIR"/etc/cinder/rootwrap.conf
+# Install the rootwrap filter files in the proper location
+- mkdir -p "$DESTDIR"/etc/cinder/rootwrap.d
+- install -D -m 644 etc/cinder/rootwrap.d/* "$DESTDIR"/etc/cinder/rootwrap.d/
+# Add cinder to sudoers, controlling which commands it may run as root
+# via the OpenStack rootwrap.
+- mkdir -p "$DESTDIR"/etc/sudoers.d
+- |
+ install -D -m 0440 /proc/self/fd/0 <<'EOF' "$DESTDIR"/etc/sudoers.d/cinder-rootwrap
+ Defaults:cinder !requiretty
+
+ cinder ALL=(root) NOPASSWD: /usr/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *
+ EOF
diff --git a/strata/openstack-services/horizon.morph b/strata/openstack-services/horizon.morph
new file mode 100644
index 00000000..ae6399b1
--- /dev/null
+++ b/strata/openstack-services/horizon.morph
@@ -0,0 +1,59 @@
+name: horizon
+kind: chunk
+configure-commands:
+# Remove unnecessary .mo files; they will be generated
+# later during the package build.
+- find . -name "django*.mo" -exec rm -f '{}' \;
+# Set COMPRESS_OFFLINE=True
+- |
+ sed -i 's:COMPRESS_OFFLINE.=.False:COMPRESS_OFFLINE = True:' \
+ openstack_dashboard/settings.py
+build-commands:
+# Compile message strings
+- cd horizon && django-admin.py compilemessages && cd ..
+- cd openstack_dashboard && django-admin.py compilemessages && cd ..
+- python setup.py build
+post-build-commands:
+# Use the local_settings.py example to compile and compress the css, js, etc. files.
+# This is a hack to make SECRET_KEY work.
+- |
+ cp openstack_dashboard/local/local_settings.py.example \
+ openstack_dashboard/local/local_settings.py
+# Collect the static files and compress them.
+- python manage.py collectstatic --noinput
+- python manage.py compress --force
+install-commands:
+# Undo hack
+- |
+ cp openstack_dashboard/local/local_settings.py.example \
+ openstack_dashboard/local/local_settings.py
+# Install horizon
+- python setup.py install -O1 --skip-build --prefix "$PREFIX" --root "$DESTDIR"
+post-install-commands:
+# Remove unnecessary .po files
+- find "$DESTDIR" -name django.po -exec rm '{}' \;
+- find "$DESTDIR" -name djangojs.po -exec rm '{}' \;
+# Link the OpenStack local_settings.py to where openstack_dashboard is installed.
+- mkdir -p "$DESTDIR"/var/lib/horizon/openstack_dashboard/local
+- |
+ ln -sf /etc/horizon/openstack_dashboard/local_settings.py \
+ "$DESTDIR$PREFIX"/lib/python2.7/site-packages/openstack_dashboard/local/local_settings.py
+# Create the static directory (STATIC_ROOT) used in local_settings.py to hold
+# static assets such as CSS files.
+- mkdir -p "$DESTDIR"/var/lib/horizon/openstack_dashboard/static
+# Copy the compressed static files to horizon.
+- cp -a openstack_dashboard/static/* "$DESTDIR"/var/lib/horizon/openstack_dashboard/static
+- cp -a horizon/static/* "$DESTDIR"/var/lib/horizon/openstack_dashboard/static
+- cp -a static/* "$DESTDIR"/var/lib/horizon/openstack_dashboard/static
+# Workaround to make django.wsgi work with horizon
+# See: https://bugs.launchpad.net/osprofiler/+bug/1361235
+# and: https://git.openstack.org/cgit/openstack/tripleo-image-elements/commit/?id=41c9a1dfad23f8aee366afb6a0b20a6c57ec8f79
+- |
+ sed -i "s|'../..'|os.path.realpath('../..')|" \
+ "$DESTDIR$PREFIX"/lib/python2.7/site-packages/openstack_dashboard/wsgi/django.wsgi
+# And link this django.wsgi file to the horizon home directory
+- |
+ ln -sf "$PREFIX"/lib/python2.7/site-packages/openstack_dashboard/wsgi/django.wsgi \
+ "$DESTDIR"/var/lib/horizon/openstack_dashboard/django.wsgi
+# Create the horizon document root for apache configuration
+- mkdir -p "$DESTDIR"/var/lib/horizon/.blackhole
diff --git a/strata/openstack-services/ipaddr-py.morph b/strata/openstack-services/ipaddr-py.morph
new file mode 100644
index 00000000..f6691ab7
--- /dev/null
+++ b/strata/openstack-services/ipaddr-py.morph
@@ -0,0 +1,6 @@
+name: ipaddr-py
+kind: chunk
+build-commands:
+- cd trunk && python setup.py build
+install-commands:
+- cd trunk && python setup.py install --prefix "$PREFIX" --root "$DESTDIR"
diff --git a/strata/openstack-services/ironic.morph b/strata/openstack-services/ironic.morph
new file mode 100644
index 00000000..850399b4
--- /dev/null
+++ b/strata/openstack-services/ironic.morph
@@ -0,0 +1,17 @@
+name: ironic
+kind: chunk
+build-system: python-distutils
+post-install-commands:
+# Install rootwrap.conf
+- install -D -m 640 etc/ironic/rootwrap.conf "$DESTDIR"/etc/ironic/rootwrap.conf
+# Install the rootwrap filter files in the proper location
+- mkdir -p "$DESTDIR"/etc/ironic/rootwrap.d
+- install -m 644 etc/ironic/rootwrap.d/* "$DESTDIR"/etc/ironic/rootwrap.d/
+# Add ironic to sudoers, controlling which commands it may run as root
+# via the OpenStack rootwrap.
+- |
+ install -D -m 0440 /proc/self/fd/0 <<'EOF' "$DESTDIR"/etc/sudoers.d/ironic-rootwrap
+ Defaults:ironic !requiretty
+
+ ironic ALL=(root) NOPASSWD: /usr/bin/ironic-rootwrap /etc/ironic/rootwrap.conf *
+ EOF
diff --git a/strata/openstack-services/librabbitmq.morph b/strata/openstack-services/librabbitmq.morph
new file mode 100644
index 00000000..b87ce369
--- /dev/null
+++ b/strata/openstack-services/librabbitmq.morph
@@ -0,0 +1,10 @@
+name: librabbitmq
+kind: chunk
+build-system: python-distutils
+configure-commands:
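+# Prepare the bundled rabbitmq-c: link in the codegen, regenerate the autotools
+# files and stage the generated dist tree as ./clib for the python build.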
+- (cd rabbitmq-c; rm -rf codegen; ln -sf ../rabbitmq-codegen ./codegen)
+- (cd rabbitmq-c; autoreconf -fvi)
+- (cd rabbitmq-c; automake --add-missing)
+- (cd rabbitmq-c; ./configure --disable-tools --disable-docs)
+- (cd rabbitmq-c; make distdir)
+- mv rabbitmq-c/rabbitmq-c-0.5.3 clib
diff --git a/strata/openstack-services/neutron.morph b/strata/openstack-services/neutron.morph
new file mode 100644
index 00000000..b79a089b
--- /dev/null
+++ b/strata/openstack-services/neutron.morph
@@ -0,0 +1,17 @@
+name: neutron
+kind: chunk
+build-system: python-distutils
+post-install-commands:
+# Move rootwrap files to a proper location
+- mkdir -p "$DESTDIR"/etc/neutron
+- mv "$DESTDIR$PREFIX"/etc/neutron/rootwrap.d "$DESTDIR"/etc/neutron/
+- mv "$DESTDIR$PREFIX"/etc/neutron/rootwrap.conf "$DESTDIR"/etc/neutron/
+# Add neutron to sudoers, controlling which commands it may run as root
+# via the OpenStack rootwrap.
+- mkdir -p "$DESTDIR"/etc/sudoers.d
+- |
+ install -D -m 0440 /proc/self/fd/0 <<'EOF' "$DESTDIR"/etc/sudoers.d/neutron-rootwrap
+ Defaults:neutron !requiretty
+
+ neutron ALL=(root) NOPASSWD: /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *
+ EOF
diff --git a/strata/openstack-services/nova.morph b/strata/openstack-services/nova.morph
new file mode 100644
index 00000000..bb6dd756
--- /dev/null
+++ b/strata/openstack-services/nova.morph
@@ -0,0 +1,18 @@
+name: nova
+kind: chunk
+build-system: python-distutils
+post-install-commands:
+# Install rootwrap.conf
+- install -D -m 640 etc/nova/rootwrap.conf "$DESTDIR"/etc/nova/rootwrap.conf
+# Install the rootwrap filter files in the proper location
+- mkdir -p "$DESTDIR"/etc/nova/rootwrap.d
+- install -D -m 644 etc/nova/rootwrap.d/* "$DESTDIR"/etc/nova/rootwrap.d/
+# Add nova to sudoers, controlling which commands it may run as root
+# via the OpenStack rootwrap.
+- mkdir -p "$DESTDIR"/etc/sudoers.d
+- |
+ install -D -m 0440 /proc/self/fd/0 <<'EOF' "$DESTDIR"/etc/sudoers.d/nova-rootwrap
+ Defaults:nova !requiretty
+
+ nova ALL=(root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf *
+ EOF
diff --git a/strata/openstack-services/novnc.morph b/strata/openstack-services/novnc.morph
new file mode 100644
index 00000000..858320a0
--- /dev/null
+++ b/strata/openstack-services/novnc.morph
@@ -0,0 +1,11 @@
+name: novnc
+kind: chunk
+install-commands:
+- mkdir -p "$DESTDIR$PREFIX"/share/novnc/utils
+- install -m 444 *html "$DESTDIR$PREFIX"/share/novnc
+- install -m 444 vnc.html "$DESTDIR$PREFIX"/share/novnc/index.html
+- mkdir -p "$DESTDIR$PREFIX"/share/novnc/include
+- install -m 444 include/*.* "$DESTDIR$PREFIX"/share/novnc/include
+- mkdir -p "$DESTDIR$PREFIX"/share/novnc/images
+- install -m 444 images/*.* "$DESTDIR$PREFIX"/share/novnc/images
+- install -d "$DESTDIR"/etc/sysconfig
diff --git a/strata/openstack-services/open-iscsi.morph b/strata/openstack-services/open-iscsi.morph
new file mode 100644
index 00000000..9d135ab2
--- /dev/null
+++ b/strata/openstack-services/open-iscsi.morph
@@ -0,0 +1,45 @@
+name: open-iscsi
+kind: chunk
+build-commands:
+- make
+install-commands:
+# Rewrite prefix and exec_prefix which are set to "/usr" and "/" respectively
+- make prefix="$PREFIX" exec_prefix="$PREFIX" DESTDIR="$DESTDIR" install
+# Install the iscsistart app, which is not listed in PROGRAMS by default
+- make prefix="$PREFIX" exec_prefix="$PREFIX" DESTDIR="$DESTDIR" PROGRAMS="usr/iscsistart" install
+post-install-commands:
+# Configure iscsi daemon
+# Point the startup to the installed binary
+- |
+ sed -i -e "s|iscsid.startup = \/sbin\/iscsid|iscsid.startup = "$PREFIX"/sbin/iscsid|" \
+ etc/iscsid.conf
+# Start up a session automatically
+- sed -i -e 's|node.startup = manual|node.startup = automatic|' etc/iscsid.conf
+# Install config file
+- install -D -m 644 etc/iscsid.conf "$DESTDIR"/etc/iscsi/iscsid.conf
+# Install custom systemd unit file
+- |
+ install -D -m 644 /proc/self/fd/0 << 'EOF' "$DESTDIR$PREFIX"/lib/systemd/system/iscsid.service
+ [Unit]
+ Description=Open iSCSI Daemon
+ After=network.target
+
+ [Service]
+ Type=forking
+ ExecStart=/usr/sbin/iscsid
+
+ [Install]
+ WantedBy=multi-user.target
+ EOF
+# Install iscsi socket unit
+- |
+ install -D -m 644 /proc/self/fd/0 << 'EOF' "$DESTDIR$PREFIX"/lib/systemd/system/iscsid.socket
+ [Unit]
+ Description=Open-iSCSI iscsid Socket
+
+ [Socket]
+ ListenStream=@ISCSIADM_ABSTRACT_NAMESPACE
+
+ [Install]
+ WantedBy=sockets.target
+ EOF
diff --git a/strata/openstack-services/pies.morph b/strata/openstack-services/pies.morph
new file mode 100644
index 00000000..327c1dc9
--- /dev/null
+++ b/strata/openstack-services/pies.morph
@@ -0,0 +1,11 @@
+name: pies
+kind: chunk
+configure-commands:
+- |
+ cd pies2override &&
+ python setup.py build &&
+ python setup.py install --prefix "$PREFIX" --root "$DESTDIR"
+build-commands:
+- python setup.py build
+install-commands:
+- python setup.py install --prefix "$PREFIX" --root "$DESTDIR"
diff --git a/strata/openstack-services/pysendfile.morph b/strata/openstack-services/pysendfile.morph
new file mode 100644
index 00000000..2e2f809b
--- /dev/null
+++ b/strata/openstack-services/pysendfile.morph
@@ -0,0 +1,3 @@
+name: pysendfile
+kind: chunk
+build-system: python-distutils
diff --git a/strata/openstack-services/qpid-python.morph b/strata/openstack-services/qpid-python.morph
new file mode 100644
index 00000000..203b3db5
--- /dev/null
+++ b/strata/openstack-services/qpid-python.morph
@@ -0,0 +1,6 @@
+name: qpid-python
+kind: chunk
+build-commands:
+- cd qpid/python && python setup.py build
+install-commands:
+- cd qpid/python && python setup.py install --prefix "$PREFIX" --root "$DESTDIR"
diff --git a/strata/openstack-services/rabbitmq-codegen.morph b/strata/openstack-services/rabbitmq-codegen.morph
new file mode 100644
index 00000000..2b06aeb5
--- /dev/null
+++ b/strata/openstack-services/rabbitmq-codegen.morph
@@ -0,0 +1,7 @@
+name: rabbitmq-codegen
+kind: chunk
+configure-commands: []
+build-commands: []
+install-commands:
+- mkdir -p "$DESTDIR$PREFIX"/lib/rabbitmq-codegen
+- cp * "$DESTDIR$PREFIX"/lib/rabbitmq-codegen
diff --git a/strata/openstack-services/rabbitmq-server.morph b/strata/openstack-services/rabbitmq-server.morph
new file mode 100644
index 00000000..97b8a126
--- /dev/null
+++ b/strata/openstack-services/rabbitmq-server.morph
@@ -0,0 +1,16 @@
+name: rabbitmq-server
+kind: chunk
+configure-commands:
+- mkdir -p codegen
+- cp /usr/lib/rabbitmq-codegen/* codegen
+build-commands:
+- make
+install-commands:
+- |
+ make install_bin TARGET_DIR="$DESTDIR$PREFIX" \
+ SBIN_DIR="$DESTDIR$PREFIX"/sbin \
+ MAN_DIR="$PREFIX"/share/man \
+ DOC_INSTALL_DIR="$PREFIX"/share
+
+post-install-commands:
+- rm "$DESTDIR$PREFIX"/LICENSE* "$DESTDIR$PREFIX"/INSTALL
diff --git a/strata/openstack-services/rtslib-fb.morph b/strata/openstack-services/rtslib-fb.morph
new file mode 100644
index 00000000..26de3f9b
--- /dev/null
+++ b/strata/openstack-services/rtslib-fb.morph
@@ -0,0 +1,27 @@
+name: rtslib-fb
+kind: chunk
+build-system: python-distutils
+post-install-commands:
+- mkdir -p "$DESTDIR$PREFIX"/lib/systemd/system
+
+# The following systemd unit is taken from the Fedora package; see
+# http://pkgs.fedoraproject.org/cgit/python-rtslib.git/tree/target.service for
+# more information.
+
+- |
+ install -D -m 0644 /proc/self/fd/0 <<'EOF' "$DESTDIR$PREFIX"/lib/systemd/system/target.service
+ [Unit]
+ Description=Restore LIO kernel target configuration
+ Requires=sys-kernel-config.mount
+ After=sys-kernel-config.mount network.target local-fs.target
+
+ [Service]
+ Type=oneshot
+ RemainAfterExit=yes
+ ExecStart=/usr/bin/targetctl restore
+ ExecStop=/usr/bin/targetctl clear
+ SyslogIdentifier=target
+
+ [Install]
+ WantedBy=multi-user.target
+ EOF
diff --git a/strata/openstack-services/singledispatch.morph b/strata/openstack-services/singledispatch.morph
new file mode 100644
index 00000000..919c7096
--- /dev/null
+++ b/strata/openstack-services/singledispatch.morph
@@ -0,0 +1,3 @@
+name: singledispatch
+kind: chunk
+build-system: python-distutils
diff --git a/strata/openstack-services/tempest.morph b/strata/openstack-services/tempest.morph
new file mode 100644
index 00000000..cffb7d33
--- /dev/null
+++ b/strata/openstack-services/tempest.morph
@@ -0,0 +1,12 @@
+name: tempest
+kind: chunk
+build-system: python-distutils
+post-install-commands:
+# Install files and folders required to run tempest
+- mkdir -p "$DESTDIR"/etc/tempest
+- cp -r tempest "$DESTDIR"/etc/tempest/
+- cp -r tools "$DESTDIR"/etc/tempest/
+- cp setup.py tox.ini "$DESTDIR"/etc/tempest/
+- cp run_tests.sh run_tempest.sh "$DESTDIR"/etc/tempest/
+- cp .testr.conf "$DESTDIR"/etc/tempest/
+- cp etc/logging.conf.sample "$DESTDIR"/etc/tempest/logging.conf
diff --git a/strata/openstack-services/tftp-hpa.morph b/strata/openstack-services/tftp-hpa.morph
new file mode 100644
index 00000000..d466d2d4
--- /dev/null
+++ b/strata/openstack-services/tftp-hpa.morph
@@ -0,0 +1,5 @@
+name: tftp-hpa
+build-system: autotools
+kind: chunk
+install-commands:
+- make INSTALLROOT="$DESTDIR" install
diff --git a/strata/openstack-services/thrift.morph b/strata/openstack-services/thrift.morph
new file mode 100644
index 00000000..39062351
--- /dev/null
+++ b/strata/openstack-services/thrift.morph
@@ -0,0 +1,6 @@
+name: thrift
+kind: chunk
+build-system: autotools
+max-jobs: 1
+pre-configure-commands:
+- ./bootstrap.sh
diff --git a/strata/ostree-core.morph b/strata/ostree-core.morph
new file mode 100644
index 00000000..bcf7d61b
--- /dev/null
+++ b/strata/ostree-core.morph
@@ -0,0 +1,16 @@
+name: ostree-core
+kind: stratum
+build-depends:
+- morph: strata/core.morph
+- morph: strata/libsoup-common.morph
+chunks:
+- name: libgsystem
+ repo: upstream:libgsystem
+ ref: 8231b8ad4a4ee35e4b11fae5f6e7cddabf1c51ae
+ unpetrify-ref: master
+- name: ostree
+ repo: upstream:ostree
+ ref: c9704e9802dfeda9b5a138535c59e98df3dd7196
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - libgsystem
diff --git a/strata/pcre-utils.morph b/strata/pcre-utils.morph
new file mode 100644
index 00000000..8d0c2489
--- /dev/null
+++ b/strata/pcre-utils.morph
@@ -0,0 +1,9 @@
+name: pcre-utils
+kind: stratum
+build-depends:
+- morph: strata/tools.morph
+chunks:
+- name: pcre
+ repo: upstream:pcre
+ ref: 2720152c58e13e7cc7403642ec33127101b9971b
+ unpetrify-ref: baserock/morph
diff --git a/strata/python-cliapp.morph b/strata/python-cliapp.morph
new file mode 100644
index 00000000..495a6a2e
--- /dev/null
+++ b/strata/python-cliapp.morph
@@ -0,0 +1,24 @@
+name: python-cliapp
+kind: stratum
+description: |
+ Cliapp command line application framework.
+build-depends:
+- morph: strata/python-core.morph
+chunks:
+- name: python-coveragepy
+ morph: strata/python-cliapp/python-coveragepy.morph
+ repo: upstream:python-coveragepy
+ ref: 77d2e3bfd8fb325092aaed37ba1378054d182d19
+ unpetrify-ref: baserock/morph
+- name: python-coverage-test-runner
+ repo: upstream:python-coverage-test-runner
+ ref: 8ea9421ac3384b2e88e0c36f2cfa52586c4798b7
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - python-coveragepy
+- name: cliapp
+ repo: upstream:cliapp
+ ref: cec20cedd062a3aef1b04f997e77b45090c07806
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - python-coverage-test-runner
diff --git a/strata/python-cliapp/python-coveragepy.morph b/strata/python-cliapp/python-coveragepy.morph
new file mode 100644
index 00000000..ed5e3d87
--- /dev/null
+++ b/strata/python-cliapp/python-coveragepy.morph
@@ -0,0 +1,6 @@
+name: python-coveragepy
+kind: chunk
+build-commands:
+- python setup.py build
+install-commands:
+- python setup.py install --prefix=/usr --root "$DESTDIR"
diff --git a/strata/python-common.morph b/strata/python-common.morph
new file mode 100644
index 00000000..e724d912
--- /dev/null
+++ b/strata/python-common.morph
@@ -0,0 +1,74 @@
+name: python-common
+kind: stratum
+description: Common dependencies of some python chunks
+build-depends:
+- morph: strata/python-core.morph
+chunks:
+- name: pycrypto
+ morph: strata/python-common/pycrypto.morph
+ repo: upstream:python-packages/pycrypto
+ ref: af058ee6f5da391a05275470ab4a4a96aa22b350
+ unpetrify-ref: v2.7a1
+- name: ecdsa
+ repo: upstream:python-packages/ecdsa
+ ref: 36e9cfa80fcf8b53119adc787e54a5892ec1eb2c
+ unpetrify-ref: python-ecdsa-0.11
+- name: paramiko
+ repo: upstream:paramiko
+ ref: 424ba615c2a94d3b059e7f24db1a1093a92d8d22
+ unpetrify-ref: v1.15.2
+ build-depends:
+ - pycrypto
+ - ecdsa
+- name: markupsafe
+ repo: upstream:markupsafe
+ ref: feb1d70c16df62f60dcb521d127fdad8819fc036
+ unpetrify-ref: 0.23
+- name: jinja2
+ repo: upstream:jinja2
+ ref: 762c612e7276889aac265645da00e62e33d1573c
+ unpetrify-ref: 2.7.3
+ build-depends:
+ - markupsafe
+- name: python-json-pointer
+ repo: upstream:python-json-pointer
+ ref: 34073e561261cb413b9bdff5beac31b070d98ea2
+ unpetrify-ref: v1.4
+- name: python-json-patch
+ repo: upstream:python-json-patch
+ ref: e4da658a5dc9f68d3386017ffdcc8e07d22b51a3
+ unpetrify-ref: v1.8
+ build-depends:
+ - python-json-pointer
+- name: python-prettytable
+ repo: upstream:python-prettytable
+ ref: 7a48f1e84049577370cf28632a75d2fd01e4142d
+ unpetrify-ref: master
+- name: configobj
+ repo: upstream:configobj-git
+ ref: 9d2aab01c77dce600b296ba9da1163cc0bbc14e0
+ unpetrify-ref: v5.0.6
+- name: python-mimeparse
+ repo: upstream:python-packages/python-mimeparse.git
+ ref: 2d600d3fc4a386af69d20fba433843b4df2b3c92
+ unpetrify-ref: master
+- name: kazoo
+ repo: upstream:python-packages/kazoo.git
+ ref: 93a718ea4c20df797766742c3d74de281613c651
+ unpetrify-ref: 2.0
+- name: webob
+ repo: upstream:python-packages/webob
+ ref: ae0ac90f22f9e1ff351e445f92330efd89195f51
+ unpetrify-ref: 1.4
+- name: python-decorator
+ repo: upstream:python-packages/python-decorator
+ ref: b02f7a254c3679dfac57a824e08dd02ced850636
+ unpetrify-ref: decorator-3.4.0
+- name: networkx
+ repo: upstream:python-packages/networkx
+ ref: 8ac796aba866ade1a56b83e30f25ed392ca3512c
+ unpetrify-ref: networkx-1.9.1
+- name: boto
+ repo: upstream:boto
+ ref: 2517f660f8ef9012708d46da3a36ab967993d2f6
+ unpetrify-ref: 2.33.0
diff --git a/strata/python-common/pycrypto.morph b/strata/python-common/pycrypto.morph
new file mode 100644
index 00000000..51cc00f0
--- /dev/null
+++ b/strata/python-common/pycrypto.morph
@@ -0,0 +1,3 @@
+name: pycrypto
+kind: chunk
+build-system: python-distutils
diff --git a/strata/python-core.morph b/strata/python-core.morph
new file mode 100644
index 00000000..0f9d1653
--- /dev/null
+++ b/strata/python-core.morph
@@ -0,0 +1,52 @@
+name: python-core
+kind: stratum
+description: Core python packages
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: python-setuptools
+ morph: strata/core/python-setuptools.morph
+ repo: upstream:python-setuptools-bitbucket
+ ref: 0aa6a4de5931d02876428388678802db2371fd37
+ unpetrify-ref: baserock/master
+- name: mako
+ repo: upstream:python-packages/mako.git
+ ref: 285bc818a50ccc0f9549630f7c4f4c250585c3e7
+ unpetrify-ref: rel_1_0_0
+ build-depends:
+ - python-setuptools
+- name: pip
+ repo: upstream:pip
+ ref: ea680f204fb0e48789710c22c8f597a9bf01bc16
+ unpetrify-ref: baserock/master
+ build-depends:
+ - python-setuptools
+- name: pbr
+ repo: upstream:pbr
+ ref: aef4f7ef4faec987d553d1ca40b55951235af0b1
+ unpetrify-ref: 0.10.7
+ build-depends:
+ - pip
+- name: python-requests
+ repo: upstream:python-requests
+ ref: b83131779c701720a9ae9efae78996277d416269
+ unpetrify-ref: v2.5.1
+- name: six
+ repo: upstream:six
+ ref: 8cfbff6b764af86d825086fa1637aa009e90d75a
+ unpetrify-ref: 1.9.0
+- name: pyyaml
+ morph: strata/python-core/pyyaml.morph
+ repo: upstream:pyyaml
+ ref: d9fbcceaed39d955f6871b07c61dc42f824285c1
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - python-setuptools
+- name: python-lxml
+ repo: upstream:python-lxml
+ ref: 14505bc62f5f1fc9fb0ff007955f3e67ab4562bb
+ unpetrify-ref: lxml-3.4.0
+- name: python-markdown
+ repo: upstream:python-markdown
+ ref: f0c5b71acbc02af60a33d67c59558bb513b25e74
+ unpetrify-ref: 2.5.1-final
diff --git a/strata/python-core/pyyaml.morph b/strata/python-core/pyyaml.morph
new file mode 100644
index 00000000..8ebd7b57
--- /dev/null
+++ b/strata/python-core/pyyaml.morph
@@ -0,0 +1,6 @@
+name: pyyaml
+kind: chunk
+build-commands:
+- python setup.py --without-libyaml build
+install-commands:
+- python setup.py --without-libyaml install --prefix="$PREFIX" --root "$DESTDIR"
diff --git a/strata/python-pygobject.morph b/strata/python-pygobject.morph
new file mode 100644
index 00000000..39f74f6d
--- /dev/null
+++ b/strata/python-pygobject.morph
@@ -0,0 +1,12 @@
+name: python-pygobject
+kind: stratum
+description: |
+ Python GObject bindings
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: pygobject
+ morph: strata/python-pygobject/pygobject.morph
+ repo: upstream:pygobject
+ ref: 276341d7ddab180020c31e6837bd28fd25784de0
+ unpetrify-ref: baserock/morph
diff --git a/strata/python-pygobject/pygobject.morph b/strata/python-pygobject/pygobject.morph
new file mode 100644
index 00000000..a423537e
--- /dev/null
+++ b/strata/python-pygobject/pygobject.morph
@@ -0,0 +1,5 @@
+name: pygobject
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./autogen.sh --prefix="$PREFIX" --disable-cairo
diff --git a/strata/python-tools.morph b/strata/python-tools.morph
new file mode 100644
index 00000000..a84697af
--- /dev/null
+++ b/strata/python-tools.morph
@@ -0,0 +1,25 @@
+name: python-tools
+kind: stratum
+description: "A stratum for non-essential python tools:
+useful python tools that we don't want to include in core."
+build-depends:
+- morph: strata/python-core.morph
+chunks:
+- name: virtualenv
+ repo: upstream:python-packages/virtualenv
+ ref: 9205ff46a67130e8835f14bb4f802fd59e7dcf2c
+ unpetrify-ref: 12.0.5
+
+## Pylint - required in openstack-services.morph
+- name: astroid
+ repo: upstream:python-packages/astroid
+ ref: 194dc114a33b80b4bfbbeb73f36712848f696025
+ unpetrify-ref: astroid-1.3.5
+- name: logilab-common
+ repo: upstream:python-packages/logilab-common
+ ref: d1d8f793af6d015d885b9ea67b5741d5a093e2f4
+ unpetrify-ref: logilab-common-version-0.62.0
+- name: pylint
+ repo: upstream:python-packages/pylint
+ ref: ba998d7a4e5fce0ea3a3e701ff446bbe4ca406b5
+ unpetrify-ref: pylint-1.4.2
diff --git a/strata/python-wsgi.morph b/strata/python-wsgi.morph
new file mode 100644
index 00000000..289d27ba
--- /dev/null
+++ b/strata/python-wsgi.morph
@@ -0,0 +1,15 @@
+name: python-wsgi
+kind: stratum
+description: |
+ Python modules for web applications using Web Server Gateway Interface.
+build-depends:
+- morph: strata/python-core.morph
+chunks:
+- name: bottle
+ repo: upstream:bottle
+ ref: 5238c615b3ec198fedebb0fcaad4458e3d68d70f
+ unpetrify-ref: baserock/morph
+- name: flup
+ repo: upstream:flup
+ ref: 0f97c5e0ab7d9827506120efc22af3a9c21d1d70
+ unpetrify-ref: baserock/morph
diff --git a/strata/python3-core.morph b/strata/python3-core.morph
new file mode 100644
index 00000000..192e9201
--- /dev/null
+++ b/strata/python3-core.morph
@@ -0,0 +1,11 @@
+name: python3-core
+kind: stratum
+description: Core python3 packages
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: python3
+ morph: strata/python3-core/python3.morph
+ repo: upstream:cpython
+ ref: d1414cefaddc7b56caef75eea87226b1fce5ca7c
+ unpetrify-ref: v3.4.2
diff --git a/strata/python3-core/python3.morph b/strata/python3-core/python3.morph
new file mode 100644
index 00000000..f02452d4
--- /dev/null
+++ b/strata/python3-core/python3.morph
@@ -0,0 +1,7 @@
+name: python3
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./configure --prefix="$PREFIX" --enable-shared
+post-install-commands:
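+# Ensure a 'python3' symlink exists, pointing at the versioned python3.4 binary.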
+- test -x "$DESTDIR"/"$PREFIX"/bin/python3 || ln -s python3.4 "$DESTDIR"/"$PREFIX"/bin/python3
diff --git a/strata/qt4-sdk.morph b/strata/qt4-sdk.morph
new file mode 100644
index 00000000..0533ea58
--- /dev/null
+++ b/strata/qt4-sdk.morph
@@ -0,0 +1,11 @@
+name: qt4-sdk
+kind: stratum
+description: Qt4 Desktop Environment, IDE and Example Apps
+build-depends:
+- morph: strata/qt4-tools.morph
+chunks:
+- name: qt-creator
+ morph: strata/qt4-sdk/qt-creator.morph
+ repo: upstream:qt-creator
+ ref: d5a6b10634c1a3271012e9578e016772ef077d59
+ unpetrify-ref: baserock/morph/2.7
diff --git a/strata/qt4-sdk/qt-creator.morph b/strata/qt4-sdk/qt-creator.morph
new file mode 100644
index 00000000..76d9f7d7
--- /dev/null
+++ b/strata/qt4-sdk/qt-creator.morph
@@ -0,0 +1,9 @@
+name: qt-creator
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- ./qhelpgenerator.sh
diff --git a/strata/qt4-tools.morph b/strata/qt4-tools.morph
new file mode 100644
index 00000000..698dc5ce
--- /dev/null
+++ b/strata/qt4-tools.morph
@@ -0,0 +1,33 @@
+name: qt4-tools
+kind: stratum
+description: Qt4 Development Libraries and Tools
+build-depends:
+- morph: strata/tools.morph
+- morph: strata/x-generic.morph
+- morph: strata/multimedia-gstreamer-0.10.morph
+chunks:
+- name: icu
+ morph: strata/qt4-tools/icu.morph
+ repo: upstream:icu
+ ref: ba023548a3bff7277cbea4acade3042ce9d8949e
+ unpetrify-ref: baserock/morph
+- name: ruby-1.8
+ morph: strata/qt4-tools/ruby-1.8.morph
+ repo: upstream:ruby
+ ref: 7a24f1710028d568ad61d0aa49d5178260178d77
+ unpetrify-ref: baserock/morph/ruby_1_8_7
+- name: ruby-1.9
+ morph: strata/qt4-tools/ruby-1.9.morph
+ repo: upstream:ruby
+ ref: cb3ea602294b5038b5f7ac21d3875a2b52342956
+ unpetrify-ref: baserock/morph/ruby_1_9_3
+ build-depends:
+ - ruby-1.8
+- name: qt4-tools
+ morph: strata/qt4-tools/qt4-tools.morph
+ repo: upstream:qt4-tools
+ ref: a182f020bc1703c20d86ba18f2f6b4ea8889de84
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - icu
+ - ruby-1.9
diff --git a/strata/qt4-tools/icu.morph b/strata/qt4-tools/icu.morph
new file mode 100644
index 00000000..37dec07e
--- /dev/null
+++ b/strata/qt4-tools/icu.morph
@@ -0,0 +1,8 @@
+name: icu
+kind: chunk
+configure-commands:
+- cd source; ./runConfigureICU Linux --prefix=/usr
+build-commands:
+- cd source; unset TARGET ; make
+install-commands:
+- cd source; unset TARGET ; make DESTDIR="$DESTDIR" install
diff --git a/strata/qt4-tools/qt4-tools.morph b/strata/qt4-tools/qt4-tools.morph
new file mode 100644
index 00000000..bc4cc17e
--- /dev/null
+++ b/strata/qt4-tools/qt4-tools.morph
@@ -0,0 +1,18 @@
+name: qt4-tools
+kind: chunk
+max-jobs: 1
+configure-commands:
+- |
+ arch=$(uname -m)
+ if [ "${arch}" != "${arch//arm/}" ] ; then
+ sed 's/g++-unix.conf)/&\nQMAKE_CXXFLAGS += -fno-strict-volatile-bitfields/' \
+ -i mkspecs/linux-g++/qmake.conf
+ else
+ echo Running on x86, not modifying qmake.conf
+ fi
+- ./configure -v -prefix /usr -opensource -confirm-license
+build-commands:
+- make
+- LD_LIBRARY_PATH="$PWD"/lib QT_PLUGIN_PATH="$PWD"/plugins make docs
+install-commands:
+- make install INSTALL_ROOT="$DESTDIR"
diff --git a/strata/qt4-tools/ruby-1.8.morph b/strata/qt4-tools/ruby-1.8.morph
new file mode 100644
index 00000000..cee282c0
--- /dev/null
+++ b/strata/qt4-tools/ruby-1.8.morph
@@ -0,0 +1,9 @@
+name: ruby-1.8
+kind: chunk
+configure-commands:
+- autoconf
+- ./configure --prefix=/usr --program-suffix=-1.8
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/qt4-tools/ruby-1.9.morph b/strata/qt4-tools/ruby-1.9.morph
new file mode 100644
index 00000000..bc697ada
--- /dev/null
+++ b/strata/qt4-tools/ruby-1.9.morph
@@ -0,0 +1,9 @@
+name: ruby-1.9
+kind: chunk
+configure-commands:
+- autoconf
+- ./configure --prefix=/usr --enable-shared --with-baseruby=/usr/bin/ruby-1.8
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/qt5-sdk.morph b/strata/qt5-sdk.morph
new file mode 100644
index 00000000..d4c76ee8
--- /dev/null
+++ b/strata/qt5-sdk.morph
@@ -0,0 +1,12 @@
+name: qt5-sdk
+kind: stratum
+description: Qt5 Desktop Environment, IDE and Example Apps
+build-depends:
+- morph: strata/qt5-tools.morph
+- morph: strata/qt5-tools-qtwebkit.morph
+chunks:
+- name: qt-creator
+ morph: strata/qt5-sdk/qt-creator.morph
+ repo: upstream:qt-creator
+ ref: d81cd236df1cc6bc6977c438f0edbff35eef6682
+ unpetrify-ref: baserock/3.3.0
diff --git a/strata/qt5-sdk/qt-creator.morph b/strata/qt5-sdk/qt-creator.morph
new file mode 100644
index 00000000..91fae589
--- /dev/null
+++ b/strata/qt5-sdk/qt-creator.morph
@@ -0,0 +1,8 @@
+name: qt-creator
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-sdk/snowshoe.morph b/strata/qt5-sdk/snowshoe.morph
new file mode 100644
index 00000000..f89ab751
--- /dev/null
+++ b/strata/qt5-sdk/snowshoe.morph
@@ -0,0 +1,8 @@
+name: snowshoe
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools-qtmultimedia.morph b/strata/qt5-tools-qtmultimedia.morph
new file mode 100644
index 00000000..20473ec2
--- /dev/null
+++ b/strata/qt5-tools-qtmultimedia.morph
@@ -0,0 +1,12 @@
+name: qt5-tools-qtmultimedia
+kind: stratum
+description: Qt5 QtMultimedia Development Libraries and Tools
+build-depends:
+- morph: strata/qt5-tools.morph
+- morph: strata/multimedia-gstreamer.morph
+chunks:
+- name: qtmultimedia
+ morph: strata/qt5-tools/qtmultimedia.morph
+ repo: upstream:qt5/qtmultimedia
+ ref: a42a5ae6191ecbe317a00d8261bc53e842967052
+ unpetrify-ref: baserock/qt5.4-wip-gstreamer-1.0
diff --git a/strata/qt5-tools-qtwebkit.morph b/strata/qt5-tools-qtwebkit.morph
new file mode 100644
index 00000000..53d5a682
--- /dev/null
+++ b/strata/qt5-tools-qtwebkit.morph
@@ -0,0 +1,34 @@
+name: qt5-tools-qtwebkit
+kind: stratum
+description: Qt5 WebKit Development Libraries and Tools
+build-depends:
+- morph: strata/multimedia-gstreamer.morph
+- morph: strata/qt5-tools.morph
+- morph: strata/ruby.morph
+chunks:
+- name: qtwebkit
+ morph: strata/qt5-tools/qtwebkit.morph
+ repo: upstream:qt5/qtwebkit
+ ref: 586bdc38324dfaeec65389bf7646c82cb35db017
+ unpetrify-ref: "5.4.2"
+- name: qtwebkit-examples
+ morph: strata/qt5-tools/qtwebkit-examples.morph
+ repo: upstream:qt5/qtwebkit-examples
+ ref: 70bd4d5253f134bf48a30544030bb832f1eba8b3
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtwebkit
+- name: qttools
+ morph: strata/qt5-tools/qttools.morph
+ repo: upstream:qt5/qttools
+ ref: 5060a0ec7dcb26826b19eefbd6063efcbde6101f
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtwebkit
+- name: qttranslations
+ morph: strata/qt5-tools/qttranslations.morph
+ repo: upstream:qt5/qttranslations
+ ref: 3aad4ab4718d4bf952275a07e406e93eb6a22eed
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qttools
diff --git a/strata/qt5-tools.morph b/strata/qt5-tools.morph
new file mode 100644
index 00000000..7fb8403e
--- /dev/null
+++ b/strata/qt5-tools.morph
@@ -0,0 +1,137 @@
+name: qt5-tools
+kind: stratum
+description: Qt5 Development Libraries and Tools
+build-depends:
+- morph: strata/tools.morph
+- morph: strata/x-generic.morph
+chunks:
+- name: icu
+ morph: strata/qt5-tools/icu.morph
+ repo: upstream:icu
+ ref: ba023548a3bff7277cbea4acade3042ce9d8949e
+ unpetrify-ref: baserock/morph
+- name: qtbase
+ morph: strata/qt5-tools/qtbase.morph
+ repo: upstream:qt5/qtbase
+ ref: 5367fa356233da4c0f28172a8f817791525f5457
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - icu
+- name: qtsvg
+ morph: strata/qt5-tools/qtsvg.morph
+ repo: upstream:qt5/qtsvg
+ ref: eece19e62638409b479cabcbb985978d61d84307
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtbase
+- name: qtscript
+ morph: strata/qt5-tools/qtscript.morph
+ repo: upstream:qt5/qtscript
+ ref: 1371263991489f11774250aa609ace0b12415186
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtbase
+- name: qtdeclarative
+ morph: strata/qt5-tools/qtdeclarative.morph
+ repo: upstream:qt5/qtdeclarative
+ ref: fdf004803d036583f58ceb832803cfe39c6ba6d8
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtbase
+ - qtscript
+- name: qtquick1
+ morph: strata/qt5-tools/qtquick1.morph
+ repo: upstream:qt5/qtquick1
+ ref: 1f92491bd95abc642dcd882846884a5271c8ba16
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtbase
+ - qtscript
+- name: qt3d
+ morph: strata/qt5-tools/qt3d.morph
+ repo: upstream:qt5/qt3d
+ ref: bdb98baf8253c69949a8c259369203da9ffb269c
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - qtbase
+ - qtscript
+ - qtdeclarative
+- name: qtquickcontrols
+ morph: strata/qt5-tools/qtquickcontrols.morph
+ repo: upstream:qt5/qtquickcontrols
+ ref: ea099e341b5f8845be56f81b22e44a8b6cb227a2
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtbase
+ - qtdeclarative
+- name: qtsensors
+ morph: strata/qt5-tools/qtsensors.morph
+ repo: upstream:qt5/qtsensors
+ ref: 10702cfa8ce803e0911306284c246a879d865f1d
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtbase
+- name: qtserialport
+ morph: strata/qt5-tools/qtserialport.morph
+ repo: upstream:qt5/qtserialport
+ ref: 9aef6c3c8ff97b7998bdd813f7c7f3d845b2a53f
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtbase
+- name: qtx11extras
+ morph: strata/qt5-tools/qtx11extras.morph
+ repo: upstream:qt5/qtx11extras
+ ref: a21bbb45019e1bbc7b583811ff2e84757082a01a
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtbase
+- name: qtxmlpatterns
+ morph: strata/qt5-tools/qtxmlpatterns.morph
+ repo: upstream:qt5/qtxmlpatterns
+ ref: 2e8b90b33d1d8dce4acdb6a116a227f2a5d3e044
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtbase
+- name: qtdoc
+ morph: strata/qt5-tools/qtdoc.morph
+ repo: upstream:qt5/qtdoc
+ ref: 251f5eaa2dae740c0d0217893038a3f5bab2bca3
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtbase
+- name: qtgraphicaleffects
+ morph: strata/qt5-tools/qtgraphicaleffects.morph
+ repo: upstream:qt5/qtgraphicaleffects
+ ref: 9e9600d92224084f7942a6bd83b926c630a29747
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtbase
+ - qtdeclarative
+- name: qtimageformats
+ morph: strata/qt5-tools/qtimageformats.morph
+ repo: upstream:qt5/qtimageformats
+ ref: fdc7026fb7d098abbf135fd5e7d2cf00884f9235
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtbase
+- name: qtconnectivity
+ morph: strata/qt5-tools/qtconnectivity.morph
+ repo: upstream:qt5/qtconnectivity
+ ref: 5d405d07fb51502d0bfab08f4d74aa5eba365bab
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtbase
+- name: qtlocation
+ morph: strata/qt5-tools/qtlocation.morph
+ repo: upstream:qt5/qtlocation
+ ref: 7791dd790d2b15751b703db70a5fceb31d3fd99f
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtbase
+- name: qtwebsockets
+ morph: strata/qt5-tools/qtwebsockets.morph
+ repo: upstream:qt5/qtwebsockets
+ ref: d1c9ede19976767393fe5db2f396c0c18d6fbced
+ unpetrify-ref: v5.4.0
+ build-depends:
+ - qtbase
diff --git a/strata/qt5-tools/icu.morph b/strata/qt5-tools/icu.morph
new file mode 100644
index 00000000..f9532f04
--- /dev/null
+++ b/strata/qt5-tools/icu.morph
@@ -0,0 +1,9 @@
+name: icu
+kind: chunk
+configure-commands:
+- sed -e 's/LDFLAGSICUDT/#LDFLAGSICUDT/' -i source/config/mh-linux
+- cd source; ./runConfigureICU Linux --prefix=/usr
+build-commands:
+- cd source; unset TARGET ; make
+install-commands:
+- cd source; unset TARGET ; make DESTDIR="$DESTDIR" install
diff --git a/strata/qt5-tools/qt3d.morph b/strata/qt5-tools/qt3d.morph
new file mode 100644
index 00000000..d4e44d4e
--- /dev/null
+++ b/strata/qt5-tools/qt3d.morph
@@ -0,0 +1,10 @@
+name: qt3d
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+- make html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtbase.morph b/strata/qt5-tools/qtbase.morph
new file mode 100644
index 00000000..9b2a20c6
--- /dev/null
+++ b/strata/qt5-tools/qtbase.morph
@@ -0,0 +1,17 @@
+name: qtbase
+kind: chunk
+configure-commands:
+- ./configure -v -prefix /usr -opensource -confirm-license
+build-commands:
+- make
+- touch /qtbase.build/src/corelib/corelib.pro
+- touch /qtbase.build/qmake/qmake.pro
+- touch /qtbase.build/qmake/qmake-docs.pro
+- ./bin/qmake -set QDOC /qtbase.build/bin/qdoc
+- make html_docs
+- ./bin/qmake -unset QDOC
+install-commands:
+- make install INSTALL_ROOT="$DESTDIR"
+- ./bin/qmake -set QDOC /qtbase.build/bin/qdoc
+- make install_html_docs INSTALL_ROOT="$DESTDIR"
+- ./bin/qmake -unset QDOC
diff --git a/strata/qt5-tools/qtconnectivity.morph b/strata/qt5-tools/qtconnectivity.morph
new file mode 100644
index 00000000..d1123711
--- /dev/null
+++ b/strata/qt5-tools/qtconnectivity.morph
@@ -0,0 +1,10 @@
+name: qtconnectivity
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+- make html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtdeclarative.morph b/strata/qt5-tools/qtdeclarative.morph
new file mode 100644
index 00000000..9eb02331
--- /dev/null
+++ b/strata/qt5-tools/qtdeclarative.morph
@@ -0,0 +1,10 @@
+name: qtdeclarative
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+- make html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtdoc.morph b/strata/qt5-tools/qtdoc.morph
new file mode 100644
index 00000000..3b449382
--- /dev/null
+++ b/strata/qt5-tools/qtdoc.morph
@@ -0,0 +1,8 @@
+name: qtdoc
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtgraphicaleffects.morph b/strata/qt5-tools/qtgraphicaleffects.morph
new file mode 100644
index 00000000..27c42971
--- /dev/null
+++ b/strata/qt5-tools/qtgraphicaleffects.morph
@@ -0,0 +1,8 @@
+name: qtgraphicaleffects
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtimageformats.morph b/strata/qt5-tools/qtimageformats.morph
new file mode 100644
index 00000000..08323ca1
--- /dev/null
+++ b/strata/qt5-tools/qtimageformats.morph
@@ -0,0 +1,10 @@
+name: qtimageformats
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+- make html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtjsbackend.morph b/strata/qt5-tools/qtjsbackend.morph
new file mode 100644
index 00000000..5813084c
--- /dev/null
+++ b/strata/qt5-tools/qtjsbackend.morph
@@ -0,0 +1,10 @@
+name: qtjsbackend
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+- make html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtlocation.morph b/strata/qt5-tools/qtlocation.morph
new file mode 100644
index 00000000..14a527ff
--- /dev/null
+++ b/strata/qt5-tools/qtlocation.morph
@@ -0,0 +1,10 @@
+name: qtlocation
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+- make html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtmultimedia.morph b/strata/qt5-tools/qtmultimedia.morph
new file mode 100644
index 00000000..537baa08
--- /dev/null
+++ b/strata/qt5-tools/qtmultimedia.morph
@@ -0,0 +1,10 @@
+name: qtmultimedia
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+- make install_html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtquick1.morph b/strata/qt5-tools/qtquick1.morph
new file mode 100644
index 00000000..d4692445
--- /dev/null
+++ b/strata/qt5-tools/qtquick1.morph
@@ -0,0 +1,10 @@
+name: qtquick1
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+- make html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtquickcontrols.morph b/strata/qt5-tools/qtquickcontrols.morph
new file mode 100644
index 00000000..12051034
--- /dev/null
+++ b/strata/qt5-tools/qtquickcontrols.morph
@@ -0,0 +1,10 @@
+name: qtquickcontrols
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+- make html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtscript.morph b/strata/qt5-tools/qtscript.morph
new file mode 100644
index 00000000..6f27842e
--- /dev/null
+++ b/strata/qt5-tools/qtscript.morph
@@ -0,0 +1,10 @@
+name: qtscript
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+- make html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtsensors.morph b/strata/qt5-tools/qtsensors.morph
new file mode 100644
index 00000000..d4b4ec00
--- /dev/null
+++ b/strata/qt5-tools/qtsensors.morph
@@ -0,0 +1,10 @@
+name: qtsensors
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+- make html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtserialport.morph b/strata/qt5-tools/qtserialport.morph
new file mode 100644
index 00000000..0a623865
--- /dev/null
+++ b/strata/qt5-tools/qtserialport.morph
@@ -0,0 +1,10 @@
+name: qtserialport
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+- make html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtsvg.morph b/strata/qt5-tools/qtsvg.morph
new file mode 100644
index 00000000..40316a3c
--- /dev/null
+++ b/strata/qt5-tools/qtsvg.morph
@@ -0,0 +1,10 @@
+name: qtsvg
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+- make html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qttools.morph b/strata/qt5-tools/qttools.morph
new file mode 100644
index 00000000..1baa10e7
--- /dev/null
+++ b/strata/qt5-tools/qttools.morph
@@ -0,0 +1,10 @@
+name: qttools
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- unset TARGET ; make
+- unset TARGET ; make html_docs
+install-commands:
+- unset TARGET ; make install INSTALL_ROOT=$DESTDIR
+- unset TARGET ; make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qttranslations.morph b/strata/qt5-tools/qttranslations.morph
new file mode 100644
index 00000000..bfb61302
--- /dev/null
+++ b/strata/qt5-tools/qttranslations.morph
@@ -0,0 +1,8 @@
+name: qttranslations
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtwebkit-examples.morph b/strata/qt5-tools/qtwebkit-examples.morph
new file mode 100644
index 00000000..b6683e65
--- /dev/null
+++ b/strata/qt5-tools/qtwebkit-examples.morph
@@ -0,0 +1,8 @@
+name: qtwebkit-examples
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtwebkit.morph b/strata/qt5-tools/qtwebkit.morph
new file mode 100644
index 00000000..c9348aca
--- /dev/null
+++ b/strata/qt5-tools/qtwebkit.morph
@@ -0,0 +1,11 @@
+name: qtwebkit
+kind: chunk
+max-jobs: 1
+configure-commands:
+- QMAKEPATH=/qtwebkit.build/Tools/qmake/mkspecs qmake WebKit.pro
+build-commands:
+- make
+- make html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtwebsockets.morph b/strata/qt5-tools/qtwebsockets.morph
new file mode 100644
index 00000000..e1cac87f
--- /dev/null
+++ b/strata/qt5-tools/qtwebsockets.morph
@@ -0,0 +1,10 @@
+name: qtwebsockets
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+- make html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtx11extras.morph b/strata/qt5-tools/qtx11extras.morph
new file mode 100644
index 00000000..edea0acf
--- /dev/null
+++ b/strata/qt5-tools/qtx11extras.morph
@@ -0,0 +1,10 @@
+name: qtx11extras
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+- make html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/qtxmlpatterns.morph b/strata/qt5-tools/qtxmlpatterns.morph
new file mode 100644
index 00000000..bdd32d5f
--- /dev/null
+++ b/strata/qt5-tools/qtxmlpatterns.morph
@@ -0,0 +1,10 @@
+name: qtxmlpatterns
+kind: chunk
+configure-commands:
+- qmake
+build-commands:
+- make
+- make html_docs
+install-commands:
+- make install INSTALL_ROOT=$DESTDIR
+- make install_html_docs INSTALL_ROOT=$DESTDIR
diff --git a/strata/qt5-tools/ruby-1.8.morph b/strata/qt5-tools/ruby-1.8.morph
new file mode 100644
index 00000000..cee282c0
--- /dev/null
+++ b/strata/qt5-tools/ruby-1.8.morph
@@ -0,0 +1,9 @@
+name: ruby-1.8
+kind: chunk
+configure-commands:
+- autoconf
+- ./configure --prefix=/usr --program-suffix=-1.8
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/qt5-tools/ruby-1.9.morph b/strata/qt5-tools/ruby-1.9.morph
new file mode 100644
index 00000000..bc697ada
--- /dev/null
+++ b/strata/qt5-tools/ruby-1.9.morph
@@ -0,0 +1,9 @@
+name: ruby-1.9
+kind: chunk
+configure-commands:
+- autoconf
+- ./configure --prefix=/usr --enable-shared --with-baseruby=/usr/bin/ruby-1.8
+build-commands:
+- make
+install-commands:
+- make DESTDIR="$DESTDIR" install
diff --git a/strata/ruby.morph b/strata/ruby.morph
new file mode 100644
index 00000000..eba231be
--- /dev/null
+++ b/strata/ruby.morph
@@ -0,0 +1,50 @@
+name: ruby
+kind: stratum
+description: |
+ Ruby and common Ruby build dependencies.
+
+ Most Gem dependencies are not required when building or installing the
+ Gem. They are needed at runtime only. Tools which extend Rake, such as
+ 'hoe' and 'rake-compiler', are exceptions to that rule, and are kept
+ in this stratum so they are always available when building other Gems.
+build-depends:
+- morph: strata/tools.morph
+chunks:
+- name: ruby-1.8
+ morph: strata/ruby/ruby-1.8.morph
+ repo: upstream:ruby
+ ref: 7a24f1710028d568ad61d0aa49d5178260178d77
+ unpetrify-ref: baserock/morph/ruby_1_8_7
+- name: libyaml
+ repo: upstream:libyaml-hg
+ ref: 0577078d6625a7bf06e6bc7fb26a43e27400b17e
+ unpetrify-ref: master
+- name: ruby
+ morph: strata/ruby/ruby.morph
+ repo: upstream:ruby
+ ref: 05604af5a6da635b8bca51269db8b433972e82c0
+ unpetrify-ref: baserock/ruby_2_0_0
+ build-depends:
+ - ruby-1.8
+ - libyaml
+- name: bundler
+ morph: strata/ruby/bundler.morph
+ repo: upstream:bundler
+ ref: 0708fbe62617a63300e1cc3b9869cc1280c57ef6
+ unpetrify-ref: baserock/v1.6.2
+ build-depends:
+ - ruby
+- name: hoe
+ morph: strata/ruby/hoe.morph
+ repo: upstream:ruby-gems/hoe
+ ref: 50a2706d0f70ece52922ddcc56630e1e0655b83e
+ unpetrify-ref: master
+ build-depends:
+ - ruby
+- name: rake-compiler
+ morph: strata/ruby/rake-compiler.morph
+ repo: upstream:ruby-gems/rake-compiler
+ ref: aaed621f6fdb0b0395775fea5464cc83e794fbdb
+ unpetrify-ref: v0.9.3
+ build-depends:
+ - ruby
diff --git a/strata/ruby/bundler.morph b/strata/ruby/bundler.morph
new file mode 100644
index 00000000..643ef119
--- /dev/null
+++ b/strata/ruby/bundler.morph
@@ -0,0 +1,6 @@
+name: bundler
+kind: chunk
+build-commands:
+- rake build
+install-commands:
+- gem install ./pkg/*gem --bindir "$DESTDIR/$PREFIX/bin" --install-dir "$DESTDIR/$PREFIX/lib/ruby/gems/2.0.0"
diff --git a/strata/ruby/hoe.morph b/strata/ruby/hoe.morph
new file mode 100644
index 00000000..0d06a82a
--- /dev/null
+++ b/strata/ruby/hoe.morph
@@ -0,0 +1,16 @@
+name: hoe
+kind: chunk
+description: |
+ Hoe is a rake/rubygems helper for project Rakefiles. It helps you
+ manage, maintain, and release your project and includes a dynamic
+ plug-in system allowing for easy extensibility.
+products:
+- artifact: hoe-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- rake gem
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./pkg/hoe-*.gem
diff --git a/strata/ruby/rake-compiler.morph b/strata/ruby/rake-compiler.morph
new file mode 100644
index 00000000..9286f14a
--- /dev/null
+++ b/strata/ruby/rake-compiler.morph
@@ -0,0 +1,15 @@
+name: rake-compiler
+kind: chunk
+description: |
+ Provide a standard and simplified way to build and package Ruby
+ extensions (C, Java) using Rake as glue.
+products:
+- artifact: rake-compiler-doc
+ include:
+ - usr/lib/ruby/gems/\d[\w.]*/doc/.*
+build-commands:
+- rake gem
+install-commands:
+- mkdir -p "$DESTDIR/$(gem environment home)"
+- gem install --install-dir "$DESTDIR/$(gem environment home)" --bindir "$DESTDIR/$PREFIX/bin"
+ --ignore-dependencies --local ./pkg/rake-compiler-*.gem
diff --git a/strata/ruby/ruby-1.8.morph b/strata/ruby/ruby-1.8.morph
new file mode 100644
index 00000000..4554e441
--- /dev/null
+++ b/strata/ruby/ruby-1.8.morph
@@ -0,0 +1,9 @@
+name: ruby-1.8
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- cp /usr/share/automake*/config.guess .
+- cp /usr/share/automake*/config.sub .
+- autoconf
+configure-commands:
+- ./configure --prefix=/usr --program-suffix=-1.8
diff --git a/strata/ruby/ruby.morph b/strata/ruby/ruby.morph
new file mode 100644
index 00000000..f9f0ae7c
--- /dev/null
+++ b/strata/ruby/ruby.morph
@@ -0,0 +1,9 @@
+name: ruby
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- cp /usr/share/automake*/config.guess tool
+- cp /usr/share/automake*/config.sub tool
+- autoconf
+configure-commands:
+- ./configure --prefix=/usr --enable-shared --with-baseruby=/usr/bin/ruby-1.8
diff --git a/strata/swift.morph b/strata/swift.morph
new file mode 100644
index 00000000..22ca8a3a
--- /dev/null
+++ b/strata/swift.morph
@@ -0,0 +1,29 @@
+name: swift
+kind: stratum
+description: Distributed object storage
+build-depends:
+# openstack-common is required for simplejson, cffi, greenlet,
+# eventlet, pastedeploy
+- morph: strata/openstack-common.morph
+chunks:
+- name: dnspython
+ repo: upstream:python-packages/dnspython
+ ref: e1369c62d14f82b80ef11197a490ace5d43bb3f3
+ unpetrify-ref: v1.12.0
+- name: netifaces
+ repo: upstream:python-packages/netifaces
+ ref: 885b200ba717df87f6e8044ec8c66c677c949bcb
+ unpetrify-ref: release_0_10_4
+- name: xattr
+ morph: strata/swift/xattr.morph
+ repo: upstream:python-packages/xattr
+ ref: dd10d44e3eb9a1d2303c1f7d5126c099d56e97fc
+ unpetrify-ref: v0.7.6
+- name: swift
+ repo: upstream:openstack/swift
+ ref: 2e8261a4dc0d0af0c4a46478b81e167bcf02220b
+ unpetrify-ref: 2.2.0
+ build-depends:
+ - dnspython
+ - netifaces
+ - xattr
diff --git a/strata/swift/xattr.morph b/strata/swift/xattr.morph
new file mode 100644
index 00000000..65f47dae
--- /dev/null
+++ b/strata/swift/xattr.morph
@@ -0,0 +1,8 @@
+name: xattr
+kind: chunk
+configure-commands:
+- cp -r /usr/lib/python2.7/site-packages/cffi .
+build-commands:
+- python setup.py build
+install-commands:
+- python setup.py install --prefix "$PREFIX" --root "$DESTDIR"
diff --git a/strata/test-tools.morph b/strata/test-tools.morph
new file mode 100644
index 00000000..6826c963
--- /dev/null
+++ b/strata/test-tools.morph
@@ -0,0 +1,113 @@
+name: test-tools
+kind: stratum
+description: Tools and frameworks used for testing
+build-depends:
+- morph: strata/python-common.morph
+chunks:
+- name: python-test-extras
+ repo: upstream:python-packages/python-test-extras.git
+ ref: cdeb596f01241e9c779332e86f6edcd0c2e8e9f0
+ unpetrify-ref: master
+- name: check
+ repo: upstream:check
+ ref: 8c872aca6675e95fa47e7514e28fbdf25fce6170
+ unpetrify-ref: 0.9.8
+- name: cppunit
+ repo: upstream:cppunit
+ ref: 8133cf2b977f013216f0a41b6fcb740410d83926
+ unpetrify-ref: 1.13.2
+- name: testtools
+ repo: upstream:python-packages/testtools
+ ref: ee9946228ce5a03a84cf146027de0a8a9a46c4fe
+ unpetrify-ref: testools-1.1.0
+ build-depends:
+ - python-test-extras
+- name: subunit
+ morph: strata/test-tools/subunit.morph
+ repo: upstream:python-packages/subunit
+ ref: e18ffe65a3229d5c1d91be988405d40219db0887
+ unpetrify-ref: 0.0.21
+ build-depends:
+ - python-test-extras
+ - testtools
+ - check
+ - cppunit
+- name: fixtures
+ repo: upstream:python-packages/fixtures
+ ref: 9f9d89ce718463b24cd3910b9a99efb60b3c9e1b
+ unpetrify-ref: 0.3.16
+ build-depends:
+ - testtools
+- name: testrepository
+ repo: upstream:python-packages/testrepository
+ ref: 6419a3dcaabaf09eaf438c6d8d85c90eba7a2b91
+ unpetrify-ref: 0.0.19
+ build-depends:
+ - fixtures
+ - subunit
+ - testtools
+- name: testscenarios
+ repo: upstream:python-packages/testscenarios
+ ref: 475857af19a8190c9c0c7f8241b9907b942e19fd
+ unpetrify-ref: trunk
+- name: mox
+ repo: upstream:python-packages/mox
+ ref: 160491d0384285698d726b1af21277f336107f51
+ unpetrify-ref: master
+- name: mock
+ repo: upstream:python-packages/mock
+ ref: 35b35f7ad239005a950f870af57b44dbdc99d66b
+ unpetrify-ref: master
+- name: oslotest
+ repo: upstream:openstack/oslotest
+ ref: cfdb562a6e07728570ca624a8c4faf3f5b61423b
+ unpetrify-ref: 1.2.0
+ build-depends:
+ - fixtures
+ - subunit
+ - testrepository
+ - testscenarios
+ - testtools
+ - mock
+ - mox
+- name: mox3
+ repo: upstream:python-packages/pymox
+ ref: 444fa40f4edb529efbffa2da8dbd97e9b8564b5c
+ unpetrify-ref: master
+- name: mocker
+ repo: upstream:python-packages/mocker
+ ref: f7f87e4ac1c52342162cf2035f5fe3d273f8b07f
+ unpetrify-ref: master
+- name: zake
+ repo: upstream:python-packages/zake.git
+ ref: 13b92d9db4ad37d9550ef5c5abd323a2530a1e72
+ unpetrify-ref: master
+ build-depends:
+ - testtools
+- name: nose
+ repo: upstream:python-packages/nose
+ ref: 08d134270b035dac3310cd877bb0fe9ab678303a
+ unpetrify-ref: release_1.3.4
+- name: beautifulsoup4
+ repo: upstream:python-packages/beautifulsoup4.git
+ ref: bcd7af0e9159d97aa511fb2d879424d1c1c5aadf
+- name: waitress
+ repo: upstream:python-packages/waitress.git
+ ref: b795d573a5a9e6e39b46a6e82da367a6a5db8dbd
+ unpetrify-ref: 0.8.9
+- name: webtest
+ repo: upstream:python-packages/webtest.git
+ ref: 6a24fba456d1c4ac2609b90f1fdc377c595608a4
+ unpetrify-ref: 2.0.16
+ build-depends:
+ - waitress
+ - beautifulsoup4
+- name: testresources
+ repo: upstream:python-packages/testresources
+ ref: ef938bcce0e436f9e9ffef932a898dc248a1d6ea
+ unpetrify-ref: 0.2.7
+ build-depends:
+ - testtools
+ - fixtures
+ - check
+ - cppunit
diff --git a/strata/test-tools/subunit.morph b/strata/test-tools/subunit.morph
new file mode 100644
index 00000000..b7b43a39
--- /dev/null
+++ b/strata/test-tools/subunit.morph
@@ -0,0 +1,3 @@
+name: python-subunit
+kind: chunk
+build-system: autotools
diff --git a/strata/tools.morph b/strata/tools.morph
new file mode 100644
index 00000000..4d2fa2ad
--- /dev/null
+++ b/strata/tools.morph
@@ -0,0 +1,85 @@
+name: tools
+kind: stratum
+description: Various tools
+build-depends:
+- morph: strata/foundation.morph
+chunks:
+- name: distcc
+ morph: strata/tools/distcc.morph
+ repo: upstream:distcc
+ ref: c9691a9604fdf9d6711204999787d332b7141692
+ unpetrify-ref: baserock/morph
+- name: file
+ repo: upstream:file
+ ref: f69c3fd9bcb108292e7887dd889e8b49f68c4a52
+ unpetrify-ref: file-5.22
+- name: gdb
+ morph: strata/tools/gdb.morph
+ repo: upstream:binutils-gdb
+ ref: 129ee12d013f4a2f09fe40a33072e6e47e949890
+ unpetrify-ref: gdb-7.8-branch
+- name: linux-user-chroot
+ repo: upstream:linux-user-chroot
+ ref: d25cc110f69e6e71a95b4ac532dcfc5423d4a16b
+ unpetrify-ref: baserock/morph
+- name: lsof
+ morph: strata/tools/lsof.morph
+ repo: upstream:lsof
+ ref: fffb8558208586338587027c265fd0eca44466be
+ unpetrify-ref: baserock/morph
+- name: strace
+ repo: upstream:strace
+ ref: 6d8c0637e8dd0f65c667af33c612230552419db1
+ unpetrify-ref: v4.8
+- name: vala-bootstrap
+ morph: strata/tools/vala-bootstrap.morph
+ repo: upstream:vala
+ ref: 4e4a02c03445336237b36723b23a91670ef7621b
+ unpetrify-ref: baserock/bootstrap
+- name: vala
+ repo: upstream:vala
+ ref: 5f6ebe007050be12bdc4aa7c902ae4059f28874a
+ unpetrify-ref: 0.28.0
+ build-depends:
+ - vala-bootstrap
+- name: u-boot
+ morph: strata/tools/u-boot.morph
+ repo: upstream:u-boot
+ ref: fe57382d04b46c37f34cf8d3b3ad876554fd12bf
+ unpetrify-ref: baserock/morph
+- name: kexec-tools
+ morph: strata/tools/kexec-tools.morph
+ repo: upstream:kexec-tools
+ ref: f4d1d2ad474e882df13418239aa3050673a844d7
+ unpetrify-ref: baserock/morph
+- name: device-tree-compiler
+ morph: strata/tools/device-tree-compiler.morph
+ repo: upstream:device-tree-compiler
+ ref: c92f284c3cf76d471eb27a271de3a51cb45ed058
+ unpetrify-ref: baserock/morph
+- name: sudo
+ repo: upstream:sudo
+ ref: a4769dc7999b53260325fb89945bef85714fb338
+ unpetrify-ref: baserock/morph
+- name: procps-ng
+ morph: strata/tools/procps-ng.morph
+ repo: upstream:procps-ng
+ ref: 85fff468fa263cdd2ff1c0144579527c32333695
+ unpetrify-ref: v3.3.9
+- name: ipmitool
+ morph: strata/tools/ipmitool.morph
+ repo: upstream:ipmitool
+ ref: be7917f9f58c8a354bc0960ed57516af5d2bd29a
+ unpetrify-ref: IPMITOOL_1_8_14
+ build-depends:
+ - file
+- name: parted
+ morph: strata/tools/parted.morph
+ repo: upstream:parted
+ ref: 387e96e6eac59d84e9a688422b4b321ae9beaa20
+ unpetrify-ref: baserock/v3.2
+- name: zip
+ morph: strata/tools/zip.morph
+ repo: upstream:zip
+ ref: e52e9d6a339aad5fcff464cf207da51c02996e39
+ unpetrify-ref: zip30
diff --git a/strata/tools/device-tree-compiler.morph b/strata/tools/device-tree-compiler.morph
new file mode 100644
index 00000000..8abfafc8
--- /dev/null
+++ b/strata/tools/device-tree-compiler.morph
@@ -0,0 +1,6 @@
+name: device-tree-compiler
+kind: chunk
+build-commands:
+- make all
+install-commands:
+- make install DESTDIR="$DESTDIR" PREFIX="$PREFIX"
diff --git a/strata/tools/distcc.morph b/strata/tools/distcc.morph
new file mode 100644
index 00000000..82adbe50
--- /dev/null
+++ b/strata/tools/distcc.morph
@@ -0,0 +1,10 @@
+name: distcc
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- NOCONFIGURE=1 ./autogen.sh
+# distcc doesn't use automake, so we cannot autoreconf it
+- cp /usr/share/automake*/config.guess .
+- cp /usr/share/automake*/config.sub .
+configure-commands:
+- ./configure --prefix="$PREFIX" --disable-Werror
diff --git a/strata/tools/gdb.morph b/strata/tools/gdb.morph
new file mode 100644
index 00000000..808ff7d7
--- /dev/null
+++ b/strata/tools/gdb.morph
@@ -0,0 +1,5 @@
+name: gdb
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./configure --prefix="$PREFIX" --disable-werror --disable-gas --disable-binutils --disable-ld --disable-gold --disable-gprof
diff --git a/strata/tools/git-fat.morph b/strata/tools/git-fat.morph
new file mode 100644
index 00000000..c971b07f
--- /dev/null
+++ b/strata/tools/git-fat.morph
@@ -0,0 +1,4 @@
+name: git-fat
+kind: chunk
+install-commands:
+- install -D -m 755 git-fat "$DESTDIR/usr/bin/git-fat"
diff --git a/strata/tools/ipmitool.morph b/strata/tools/ipmitool.morph
new file mode 100644
index 00000000..db0d5a3a
--- /dev/null
+++ b/strata/tools/ipmitool.morph
@@ -0,0 +1,5 @@
+name: ipmitool
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- touch NEWS
diff --git a/strata/tools/kexec-tools.morph b/strata/tools/kexec-tools.morph
new file mode 100644
index 00000000..118c1ac2
--- /dev/null
+++ b/strata/tools/kexec-tools.morph
@@ -0,0 +1,6 @@
+name: kexec-tools
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./bootstrap
+- ./configure --prefix="$PREFIX"
diff --git a/strata/tools/lsof.morph b/strata/tools/lsof.morph
new file mode 100644
index 00000000..38183ae6
--- /dev/null
+++ b/strata/tools/lsof.morph
@@ -0,0 +1,12 @@
+name: lsof
+kind: chunk
+configure-commands:
+- tar xf lsof_*_src.tar --no-same-owner
+- cd lsof_*_src/ && ./Configure -n linux
+build-commands:
+- cd lsof_*_src/ && make
+install-commands:
+- mkdir -p "$DESTDIR$PREFIX"/bin
+- mv lsof_*_src/lsof "$DESTDIR$PREFIX"/bin/lsof
+- mkdir -p "$DESTDIR$PREFIX"/man/man8
+- mv lsof_*_src/lsof.8 "$DESTDIR$PREFIX"/man/man8
diff --git a/strata/tools/parted.morph b/strata/tools/parted.morph
new file mode 100644
index 00000000..d99c73cd
--- /dev/null
+++ b/strata/tools/parted.morph
@@ -0,0 +1,10 @@
+name: parted
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- ./bootstrap --skip-po --no-git --gnulib-srcdir=gnulib
+configure-commands:
+# Disable device-mapper as it is not installed on Baserock
+- ./configure --prefix="$PREFIX" --disable-device-mapper
+build-commands:
+- make WERROR_CFLAGS=""
diff --git a/strata/tools/procps-ng.morph b/strata/tools/procps-ng.morph
new file mode 100644
index 00000000..69df3d4a
--- /dev/null
+++ b/strata/tools/procps-ng.morph
@@ -0,0 +1,27 @@
+name: procps-ng
+kind: chunk
+
+description: |
+ Process management tools.
+
+ procps-ng is a fork of the original procps project.
+
+build-system: autotools
+
+configure-commands:
+ - NOCONFIGURE=1 ./autogen.sh
+ # Setting exec-prefix to /usr causes a bunch of stuff to go in /usr/usr/bin
+ # Setting prefix to / causes files to go in /include and /share
+ # So don't do either of those things!
+ - ./configure --prefix="$PREFIX" --exec-prefix=/
+
+post-install-commands:
+# We need to link the binaries into /bin so that they override the Busybox
+# versions of these tools. This will not be necessary once /bin is merged
+# into /usr/bin. It's not possible to get the Makefile to install the binaries
+# in /bin -- a bunch of them are hardcoded to live in ${exec_prefix}/usr/bin.
+ - |
+ usr_binaries="free pidof pmap slabtop top vmstat watch pgrep pkill pwdx tload uptime w"
+ for file in $usr_binaries; do
+ ln -sf "$PREFIX"/bin/$file "$DESTDIR"/bin/$file
+ done
diff --git a/strata/tools/u-boot.morph b/strata/tools/u-boot.morph
new file mode 100644
index 00000000..9be30bc7
--- /dev/null
+++ b/strata/tools/u-boot.morph
@@ -0,0 +1,11 @@
+name: u-boot
+kind: chunk
+build-commands:
+- make tools
+install-commands:
+- mkdir -p "$DESTDIR$PREFIX/bin"
+- install -m 755 tools/img2brec.sh "$DESTDIR$PREFIX/bin/."
+- install -m 755 tools/jtagconsole "$DESTDIR$PREFIX/bin/."
+- install -m 755 tools/netconsole "$DESTDIR$PREFIX/bin/."
+- install -m 755 tools/mkenvimage "$DESTDIR$PREFIX/bin/."
+- install -m 755 tools/mkimage "$DESTDIR$PREFIX/bin/."
diff --git a/strata/tools/vala-bootstrap.morph b/strata/tools/vala-bootstrap.morph
new file mode 100644
index 00000000..e55b1887
--- /dev/null
+++ b/strata/tools/vala-bootstrap.morph
@@ -0,0 +1,7 @@
+name: vala-bootstrap
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- autoreconf -ivf
+configure-commands:
+- ./configure --prefix="$PREFIX"
diff --git a/strata/tools/zip.morph b/strata/tools/zip.morph
new file mode 100644
index 00000000..070467e0
--- /dev/null
+++ b/strata/tools/zip.morph
@@ -0,0 +1,9 @@
+name: zip
+kind: chunk
+configure-commands:
+- cp unix/Makefile .
+build-commands:
+- sed -i -e 's/^prefix = .*$/prefix = $$DESTDIR\/$$PREFIX/' Makefile
+- make generic
+install-commands:
+- make install
diff --git a/strata/trove.morph b/strata/trove.morph
new file mode 100644
index 00000000..984050b7
--- /dev/null
+++ b/strata/trove.morph
@@ -0,0 +1,82 @@
+name: trove
+kind: stratum
+description: Trove software
+build-depends:
+- morph: strata/python-core.morph
+- morph: strata/tools.morph
+- morph: strata/morph-utils.morph
+- morph: strata/pcre-utils.morph
+chunks:
+- name: lua
+ morph: strata/trove/lua.morph
+ repo: upstream:lua
+ ref: 948063437e0350d9ef1649ec3a76d0c24a5c8642
+ unpetrify-ref: baserock/5.1-morph
+- name: lace
+ morph: strata/trove/lace.morph
+ repo: upstream:gitano/lace
+ ref: d1b540b6d361d6a1f51e53cdaab69f053340efbb
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - lua
+- name: luxio
+ morph: strata/trove/luxio.morph
+ repo: upstream:luxio
+ ref: be9d125080b9ff2376273e21b75669b65dc88d46
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - lua
+- name: supple
+ morph: strata/trove/supple.morph
+ repo: upstream:gitano/supple
+ ref: 0963e5706d78d0ae7446ea91af986de1e196eb39
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - lua
+ - luxio
+- name: clod
+ morph: strata/trove/clod.morph
+ repo: upstream:gitano/clod
+ ref: da15894f42f48d15db997c4355d6b672371a4163
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - lua
+- name: gall
+ morph: strata/trove/gall.morph
+ repo: upstream:gitano/gall
+ ref: f58c7526fbb0421d7f5446644f01f4cf57035ee2
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - lua
+ - luxio
+- name: lrexlib-pcre
+ morph: strata/trove/lrexlib-pcre.morph
+ repo: upstream:lrexlib
+ ref: 0524a6e3ab6d50cba63c8642a875e246de53d651
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - lua
+- name: gitano
+ morph: strata/trove/gitano.morph
+ repo: upstream:gitano/gitano
+ ref: 4b8ce6875266fdd6609a217dcf2924d7d4815cc2
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - lua
+- name: cgit
+ morph: strata/trove/cgit.morph
+ repo: upstream:cgit
+ ref: acbf4a15e260c711094455dbef7c024f2553fd32
+ unpetrify-ref: baserock/morph
+- name: trove-setup
+ morph: strata/trove/trove-setup.morph
+ repo: baserock:baserock/trove-setup
+ ref: 16de74536e0846ba1d2e5101618df9146c785a41
+ unpetrify-ref: master
+- name: lua-scrypt
+ morph: strata/trove/lua-scrypt.morph
+ repo: upstream:lua-scrypt
+ ref: 0d7f74cd3eab7d54fbb13294194de7ea70ac34a5
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - lua
diff --git a/strata/trove/cgit.morph b/strata/trove/cgit.morph
new file mode 100644
index 00000000..bd373a51
--- /dev/null
+++ b/strata/trove/cgit.morph
@@ -0,0 +1,6 @@
+name: cgit
+kind: chunk
+build-commands:
+- make prefix="$PREFIX"
+install-commands:
+- make prefix="$PREFIX" DESTDIR="$DESTDIR" install
diff --git a/strata/trove/clod.morph b/strata/trove/clod.morph
new file mode 100644
index 00000000..e31ca4fb
--- /dev/null
+++ b/strata/trove/clod.morph
@@ -0,0 +1,7 @@
+name: clod
+kind: chunk
+description: Configuration Language Organised (by) Dots.
+build-commands:
+- make
+install-commands:
+- make INST_BASE="$PREFIX" DESTDIR="$DESTDIR" install
diff --git a/strata/trove/gall.morph b/strata/trove/gall.morph
new file mode 100644
index 00000000..c1f2fd47
--- /dev/null
+++ b/strata/trove/gall.morph
@@ -0,0 +1,7 @@
+name: gall
+kind: chunk
+description: Git Abstraction Layer for Lua
+build-commands:
+- make
+install-commands:
+- make INST_BASE="$PREFIX" DESTDIR="$DESTDIR" install
diff --git a/strata/trove/gitano.morph b/strata/trove/gitano.morph
new file mode 100644
index 00000000..962224d5
--- /dev/null
+++ b/strata/trove/gitano.morph
@@ -0,0 +1,5 @@
+name: gitano
+kind: chunk
+install-commands:
+- make install INST_ROOT="$PREFIX" DESTDIR="$DESTDIR"
+- cp scripts/htpasswd "$DESTDIR/$PREFIX/bin"
diff --git a/strata/trove/lace.morph b/strata/trove/lace.morph
new file mode 100644
index 00000000..70752397
--- /dev/null
+++ b/strata/trove/lace.morph
@@ -0,0 +1,7 @@
+name: lace
+kind: chunk
+description: Lua Access Control Engine.
+build-commands:
+- make
+install-commands:
+- make INST_BASE="$PREFIX" DESTDIR="$DESTDIR" install
diff --git a/strata/trove/lrexlib-pcre.morph b/strata/trove/lrexlib-pcre.morph
new file mode 100644
index 00000000..63f3b034
--- /dev/null
+++ b/strata/trove/lrexlib-pcre.morph
@@ -0,0 +1,9 @@
+name: lrexlib-pcre
+kind: chunk
+description: Lua regular expression library - PCRE build only
+build-commands:
+- gcc -fPIC -DPIC -DVERSION=\"2.7.2\" -o rex_pcre.so -shared src/common.c src/pcre/lpcre.c
+ src/pcre/lpcre_f.c -lpcre
+install-commands:
+- mkdir -p $DESTDIR/usr/lib/lua/5.1/
+- cp rex_pcre.so $DESTDIR/usr/lib/lua/5.1/
diff --git a/strata/trove/lua-scrypt.morph b/strata/trove/lua-scrypt.morph
new file mode 100644
index 00000000..141b8ec3
--- /dev/null
+++ b/strata/trove/lua-scrypt.morph
@@ -0,0 +1,4 @@
+name: lua-scrypt
+kind: chunk
+install-commands:
+- make lua-5.1-install DESTDIR="$DESTDIR"
diff --git a/strata/trove/lua.morph b/strata/trove/lua.morph
new file mode 100644
index 00000000..32aeb5c1
--- /dev/null
+++ b/strata/trove/lua.morph
@@ -0,0 +1,11 @@
+name: lua
+kind: chunk
+configure-commands:
+- sed -e 's/defined(LUA_USE_READLINE)/0/g' src/luaconf.h.orig >src/luaconf.h
+- sed -i -e '/^linux:/{n;s/-lreadline//g;s/-lhistory//g;s/-lncurses//g}' src/Makefile
+build-commands:
+- make debian_linux INSTALL_TOP="$PREFIX" RPATH="$PREFIX/lib"
+install-commands:
+- make INSTALL_TOP="$DESTDIR/$PREFIX" debian_install
+- mkdir -p "$DESTDIR/$PREFIX/lib/pkgconfig"
+- cp lua5.1.pc "$DESTDIR/$PREFIX/lib/pkgconfig/"
diff --git a/strata/trove/luxio.morph b/strata/trove/luxio.morph
new file mode 100644
index 00000000..29aabfaf
--- /dev/null
+++ b/strata/trove/luxio.morph
@@ -0,0 +1,7 @@
+name: luxio
+kind: chunk
+description: Lua Unix IO (and related stuff).
+build-commands:
+- make lua-5.1
+install-commands:
+- make DESTDIR="$DESTDIR" lua-5.1-install
diff --git a/strata/trove/supple.morph b/strata/trove/supple.morph
new file mode 100644
index 00000000..6470ba28
--- /dev/null
+++ b/strata/trove/supple.morph
@@ -0,0 +1,7 @@
+name: supple
+kind: chunk
+description: Sandbox (for) Untrusted Procedure Partitioning (in) Lua Engine.
+build-commands:
+- make
+install-commands:
+- make INST_BASE="$PREFIX" DESTDIR="$DESTDIR" install
diff --git a/strata/trove/trove-setup.morph b/strata/trove/trove-setup.morph
new file mode 100644
index 00000000..1f49760f
--- /dev/null
+++ b/strata/trove/trove-setup.morph
@@ -0,0 +1,4 @@
+name: trove-setup
+kind: chunk
+install-commands:
+- make install DESTDIR="$DESTDIR"
diff --git a/strata/unionfs-fuse-group.morph b/strata/unionfs-fuse-group.morph
new file mode 100644
index 00000000..914b18e0
--- /dev/null
+++ b/strata/unionfs-fuse-group.morph
@@ -0,0 +1,21 @@
+name: unionfs-fuse-group
+kind: stratum
+
+description:
+ User-space union file system.
+
+ This is used by Morph for systems which are unable to use 'overlayfs'.
+ It is slower than 'overlayfs', because it runs outside rather than
+ inside the kernel, but 'overlayfs' is only available in Linux 3.18 and
+ newer.
+
+build-depends:
+# Depends on foundation for 'fuse', and maybe other stuff.
+- morph: strata/foundation.morph
+
+chunks:
+  - name: unionfs-fuse
+ morph: strata/unionfs-fuse-group/unionfs-fuse.morph
+ repo: upstream:unionfs-fuse
+ ref: efac5b7aa91ec860f8f430a8d21060fe53a07002
+ unpetrify-ref: v0.26
diff --git a/strata/unionfs-fuse-group/unionfs-fuse.morph b/strata/unionfs-fuse-group/unionfs-fuse.morph
new file mode 100644
index 00000000..9b8ac2c7
--- /dev/null
+++ b/strata/unionfs-fuse-group/unionfs-fuse.morph
@@ -0,0 +1,13 @@
+name: unionfs-fuse
+kind: chunk
+
+description:
+ User-space union file system.
+
+build-system: manual
+
+build-commands:
+- make PREFIX="$PREFIX"
+
+install-commands:
+- make PREFIX="$PREFIX" DESTDIR="$DESTDIR" install
diff --git a/strata/virtualbox-guest-x86_64.morph b/strata/virtualbox-guest-x86_64.morph
new file mode 100644
index 00000000..fadcec92
--- /dev/null
+++ b/strata/virtualbox-guest-x86_64.morph
@@ -0,0 +1,22 @@
+name: virtualbox-guest-x86_64
+kind: stratum
+description: |
+ VirtualBox Guest Additions for x86_64
+build-depends:
+- morph: strata/tools.morph
+- morph: strata/bsp-x86_64-generic.morph
+- morph: strata/x-common.morph
+- morph: strata/x-generic.morph
+chunks:
+- name: yasm
+ morph: strata/virtualbox-guest-x86_64/yasm.morph
+ repo: upstream:yasm
+ ref: fefefe262eb29081f0bcb4d48f2d476ce5730562
+ unpetrify-ref: baserock/morph
+- name: vboxguest
+ morph: strata/virtualbox-guest-x86_64/vboxguest.morph
+ repo: upstream:VirtualBox
+ ref: 617aeb5a3c13f4a46423e8c2fb560449403ad56d
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - yasm
diff --git a/strata/virtualbox-guest-x86_64/vboxguest.morph b/strata/virtualbox-guest-x86_64/vboxguest.morph
new file mode 100644
index 00000000..f31c9944
--- /dev/null
+++ b/strata/virtualbox-guest-x86_64/vboxguest.morph
@@ -0,0 +1,34 @@
+name: vboxguest
+kind: chunk
+description: |
+ VirtualBox guest additions
+max-jobs: 1
+build-commands:
+- truncate -s0 src/VBox/Additions/common/pam/Makefile.kmk
+- env PATH="${PATH}:$(pwd)/kBuild/bin/linux.amd64/" VBOX_ONLY_ADDITIONS=1 VBOX_ONLY_BUILD=1
+ kmk
+- cd out/linux.amd64/release/bin/additions/src/ && make M="$(pwd)" -C /usr/src/linux
+ modules
+install-commands:
+- cd out/linux.amd64/release/bin/additions/src && make M="$(pwd)" -C /usr/src/linux
+ INSTALL_MOD_PATH="$DESTDIR" modules_install
+- install -m 755 -D -o 0 -g 0 out/linux.amd64/release/bin/additions/mount.vboxsf "$DESTDIR/sbin/mount.vboxsf"
+- install -m 755 -D -o 0 -g 0 out/linux.amd64/release/bin/additions/VBoxService "$DESTDIR/sbin/VBoxService"
+- install -m 755 -D -o 0 -g 0 out/linux.amd64/release/bin/additions/VBoxControl "$DESTDIR/sbin/VBoxControl"
+- |
+ install -m 644 -D -o 0 -g 0 /proc/self/fd/0 "$DESTDIR/lib/systemd/system/virtualbox-guest.service" <<EOS
+ [Unit]
+ Description=VirtualBox Guest Additions
+
+ [Service]
+ ExecStart=/sbin/VBoxService -f
+ Restart=always
+ EOS
+- mkdir -p "$DESTDIR/lib/systemd/system/basic.target.wants"
+- ln -s "/lib/systemd/system/virtualbox-guest.service" "$DESTDIR/lib/systemd/system/basic.target.wants/virtualbox-guest.service"
+- mkdir -p "$DESTDIR/etc/modules-load.d"
+- echo vboxsf > "$DESTDIR/etc/modules-load.d/virtualbox-guest-filesystem.conf"
+system-integration:
+ vboxguest-misc:
+ 00-depmod:
+ - depmod -a $(cd /lib/modules; ls | head -n 1)
diff --git a/strata/virtualbox-guest-x86_64/yasm.morph b/strata/virtualbox-guest-x86_64/yasm.morph
new file mode 100644
index 00000000..437ae97d
--- /dev/null
+++ b/strata/virtualbox-guest-x86_64/yasm.morph
@@ -0,0 +1,4 @@
+name: yasm
+kind: chunk
+max-jobs: 1
+build-system: autotools
diff --git a/strata/virtualization.morph b/strata/virtualization.morph
new file mode 100644
index 00000000..548019aa
--- /dev/null
+++ b/strata/virtualization.morph
@@ -0,0 +1,96 @@
+name: virtualization
+kind: stratum
+description: |
+ Virtualization for baserock
+  NOTE: this stratum requires kernel support; please see the openstack bsp
+  kernel history to check which config options are needed for openvswitch,
+  libvirt and ebtables, for example.
+build-depends:
+- morph: strata/libsoup-common.morph
+- morph: strata/python-core.morph
+- morph: strata/python-pygobject.morph
+- morph: strata/connman-common.morph
+- morph: strata/lvm.morph
+- morph: strata/xorg-util-macros-common.morph
+chunks:
+- name: yajl
+ morph: strata/virtualization/yajl.morph
+ repo: upstream:yajl
+ ref: 52fc681857228c65c1cb439782da485554875481
+ unpetrify-ref: baserock/morph
+- name: xml-catalog
+ morph: strata/virtualization/xml-catalog.morph
+ repo: baserock:baserock/xml-catalog
+ ref: 1d4a2abc875c4dda1b5eadc0a097a48a8d2ec82b
+ unpetrify-ref: master
+- name: dnsmasq
+ morph: strata/virtualization/dnsmasq.morph
+ repo: upstream:dnsmasq
+ ref: 8471cd938ca41fbe4fee8ae3f657625c92cfb954
+ unpetrify-ref: baserock/morph
+- name: qemu
+ morph: strata/virtualization/qemu.morph
+ repo: upstream:qemu
+ ref: c5691f7ecb32cbe7a95b491314ce070e211fd97d
+ unpetrify-ref: baserock/v2.2.0
+- name: libpciaccess
+ repo: upstream:libpciaccess
+ ref: b9c068896914b4132a24839c9ef7f9fcd6282d88
+ unpetrify-ref: master
+- name: dmidecode
+ morph: strata/virtualization/dmidecode.morph
+ repo: upstream:dmidecode
+ ref: 47a0aa5d6696a83922ee70279b7253a4e55947d5
+ unpetrify-ref: master
+- name: ebtables
+ morph: strata/virtualization/ebtables.morph
+ repo: upstream:ebtables
+ ref: f4bdc80ae8c1a79b4ab5dcb8431ad85aea618d66
+ unpetrify-ref: master
+- name: libvirt
+ morph: strata/virtualization/libvirt.morph
+ repo: upstream:libvirt
+ ref: 7b1ceec1e2f141d36ed9b7ef3a660ff8bb34fc53
+ unpetrify-ref: baserock/v1.2.10
+ build-depends:
+ - libpciaccess
+ - dnsmasq
+ - qemu
+ - xml-catalog
+ - yajl
+ - dmidecode
+ - ebtables
+- name: pycurl
+ repo: upstream:pycurl
+ ref: 5ca370827d88817eeca3c56cbb37e4ddccc16c6e
+ unpetrify-ref: baserock/morph
+- name: urlgrabber
+ repo: upstream:urlgrabber
+ ref: bf0a0be71373dec515bbb54e0613a3b9b0c00b04
+ unpetrify-ref: master
+ build-depends:
+ - pycurl
+- name: libvirt-python
+ repo: upstream:libvirt-python
+ ref: 8e09c79a07b097a6ba9af83be4916fb9c9538500
+ unpetrify-ref: v1.2.10
+ build-depends:
+ - libvirt
+ - urlgrabber
+- name: libosinfo
+ morph: strata/virtualization/libosinfo.morph
+ repo: upstream:libosinfo
+ ref: a86c74c4d3f62bb0e315ab7fc78ec9f7746bdd12
+ unpetrify-ref: baserock/morph
+- name: virt-manager
+ repo: upstream:virt-manager
+ ref: 8b7ebd4538ffbd2d246fdeee4f1bb1c452585575
+ unpetrify-ref: v1.0.1
+ build-depends:
+ - libvirt-python
+ - libosinfo
+- name: openvswitch
+ morph: strata/virtualization/openvswitch.morph
+ repo: upstream:openvswitch
+ ref: a52b0492a4d0398a24ed2a3566ff55ac53fea31f
+  unpetrify-ref: master
diff --git a/strata/virtualization/dmidecode.morph b/strata/virtualization/dmidecode.morph
new file mode 100644
index 00000000..d9ab0fa3
--- /dev/null
+++ b/strata/virtualization/dmidecode.morph
@@ -0,0 +1,7 @@
+name: dmidecode
+kind: chunk
+build-system: manual
+build-commands:
+- make
+install-commands:
+- make install DESTDIR="$DESTDIR" prefix="$PREFIX"
diff --git a/strata/virtualization/dnsmasq.morph b/strata/virtualization/dnsmasq.morph
new file mode 100644
index 00000000..3f4c6c53
--- /dev/null
+++ b/strata/virtualization/dnsmasq.morph
@@ -0,0 +1,10 @@
+name: dnsmasq
+kind: chunk
+build-system: manual
+build-commands:
+- make
+- make -C contrib/wrt
+install-commands:
+- make install PREFIX="$PREFIX" DESTDIR="$DESTDIR"
+- install -D -m 755 contrib/wrt/dhcp_release "$DESTDIR$PREFIX"/bin/dhcp_release
+- install -D -m 755 contrib/wrt/dhcp_lease_time "$DESTDIR$PREFIX"/bin/dhcp_lease_time
diff --git a/strata/virtualization/ebtables.morph b/strata/virtualization/ebtables.morph
new file mode 100644
index 00000000..a4d9fc76
--- /dev/null
+++ b/strata/virtualization/ebtables.morph
@@ -0,0 +1,7 @@
+name: ebtables
+kind: chunk
+build-system: manual
+build-commands:
+- make
+install-commands:
+- make install DESTDIR="$DESTDIR" BINDIR="$PREFIX/sbin"
diff --git a/strata/virtualization/libosinfo.morph b/strata/virtualization/libosinfo.morph
new file mode 100644
index 00000000..d5679965
--- /dev/null
+++ b/strata/virtualization/libosinfo.morph
@@ -0,0 +1,6 @@
+name: libosinfo
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./autogen.sh --prefix="$PREFIX" --disable-gtk-doc --disable-tests --disable-vala
+ --with-usb-ids-path=usb.ids --with-pci-ids-path=pci.ids
diff --git a/strata/virtualization/libvirt.morph b/strata/virtualization/libvirt.morph
new file mode 100644
index 00000000..bdc99ddf
--- /dev/null
+++ b/strata/virtualization/libvirt.morph
@@ -0,0 +1,31 @@
+name: libvirt
+kind: chunk
+build-system: autotools
+configure-commands:
+- |
+ GNULIB_SRCDIR=.gnulib \
+ ./autogen.sh \
+ --without-apparmor --with-attr --without-audit --without-avahi --with-blkid \
+ --without-capng --with-curl --with-dbus --with-fuse --without-glusterfs \
+ --without-hal --without-netcf --without-numactl --without-openwsman \
+ --with-pciaccess --without-readline --without-sanlock --without-sasl \
+ --without-selinux --without-ssh2 --with-systemd-daemon --with-udev \
+ --with-yajl --without-xen --with-qemu --without-uml --without-openvz \
+ --without-vmware --without-phyp --without-xenapi --without-libxl \
+ --without-vbox --without-lxc --without-esx --without-hyperv \
+ --without-parallels --with-test --with-remote --with-libvirtd \
+ --with-init-script=systemd --without-bhyve --without-gnutls --without-polkit \
+ --without-firewalld --without-dtrace --without-numad --with-network \
+ --with-secrets --with-storage-dir --with-storage-fs --with-storage-lvm \
+ --without-storage-iscsi --without-storage-scsi --without-storage-mpath \
+ --without-storage-disk --with-storage-rbd --without-storage-sheepdog \
+ --without-storage-gluster --without-wireshark-dissector --disable-werror \
+ --prefix="$PREFIX"
+system-integration:
+ libvirt-misc:
+ 01-addgroup:
+ - groupadd libvirt
+ 02-setup_libvirtconf:
+ - sed -i 's/#unix_sock_group = "libvirt"/unix_sock_group = "libvirt"/' /etc/libvirt/libvirtd.conf
+ - sed -i 's/#unix_sock_rw_perms = "0770"/unix_sock_rw_perms = "0770"/' /etc/libvirt/libvirtd.conf
+ - sed -i 's/#unix_sock_ro_perms = "0777"/unix_sock_ro_perms = "0777"/' /etc/libvirt/libvirtd.conf
diff --git a/strata/virtualization/openvswitch.morph b/strata/virtualization/openvswitch.morph
new file mode 100644
index 00000000..06480827
--- /dev/null
+++ b/strata/virtualization/openvswitch.morph
@@ -0,0 +1,11 @@
+name: openvswitch
+kind: chunk
+build-system: autotools
+pre-configure-commands:
+- ./boot.sh
+configure-commands:
+- |
+ ./configure --prefix="$PREFIX" \
+ --localstatedir=/var \
+ --sysconfdir=/etc \
+ --enable-ssl
diff --git a/strata/virtualization/qemu.morph b/strata/virtualization/qemu.morph
new file mode 100644
index 00000000..89067cd2
--- /dev/null
+++ b/strata/virtualization/qemu.morph
@@ -0,0 +1,5 @@
+name: qemu
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./configure --prefix="$PREFIX" --disable-werror
diff --git a/strata/virtualization/xml-catalog.morph b/strata/virtualization/xml-catalog.morph
new file mode 100644
index 00000000..34ba0306
--- /dev/null
+++ b/strata/virtualization/xml-catalog.morph
@@ -0,0 +1,6 @@
+name: xml-catalog
+kind: chunk
+install-commands:
+- make DESTDIR="$DESTDIR" install
+post-install-commands:
+- ./post-install.sh
diff --git a/strata/virtualization/yajl.morph b/strata/virtualization/yajl.morph
new file mode 100644
index 00000000..3fac99db
--- /dev/null
+++ b/strata/virtualization/yajl.morph
@@ -0,0 +1,8 @@
+name: yajl
+kind: chunk
+configure-commands:
+- ./configure -p "$PREFIX"
+build-commands:
+- make distro
+install-commands:
+- make install DESTDIR="$DESTDIR"
diff --git a/strata/wayland-generic.morph b/strata/wayland-generic.morph
new file mode 100644
index 00000000..f65fa63e
--- /dev/null
+++ b/strata/wayland-generic.morph
@@ -0,0 +1,18 @@
+name: wayland-generic
+kind: stratum
+build-depends:
+- morph: strata/foundation.morph
+- morph: strata/xorg-util-macros-common.morph
+chunks:
+- name: libxkbcommon
+ morph: strata/wayland-generic/libxkbcommon.morph
+ repo: upstream:xorg-lib-libxkbcommon
+ ref: c43c3c866eb9d52cd8f61e75cbef1c30d07f3a28
+ unpetrify-ref: xkbcommon-0.5.0
+- name: wayland
+ morph: strata/wayland-generic/wayland.morph
+ repo: upstream:wayland
+ ref: 8e9d5a108476b3435a8286613b9a63b69afd92b7
+ unpetrify-ref: 1.7.0
+ build-depends:
+ - libxkbcommon
diff --git a/strata/wayland-generic/libxkbcommon.morph b/strata/wayland-generic/libxkbcommon.morph
new file mode 100644
index 00000000..04048abb
--- /dev/null
+++ b/strata/wayland-generic/libxkbcommon.morph
@@ -0,0 +1,5 @@
+name: libxkbcommon
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./autogen.sh --prefix="$PREFIX" --disable-x11
diff --git a/strata/wayland-generic/wayland.morph b/strata/wayland-generic/wayland.morph
new file mode 100644
index 00000000..752a5d6e
--- /dev/null
+++ b/strata/wayland-generic/wayland.morph
@@ -0,0 +1,7 @@
+name: wayland
+kind: chunk
+description: Wayland server
+build-system: autotools
+configure-commands:
+- autoreconf -ivf
+- ./configure --prefix="$PREFIX" --disable-documentation
diff --git a/strata/webtools.morph b/strata/webtools.morph
new file mode 100644
index 00000000..17a1abda
--- /dev/null
+++ b/strata/webtools.morph
@@ -0,0 +1,21 @@
+name: webtools
+kind: stratum
+description: web things
+build-depends:
+- morph: strata/tools.morph
+- morph: strata/pcre-utils.morph
+chunks:
+- name: icu
+ morph: strata/webtools/icu.morph
+ repo: upstream:icu
+ ref: ba023548a3bff7277cbea4acade3042ce9d8949e
+ unpetrify-ref: baserock/morph
+- name: libgit2
+ repo: upstream:libgit2
+ ref: 4b0a36e881506a02b43a4ae3c19c93c919b36eeb
+ unpetrify-ref: master
+- name: nginx
+ morph: strata/webtools/nginx.morph
+ repo: upstream:nginx
+ ref: 37a582c9ea3e731c115e560d31b26f78535b8fca
+ unpetrify-ref: baserock/v1.7.0
diff --git a/strata/webtools/icu.morph b/strata/webtools/icu.morph
new file mode 100644
index 00000000..37dec07e
--- /dev/null
+++ b/strata/webtools/icu.morph
@@ -0,0 +1,8 @@
+name: icu
+kind: chunk
+configure-commands:
+- cd source; ./runConfigureICU Linux --prefix=/usr
+build-commands:
+- cd source; unset TARGET ; make
+install-commands:
+- cd source; unset TARGET ; make DESTDIR="$DESTDIR" install
diff --git a/strata/webtools/nginx.morph b/strata/webtools/nginx.morph
new file mode 100644
index 00000000..6f7f04ae
--- /dev/null
+++ b/strata/webtools/nginx.morph
@@ -0,0 +1,14 @@
+name: nginx
+kind: chunk
+configure-commands:
+- |
+ ./configure --prefix=$PREFIX \
+ --conf-path=/etc/nginx/nginx.conf \
+ --pid-path=/etc/nginx/nginx.pid \
+ --error-log-path=/var/log/nginx/error.log \
+ --http-log-path=/var/log/nginx/access.log \
+ --with-http_ssl_module
+build-commands:
+- make
+install-commands:
+- make install
diff --git a/strata/weston-common.morph b/strata/weston-common.morph
new file mode 100644
index 00000000..2925d72c
--- /dev/null
+++ b/strata/weston-common.morph
@@ -0,0 +1,14 @@
+name: weston-common
+kind: stratum
+build-depends:
+- morph: strata/graphics-common.morph
+- morph: strata/input-common.morph
+- morph: strata/mesa-common.morph
+- morph: strata/libdrm-common.morph
+- morph: strata/wayland-generic.morph
+chunks:
+- name: weston
+ morph: strata/weston-common/weston.morph
+ repo: upstream:weston
+ ref: 322383ed469d69401b502618718a97a4e85c97d8
+ unpetrify-ref: baserock/weston-1.7.0/tegra
diff --git a/strata/weston-common/weston.morph b/strata/weston-common/weston.morph
new file mode 100644
index 00000000..c49b8d5b
--- /dev/null
+++ b/strata/weston-common/weston.morph
@@ -0,0 +1,7 @@
+name: weston
+kind: chunk
+build-system: autotools
+configure-commands:
+- |
+ ./autogen.sh --prefix="$PREFIX" \
+ --enable-demo-clients-install
diff --git a/strata/weston-genivi.morph b/strata/weston-genivi.morph
new file mode 100644
index 00000000..ea619821
--- /dev/null
+++ b/strata/weston-genivi.morph
@@ -0,0 +1,21 @@
+name: weston-genivi
+kind: stratum
+build-depends:
+- morph: strata/graphics-common.morph
+- morph: strata/input-common.morph
+- morph: strata/mesa-common.morph
+- morph: strata/libdrm-common.morph
+- morph: strata/wayland-generic.morph
+chunks:
+- name: weston
+ morph: strata/weston-genivi/weston.morph
+ repo: upstream:weston
+ ref: 5d7a71cb941ed419ec042ea28be56c87ea407db6
+ unpetrify-ref: baserock/genivi/1.3.0
+- name: wayland-ivi-extension
+ morph: strata/weston-genivi/wayland-ivi-extension.morph
+ repo: upstream:genivi/wayland-ivi-extension
+ ref: 43fa65ede93cef9e6c902df2248229ad204b510a
+ unpetrify-ref: master
+ build-depends:
+ - weston
diff --git a/strata/weston-genivi/wayland-ivi-extension.morph b/strata/weston-genivi/wayland-ivi-extension.morph
new file mode 100644
index 00000000..bceb2a88
--- /dev/null
+++ b/strata/weston-genivi/wayland-ivi-extension.morph
@@ -0,0 +1,8 @@
+name: wayland-ivi-extension
+kind: chunk
+configure-commands:
+- cmake -DCMAKE_INSTALL_PREFIX="$PREFIX" -DCMAKE_C_FLAGS="-I/usr/include/weston"
+build-commands:
+- make
+install-commands:
+- make install DESTDIR="$DESTDIR"
diff --git a/strata/weston-genivi/weston.morph b/strata/weston-genivi/weston.morph
new file mode 100644
index 00000000..810ce5e9
--- /dev/null
+++ b/strata/weston-genivi/weston.morph
@@ -0,0 +1,53 @@
+name: weston
+kind: chunk
+description: weston compositor
+build-system: autotools
+configure-commands:
+- autoreconf -ivf
+- |
+ LDFLAGS='-L/lib64 -lrt' \
+ ./configure --prefix=$PREFIX --disable-xwayland \
+ --disable-x11-compositor \
+ --disable-wayland-compositor --enable-egl \
+ --enable-clients --enable-demo-clients-install --enable-fbdev-compositor
+post-install-commands:
+- install -d "$DESTDIR/usr/share/weston"
+- install -m 0644 data/*.png "$DESTDIR/usr/share/weston/"
+- sed -i 's,/weston.build/data/,/usr/share/weston/,g' ivi-shell/weston.ini
+- sed -i 's,/weston.build/clients/,/usr/bin/,g' ivi-shell/weston.ini
+- sed -i 's,/weston.build/,/usr/libexec/,g' ivi-shell/weston.ini
+- sed -i '/^\[ivi-launcher\]/,$d' ivi-shell/weston.ini
+- |
+ cat <<EOF >> ivi-shell/weston.ini
+ [ivi-launcher]
+ workspace-id=0
+ icon-id=4001
+ icon=/usr/share/weston/icon_ivi_flower.png
+ path=/usr/bin/weston-flower
+
+ [ivi-launcher]
+ workspace-id=0
+ icon-id=4002
+ icon=/usr/share/weston/icon_ivi_clickdot.png
+ path=/usr/bin/weston-clickdot
+
+ [ivi-launcher]
+ workspace-id=1
+ icon-id=4003
+ icon=/usr/share/weston/icon_ivi_simple-egl.png
+ path=/usr/bin/weston-simple-egl
+
+ [ivi-launcher]
+ workspace-id=1
+ icon-id=4004
+ icon=/usr/share/weston/icon_ivi_simple-shm.png
+ path=/usr/bin/weston-simple-shm
+
+ [ivi-launcher]
+ workspace-id=2
+ icon-id=4005
+ icon=/usr/share/weston/icon_ivi_smoke.png
+ path=/usr/bin/weston-smoke
+ EOF
+- install -d "$DESTDIR/usr/share/doc/weston"
+- install -m 0644 ivi-shell/weston.ini "$DESTDIR/usr/share/doc/weston/ivi-shell-weston.ini"
diff --git a/strata/x-common.morph b/strata/x-common.morph
new file mode 100644
index 00000000..69ffdba3
--- /dev/null
+++ b/strata/x-common.morph
@@ -0,0 +1,234 @@
+name: x-common
+kind: stratum
+build-depends:
+- morph: strata/xorg-util-macros-common.morph
+chunks:
+- name: xorg-proto-bigreqsproto
+ repo: upstream:xorg-proto-bigreqsproto
+ ref: d6ed3e927a756900ad4c9fd7235f8f7f34f376db
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-compositeproto
+ repo: upstream:xorg-proto-compositeproto
+ ref: 39738dbe9438dc80fc6b9e221d9ed26a6d42da6b
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-damageproto
+ repo: upstream:xorg-proto-damageproto
+ ref: 015b980e5091492dbe681af59569768ba89fbfe0
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-dmxproto
+ repo: upstream:xorg-proto-dmxproto
+ ref: 395f6fcc0a5635907b5e45829e86b29431316184
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-fixesproto
+ repo: upstream:xorg-proto-fixesproto
+ ref: b6c6bc2aa4b83f8763c75c90e6671052272a2af2
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-fontsproto
+ repo: upstream:xorg-proto-fontsproto
+ ref: df8c05f7c0253a36589d96efa52938215eff9d4d
+ unpetrify-ref: fontsproto-2.1.3
+- name: xorg-proto-glproto
+ repo: upstream:xorg-proto-glproto
+ ref: f84853d97d5749308992412a215fa518b6536eb3
+ unpetrify-ref: glproto-1.4.17
+- name: xorg-proto-inputproto
+ repo: upstream:xorg-proto-inputproto
+ ref: 343ff0938f592876b9d82c966f166bf45a78c3c8
+ unpetrify-ref: inputproto-2.3.1
+- name: xorg-proto-kbproto
+ repo: upstream:xorg-proto-kbproto
+ ref: f7022f5775350dce3348b7151845a32390e98791
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-xineramaproto
+ repo: upstream:xorg-proto-xineramaproto
+ ref: 4e77b45e0d6b42a448dab2ec316eeb5c490ecfed
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-randrproto
+ repo: upstream:xorg-proto-randrproto
+ ref: ca7cc541c2e43e6c784df19b4583ac35829d2f72
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-recordproto
+ repo: upstream:xorg-proto-recordproto
+ ref: 0fd4f8e57c1e637b2aaaaa0f539ddbac8cc50575
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-renderproto
+ repo: upstream:xorg-proto-renderproto
+ ref: 935f5ec95a3718c184ff685f5b79b467483b7844
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-resourceproto
+ repo: upstream:xorg-proto-resourceproto
+ ref: ada91f54c98b5a61d3e116fca6bf239a8604730f
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-scrnsaverproto
+ repo: upstream:xorg-proto-scrnsaverproto
+ ref: 614532026e8ec7496216316fb584d6f2af6a7f7b
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-videoproto
+ repo: upstream:xorg-proto-videoproto
+ ref: e42cf822e230cff5c6550ca2c050dfa27d2c9611
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-xcmiscproto
+ repo: upstream:xorg-proto-xcmiscproto
+ ref: 83549077a3c2140b9862709004cd873f1c55e395
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-xextproto
+ repo: upstream:xorg-proto-xextproto
+ ref: 66afec3f49e8eb0d4c2e9af7088fc3116d4bafd7
+ unpetrify-ref: xextproto-7.3.0
+- name: xorg-proto-xf86bigfontproto
+ repo: upstream:xorg-proto-xf86bigfontproto
+ ref: f805b328b2195de384c0fb6b82ef5f88c179b2c0
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-xf86driproto
+ repo: upstream:xorg-proto-xf86driproto
+ ref: cb03b8d49bf063860859c1ed8bcecd055551e93a
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-x11proto
+ repo: upstream:xorg-proto-x11proto
+ ref: 03cbbf6c3e811c026c86e3a60d2f9af56606e155
+ unpetrify-ref: xproto-7.0.26
+- name: xorg-proto-dri2proto
+ repo: upstream:xorg-proto-dri2proto
+ ref: ead89ad84877551cc15d26b95cb19a3e205df71f
+ unpetrify-ref: baserock/morph
+- name: xorg-proto-dri3proto
+ repo: upstream:xorg-proto-dri3proto
+ ref: 91df0f88b70c268f3580385a7b37543ab8c544c8
+- name: xorg-proto-presentproto
+ repo: upstream:xorg-proto-presentproto
+ ref: ef84007fc4a23d3897b4776906139de9d0698c2a
+- name: xcb-proto
+ repo: upstream:xcb-proto
+ ref: 4b384d2a015c50d0e93dcacda4b8260a3fd37640
+ unpetrify-ref: "1.11"
+- name: xorg-lib-libxshmfence
+ repo: upstream:xorg-lib-libxshmfence
+ ref: 9c4f070e1304a3503cfab08f68573443025fc4c9
+ build-depends:
+ - xorg-proto-x11proto
+- name: xorg-lib-libXau
+ repo: upstream:xorg-lib-libXau
+ ref: 1a8a1b2c68967b48c07b56142799b1020f017027
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xorg-proto-x11proto
+- name: xcb-libxcb
+ repo: upstream:xcb-libxcb
+ ref: d1e8ec96fca4862f37ec9f0e9407bb989c4c161a
+ unpetrify-ref: "1.11"
+ build-depends:
+ - xcb-proto
+ - xorg-lib-libXau
+- name: xcb-util
+ repo: upstream:xcb-util
+ ref: 4de010f122da40e17b52866d07d1d501a66bf007
+ unpetrify-ref: baserock/0.4.0
+ build-depends:
+ - xcb-libxcb
+- name: util-wm
+ repo: upstream:util-wm
+ ref: fb7afc3f291c8cc072d327cd8d97ab1db3283c21
+ unpetrify-ref: baserock/0.4.1
+ build-depends:
+ - xcb-libxcb
+- name: util-keysyms
+ repo: upstream:util-keysyms
+ ref: edb763a8837d3932690b9d6d77cb7e20a9ab8013
+ unpetrify-ref: baserock/0.4.0
+ build-depends:
+ - xcb-libxcb
+- name: util-image
+ repo: upstream:util-image
+ ref: f20f25a1c017c58d5d7dfffc6e9adc8d31879152
+ unpetrify-ref: baserock/0.4.0
+ build-depends:
+ - xcb-libxcb
+ - xcb-util
+- name: xorg-lib-libxtrans
+ repo: upstream:xorg-lib-libxtrans
+ ref: 7cbad9fe2e61cd9d5caeaf361826a6f4bd320f03
+ unpetrify-ref: xtrans-1.3.5
+- name: xorg-lib-libX11
+ repo: upstream:xorg-lib-libX11
+ ref: cb107760df33ffc8630677e66e2e50aa37950a5c
+ unpetrify-ref: libX11-1.6.2
+ build-depends:
+ - xcb-libxcb
+ - xorg-lib-libxtrans
+ - xorg-proto-inputproto
+ - xorg-proto-kbproto
+ - xorg-proto-x11proto
+ - xorg-proto-xextproto
+ - xorg-proto-xf86bigfontproto
+- name: xorg-lib-libXext
+ repo: upstream:xorg-lib-libXext
+ ref: 8eee1236041d46a21faba32e0d27c26985267d89
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xorg-lib-libX11
+ - xorg-lib-libXau
+ - xorg-proto-x11proto
+ - xorg-proto-xextproto
+- name: xorg-lib-libXi
+ repo: upstream:xorg-lib-libXi
+ ref: 9b26b81477cf3486e5aa0ef8d81af68a0f04df1b
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xorg-lib-libX11
+ - xorg-lib-libXext
+ - xorg-proto-inputproto
+ - xorg-proto-x11proto
+ - xorg-proto-xextproto
+- name: xorg-lib-libXfixes
+ repo: upstream:xorg-lib-libXfixes
+ ref: 0cb446962381f750e05d97bfb974ca1e32481d5d
+ unpetrify-ref: libXfixes-5.0.1
+ build-depends:
+ - xorg-lib-libXext
+ - xorg-proto-fixesproto
+- name: xorg-lib-libXrender
+ repo: upstream:xorg-lib-libXrender
+ ref: 1af52cb334377611233d7dc156bc1e6f7923756d
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xorg-lib-libX11
+ - xorg-proto-renderproto
+- name: xorg-lib-libXrandr
+ repo: upstream:xorg-lib-libXrandr
+ ref: 99a63d10cbbab7d69a52d25d78795a3278506ea9
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xorg-lib-libX11
+ - xorg-lib-libXext
+ - xorg-lib-libXrender
+ - xorg-proto-randrproto
+ - xorg-proto-renderproto
+ - xorg-proto-xextproto
+- name: xorg-lib-libXtst
+ repo: upstream:xorg-lib-libXtst
+ ref: 2aafac9474a0a0a0c39797862f823255918cf368
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xorg-lib-libX11
+ - xorg-lib-libXext
+ - xorg-lib-libXi
+ - xorg-proto-inputproto
+ - xorg-proto-recordproto
+ - xorg-proto-xextproto
+- name: xorg-lib-libXdamage
+ repo: upstream:xorg-lib-libXdamage
+ ref: 0d35761dc39409b70e04dd0786aef6537f92976a
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xorg-lib-libXfixes
+ - xorg-proto-damageproto
+- name: xorg-lib-libXcursor
+ repo: upstream:xorg-lib-libXcursor
+ ref: 1b98fd6a2e8c00a563187849a585e68c7344468b
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xorg-lib-libXrender
+ - xorg-lib-libXfixes
+ - xorg-proto-fixesproto
+ - xorg-lib-libXau
+ - xorg-lib-libX11
diff --git a/strata/x-generic.morph b/strata/x-generic.morph
new file mode 100644
index 00000000..27a7c7b6
--- /dev/null
+++ b/strata/x-generic.morph
@@ -0,0 +1,49 @@
+name: x-generic
+kind: stratum
+build-depends:
+- morph: strata/foundation.morph
+- morph: strata/libdrm-common.morph
+- morph: strata/mesa-common.morph
+- morph: strata/x-common.morph
+- morph: strata/graphics-common.morph
+chunks:
+- name: libepoxy
+ repo: upstream:libepoxy
+ ref: 7422de5b4be7b19d789136b3bb5f932de42db27c
+ unpetrify-ref: v1.2
+- name: xorg-lib-libxkbfile
+ repo: upstream:xorg-lib-libxkbfile
+ ref: 7381c2f9013ef7784c78091fa671e652a62ca706
+ unpetrify-ref: baserock/morph
+- name: xorg-font-util
+ repo: upstream:xorg-font-util
+ ref: 5f01ea79f1cb2328bfc4130b1e693f71be916b87
+ unpetrify-ref: baserock/morph
+- name: xorg-lib-libfontenc
+ repo: upstream:xorg-lib-libfontenc
+ ref: f5d1208172e965fdd7fae8927bd3e29b3cc3a975
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xorg-font-util
+- name: xorg-lib-libXfont
+ repo: upstream:xorg-lib-libXfont
+ ref: ad4f4d8a2d0730c0ea3c09210bf921638b4682bc
+ unpetrify-ref: libXfont-1.5.0
+ build-depends:
+ - xorg-lib-libfontenc
+- name: xserver
+ morph: strata/x-generic/xserver.morph
+ repo: upstream:xserver
+ ref: 3b0d1ba2266d2780bfc111bab74885b90458eca4
+ unpetrify-ref: xorg-server-1.17.1
+ build-depends:
+ - libepoxy
+ - xorg-font-util
+ - xorg-lib-libXfont
+ - xorg-lib-libxkbfile
+- name: xorg-app-xkbcomp
+ repo: upstream:xorg-app-xkbcomp
+ ref: 705b9bbb426410f9510601c7010da51184919b36
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xorg-lib-libxkbfile
diff --git a/strata/x-generic/xserver.morph b/strata/x-generic/xserver.morph
new file mode 100644
index 00000000..ca678331
--- /dev/null
+++ b/strata/x-generic/xserver.morph
@@ -0,0 +1,13 @@
+name: xserver
+kind: chunk
+build-system: autotools
+configure-commands:
+- |
+ ./autogen.sh --prefix="$PREFIX" \
+ --disable-glx \
+ --disable-xorg \
+ --disable-xvfb \
+ --disable-xnest \
+ --disable-xquartz \
+ --disable-xwin \
+ --enable-xwayland
diff --git a/strata/xfce.morph b/strata/xfce.morph
new file mode 100644
index 00000000..295abfe1
--- /dev/null
+++ b/strata/xfce.morph
@@ -0,0 +1,164 @@
+name: xfce
+kind: stratum
+description: xfce stratum
+build-depends:
+- morph: strata/gtk2.morph
+- morph: strata/x-generic.morph
+chunks:
+- name: libcroco
+ repo: upstream:libcroco
+ ref: 611f624a6e15065792b41eb5ce14811293f3fb0a
+ unpetrify-ref: baserock/morph
+- name: librsvg
+ morph: strata/xfce/librsvg.morph
+ repo: upstream:librsvg
+ ref: 61171ca59d02b58b7da1697ca1d924bff93e66cb
+ unpetrify-ref: baserock/morph
+- name: xfce-dev-tools
+ repo: upstream:xfce/xfce4-dev-tools
+ ref: 9244250ac0c15ba160688758c5dccf97f3f160ef
+ unpetrify-ref: baserock/morph
+- name: libxfce4util
+ morph: strata/xfce/libxfce4util.morph
+ repo: upstream:xfce/libxfce4util
+ ref: fc0437fe503c3eaa5ccd4fa68f098bc428f27cad
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xfce-dev-tools
+- name: xfconf
+ morph: strata/xfce/xfconf.morph
+ repo: upstream:xfce/xfconf
+ ref: ed6ed2f7656a0d5d8cb453cb643061c9a157f2ae
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xfce-dev-tools
+ - libxfce4util
+- name: libxfce4ui
+ morph: strata/xfce/libxfce4ui.morph
+ repo: upstream:xfce/libxfce4ui
+ ref: a9c2a379709915dfda255d3ca9181fac3f8484d8
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xfce-dev-tools
+ - libxfce4util
+ - xfconf
+- name: garcon
+ morph: strata/xfce/garcon.morph
+ repo: upstream:xfce/garcon
+ ref: febd4136365714556f6afbccf4302e8f4550e5d1
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - libxfce4util
+- name: perl-uri
+ repo: upstream:libwww-perl/uri
+ ref: 3dd72ded7f043a4f6973781fe594722e2e95f491
+ unpetrify-ref: baserock/morph
+- name: exo
+ morph: strata/xfce/exo.morph
+ repo: upstream:xfce/exo
+ ref: b985c4b5c72ef116d55bbf746c16a05f26afc045
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - libxfce4util
+ - libxfce4ui
+ - perl-uri
+- name: libwnck
+ morph: strata/xfce/libwnck.morph
+ repo: upstream:libwnck
+ ref: 6792abcdf27aceba1012406d51606e84b065a526
+ unpetrify-ref: baserock/xfce-build
+- name: xfce4-panel
+ morph: strata/xfce/xfce4-panel.morph
+ repo: upstream:xfce/xfce4-panel
+ ref: 6ac8bfcb481781e8e23b101f5c5fdd70cf6d083b
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - libxfce4ui
+ - garcon
+ - exo
+ - libwnck
+- name: thunar
+ morph: strata/xfce/thunar.morph
+ repo: upstream:xfce/thunar
+ ref: 8289f48c200b91cc1e9932e13defb8e6a4765054
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - libxfce4ui
+ - exo
+ - garcon
+ - libwnck
+- name: xfce4-settings
+ morph: strata/xfce/xfce4-settings.morph
+ repo: upstream:xfce/xfce4-settings
+ ref: 697ce5cd4bf4860a0a20c27b557be296c9adf469
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xfce-dev-tools
+ - xfconf
+ - exo
+ - libxfce4ui
+ - garcon
+- name: iceauth
+ repo: upstream:iceauth
+ ref: c07d443d43e53cfe21b53a9a0eafcd6189432867
+ unpetrify-ref: baserock/morph
+- name: xfce4-session
+ morph: strata/xfce/xfce4-session.morph
+ repo: upstream:xfce/xfce4-session
+ ref: 2c89b2f12409ca617fc094e2942cb0d69318c7b5
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - iceauth
+ - exo
+ - xfce-dev-tools
+ - libxfce4ui
+ - libwnck
+- name: gtk-xfce-engine-2
+ morph: strata/xfce/gtk-xfce-engine-2.morph
+ repo: upstream:xfce/gtk-xfce-engine
+ ref: ed44a71f1a4d76ae850483ece41638bb9a3e7781
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xfce-dev-tools
+- name: xfdesktop
+ morph: strata/xfce/xfdesktop.morph
+ repo: upstream:xfce/xfdesktop
+ ref: d2f7404da32f29cfec4a704389112fe7183edda5
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - exo
+ - libxfce4ui
+ - xfconf
+ - libwnck
+- name: xfwm4
+ morph: strata/xfce/xfwm4.morph
+ repo: upstream:xfce/xfwm4
+ ref: 523ef8a6861af772be3468cd700ad14172c86768
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - exo
+ - libxfce4util
+ - libxfce4ui
+ - xfconf
+ - libwnck
+- name: xfce4-appfinder
+ morph: strata/xfce/xfce4-appfinder.morph
+ repo: upstream:xfce/xfce4-appfinder
+ ref: 5c069f13fde648913a59f022957b2c83b5764f39
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - exo
+ - libxfce4ui
+ - garcon
+- name: tumbler
+ morph: strata/xfce/tumbler.morph
+ repo: upstream:xfce/tumbler
+ ref: f8191f286227a045fbdabd7684c9da81ade518c8
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xfce-dev-tools
+- name: elementary-xfce
+ morph: strata/xfce/elementary-xfce.morph
+ repo: upstream:elementary-xfce
+ ref: 81e683bb46f8ba58c49c60ed75124951b7ba0779
+ unpetrify-ref: baserock/morph
diff --git a/strata/xfce/elementary-xfce.morph b/strata/xfce/elementary-xfce.morph
new file mode 100644
index 00000000..978cfdc4
--- /dev/null
+++ b/strata/xfce/elementary-xfce.morph
@@ -0,0 +1,10 @@
+name: elementary-xfce
+kind: chunk
+install-commands:
+- mkdir -p $DESTDIR/usr/share/icons/
+- cp -R elementary-xfce $DESTDIR/usr/share/icons/
+- cp -R elementary-xfce-* $DESTDIR/usr/share/icons/
+- gtk-update-icon-cache -t -f $DESTDIR/usr/share/icons/elementary-xfce
+- gtk-update-icon-cache -t -f $DESTDIR/usr/share/icons/elementary-xfce-dark
+- gtk-update-icon-cache -t -f $DESTDIR/usr/share/icons/elementary-xfce-darker
+- gtk-update-icon-cache -t -f $DESTDIR/usr/share/icons/elementary-xfce-darkest
diff --git a/strata/xfce/exo.morph b/strata/xfce/exo.morph
new file mode 100644
index 00000000..f5096213
--- /dev/null
+++ b/strata/xfce/exo.morph
@@ -0,0 +1,8 @@
+name: exo
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix="$PREFIX" --enable-debug=yes
+build-commands:
+- make
+install-commands:
+- make install
diff --git a/strata/xfce/garcon.morph b/strata/xfce/garcon.morph
new file mode 100644
index 00000000..569197ff
--- /dev/null
+++ b/strata/xfce/garcon.morph
@@ -0,0 +1,8 @@
+name: garcon
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix="$PREFIX" --enable-debug=yes
+build-commands:
+- make
+install-commands:
+- make install
diff --git a/strata/xfce/gtk-xfce-engine-2.morph b/strata/xfce/gtk-xfce-engine-2.morph
new file mode 100644
index 00000000..cf1c2530
--- /dev/null
+++ b/strata/xfce/gtk-xfce-engine-2.morph
@@ -0,0 +1,8 @@
+name: gtk-xfce-engine-2
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix="$PREFIX" --enable-debug=yes
+build-commands:
+- make
+install-commands:
+- make install
diff --git a/strata/xfce/librsvg.morph b/strata/xfce/librsvg.morph
new file mode 100644
index 00000000..f4d880ed
--- /dev/null
+++ b/strata/xfce/librsvg.morph
@@ -0,0 +1,6 @@
+name: librsvg
+kind: chunk
+build-system: autotools
+configure-commands:
+- NOCONFIGURE=1 ./autogen.sh
+- ./configure --prefix="$PREFIX" --disable-gtk-theme
diff --git a/strata/xfce/libwnck.morph b/strata/xfce/libwnck.morph
new file mode 100644
index 00000000..bb8d7afa
--- /dev/null
+++ b/strata/xfce/libwnck.morph
@@ -0,0 +1,11 @@
+name: libwnck
+kind: chunk
+build-system: autotools
+configure-commands:
+- gdk-pixbuf-query-loaders > loader.cache
+- NOCONFIGURE=1 ./autogen.sh
+- ./configure --prefix="$PREFIX"
+build-commands:
+- GDK_PIXBUF_MODULE_FILE="$(pwd)/loader.cache" make
+install-commands:
+- GDK_PIXBUF_MODULE_FILE="$(pwd)/loader.cache" make install DESTDIR="$DESTDIR"
diff --git a/strata/xfce/libxfce4ui.morph b/strata/xfce/libxfce4ui.morph
new file mode 100644
index 00000000..5fbd342e
--- /dev/null
+++ b/strata/xfce/libxfce4ui.morph
@@ -0,0 +1,8 @@
+name: libxfce4ui
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix=${PREFIX}
+build-commands:
+- make
+install-commands:
+- make install
diff --git a/strata/xfce/libxfce4util.morph b/strata/xfce/libxfce4util.morph
new file mode 100644
index 00000000..60a59126
--- /dev/null
+++ b/strata/xfce/libxfce4util.morph
@@ -0,0 +1,8 @@
+name: libxfce4util
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix="$PREFIX" --enable-debug=yes
+build-commands:
+- make
+install-commands:
+- make install
diff --git a/strata/xfce/thunar.morph b/strata/xfce/thunar.morph
new file mode 100644
index 00000000..51f76ac5
--- /dev/null
+++ b/strata/xfce/thunar.morph
@@ -0,0 +1,9 @@
+name: thunar
+kind: chunk
+configure-commands:
+- gdk-pixbuf-query-loaders > loader.cache
+- ./autogen.sh --prefix="$PREFIX" --enable-debug=yes
+build-commands:
+- GDK_PIXBUF_MODULE_FILE="$(pwd)/loader.cache" make
+install-commands:
+- GDK_PIXBUF_MODULE_FILE="$(pwd)/loader.cache" make install
diff --git a/strata/xfce/tumbler.morph b/strata/xfce/tumbler.morph
new file mode 100644
index 00000000..fe27a783
--- /dev/null
+++ b/strata/xfce/tumbler.morph
@@ -0,0 +1,8 @@
+name: tumbler
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix="$PREFIX" --enable-debug=yes
+build-commands:
+- make
+install-commands:
+- make install
diff --git a/strata/xfce/xfce4-appfinder.morph b/strata/xfce/xfce4-appfinder.morph
new file mode 100644
index 00000000..f8d1fcbe
--- /dev/null
+++ b/strata/xfce/xfce4-appfinder.morph
@@ -0,0 +1,8 @@
+name: xfce4-appfinder
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix=${PREFIX}
+build-commands:
+- make
+install-commands:
+- make install
diff --git a/strata/xfce/xfce4-panel.morph b/strata/xfce/xfce4-panel.morph
new file mode 100644
index 00000000..fe85e172
--- /dev/null
+++ b/strata/xfce/xfce4-panel.morph
@@ -0,0 +1,8 @@
+name: xfce4-panel
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix="$PREFIX" --enable-debug=yes
+build-commands:
+- make
+install-commands:
+- make install
diff --git a/strata/xfce/xfce4-session.morph b/strata/xfce/xfce4-session.morph
new file mode 100644
index 00000000..87ffac22
--- /dev/null
+++ b/strata/xfce/xfce4-session.morph
@@ -0,0 +1,9 @@
+name: xfce4-session
+kind: chunk
+configure-commands:
+- gdk-pixbuf-query-loaders > loader.cache
+- ./autogen.sh --prefix="$PREFIX" --enable-debug=yes
+build-commands:
+- GDK_PIXBUF_MODULE_FILE="$(pwd)/loader.cache" make
+install-commands:
+- GDK_PIXBUF_MODULE_FILE="$(pwd)/loader.cache" make install
diff --git a/strata/xfce/xfce4-settings.morph b/strata/xfce/xfce4-settings.morph
new file mode 100644
index 00000000..6063dbc1
--- /dev/null
+++ b/strata/xfce/xfce4-settings.morph
@@ -0,0 +1,8 @@
+name: xfce4-settings
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix="$PREFIX" --enable-debug=yes
+build-commands:
+- make
+install-commands:
+- make install
diff --git a/strata/xfce/xfconf.morph b/strata/xfce/xfconf.morph
new file mode 100644
index 00000000..cfbe741b
--- /dev/null
+++ b/strata/xfce/xfconf.morph
@@ -0,0 +1,8 @@
+name: xfconf
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix=${PREFIX}
+build-commands:
+- make
+install-commands:
+- make install
diff --git a/strata/xfce/xfdesktop.morph b/strata/xfce/xfdesktop.morph
new file mode 100644
index 00000000..4af5a905
--- /dev/null
+++ b/strata/xfce/xfdesktop.morph
@@ -0,0 +1,8 @@
+name: xfdesktop
+kind: chunk
+configure-commands:
+- ./autogen.sh --prefix="$PREFIX" --enable-debug=yes
+build-commands:
+- make
+install-commands:
+- make install
diff --git a/strata/xfce/xfwm4.morph b/strata/xfce/xfwm4.morph
new file mode 100644
index 00000000..0c4e2273
--- /dev/null
+++ b/strata/xfce/xfwm4.morph
@@ -0,0 +1,11 @@
+name: xfwm4
+kind: chunk
+configure-commands:
+- gdk-pixbuf-query-loaders > loader.cache
+- ./autogen.sh --prefix="$PREFIX" --enable-debug=yes
+build-commands:
+- GDK_PIXBUF_MODULE_FILE="$(pwd)/loader.cache" make
+install-commands:
+- GDK_PIXBUF_MODULE_FILE="$(pwd)/loader.cache" make install
+- install -m 0644 index.theme $DESTDIR/usr/share/icons/hicolor
+- gtk-update-icon-cache $DESTDIR/usr/share/icons/hicolor
diff --git a/strata/xorg-util-macros-common.morph b/strata/xorg-util-macros-common.morph
new file mode 100644
index 00000000..909c9420
--- /dev/null
+++ b/strata/xorg-util-macros-common.morph
@@ -0,0 +1,15 @@
+name: xorg-util-macros-common
+kind: stratum
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: xorg-util-macros
+ repo: upstream:xorg-util-macros
+ ref: 9a54b858601bd305de2737b06e609084a2a114c2
+ unpetrify-ref: util-macros-1.19.0
+- name: xcb-pthread-stubs
+ repo: upstream:xcb-pthread-stubs
+ ref: 431d2c0be218d878b9dd3862e4232243c599df4b
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - xorg-util-macros
diff --git a/strata/xstatic.morph b/strata/xstatic.morph
new file mode 100644
index 00000000..bf21a296
--- /dev/null
+++ b/strata/xstatic.morph
@@ -0,0 +1,114 @@
+name: xstatic
+kind: stratum
+description: |
+ Stratum with XStatic and some XStatic libraries.
+
+ XStatic is a packaging standard for packaging external static files as
+ a Python package, so that they are easily usable on all OSes.
+
+ This conflicts with the 'everything from Git' policy of Baserock,
+ but it is required right now for the OpenStack system to work.
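+# Editor's note (illustrative only; exact attribute names may vary): an
+# XStatic package is consumed from Python roughly as
+#   from xstatic.pkg import jquery
+#   jquery.BASE_DIR   # directory holding the bundled static files
+# which is how consumers such as Horizon locate these assets at runtime.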
+build-depends:
+- morph: strata/foundation.morph
+- morph: strata/python-core.morph
+chunks:
+- name: xstatic
+ repo: upstream:xstatic
+ ref: 6c8604a422ba7f176ce6b372a8e182c293d06fb2
+ unpetrify-ref: 1.0.1
+- name: xstatic-angular
+ repo: upstream:xstatic-packages/xstatic-angular
+ ref: dac047df05da5bb20de4e78876bc03820d0c6671
+ unpetrify-ref: 1.3.7.0
+- name: xstatic-angular-animate
+ repo: upstream:xstatic-packages/xstatic-angular-animate
+ ref: 91b9d3197f67213e4e35e9a35ba7196b898731e7
+ unpetrify-ref: master
+- name: xstatic-angular-bootstrap
+ repo: upstream:xstatic-packages/xstatic-angular-bootstrap
+ ref: 2a8c157b8b0733afbfdaf7eea29cf012a8b7f483
+ unpetrify-ref: master
+- name: xstatic-angular-cookies
+ repo: upstream:xstatic-packages/xstatic-angular-cookies
+ ref: 66141a33ae7ad84b82ba9384a8101ae15ccd6da5
+ unpetrify-ref: 1.2.1.1
+- name: xstatic-angular-fileupload
+ repo: upstream:xstatic-packages/xstatic-angular-fileupload
+ ref: 04bcd774dae17cfff5e826fc0874f6403b4a6cae
+ unpetrify-ref: master
+- name: xstatic-angular-mock
+ repo: upstream:xstatic-packages/xstatic-angular-mock
+ ref: 20fdada109b167bcdcacec22986b43fdb42866f3
+ unpetrify-ref: 1.2.1.1
+- name: xstatic-angular-sanitize
+ repo: upstream:xstatic-packages/xstatic-angular-sanitize
+ ref: 9e0f03ada6fc00f8583903aeb6d9fc6516784999
+ unpetrify-ref: master
+- name: xstatic-angular-smart-table
+ repo: upstream:xstatic-packages/xstatic-angular-smart-table
+ ref: e0fbf69fb156b995547471d71c5c6a7f88989d47
+ unpetrify-ref: master
+- name: xstatic-bootstrap-datepicker
+ repo: upstream:xstatic-packages/xstatic-bootstrap-datepicker
+ ref: 60caf41e32d49f9bc715c39570f01449ebfbd37a
+ unpetrify-ref: master
+- name: xstatic-bootstrap-scss
+ repo: upstream:xstatic-packages/xstatic-bootstrap-scss
+ ref: e8ca8e38e464691268faec8cd6ae59777ee7ee9e
+ unpetrify-ref: master
+- name: xstatic-d3
+ repo: upstream:xstatic-packages/xstatic-d3
+ ref: 1e754e73c801fb9315995ffeb95ae51233c4fcaf
+ unpetrify-ref: master
+- name: xstatic-hogan
+ repo: upstream:xstatic-packages/xstatic-hogan
+ ref: 9e39977f6a6744810b08fa0323147e9f31dbd363
+ unpetrify-ref: master
+- name: xstatic-font-awesome
+ repo: upstream:xstatic-packages/xstatic-font-awesome
+ ref: c13aad1b95b1d84e73f2565a18a5a8e2bd2194a7
+ unpetrify-ref: 4.2.0.0
+- name: xstatic-jasmine
+ repo: upstream:xstatic-packages/xstatic-jasmine
+ ref: a55f5db1dbe59b23c7d6284250bf4eac1d0143c3
+ unpetrify-ref: 2.1.2.0
+- name: xstatic-jquery
+ repo: upstream:xstatic-packages/xstatic-jquery
+ ref: e91c5bf82535a62c1cfac61b5efb6b5c0157c1f5
+ unpetrify-ref: master
+- name: xstatic-jquery-bootstrap-wizard
+ repo: upstream:xstatic-packages/xstatic-jquery-bootstrap-wizard
+ ref: 07ae7e3a15b3af3d3d378d1f2412fe503ee42142
+ unpetrify-ref: master
+- name: xstatic-jquery-migrate
+ repo: upstream:xstatic-packages/xstatic-jquery-migrate
+ ref: 989b3b31106727542dd83810c3b952f90d8cdb8f
+ unpetrify-ref: master
+- name: xstatic-jquery-quicksearch
+ repo: upstream:xstatic-packages/xstatic-jquery-quicksearch
+ ref: f5221c8c30507340846d97d6db41a782e7c63316
+ unpetrify-ref: master
+- name: xstatic-jquery-tablesorter
+ repo: upstream:xstatic-packages/xstatic-jquery-tablesorter
+ ref: eb78328391f44a9a88033e1aede1a605902c5551
+ unpetrify-ref: master
+- name: xstatic-jquery-ui
+ repo: upstream:xstatic-packages/xstatic-jquery-ui
+ ref: b494369430dafd8ac4ddbe90efb9f8ad20a6e6d1
+ unpetrify-ref: 1.11.0.1
+- name: xstatic-jsencrypt
+ repo: upstream:xstatic-packages/xstatic-jsencrypt
+ ref: 5c0f088310ecd602e3aaf5e683385b0d27258409
+ unpetrify-ref: master
+- name: xstatic-qunit
+ repo: upstream:xstatic-packages/xstatic-qunit
+ ref: c3189eaa77c68c149a40b8c0afc8722cf394bd1f
+ unpetrify-ref: master
+- name: xstatic-rickshaw
+ repo: upstream:xstatic-packages/xstatic-rickshaw
+ ref: 6427ca5406852bc779d6918f487bb0fe3f063e77
+ unpetrify-ref: master
+- name: xstatic-spin
+ repo: upstream:xstatic-packages/xstatic-spin
+ ref: 74b4c0d0ff12db1f84787246857d5e925ff6883f
+ unpetrify-ref: master
diff --git a/strata/zookeeper-client.morph b/strata/zookeeper-client.morph
new file mode 100644
index 00000000..83ce2e05
--- /dev/null
+++ b/strata/zookeeper-client.morph
@@ -0,0 +1,16 @@
+name: zookeeper-client
+kind: stratum
+description: |
+ This stratum installs a small demonstration program for the
+ client side of ZooKeeper. The program can take up to two
+ arguments:
+ (1) The IP address of the ZooKeeper server to connect to
+ (2) The type of client that this will be (default client
+ types are typeOneNode & typeTwoNode)
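+# Editor's note: for reference, the unit file installed by the chunk morph
+# below (strata/zookeeper/zookeeper-client.morph) starts the program as:
+#   ExecStart=/usr/zookeeper-client/ZKTest/Release/ZKTest nodeTypeOne 10.24.1.198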
+build-depends:
+- morph: strata/zookeeper.morph
+chunks:
+- name: zookeeper-client
+ morph: strata/zookeeper/zookeeper-client.morph
+ repo: baserock:tests/zookeeper-test
+ ref: master
diff --git a/strata/zookeeper-server.morph b/strata/zookeeper-server.morph
new file mode 100644
index 00000000..97acd2ca
--- /dev/null
+++ b/strata/zookeeper-server.morph
@@ -0,0 +1,10 @@
+name: zookeeper-server
+kind: stratum
+description: This stratum installs a ZooKeeper server and a small program to populate it with default data.
+build-depends:
+- morph: strata/zookeeper.morph
+chunks:
+- name: zookeeper-server
+ morph: strata/zookeeper/zookeeper-server.morph
+ repo: baserock:tests/zookeeper-test
+ ref: master
diff --git a/strata/zookeeper.morph b/strata/zookeeper.morph
new file mode 100644
index 00000000..2baf6f58
--- /dev/null
+++ b/strata/zookeeper.morph
@@ -0,0 +1,26 @@
+name: zookeeper
+kind: stratum
+description: |
+ This stratum installs ZooKeeper and its dependencies.
+build-depends:
+- morph: strata/core.morph
+- morph: strata/test-tools.morph
+chunks:
+- name: java-binary
+ morph: strata/zookeeper/java-binary.morph
+ repo: github:franred/gerrit-installation-binaries
+ ref: ef262c635890f19eaff8ef6bbd831ee9b0d8693e
+ unpetrify-ref: master
+- name: java-ant
+ morph: strata/zookeeper/java-ant.morph
+ repo: upstream:java/ant
+ ref: master
+ build-depends:
+ - java-binary
+- name: zookeeper
+ morph: strata/zookeeper/zookeeper.morph
+ repo: upstream:zookeeper
+ ref: baserock/mikesmith/zookeeper
+ unpetrify-ref: trunk
+ build-depends:
+ - java-ant
diff --git a/strata/zookeeper/java-ant.morph b/strata/zookeeper/java-ant.morph
new file mode 100644
index 00000000..37d402b0
--- /dev/null
+++ b/strata/zookeeper/java-ant.morph
@@ -0,0 +1,8 @@
+name: ant
+kind: chunk
+build-commands:
+- |
+ export JAVA_HOME=/usr/lib/jdk1.8.0_20
+ sh build.sh -Ddist.dir="$DESTDIR/usr/lib/ant" dist
+- mkdir -p "${DESTDIR}${PREFIX}/bin"
+- ln -sf "${PREFIX}/lib/ant/bin/ant" "${DESTDIR}${PREFIX}/bin/ant"
diff --git a/strata/zookeeper/java-binary.morph b/strata/zookeeper/java-binary.morph
new file mode 100644
index 00000000..a6c11f95
--- /dev/null
+++ b/strata/zookeeper/java-binary.morph
@@ -0,0 +1,11 @@
+name: java-binary
+kind: chunk
+configure-commands: []
+build-commands:
+- cat jdk-8u20-linux-x64.tar.gz_* > jdk-8u20-linux-x64.tar.gz
+install-commands:
+- mkdir -p "$DESTDIR$PREFIX"/bin
+- mkdir -p "$DESTDIR$PREFIX"/lib
+- tar zxf jdk-8u20-linux-x64.tar.gz -C "$DESTDIR$PREFIX"/lib
+- unzip jce_policy-8.zip -d "$DESTDIR$PREFIX"/lib/jdk1.8.0_20/jre/lib/security
+- ln -sfn "$PREFIX"/lib/jdk1.8.0_20/jre/bin/java "$DESTDIR$PREFIX"/bin/
diff --git a/strata/zookeeper/zookeeper-client.morph b/strata/zookeeper/zookeeper-client.morph
new file mode 100644
index 00000000..591c1840
--- /dev/null
+++ b/strata/zookeeper/zookeeper-client.morph
@@ -0,0 +1,17 @@
+name: zookeeper-client
+kind: chunk
+build-commands:
+ - mkdir -p "$DESTDIR$PREFIX"/zookeeper-client
+ - cp -r * "$DESTDIR$PREFIX"/zookeeper-client
+ - make -C "$DESTDIR$PREFIX"/zookeeper-client/ZKTest/Release/
+post-install-commands:
+ - |
+ install -D -m 644 /proc/self/fd/0 << 'EOF' "$DESTDIR"/etc/systemd/system/zookeeper-client.service
+ [Unit]
+ Description=Zookeeper client init
+ [Service]
+ ExecStart=/usr/zookeeper-client/ZKTest/Release/ZKTest nodeTypeOne 10.24.1.198
+ RemainAfterExit=yes
+ [Install]
+ WantedBy=multi-user.target
+ EOF
diff --git a/strata/zookeeper/zookeeper-server.morph b/strata/zookeeper/zookeeper-server.morph
new file mode 100644
index 00000000..082c063c
--- /dev/null
+++ b/strata/zookeeper/zookeeper-server.morph
@@ -0,0 +1,30 @@
+name: zookeeper-server
+kind: chunk
+build-commands:
+ - mkdir -p "$DESTDIR$PREFIX"/zookeeper_server
+ - cp -r * "$DESTDIR$PREFIX"/zookeeper_server
+ - make -C "$DESTDIR$PREFIX"/zookeeper_server/zkServerFileSetup/Release/
+post-install-commands:
+ - |
+ install -D -m 644 /proc/self/fd/0 << 'EOF' "$DESTDIR"/etc/systemd/system/zookeeper-server.service
+ [Unit]
+ Description=Zookeeper server
+ [Service]
+ ExecStart=/usr/zookeeper/bin/zkServer.sh start
+ RemainAfterExit=yes
+ [Install]
+ WantedBy=multi-user.target
+ EOF
+ - |
+ install -D -m 644 /proc/self/fd/0 << 'EOF' "$DESTDIR"/etc/systemd/system/zookeeper-init.service
+ [Unit]
+ Description=Zookeeper server init
+ [Service]
+ ExecStart=/usr/zookeeper_server/zkServerFileSetup/Release/zkServerFileSetup
+ RemainAfterExit=no
+ [Install]
+ WantedBy=multi-user.target
+ EOF
+ - mkdir "$DESTDIR"/etc/systemd/system/multi-user.target.wants
+ - ln -s /etc/systemd/system/zookeeper-server.service "$DESTDIR"/etc/systemd/system/multi-user.target.wants/zookeeper-server.service
+ - ln -s /etc/systemd/system/zookeeper-init.service "$DESTDIR"/etc/systemd/system/multi-user.target.wants/zookeeper-init.service
diff --git a/strata/zookeeper/zookeeper.morph b/strata/zookeeper/zookeeper.morph
new file mode 100644
index 00000000..dbe6ccbb
--- /dev/null
+++ b/strata/zookeeper/zookeeper.morph
@@ -0,0 +1,14 @@
+name: zookeeper
+kind: chunk
+configure-commands:
+ - mkdir -p "$DESTDIR$PREFIX"/lib/zookeeper
+build-commands:
+ - ant -p compile_jute
+ - mkdir -p "$DESTDIR$PREFIX"/zookeeper
+ - cp -r * "$DESTDIR$PREFIX"/zookeeper
+ - cd "$DESTDIR$PREFIX"/zookeeper/src/c && autoreconf -i
+ - cd "$DESTDIR$PREFIX"/zookeeper/src/c && ./configure --prefix="$PREFIX" --libdir="$PREFIX"/lib/
+ - make -C "$DESTDIR$PREFIX"/zookeeper/src/c
+ - cd "$DESTDIR$PREFIX"/zookeeper/src/c && make install
+ - mv "$DESTDIR$PREFIX"/zookeeper/conf/zoo_sample.cfg "$DESTDIR$PREFIX"/zookeeper/conf/zoo.cfg
+ - make -C "$DESTDIR$PREFIX"/zookeeper/src/c zktest-mt
diff --git a/strip-gplv3.configure b/strip-gplv3.configure
new file mode 100755
index 00000000..c08061ad
--- /dev/null
+++ b/strip-gplv3.configure
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+# Copyright (C) 2013 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+'''A Morph configuration extension for removing GPLv3 chunks from a system.
+
+Using a hard-coded list of chunks, it reads the system's /baserock metadata
+to find the files created by each chunk, then removes them.
+
+'''
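+# Editor's sketch of the metadata this extension relies on (file name and
+# paths below are made up; the real files written by Morph may carry more
+# fields, and only the "contents" list is used here):
+#
+#   /baserock/gawk-bins.meta:
+#     {
+#       "contents": [
+#         "/usr/bin/gawk",
+#         "/usr/share/man/man1/gawk.1"
+#       ]
+#     }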
+
+import cliapp
+import re
+import os
+import json
+
+class StripGPLv3ConfigureExtension(cliapp.Application):
+ gplv3_chunks = [
+ ['autoconf', ''],
+ ['automake', ''],
+ ['bash', ''],
+ ['binutils', ''],
+ ['bison', ''],
+ ['ccache', ''],
+ ['cmake', ''],
+ ['flex', ''],
+ ['gawk', ''],
+ ['gcc', r'^.*lib.*\.so(\.\d+)*$'],
+ ['gdbm', ''],
+ ['gettext', ''],
+ ['gperf', ''],
+ ['groff', ''],
+ ['libtool', r'^.*lib.*\.so(\.\d+)*$'],
+ ['m4', ''],
+ ['make', ''],
+ ['nano', ''],
+ ['patch', ''],
+ ['rsync', ''],
+ ['texinfo-tarball', ''],
+ ]
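+ # Editor's note on the table above: the second element of each pair is a
+ # regex of paths to keep for that chunk; matching entries survive (e.g. the
+ # gcc and libtool shared libraries), while everything else listed in the
+ # chunk's metadata is removed by remove_chunk() below.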
+
+ def process_args(self, args):
+ target_root = args[0]
+ meta_dir = os.path.join(target_root, 'baserock')
+
+ for chunk in self.gplv3_chunks:
+ regex = os.path.join(meta_dir, "%s-[^-]\+\.meta" % chunk[0])
+ artifacts = self.runcmd(['find', meta_dir, '-regex', regex])
+
+ for artifact in artifacts.split():
+ self.remove_chunk(target_root, artifact, chunk[1])
+
+ os.symlink(os.path.join(os.sep, 'bin', 'busybox'),
+ os.path.join(target_root, 'usr', 'bin', 'awk'))
+
+ def remove_chunk(self, target_root, chunk, pattern):
+ chunk_meta_path = os.path.join(target_root, 'baserock', chunk)
+
+ with open(chunk_meta_path, 'r') as f:
+ chunk_meta_data = json.load(f)
+
+ if not 'contents' in chunk_meta_data:
+ raise cliapp.AppException('Chunk %s does not have a "contents" list'
+ % chunk)
+ updated_contents = []
+ for content_entry in reversed(chunk_meta_data['contents']):
+ pat = re.compile(pattern)
+ if len(pattern) == 0 or not pat.match(content_entry):
+ self.remove_content_entry(target_root, content_entry)
+ else:
+ updated_contents.append(content_entry)
+
+ def remove_content_entry(self, target_root, content_entry):
+ entry_path = os.path.join(target_root, './' + content_entry)
+ if not entry_path.startswith(target_root):
+ raise cliapp.AppException('%s is not in %s'
+ % (entry_path, target_root))
+ if os.path.exists(entry_path):
+ if os.path.islink(entry_path):
+ os.unlink(entry_path)
+ elif os.path.isfile(entry_path):
+ os.remove(entry_path)
+ elif os.path.isdir(entry_path):
+ if not os.listdir(entry_path):
+ os.rmdir(entry_path)
+ else:
+ raise cliapp.AppException('%s is not a link, file or directory'
+ % entry_path)
+StripGPLv3ConfigureExtension().run()
diff --git a/swift-build-rings.yml b/swift-build-rings.yml
new file mode 100644
index 00000000..1ffe9c37
--- /dev/null
+++ b/swift-build-rings.yml
@@ -0,0 +1,34 @@
+---
+- hosts: localhost
+ vars:
+ - rings:
+ - { name: account, port: 6002 }
+ - { name: container, port: 6001 }
+ - { name: object, port: 6000 }
+ remote_user: root
+ tasks:
+ - file: path={{ ansible_env.ROOT }}/etc/swift owner=root group=root state=directory
+
+ - name: Create ring
+ shell: swift-ring-builder {{ item.name }}.builder create {{ ansible_env.SWIFT_PART_POWER }}
+ {{ ansible_env.SWIFT_REPLICAS }} {{ ansible_env.SWIFT_MIN_PART_HOURS }}
+ with_items: rings
+
+ - name: Add each storage node to the ring
+ shell: swift-ring-builder {{ item[0].name }}.builder
+ add r1z1-{{ item[1].ip }}:{{ item[0].port }}/{{ item[1].device }} {{ item[1].weight }}
+ with_nested:
+ - rings
+ - ansible_env.SWIFT_STORAGE_DEVICES
+
+ - name: Rebalance the ring
+ shell: swift-ring-builder {{ item.name }}.builder rebalance {{ ansible_env.SWIFT_REBALANCE_SEED }}
+ with_items: rings
+
+ - name: Copy ring configuration files into place
+ copy: src={{ item.name }}.ring.gz dest={{ ansible_env.ROOT }}/etc/swift
+ with_items: rings
+
+ - name: Copy ring builder files into place
+ copy: src={{ item.name }}.builder dest={{ ansible_env.ROOT }}/etc/swift
+ with_items: rings
diff --git a/swift-storage-devices-validate.py b/swift-storage-devices-validate.py
new file mode 100755
index 00000000..57ab23d0
--- /dev/null
+++ b/swift-storage-devices-validate.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright © 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# This is used by the openstack-swift.configure extension
+# to validate any provided storage device specifiers
+# under SWIFT_STORAGE_DEVICES
+#
+
+
+'''
+ This is used by the swift-storage.configure extension
+ to validate any storage device specifiers given
+ in the SWIFT_STORAGE_DEVICES environment variable
+'''
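+# Editor's example (device names and addresses are made up); this matches how
+# swift-storage.configure invokes the script:
+#
+#   ./swift-storage-devices-validate.py \
+#       '[{device: sdb1, ip: 192.168.0.10, weight: 100},
+#         {device: sdc1, ip: 192.168.0.10, weight: 100}]'
+#
+# It exits 0 when every entry is a mapping with exactly the keys ip, device
+# and weight, and prints a diagnostic to stderr and exits 1 otherwise.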
+
+from __future__ import print_function
+
+import yaml
+import sys
+
+EXAMPLE_DEVSPEC = '{device: sdb1, ip: 127.0.0.1, weight: 100}'
+REQUIRED_KEYS = ['ip', 'device', 'weight']
+
+def err(msg):
+ print(msg, file=sys.stderr)
+ sys.exit(1)
+
+if len(sys.argv) != 2:
+ err('usage: %s STRING_TO_BE_VALIDATED' % sys.argv[0])
+
+swift_storage_devices = yaml.load(sys.argv[1])
+
+if not isinstance(swift_storage_devices, list):
+ err('Expected list of device specifiers\n'
+ 'Example: [%s]' % EXAMPLE_DEVSPEC)
+
+for d in swift_storage_devices:
+ if not isinstance(d, dict):
+ err("Invalid device specifier: `%s'\n"
+ 'Device specifier must be a dictionary\n'
+ 'Example: %s' % (d, EXAMPLE_DEVSPEC))
+
+ if set(d.keys()) != set(REQUIRED_KEYS):
+ err("Invalid device specifier: `%s'\n"
+ 'Specifier should contain: %s\n'
+ 'Example: %s' % (d, str(REQUIRED_KEYS)[1:-1], EXAMPLE_DEVSPEC))
diff --git a/swift-storage.configure b/swift-storage.configure
new file mode 100644
index 00000000..391b392a
--- /dev/null
+++ b/swift-storage.configure
@@ -0,0 +1,107 @@
+#!/bin/bash
+#
+# Copyright © 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+# The ansible script needs to know where the rootfs is, so we export it here
+export ROOT="$1"
+
+validate_number() {
+ local name="$1"
+ local value="$2"
+
+ local pattern='^[0-9]+$'
+ if ! [[ $value =~ $pattern ]]
+ then
+ echo "'$name' must be a number" >&2
+ exit 1
+ fi
+}
+
+validate_non_empty() {
+ local name="$1"
+ local value="$2"
+
+ if [[ $value = None ]]
+ then
+ echo "'$name' cannot be empty" >&2
+ exit 1
+ fi
+}
+
+MANDATORY_OPTIONS="SWIFT_HASH_PATH_PREFIX \
+ SWIFT_HASH_PATH_SUFFIX \
+ SWIFT_REBALANCE_SEED \
+ SWIFT_PART_POWER \
+ SWIFT_REPLICAS \
+ SWIFT_MIN_PART_HOURS \
+ SWIFT_STORAGE_DEVICES \
+ CONTROLLER_HOST_ADDRESS \
+ MANAGEMENT_INTERFACE_IP_ADDRESS"
+
+for option in $MANDATORY_OPTIONS
+do
+ if ! [[ -v $option ]]
+ then
+ missing_option=True
+ echo "Required option $option isn't set!" >&2
+ fi
+done
+
+if [[ $missing_option = True ]]; then exit 1; fi
+
+./swift-storage-devices-validate.py "$SWIFT_STORAGE_DEVICES"
+
+# Validate SWIFT_PART_POWER, SWIFT_REPLICAS, SWIFT_MIN_PART_HOURS
+# just make sure they're numbers
+
+validate_number "SWIFT_PART_POWER" "$SWIFT_PART_POWER"
+validate_number "SWIFT_REPLICAS" "$SWIFT_REPLICAS"
+validate_number "SWIFT_MIN_PART_HOURS" "$SWIFT_MIN_PART_HOURS"
+
+# Make sure these aren't empty
+validate_non_empty "SWIFT_HASH_PATH_PREFIX" "$SWIFT_HASH_PATH_PREFIX"
+validate_non_empty "SWIFT_HASH_PATH_SUFFIX" "$SWIFT_HASH_PATH_SUFFIX"
+validate_non_empty "SWIFT_REBALANCE_SEED" "$SWIFT_REBALANCE_SEED"
+validate_non_empty "CONTROLLER_HOST_ADDRESS" "$CONTROLLER_HOST_ADDRESS"
+validate_non_empty "MANAGEMENT_INTERFACE_IP_ADDRESS" "$MANAGEMENT_INTERFACE_IP_ADDRESS"
+
+mkdir -p "$ROOT/usr/lib/systemd/system/multi-user.target.wants" # ensure this exists before we make symlinks
+
+# A swift controller needs the storage setup service
+# but does not want any of the other storage services enabled
+ln -s "/usr/lib/systemd/system/swift-storage-setup.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-storage-setup.service"
+
+SWIFT_CONTROLLER=${SWIFT_CONTROLLER:-False}
+
+if [[ $SWIFT_CONTROLLER = False ]]
+then
+ ln -s "/usr/lib/systemd/system/rsync.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/rsync.service"
+ ln -s "/usr/lib/systemd/system/swift-storage.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-storage.service"
+fi
+
+# Build swift data structures (the rings)
+/usr/bin/ansible-playbook -i hosts swift-build-rings.yml
+
+cat << EOF > "$ROOT"/usr/share/swift/swift-storage-vars.yml
+---
+MANAGEMENT_INTERFACE_IP_ADDRESS: $MANAGEMENT_INTERFACE_IP_ADDRESS
+SWIFT_HASH_PATH_PREFIX: $SWIFT_HASH_PATH_PREFIX
+SWIFT_HASH_PATH_SUFFIX: $SWIFT_HASH_PATH_SUFFIX
+EOF
diff --git a/swift/etc/ntp.conf b/swift/etc/ntp.conf
new file mode 100644
index 00000000..54522871
--- /dev/null
+++ b/swift/etc/ntp.conf
@@ -0,0 +1,25 @@
+{% if SWIFT_CONTROLLER is undefined or SWIFT_CONTROLLER == "False" %}
+server {{ CONTROLLER_HOST_ADDRESS }} iburst
+{% else %}
+# We use iburst here to reduce the potential initial delay to set the clock
+server 0.pool.ntp.org iburst
+server 1.pool.ntp.org iburst
+server 2.pool.ntp.org iburst
+server 3.pool.ntp.org iburst
+
+# kod - notify client when packets are denied service,
+# rather than just dropping the packets
+#
+# nomodify - deny queries which attempt to modify the state of the server
+#
+# notrap - decline to provide mode 6 control message trap service to
+# matching hosts
+#
+# see ntp.conf(5) for more details
+restrict -4 default kod notrap nomodify
+restrict -6 default kod notrap nomodify
+{% endif %}
+
+# The default rlimit isn't enough in some cases
+# so we set a higher limit here
+rlimit memlock 256
diff --git a/swift/manifest b/swift/manifest
new file mode 100644
index 00000000..7fd76206
--- /dev/null
+++ b/swift/manifest
@@ -0,0 +1,15 @@
+0040755 0 0 /usr/share
+0040755 0 0 /usr/share/swift
+0100644 0 0 /usr/share/swift/hosts
+0100644 0 0 /usr/share/swift/swift-storage.yml
+0040755 0 0 /usr/share/swift/etc
+0040755 0 0 /usr/share/swift/etc/swift
+0100644 0 0 /usr/share/swift/etc/swift/account-server.j2
+0100644 0 0 /usr/share/swift/etc/swift/swift.j2
+0100644 0 0 /usr/share/swift/etc/swift/object-server.j2
+0100644 0 0 /usr/share/swift/etc/swift/container-server.j2
+0100644 0 0 /usr/share/swift/etc/rsyncd.j2
+0100644 0 0 /usr/lib/systemd/system/swift-storage-setup.service
+0100644 0 0 /usr/lib/systemd/system/swift-storage.service
+template overwrite 0100644 0 0 /etc/ntp.conf
+overwrite 0100644 0 0 /usr/lib/systemd/system/rsync.service
diff --git a/swift/usr/lib/systemd/system/rsync.service b/swift/usr/lib/systemd/system/rsync.service
new file mode 100644
index 00000000..babcfb46
--- /dev/null
+++ b/swift/usr/lib/systemd/system/rsync.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=fast remote file copy program daemon
+After=swift-storage-setup.service
+ConditionPathExists=/etc/rsyncd.conf
+
+[Service]
+ExecStart=/usr/bin/rsync --daemon --no-detach
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git a/swift/usr/lib/systemd/system/swift-storage-setup.service b/swift/usr/lib/systemd/system/swift-storage-setup.service
new file mode 100644
index 00000000..3df31163
--- /dev/null
+++ b/swift/usr/lib/systemd/system/swift-storage-setup.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Run openstack-swift-storage-setup (once)
+After=local-fs.target postgres-server-setup.service
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -i /usr/share/swift/hosts /usr/share/swift/swift-storage.yml
+Restart=no
+
+[Install]
+WantedBy=multi-user.target
diff --git a/swift/usr/lib/systemd/system/swift-storage.service b/swift/usr/lib/systemd/system/swift-storage.service
new file mode 100644
index 00000000..dc41d3bc
--- /dev/null
+++ b/swift/usr/lib/systemd/system/swift-storage.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=OpenStack Swift Storage
+After=syslog.target network.target swift-storage-setup.service
+
+[Service]
+Type=forking
+Restart=on-failure
+ExecStart=/usr/bin/swift-init all start
+ExecStop=/usr/bin/swift-init all stop
+
+[Install]
+WantedBy=multi-user.target
diff --git a/swift/usr/share/swift/etc/rsyncd.j2 b/swift/usr/share/swift/etc/rsyncd.j2
new file mode 100644
index 00000000..c0657665
--- /dev/null
+++ b/swift/usr/share/swift/etc/rsyncd.j2
@@ -0,0 +1,23 @@
+uid = swift
+gid = swift
+log file = /var/log/rsyncd.log
+pid file = /var/run/rsyncd.pid
+address = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+[account]
+max connections = 2
+path = /srv/node/
+read only = false
+lock file = /var/lock/account.lock
+
+[container]
+max connections = 2
+path = /srv/node/
+read only = false
+lock file = /var/lock/container.lock
+
+[object]
+max connections = 2
+path = /srv/node/
+read only = false
+lock file = /var/lock/object.lock
diff --git a/swift/usr/share/swift/etc/swift/account-server.j2 b/swift/usr/share/swift/etc/swift/account-server.j2
new file mode 100644
index 00000000..d977e295
--- /dev/null
+++ b/swift/usr/share/swift/etc/swift/account-server.j2
@@ -0,0 +1,192 @@
+[DEFAULT]
+# bind_ip = 0.0.0.0
+bind_ip = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+bind_port = 6002
+# bind_timeout = 30
+# backlog = 4096
+user = swift
+swift_dir = /etc/swift
+devices = /srv/node
+# mount_check = true
+# disable_fallocate = false
+#
+# Use an integer to override the number of pre-forked processes that will
+# accept connections.
+# workers = auto
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+# The following caps the length of log lines to the value given; no limit if
+# set to 0, the default.
+# log_max_line_length = 0
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
+# adapted_logger
+# log_custom_handlers =
+#
+# If set, log_udp_host will override log_address
+# log_udp_host =
+# log_udp_port = 514
+#
+# You can enable StatsD logging here:
+# log_statsd_host = localhost
+# log_statsd_port = 8125
+# log_statsd_default_sample_rate = 1.0
+# log_statsd_sample_rate_factor = 1.0
+# log_statsd_metric_prefix =
+#
+# If you don't mind the extra disk space usage in overhead, you can turn this
+# on to preallocate disk space with SQLite databases to decrease fragmentation.
+# db_preallocation = off
+#
+# eventlet_debug = false
+#
+# You can set fallocate_reserve to the number of bytes you'd like fallocate to
+# reserve, whether there is space for the given file size or not.
+# fallocate_reserve = 0
+
+[pipeline:main]
+pipeline = healthcheck recon account-server
+
+[app:account-server]
+use = egg:swift#account
+# You can override the default log routing for this app here:
+# set log_name = account-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_requests = true
+# set log_address = /dev/log
+#
+# auto_create_account_prefix = .
+#
+# Configure parameter for creating specific server
+# To handle all verbs, including replication verbs, do not specify
+# "replication_server" (this is the default). To only handle replication,
+# set to a True value (e.g. "True" or "1"). To handle only non-replication
+# verbs, set to "False". Unless you have a separate replication network, you
+# should not specify any value for "replication_server".
+# replication_server = false
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+# An optional filesystem path, which if present, will cause the healthcheck
+# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
+# disable_path =
+
+[filter:recon]
+use = egg:swift#recon
+recon_cache_path = /var/cache/swift
+
+[account-replicator]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = account-replicator
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# vm_test_mode = no
+# per_diff = 1000
+# max_diffs = 100
+# concurrency = 8
+# interval = 30
+#
+# How long without an error before a node's error count is reset. This will
+# also be how long before a node is reenabled after suppression is triggered.
+# error_suppression_interval = 60
+#
+# How many errors can accumulate before a node is temporarily ignored.
+# error_suppression_limit = 10
+#
+# node_timeout = 10
+# conn_timeout = 0.5
+#
+# The replicator also performs reclamation
+# reclaim_age = 604800
+#
+# Time in seconds to wait between replication passes
+# Note: if the parameter 'interval' is defined then it will be used in place
+# of run_pause.
+# run_pause = 30
+#
+# recon_cache_path = /var/cache/swift
+
+[account-auditor]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = account-auditor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# Will audit each account at most once per interval
+# interval = 1800
+#
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# accounts_per_second = 200
+# recon_cache_path = /var/cache/swift
+
+[account-reaper]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = account-reaper
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# concurrency = 25
+# interval = 3600
+# node_timeout = 10
+# conn_timeout = 0.5
+#
+# Normally, the reaper begins deleting account information for deleted accounts
+# immediately; you can set this to delay its work however. The value is in
+# seconds; 2592000 = 30 days for example.
+# delay_reaping = 0
+#
+# If the account fails to be reaped due to a persistent error, the
+# account reaper will log a message such as:
+# Account <name> has not been reaped since <date>
+# You can search logs for this message if space is not being reclaimed
+# after you delete account(s).
+# Default is 2592000 seconds (30 days). This is in addition to any time
+# requested by delay_reaping.
+# reap_warn_after = 2592000
+
+# Note: Put it at the beginning of the pipeline to profile all middleware. But
+# it is safer to put this after healthcheck.
+[filter:xprofile]
+use = egg:swift#xprofile
+# This option enables you to switch profilers which should inherit from python
+# standard profiler. Currently the supported value can be 'cProfile',
+# 'eventlet.green.profile' etc.
+# profile_module = eventlet.green.profile
+#
+# This prefix will be used to combine process ID and timestamp to name the
+# profile data file. Make sure the executing user has permission to write
+# into this path (missing path segments will be created, if necessary).
+# If you enable profiling in more than one type of daemon, you must override
+# it with a unique value like: /var/log/swift/profile/account.profile
+# log_filename_prefix = /tmp/log/swift/profile/default.profile
+#
+# the profile data will be dumped to local disk based on above naming rule
+# in this interval.
+# dump_interval = 5.0
+#
+# Be careful, this option will enable profiler to dump data into the file with
+# time stamp which means there will be lots of files piled up in the directory.
+# dump_timestamp = false
+#
+# This is the path of the URL to access the mini web UI.
+# path = /__profile__
+#
+# Clear the data when the wsgi server shutdown.
+# flush_at_shutdown = false
+#
+# unwind the iterator of applications
+# unwind = false
diff --git a/swift/usr/share/swift/etc/swift/container-server.j2 b/swift/usr/share/swift/etc/swift/container-server.j2
new file mode 100644
index 00000000..d226d016
--- /dev/null
+++ b/swift/usr/share/swift/etc/swift/container-server.j2
@@ -0,0 +1,203 @@
+[DEFAULT]
+# bind_ip = 0.0.0.0
+bind_ip = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+bind_port = 6001
+# bind_timeout = 30
+# backlog = 4096
+user = swift
+swift_dir = /etc/swift
+devices = /srv/node
+# mount_check = true
+# disable_fallocate = false
+#
+# Use an integer to override the number of pre-forked processes that will
+# accept connections.
+# workers = auto
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
+# This is a comma separated list of hosts allowed in the X-Container-Sync-To
+# field for containers. This is the old-style of using container sync. It is
+# strongly recommended to use the new style of a separate
+# container-sync-realms.conf -- see container-sync-realms.conf-sample
+# allowed_sync_hosts = 127.0.0.1
+#
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+# The following caps the length of log lines to the value given; no limit if
+# set to 0, the default.
+# log_max_line_length = 0
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
+# adapted_logger
+# log_custom_handlers =
+#
+# If set, log_udp_host will override log_address
+# log_udp_host =
+# log_udp_port = 514
+#
+# You can enable StatsD logging here:
+# log_statsd_host = localhost
+# log_statsd_port = 8125
+# log_statsd_default_sample_rate = 1.0
+# log_statsd_sample_rate_factor = 1.0
+# log_statsd_metric_prefix =
+#
+# If you don't mind the extra disk space usage in overhead, you can turn this
+# on to preallocate disk space with SQLite databases to decrease fragmentation.
+# db_preallocation = off
+#
+# eventlet_debug = false
+#
+# You can set fallocate_reserve to the number of bytes you'd like fallocate to
+# reserve, whether there is space for the given file size or not.
+# fallocate_reserve = 0
+
+[pipeline:main]
+pipeline = healthcheck recon container-server
+
+[app:container-server]
+use = egg:swift#container
+# You can override the default log routing for this app here:
+# set log_name = container-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_requests = true
+# set log_address = /dev/log
+#
+# node_timeout = 3
+# conn_timeout = 0.5
+# allow_versions = false
+# auto_create_account_prefix = .
+#
+# Configure parameter for creating specific server
+# To handle all verbs, including replication verbs, do not specify
+# "replication_server" (this is the default). To only handle replication,
+# set to a True value (e.g. "True" or "1"). To handle only non-replication
+# verbs, set to "False". Unless you have a separate replication network, you
+# should not specify any value for "replication_server".
+# replication_server = false
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+# An optional filesystem path, which if present, will cause the healthcheck
+# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
+# disable_path =
+
+[filter:recon]
+use = egg:swift#recon
+recon_cache_path = /var/cache/swift
+
+[container-replicator]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = container-replicator
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# vm_test_mode = no
+# per_diff = 1000
+# max_diffs = 100
+# concurrency = 8
+# interval = 30
+# node_timeout = 10
+# conn_timeout = 0.5
+#
+# The replicator also performs reclamation
+# reclaim_age = 604800
+#
+# Time in seconds to wait between replication passes
+# Note: if the parameter 'interval' is defined then it will be used in place
+# of run_pause.
+# run_pause = 30
+#
+# recon_cache_path = /var/cache/swift
+
+[container-updater]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = container-updater
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# interval = 300
+# concurrency = 4
+# node_timeout = 3
+# conn_timeout = 0.5
+#
+# slowdown will sleep that amount between containers
+# slowdown = 0.01
+#
+# Seconds to suppress updating an account that has generated an error
+# account_suppression_time = 60
+#
+# recon_cache_path = /var/cache/swift
+
+[container-auditor]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = container-auditor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# Will audit each container at most once per interval
+# interval = 1800
+#
+# containers_per_second = 200
+# recon_cache_path = /var/cache/swift
+
+[container-sync]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = container-sync
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# If you need to use an HTTP Proxy, set it here; defaults to no proxy.
+# You can also set this to a comma separated list of HTTP Proxies and they will
+# be randomly used (simple load balancing).
+# sync_proxy = http://10.1.1.1:8888,http://10.1.1.2:8888
+#
+# Will sync each container at most once per interval
+# interval = 300
+#
+# Maximum amount of time to spend syncing each container per pass
+# container_time = 60
+
+# Note: Put it at the beginning of the pipeline to profile all middleware. But
+# it is safer to put this after healthcheck.
+[filter:xprofile]
+use = egg:swift#xprofile
+# This option enables you to switch profilers which should inherit from python
+# standard profiler. Currently the supported value can be 'cProfile',
+# 'eventlet.green.profile' etc.
+# profile_module = eventlet.green.profile
+#
+# This prefix will be used to combine process ID and timestamp to name the
+# profile data file. Make sure the executing user has permission to write
+# into this path (missing path segments will be created, if necessary).
+# If you enable profiling in more than one type of daemon, you must override
+# it with a unique value like: /var/log/swift/profile/container.profile
+# log_filename_prefix = /tmp/log/swift/profile/default.profile
+#
+# the profile data will be dumped to local disk based on above naming rule
+# in this interval.
+# dump_interval = 5.0
+#
+# Be careful, this option will enable profiler to dump data into the file with
+# time stamp which means there will be lots of files piled up in the directory.
+# dump_timestamp = false
+#
+# This is the path of the URL to access the mini web UI.
+# path = /__profile__
+#
+# Clear the data when the wsgi server shutdown.
+# flush_at_shutdown = false
+#
+# unwind the iterator of applications
+# unwind = false
diff --git a/swift/usr/share/swift/etc/swift/object-server.j2 b/swift/usr/share/swift/etc/swift/object-server.j2
new file mode 100644
index 00000000..66990be9
--- /dev/null
+++ b/swift/usr/share/swift/etc/swift/object-server.j2
@@ -0,0 +1,283 @@
+[DEFAULT]
+# bind_ip = 0.0.0.0
+bind_ip = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+bind_port = 6000
+# bind_timeout = 30
+# backlog = 4096
+user = swift
+swift_dir = /etc/swift
+devices = /srv/node
+# mount_check = true
+# disable_fallocate = false
+# expiring_objects_container_divisor = 86400
+# expiring_objects_account_name = expiring_objects
+#
+# Use an integer to override the number of pre-forked processes that will
+# accept connections.
+# workers = auto
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+# The following caps the length of log lines to the value given; no limit if
+# set to 0, the default.
+# log_max_line_length = 0
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
+# adapted_logger
+# log_custom_handlers =
+#
+# If set, log_udp_host will override log_address
+# log_udp_host =
+# log_udp_port = 514
+#
+# You can enable StatsD logging here:
+# log_statsd_host = localhost
+# log_statsd_port = 8125
+# log_statsd_default_sample_rate = 1.0
+# log_statsd_sample_rate_factor = 1.0
+# log_statsd_metric_prefix =
+#
+# eventlet_debug = false
+#
+# You can set fallocate_reserve to the number of bytes you'd like fallocate to
+# reserve, whether there is space for the given file size or not.
+# fallocate_reserve = 0
+#
+# Time to wait while attempting to connect to another backend node.
+# conn_timeout = 0.5
+# Time to wait while sending each chunk of data to another backend node.
+# node_timeout = 3
+# Time to wait while receiving each chunk of data from a client or another
+# backend node.
+# client_timeout = 60
+#
+# network_chunk_size = 65536
+# disk_chunk_size = 65536
+
+[pipeline:main]
+pipeline = healthcheck recon object-server
+
+[app:object-server]
+use = egg:swift#object
+# You can override the default log routing for this app here:
+# set log_name = object-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_requests = true
+# set log_address = /dev/log
+#
+# max_upload_time = 86400
+# slow = 0
+#
+# Objects smaller than this are not evicted from the buffercache once read
+# keep_cache_size = 5424880
+#
+# If true, objects for authenticated GET requests may be kept in buffer cache
+# if small enough
+# keep_cache_private = false
+#
+# on PUTs, sync data every n MB
+# mb_per_sync = 512
+#
+# Comma separated list of headers that can be set in metadata on an object.
+# This list is in addition to X-Object-Meta-* headers and cannot include
+# Content-Type, etag, Content-Length, or deleted
+# allowed_headers = Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object
+#
+# auto_create_account_prefix = .
+#
+# A value of 0 means "don't use thread pools". A reasonable starting point is
+# 4.
+# threads_per_disk = 0
+#
+# Configure parameter for creating specific server
+# To handle all verbs, including replication verbs, do not specify
+# "replication_server" (this is the default). To only handle replication,
+# set to a True value (e.g. "True" or "1"). To handle only non-replication
+# verbs, set to "False". Unless you have a separate replication network, you
+# should not specify any value for "replication_server".
+# replication_server = false
+#
+# Set to restrict the number of concurrent incoming REPLICATION requests
+# Set to 0 for unlimited
+# Note that REPLICATION is currently an ssync only item
+# replication_concurrency = 4
+#
+# Restricts incoming REPLICATION requests to one per device,
+# replication_concurrency above allowing. This can help control I/O to each
+# device, but you may wish to set this to False to allow multiple REPLICATION
+# requests (up to the above replication_concurrency setting) per device.
+# replication_one_per_device = True
+#
+# Number of seconds to wait for an existing replication device lock before
+# giving up.
+# replication_lock_timeout = 15
+#
+# These next two settings control when the REPLICATION subrequest handler will
+# abort an incoming REPLICATION attempt. An abort will occur if there are at
+# least threshold number of failures and the value of failures / successes
+# exceeds the ratio. The defaults of 100 and 1.0 means that at least 100
+# failures have to occur and there have to be more failures than successes for
+# an abort to occur.
+# replication_failure_threshold = 100
+# replication_failure_ratio = 1.0
+#
+# Use splice() for zero-copy object GETs. This requires Linux kernel
+# version 3.0 or greater. If you set "splice = yes" but the kernel
+# does not support it, error messages will appear in the object server
+# logs at startup, but your object servers should continue to function.
+#
+# splice = no
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+# An optional filesystem path, which if present, will cause the healthcheck
+# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
+# disable_path =
+
+[filter:recon]
+use = egg:swift#recon
+recon_cache_path = /var/cache/swift
+#recon_lock_path = /var/lock
+
+[object-replicator]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = object-replicator
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# vm_test_mode = no
+# daemonize = on
+# run_pause = 30
+# concurrency = 1
+# stats_interval = 300
+#
+# The sync method to use; default is rsync but you can use ssync to try the
+# EXPERIMENTAL all-swift-code-no-rsync-callouts method. Once ssync is verified
+# as having performance comparable to, or better than, rsync, we plan to
+# deprecate rsync so we can move on with more features for replication.
+# sync_method = rsync
+#
+# max duration of a partition rsync
+# rsync_timeout = 900
+#
+# bandwidth limit for rsync in kB/s. 0 means unlimited
+# rsync_bwlimit = 0
+#
+# passed to rsync for io op timeout
+# rsync_io_timeout = 30
+#
+# node_timeout = <whatever's in the DEFAULT section or 10>
+# max duration of an http request; this is for REPLICATE finalization calls and
+# so should be longer than node_timeout
+# http_timeout = 60
+#
+# attempts to kill all workers if nothing replicates for lockup_timeout seconds
+# lockup_timeout = 1800
+#
+# The replicator also performs reclamation
+# reclaim_age = 604800
+#
+# ring_check_interval = 15
+# recon_cache_path = /var/cache/swift
+#
+# limits how long rsync error log lines are
+# 0 means to log the entire line
+# rsync_error_log_line_length = 0
+#
+# handoffs_first and handoff_delete are options for a special case
+# such as disk full in the cluster. These two options SHOULD NOT BE
+# CHANGED, except for such an extreme situations. (e.g. disks filled up
+# or are about to fill up. Anyway, DO NOT let your drives fill up)
+# handoffs_first is the flag to replicate handoffs prior to canonical
+# partitions. It allows to force syncing and deleting handoffs quickly.
+# If set to a True value(e.g. "True" or "1"), partitions
+# that are not supposed to be on the node will be replicated first.
+# handoffs_first = False
+#
+# handoff_delete is the number of replicas which are ensured in swift.
+# If the number less than the number of replicas is set, object-replicator
+# could delete local handoffs even if all replicas are not ensured in the
+# cluster. Object-replicator would remove local handoff partition directories
+# after syncing partition when the number of successful responses is greater
+# than or equal to this number. By default(auto), handoff partitions will be
+# removed when it has successfully replicated to all the canonical nodes.
+# handoff_delete = auto
+
+[object-updater]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = object-updater
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# interval = 300
+# concurrency = 1
+# node_timeout = <whatever's in the DEFAULT section or 10>
+# slowdown will sleep that amount between objects
+# slowdown = 0.01
+#
+# recon_cache_path = /var/cache/swift
+
+[object-auditor]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = object-auditor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# You can set the disk chunk size that the auditor uses making it larger if
+# you like for more efficient local auditing of larger objects
+# disk_chunk_size = 65536
+# files_per_second = 20
+# concurrency = 1
+# bytes_per_second = 10000000
+# log_time = 3600
+# zero_byte_files_per_second = 50
+# recon_cache_path = /var/cache/swift
+
+# Takes a comma-separated list of ints. If set, the object auditor will
+# increment a counter for every object whose size is less than or equal to
+# the given break points, and report the result after a full scan.
+# object_size_stats =
+
+# Note: Put it at the beginning of the pipeline to profile all middleware, but
+# it is safer to put this after healthcheck.
+[filter:xprofile]
+use = egg:swift#xprofile
+# This option enables you to switch profilers, which should inherit from the
+# Python standard profiler. Currently the supported values are 'cProfile',
+# 'eventlet.green.profile', etc.
+# profile_module = eventlet.green.profile
+#
+# This prefix will be used to combine process ID and timestamp to name the
+# profile data file. Make sure the executing user has permission to write
+# into this path (missing path segments will be created, if necessary).
+# If you enable profiling in more than one type of daemon, you must override
+# it with a unique value like: /var/log/swift/profile/object.profile
+# log_filename_prefix = /tmp/log/swift/profile/default.profile
+#
+# The profile data will be dumped to local disk, based on the above naming
+# rule, at this interval.
+# dump_interval = 5.0
+#
+# Be careful: this option makes the profiler dump data into timestamped files,
+# which means lots of files will pile up in the directory.
+# dump_timestamp = false
+#
+# This is the path of the URL to access the mini web UI.
+# path = /__profile__
+#
+# Clear the data when the WSGI server shuts down.
+# flush_at_shutdown = false
+#
+# unwind the iterator of applications
+# unwind = false
diff --git a/swift/usr/share/swift/etc/swift/swift.j2 b/swift/usr/share/swift/etc/swift/swift.j2
new file mode 100644
index 00000000..6d76215a
--- /dev/null
+++ b/swift/usr/share/swift/etc/swift/swift.j2
@@ -0,0 +1,118 @@
+[swift-hash]
+
+# swift_hash_path_suffix and swift_hash_path_prefix are used as part of the
+# hashing algorithm when determining data placement in the cluster.
+# These values should remain secret and MUST NOT change
+# once a cluster has been deployed.
+
+swift_hash_path_suffix = {{ SWIFT_HASH_PATH_SUFFIX }}
+swift_hash_path_prefix = {{ SWIFT_HASH_PATH_PREFIX }}
+
+# Storage policies are defined here and determine various characteristics
+# about how objects are stored and treated. Policies are specified by name on
+# a per-container basis. Names are case-insensitive. The policy index is
+# specified in the section header and is used internally. The policy with
+# index 0 is always used for legacy containers and can be given a name for use
+# in metadata; however, the ring file name will always be 'object.ring.gz' for
+# backwards compatibility. If no policies are defined, a policy with index 0
+# will be automatically created for backwards compatibility and given the name
+# Policy-0. The default policy is used when creating new containers if no
+# policy is specified in the request. If no other policies are defined, the
+# policy with index 0 will be declared the default. If multiple policies are
+# defined, you must define a policy with index 0 and you must specify a
+# default. It is recommended that you always define a section for
+# storage-policy:0.
+[storage-policy:0]
+name = Policy-0
+default = yes
+
+# The following section would declare a policy called 'silver'; the number of
+# replicas will be determined by how the ring is built. In this example the
+# 'silver' policy could have a lower or higher number of replicas than the
+# 'Policy-0' policy above. The ring filename will be 'object-1.ring.gz'. You
+# may only specify one storage policy section as the default. If you changed
+# this section to specify 'silver' as the default, a client creating a new
+# container without specifying a policy would get the 'silver' policy, because
+# this config specifies it as the default. However, if a legacy container
+# (one created with a pre-policy version of swift) is accessed, it is known
+# implicitly to be assigned to the policy with index 0 as opposed to the
+# current default.
+#[storage-policy:1]
+#name = silver
+
+# The swift-constraints section sets the basic constraints on data
+# saved in the swift cluster. These constraints are automatically
+# published by the proxy server in responses to /info requests.
+
+[swift-constraints]
+
+# max_file_size is the largest "normal" object that can be saved in
+# the cluster. This is also the limit on the size of each segment of
+# a "large" object when using the large object manifest support.
+# This value is set in bytes. Setting it to lower than 1MiB will cause
+# some tests to fail. It is STRONGLY recommended to leave this value at
+# the default (5 * 2**30 + 2).
+
+#max_file_size = 5368709122
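+#
+# For reference, the default above works out as 5 * 2**30 + 2 =
+# 5 * 1073741824 + 2 = 5368709122 bytes (5 GiB plus 2 bytes), matching the
+# commented value.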
+
+
+# max_meta_name_length is the max number of bytes in the utf8 encoding
+# of the name portion of a metadata header.
+
+#max_meta_name_length = 128
+
+
+# max_meta_value_length is the max number of bytes in the utf8 encoding
+# of a metadata value
+
+#max_meta_value_length = 256
+
+
+# max_meta_count is the max number of metadata keys that can be stored
+# on a single account, container, or object
+
+#max_meta_count = 90
+
+
+# max_meta_overall_size is the max number of bytes in the utf8 encoding
+# of the metadata (keys + values)
+
+#max_meta_overall_size = 4096
+
+# max_header_size is the max number of bytes in the utf8 encoding of each
+# header. The default is 8192 because eventlet uses 8192 as the max size of a
+# header line. This value may need to be increased when using Identity
+# v3 API tokens that include more than 7 catalog entries.
+# See also include_service_catalog in proxy-server.conf-sample
+# (documented in overview_auth.rst)
+
+#max_header_size = 8192
+
+
+# max_object_name_length is the max number of bytes in the utf8 encoding
+# of an object name
+
+#max_object_name_length = 1024
+
+
+# container_listing_limit is the default (and max) number of items
+# returned for a container listing request
+
+#container_listing_limit = 10000
+
+
+# account_listing_limit is the default (and max) number of items returned
+# for an account listing request
+#account_listing_limit = 10000
+
+
+# max_account_name_length is the max number of bytes in the utf8 encoding
+# of an account name
+
+#max_account_name_length = 256
+
+
+# max_container_name_length is the max number of bytes in the utf8 encoding
+# of a container name
+
+#max_container_name_length = 256
diff --git a/swift/usr/share/swift/hosts b/swift/usr/share/swift/hosts
new file mode 100644
index 00000000..5b97818d
--- /dev/null
+++ b/swift/usr/share/swift/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local
diff --git a/swift/usr/share/swift/swift-storage.yml b/swift/usr/share/swift/swift-storage.yml
new file mode 100644
index 00000000..62a335ed
--- /dev/null
+++ b/swift/usr/share/swift/swift-storage.yml
@@ -0,0 +1,24 @@
+---
+- hosts: localhost
+ vars_files:
+ - swift-storage-vars.yml
+ vars:
+ remote_user: root
+ tasks:
+ - user: name=swift comment="Swift user"
+ - file: path=/etc/swift owner=swift group=swift state=directory recurse=yes
+
+ - template: src=/usr/share/swift/etc/rsyncd.j2 dest=/etc/rsyncd.conf
+ mode=0644 owner=swift group=swift
+
+ - template: src=/usr/share/swift/etc/swift/{{ item }}.j2
+ dest=/etc/swift/{{ item }}.conf mode=0644 owner=swift group=swift
+ with_items:
+ - account-server
+ - container-server
+ - object-server
+ - swift
+
+ - file: path=/srv/node owner=swift group=swift state=directory recurse=yes
+ - file: path=/var/cache/swift owner=swift group=swift state=directory
+ recurse=yes
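+
+# Illustrative sketch only (not part of the playbook above): the vars_files
+# entry expects swift-storage-vars.yml to provide the values substituted into
+# the .j2 templates, for example variables such as:
+#
+#   SWIFT_HASH_PATH_PREFIX: 60ba414d    # hypothetical value
+#   SWIFT_HASH_PATH_SUFFIX: 9d983158    # hypothetical value
+#
+# The exact variable set is deployment-specific; the templates under
+# /usr/share/swift/etc define what is actually required.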
diff --git a/systems/armv7lhf-cross-toolchain-system-x86_32.morph b/systems/armv7lhf-cross-toolchain-system-x86_32.morph
new file mode 100644
index 00000000..9fe7888a
--- /dev/null
+++ b/systems/armv7lhf-cross-toolchain-system-x86_32.morph
@@ -0,0 +1,19 @@
+name: armv7lhf-cross-toolchain-system-x86_32
+kind: system
+description: A system that contains an ARM cross compiler, intended to be used as
+ a sysroot.
+arch: x86_32
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: armv7lhf-cross-toolchain
+ morph: strata/armv7lhf-cross-toolchain.morph
+- name: cross-tools
+ morph: strata/cross-tools.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- simple-network
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/armv7lhf-cross-toolchain-system-x86_64.morph b/systems/armv7lhf-cross-toolchain-system-x86_64.morph
new file mode 100644
index 00000000..c1de199c
--- /dev/null
+++ b/systems/armv7lhf-cross-toolchain-system-x86_64.morph
@@ -0,0 +1,19 @@
+name: armv7lhf-cross-toolchain-system-x86_64
+kind: system
+description: A system that contains an ARM cross compiler, intended to be used as
+ a sysroot.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: armv7lhf-cross-toolchain
+ morph: strata/armv7lhf-cross-toolchain.morph
+- name: cross-tools
+ morph: strata/cross-tools.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- simple-network
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/base-system-armv7-highbank.morph b/systems/base-system-armv7-highbank.morph
new file mode 100644
index 00000000..ffc5e188
--- /dev/null
+++ b/systems/base-system-armv7-highbank.morph
@@ -0,0 +1,20 @@
+name: base-system-armv7-highbank
+kind: system
+description: The set of strata required to have a minimal system for an ARM highbank
+ system.
+arch: armv7l
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-armv7-highbank
+ morph: strata/bsp-armv7-highbank.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/base-system-armv7-versatile.morph b/systems/base-system-armv7-versatile.morph
new file mode 100644
index 00000000..8de2b35f
--- /dev/null
+++ b/systems/base-system-armv7-versatile.morph
@@ -0,0 +1,20 @@
+name: base-system-armv7-versatile
+kind: system
+description: The set of strata required to have a minimal system for an ARM versatile
+ system.
+arch: armv7l
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-armv7-versatile
+ morph: strata/bsp-armv7-versatile.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/base-system-armv7b-highbank.morph b/systems/base-system-armv7b-highbank.morph
new file mode 100644
index 00000000..23bf4dbf
--- /dev/null
+++ b/systems/base-system-armv7b-highbank.morph
@@ -0,0 +1,20 @@
+name: base-system-armv7b-highbank
+kind: system
+description: The set of strata required to have a minimal system for an ARM highbank
+ system.
+arch: armv7b
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-armv7b-highbank
+ morph: strata/bsp-armv7b-highbank.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/base-system-armv7b-vexpress-tc2.morph b/systems/base-system-armv7b-vexpress-tc2.morph
new file mode 100644
index 00000000..b06ead7b
--- /dev/null
+++ b/systems/base-system-armv7b-vexpress-tc2.morph
@@ -0,0 +1,19 @@
+name: base-system-armv7b-vexpress-tc2
+kind: system
+description: A small system for Versatile Express TC2 boards.
+arch: armv7b
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-armv7b-vexpress-tc2
+ morph: strata/bsp-armv7b-vexpress-tc2.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/base-system-armv7lhf-highbank.morph b/systems/base-system-armv7lhf-highbank.morph
new file mode 100644
index 00000000..c827f3a2
--- /dev/null
+++ b/systems/base-system-armv7lhf-highbank.morph
@@ -0,0 +1,20 @@
+name: base-system-armv7lhf-highbank
+kind: system
+description: The set of strata required to have a minimal system for an ARM highbank
+ system.
+arch: armv7lhf
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-armv7-highbank
+ morph: strata/bsp-armv7-highbank.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/base-system-armv8b64.morph b/systems/base-system-armv8b64.morph
new file mode 100644
index 00000000..49e7dac7
--- /dev/null
+++ b/systems/base-system-armv8b64.morph
@@ -0,0 +1,22 @@
+name: base-system-armv8b64
+kind: system
+description: |
+ The set of strata required to have a minimal
+ system for big endian 64-bit ARMv8 computers.
+arch: armv8b64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-armv8b64-generic
+ morph: strata/bsp-armv8b64-generic.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- moonshot-kernel
+- install-essential-files
diff --git a/systems/base-system-armv8l64.morph b/systems/base-system-armv8l64.morph
new file mode 100644
index 00000000..560add69
--- /dev/null
+++ b/systems/base-system-armv8l64.morph
@@ -0,0 +1,22 @@
+name: base-system-armv8l64
+kind: system
+description: |
+ The set of strata required to have a minimal
+ system for little endian 64-bit ARMv8 computers.
+arch: armv8l64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-armv8l64-generic
+ morph: strata/bsp-armv8l64-generic.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- moonshot-kernel
+- install-essential-files
diff --git a/systems/base-system-ppc64-generic.morph b/systems/base-system-ppc64-generic.morph
new file mode 100644
index 00000000..3763cce5
--- /dev/null
+++ b/systems/base-system-ppc64-generic.morph
@@ -0,0 +1,20 @@
+name: base-system-ppc64-generic
+kind: system
+description: The set of strata required to have a minimal system for a 64-bit ppc64
+ system.
+arch: ppc64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-ppc64-generic
+ morph: strata/bsp-ppc64-generic.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/base-system-x86_32-generic.morph b/systems/base-system-x86_32-generic.morph
new file mode 100644
index 00000000..7ada4052
--- /dev/null
+++ b/systems/base-system-x86_32-generic.morph
@@ -0,0 +1,19 @@
+name: base-system-x86_32-generic
+kind: system
+description: The set of strata required to have a basic system for a 32-bit x86 system.
+arch: x86_32
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_32-generic
+ morph: strata/bsp-x86_32-generic.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/base-system-x86_64-generic.morph b/systems/base-system-x86_64-generic.morph
new file mode 100644
index 00000000..796c8185
--- /dev/null
+++ b/systems/base-system-x86_64-generic.morph
@@ -0,0 +1,20 @@
+name: base-system-x86_64-generic
+kind: system
+description: The set of strata required to have a minimal system for a 64-bit x86
+ system.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/build-system-armv5l-openbmc-aspeed.morph b/systems/build-system-armv5l-openbmc-aspeed.morph
new file mode 100644
index 00000000..4eb0b6e7
--- /dev/null
+++ b/systems/build-system-armv5l-openbmc-aspeed.morph
@@ -0,0 +1,43 @@
+name: build-system-armv5l-openbmc-aspeed
+kind: system
+description:
+- |
+ The system that should be used for building other Baserock systems
+ for little endian ARMv5 computers.
+arch: armv5l
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: bsp-armv5l-openbmc-aspeed
+ morph: strata/bsp-armv5l-openbmc-aspeed.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: mtd-utilities
+ morph: strata/mtd-utilities.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- fstab
+- install-essential-files
diff --git a/systems/build-system-armv7lhf-highbank.morph b/systems/build-system-armv7lhf-highbank.morph
new file mode 100644
index 00000000..d43ac935
--- /dev/null
+++ b/systems/build-system-armv7lhf-highbank.morph
@@ -0,0 +1,55 @@
+name: build-system-armv7lhf-highbank
+kind: system
+description: The system that should be used for building all other Baserock systems for ARMv7 little-endian hard-float Calxeda Highbank computers.
+arch: armv7lhf
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: bsp-armv7-highbank
+ morph: strata/bsp-armv7-highbank.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: unionfs-fuse-group
+ morph: strata/unionfs-fuse-group.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- distbuild
+- fstab
+- mason
+- cloud-init
+- install-essential-files
diff --git a/systems/build-system-armv7lhf-jetson.morph b/systems/build-system-armv7lhf-jetson.morph
new file mode 100644
index 00000000..fa948037
--- /dev/null
+++ b/systems/build-system-armv7lhf-jetson.morph
@@ -0,0 +1,53 @@
+name: build-system-armv7lhf-jetson
+kind: system
+description: The system that should be used for building all other Baserock systems for ARMv7 little-endian hard-float NVIDIA Jetson TK1 computers.
+arch: armv7lhf
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: bsp-jetson
+ morph: strata/bsp-jetson.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- distbuild
+- fstab
+- mason
+- cloud-init
+- install-essential-files
diff --git a/systems/build-system-armv8b64.morph b/systems/build-system-armv8b64.morph
new file mode 100644
index 00000000..84495016
--- /dev/null
+++ b/systems/build-system-armv8b64.morph
@@ -0,0 +1,57 @@
+name: build-system-armv8b64
+kind: system
+description:
+- |
+ The system that should be used for building all other Baserock systems
+ for 64-bit ARMv8 computers running in big-endian mode.
+arch: armv8b64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-armv8b64-generic
+ morph: strata/bsp-armv8b64-generic.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: devtools
+ morph: strata/devtools.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- distbuild
+- fstab
+- mason
+- cloud-init
+- moonshot-kernel
+- install-essential-files
diff --git a/systems/build-system-armv8l64.morph b/systems/build-system-armv8l64.morph
new file mode 100644
index 00000000..f79fb76e
--- /dev/null
+++ b/systems/build-system-armv8l64.morph
@@ -0,0 +1,57 @@
+name: build-system-armv8l64
+kind: system
+description:
+- |
+ The system that should be used for building all other Baserock systems
+ for little endian 64-bit ARMv8 computers.
+arch: armv8l64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: bsp-armv8l64-generic
+ morph: strata/bsp-armv8l64-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- distbuild
+- fstab
+- mason
+- cloud-init
+- moonshot-kernel
+- install-essential-files
diff --git a/systems/build-system-ppc64.morph b/systems/build-system-ppc64.morph
new file mode 100644
index 00000000..38f2e9f3
--- /dev/null
+++ b/systems/build-system-ppc64.morph
@@ -0,0 +1,53 @@
+name: build-system-ppc64
+kind: system
+description: The system that should be used for building all other Baserock systems for 64-bit POWER computers.
+arch: ppc64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: bsp-ppc64-generic
+ morph: strata/bsp-ppc64-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- distbuild
+- fstab
+- mason
+- cloud-init
+- install-essential-files
diff --git a/systems/build-system-x86_32-chroot.morph b/systems/build-system-x86_32-chroot.morph
new file mode 100644
index 00000000..f193841f
--- /dev/null
+++ b/systems/build-system-x86_32-chroot.morph
@@ -0,0 +1,53 @@
+name: build-system-x86_32-chroot
+kind: system
+description: The system that should be used for building all other Baserock systems for 32-bit Intel x86 computers, chroot variant.
+arch: x86_32
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: bsp-x86_both-tools
+ morph: strata/bsp-x86_both-tools.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- distbuild
+- fstab
+- mason
+- cloud-init
+- install-essential-files
diff --git a/systems/build-system-x86_32.morph b/systems/build-system-x86_32.morph
new file mode 100644
index 00000000..a802b64f
--- /dev/null
+++ b/systems/build-system-x86_32.morph
@@ -0,0 +1,55 @@
+name: build-system-x86_32
+kind: system
+description: The system that should be used for building all other Baserock systems for 32-bit Intel x86 computers.
+arch: x86_32
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: bsp-x86_32-generic
+ morph: strata/bsp-x86_32-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: bsp-x86_both-tools
+ morph: strata/bsp-x86_both-tools.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- distbuild
+- fstab
+- mason
+- cloud-init
+- install-essential-files
diff --git a/systems/build-system-x86_64-chroot.morph b/systems/build-system-x86_64-chroot.morph
new file mode 100644
index 00000000..fa54f9d3
--- /dev/null
+++ b/systems/build-system-x86_64-chroot.morph
@@ -0,0 +1,53 @@
+name: build-system-x86_64-chroot
+kind: system
+description: The system that should be used for building all other Baserock systems for 64-bit Intel x86 computers, chroot variant.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: bsp-x86_both-tools
+ morph: strata/bsp-x86_both-tools.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- distbuild
+- fstab
+- mason
+- cloud-init
+- install-essential-files
diff --git a/systems/build-system-x86_64.morph b/systems/build-system-x86_64.morph
new file mode 100644
index 00000000..8fe5f91f
--- /dev/null
+++ b/systems/build-system-x86_64.morph
@@ -0,0 +1,55 @@
+name: build-system-x86_64
+kind: system
+description: The system that should be used for building all other Baserock systems for 64-bit Intel x86 computers.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: bsp-x86_both-tools
+ morph: strata/bsp-x86_both-tools.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- distbuild
+- fstab
+- mason
+- cloud-init
+- install-essential-files
diff --git a/systems/ceph-service-x86_64-generic.morph b/systems/ceph-service-x86_64-generic.morph
new file mode 100644
index 00000000..7431e56a
--- /dev/null
+++ b/systems/ceph-service-x86_64-generic.morph
@@ -0,0 +1,64 @@
+name: ceph-service-x86_64-generic
+kind: system
+description: The set of strata required to have a minimal system for a 64-bit x86
+  system, plus the service daemons for Ceph object, block and file storage.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: network-security
+ morph: strata/network-security.morph
+- name: ceph-service
+ morph: strata/ceph-service.morph
+- name: tools
+ morph: strata/tools.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: ntpd
+ morph: strata/ntpd.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: chef
+ morph: strata/chef.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- ceph
+- cloud-init
+- install-essential-files
diff --git a/systems/cross-bootstrap-system-armv5l-generic.morph b/systems/cross-bootstrap-system-armv5l-generic.morph
new file mode 100644
index 00000000..08cc11f6
--- /dev/null
+++ b/systems/cross-bootstrap-system-armv5l-generic.morph
@@ -0,0 +1,21 @@
+name: cross-bootstrap-system-armv5l-generic
+kind: system
+description: A system that produces the minimum needed to build a devel system
+arch: armv5l
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: cross-bootstrap
+ morph: strata/cross-bootstrap.morph
diff --git a/systems/cross-bootstrap-system-armv7lhf-generic.morph b/systems/cross-bootstrap-system-armv7lhf-generic.morph
new file mode 100644
index 00000000..e52a0375
--- /dev/null
+++ b/systems/cross-bootstrap-system-armv7lhf-generic.morph
@@ -0,0 +1,21 @@
+name: cross-bootstrap-system-armv7lhf-generic
+kind: system
+description: A system that produces the minimum needed to build a devel system
+arch: armv7lhf
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: cross-bootstrap
+ morph: strata/cross-bootstrap.morph
diff --git a/systems/cross-bootstrap-system-armv8b64-generic.morph b/systems/cross-bootstrap-system-armv8b64-generic.morph
new file mode 100644
index 00000000..adfbd412
--- /dev/null
+++ b/systems/cross-bootstrap-system-armv8b64-generic.morph
@@ -0,0 +1,21 @@
+name: cross-bootstrap-system-armv8b64-generic
+kind: system
+description: A system that produces the minimum needed to build a devel system
+arch: armv8b64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: cross-bootstrap
+ morph: strata/cross-bootstrap.morph
diff --git a/systems/cross-bootstrap-system-armv8l64-generic.morph b/systems/cross-bootstrap-system-armv8l64-generic.morph
new file mode 100644
index 00000000..5cc2f83b
--- /dev/null
+++ b/systems/cross-bootstrap-system-armv8l64-generic.morph
@@ -0,0 +1,21 @@
+name: cross-bootstrap-system-armv8l64-generic
+kind: system
+description: A system that produces the minimum needed to build a devel system
+arch: armv8l64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: cross-bootstrap
+ morph: strata/cross-bootstrap.morph
diff --git a/systems/cross-bootstrap-system-ppc64-generic.morph b/systems/cross-bootstrap-system-ppc64-generic.morph
new file mode 100644
index 00000000..35b3f626
--- /dev/null
+++ b/systems/cross-bootstrap-system-ppc64-generic.morph
@@ -0,0 +1,21 @@
+name: cross-bootstrap-system-ppc64-generic
+kind: system
+description: A system that produces the minimum needed to build a devel system
+arch: ppc64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: cross-bootstrap
+ morph: strata/cross-bootstrap.morph
diff --git a/systems/cross-bootstrap-system-x86_64-generic.morph b/systems/cross-bootstrap-system-x86_64-generic.morph
new file mode 100644
index 00000000..471bd40d
--- /dev/null
+++ b/systems/cross-bootstrap-system-x86_64-generic.morph
@@ -0,0 +1,21 @@
+name: cross-bootstrap-system-x86_64-generic
+kind: system
+description: A system that produces the minimum needed to build a devel system
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: cross-bootstrap
+ morph: strata/cross-bootstrap.morph
diff --git a/systems/cxmanage-system-x86_64-generic.morph b/systems/cxmanage-system-x86_64-generic.morph
new file mode 100644
index 00000000..aaa0fa81
--- /dev/null
+++ b/systems/cxmanage-system-x86_64-generic.morph
@@ -0,0 +1,24 @@
+name: cxmanage-system-x86_64-generic
+kind: system
+description: A baserock development system with calxeda management tools added
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: cxmanage
+ morph: strata/cxmanage.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-armv7-chroot.morph b/systems/devel-system-armv7-chroot.morph
new file mode 100644
index 00000000..620d8fb9
--- /dev/null
+++ b/systems/devel-system-armv7-chroot.morph
@@ -0,0 +1,62 @@
+name: devel-system-armv7-chroot
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use as a chroot inside a Linux-based operating system on ARMv7
+ computers using little-endian word order.
+arch: armv7l
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-armv7-highbank.morph b/systems/devel-system-armv7-highbank.morph
new file mode 100644
index 00000000..a92561fe
--- /dev/null
+++ b/systems/devel-system-armv7-highbank.morph
@@ -0,0 +1,65 @@
+name: devel-system-armv7-highbank
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use on ARMv7 Calxeda Highbank computers using little-endian word order.
+arch: armv7l
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-armv7-highbank
+ morph: strata/bsp-armv7-highbank.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: unionfs-fuse-group
+ morph: strata/unionfs-fuse-group.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-armv7-versatile.morph b/systems/devel-system-armv7-versatile.morph
new file mode 100644
index 00000000..50588f66
--- /dev/null
+++ b/systems/devel-system-armv7-versatile.morph
@@ -0,0 +1,63 @@
+name: devel-system-armv7-versatile
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use on generic ARMv7 versatile computers using little-endian word order.
+arch: armv7l
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-armv7-versatile
+ morph: strata/bsp-armv7-versatile.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-armv7-wandboard.morph b/systems/devel-system-armv7-wandboard.morph
new file mode 100644
index 00000000..e2c9e175
--- /dev/null
+++ b/systems/devel-system-armv7-wandboard.morph
@@ -0,0 +1,63 @@
+name: devel-system-armv7-wandboard
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use on ARMv7 Wandboard computers using little-endian word order.
+arch: armv7l
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-wandboard
+ morph: strata/bsp-wandboard.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-armv7b-chroot.morph b/systems/devel-system-armv7b-chroot.morph
new file mode 100644
index 00000000..34bc04f1
--- /dev/null
+++ b/systems/devel-system-armv7b-chroot.morph
@@ -0,0 +1,54 @@
+name: devel-system-armv7b-chroot
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use as a chroot inside a Linux-based operating system on ARMv7 computers
+ using big-endian word order.
+arch: armv7b
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-armv7b-highbank.morph b/systems/devel-system-armv7b-highbank.morph
new file mode 100644
index 00000000..cddd5ff4
--- /dev/null
+++ b/systems/devel-system-armv7b-highbank.morph
@@ -0,0 +1,61 @@
+name: devel-system-armv7b-highbank
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use on ARMv7 Calxeda Highbank computers using big-endian word order.
+arch: armv7b
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-armv7b-highbank
+ morph: strata/bsp-armv7b-highbank.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: unionfs-fuse-group
+ morph: strata/unionfs-fuse-group.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-armv7lhf-chroot.morph b/systems/devel-system-armv7lhf-chroot.morph
new file mode 100644
index 00000000..a8d46bd1
--- /dev/null
+++ b/systems/devel-system-armv7lhf-chroot.morph
@@ -0,0 +1,62 @@
+name: devel-system-armv7lhf-chroot
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use on ARMv7 computers using little-endian word order and hardware
+ floating point support.
+arch: armv7lhf
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-armv7lhf-highbank.morph b/systems/devel-system-armv7lhf-highbank.morph
new file mode 100644
index 00000000..9722644c
--- /dev/null
+++ b/systems/devel-system-armv7lhf-highbank.morph
@@ -0,0 +1,68 @@
+name: devel-system-armv7lhf-highbank
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use on ARMv7 Calxeda Highbank computers using little-endian word order
+ and hardware floating-point support.
+arch: armv7lhf
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-armv7-highbank
+ morph: strata/bsp-armv7-highbank.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: unionfs-fuse-group
+ morph: strata/unionfs-fuse-group.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-armv7lhf-jetson.morph b/systems/devel-system-armv7lhf-jetson.morph
new file mode 100644
index 00000000..e3d1843d
--- /dev/null
+++ b/systems/devel-system-armv7lhf-jetson.morph
@@ -0,0 +1,66 @@
+name: devel-system-armv7lhf-jetson
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use on ARMv7 NVIDIA Jetson TK1 computers using little-endian word order
+ and hardware floating-point support.
+arch: armv7lhf
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: bsp-jetson
+ morph: strata/bsp-jetson.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-armv7lhf-wandboard.morph b/systems/devel-system-armv7lhf-wandboard.morph
new file mode 100644
index 00000000..a47df980
--- /dev/null
+++ b/systems/devel-system-armv7lhf-wandboard.morph
@@ -0,0 +1,66 @@
+name: devel-system-armv7lhf-wandboard
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use on ARMv7 Wandboard computers using little-endian word order and
+ hardware floating-point support.
+arch: armv7lhf
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-wandboard
+ morph: strata/bsp-wandboard.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-armv8b64.morph b/systems/devel-system-armv8b64.morph
new file mode 100644
index 00000000..6c5f23ee
--- /dev/null
+++ b/systems/devel-system-armv8b64.morph
@@ -0,0 +1,67 @@
+name: devel-system-armv8b64
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use on 64-bit ARMv8 computers using big-endian word order.
+arch: armv8b64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-armv8b64-generic
+ morph: strata/bsp-armv8b64-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- simple-network
+- nfsboot
+- install-files
+- cloud-init
+- moonshot-kernel
+- install-essential-files
diff --git a/systems/devel-system-armv8l64.morph b/systems/devel-system-armv8l64.morph
new file mode 100644
index 00000000..cd7a1e44
--- /dev/null
+++ b/systems/devel-system-armv8l64.morph
@@ -0,0 +1,68 @@
+name: devel-system-armv8l64
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use on 64-bit ARMv8 computers using little-endian word order.
+arch: armv8l64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-armv8l64-generic
+ morph: strata/bsp-armv8l64-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- simple-network
+- nfsboot
+- install-files
+- cloud-init
+- moonshot-kernel
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-ppc64-chroot.morph b/systems/devel-system-ppc64-chroot.morph
new file mode 100644
index 00000000..b92073a9
--- /dev/null
+++ b/systems/devel-system-ppc64-chroot.morph
@@ -0,0 +1,60 @@
+name: devel-system-ppc64-chroot
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use as a chroot inside a Linux operating system on 64-bit POWER
+ computers.
+arch: ppc64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-ppc64-generic.morph b/systems/devel-system-ppc64-generic.morph
new file mode 100644
index 00000000..4d81ff5c
--- /dev/null
+++ b/systems/devel-system-ppc64-generic.morph
@@ -0,0 +1,63 @@
+name: devel-system-ppc64-generic
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use on 64-bit POWER computers.
+arch: ppc64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-ppc64-generic
+ morph: strata/bsp-ppc64-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-x86_32-chroot.morph b/systems/devel-system-x86_32-chroot.morph
new file mode 100644
index 00000000..7eb4fc01
--- /dev/null
+++ b/systems/devel-system-x86_32-chroot.morph
@@ -0,0 +1,62 @@
+name: devel-system-x86_32-chroot
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use as a chroot inside a Linux operating system on 32-bit Intel x86
+ computers.
+arch: x86_32
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: bsp-x86_both-tools
+ morph: strata/bsp-x86_both-tools.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-x86_32-generic.morph b/systems/devel-system-x86_32-generic.morph
new file mode 100644
index 00000000..1fd44086
--- /dev/null
+++ b/systems/devel-system-x86_32-generic.morph
@@ -0,0 +1,68 @@
+name: devel-system-x86_32-generic
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use on 32-bit Intel x86 computers.
+arch: x86_32
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_32-generic
+ morph: strata/bsp-x86_32-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- cloud-init
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-x86_64-chroot.morph b/systems/devel-system-x86_64-chroot.morph
new file mode 100644
index 00000000..48f23cd0
--- /dev/null
+++ b/systems/devel-system-x86_64-chroot.morph
@@ -0,0 +1,64 @@
+name: devel-system-x86_64-chroot
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use as a chroot inside a Linux operating system on 64-bit Intel x86
+ computers.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: bsp-x86_both-tools
+ morph: strata/bsp-x86_both-tools.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-x86_64-generic.morph b/systems/devel-system-x86_64-generic.morph
new file mode 100644
index 00000000..b49964dd
--- /dev/null
+++ b/systems/devel-system-x86_64-generic.morph
@@ -0,0 +1,69 @@
+name: devel-system-x86_64-generic
+kind: system
+description: |
+ A system with useful tools for doing Baserock development.
+
+ For use on 64-bit Intel x86 computers.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- simple-network
+- nfsboot
+- install-files
+- cloud-init
+- fstab
+- install-essential-files
diff --git a/systems/devel-system-x86_64-vagrant.morph b/systems/devel-system-x86_64-vagrant.morph
new file mode 100644
index 00000000..c6a5f6fe
--- /dev/null
+++ b/systems/devel-system-x86_64-vagrant.morph
@@ -0,0 +1,66 @@
+name: devel-system-x86_64-vagrant
+kind: system
+description: |
+ A system with useful tools for doing Baserock development, using Vagrant.
+
+ For use with the Vagrant tool for x86_64 virtual machines.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: virtualbox-guest-x86_64
+ morph: strata/virtualbox-guest-x86_64.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: baserock-import
+ morph: strata/baserock-import.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: swift
+ morph: strata/swift.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- vagrant
+- fstab
+- install-essential-files
diff --git a/systems/genivi-baseline-system-armv7lhf-jetson.morph b/systems/genivi-baseline-system-armv7lhf-jetson.morph
new file mode 100644
index 00000000..9306426d
--- /dev/null
+++ b/systems/genivi-baseline-system-armv7lhf-jetson.morph
@@ -0,0 +1,47 @@
+name: genivi-baseline-system-armv7lhf-jetson
+kind: system
+description: A GENIVI baseline system for a Jetson TK1.
+arch: armv7lhf
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-jetson
+ morph: strata/bsp-jetson.morph
+- name: connectivity
+ morph: strata/connectivity.morph
+- name: connman-common
+ morph: strata/connman-common.morph
+- name: audio-bluetooth
+ morph: strata/audio-bluetooth.morph
+- name: libdrm-common
+ morph: strata/libdrm-common.morph
+- name: multimedia-common
+ morph: strata/multimedia-common.morph
+- name: multimedia-gstreamer
+ morph: strata/multimedia-gstreamer.morph
+- name: genivi
+ morph: strata/genivi.morph
+- name: wayland-generic
+ morph: strata/wayland-generic.morph
+- name: graphics-common
+ morph: strata/graphics-common.morph
+- name: input-common
+ morph: strata/input-common.morph
+- name: mesa-common
+ morph: strata/mesa-common.morph
+- name: x-common
+ morph: strata/x-common.morph
+- name: weston-genivi
+ morph: strata/weston-genivi.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- strip-gplv3
+- fstab
+- install-essential-files
diff --git a/systems/genivi-baseline-system-armv7lhf-versatile.morph b/systems/genivi-baseline-system-armv7lhf-versatile.morph
new file mode 100644
index 00000000..698230bb
--- /dev/null
+++ b/systems/genivi-baseline-system-armv7lhf-versatile.morph
@@ -0,0 +1,45 @@
+name: genivi-baseline-system-armv7lhf-versatile
+kind: system
+description: A GENIVI baseline system for an ARMv7 Versatile system.
+arch: armv7lhf
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-armv7-versatile
+ morph: strata/bsp-armv7-versatile.morph
+- name: connectivity
+ morph: strata/connectivity.morph
+- name: connman-common
+ morph: strata/connman-common.morph
+- name: audio-bluetooth
+ morph: strata/audio-bluetooth.morph
+- name: libdrm-common
+ morph: strata/libdrm-common.morph
+- name: multimedia-common
+ morph: strata/multimedia-common.morph
+- name: multimedia-gstreamer
+ morph: strata/multimedia-gstreamer.morph
+- name: genivi
+ morph: strata/genivi.morph
+- name: wayland-generic
+ morph: strata/wayland-generic.morph
+- name: graphics-common
+ morph: strata/graphics-common.morph
+- name: input-common
+ morph: strata/input-common.morph
+- name: x-common
+ morph: strata/x-common.morph
+- name: weston-genivi
+ morph: strata/weston-genivi.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- strip-gplv3
+- fstab
+- install-essential-files
diff --git a/systems/genivi-baseline-system-x86_64-generic.morph b/systems/genivi-baseline-system-x86_64-generic.morph
new file mode 100644
index 00000000..f04485bc
--- /dev/null
+++ b/systems/genivi-baseline-system-x86_64-generic.morph
@@ -0,0 +1,48 @@
+name: genivi-baseline-system-x86_64-generic
+kind: system
+description: A GENIVI baseline system for a 64-bit x86 system.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: connectivity
+ morph: strata/connectivity.morph
+- name: connman-common
+ morph: strata/connman-common.morph
+- name: audio-bluetooth
+ morph: strata/audio-bluetooth.morph
+- name: libdrm-common
+ morph: strata/libdrm-common.morph
+- name: multimedia-common
+ morph: strata/multimedia-common.morph
+- name: multimedia-gstreamer
+ morph: strata/multimedia-gstreamer.morph
+- name: genivi
+ morph: strata/genivi.morph
+- name: wayland-generic
+ morph: strata/wayland-generic.morph
+- name: graphics-common
+ morph: strata/graphics-common.morph
+- name: input-common
+ morph: strata/input-common.morph
+- name: llvm-common
+ morph: strata/llvm-common.morph
+- name: mesa-common
+ morph: strata/mesa-common.morph
+- name: x-common
+ morph: strata/x-common.morph
+- name: weston-genivi
+ morph: strata/weston-genivi.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- strip-gplv3
+- install-essential-files
diff --git a/systems/initramfs-x86_64.morph b/systems/initramfs-x86_64.morph
new file mode 100644
index 00000000..4bb0020d
--- /dev/null
+++ b/systems/initramfs-x86_64.morph
@@ -0,0 +1,11 @@
+name: initramfs-x86_64
+kind: system
+description: Initramfs for x86_64
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+ artifacts:
+ - build-essential-minimal
+- name: initramfs-utils
+ morph: strata/initramfs-utils.morph
diff --git a/systems/installer-system-armv8b64.morph b/systems/installer-system-armv8b64.morph
new file mode 100644
index 00000000..726354c9
--- /dev/null
+++ b/systems/installer-system-armv8b64.morph
@@ -0,0 +1,36 @@
+name: installer-system-armv8b64
+kind: system
+description: The system that should be used as an Installer to install other Baserock systems.
+arch: armv8b64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-armv8b64-generic
+ morph: strata/bsp-armv8b64-generic.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: installer-utils
+ morph: strata/installer-utils.morph
+configuration-extensions:
+- set-hostname
+- install-files
+- fstab
+- installer
+- moonshot-kernel
+- install-essential-files
diff --git a/systems/installer-system-x86_64.morph b/systems/installer-system-x86_64.morph
new file mode 100644
index 00000000..3d0ced5d
--- /dev/null
+++ b/systems/installer-system-x86_64.morph
@@ -0,0 +1,35 @@
+name: installer-system-x86_64
+kind: system
+description: The system that should be used as an Installer to install other Baserock systems.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: installer-utils
+ morph: strata/installer-utils.morph
+configuration-extensions:
+- set-hostname
+- install-files
+- fstab
+- installer
+- install-essential-files
diff --git a/systems/minimal-system-armv5l-openbmc-aspeed.morph b/systems/minimal-system-armv5l-openbmc-aspeed.morph
new file mode 100644
index 00000000..fe596057
--- /dev/null
+++ b/systems/minimal-system-armv5l-openbmc-aspeed.morph
@@ -0,0 +1,20 @@
+name: minimal-system-armv5l-openbmc-aspeed
+kind: system
+description: The set of strata required to have a minimal system for armv5l
+arch: armv5l
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+ artifacts:
+ - build-essential-minimal
+- name: bsp-armv5l-openbmc-aspeed
+ morph: strata/bsp-armv5l-openbmc-aspeed.morph
+ artifacts:
+ - bsp-armv5l-openbmc-aspeed-runtime
+configuration-extensions:
+- set-hostname
+- simple-network
+- nfsboot
+- install-files
+- busybox-init
+- install-essential-files
diff --git a/systems/minimal-system-x86_32-generic.morph b/systems/minimal-system-x86_32-generic.morph
new file mode 100644
index 00000000..785a72a2
--- /dev/null
+++ b/systems/minimal-system-x86_32-generic.morph
@@ -0,0 +1,21 @@
+name: minimal-system-x86_32-generic
+kind: system
+description: The set of strata required to have a minimal system for a 32-bit x86
+ system.
+arch: x86_32
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+ artifacts:
+ - build-essential-minimal
+- name: bsp-x86_32-generic
+ morph: strata/bsp-x86_32-generic.morph
+ artifacts:
+ - bsp-x86_32-generic-runtime
+configuration-extensions:
+- set-hostname
+- simple-network
+- nfsboot
+- install-files
+- busybox-init
+- install-essential-files
diff --git a/systems/minimal-system-x86_64-generic.morph b/systems/minimal-system-x86_64-generic.morph
new file mode 100644
index 00000000..9da22ec8
--- /dev/null
+++ b/systems/minimal-system-x86_64-generic.morph
@@ -0,0 +1,21 @@
+name: minimal-system-x86_64-generic
+kind: system
+description: The set of strata required to have a minimal system for a 64-bit x86
+ system.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+ artifacts:
+ - build-essential-minimal
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+ artifacts:
+ - bsp-x86_64-generic-runtime
+configuration-extensions:
+- set-hostname
+- simple-network
+- nfsboot
+- install-files
+- busybox-init
+- install-essential-files
diff --git a/systems/nodejs-system-x86_64.morph b/systems/nodejs-system-x86_64.morph
new file mode 100644
index 00000000..d5ebcf30
--- /dev/null
+++ b/systems/nodejs-system-x86_64.morph
@@ -0,0 +1,22 @@
+name: nodejs-system-x86_64
+kind: system
+description: A system that is able to build other systems based on the 64-bit x86
+ architecture.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/ocaml-system-x86_64.morph b/systems/ocaml-system-x86_64.morph
new file mode 100644
index 00000000..1903e4d6
--- /dev/null
+++ b/systems/ocaml-system-x86_64.morph
@@ -0,0 +1,20 @@
+name: ocaml-system-x86_64
+kind: system
+description: OCaml base system
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: ocaml-language
+ morph: strata/ocaml-language.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
diff --git a/systems/openstack-system-x86_64.morph b/systems/openstack-system-x86_64.morph
new file mode 100644
index 00000000..8ab38bee
--- /dev/null
+++ b/systems/openstack-system-x86_64.morph
@@ -0,0 +1,85 @@
+name: openstack-system-x86_64
+kind: system
+description: OpenStack system in Baserock for the x86_64 architecture
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+- name: devtools
+ morph: strata/devtools.morph
+- name: tools
+ morph: strata/tools.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: lvm
+ morph: strata/lvm.morph
+- name: virtualization
+ morph: strata/virtualization.morph
+- name: connectivity
+ morph: strata/connectivity.morph
+- name: networking-utils
+ morph: strata/networking-utils.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: databases
+ morph: strata/databases.morph
+- name: erlang
+ morph: strata/erlang.morph
+- name: pcre-utils
+ morph: strata/pcre-utils.morph
+- name: apache-httpd-server
+ morph: strata/apache-httpd-server.morph
+- name: django
+ morph: strata/django.morph
+- name: xstatic
+ morph: strata/xstatic.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: openstack-services
+ morph: strata/openstack-services.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: ntpd
+ morph: strata/ntpd.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: test-tools
+ morph: strata/test-tools.morph
+- name: swift
+ morph: strata/swift.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- simple-network
+- nfsboot
+- install-files
+- cloud-init
+- hosts
+- openstack-keystone
+- openstack-glance
+- openstack-cinder
+- openstack-nova
+- openstack-network
+- openstack-neutron
+- openstack-ceilometer
+- fstab
+- openstack-ironic
+- install-essential-files
diff --git a/systems/qt4-devel-system-x86_64-generic.morph b/systems/qt4-devel-system-x86_64-generic.morph
new file mode 100644
index 00000000..15b85f70
--- /dev/null
+++ b/systems/qt4-devel-system-x86_64-generic.morph
@@ -0,0 +1,44 @@
+name: qt4-devel-system-x86_64-generic
+kind: system
+description: A system that is able to build other systems based on the 64-bit x86
+ architecture.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: audio-bluetooth
+ morph: strata/audio-bluetooth.morph
+- name: multimedia-common
+ morph: strata/multimedia-common.morph
+- name: multimedia-gstreamer-0.10
+ morph: strata/multimedia-gstreamer-0.10.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: x-common
+ morph: strata/x-common.morph
+- name: x-generic
+ morph: strata/x-generic.morph
+- name: qt4-tools
+ morph: strata/qt4-tools.morph
+- name: qt4-sdk
+ morph: strata/qt4-sdk.morph
+- name: lua
+ morph: strata/lua.morph
+- name: connectivity
+ morph: strata/connectivity.morph
+- name: connman-common
+ morph: strata/connman-common.morph
+- name: enlightenment
+ morph: strata/enlightenment.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/qt5-devel-system-x86_64-generic.morph b/systems/qt5-devel-system-x86_64-generic.morph
new file mode 100644
index 00000000..3cdce60c
--- /dev/null
+++ b/systems/qt5-devel-system-x86_64-generic.morph
@@ -0,0 +1,46 @@
+name: qt5-devel-system-x86_64-generic
+kind: system
+description: A system that is able to build other systems based on the 64-bit x86
+ architecture.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: audio-bluetooth
+ morph: strata/audio-bluetooth.morph
+- name: multimedia-common
+ morph: strata/multimedia-common.morph
+- name: multimedia-gstreamer
+ morph: strata/multimedia-gstreamer.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: x-common
+ morph: strata/x-common.morph
+- name: x-generic
+ morph: strata/x-generic.morph
+- name: qt5-tools
+ morph: strata/qt5-tools.morph
+- name: qt5-tools-qtwebkit
+ morph: strata/qt5-tools-qtwebkit.morph
+- name: qt5-sdk
+ morph: strata/qt5-sdk.morph
+- name: lua
+ morph: strata/lua.morph
+- name: connectivity
+ morph: strata/connectivity.morph
+- name: connman-common
+ morph: strata/connman-common.morph
+- name: enlightenment
+ morph: strata/enlightenment.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/swift-system-x86_64.morph b/systems/swift-system-x86_64.morph
new file mode 100644
index 00000000..81738558
--- /dev/null
+++ b/systems/swift-system-x86_64.morph
@@ -0,0 +1,34 @@
+name: swift-system-x86_64
+kind: system
+description: A base system for deploying swift nodes
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: swift
+ morph: strata/swift.morph
+- name: ntpd
+ morph: strata/ntpd.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- simple-network
+- nfsboot
+- install-files
+- fstab
+- swift-storage
+- install-essential-files
diff --git a/systems/trove-system-x86_64.morph b/systems/trove-system-x86_64.morph
new file mode 100644
index 00000000..0a5692f5
--- /dev/null
+++ b/systems/trove-system-x86_64.morph
@@ -0,0 +1,57 @@
+name: trove-system-x86_64
+kind: system
+description: Trove server
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: core
+ morph: strata/core.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: pcre-utils
+ morph: strata/pcre-utils.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: lorry-controller
+ morph: strata/lorry-controller.morph
+- name: trove
+ morph: strata/trove.morph
+- name: lighttpd-server
+ morph: strata/lighttpd-server.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: python-common
+ morph: strata/python-common.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: devtools
+ morph: strata/devtools.morph
+configuration-extensions:
+- set-hostname
+- trove
+- nfsboot-server
+- fstab
+- install-files
+- cloud-init
+- install-essential-files
diff --git a/systems/web-system-x86_64-generic.morph b/systems/web-system-x86_64-generic.morph
new file mode 100644
index 00000000..0b6e84b1
--- /dev/null
+++ b/systems/web-system-x86_64-generic.morph
@@ -0,0 +1,37 @@
+name: web-system-x86_64-generic
+kind: system
+description: A system with web tools
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: core
+ morph: strata/core.morph
+- name: tools
+ morph: strata/tools.morph
+- name: nfs
+ morph: strata/nfs.morph
+- name: databases
+ morph: strata/databases.morph
+- name: pcre-utils
+ morph: strata/pcre-utils.morph
+- name: lighttpd-server
+ morph: strata/lighttpd-server.morph
+- name: webtools
+ morph: strata/webtools.morph
+- name: erlang
+ morph: strata/erlang.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/weston-system-armv7lhf-jetson.morph b/systems/weston-system-armv7lhf-jetson.morph
new file mode 100644
index 00000000..ccb2c3ee
--- /dev/null
+++ b/systems/weston-system-armv7lhf-jetson.morph
@@ -0,0 +1,49 @@
+name: weston-system-armv7lhf-jetson
+kind: system
+description: A Weston system for ARMv7 little-endian hard-float NVIDIA Jetson TK1 computers.
+arch: armv7lhf
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-jetson
+ morph: strata/bsp-jetson.morph
+- name: connectivity
+ morph: strata/connectivity.morph
+- name: audio-bluetooth
+ morph: strata/audio-bluetooth.morph
+- name: libdrm-common
+ morph: strata/libdrm-common.morph
+- name: multimedia-common
+ morph: strata/multimedia-common.morph
+- name: multimedia-gstreamer
+ morph: strata/multimedia-gstreamer.morph
+- name: wayland-generic
+ morph: strata/wayland-generic.morph
+- name: graphics-common
+ morph: strata/graphics-common.morph
+- name: input-common
+ morph: strata/input-common.morph
+- name: llvm-common
+ morph: strata/llvm-common.morph
+- name: mesa-common
+ morph: strata/mesa-common.morph
+- name: weston-common
+ morph: strata/weston-common.morph
+- name: x-common
+ morph: strata/x-common.morph
+- name: x-generic
+ morph: strata/x-generic.morph
+- name: tools
+ morph: strata/tools.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/weston-system-x86_64-generic.morph b/systems/weston-system-x86_64-generic.morph
new file mode 100644
index 00000000..84f0bad0
--- /dev/null
+++ b/systems/weston-system-x86_64-generic.morph
@@ -0,0 +1,49 @@
+name: weston-system-x86_64-generic
+kind: system
+description: A Weston system for a 64-bit x86 system.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: coreutils-common
+ morph: strata/coreutils-common.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: connectivity
+ morph: strata/connectivity.morph
+- name: audio-bluetooth
+ morph: strata/audio-bluetooth.morph
+- name: libdrm-common
+ morph: strata/libdrm-common.morph
+- name: multimedia-common
+ morph: strata/multimedia-common.morph
+- name: multimedia-gstreamer
+ morph: strata/multimedia-gstreamer.morph
+- name: wayland-generic
+ morph: strata/wayland-generic.morph
+- name: graphics-common
+ morph: strata/graphics-common.morph
+- name: input-common
+ morph: strata/input-common.morph
+- name: llvm-common
+ morph: strata/llvm-common.morph
+- name: mesa-common
+ morph: strata/mesa-common.morph
+- name: weston-common
+ morph: strata/weston-common.morph
+- name: x-common
+ morph: strata/x-common.morph
+- name: x-generic
+ morph: strata/x-generic.morph
+- name: tools
+ morph: strata/tools.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/xfce-system.morph b/systems/xfce-system.morph
new file mode 100644
index 00000000..643291d6
--- /dev/null
+++ b/systems/xfce-system.morph
@@ -0,0 +1,55 @@
+name: xfce-system
+kind: system
+description: A GENIVI devel system with XFCE
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: connectivity
+ morph: strata/connectivity.morph
+- name: connman-common
+ morph: strata/connman-common.morph
+- name: audio-bluetooth
+ morph: strata/audio-bluetooth.morph
+- name: x-common
+ morph: strata/x-common.morph
+- name: x-generic
+ morph: strata/x-generic.morph
+- name: multimedia-common
+ morph: strata/multimedia-common.morph
+- name: multimedia-gstreamer
+ morph: strata/multimedia-gstreamer.morph
+- name: genivi
+ morph: strata/genivi.morph
+- name: gtk2
+ morph: strata/gtk2.morph
+- name: tools
+ morph: strata/tools.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-pygobject
+ morph: strata/python-pygobject.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: libsoup-common
+ morph: strata/libsoup-common.morph
+- name: ostree-core
+ morph: strata/ostree-core.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: xfce
+ morph: strata/xfce.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/zookeeper-client-x86_64.morph b/systems/zookeeper-client-x86_64.morph
new file mode 100644
index 00000000..a9b01cd5
--- /dev/null
+++ b/systems/zookeeper-client-x86_64.morph
@@ -0,0 +1,28 @@
+name: zookeeper-client-x86_64
+kind: system
+description: |
+ A system that is able to build other systems based on the 64-bit x86
+ architecture. Includes the installation of zookeeper and a test client.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: zookeeper
+ morph: strata/zookeeper.morph
+- name: zookeeper-client
+ morph: strata/zookeeper-client.morph
+- name: test-tools
+ morph: strata/test-tools.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- simple-network
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/systems/zookeeper-server-x86_64.morph b/systems/zookeeper-server-x86_64.morph
new file mode 100644
index 00000000..987ba9e1
--- /dev/null
+++ b/systems/zookeeper-server-x86_64.morph
@@ -0,0 +1,28 @@
+name: zookeeper-server-x86_64
+kind: system
+description: |
+ A system that is able to build other systems based on the 64-bit x86
+ architecture. Includes the zookeeper server and setup for basic tests.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: zookeeper
+ morph: strata/zookeeper.morph
+- name: zookeeper-server
+ morph: strata/zookeeper-server.morph
+- name: test-tools
+ morph: strata/test-tools.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- simple-network
+- nfsboot
+- install-files
+- install-essential-files
diff --git a/trove.configure b/trove.configure
new file mode 100755
index 00000000..f823762c
--- /dev/null
+++ b/trove.configure
@@ -0,0 +1,148 @@
+#!/bin/sh
+#
+# Copyright (C) 2013 - 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to fully configure
+# a Trove instance at deployment time. It uses the following variables
+# from the environment (run `morph help trove.configure` to see a description
+# of them):
+#
+# * TROVE_ID
+# * TROVE_HOSTNAME (optional, defaults to TROVE_ID)
+# * TROVE_COMPANY
+# * LORRY_SSH_KEY
+# * UPSTREAM_TROVE
+# * UPSTREAM_TROVE_PROTOCOL
+# * TROVE_ADMIN_USER
+# * TROVE_ADMIN_EMAIL
+# * TROVE_ADMIN_NAME
+# * TROVE_ADMIN_SSH_PUBKEY
+# * LORRY_CONTROLLER_MINIONS (optional, defaults to 4)
+# * TROVE_BACKUP_KEYS - a space-separated list of paths to SSH keys.
+# (optional)
+# * TROVE_GENERIC (optional)
+#
+# The configuration of a Trove is slightly tricky: part of it has to
+# be run on the configured system after it has booted. We accomplish
+# this by copying in all the relevant data to the target system
+# (in /var/lib/trove-setup), and creating a systemd unit file that
+# runs on the first boot. The first boot will be detected by the
+# existence of the /var/lib/trove-setup/needed file.
+
+set -e
+
+if [ "$TROVE_GENERIC" ]
+then
+ echo "Not configuring the trove, it will be generic"
+ exit 0
+fi
+
+
+# Check that all the variables needed are present:
+
+error_vars=false
+if test "x$TROVE_ID" = "x"; then
+ echo "ERROR: TROVE_ID needs to be defined."
+ error_vars=true
+fi
+
+if test "x$TROVE_COMPANY" = "x"; then
+ echo "ERROR: TROVE_COMPANY needs to be defined."
+ error_vars=true
+fi
+
+if test "x$TROVE_ADMIN_USER" = "x"; then
+ echo "ERROR: TROVE_ADMIN_USER needs to be defined."
+ error_vars=true
+fi
+
+if test "x$TROVE_ADMIN_NAME" = "x"; then
+ echo "ERROR: TROVE_ADMIN_NAME needs to be defined."
+ error_vars=true
+fi
+
+if test "x$TROVE_ADMIN_EMAIL" = "x"; then
+ echo "ERROR: TROVE_ADMIN_EMAIL needs to be defined."
+ error_vars=true
+fi
+
+if ! ssh-keygen -lf $LORRY_SSH_KEY > /dev/null 2>&1
+then
+ echo "ERROR: LORRY_SSH_KEY is not a vaild ssh key."
+ error_vars=true
+fi
+
+if ! ssh-keygen -lf $WORKER_SSH_PUBKEY > /dev/null 2>&1
+then
+ echo "ERROR: WORKER_SSH_PUBKEY is not a vaild ssh key."
+ error_vars=true
+fi
+
+if ! ssh-keygen -lf $TROVE_ADMIN_SSH_PUBKEY > /dev/null 2>&1
+then
+ echo "ERROR: TROVE_ADMIN_SSH_PUBKEY is not a vaild ssh key."
+ error_vars=true
+fi
+
+if "$error_vars"; then
+ exit 1
+fi
+
+ROOT="$1"
+
+
+TROVE_DATA="$ROOT/etc/trove"
+mkdir -p "$TROVE_DATA"
+
+install -m 0600 "$LORRY_SSH_KEY" "$TROVE_DATA/lorry.key"
+install -m 0644 "${LORRY_SSH_KEY}.pub" "$TROVE_DATA/lorry.key.pub"
+install -m 0644 "$TROVE_ADMIN_SSH_PUBKEY" "$TROVE_DATA/admin.key.pub"
+install -m 0644 "$WORKER_SSH_PUBKEY" "$TROVE_DATA/worker.key.pub"
+
+
+python <<'EOF' >"$TROVE_DATA/trove.conf"
+import os, sys, yaml
+
+trove_configuration={
+ 'TROVE_ID': os.environ['TROVE_ID'],
+ 'TROVE_COMPANY': os.environ['TROVE_COMPANY'],
+ 'TROVE_ADMIN_USER': os.environ['TROVE_ADMIN_USER'],
+ 'TROVE_ADMIN_EMAIL': os.environ['TROVE_ADMIN_EMAIL'],
+ 'TROVE_ADMIN_NAME': os.environ['TROVE_ADMIN_NAME'],
+ 'LORRY_SSH_KEY': '/etc/trove/lorry.key',
+ 'LORRY_SSH_PUBKEY': '/etc/trove/lorry.key.pub',
+ 'TROVE_ADMIN_SSH_PUBKEY': '/etc/trove/admin.key.pub',
+ 'WORKER_SSH_PUBKEY': '/etc/trove/worker.key.pub',
+}
+
+
+
+optional_keys = ('MASON_ID', 'HOSTNAME', 'TROVE_HOSTNAME',
+ 'LORRY_CONTROLLER_MINIONS', 'TROVE_BACKUP_KEYS',
+ 'UPSTREAM_TROVE', 'UPSTREAM_TROVE_PROTOCOL')
+
+for key in optional_keys:
+ if key in os.environ:
+ trove_configuration[key]=os.environ[key]
+
+yaml.dump(trove_configuration, sys.stdout, default_flow_style=False)
+EOF
+
+if [ -n "$TROVE_BACKUP_KEYS" ]; then
+ mkdir -p "$TROVE_DATA/backup-keys"
+ cp -- $TROVE_BACKUP_KEYS "$TROVE_DATA/backup-keys"
+ echo "TROVE_BACKUP_KEYS: /etc/trove/backup-keys/*" >> "$TROVE_DATA/trove.conf"
+fi
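For reference, the embedded Python above dumps the collected settings as plain YAML into /etc/trove/trove.conf on the deployed system. A minimal sketch of the generated file, assuming the illustrative values from the example in trove.configure.help below (optional keys such as TROVE_HOSTNAME and UPSTREAM_TROVE appear only when set; yaml.dump emits the keys in alphabetical order):

    LORRY_SSH_KEY: /etc/trove/lorry.key
    LORRY_SSH_PUBKEY: /etc/trove/lorry.key.pub
    TROVE_ADMIN_EMAIL: tomjon@example.com
    TROVE_ADMIN_NAME: Tomjon of Lancre
    TROVE_ADMIN_SSH_PUBKEY: /etc/trove/admin.key.pub
    TROVE_ADMIN_USER: tomjon
    TROVE_COMPANY: My Personal Trove for Me, Myself and I
    TROVE_HOSTNAME: my-trove.example.com
    TROVE_ID: my-trove
    UPSTREAM_TROVE: git.baserock.org
    WORKER_SSH_PUBKEY: /etc/trove/worker.key.pub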
diff --git a/trove.configure.help b/trove.configure.help
new file mode 100644
index 00000000..c96bdf74
--- /dev/null
+++ b/trove.configure.help
@@ -0,0 +1,126 @@
+help: |
+ This is a "morph deploy" configuration extension to fully configure
+ a Trove instance at deployment time. It uses the following
+ configuration variables:
+
+ * `TROVE_ID`
+ * `TROVE_HOSTNAME` (optional, defaults to `TROVE_ID`)
+ * `TROVE_COMPANY`
+ * `LORRY_SSH_KEY`
+ * `UPSTREAM_TROVE`
+ * `TROVE_ADMIN_USER`
+ * `TROVE_ADMIN_EMAIL`
+ * `TROVE_ADMIN_NAME`
+ * `TROVE_ADMIN_SSH_PUBKEY`
+ * `LORRY_CONTROLLER_MINIONS` (optional, defaults to 4)
+ * `TROVE_BACKUP_KEYS` - a space-separated list of paths to SSH keys.
+ (optional)
+
+ The variables are described in more detail below.
+
+ A Trove deployment needs to know the following things:
+
+ * The Trove's ID and public name.
+ * The Trove's administrator name and access details.
+ * Private and public SSH keys for the Lorry user on the Trove.
+ * Which upstream Trove it should be set to mirror upon initial deploy.
+
+ These are specified with the configuration variables described in this
+ help.
+
+ * `TROVE_GENERIC` -- boolean. If it's true the trove will be generic
+ and it won't be configured with any of the other variables listed
+ here.
+
+ * `TROVE_ID` -- the identifier of the Trove. This separates it from
+ other Troves, and allows mirroring of Troves to happen without local
+ changes getting overwritten.
+
+ The Trove ID is used in several ways. Any local repositories (those not
+ mirrored from elsewhere) get created under a prefix that is the ID.
+ Thus, the local repositories on the `git.baserock.org` Trove, whose
+ Trove ID is `baserock`, are named
+ `baserock/baserock/definitions.git` and similar. The ID is used
+ there twice: first as a prefix and then as a "project name" within
+ that prefix. There can be more projects under the prefix. For
+ example, there is a `baserock/local-config/lorries.git` repository,
+ where `local-config` is a separate project from `baserock`. Projects
+ here are a concept for the Trove's git access control language.
+
+ The Trove ID is also used as the prefix for any branch and tag names
+ created locally for repositories that are not local. Thus, in the
+ `delta/linux.git` repository, any local branches would be called
+ something like `baserock/morph`, instead of just `morph`. The
+ Trove's git access control prevents normal users from pushing
+ branches and tags that do not have the Trove ID as the prefix.
+
+ * `TROVE_HOSTNAME` -- the public name of the Trove. This is an
+ optional setting, and defaults to `TROVE_ID`. The public name is
+ typically the domain name of the server (e.g., `git.baserock.org`),
+ but can also be an IP address. This setting is used when Trove needs
+ to generate URLs that point to itself, such as the `git://` and
+ `http://` URLs for each git repository that is viewed via the web
+ interface.
+
+ Note that this is _not_ the system hostname. That is set separately,
+ with the `HOSTNAME` configuration setting (see the
+ `set-hostname.configure` extension).
+
+ * `TROVE_COMPANY` -- a description of the organisation who own the
+ Trove. This is shown in various parts of the web interface of the
+ Trove. It is for descriptive purposes only.
+
+ * `LORRY_SSH_KEY` -- ssh key pair that the Trove's Lorry will use to
+ access an upstream Trove, and to push updates to the Trove's git
+ server.
+
+ The value is a filename on the system doing the deployment (where
+ `morph deploy` is run). The file contains the _private_ key, and the
+ public key is in a file with the `.pub` suffix added to the name.
+
+ The upstream Trove needs to be configured to allow this key to
+ access it. This configuration does not do that automatically.
+
+ * `UPSTREAM_TROVE` -- public name of the upstream Trove (domain
+ name or IP address). This is an optional setting. If it's set,
+ the new Trove will be configured to mirror that Trove.
+
+ * `TROVE_ADMIN_USER`, `TROVE_ADMIN_EMAIL`, `TROVE_ADMIN_NAME`,
+ `TROVE_ADMIN_SSH_PUBKEY` -- details of the Trove's (initial)
+ administrator.
+
+ Each Trove needs at least one administrator user, and one is created
+ upon initial deployment. `TROVE_ADMIN_USER` is the username of the
+ account to be created, `TROVE_ADMIN_EMAIL` should be the e-mail of
+ the user, and `TROVE_ADMIN_NAME` is their name. If more
+ administrators are needed, the initial person should create them
+ using the usual Gitano commands.
+
+ * `LORRY_CONTROLLER_MINIONS` -- the number of Lorry Controller worker
+ processes to start. This is an optional setting and defaults to 4.
+ The more workers are running, the more Lorry jobs can run at the same
+ time, but the more resources they require.
+
+ * `TROVE_BACKUP_KEYS` -- a space-separated list of paths to SSH keys.
+ If this is set, the Trove will have a backup user that can be accessed
+ with rsync using the SSH keys provided.
+
+ Example
+ -------
+
+ The following set of variables could be used to deploy a Trove instance:
+
+ TROVE_ID: my-trove
+ TROVE_HOSTNAME: my-trove.example.com
+ TROVE_COMPANY: My Personal Trove for Me, Myself and I
+ LORRY_SSH_KEY: my-trove/lorry.key
+ UPSTREAM_TROVE: git.baserock.org
+ UPSTREAM_TROVE_USER: my-trove
+ UPSTREAM_TROVE_EMAIL: my-trove@example.com
+ TROVE_ADMIN_USER: tomjon
+ TROVE_ADMIN_EMAIL: tomjon@example.com
+ TROVE_ADMIN_NAME: Tomjon of Lancre
+ TROVE_ADMIN_SSH_PUBKEY: my-trove/tomjon.key.pub
+
+ These would be put into the cluster morphology used to do the
+ deployment.
diff --git a/vagrant-files/home/vagrant/.ssh/authorized_keys b/vagrant-files/home/vagrant/.ssh/authorized_keys
new file mode 100644
index 00000000..18a9c00f
--- /dev/null
+++ b/vagrant-files/home/vagrant/.ssh/authorized_keys
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key
diff --git a/vagrant-files/manifest b/vagrant-files/manifest
new file mode 100644
index 00000000..67168341
--- /dev/null
+++ b/vagrant-files/manifest
@@ -0,0 +1,4 @@
+0040755 0 0 /home
+0040755 1000 0000 /home/vagrant
+0040700 1000 1000 /home/vagrant/.ssh
+0100600 1000 1000 /home/vagrant/.ssh/authorized_keys
diff --git a/vagrant.configure b/vagrant.configure
new file mode 100644
index 00000000..abc3ea0c
--- /dev/null
+++ b/vagrant.configure
@@ -0,0 +1,55 @@
+#!/bin/sh
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+if test "x$VAGRANT" = "x"; then
+ exit 0
+fi
+
+for needed in etc/ssh/sshd_config etc/sudoers; do
+ if ! test -e "$ROOT/$needed"; then
+ echo >&2 "Unable to find $needed"
+ echo >&2 "Cannot continue configuring as Vagrant basebox"
+ exit 1
+ fi
+done
+
+# SSH daemon needs to be configured to not use DNS...
+sed -i -e 's/^\(.*[Uu][Ss][Ee][Dd][Nn][Ss].*\)$/#\1/' "$ROOT/etc/ssh/sshd_config"
+echo "UseDNS no" >> "$ROOT/etc/ssh/sshd_config"
+
+# We need to add a vagrant user with "vagrant" as the password. We do this
+# manually rather than by chrooting in and running adduser, because at
+# deployment time we cannot rely on being able to run adduser in the target.
+# In practice we could for now, since we cannot deploy raw disks
+# cross-platform and expect extlinux to install anyway, but we avoid it as
+# good practice and to highlight this deficiency.
+echo 'vagrant:x:1000:1000:Vagrant User:/home/vagrant:/bin/bash' >> "$ROOT/etc/passwd"
+echo 'vagrant:/6PTOoWylhw3w:16198:0:99999:7:::' >> "$ROOT/etc/shadow"
+echo 'vagrant:x:1000:' >> "$ROOT/etc/group"
+mkdir -p "$ROOT/home/vagrant"
+chown -R 1000:1000 "$ROOT/home/vagrant"
+
+# Next, the vagrant user is meant to have sudo access
+echo 'vagrant ALL=(ALL) NOPASSWD: ALL' >> "$ROOT/etc/sudoers"
+
+# And ensure that we get sbin in our path
+echo 'PATH="$PATH:/sbin:/usr/sbin"' >> "$ROOT/etc/profile"
+echo 'export PATH' >> "$ROOT/etc/profile"
+