diff options
Diffstat (limited to 'clusters')
27 files changed, 1573 insertions, 0 deletions
diff --git a/clusters/cephclient.morph b/clusters/cephclient.morph new file mode 100644 index 00000000..b4db22e0 --- /dev/null +++ b/clusters/cephclient.morph @@ -0,0 +1,20 @@ +name: ceph-cluster +kind: cluster +systems: +- morph: systems/ceph-service-x86_64-generic.morph + deploy: + ceph-node-virtualbox-image: + type: virtualbox-ssh + SYSTEM: systems/ceph-service-x86_64-generic.morph + location: vbox+ssh://user@machine/ChefNode4/home/user/chefnode4.vdi + # HOST_IPADDR and NETMASK should be set to the IP address and netmask of the virtualbox host on the host-only interface. + #HOST_IPADDR: 10.0.100.100 + #NETMASK: 255.255.255.0 + + # This is an example of how to configure the three interfaces necessary to support ceph in the BCPC configuration. + #NETWORK_CONFIG: lo:loopback;enp0s3:static,address=10.0.100.14,netmask=255.255.255.0;enp0s8:static,address=172.16.100.14,netmask=255.255.255.0;enp0s9:static,address=192.168.100.14,netmask=255.255.255.0 + DISK_SIZE: 8G + HOSTNAME: CephNode4 + + # You must install authorized_keys in chef/root/.ssh/ before this will work. + INSTALL_FILES: chef/manifest diff --git a/clusters/ci.morph b/clusters/ci.morph new file mode 100644 index 00000000..cb56328c --- /dev/null +++ b/clusters/ci.morph @@ -0,0 +1,117 @@ +name: ci +kind: cluster +description: | + Deploy all the systems for CD. + + This cluster morph is for use by the Mason Continuous Delivery pipeline + during development. 
+systems: +- morph: systems/devel-system-x86_64-generic.morph + deploy: + devel-system-x86_64-generic: + type: rawdisk + location: devel-system-x86_64-generic.img + DISK_SIZE: 4G +- morph: systems/devel-system-x86_32-generic.morph + deploy: + devel-system-x86_32-generic: + type: rawdisk + location: devel-system-x86_32-generic.img + DISK_SIZE: 4G +- morph: systems/build-system-armv7lhf-jetson.morph + deploy: + build-system-armv7lhf-jetson: + type: rawdisk + location: build-system-armv7lhf-jetson.img + DISK_SIZE: 2G + BOOT_DEVICE: "/dev/mmcblk0p1" + ROOT_DEVICE: "/dev/mmcblk0p2" + DTB_PATH: "boot/tegra124-jetson-tk1.dtb" + BOOTLOADER_CONFIG_FORMAT: "extlinux" + BOOTLOADER_INSTALL: "none" + KERNEL_ARGS: cma=256M console=tty0 console=ttyS0,115200n8 no_console_suspend=1 nouveau.pstate=1 +- morph: systems/weston-system-x86_64-generic.morph + deploy: + weston-system-x86_64-generic: + type: rawdisk + location: weston-system-x86_64-generic.img + DISK_SIZE: 4G + KERNEL_ARGS: vga=788 +- morph: systems/weston-system-armv7lhf-jetson.morph + deploy: + weston-system-armv7lhf-jetson: + type: rawdisk + location: weston-system-armv7lhf-jetson.img + DISK_SIZE: 4G + BOOT_DEVICE: "/dev/mmcblk0p1" + ROOT_DEVICE: "/dev/mmcblk0p2" + DTB_PATH: "boot/tegra124-jetson-tk1.dtb" + BOOTLOADER_CONFIG_FORMAT: "extlinux" + BOOTLOADER_INSTALL: "none" + KERNEL_ARGS: cma=256M console=tty0 console=ttyS0,115200n8 no_console_suspend=1 nouveau.pstate=1 +- morph: systems/genivi-baseline-system-x86_64-generic.morph + deploy: + genivi-baseline-system-x86_64-generic: + type: rawdisk + location: genivi-baseline-system-x86_64-generic.img + DISK_SIZE: 4G + KERNEL_ARGS: vga=788 +- morph: systems/genivi-baseline-system-armv7lhf-jetson.morph + deploy: + genivi-baseline-system-armv7lhf-jetson: + type: rawdisk + location: genivi-baseline-system-armv7lhf-jetson.img + DISK_SIZE: 4G + BOOT_DEVICE: "/dev/mmcblk0p1" + ROOT_DEVICE: "/dev/mmcblk0p2" + DTB_PATH: "boot/tegra124-jetson-tk1.dtb" + BOOTLOADER_CONFIG_FORMAT: 
"extlinux" + BOOTLOADER_INSTALL: "none" + KERNEL_ARGS: cma=256M console=tty0 console=ttyS0,115200n8 no_console_suspend=1 nouveau.pstate=1 +- morph: systems/openstack-system-x86_64.morph + deploy: + openstack-system-x86_64: + type: rawdisk + location: baserock-openstack-system-x86_64.img + DISK_SIZE: 5G + INSTALL_FILES: openstack/manifest + HOSTNAME: onenode + RABBITMQ_HOST: onenode + RABBITMQ_PORT: 5672 + RABBITMQ_USER: rabbitmq + RABBITMQ_PASSWORD: veryinsecure + CONTROLLER_HOST_ADDRESS: onenode + MANAGEMENT_INTERFACE_IP_ADDRESS: 127.0.0.1 + KEYSTONE_TEMPORARY_ADMIN_TOKEN: 22f3aa1cf538e3f6d5e8 + KEYSTONE_ADMIN_PASSWORD: veryinsecure + KEYSTONE_DB_USER: keystoneDB + KEYSTONE_DB_PASSWORD: veryinsecure + GLANCE_SERVICE_USER: glance + GLANCE_SERVICE_PASSWORD: veryinsecure + GLANCE_DB_USER: glanceDB + GLANCE_DB_PASSWORD: veryinsecure + NOVA_SERVICE_USER: nova + NOVA_SERVICE_PASSWORD: veryinsecure + NOVA_DB_USER: novaDB + NOVA_DB_PASSWORD: veryinsecure + NOVA_VIRT_TYPE: qemu + CINDER_SERVICE_USER: cinder + CINDER_SERVICE_PASSWORD: veryinsecure + CINDER_DB_USER: cinderDB + CINDER_DB_PASSWORD: veryinsecure + CINDER_DEVICE: /dev/sdb + NEUTRON_SERVICE_USER: neutron + NEUTRON_SERVICE_PASSWORD: veryinsecure + NEUTRON_DB_USER: neutronDB + NEUTRON_DB_PASSWORD: veryinsecure + METADATA_PROXY_SHARED_SECRET: novaneutronmetasecret + IRONIC_SERVICE_USER: ironic + IRONIC_SERVICE_PASSWORD: veryinsecure + IRONIC_DB_USER: ironicDB + IRONIC_DB_PASSWORD: veryinsecure + CEILOMETER_SERVICE_USER: ceilometer + CEILOMETER_SERVICE_PASSWORD: veryinsecure + CEILOMETER_DB_USER: ceilometerDB + CEILOMETER_DB_PASSWORD: veryinsecure + METERING_SECRET: insecureceilometersecret + HOSTS_CONTROLLER: 127.0.0.1 onenode diff --git a/clusters/example-distbuild-cluster.morph b/clusters/example-distbuild-cluster.morph new file mode 100644 index 00000000..513c16c5 --- /dev/null +++ b/clusters/example-distbuild-cluster.morph @@ -0,0 +1,37 @@ +name: example-distbuild-cluster +kind: cluster +description: | + This is 
an example cluster morph that can be adapted to set up a + Baserock distributed build network. + + You will need to deploy a Trove for the distributed build network + to use before deploying this cluster. The worker SSH key file should + be generated as part of the Trove deployment. It is the key used by + workers to authenticate with the Trove to give them read access to + all source repositories. +systems: +- morph: systems/build-system-x86_64.morph + deploy-defaults: + CONTROLLERHOST: build-controller + DISTBUILD_CONTROLLER: false + DISTBUILD_WORKER: true + FSTAB_SRC: LABEL=src /srv/distbuild auto defaults,rw,noatime 0 2 + INSTALL_FILES: distbuild/manifest + NFSBOOT_CONFIGURE: true + TROVE_ID: $MY_TROVE + WORKER_SSH_KEY: ssh-keys/worker.key + deploy: + build-controller: + type: nfsboot + location: $MY_TROVE + DISTBUILD_CONTROLLER: true + HOSTNAME: build-controller + WORKERS: build-node-1, build-node-2 + build-node-1: + type: nfsboot + location: $MY_TROVE + HOSTNAME: build-node-1 + build-node-2: + type: nfsboot + location: $MY_TROVE + HOSTNAME: build-node-2 diff --git a/clusters/example-swift-storage-cluster.morph b/clusters/example-swift-storage-cluster.morph new file mode 100644 index 00000000..b1ea784f --- /dev/null +++ b/clusters/example-swift-storage-cluster.morph @@ -0,0 +1,62 @@ +name: example-swift-storage-cluster +kind: cluster +systems: +- morph: systems/swift-system-x86_64.morph + deploy-defaults: + INSTALL_FILES: swift/manifest + + CONTROLLER_HOST_ADDRESS: <controller host address> + + SWIFT_PART_POWER: 10 + SWIFT_REPLICAS: 3 + SWIFT_MIN_PART_HOURS: 1 + + SWIFT_STORAGE_DEVICES: [{ ip: <node0 management ip>, device: sdb, weight: 100 }, + { ip: <node0 management ip>, device: sdc, weight: 100 }, + { ip: <node0 management ip>, device: sdd, weight: 100 }, + + { ip: <node1 management ip>, device: sdb, weight: 100 }, + { ip: <node1 management ip>, device: sdc, weight: 100 }, + { ip: <node1 management ip>, device: sdd, weight: 100 }] + + # This value can be 
any random string or number + # but each node in your swift cluster must have the same value + SWIFT_REBALANCE_SEED: 3828 + + # NOTE: Replace SWIFT_HASH_PATH_PREFIX and SWIFT_HASH_PATH_SUFFIX + # with your own unique values, + # + # `openssl rand -hex 10' can be used to generate unique values + # + # These values should be kept secret, do not lose them. + # + SWIFT_HASH_PATH_PREFIX: 041fc210e4e1d333ce1d + SWIFT_HASH_PATH_SUFFIX: 4d6f5362a356dda7fb7d + + FSTAB_SDB: /dev/sdb /srv/node/sdb xfs defaults,user,rw 0 0 + FSTAB_SDC: /dev/sdc /srv/node/sdc xfs defaults,user,rw 0 0 + FSTAB_SDD: /dev/sdd /srv/node/sdd xfs defaults,user,rw 0 0 + + deploy: + node0: + type: kvm + location: kvm+ssh://user@host/swift-storage-0/home/user/swift-storage-0.img + DISK_SIZE: 10G + RAM_SIZE: 1G + VCPUS: 1 + HOSTNAME: swift-storage-0 + NIC_CONFIG: network=default + NETWORK_CONFIG: ens3:static,address=<node0 management ip>,netmask=255.255.255.0 + MANAGEMENT_INTERFACE_IP_ADDRESS: <node0 management ip> + ATTACH_DISKS: /dev/node0_sdb:/dev/node0_sdc:/dev/node0_sdd + node1: + type: kvm + location: kvm+ssh://user@host/swift-storage-1/home/user/swift-storage-1.img + DISK_SIZE: 10G + RAM_SIZE: 1G + VCPUS: 1 + HOSTNAME: swift-storage-1 + NIC_CONFIG: network=default + NETWORK_CONFIG: ens3:static,address=<node1 management ip>,netmask=255.255.255.0 + MANAGEMENT_INTERFACE_IP_ADDRESS: <node1 management ip> + ATTACH_DISKS: /dev/node1_sdb:/dev/node1_sdc:/dev/node1_sdd diff --git a/clusters/hardware-deployment.morph b/clusters/hardware-deployment.morph new file mode 100644 index 00000000..c6b7dce9 --- /dev/null +++ b/clusters/hardware-deployment.morph @@ -0,0 +1,35 @@ +name: hardware-deployment +kind: cluster +description: | + Deploy a build-system into hardware using the combination + of the pxeboot.write extension and the installer system. + This examples uses the spawn-novlan mode of pxeboot.write. 
+systems: +- morph: systems/installer-system-x86_64.morph + deploy: + installer: + type: pxeboot + location: AB:CD:EF:12:34:56:78 #MAC address. + PXEBOOT_MODE: spawn-novlan + PXEBOOT_DEPLOYER_INTERFACE: ens6 + KERNEL_ARGS: console=ttyS1,9600 console=tty0 init=/usr/lib/baserock-installer/installer + HOSTNAME: installer-system + IPMI_USER: myipmiuser + IPMI_PASSWORD: myipmipassword + IPMI_HOST: 123.34.45.120 #IPMI ip address + INSTALLER_TARGET_STORAGE_DEVICE: /dev/sda + INSTALLER_ROOTFS_TO_INSTALL: /rootfs + subsystems: + - morph: systems/build-system-x86_64.morph + deploy: + to-install: + type: sysroot + location: /rootfs + INITRAMFS_PATH: boot/initramfs.gz + KERNEL_ARGS: console=ttyS1,9600 console=tty0 + subsystems: + - morph: systems/initramfs-x86_64.morph + deploy: + initramfs: + type: initramfs + location: boot/initramfs.gz diff --git a/clusters/image-package-example.morph b/clusters/image-package-example.morph new file mode 100644 index 00000000..fd8487e2 --- /dev/null +++ b/clusters/image-package-example.morph @@ -0,0 +1,12 @@ +name: image-package-example +kind: cluster +description: | + Packaged system and script for installing it, for deferred instantiation. 
+systems: +- morph: systems/base-system-x86_32-generic.morph + deploy: + imgpkg: + type: image-package + location: image-package-example.tar + BOOTLOADER_BLOBS: /usr/share/syslinux/mbr.bin + INCLUDE_SCRIPTS: image-package-example/make-disk-image.sh.in:image-package-example/disk-install.sh.in:image-package-example/common.sh.in diff --git a/clusters/initramfs-test.morph b/clusters/initramfs-test.morph new file mode 100644 index 00000000..afc94961 --- /dev/null +++ b/clusters/initramfs-test.morph @@ -0,0 +1,17 @@ +name: initramfs-test +kind: cluster +systems: +- morph: systems/base-system-x86_64-generic.morph + deploy: + system: + type: rawdisk + location: initramfs-system-x86_64.img + DISK_SIZE: 1G + HOSTNAME: initramfs-system + INITRAMFS_PATH: boot/initramfs.gz + subsystems: + - morph: systems/initramfs-x86_64.morph + deploy: + initramfs: + type: initramfs + location: boot/initramfs.gz diff --git a/clusters/installer-build-system-x86_64.morph b/clusters/installer-build-system-x86_64.morph new file mode 100644 index 00000000..a9ebcaca --- /dev/null +++ b/clusters/installer-build-system-x86_64.morph @@ -0,0 +1,52 @@ +name: installer-build-system-x86_64 +kind: cluster +description: | + This is a cluster morphology that can be used to deploy + installer systems. This is done by adding the files needed + using a manifest file (installer/manifest) with the INSTALL_FILES + extension, and using the installer.configure extension to generate + the configuration needed in the system. + + This manifest, which is installing the installer script in + /usr/lib/installer/installer.py, in combination of adding + "init=/usr/lib/installer/installer.py" as KERNEL_ARGS in the system + makes the system run the installer.py script as init script. + + The installer.py script will read the information needed to + install the system (where is the root filesystem to install and + where to install it) from /etc/install.conf. 
+ + This cluster also deploys a subsystem (a build-system in this case) + which is going to be the system that the installer system/script is + going to install. + +systems: +- morph: systems/installer-system-x86_64.morph + deploy: + installer: + type: rawdisk + location: installer-build-system-x86_64.img + KERNEL_ARGS: init=/usr/lib/baserock-installer/installer + DISK_SIZE: 6G + HOSTNAME: installer-x86_64 + INSTALLER_TARGET_STORAGE_DEVICE: /dev/sda + INSTALLER_ROOTFS_TO_INSTALL: /rootfs + INITRAMFS_PATH: boot/initramfs.gz + subsystems: + - morph: systems/initramfs-x86_64.morph + deploy: + installer-initramfs: + type: initramfs + location: boot/initramfs.gz + - morph: systems/build-system-x86_64.morph + deploy: + to-install: + type: sysroot + location: /rootfs + INITRAMFS_PATH: boot/initramfs.gz + subsystems: + - morph: systems/initramfs-x86_64.morph + deploy: + to-install-initramfs: + type: initramfs + location: boot/initramfs.gz diff --git a/clusters/jetson-upgrade.morph b/clusters/jetson-upgrade.morph new file mode 100644 index 00000000..9fd5155b --- /dev/null +++ b/clusters/jetson-upgrade.morph @@ -0,0 +1,18 @@ +name: jetson-upgrade +kind: cluster +systems: +- morph: systems/devel-system-armv7lhf-jetson.morph + deploy-defaults: + TROVE_HOST: TROVE_HOST + TROVE_ID: TROVE_ID + BOOT_DEVICE: "/dev/mmcblk0p1" + ROOT_DEVICE: "/dev/mmcblk0p2" + DTB_PATH: "boot/tegra124-jetson-tk1.dtb" + BOOTLOADER_CONFIG_FORMAT: "extlinux" + BOOTLOADER_INSTALL: "none" + KERNEL_ARGS: cma=256M console=tty0 console=ttyS0,115200n8 no_console_suspend=1 nouveau.pstate=1 + FSTAB_SRC: LABEL=src /src auto defaults,rw,noatime,nofail 0 2 + deploy: + self: + type: ssh-rsync + location: root@127.0.0.1 diff --git a/clusters/mason-openstack.morph b/clusters/mason-openstack.morph new file mode 100644 index 00000000..6ef14888 --- /dev/null +++ b/clusters/mason-openstack.morph @@ -0,0 +1,39 @@ +name: openstack-mason +kind: cluster +description: | + This is a template cluster morphology that can be 
adapted to set up a + Mason. Masons are composed of a trove and a distbuild system. +systems: +- morph: systems/build-system-x86_64.morph + deploy-defaults: + ARTIFACT_CACHE_SERVER: example-cache-server + CONTROLLERHOST: controller-hostname + DISTBUILD_CONTROLLER: true + DISTBUILD_WORKER: true + INSTALL_FILES: distbuild/manifest + RAM_SIZE: 8G + TROVE_HOST: your-upstream-trove + TROVE_ID: your-upstream-trove-prefix + VCPUS: 2 + WORKER_SSH_KEY: ssh-keys/worker.key + deploy: + mason-openstack: + type: openstack + location: openstack-auth-url (eg example.com:5000/v2.0) + DISK_SIZE: 6G + DISTBUILD_CONTROLLER: true + HOSTNAME: controller-hostname + MASON_CLUSTER_MORPHOLOGY: clusters/ci.morph + MASON_DEFINITIONS_REF: master + MASON_DISTBUILD_ARCH: x86_64 + MASON_TEST_HOST: user@openstack-auth-url + WORKERS: controller-hostname + OPENSTACK_AUTH_URL: openstack-auth-url + OPENSTACK_USER: baserock + OPENSTACK_TENANT: baserock + OPENSTACK_TENANT_ID: 7d7ebfe23367490b973a10fa426c3aec + OPENSTACK_IMAGENAME: mason-openstack + OPENSTACK_NETWORK_ID: 71f5151a-b7c3-405d-a841-d1b07e5db099 + CLOUD_INIT: yes + KERNEL_ARGS: console=ttyS0 console=tty0 + TEST_INFRASTRUCTURE_TYPE: openstack diff --git a/clusters/mason.morph b/clusters/mason.morph new file mode 100644 index 00000000..9717239d --- /dev/null +++ b/clusters/mason.morph @@ -0,0 +1,56 @@ +name: example-mason-cluster +kind: cluster +description: | + This is a template cluster morphology that can be adapted to set up a + Mason. Masons are composed of a trove and a distbuild system. + + It is suggested that you use mason/mason-generator.sh to adapt this + template to suit your needs. It also handles the generation of + keys to let the systems communicate. 
+systems: +- morph: systems/trove-system-x86_64.morph + deploy: + red-box-v1-trove: + type: kvm + location: kvm+ssh://vm-user@vm-host/red-box-v1-trove/vm-path/red-box-v1-trove.img + AUTOSTART: true + DISK_SIZE: 20G + HOSTNAME: red-box-v1-trove + LORRY_SSH_KEY: ssh_keys/lorry.key + MASON_SSH_PUBKEY: ssh_keys/mason.key.pub + RAM_SIZE: 8G + TROVE_ADMIN_EMAIL: adminuser@example.com + TROVE_ADMIN_NAME: Nobody + TROVE_ADMIN_SSH_PUBKEY: ssh_keys/id_rsa.pub + TROVE_ADMIN_USER: adminuser + TROVE_COMPANY: Company name goes here + TROVE_HOST: red-box-v1 + TROVE_ID: red-box-v1-trove + UPSTREAM_TROVE: upstream-trove + VCPUS: 2 + VERSION_LABEL: 45 + WORKER_SSH_PUBKEY: ssh_keys/worker.key.pub +- morph: systems/build-system-x86_64.morph + deploy-defaults: + ARTIFACT_CACHE_SERVER: red-box-v1-trove.example.com + CONTROLLERHOST: red-box-v1-controller.example.com + DISTBUILD_CONTROLLER: false + DISTBUILD_WORKER: true + INSTALL_FILES: distbuild/manifest + RAM_SIZE: 8G + TROVE_HOST: upstream-trove + TROVE_ID: upstream-trove + VCPUS: 2 + WORKER_SSH_KEY: ssh_keys/worker.key + deploy: + red-box-v1-controller: + type: kvm + location: kvm+ssh://vm-user@vm-host/red-box-v1-controller/vm-path/red-box-v1-controller.img + DISK_SIZE: 60G + DISTBUILD_CONTROLLER: true + HOSTNAME: red-box-v1-controller + MASON_CLUSTER_MORPHOLOGY: clusters/ci.morph + MASON_DEFINITIONS_REF: master + MASON_DISTBUILD_ARCH: x86_64 + MASON_TEST_HOST: vm-user@vm-host:/vm-path/ + WORKERS: red-box-v1-controller diff --git a/clusters/minimal-system-armv5l-openbmc-aspeed-deploy.morph b/clusters/minimal-system-armv5l-openbmc-aspeed-deploy.morph new file mode 100644 index 00000000..eea600cf --- /dev/null +++ b/clusters/minimal-system-armv5l-openbmc-aspeed-deploy.morph @@ -0,0 +1,13 @@ +name: minimal-system-armv5l-openbmc-aspeed-deploy +kind: cluster +systems: +- morph: systems/minimal-system-armv5l-openbmc-aspeed.morph + deploy: + minimal-system-armv5l-openbmc-aspeed: + type: jffs2 + location: 
minimal-system-armv5l-openbmc-aspeed.img + ROOT_DEVICE: "/dev/mtdblock" + BOOTLOADER_CONFIG_FORMAT: "extlinux" + BOOTLOADER_INSTALL: "none" + ERASE_BLOCK: 64 + INIT_SYSTEM: busybox diff --git a/clusters/minimal-system-deploy.morph b/clusters/minimal-system-deploy.morph new file mode 100644 index 00000000..06629ffc --- /dev/null +++ b/clusters/minimal-system-deploy.morph @@ -0,0 +1,14 @@ +name: minimal-system-deploy +kind: cluster +description: | + Deploy a minimal system to a system running KVM +systems: +- morph: systems/minimal-system-x86_32-generic.morph + deploy: + vm: + type: kvm + location: kvm+ssh://192.168.122.1/tiny-x86_32/srv/VMs/tiny-x86_32.img + DISK_SIZE: 512M + HOSTNAME: tiny-x86_32 + INIT_SYSTEM: busybox + RAM_SIZE: 512M diff --git a/clusters/moonshot-m2-armv8b64.morph b/clusters/moonshot-m2-armv8b64.morph new file mode 100644 index 00000000..c8e5bc81 --- /dev/null +++ b/clusters/moonshot-m2-armv8b64.morph @@ -0,0 +1,56 @@ +name: moonshot-m2-deployment +kind: cluster +description: | + Install a build armv8b64 system into the M.2 SSD storage of an HP + Moonshot node, using a combination of the pxeboot.write extension and + the installer system. 
+systems: +- morph: systems/installer-system-armv8b64.morph + deploy: + installer: + type: pxeboot + location: 14:58:d0:57:7f:42 + PXEBOOT_MODE: existing-server + PXEBOOT_CONFIG_TFTP_ADDRESS: sftp://192.168.0.1/srv/nfsboot/tftp/ + PXEBOOT_ROOTFS_RSYNC_ADDRESS: rsync://192.168.0.1/srv/nfsboot/ + PXEBOOT_PXE_REBOOT_COMMAND: | + ssh Administrator@10.0.1.10 set node power off force c31n1 + ssh Administrator@10.0.1.10 set node boot pxe c31n1 + # Nodes are powered on twice as sometimes powering them on + # once is not enough + ssh Administrator@10.0.1.10 set node power on c31n1 + ssh Administrator@10.0.1.10 set node power on c31n1 + PXEBOOT_REBOOT_COMMAND: | + ssh Administrator@10.0.1.10 set node power off force c31n1 + ssh Administrator@10.0.1.10 set node boot m.2 c31n1 + ssh Administrator@10.0.1.10 set node power on c31n1 + ssh Administrator@10.0.1.10 set node power on c31n1 + + INSTALLER_TARGET_STORAGE_DEVICE: /dev/sda + INSTALLER_ROOTFS_TO_INSTALL: /rootfs + INSTALLER_POST_INSTALL_COMMAND: | + while : ; do + echo "enter 'installed' in your deployment machine to finish the installation" + sleep 2 + done + INSTALLER_CREATE_BOOT_PARTITION: yes + + HOSTNAME: installer-system-c31n1 + DTB_PATH: boot/m400-1003.dtb + KERNEL_ARGS: console=ttyS0,9600n8r init=/usr/lib/baserock-installer/installer + INSTALL_FILES: moonshot/manifest + MOONSHOT_KERNEL: yes + subsystems: + - morph: systems/devel-system-armv8b64.morph + deploy: + to-install: + type: sysroot + location: /rootfs + HOSTNAME: baserock-c31n1 + DTB_PATH: boot/m400-1003.dtb + INSTALL_FILES: moonshot/manifest + MOONSHOT_KERNEL: yes + BOOT_DEVICE: /dev/sda1 + ROOT_DEVICE: /dev/sda2 + BOOTLOADER_CONFIG_FORMAT: extlinux + BOOTLOADER_INSTALL: "none" diff --git a/clusters/moonshot-pxe-armv8b64.morph b/clusters/moonshot-pxe-armv8b64.morph new file mode 100644 index 00000000..2d32efb0 --- /dev/null +++ b/clusters/moonshot-pxe-armv8b64.morph @@ -0,0 +1,30 @@ +name: moonshot-m400-armv8b64-netboot +kind: cluster +description: | + 
Deploy a big-endian armv8b64 devel system onto an HP Moonshot node + + The system will be configured to boot through PXE from existing DHCP, + TFTP and NFS servers. +systems: +- morph: systems/devel-system-armv8b64.morph + deploy: + netboot: + type: pxeboot + location: 14:58:d0:57:7f:42 + PXEBOOT_MODE: existing-server + PXEBOOT_CONFIG_TFTP_ADDRESS: sftp://192.168.0.1/srv/nfsboot/tftp/ + PXEBOOT_ROOTFS_RSYNC_ADDRESS: rsync://192.168.0.1/srv/nfsboot/ + PXEBOOT_PXE_REBOOT_COMMAND: | + ssh Administrator@10.0.1.10 set node power off force c31n1 + ssh Administrator@10.0.1.10 set node boot pxe c31n1 + # Nodes are powered on twice as sometimes powering them on + # once is not enough + ssh Administrator@10.0.1.10 set node power on c31n1 + ssh Administrator@10.0.1.10 set node power on c31n1 + PXE_INSTALLER: no + + HOSTNAME: baserock-c31n1 + DTB_PATH: boot/m400-1003.dtb + KERNEL_ARGS: console=ttyS0,9600n8r rw + INSTALL_FILES: moonshot/manifest + MOONSHOT_KERNEL: yes diff --git a/clusters/moonshot-pxe-armv8l64.morph b/clusters/moonshot-pxe-armv8l64.morph new file mode 100644 index 00000000..3286c72e --- /dev/null +++ b/clusters/moonshot-pxe-armv8l64.morph @@ -0,0 +1,22 @@ +name: moonshot-m400-armv8l64-netboot +kind: cluster +description: | + Deploy an armv8l64 devel system into a HP Moonshot node + + The system will be configured to boot through PXE from existing DHCP, + TFTP and NFS servers. 
+systems: +- morph: systems/devel-system-armv8l64.morph + deploy: + netboot: + type: pxeboot + location: 14:58:d0:57:7f:42 + PXEBOOT_MODE: existing-server + PXEBOOT_CONFIG_TFTP_ADDRESS: sftp://192.168.0.1/srv/nfsboot/tftp/ + PXEBOOT_ROOTFS_RSYNC_ADDRESS: rsync://192.168.0.1/srv/nfsboot/ + KERNEL_ARGS: console=ttyS0,9600n8r rw + DTB_PATH: boot/m400-1003.dtb + HOSTNAME: baserock-m400-node31 + MOONSHOT_KERNEL: yes + INSTALL_FILES: moonshot/manifest + PXE_INSTALLER: no diff --git a/clusters/openstack-one-node-swift.morph b/clusters/openstack-one-node-swift.morph new file mode 100644 index 00000000..588b6e81 --- /dev/null +++ b/clusters/openstack-one-node-swift.morph @@ -0,0 +1,142 @@ +name: openstack-one-node-swift +kind: cluster +description: | + This is a cluster morphology for deploying a x86_64 OpenStack system + all-in-one-node. + + Requirements to be able to run and test the system: + + - DISK_SIZE should be bigger than 5G + - The system has to have available at least 4G of RAM, but once + you start instantiating VMs you will need more. + - The IP of the system can't change, and you need to know it beforehand, + that is, the system needs a static IP address. + + This cluster is configurable, but with the following constraints: + + - The hostname in RABBITMQ_HOST has to match CONTROLLER_HOST_ADDRESS, + and HOST_CONTROLLER. + - HOSTS_CONTROLLER is only needed if the hostname (see previous point) + is not a FQDN. + - The IP listed in MANAGEMENT_INTERFACE_IP_ADDRESS has to match the one + used in HOSTS_CONTROLLER. + - CINDER_DEVICE should be a path to a storage device ready to be + used/formated for cinder data. + - EXTERNAL_INTERFACE is required when the system has more than one network + interface. + + You can also have a look at the following suggestions: + + - NOVA_VIRT_TYPE can be either 'kvm' or 'qemu', depending on where the + system is being deployed to. 
+ - We recommend changing all the PASSWORDs variables, also the + KEYSTONE_TEMPORARY_ADMIN_TOKEN, METADATA_PROXY_SHARED_SECRET and + METERING_SECRET + - Setting NOVA_BAREMETAL_SCHEDULING with an YAML truth value will configure + Nova to schedule baremetal machines through the Ironic driver, instead of + sheduling virtual machines. + +systems: +- morph: systems/openstack-system-x86_64.morph + deploy: + release: + type: rawdisk + location: baserock-openstack-system-x86_64.img + DISK_SIZE: 10G + INSTALL_FILES: openstack/manifest swift/manifest + + HOSTNAME: onenode + + ######################################################################### + ## Swift config options + ######################################################################### + + SWIFT_CONTROLLER: True + + SWIFT_ADMIN_PASSWORD: insecure + + SWIFT_PART_POWER: 10 + SWIFT_REPLICAS: 3 + SWIFT_MIN_PART_HOURS: 1 + + SWIFT_STORAGE_DEVICES: [{ ip: <storage node 0 management ip>, device: sdb, weight: 100 }, + { ip: <storage node 0 management ip>, device: sdc, weight: 100 }, + { ip: <storage node 0 management ip>, device: sdd, weight: 100 }, + + { ip: <storage node 1 management ip>, device: sdb, weight: 100 }, + { ip: <storage node 1 management ip>, device: sdc, weight: 100 }, + { ip: <storage node 1 management ip>, device: sdd, weight: 100 }] + + # This value can be any random string or number + # but each node in your swift cluster must have the same values + SWIFT_REBALANCE_SEED: 3828 + + # NOTE: Replace SWIFT_HASH_PATH_PREFIX and SWIFT_HASH_PATH_SUFFIX + # with your own unique values, + # + # `openssl rand -hex 10' can be used to generate unique values + # + # These values should be kept secret, do not lose them. 
+ # + SWIFT_HASH_PATH_PREFIX: 041fc210e4e1d333ce1d + SWIFT_HASH_PATH_SUFFIX: 4d6f5362a356dda7fb7d + + ######################################################################### + + RABBITMQ_HOST: onenode + RABBITMQ_PORT: 5672 + RABBITMQ_USER: rabbitmq + RABBITMQ_PASSWORD: veryinsecure + + CONTROLLER_HOST_ADDRESS: onenode + MANAGEMENT_INTERFACE_IP_ADDRESS: <management ip> + + KEYSTONE_ENABLE_SERVICE: True + KEYSTONE_TEMPORARY_ADMIN_TOKEN: 22f3aa1cf538e3f6d5e8 + KEYSTONE_ADMIN_PASSWORD: veryinsecure + KEYSTONE_DB_USER: keystoneDB + KEYSTONE_DB_PASSWORD: veryinsecure + + GLANCE_ENABLE_SERVICE: True + GLANCE_SERVICE_USER: glance + GLANCE_SERVICE_PASSWORD: veryinsecure + GLANCE_DB_USER: glanceDB + GLANCE_DB_PASSWORD: veryinsecure + + NOVA_SERVICE_USER: nova + NOVA_SERVICE_PASSWORD: veryinsecure + NOVA_DB_USER: novaDB + NOVA_DB_PASSWORD: veryinsecure + NOVA_VIRT_TYPE: qemu + NOVA_BAREMETAL_SCHEDULING: no + + CINDER_SERVICE_USER: cinder + CINDER_SERVICE_PASSWORD: veryinsecure + CINDER_DB_USER: cinderDB + CINDER_DB_PASSWORD: veryinsecure + # Storage device to be used by Cinder + CINDER_DEVICE: /dev/sdb + + NEUTRON_SERVICE_USER: neutron + NEUTRON_SERVICE_PASSWORD: veryinsecure + NEUTRON_DB_USER: neutronDB + NEUTRON_DB_PASSWORD: veryinsecure + METADATA_PROXY_SHARED_SECRET: novaneutronmetasecret + + IRONIC_ENABLE_SERVICE: True + IRONIC_SERVICE_USER: ironic + IRONIC_SERVICE_PASSWORD: veryinsecure + IRONIC_DB_USER: ironicDB + IRONIC_DB_PASSWORD: veryinsecure + + CEILOMETER_SERVICE_USER: ceilometer + CEILOMETER_SERVICE_PASSWORD: veryinsecure + CEILOMETER_DB_USER: ceilometerDB + CEILOMETER_DB_PASSWORD: veryinsecure + METERING_SECRET: insecureceilometersecret + + HOSTS_CONTROLLER: <management ip> onenode + + # Network interface to be used, only needed if there are more + # than one available. 
+ # EXTERNAL_INTERFACE: eno1 diff --git a/clusters/openstack-one-node.morph b/clusters/openstack-one-node.morph new file mode 100644 index 00000000..037cd23c --- /dev/null +++ b/clusters/openstack-one-node.morph @@ -0,0 +1,106 @@ +name: openstack-one-node +kind: cluster +description: | + This is a cluster morphology for deploying a x86_64 OpenStack system + all-in-one-node. + + Requirements to be able to run and test the system: + + - DISK_SIZE should be bigger than 5G + - The system has to have available at least 4G of RAM, but once + you start instantiating VMs you will need more. + - The IP of the system can't change, and you need to know it beforehand, + that is, the system needs a static IP address. + + This cluster is configurable, but with the following constraints: + + - The hostname in RABBITMQ_HOST has to match CONTROLLER_HOST_ADDRESS, + and HOST_CONTROLLER. + - HOSTS_CONTROLLER is only needed if the hostname (see previous point) + is not a FQDN. + - The IP listed in MANAGEMENT_INTERFACE_IP_ADDRESS has to match the one + used in HOSTS_CONTROLLER. + - CINDER_DEVICE should be a path to a storage device ready to be + used/formated for cinder data. + - EXTERNAL_INTERFACE is required when the system has more than one network + interface. + + You can also have a look at the following suggestions: + + - NOVA_VIRT_TYPE can be either 'kvm' or 'qemu', depending on where the + system is being deployed to. + - We recommend changing all the PASSWORDs variables, also the + KEYSTONE_TEMPORARY_ADMIN_TOKEN, METADATA_PROXY_SHARED_SECRET and + METERING_SECRET + - Setting NOVA_BAREMETAL_SCHEDULING with an YAML truth value will configure + Nova to schedule baremetal machines through the Ironic driver, instead of + sheduling virtual machines. 
+ +systems: +- morph: systems/openstack-system-x86_64.morph + deploy: + release: + type: rawdisk + location: baserock-openstack-system-x86_64.img + DISK_SIZE: 10G + INSTALL_FILES: openstack/manifest + + HOSTNAME: onenode + + RABBITMQ_HOST: onenode + RABBITMQ_PORT: 5672 + RABBITMQ_USER: rabbitmq + RABBITMQ_PASSWORD: veryinsecure + + CONTROLLER_HOST_ADDRESS: onenode + MANAGEMENT_INTERFACE_IP_ADDRESS: <management ip> + + KEYSTONE_ENABLE_SERVICE: True + KEYSTONE_TEMPORARY_ADMIN_TOKEN: 22f3aa1cf538e3f6d5e8 + KEYSTONE_ADMIN_PASSWORD: veryinsecure + KEYSTONE_DB_USER: keystoneDB + KEYSTONE_DB_PASSWORD: veryinsecure + + GLANCE_ENABLE_SERVICE: True + GLANCE_SERVICE_USER: glance + GLANCE_SERVICE_PASSWORD: veryinsecure + GLANCE_DB_USER: glanceDB + GLANCE_DB_PASSWORD: veryinsecure + + NOVA_SERVICE_USER: nova + NOVA_SERVICE_PASSWORD: veryinsecure + NOVA_DB_USER: novaDB + NOVA_DB_PASSWORD: veryinsecure + NOVA_VIRT_TYPE: qemu + NOVA_BAREMETAL_SCHEDULING: no + + CINDER_SERVICE_USER: cinder + CINDER_SERVICE_PASSWORD: veryinsecure + CINDER_DB_USER: cinderDB + CINDER_DB_PASSWORD: veryinsecure + # Storage device to be used by Cinder + CINDER_DEVICE: /dev/sdb + + NEUTRON_SERVICE_USER: neutron + NEUTRON_SERVICE_PASSWORD: veryinsecure + NEUTRON_DB_USER: neutronDB + NEUTRON_DB_PASSWORD: veryinsecure + METADATA_PROXY_SHARED_SECRET: novaneutronmetasecret + + IRONIC_ENABLE_SERVICE: True + IRONIC_SERVICE_USER: ironic + IRONIC_SERVICE_PASSWORD: veryinsecure + IRONIC_DB_USER: ironicDB + IRONIC_DB_PASSWORD: veryinsecure + + CEILOMETER_SERVICE_USER: ceilometer + CEILOMETER_SERVICE_PASSWORD: veryinsecure + CEILOMETER_DB_USER: ceilometerDB + CEILOMETER_DB_PASSWORD: veryinsecure + METERING_SECRET: insecureceilometersecret + + HOSTS_CONTROLLER: <management ip> onenode + + # Network interface to be used, only needed if there are more + # than one available. 
+ # EXTERNAL_INTERFACE: eno1 diff --git a/clusters/openstack-three-node-installer.morph b/clusters/openstack-three-node-installer.morph new file mode 100644 index 00000000..6285217a --- /dev/null +++ b/clusters/openstack-three-node-installer.morph @@ -0,0 +1,239 @@ +name: openstack-three-node-installer +kind: cluster +description: | + + This is a cluster morphology for deploying an installer for an x86_64 + OpenStack system spread across three nodes. + + This cluster creates disk images that may be `dd`'d onto install media to + produce an OpenStack cluster when instantiated. + + Alternatively it may be used to install directly onto a physical disk by + running: + + morph deploy clusters/openstack-three-node-installer.morph \ + network-installer network-installer.location=/dev/vdb + + Substituting network-installer for either compute-installer or + controller-installer will produce different configurations, and it is possible + to substitue /dev/vdb for a different path to a disk image to install to a + different disk image. + + Substitute the values of HOSTNAME, NETWORK_CONFIG, EXTERNAL_INTERFACE, + MANAGEMENT_IP_ADDRESS, CONTROLLER_HOST_ADDRESS, RABBITMQ_HOST and HOSTS_* to + match your hardware and networking configuration. + + Requirements to be able to run and test the system: + + - DISK_SIZE should be bigger than 5G + - The system has to have available at least 4G of RAM, but once + you start instantiating VMs you will need more. + - The IP of the system can't change, and you need to know it beforehand, + that is, the system needs a static IP address. + + This cluster is configurable, but with the following constraints: + + - The hostname in RABBITMQ_HOST has to match CONTROLLER_HOST_ADDRESS, + and HOST_CONTROLLER. + - HOSTS_CONTROLLER is only needed if the hostname (see previous point) + is not a FQDN. + - The IP listed in MANAGEMENT_INTERFACE_IP_ADDRESS has to match the one + used in HOSTS_CONTROLLER. 
+ - CINDER_DEVICE should be a path to a storage device ready to be
+ used/formatted for cinder data.
+ - EXTERNAL_INTERFACE is required when the system has more than one network
+ interface.
+
+ You can also have a look at the following suggestions:
+
+ - NOVA_VIRT_TYPE can be either 'kvm' or 'qemu', depending on where the
+ system is being deployed to.
+ - We recommend changing all the PASSWORDs variables, also the
+ KEYSTONE_TEMPORARY_ADMIN_TOKEN, METADATA_PROXY_SHARED_SECRET and
+ METERING_SECRET.
+ - Setting NOVA_BAREMETAL_SCHEDULING with a YAML truth value will configure
+ Nova to schedule baremetal machines through the Ironic driver, instead of
+ scheduling virtual machines.
+
+systems:
+- morph: systems/installer-system-x86_64.morph
+ deploy:
+ network-installer: &installer
+ type: rawdisk
+ location: installer-openstack-network-x86_64.img
+ KERNEL_ARGS: init=/usr/lib/baserock-installer/installer
+ DISK_SIZE: 6G
+ HOSTNAME: installer-x86_64
+ INSTALLER_TARGET_STORAGE_DEVICE: /dev/sda
+ INSTALLER_ROOTFS_TO_INSTALL: /rootfs
+ INSTALLER_POST_INSTALL_COMMAND: 'sync; poweroff -f'
+ INITRAMFS_PATH: boot/initramfs.gz
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ network-initramfs: &initramfs
+ type: initramfs
+ location: boot/initramfs.gz
+ - morph: systems/openstack-system-x86_64.morph
+ deploy:
+ network-to-install: &stack-node
+ type: sysroot
+ location: rootfs
+ INSTALL_FILES: openstack/manifest
+ INITRAMFS_PATH: boot/initramfs.gz
+
+ HOSTNAME: threenode-network
+
+ RABBITMQ_HOST: threenode-controller.os-mgmt
+ RABBITMQ_PORT: 5672
+ RABBITMQ_USER: rabbitmq
+ RABBITMQ_PASSWORD: veryinsecure
+
+ # This token needs to be unique and secret
+ KEYSTONE_ENABLE_SERVICE: False
+ KEYSTONE_TEMPORARY_ADMIN_TOKEN: 22f3aa1cf538e3f6d5e8
+ KEYSTONE_ADMIN_PASSWORD: veryinsecure
+ KEYSTONE_DB_USER: keystoneDB
+ KEYSTONE_DB_PASSWORD: veryinsecure
+
+ GLANCE_ENABLE_SERVICE: False
+ GLANCE_SERVICE_USER: glance
+ GLANCE_SERVICE_PASSWORD: veryinsecure
+
GLANCE_DB_USER: glanceDB + GLANCE_DB_PASSWORD: veryinsecure + + NOVA_ENABLE_CONTROLLER: False + NOVA_ENABLE_COMPUTE: False + NOVA_SERVICE_USER: nova + NOVA_SERVICE_PASSWORD: veryinsecure + NOVA_DB_USER: novaDB + NOVA_DB_PASSWORD: veryinsecure + NOVA_VIRT_TYPE: kvm + NOVA_BAREMETAL_SCHEDULING: no + + CINDER_ENABLE_CONTROLLER: False + CINDER_ENABLE_COMPUTE: False + CINDER_ENABLE_STORAGE: False + CINDER_SERVICE_USER: cinder + CINDER_SERVICE_PASSWORD: veryinsecure + CINDER_DB_USER: cinderDB + CINDER_DB_PASSWORD: veryinsecure + # Storage device to be used by Cinder + CINDER_DEVICE: /dev/sdb + + NEUTRON_ENABLE_AGENT: False + NEUTRON_ENABLE_MANAGER: True + NEUTRON_ENABLE_CONTROLLER: False + NEUTRON_SERVICE_USER: neutron + NEUTRON_SERVICE_PASSWORD: veryinsecure + NEUTRON_DB_USER: neutronDB + NEUTRON_DB_PASSWORD: veryinsecure + METADATA_PROXY_SHARED_SECRET: novaneutronmetasecret + + IRONIC_ENABLE_SERVICE: False + IRONIC_SERVICE_USER: ironic + IRONIC_SERVICE_PASSWORD: veryinsecure + IRONIC_DB_USER: ironicDB + IRONIC_DB_PASSWORD: veryinsecure + + CEILOMETER_SERVICE_USER: ceilometer + CEILOMETER_SERVICE_PASSWORD: veryinsecure + CEILOMETER_DB_USER: ceilometerDB + CEILOMETER_DB_PASSWORD: veryinsecure + CEILOMETER_ENABLE_CONTROLLER: False + CEILOMETER_ENABLE_COMPUTE: False + METERING_SECRET: insecureceilometersecret + + CONTROLLER_HOST_ADDRESS: threenode-controller.os-mgmt + MANAGEMENT_INTERFACE_IP_ADDRESS: 10.0.0.1 + + HOSTS_SELF: 10.0.0.1 threenode-network + HOSTS_NETWORK: 10.0.0.1 threenode-network.os-mgmt + HOSTS_CONTROL: 10.0.0.2 threenode-controller.os-mgmt + HOSTS_COMPUTE: 10.0.0.3 threenode-compute.os-mgmt + + EXTERNAL_INTERFACE: enp3s0 + NETWORK_CONFIG: enp3s0:dhcp;enp2s0:static,address=10.0.0.1,netmask=255.255.255.0 + subsystems: + - morph: systems/initramfs-x86_64.morph + deploy: + network-to-install-initramfs: *initramfs +- morph: systems/installer-system-x86_64.morph + deploy: + controller-installer: + <<: *installer + location: 
installer-openstack-controller-x86_64.img + subsystems: + - morph: systems/initramfs-x86_64.morph + deploy: + controller-initramfs: *initramfs + - morph: systems/openstack-system-x86_64.morph + deploy: + controller-to-install: + <<: *stack-node + HOSTNAME: threenode-controller + + KEYSTONE_ENABLE_SERVICE: True + + GLANCE_ENABLE_SERVICE: True + + NOVA_ENABLE_CONTROLLER: True + + CINDER_ENABLE_CONTROLLER: True + CINDER_ENABLE_COMPUTE: False + CINDER_ENABLE_STORAGE: False + + NEUTRON_ENABLE_AGENT: False + NEUTRON_ENABLE_MANAGER: False + NEUTRON_ENABLE_CONTROLLER: True + METADATA_PROXY_SHARED_SECRET: novaneutronmetasecret + + IRONIC_ENABLE_SERVICE: True + + CEILOMETER_ENABLE_CONTROLLER: True + CEILOMETER_ENABLE_COMPUTE: False + + MANAGEMENT_INTERFACE_IP_ADDRESS: 10.0.0.2 + HOSTS_SELF: 10.0.0.2 threenode-controller + EXTERNAL_INTERFACE: enp2s0 + NETWORK_CONFIG: enp2s0:dhcp;enp0s26u1u2:static,address=10.0.0.2,netmask=255.255.255.0 + subsystems: + - morph: systems/initramfs-x86_64.morph + deploy: + controller-to-install-initramfs: *initramfs +- morph: systems/installer-system-x86_64.morph + deploy: + compute-installer: + <<: *installer + location: installer-openstack-compute-x86_64.img + subsystems: + - morph: systems/initramfs-x86_64.morph + deploy: + compute-initramfs: *initramfs + - morph: systems/openstack-system-x86_64.morph + deploy: + compute-to-install: + <<: *stack-node + HOSTNAME: threenode-compute + + NOVA_ENABLE_COMPUTE: True + + CINDER_ENABLE_CONTROLLER: False + CINDER_ENABLE_COMPUTE: True + CINDER_ENABLE_STORAGE: True + + NEUTRON_ENABLE_AGENT: True + NEUTRON_ENABLE_MANAGER: False + NEUTRON_ENABLE_CONTROLLER: False + + CEILOMETER_ENABLE_CONTROLLER: False + CEILOMETER_ENABLE_COMPUTE: True + + MANAGEMENT_INTERFACE_IP_ADDRESS: 10.0.0.3 + HOSTS_SELF: 10.0.0.3 threenode-compute + EXTERNAL_INTERFACE: eno1 + NETWORK_CONFIG: eno1:dhcp;enp0s29u1u3:static,address=10.0.0.3,netmask=255.255.255.0 + subsystems: + - morph: systems/initramfs-x86_64.morph + deploy: + 
compute-to-install-initramfs: *initramfs
diff --git a/clusters/openstack-two-node-installer.morph b/clusters/openstack-two-node-installer.morph
new file mode 100644
index 00000000..f05b0e9b
--- /dev/null
+++ b/clusters/openstack-two-node-installer.morph
@@ -0,0 +1,200 @@
+name: openstack-two-node-installer
+kind: cluster
+description: |
+
+ This is a cluster morphology for deploying an installer for an x86_64
+ OpenStack system spread across two nodes.
+
+ This cluster creates disk images that may be `dd`'d onto install media to
+ produce an OpenStack cluster when instantiated.
+
+ Alternatively it may be used to install directly onto a physical disk by
+ running:
+
+ morph deploy clusters/openstack-two-node-installer.morph \
+ controller-installer controller-installer.location=/dev/vdb
+
+ Substituting controller-installer for compute-installer will produce
+ different configurations, and it is possible to substitute /dev/vdb for a
+ different path to a disk image to install to a different disk image.
+
+ Substitute the values of HOSTNAME, NETWORK_CONFIG, EXTERNAL_INTERFACE,
+ MANAGEMENT_IP_ADDRESS, CONTROLLER_HOST_ADDRESS, RABBITMQ_HOST and HOSTS_* to
+ match your hardware and networking configuration.
+
+ Requirements to be able to run and test the system:
+
+ - DISK_SIZE should be bigger than 5G
+ - The system has to have available at least 4G of RAM, but once
+ you start instantiating VMs you will need more.
+ - The IP of the system can't change, and you need to know it beforehand,
+ that is, the system needs a static IP address.
+
+ This cluster is configurable, but with the following constraints:
+
+ - The hostname in RABBITMQ_HOST has to match CONTROLLER_HOST_ADDRESS,
+ and HOST_CONTROLLER.
+ - HOSTS_CONTROLLER is only needed if the hostname (see previous point)
+ is not a FQDN.
+ - The IP listed in MANAGEMENT_INTERFACE_IP_ADDRESS has to match the one
+ used in HOSTS_CONTROLLER.
+ - CINDER_DEVICE should be a path to a storage device ready to be
+ used/formatted for cinder data.
+ - EXTERNAL_INTERFACE is required when the system has more than one network
+ interface.
+
+ You can also have a look at the following suggestions:
+
+ - NOVA_VIRT_TYPE can be either 'kvm' or 'qemu', depending on where the
+ system is being deployed to.
+ - We recommend changing all the PASSWORDs variables, also the
+ KEYSTONE_TEMPORARY_ADMIN_TOKEN, METADATA_PROXY_SHARED_SECRET and
+ METERING_SECRET.
+ - Setting NOVA_BAREMETAL_SCHEDULING with a YAML truth value will configure
+ Nova to schedule baremetal machines through the Ironic driver, instead of
+ scheduling virtual machines.
+
+systems:
+- morph: systems/installer-system-x86_64.morph
+ deploy:
+ controller-installer: &installer
+ type: rawdisk
+ location: installer-openstack-controller-x86_64.img
+ KERNEL_ARGS: init=/usr/lib/baserock-installer/installer
+ DISK_SIZE: 6G
+ HOSTNAME: installer-x86_64
+ INSTALLER_TARGET_STORAGE_DEVICE: /dev/sda
+ INSTALLER_ROOTFS_TO_INSTALL: /rootfs
+ INSTALLER_POST_INSTALL_COMMAND: 'sync; poweroff -f'
+ INITRAMFS_PATH: boot/initramfs.gz
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ controller-initramfs: &initramfs
+ type: initramfs
+ location: boot/initramfs.gz
+ - morph: systems/openstack-system-x86_64.morph
+ deploy:
+ controller-to-install: &stack-node
+ type: sysroot
+ location: rootfs
+ INSTALL_FILES: openstack/manifest
+ INITRAMFS_PATH: boot/initramfs.gz
+
+ HOSTNAME: twonode-controller
+
+ RABBITMQ_HOST: twonode-controller.os-mgmt
+ RABBITMQ_PORT: 5672
+ RABBITMQ_USER: rabbitmq
+ RABBITMQ_PASSWORD: veryinsecure
+
+ # This token needs to be unique and secret
+ KEYSTONE_ENABLE_SERVICE: True
+ KEYSTONE_TEMPORARY_ADMIN_TOKEN: 22f3aa1cf538e3f6d5e8
+ KEYSTONE_ADMIN_PASSWORD: veryinsecure
+ KEYSTONE_DB_USER: keystoneDB
+ KEYSTONE_DB_PASSWORD: veryinsecure
+
+ GLANCE_ENABLE_SERVICE: True
+ GLANCE_SERVICE_USER: glance
+ GLANCE_SERVICE_PASSWORD: veryinsecure
+
GLANCE_DB_USER: glanceDB + GLANCE_DB_PASSWORD: veryinsecure + + NOVA_ENABLE_CONTROLLER: True + NOVA_ENABLE_COMPUTE: False + NOVA_SERVICE_USER: nova + NOVA_SERVICE_PASSWORD: veryinsecure + NOVA_DB_USER: novaDB + NOVA_DB_PASSWORD: veryinsecure + NOVA_VIRT_TYPE: kvm + NOVA_BAREMETAL_SCHEDULING: no + + CINDER_ENABLE_CONTROLLER: True + CINDER_ENABLE_COMPUTE: False + CINDER_ENABLE_STORAGE: False + CINDER_SERVICE_USER: cinder + CINDER_SERVICE_PASSWORD: veryinsecure + CINDER_DB_USER: cinderDB + CINDER_DB_PASSWORD: veryinsecure + # Storage device to be used by Cinder + CINDER_DEVICE: /dev/sdb + + NEUTRON_ENABLE_AGENT: False + NEUTRON_ENABLE_MANAGER: True + NEUTRON_ENABLE_CONTROLLER: True + NEUTRON_SERVICE_USER: neutron + NEUTRON_SERVICE_PASSWORD: veryinsecure + NEUTRON_DB_USER: neutronDB + NEUTRON_DB_PASSWORD: veryinsecure + METADATA_PROXY_SHARED_SECRET: novaneutronmetasecret + + IRONIC_ENABLE_SERVICE: True + IRONIC_SERVICE_USER: ironic + IRONIC_SERVICE_PASSWORD: veryinsecure + IRONIC_DB_USER: ironicDB + IRONIC_DB_PASSWORD: veryinsecure + + CEILOMETER_SERVICE_USER: ceilometer + CEILOMETER_SERVICE_PASSWORD: veryinsecure + CEILOMETER_DB_USER: ceilometerDB + CEILOMETER_DB_PASSWORD: veryinsecure + CEILOMETER_ENABLE_CONTROLLER: True + CEILOMETER_ENABLE_COMPUTE: False + METERING_SECRET: insecureceilometersecret + + CONTROLLER_HOST_ADDRESS: twonode-controller.os-mgmt + MANAGEMENT_INTERFACE_IP_ADDRESS: 10.0.0.1 + + HOSTS_SELF: 10.0.0.1 twonode-controller + HOSTS_CONTROL: 10.0.0.1 twonode-controller.os-mgmt + HOSTS_COMPUTE: 10.0.0.3 twonode-compute.os-mgmt + EXTERNAL_INTERFACE: enp3s0 + NETWORK_CONFIG: enp3s0:dhcp;enp2s0:static,address=10.0.0.1,netmask=255.255.255.0 + subsystems: + - morph: systems/initramfs-x86_64.morph + deploy: + controller-to-install-initramfs: *initramfs +- morph: systems/installer-system-x86_64.morph + deploy: + compute-installer: + <<: *installer + location: installer-openstack-compute-x86_64.img + subsystems: + - morph: systems/initramfs-x86_64.morph + 
deploy:
+ compute-initramfs: *initramfs
+ - morph: systems/openstack-system-x86_64.morph
+ deploy:
+ compute-to-install:
+ <<: *stack-node
+ HOSTNAME: twonode-compute
+
+ KEYSTONE_ENABLE_SERVICE: False
+
+ GLANCE_ENABLE_SERVICE: False
+
+ NOVA_ENABLE_COMPUTE: True
+ NOVA_ENABLE_CONTROLLER: False
+
+ CINDER_ENABLE_CONTROLLER: False
+ CINDER_ENABLE_COMPUTE: True
+ CINDER_ENABLE_STORAGE: True
+
+ NEUTRON_ENABLE_AGENT: True
+ NEUTRON_ENABLE_MANAGER: False
+ NEUTRON_ENABLE_CONTROLLER: False
+
+ IRONIC_ENABLE_SERVICE: False
+
+ CEILOMETER_ENABLE_CONTROLLER: False
+ CEILOMETER_ENABLE_COMPUTE: True
+
+ MANAGEMENT_INTERFACE_IP_ADDRESS: 10.0.0.3
+ HOSTS_SELF: 10.0.0.3 twonode-compute
+ EXTERNAL_INTERFACE: eno1
+ NETWORK_CONFIG: eno1:dhcp;enp0s29u1u3:static,address=10.0.0.3,netmask=255.255.255.0
+ subsystems:
+ - morph: systems/initramfs-x86_64.morph
+ deploy:
+ compute-to-install-initramfs: *initramfs
diff --git a/clusters/release.morph b/clusters/release.morph
new file mode 100644
index 00000000..c5bfffca
--- /dev/null
+++ b/clusters/release.morph
@@ -0,0 +1,76 @@
+name: release
+kind: cluster
+description: |
+ Deploy all the systems we support in a release.
+
+ This cluster morph is used by the tool 'scripts/do-release'. While
+ you can deploy the systems yourself, if you are making a Baserock release
+ then the script should be used.
+systems: +- morph: systems/build-system-x86_32-chroot.morph + deploy: + build-system-x86_32-chroot: + type: tar + location: build-system-x86_32-chroot.tar +- morph: systems/build-system-x86_32.morph + deploy: + build-system-x86_32: + type: rawdisk + location: build-system-x86_32.img + DISK_SIZE: 6G +- morph: systems/build-system-x86_64-chroot.morph + deploy: + build-system-x86_64-chroot: + type: tar + location: build-system-x86_64-chroot.tar +- morph: systems/build-system-x86_64.morph + deploy: + build-system-x86_64: + type: rawdisk + location: build-system-x86_64.img + DISK_SIZE: 6G + INITRAMFS_PATH: boot/initramfs.gz + subsystems: + - morph: systems/initramfs-x86_64.morph + deploy: + initramfs-build-system-x86_64: + type: initramfs + location: boot/initramfs.gz +- morph: systems/build-system-armv7lhf-jetson.morph + deploy: + build-system-armv7lhf-jetson: + type: rawdisk + location: build-system-armv7lhf-jetson.img + DISK_SIZE: 2G + BOOT_DEVICE: "/dev/mmcblk0p1" + ROOT_DEVICE: "/dev/mmcblk0p2" + DTB_PATH: "boot/tegra124-jetson-tk1.dtb" + BOOTLOADER_CONFIG_FORMAT: "extlinux" + BOOTLOADER_INSTALL: "none" + KERNEL_ARGS: cma=256M console=tty0 console=ttyS0,115200n8 no_console_suspend=1 nouveau.pstate=1 +- morph: systems/genivi-baseline-system-x86_64-generic.morph + deploy: + genivi-baseline-system-x86_64-generic: + type: rawdisk + location: genivi-baseline-system-x86_64-generic.img + DISK_SIZE: 4G + KERNEL_ARGS: vga=788 + INITRAMFS_PATH: boot/initramfs.gz + subsystems: + - morph: systems/initramfs-x86_64.morph + deploy: + initramfs-genivi-baseline-system-x86_64-generic: + type: initramfs + location: boot/initramfs.gz +- morph: systems/genivi-baseline-system-armv7lhf-jetson.morph + deploy: + genivi-baseline-system-armv7lhf-jetson: + type: rawdisk + location: genivi-baseline-system-armv7lhf-jetson.img + DISK_SIZE: 4G + BOOT_DEVICE: "/dev/mmcblk0p1" + ROOT_DEVICE: "/dev/mmcblk0p2" + DTB_PATH: "boot/tegra124-jetson-tk1.dtb" + BOOTLOADER_CONFIG_FORMAT: "extlinux" + 
BOOTLOADER_INSTALL: "none"
+ KERNEL_ARGS: cma=256M console=tty0 console=ttyS0,115200n8 no_console_suspend=1 nouveau.pstate=1
diff --git a/clusters/sdk-example-cluster.morph b/clusters/sdk-example-cluster.morph
new file mode 100644
index 00000000..92e4a413
--- /dev/null
+++ b/clusters/sdk-example-cluster.morph
@@ -0,0 +1,46 @@
+name: sdk
+kind: cluster
+description: |
+ An example of creating a cross-compile SDK for an embedded Baserock system.
+
+ This cluster demonstrates how you can use the 'sdk' write extension to
+ produce a cross-compile SDK tarball for a Baserock appliance. In this
+ example the system is assumed to run on ARMv7, and the SDK is built to
+ run on any x86_32 GNU/Linux system.
+
+ The SDK is a Baserock system itself, containing just 'build-essential' and a
+ 'cross-toolchain' stratum. The SDK system also includes the target
+ appliance's system, as a 'subsystem', so that the libraries and headers are
+ available when building.
+
+ This cluster deploys the SDK system using the 'sdk' write extension, which
+ produces a tarball with a small shell header. When the shell header is
+ executed, and passed a directory name on the commandline, it extracts the SDK
+ to that path and patches the binaries so that they execute correctly from
+ that directory.
+
+ Deploying the appliance system artifact to the target device should be
+ done with a separate cluster morphology, because you will often want to
+ do this without rebuilding the SDK.
+
+ You must build each system with `morph build` before deploying. We recommend
+ doing this all from your Baserock development machine, using a Baserock
+ ARM distributed build network to produce the system artifact. Once both
+ system artifacts are cached locally, the `morph deploy` command will produce
+ a self-extracting shell script/tarball following the 'location' field.
+
+ See the documentation of the sdk.write extension for more information.
+systems: +- morph: systems/armv7lhf-cross-toolchain-system-x86_64.morph + deploy: + sdk: + type: sdk + location: armv7lhf-cross-toolchain-system-x86_64.sh + PREFIX: /usr + TARGET: armv7lhf-baserock-linux-gnueabi + subsystems: + - morph: systems/devel-system-armv7lhf-highbank.morph + deploy: + sysroot: + type: sysroot + location: usr/armv7lhf-baserock-linux-gnueabi/sys-root diff --git a/clusters/trove-example.morph b/clusters/trove-example.morph new file mode 100644 index 00000000..2812f60e --- /dev/null +++ b/clusters/trove-example.morph @@ -0,0 +1,58 @@ +name: trove-example +kind: cluster +description: | + This is an example cluster morphology for deploying a Trove, + both the initial deployment and an upgrade. + + You need some ssh keys, which you can create like this: + + mkdir ssh_keys + ssh-keygen -N '' -f ssh_keys/lorry.key + ssh-keygen -N '' -f ssh_keys/worker.key + ssh-keygen -N '' -f ssh_keys/trove-admin.key + + You may also put in your own keys instead of creating new ones. + + To do the initial deployment: + + morph deploy clusters/trove-example.morph \ + initial \ + initial.location=kvm+ssh://liw@192.168.122.1/test-trove/tmp/test-trove.img + + To do an upgrade: + + morph deploy clusters/trove-example.morph \ + upgrade upgrade.VERSION_LABEL=123 + + where `VERSION_LABEL` gets a new unique value each time. + + Remember to always specify either initial or upgrade as the + deployment name to use, otherwise morph will attempt to deploy both. 
+ + You can find documentation for Trove at the following web address: + http://wiki.baserock.org/Trove/ +systems: +- morph: systems/trove-system-x86_64.morph + deploy-defaults: + HOSTNAME: test-trove + VCPUS: 2 + RAM_SIZE: 2G + DISK_SIZE: 8G + LORRY_SSH_KEY: ssh_keys/lorry.key + WORKER_SSH_PUBKEY: ssh_keys/worker.key.pub + TROVE_ADMIN_EMAIL: adminuser@example.com + TROVE_ADMIN_NAME: Nobody + TROVE_ADMIN_SSH_PUBKEY: ssh_keys/trove-admin.key.pub + TROVE_ADMIN_USER: adminuser + TROVE_COMPANY: Company name goes here + TROVE_HOST: test-trove + TROVE_ID: test-trove + UPSTREAM_TROVE: '' + deploy: + initial: + type: kvm + location: kvm+ssh://vm-user@vm-host/test-trove/vm-path/test-trove.img + VERSION_LABEL: 1 + upgrade: + type: ssh-rsync + location: test-trove diff --git a/clusters/trove.baserock.org-upgrade.morph b/clusters/trove.baserock.org-upgrade.morph new file mode 100644 index 00000000..eaf939e1 --- /dev/null +++ b/clusters/trove.baserock.org-upgrade.morph @@ -0,0 +1,23 @@ +name: trove.baserock.org-upgrade +kind: cluster +description: | + This is a cluster morphology for deploying an UPGRADE to + git.baserock.org. It doesn't work for the initial deployment. The + deployer MUST have ssh access to root@git.baserock.org. To use: + + morph deploy --upgrade trove.baserock.org-upgrade gbo.VERSION_LABEL=2014-05-29 + + Replace the value of gbo.VERSION_LABEL above with the current date. + You can add letters if you need to upgrade multiple times in a day. 
+systems:
+- morph: systems/trove-system-x86_64.morph
+ deploy:
+ gbo:
+ type: ssh-rsync
+ location: root@git.baserock.org
+ FSTAB_HOME: LABEL=homes /home auto defaults,noatime,rw 0 2
+ HOSTNAME: firehose1
+ LORRY_CONTROLLER_MINIONS: 4
+ TROVE_COMPANY: Baserock
+ TROVE_HOSTNAME: git.baserock.org
+ TROVE_ID: baserock
diff --git a/clusters/upgrade-devel.morph b/clusters/upgrade-devel.morph
new file mode 100644
index 00000000..b7ce9bc0
--- /dev/null
+++ b/clusters/upgrade-devel.morph
@@ -0,0 +1,39 @@
+name: upgrade-devel
+kind: cluster
+description: |
+ This is a cluster morphology that can be used to deploy systems to
+ an existing Baserock devel system, as an upgrade of the running system.
+
+ This method is for users who deployed a system manually from one of the
+ images provided on http://download.baserock.org. IT IS ONLY POSSIBLE TO
+ UPGRADE BASEROCK 14 RELEASES OR NEWER.
+
+ If you deployed your system using `morph deploy` then you should reuse the
+ cluster morphology you did the initial deployment with, instead of this one,
+ so that the configuration is preserved in the new system.
+
+ Ensure that your root user has passwordless SSH access to localhost with
+ `ssh root@localhost whoami`. If not, run `ssh-copy-id root@localhost`.
+ Make sure the 'morph' field below matches the system you are upgrading.
+
+ To upgrade, select a sensible value for VERSION_LABEL and run:
+
+ morph deploy --upgrade upgrade-devel.morph self.HOSTNAME=$(hostname) self.VERSION_LABEL=$VERSION_LABEL
+
+ Your configuration in /etc should be propagated to the new system, but there
+ may be merge conflicts. Check /etc for files named '.rej' and '.orig' in the
+ new system, which will indicate that there are changes from the old system
+ that must be merged manually.
You can get a nice diff from the old /etc as + follows: + + mount /dev/sda /mnt + git diff --no-index /mnt/systems/factory/run/etc /mnt/systems/$VERSION_LABEL/run/etc + + On a base system, use 'diff -r' instead of 'git diff --no-index'. It will + not be as colourful. +systems: +- morph: systems/devel-system-x86_64-generic.morph + deploy: + self: + type: ssh-rsync + location: root@127.0.0.1 diff --git a/clusters/weston-system-x86_64-generic-deploy.morph b/clusters/weston-system-x86_64-generic-deploy.morph new file mode 100644 index 00000000..3a6f29ef --- /dev/null +++ b/clusters/weston-system-x86_64-generic-deploy.morph @@ -0,0 +1,23 @@ +name: weston-system-x86_64-generic-deploy +kind: cluster +description: | + Deploy a stock weston system. + + The resulting image can be copied to a USB and booted from there, + as well as in a virtual machine. + +systems: +- morph: systems/weston-system-x86_64-generic.morph + deploy: + weston-system-x86_64-generic: + type: rawdisk + location: /weston-system-x86_64-generic.img + DISK_SIZE: 4G + KERNEL_ARGS: vga=788 + INITRAMFS_PATH: boot/initramfs.gz + subsystems: + - morph: systems/initramfs-x86_64.morph + deploy: + initramfs: + type: initramfs + location: boot/initramfs.gz diff --git a/clusters/zookeeper.morph b/clusters/zookeeper.morph new file mode 100644 index 00000000..1153d4b0 --- /dev/null +++ b/clusters/zookeeper.morph @@ -0,0 +1,21 @@ +name: zookeeper +kind: cluster +systems: + - morph: systems/zookeeper-client-x86_64.morph + deploy: + my-client-system: + type: kvm + location: kvm+ssh://username@HOSTNAME/machinename/path/to/zookeeper-client.img + DISK_SIZE: 4G + RAM_SIZE: 1G + VCPUS: 1 + HOSTNAME: zkclient + - morph: systems/zookeeper-server-x86_64.morph + deploy: + my-server-system: + type: kvm + location: kvm+ssh://username@HOSTNAME/machinename/path/to/zookeeper-server.img + DISK_SIZE: 4G + RAM_SIZE: 1G + VCPUS: 1 + HOSTNAME: zkserver |