-rw-r--r--.gitmodules3
-rw-r--r--README6
-rw-r--r--README.mdwn486
-rw-r--r--admin/cve-2015-0235-ghost.c40
-rw-r--r--admin/sshd_config.yaml8
-rw-r--r--admin/test-cve-2015-0235-ghost.yml18
-rw-r--r--ansible.cfg5
-rwxr-xr-xbackup-snapshot249
-rw-r--r--baserock-ops-team.cloud-config19
-rwxr-xr-xbaserock_backup/backup.sh25
-rw-r--r--baserock_backup/instance-config.yml29
-rw-r--r--baserock_backup/ssh_config4
-rw-r--r--baserock_database/backup-snapshot.conf4
-rwxr-xr-xbaserock_database/develop.sh70
-rw-r--r--baserock_database/image-config.yml22
-rw-r--r--baserock_database/instance-backup-config.yml26
-rw-r--r--baserock_database/instance-config.yml25
-rw-r--r--baserock_database/instance-mariadb-config.yml71
-rw-r--r--baserock_database/packer_template.json57
-rw-r--r--baserock_gerrit/All-Projects/groups16
-rw-r--r--baserock_gerrit/All-Projects/project.config123
-rw-r--r--baserock_gerrit/backup-snapshot.conf5
-rw-r--r--baserock_gerrit/baserock_gerrit.morph26
-rw-r--r--baserock_gerrit/gerrit-access-config.yml161
-rw-r--r--baserock_gerrit/gerrit.config49
-rw-r--r--baserock_gerrit/gerrit.service16
-rw-r--r--baserock_gerrit/instance-backup-config.yml29
-rw-r--r--baserock_gerrit/instance-ca-certificate-config.yml28
-rw-r--r--baserock_gerrit/instance-config.yml97
-rw-r--r--baserock_gerrit/instance-mirroring-config.yml68
-rw-r--r--baserock_gerrit/lorry-controller.conf38
-rw-r--r--baserock_gerrit/lorry-controller/minion.conf6
-rw-r--r--baserock_gerrit/lorry-controller/webapp.conf13
-rw-r--r--baserock_gerrit/lorry.conf8
-rw-r--r--baserock_gerrit/replication.config30
-rw-r--r--baserock_hosts40
-rw-r--r--baserock_irclogs/clusters/irclogs.morph13
-rw-r--r--baserock_irclogs/files/baserock.conf185
-rw-r--r--baserock_irclogs/files/irclogs-generation.service12
-rw-r--r--baserock_irclogs/files/irclogs-generation.timer9
-rw-r--r--baserock_irclogs/files/lighttpd-irclogs.conf16
-rw-r--r--baserock_irclogs/files/lighttpd-irclogs.service11
-rw-r--r--baserock_irclogs/files/supybot.service15
-rw-r--r--baserock_irclogs/irclogs.configure45
-rw-r--r--baserock_irclogs/strata/irclogs.morph16
-rw-r--r--baserock_irclogs/systems/irclogs-x86_64.morph33
-rw-r--r--baserock_mail/instance-config.yml75
-rw-r--r--baserock_mason_x86_32/distbuild.conf20
-rw-r--r--baserock_mason_x86_32/mason-x86-32.morph20
-rw-r--r--baserock_mason_x86_32/mason.conf19
-rw-r--r--baserock_mason_x86_64/distbuild.conf20
-rw-r--r--baserock_mason_x86_64/mason-x86-64.morph20
-rw-r--r--baserock_mason_x86_64/mason.conf19
-rw-r--r--baserock_openid_provider/baserock_openid_provider/__init__.py17
-rw-r--r--baserock_openid_provider/baserock_openid_provider/forms.py29
-rw-r--r--baserock_openid_provider/baserock_openid_provider/settings.py176
-rw-r--r--baserock_openid_provider/baserock_openid_provider/signals.py26
-rw-r--r--baserock_openid_provider/baserock_openid_provider/static/style.css268
-rw-r--r--baserock_openid_provider/baserock_openid_provider/urls.py12
-rw-r--r--baserock_openid_provider/baserock_openid_provider/views.py54
-rw-r--r--baserock_openid_provider/baserock_openid_provider/wsgi.py14
-rw-r--r--baserock_openid_provider/cherokee.conf300
-rwxr-xr-xbaserock_openid_provider/develop.sh11
-rw-r--r--baserock_openid_provider/image-config.yml53
-rw-r--r--baserock_openid_provider/instance-config.yml36
-rw-r--r--baserock_openid_provider/manage.py10
-rw-r--r--baserock_openid_provider/openid_provider/__init__.py0
-rw-r--r--baserock_openid_provider/openid_provider/admin.py17
-rw-r--r--baserock_openid_provider/openid_provider/conf.py27
-rw-r--r--baserock_openid_provider/openid_provider/migrations/0001_initial.py89
-rw-r--r--baserock_openid_provider/openid_provider/migrations/__init__.py0
-rw-r--r--baserock_openid_provider/openid_provider/models.py42
-rw-r--r--baserock_openid_provider/openid_provider/templates/openid_provider/base.html1
-rw-r--r--baserock_openid_provider/openid_provider/templates/openid_provider/decide.html41
-rw-r--r--baserock_openid_provider/openid_provider/templates/openid_provider/error.html6
-rw-r--r--baserock_openid_provider/openid_provider/templates/openid_provider/response.html12
-rw-r--r--baserock_openid_provider/openid_provider/templates/openid_provider/server.html9
-rw-r--r--baserock_openid_provider/openid_provider/templates/openid_provider/xrds.xml10
-rw-r--r--baserock_openid_provider/openid_provider/urls.py14
-rw-r--r--baserock_openid_provider/openid_provider/utils.py129
-rw-r--r--baserock_openid_provider/openid_provider/views.py317
-rw-r--r--baserock_openid_provider/packer_template.json77
-rw-r--r--baserock_openid_provider/templates/base.html38
-rw-r--r--baserock_openid_provider/templates/index.html15
-rw-r--r--baserock_openid_provider/templates/registration/activate.html8
-rw-r--r--baserock_openid_provider/templates/registration/activation_complete.html10
-rw-r--r--baserock_openid_provider/templates/registration/activation_email.txt6
-rw-r--r--baserock_openid_provider/templates/registration/activation_email_subject.txt1
-rw-r--r--baserock_openid_provider/templates/registration/login.html15
-rw-r--r--baserock_openid_provider/templates/registration/logout.html6
-rw-r--r--baserock_openid_provider/templates/registration/password_change_done.html6
-rw-r--r--baserock_openid_provider/templates/registration/password_change_form.html11
-rw-r--r--baserock_openid_provider/templates/registration/password_reset_complete.html10
-rw-r--r--baserock_openid_provider/templates/registration/password_reset_confirm.html21
-rw-r--r--baserock_openid_provider/templates/registration/password_reset_done.html6
-rw-r--r--baserock_openid_provider/templates/registration/password_reset_email.html5
-rw-r--r--baserock_openid_provider/templates/registration/password_reset_form.html11
-rw-r--r--baserock_openid_provider/templates/registration/registration_closed.html6
-rw-r--r--baserock_openid_provider/templates/registration/registration_complete.html11
-rw-r--r--baserock_openid_provider/templates/registration/registration_form.html11
-rw-r--r--baserock_openid_provider/uwsgi.ini22
-rwxr-xr-xbaserock_storyboard/develop.sh9
-rw-r--r--baserock_storyboard/packer_template.json62
-rw-r--r--baserock_storyboard/projects.yaml31
m---------baserock_storyboard/puppet-storyboard0
-rw-r--r--baserock_storyboard/site.pp46
-rw-r--r--certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert118
-rw-r--r--clusters/mason-system-x86_64-openstack-deploy.morph55
-rw-r--r--firewall.yaml403
-rw-r--r--frontend/haproxy.cfg97
-rw-r--r--frontend/instance-backup-config.yml23
-rw-r--r--frontend/instance-config.yml18
-rw-r--r--frontend/packer_template.json60
-rw-r--r--mason.configure140
-rw-r--r--mason.configure.help127
-rw-r--r--mason/README120
-rw-r--r--mason/ansible/mason-setup.yml129
-rw-r--r--mason/lighttpd.service (renamed from mason/httpd.service)2
-rwxr-xr-xmason/mason-generator.sh101
-rwxr-xr-xmason/mason-report.sh252
-rw-r--r--mason/mason.service10
-rwxr-xr-xmason/mason.sh93
-rw-r--r--mason/mason.timer10
-rw-r--r--mason/share/lighttpd.conf21
-rw-r--r--mason/share/mason.conf14
-rw-r--r--mason/share/os.conf1
-rw-r--r--mason/share/turbo-hipster-config.yaml47
-rw-r--r--mason/share/zuul-layout.yaml22
-rw-r--r--mason/share/zuul-logging.conf44
-rw-r--r--mason/share/zuul.conf26
-rw-r--r--mason/ssh-config2
-rw-r--r--mason/turbo-hipster.service10
-rw-r--r--mason/zuul-merger.service10
-rw-r--r--mason/zuul-server.service10
-rwxr-xr-xscripts/cycle.sh61
-rw-r--r--scripts/licensecheck.pl604
-rwxr-xr-xscripts/licensecheck.sh101
-rwxr-xr-xscripts/organize-morphologies.py266
-rwxr-xr-xscripts/release-build175
-rw-r--r--scripts/release-build.test.conf6
-rwxr-xr-xscripts/release-test401
-rwxr-xr-xscripts/release-test-os526
-rwxr-xr-xscripts/release-upload473
-rw-r--r--scripts/release-upload.test.conf10
-rw-r--r--strata/baserock-ci-tests.morph14
-rw-r--r--strata/baserock-ci-tests/system-tests.morph5
-rw-r--r--strata/gerrit-tools.morph10
-rw-r--r--strata/python-paramiko.morph24
-rw-r--r--strata/python-paramiko/pycrypto.morph3
-rw-r--r--strata/zuul-ci.morph137
-rw-r--r--systems/gerrit-system-x86_64.morph66
-rw-r--r--systems/mason-system-x86_64-generic.morph58
-rw-r--r--tasks/create-data-volume.yml26
153 files changed, 6342 insertions, 3221 deletions
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 00000000..0177e325
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "baserock_storyboard/puppet-storyboard"]
+ path = baserock_storyboard/puppet-storyboard
+ url = https://github.com/ssssam/puppet-storyboard
diff --git a/README b/README
deleted file mode 100644
index 7d72b743..00000000
--- a/README
+++ /dev/null
@@ -1,6 +0,0 @@
-README for morphs
-=================
-
-These are some morphologies for Baserock. Baserock is a system
-for developing embedded and appliance Linux systems. For
-more information, see <http://wiki.baserock.org>.
diff --git a/README.mdwn b/README.mdwn
new file mode 100644
index 00000000..91d7db72
--- /dev/null
+++ b/README.mdwn
@@ -0,0 +1,486 @@
+Baserock project public infrastructure
+======================================
+
+This repository contains the definitions for all of the Baserock Project's
+infrastructure. This includes every service used by the project, except for
+the mailing lists (hosted by [Pepperfish]) and the wiki (hosted by
+[Branchable]).
+
+Some of these systems are Baserock systems. Others are Ubuntu or Fedora based.
+Eventually we want to move all of these to being Baserock systems.
+
+The infrastructure is set up in a way that parallels the preferred Baserock
+approach to deployment. All files necessary for (re)deploying the systems
+should be contained in this Git repository, with the exception of certain
+private tokens (which should be simple to inject at deploy time).
+
+[Pepperfish]: http://listmaster.pepperfish.net/cgi-bin/mailman/listinfo
+[Branchable]: http://www.branchable.com/
+
+
+General notes
+-------------
+
+When instantiating a machine that will be public, remember to give shell
+access to everyone on the ops team. This can be done using a post-creation
+customisation script that injects all of their SSH keys. The SSH public
+keys of the Baserock Operations team are collected in
+`baserock-ops-team.cloud-config`.
+
+Ensure SSH password login is disabled on all systems you deploy! See
+<https://testbit.eu/is-ssh-insecure/> for why. The Ansible playbook
+`admin/sshd_config.yaml` can ensure that all systems have password login
+disabled.
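+
+For example, using the same `hosts` inventory as the other playbooks here:
+
+    ansible-playbook -i hosts admin/sshd_config.yaml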
+
+
+Administration
+--------------
+
+You can use [Ansible] to automate tasks on the baserock.org systems.
+
+To run a playbook:
+
+ ansible-playbook -i hosts $PLAYBOOK.yaml
+
+To run an ad-hoc command (upgrading, for example):
+
+    ansible -i hosts fedora -m command -a 'sudo yum update -y'
+    ansible -i hosts ubuntu -m command -a 'sudo apt-get upgrade -y'
+
+[Ansible]: http://www.ansible.com
+
+
+Deployment to OpenStack
+-----------------------
+
+The intention is that all of the systems defined here are deployed to an
+OpenStack cloud. The instructions here hardcode some details about the specific
+tenancy at [DataCentred](http://www.datacentred.io) that the Baserock project
+uses. It should be easy to adapt them for other OpenStack hosts, though.
+
+### Credentials
+
+The instructions below assume you have the following environment variables set
+according to the OpenStack host you are deploying to:
+
+ - `OS_AUTH_URL`
+ - `OS_TENANT_NAME`
+ - `OS_USERNAME`
+ - `OS_PASSWORD`
+
+When using `morph deploy` to deploy to OpenStack, you will need to set these
+variables, because currently Morph does not honour the standard ones. See:
+<https://storyboard.baserock.org/#!/story/35>.
+
+ - `OPENSTACK_USER=$OS_USERNAME`
+ - `OPENSTACK_PASSWORD=$OS_PASSWORD`
+ - `OPENSTACK_TENANT=$OS_TENANT_NAME`
+
+The `location` field in the deployment .morph file will also need to point to
+the correct `$OS_AUTH_URL`.
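+
+A minimal sketch of setting these from the standard variables, before running
+`morph deploy`:
+
+    export OPENSTACK_USER="$OS_USERNAME"
+    export OPENSTACK_PASSWORD="$OS_PASSWORD"
+    export OPENSTACK_TENANT="$OS_TENANT_NAME"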
+
+### Firewall / Security Groups
+
+The instructions assume the presence of a set of security groups. You can
+create these by running the following Ansible playbook. You'll need the
+OpenStack Ansible modules cloned from
+`https://github.com/openstack-ansible/openstack-ansible-modules/`.
+
+ ANSIBLE_LIBRARY=../openstack-ansible-modules ansible-playbook -i hosts \
+ firewall.yaml
+
+### Placeholders
+
+The commands below use a couple of placeholders like `$network_id`. You can
+set them in your environment to allow you to copy and paste the commands below
+as-is.
+
+ - `export fedora_image_id=...` (find this with `glance image-list`)
+ - `export network_id=...` (find this with `neutron net-list`)
+ - `export keyname=...` (find this with `nova keypair-list`)
+
+The `$fedora_image_id` should reference a Fedora Cloud image. You can import
+these from <http://www.fedoraproject.org/>. At the time of writing, these
+instructions were tested with Fedora Cloud 21 for x86_64.
+
+Backups
+-------
+
+Backups of git.baserock.org's data volume are run by, and stored on, a
+Codethink-managed machine named 'access'. They will need to be migrated off
+this system before long. The backups are taken without pausing services or
+snapshotting the data, so they will not be 100% clean. The current
+git.baserock.org data volume does not use LVM and cannot be easily snapshotted.
+
+Backups of 'gerrit' and 'database' are handled by the
+'baserock_backup/backup.sh' script. This currently runs on an instance in
+Codethink's internal OpenStack cloud.
+
+Instances themselves are not backed up. In the event of a crisis we will
+redeploy them from the infrastructure.git repository. There should be nothing
+valuable stored outside of the data volumes that are backed up.
+
+
+Deployment with Packer
+----------------------
+
+> **NOTE**: I no longer think that Packer is the right tool for our needs. This
+> is partly because of critical bugs that have not been fixed since I started
+> using it (e.g. <https://github.com/mitchellh/packer/issues/1462>), and partly
+> because I realised that I was just using it to wrap `nova` and
+> `ansible-playbook`, and it is simple enough to use those commands directly.
+>
+> I had hoped that we could make use of Packer's multiple backends in order to
+> test systems locally in Docker before deploying them to OpenStack. It turns
+> out Docker is sufficiently different to OpenStack that this doesn't make life
+> any easier during development. Networking setup is different, systemd doesn't
+> work inside Docker by default, base images are different in other ways, etc.
+>
+> So I recommend not using Packer for future systems, and I will try to
+> migrate the definitions for the existing ones to just use Ansible.
+>
+> Sam Thursfield 10/04/15
+
+Some of the systems are built with [Packer]. I chose Packer because it provides
+similar functionality to the `morph deploy` command, although its
+implementation makes different tradeoffs. The documentation below shows the
+commands you need to run to build systems with Packer. Some of the systems can
+be deployed as Docker images as well as OpenStack images, to enable local
+development and testing.
+
+The following error from Packer means that you didn't set your credentials
+correctly in the `OS_...` environment variables, or they were not accepted.
+
+> Build 'production' errored: Missing or incorrect provider
+
+The Packer tool requires a floating IP to be available at the time a system
+is being deployed to OpenStack. Currently 185.43.218.169 should be used for
+this. If you specify a floating IP that is in use by an existing instance, you
+will steal it for your own instance and probably break one of our web services.
+
+[Packer]: http://www.packer.io/
+
+
+Systems
+-------
+
+### Front-end
+
+The front-end provides a reverse proxy, to allow more flexible routing than
+simply pointing each subdomain to a different instance using separate public
+IPs. It also provides a starting point for future load-balancing and failover
+configuration.
+
+If you want to add a new service to the Baserock Project infrastructure via
+the frontend, alter the haproxy.cfg file in the frontend/ directory. Our
+OpenStack tenancy doesn't provide any kind of internal DNS service, so you
+must use the fixed IP of each instance.
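+
+For example, a hypothetical haproxy.cfg fragment for a new service (the
+subdomain, backend name and address are illustrative only):
+
+    acl host_example hdr(host) -i example.baserock.org
+    use_backend example_backend if host_example
+
+    backend example_backend
+        server example-01 192.168.222.99:80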
+
+To deploy this system:
+
+ packer build -only=production frontend/packer_template.json
+
+ ansible-playbook -i hosts frontend/instance-config.yml
+
+Full HAProxy 1.5 documentation: <https://cbonte.github.io/haproxy-dconv/configuration-1.5.html>.
+
+When setting up a new instance with the frontend already deployed, do the
+following:
+
+- request a subdomain that points at 85.199.252.162
+- log in to the frontend-haproxy machine
+- edit /etc/haproxy/haproxy.cfg, and make the same changes to the copy in this
+ repo.
+- run: `sudo haproxy -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid -sf
+ $(cat /var/run/haproxy.pid)` to reload the configuration without interrupting
+ the service (this confuses systemd, but I'm not sure how to avoid that)
+
+### Database
+
+Baserock infrastructure uses a shared [MariaDB] database. MariaDB was chosen
+because Storyboard only supports MariaDB.
+
+To deploy this system to production:
+
+    packer build -only=production baserock_database/packer_template.json
+ nova boot database-mariadb \
+ --key-name=$keyname \
+ --flavor dc1.1x1 \
+ --image 'database-mariadb' \
+ --nic='net-id=d079fa3e-2558-4bcb-ad5a-279040c202b5,v4-fixed-ip=192.168.222.30' \
+      --security-groups default,database-mariadb \
+ --user-data ./baserock-ops-team.cloud-config
+ nova volume-create \
+ --display-name database-volume \
+ --display-description 'Database volume' \
+ --volume-type Ceph \
+ 100
+ nova volume-attach database-mariadb <volume ID> /dev/vdb
+
+    ansible-playbook -i hosts baserock_database/instance-config.yml
+
+To add the required users and databases, run the following playbook. This can
+be altered and rerun whenever you need to add more users or databases.
+
+    ansible-playbook -i hosts baserock_database/instance-mariadb-config.yml
+
+[MariaDB]: https://www.mariadb.org
+
+### Mail relay
+
+The mail relay is currently a Fedora Cloud 21 image running Exim. You should be
+able to take a Fedora Cloud 21 base image, instantiate it in the
+'internal-mail-relay' security group, and then run
+'baserock_mail/instance-config.yml' to configure it and start the service.
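+
+A sketch of that procedure, reusing the placeholders from above (the instance
+name and flavor are illustrative):
+
+    nova boot mail.baserock.org \
+      --key-name $keyname \
+      --flavor dc1.1x1 \
+      --image $fedora_image_id \
+      --nic "net-id=$network_id" \
+      --security-groups internal-mail-relay \
+      --user-data ./baserock-ops-team.cloud-config
+
+    ansible-playbook -i hosts baserock_mail/instance-config.yml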
+
+It is configured to only listen on its internal IP. It's not intended to
+receive mail, or relay mail sent by systems outside the baserock.org cloud.
+
+### OpenID provider
+
+To deploy a development instance:
+
+ packer build -only=development baserock_openid_provider/packer_template.json
+ baserock_openid_provider/develop.sh
+ # Now you have a root shell inside your container
+ cd /srv/baserock_openid_provider
+ python ./manage.py runserver 0.0.0.0:80
+ # Now you can browse to http://localhost:80/ and see the server.
+
+To deploy this system to production:
+
+ vim baserock_openid_provider/baserock_openid_provider/settings.py
+
+Edit `DATABASES['default']['HOST']` to point to the fixed IP of the
+'database' machine, and check the rest of the settings. See:
+https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
+
+ packer build -only=production baserock_openid_provider/packer_template.json
+
+ nova boot openid.baserock.org \
+ --key-name $keyname \
+ --flavor dc1.1x1 \
+ --image 'baserock_openid_provider' \
+      --nic "net-id=$network_id,v4-fixed-ip=192.168.222.67" \
+      --security-groups default,web-server \
+ --user-data ./baserock-ops-team.cloud-config
+
+ ansible-playbook -i hosts baserock_openid_provider/instance-config.yml
+
+To change Cherokee configuration, it's usually easiest to use the
+cherokee-admin tool in a running instance. SSH in as normal but forward port
+9090 to localhost (pass `-L9090:localhost:9090` to SSH). Backup the old
+/etc/cherokee/cherokee.conf file, then run `cherokee-admin`, and log in using
+the creditials it gives you. After changing the configuration, please update
+the cherokee.conf in infrastructure.git to match the changes `cherokee-admin`
+made.
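+
+For example, assuming the instance has the default 'fedora' login user (a
+sketch):
+
+    ssh -L9090:localhost:9090 fedora@openid.baserock.org
+    sudo cp /etc/cherokee/cherokee.conf /etc/cherokee/cherokee.conf.backup
+    sudo cherokee-admin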
+
+### Gerrit
+
+To deploy to production, run these commands in a Baserock 'devel'
+or 'build' system.
+
+ nova volume-create \
+ --display-name gerrit-volume \
+ --display-description 'Gerrit volume' \
+ --volume-type Ceph \
+ 100
+
+ morph init ws; cd ws; morph checkout baserock:baserock/infrastructure master;
+ cd master/baserock/baserock/infrastructure
+
+ morph build systems/gerrit-system-x86_64.morph
+ morph deploy baserock_gerrit/baserock_gerrit.morph
+
+ nova boot gerrit.baserock.org \
+ --key-name $keyname \
+ --flavor 'dc1.2x4.40' \
+ --image baserock_gerrit \
+ --nic "net-id=$network_id,v4-fixed-ip=192.168.222.69" \
+      --security-groups default,gerrit,git-server,web-server \
+ --user-data baserock-ops-team.cloud-config
+
+ nova volume-attach gerrit.baserock.org <volume-id> /dev/vdb
+
+Accept the license and download the latest Java Runtime Environment from
+http://www.oracle.com/technetwork/java/javase/downloads/server-jre8-downloads-2133154.html
+
+Accept the license and download the latest Java Cryptography Extensions from
+http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html
+
+Save these two files in the baserock_gerrit/ folder. The instance-config.yml
+Ansible playbook will upload them to the new system.
+
+ # Don't copy-paste this! Use the Oracle website instead!
+ wget --no-cookies --no-check-certificate \
+ --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" \
+ "http://download.oracle.com/otn-pub/java/jdk/8u40-b25/server-jre-8u40-linux-x64.tar.gz"
+ wget --no-cookies --no-check-certificate \
+ --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" \
+ "http://download.oracle.com/otn-pub/java/jce/8/jce_policy-8.zip"
+
+ ansible-playbook -i hosts baserock_gerrit/instance-config.yml
+
+For baserock.org Gerrit you will also need to run:
+
+ ansible-playbook -i hosts baserock_gerrit/instance-ca-certificate-config.yml
+
+#### Access control
+
+Gerrit should now be up and running and accessible through the web interface.
+By default this is on port 8080. Log into the new Gerrit instance with your
+credentials. Make sure you're the first user to register: the first account
+created is automatically added to the Administrators group.
+
+You can add more users into the Administrators group later on using the [gerrit
+set-members] command, or the web interface.
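+
+For example, a sketch (substitute your admin username and the user to add):
+
+    ssh -p 29418 <your username>@gerrit.baserock.org \
+        gerrit set-members --add <user> Administrators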
+
+Go to the settings page, 'HTTP Password', and generate an HTTP password for
+yourself. You'll need it in the next step. The password can take a long time to
+appear for some reason, or it might not work at all. Click off the page and
+come back to it and it might suddenly have appeared. I've not investigated why
+this happens.
+
+Generate the SSH keys you need, if you don't have them.
+
+ mkdir -p keys
+ ssh-keygen -t rsa -b 4096 -C 'lorry@gerrit.baserock.org' -N '' -f keys/lorry-gerrit.key
+
+Now set up the Gerrit access configuration. This Ansible playbook requires a
+couple of non-standard packages.
+
+ git clone git://git.baserock.org/delta/python-packages/pygerrit.git
+ git clone git://github.com/ssssam/ansible-gerrit
+
+    export GERRIT_URL=<Gerrit web URL>
+    export GERRIT_ADMIN_USERNAME=<your username>
+    export GERRIT_ADMIN_PASSWORD=<your generated HTTP password>
+
+ ANSIBLE_LIBRARY=./ansible-gerrit PYTHONPATH=./pygerrit \
+    ansible-playbook baserock_gerrit/gerrit-access-config.yml
+
+As well as creating all the groups and initial users in the new Gerrit
+instance, Ansible should update the file baserock_gerrit/All-Projects/groups,
+which will be needed in the next step.
+
+ git clone ssh://$GERRIT_ADMIN_USERNAME@gerrit.baserock.org:29418/All-Projects.git /tmp/All-Projects
+ cp -a baserock_gerrit/All-Projects/* /tmp/All-Projects
+ cd /tmp/All-Projects
+ git checkout -b config
+ git commit -a -m "Set up initial access controls."
+ git push origin config:refs/meta/config
+ cd -
+
+[gerrit set-members]: https://gerrit-documentation.storage.googleapis.com/Documentation/2.9.4/cmd-set-members.html
+
+#### Mirroring
+
+Run:
+
+ ansible-playbook -i hosts baserock_gerrit/instance-mirroring-config.yml
+
+Now clone Gerrit's lorry-controller configuration repository, commit the
+configuration file to it, and push.
+
+ git clone ssh://$GERRIT_ADMIN_USERNAME@gerrit.baserock.org:29418/local-config/lorries.git /tmp/lorries
+ cp baserock_gerrit/lorry-controller.conf /tmp/lorries
+ cd /tmp/lorries
+ git checkout -b master
+ git add .
+ git commit -m "Add initial Lorry Controller mirroring configuration"
+ git push origin master
+ cd -
+
+Now SSH in as 'root' to gerrit.baserock.org, tunnelling the lorry-controller
+webapp's port to your local machine:
+
+ ssh -L 12765:localhost:12765 root@gerrit.baserock.org
+
+Visit <http://localhost:12765/1.0/status-html>. You should see the
+lorry-controller status page. Click 'Re-read configuration'; if there are any
+errors in the configuration it will tell you. If not, it should start
+mirroring stuff from your Trove.
+
+Create a Gitano account for the Gerrit user on the Trove you want to push
+changes to. The `instance-config.yml` Ansible playbook will have generated an
+SSH key. Run these commands on the Gerrit instance:
+
+ ssh git@git.baserock.org user add gerrit "gerrit.baserock.org" gerrit@baserock.org
+ ssh git@git.baserock.org as gerrit sshkey add main < ~gerrit/.ssh/id_rsa.pub
+
+Add the 'gerrit' user to the necessary -writers groups on the Trove, to allow
+the gerrit-replication plugin to push merged changes to 'master' in the Trove.
+
+ ssh git@git.baserock.org group adduser baserock-writers gerrit
+ ssh git@git.baserock.org group adduser local-config-writers gerrit
+
+Add the host key of the remote Trove to the Gerrit system:
+
+ sudo -u gerrit sh -c 'ssh-keyscan git.baserock.org >> ~gerrit/.ssh/known_hosts'
+
+Check that the 'gerrit' user's Trove account is working.
+
+ sudo -u gerrit ssh git@git.baserock.org whoami
+
+Now enable the gerrit-replication plugin, check that it's now in the list of
+plugins, and manually start a replication cycle. You should see log output from
+the final SSH command showing any errors.
+
+ ssh $GERRIT_ADMIN_USERNAME@gerrit.baserock.org -p 29418 gerrit plugin enable replication
+ ssh $GERRIT_ADMIN_USERNAME@gerrit.baserock.org -p 29418 gerrit plugin ls
+ ssh $GERRIT_ADMIN_USERNAME@gerrit.baserock.org -p 29418 replication start --all --wait
+
+### Storyboard
+
+We use a slightly adapted version of
+<https://github.com/openstack-infra/puppet-storyboard> to deploy Storyboard.
+
+There's no development deployment for Storyboard at this time: the Puppet
+script expects to start services using systemd, and that doesn't work by
+default in a Docker container.
+
+To deploy the production version:
+
+ packer build -only=production baserock_storyboard/packer_template.json
+    nova boot storyboard.baserock.org \
+      --flavor dc1.1x1 --image 'baserock_storyboard' \
+      --key-name=$keyname \
+      --nic="net-id=$network_id" \
+      --security-groups default,web-server \
+      --user-data baserock-ops-team.cloud-config
+
+Storyboard deployment does not yet work fully (you can manually kludge it into
+working after deploying it, though).
+
+### Masons
+
+Mason is the name we use for an automated build and test system used in the
+Baserock project. The V2 Mason that runs at <https://mason-x86-32.baserock.org/>
+and <https://mason-x86-64.baserock.org/> lives in definitions.git, and is thus
+available in infrastructure.git too by default.
+
+To build mason-x86-64:
+
+ morph init ws; cd ws; morph checkout baserock:baserock/infrastructure master;
+ cd master/baserock/baserock/infrastructure
+
+ morph build systems/build-system-x86_64.morph
+ morph deploy baserock_mason_x86_64/mason-x86-64.morph
+
+ nova boot mason-x86-64.baserock.org \
+ --key-name $keyname \
+ --flavor 'dc1.2x2' \
+ --image baserock_mason_x86_64 \
+ --nic "net-id=$network_id,v4-fixed-ip=192.168.222.80" \
+      --security-groups internal-only,mason-x86 \
+ --user-data baserock-ops-team.cloud-config
+
+The mason-x86-32 system is the same; just substitute '32' for '64' in the above
+commands.
+
+Note that the Masons are NOT in the 'default' security group, they are in
+'internal-only'. This is a way of enforcing the [policy] that the Baserock
+reference system definitions can only use source code hosted on
+git.baserock.org, by making it impossible to fetch code from anywhere else.
+
+[policy]: http://wiki.baserock.org/policies/
diff --git a/admin/cve-2015-0235-ghost.c b/admin/cve-2015-0235-ghost.c
new file mode 100644
index 00000000..3615ff57
--- /dev/null
+++ b/admin/cve-2015-0235-ghost.c
@@ -0,0 +1,40 @@
+/* From http://www.openwall.com/lists/oss-security/2015/01/27/9 */
+
+#include <netdb.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#define CANARY "in_the_coal_mine"
+
+struct {
+ char buffer[1024];
+ char canary[sizeof(CANARY)];
+} temp = { "buffer", CANARY };
+
+int main(void) {
+ struct hostent resbuf;
+ struct hostent *result;
+ int herrno;
+ int retval;
+
+ /*** strlen (name) = size_needed - sizeof (*host_addr) - sizeof (*h_addr_ptrs) - 1; ***/
+ size_t len = sizeof(temp.buffer) - 16*sizeof(unsigned char) - 2*sizeof(char *) - 1;
+ char name[sizeof(temp.buffer)];
+ memset(name, '0', len);
+ name[len] = '\0';
+
+ retval = gethostbyname_r(name, &resbuf, temp.buffer, sizeof(temp.buffer), &result, &herrno);
+
+ if (strcmp(temp.canary, CANARY) != 0) {
+ puts("vulnerable");
+ exit(EXIT_SUCCESS);
+ }
+ if (retval == ERANGE) {
+ puts("not vulnerable");
+ exit(EXIT_SUCCESS);
+ }
+ puts("should not happen");
+ exit(EXIT_FAILURE);
+}
diff --git a/admin/sshd_config.yaml b/admin/sshd_config.yaml
new file mode 100644
index 00000000..aba6a9f1
--- /dev/null
+++ b/admin/sshd_config.yaml
@@ -0,0 +1,8 @@
+---
+- hosts: all
+ gather_facts: false
+ tasks:
+    - name: ensure SSH login with password is disabled
+      lineinfile:
+        dest=/etc/ssh/sshd_config
+        regexp='^#?PasswordAuthentication'
+        line='PasswordAuthentication no'
diff --git a/admin/test-cve-2015-0235-ghost.yml b/admin/test-cve-2015-0235-ghost.yml
new file mode 100644
index 00000000..6090eb2b
--- /dev/null
+++ b/admin/test-cve-2015-0235-ghost.yml
@@ -0,0 +1,18 @@
+# Test systems for CVE-2015-0235 GHOST
+#
+# http://www.openwall.com/lists/oss-security/2015/01/27/9
+---
+- hosts: all
+ gather_facts: False
+ tasks:
+ - name: copy in the cve-2015-0235-ghost-x86-64 test program
+ copy: src=cve-2015-0235-ghost-x86-64 dest=~ mode=755
+
+ - name: run the test program
+ command: ~/cve-2015-0235-ghost-x86-64
+ register: test_output
+
+ - debug: var=test_output.stdout_lines
+
+ - name: remove test program again
+ file: path=~/cve-2015-0235-ghost-x86-64 state=absent
diff --git a/ansible.cfg b/ansible.cfg
new file mode 100644
index 00000000..b81f6a5d
--- /dev/null
+++ b/ansible.cfg
@@ -0,0 +1,5 @@
+# Proxy SSH connection through the frontend-haproxy machine to access
+# the instances that don't have public IPs.
+[ssh_connection]
+ssh_args = -o ProxyCommand="ssh -q -A fedora@185.43.218.170 'nc %h %p'"
+
diff --git a/backup-snapshot b/backup-snapshot
new file mode 100755
index 00000000..ce9ae88f
--- /dev/null
+++ b/backup-snapshot
@@ -0,0 +1,249 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''Create a temporary backup snapshot of a volume.
+
+This program is intended as a wrapper for `rsync`, to allow copying data out
+of the system with a minimum of service downtime. You can't copy data from a
+volume used by a service like MariaDB or Gerrit while that service is running,
+because the contents will change underneath your feet while you copy them. This
+script assumes the data is stored on an LVM volume, so you can stop the
+services, snapshot the volume, start the services again and then copy the data
+out from the snapshot.
+
+To use it, you need to use the 'command' feature of the .ssh/authorized_keys
+file, which causes OpenSSH to run a given command whenever a given SSH key
+connects (instead of allowing the owner of the key to run any command). This
+ensures that even if the backup key is compromised, all the attacker can do is
+make backups, and only then if they are connecting from the IP listed in 'from'
+
+ command=/usr/bin/backup-snapshot <key details>
+
+You'll need to create a YAML configuration file in /etc/backup-snapshot.conf
+that describes how to create the snapshot. Here's an example:
+
+ services:
+ - lorry-controller-minion@1.service
+ - gerrit.service
+
+ volume: /dev/vg0/gerrit
+
+To test this out, run:
+
+ rsync root@192.168.0.1: /srv/backup --rsync-path="/usr/bin/backup-snapshot"
+
+There is a Perl script named 'rrsync' that does something similar:
+
+ http://git.baserock.org/cgi-bin/cgit.cgi/delta/rsync.git/tree/support/rrsync
+
+'''
+
+
+import contextlib
+import logging
+import os
+import signal
+import shlex
+import subprocess
+import sys
+import tempfile
+import time
+import traceback
+import yaml
+
+
+CONFIG_FILE = '/etc/backup-snapshot.conf'
+
+
+def status(msg, *format):
+ # Messages have to go on stderr because rsync communicates on stdout.
+ logging.info(msg, *format)
+ sys.stderr.write(msg % format + '\n')
+
+
+def run_command(argv):
+ '''Run a command, raising an exception on failure.
+
+ Output on stdout is returned.
+ '''
+ logging.debug("Running: %s", argv)
+ output = subprocess.check_output(argv, close_fds=True)
+
+ logging.debug("Output: %s", output)
+ return output
+
+
+@contextlib.contextmanager
+def pause_services(services):
+ '''Stop a set of systemd services for the duration of a 'with' block.'''
+
+ logging.info("Pausing services: %s", services)
+ try:
+ for service in services:
+ run_command(['systemctl', 'stop', service])
+ yield
+ finally:
+ for service in services:
+ run_command(['systemctl', 'start', service])
+ logging.info("Restarted services: %s", services)
+
+
+def snapshot_volume(volume_path, suffix=None):
+ '''Create a snapshot of an LVM volume.'''
+
+ volume_group_path, volume_name = os.path.split(volume_path)
+
+ if suffix is None:
+ suffix = time.strftime('-backup-%Y-%m-%d')
+ snapshot_name = volume_name + suffix
+
+ logging.info("Snapshotting volume %s as %s", volume_path, snapshot_name)
+ run_command(['lvcreate', '--name', snapshot_name, '--snapshot', volume_path, '--extents', '100%ORIGIN', '--permission=r'])
+
+ snapshot_path = os.path.join(volume_group_path, snapshot_name)
+ return snapshot_path
+
+
+def delete_volume(volume_path):
+ '''Delete an LVM volume or snapshot.'''
+
+ # Sadly, --force seems necessary, because activation applies to the whole
+ # volume group rather than to the individual volumes so we can't deactivate
+ # only the snapshot before removing it.
+ logging.info("Deleting volume %s", volume_path)
+ run_command(['lvremove', '--force', volume_path])
+
+
+@contextlib.contextmanager
+def mount(block_device, path=None):
+ '''Mount a block device for the duration of 'with' block.'''
+
+ if path is None:
+ path = tempfile.mkdtemp()
+ tempdir = path
+ logging.debug('Created temporary directory %s', tempdir)
+ else:
+ tempdir = None
+
+ try:
+ run_command(['mount', block_device, path])
+ try:
+ yield path
+ finally:
+ run_command(['umount', path])
+ finally:
+ if tempdir is not None:
+ logging.debug('Removed temporary directory %s', tempdir)
+ os.rmdir(tempdir)
+
+
+def load_config(filename):
+ '''Load configuration from a YAML file.'''
+
+ logging.info("Loading config from %s", filename)
+ with open(filename, 'r') as f:
+ config = yaml.safe_load(f)
+
+ logging.debug("Config: %s", config)
+ return config
+
+
+def get_rsync_sender_flag(rsync_commandline):
+ '''Parse an 'rsync --server' commandline to get the --sender ID.
+
+ This parses a remote commandline, so be careful.
+
+ '''
+ args = shlex.split(rsync_commandline)
+ if args[0] != 'rsync':
+ raise RuntimeError("Not passed an rsync commandline.")
+
+ for i, arg in enumerate(args):
+ if arg == '--sender':
+ sender = args[i + 1]
+ return sender
+ else:
+ raise RuntimeError("Did not find --sender flag.")
+
+
+def run_rsync_server(source_path, sender_flag):
+ # Adding '/' to the source_path tells rsync that we want the /contents/
+ # of that directory, not the directory itself.
+ #
+ # You'll have realised that it doesn't actually matter what remote path the
+ # user passes to their local rsync.
+ rsync_command = ['rsync', '--server', '--sender', sender_flag, '.',
+ source_path + '/']
+ logging.debug("Running: %s", rsync_command)
+ subprocess.check_call(rsync_command, stdout=sys.stdout)
+
+
+def main():
+ logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
+ datefmt='%Y-%m-%d %H:%M:%S',
+ filename='/var/log/backup-snapshot.log',
+ level=logging.DEBUG)
+
+ logging.debug("Running as UID %i GID %i", os.getuid(), os.getgid())
+
+ # Ensure that clean up code (various 'finally' blocks in the functions
+ # above) always runs. This is important to ensure we never leave services
+ # stopped if the process is interrupted somehow.
+
+ signal.signal(signal.SIGHUP, signal.default_int_handler)
+
+ config = load_config(CONFIG_FILE)
+
+ # Check commandline early, so we don't stop services just to then
+ # give an error message.
+ rsync_command = os.environ.get('SSH_ORIGINAL_COMMAND', '')
+ logging.info("Original SSH command: %s", rsync_command)
+
+ if len(rsync_command) == 0:
+ # For testing only -- this can only happen if
+ # ~/.ssh/authorized_keys isn't set up as described above.
+ logging.info("Command line: %s", sys.argv)
+ rsync_command = 'rsync ' + ' '.join(sys.argv[1:])
+
+ # We want to ignore as much as possible of the
+ # SSH_ORIGINAL_COMMAND, because it's a potential attack vector.
+ # If an attacker has somehow got hold of the backup SSH key,
+ # they can pass whatever they want, so we hardcode the 'rsync'
+ # commandline here instead of honouring what the user passed
+ # in. We can anticipate everything except the '--sender' flag.
+ sender_flag = get_rsync_sender_flag(rsync_command)
+
+ with pause_services(config['services']):
+ snapshot_path = snapshot_volume(config['volume'])
+
+ try:
+ with mount(snapshot_path) as mount_path:
+                run_rsync_server(mount_path, sender_flag)
+
+ status("rsync server process exited with success.")
+ finally:
+ delete_volume(snapshot_path)
+
+
+try:
+ status('backup-snapshot started')
+ main()
+except RuntimeError as e:
+    sys.stderr.write('ERROR: %s\n' % e)
+except Exception as e:
+ logging.debug(traceback.format_exc())
+ raise
diff --git a/baserock-ops-team.cloud-config b/baserock-ops-team.cloud-config
new file mode 100644
index 00000000..c6c51264
--- /dev/null
+++ b/baserock-ops-team.cloud-config
@@ -0,0 +1,19 @@
+#cloud-config
+
+# The contents of this cloud-config script should be included in the
+# post-creation 'customisation script' for every instance in the public
+# baserock.org infrastructure. It gives access to all members the Baserock Ops
+# team, so that any member of the team can deploy security updates.
+
+ssh_authorized_keys:
+ # Pedro Alvarez
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPuirtOH8D/6iNAb3DE079FcTmDlDgusVBJ2FC0O/FHSxwAgNwHhUbCxHAcO/N+HICSvDmjp16Ki0ti2ZxfworG88shPiMOGQfuJaRv1X15AV7NsO80Llsqy/x8X+WdA5iwpUyKM011vv/pS/DhSCHJFJ/vQFgox12HQSKZuzGIOupCiZfHES5t5oEPAcoQYCC0hO4ZevyeO0ZixrOGf/iyXHyb2BoQJAehixt28YOfdaW7Z29SssCGf7QvtADYg+vF5Tazln51vp1M+fo1oF0aa/VLN3gYuf+BI6x6sEc4N/ZQaCR5+oBP3/gIVlIwOOftzC9G+l6PBOS4368nZTv pedro.alvarez@codethink.co.uk
+
+ # Gary Perkins
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQvRqmYpSVpff0MJq9aigjNQX22PdMkDiXpcV7EbDWdE3QLk7D818dljMKy2SvmgiEO7e/5jn8K7b9Dr88GF4dM/Oxc2k2yP9fzMoW+cE/drHBH+zDb9Zw1xa+t1AcMtl0XAEZft/hvpgx+Tp2XaEv6t7O9Ogxw1ahKtbkgDprhrnC9cVctu3VJhu8amY4BYZC9hRZUa02pCQl1i0klYq7E61zF8I25hS6HP0fbD/O+hAt5N3VqmkN+4QmCP8kkXSmyjKOurnXcGKPWonpOyB3cwVk3DO7krsw2qIIVoe/9PIK112oHNJxM01UUF+ZiPGEWawQfHRNG8Y03KQJanaf gary@garyp
+
+ # Francisco Redondo Marchena
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDOZHTTBcWTN99IptDR2DlNZ30XaK9fhTxzeYm2ZFKKmTcyls7rO4D51+OkAKQpW50SABcw2we4T94WmEHPmC+Se6cNN4OYTT71PYQQAc6rQ5IeNRj5DcAQj3WaZvyF1qFXsTQ1ZYWP8D/2gQ9rEhImBHDAuX+bFKgup/4D7Y0fOsVLGBrO0rIYB6Cxgt6rnHWrrFO/8foL/SKDQpJP/fLD+Zf37m0XSsd3M3Q5fegtoSq3YEXaqRdVB119bUL4AovgZJ30+aC9ei3ff1ASqgQLVMmMdLaqrzKAfwtBKKdLnPJYviKjcDhXxY6fykZIsEymi0Zg3CRh9c5HlUY3Pofr francisco.marchena@codethink.co.uk
+
+ # Sam Thursfield
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkDPLAg9ueRzIVdPbfsGboXbnd7HIwQ9oiFh1JP8NvyZ0ZWejmY7m5k6FOTDBp70Rwx1/6+rzLyCsbT5SN+rK0Ywl145A09jODRt2PWZ3ddsUDfXTY7Ycu3MLOsyjIiY5o9zhSmu+/pU90TlvlE16TFUPnyc4PDqI1DALCUf7OxDVihcecn4Fhd0XQI8FBM/c47CjvyD2g+xr2b5Qa7eCfBEFTCqpQegDOQN3Hlq1t1VLLXv+srcQkI+uh4wseJ3GcQ4T/+6w6axlGd6a2v8IjKALxveCKyI5bHirKTMJZg+BCulb+ucoafbRbLcNpmrEVfhUE5O4/ffBExaEiwni1 sam.thursfield@codethink.co.uk
diff --git a/baserock_backup/backup.sh b/baserock_backup/backup.sh
new file mode 100755
index 00000000..f16ba447
--- /dev/null
+++ b/baserock_backup/backup.sh
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+# These aren't normal invocations of rsync: the targets use the
+# 'command' option in /root/.ssh/authorized_keys to force execution of
+# the 'backup-snapshot' script at the remote end, which then starts the
+# rsync server process. So the backup SSH key can only be used to make
+# backups, nothing more.
+
+# Don't make the mistake of trying to run this from a systemd unit. There is
+# some brokenness in systemd that causes the SSH connection forwarding to not
+# work, so you will not be able to connect to the remote machines.
+
+# Database
+/usr/bin/rsync --archive --delete-before --delete-excluded \
+ --hard-links --human-readable --progress --sparse \
+ root@192.168.222.30: /srv/backup/database
+date > /srv/backup/database.timestamp
+
+# Gerrit
+/usr/bin/rsync --archive --delete-before --delete-excluded \
+ --hard-links --human-readable --progress --sparse \
+ --exclude='cache/' --exclude='tmp/' \
+ root@192.168.222.69: /srv/backup/gerrit
+date > /srv/backup/gerrit.timestamp
+
diff --git a/baserock_backup/instance-config.yml b/baserock_backup/instance-config.yml
new file mode 100644
index 00000000..327b84e9
--- /dev/null
+++ b/baserock_backup/instance-config.yml
@@ -0,0 +1,29 @@
+# Configuration for a machine that runs data backups of baserock.org.
+#
+# The current backup machine is not a reproducible deployment, but this
+# playbook should be easily adaptable to produce a properly reproducible
+# one.
+---
+- hosts: baserock-backup1
+ gather_facts: false
+ tasks:
+ - name: user for running backups
+ user: name=backup
+
+ # You'll need to copy in the SSH key manually for this user.
+
+ - name: SSH config for backup user
+ copy: src=ssh_config dest=/home/backup/.ssh/config
+
+ - name: backup script
+ copy: src=backup.sh dest=/home/backup/backup.sh mode=755
+
+ # You will need https://github.com/ansible/ansible-modules-core/pull/986
+ # for this to work.
+ - name: backup cron job, runs every day at midnight
+ cron:
+ hour: 00
+ minute: 00
+ job: /home/backup/backup.sh
+ name: baserock.org data backup
+ user: backup
diff --git a/baserock_backup/ssh_config b/baserock_backup/ssh_config
new file mode 100644
index 00000000..e14b38a0
--- /dev/null
+++ b/baserock_backup/ssh_config
@@ -0,0 +1,4 @@
+# SSH configuration to route all requests to baserock.org systems
+# via the frontend system, 185.43.218.170.
+Host 192.168.222.*
+ ProxyCommand ssh backup@185.43.218.170 -W %h:%p
diff --git a/baserock_database/backup-snapshot.conf b/baserock_database/backup-snapshot.conf
new file mode 100644
index 00000000..cb3a2ff0
--- /dev/null
+++ b/baserock_database/backup-snapshot.conf
@@ -0,0 +1,4 @@
+services:
+ - mariadb.service
+
+volume: /dev/vg0/database
diff --git a/baserock_database/develop.sh b/baserock_database/develop.sh
new file mode 100755
index 00000000..140092b1
--- /dev/null
+++ b/baserock_database/develop.sh
@@ -0,0 +1,70 @@
+#!/bin/sh
+
+# Start up a development instance of 'database', which will be accessible on
+# the local machine. (To stop it again, use `docker stop baserock-database`).
+
+# Note that this container works in a different way to the official Docker
+# MariaDB image (<https://registry.hub.docker.com/_/mariadb/>). That's
+# intentional: the official image is for use when Docker is being used as a
+# production environment and the official Docker images are considered trusted.
+# Here I am using Docker as a tool to locally test out trusted(ish) images that
+# I create with Packer, before deploying them to an OpenStack cloud.
+
+set -eu
+
+# These lines of SQL are needed to authorize the container host for accessing
+# the database remotely. (It actually grants access to any host, but since
+# this is a development instance that's OK!)
+CREATE_REMOTE_ROOT_USER_SQL="CREATE USER 'root'@'%' IDENTIFIED BY 'insecure' ;"
+ALLOW_REMOTE_ROOT_USER_SQL="GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION ;"
+
+docker run --detach \
+ --name=baserock-database \
+ --publish=127.0.0.1:3306:3306 \
+ baserock/database \
+ /bin/sh -c " \
+ echo \"$CREATE_REMOTE_ROOT_USER_SQL\" > /tmp/mariadb-init.sql && \
+ echo \"$ALLOW_REMOTE_ROOT_USER_SQL\" >> /tmp/mariadb-init.sql && \
+ /usr/libexec/mariadb-prepare-db-dir mariadb && \
+ /usr/bin/mysqld_safe --basedir=/usr --init-file=/tmp/mariadb-init.sql"
+
+trap 'docker rm -f baserock-database > /dev/null' ERR
+
+# Create some dummy accounts (in production deployments, this is done using
+# the 'instance-mariadb-config.yml' Ansible playbook). We expect that there
+# exists a 'root' user with no password set already.
+
+create_without_overwriting() {
+ target_file="$1"
+ content="$2"
+ if [ -e "$target_file" -a "$(cat "$target_file")" != "$content" ]; then
+ echo >&2 "Not overwriting existing file $target_file"
+ # Don't let the user create a development environment using files that
+ # could contain the real passwords, to avoid them being used in an
+ # insecure deployment.
+ exit 1
+ fi
+ echo "$content" > "$target_file"
+}
+
+create_without_overwriting "database/root.database_password.yml" "root_password: insecure"
+create_without_overwriting "database/baserock_openid_provider.database_password.yml" "baserock_openid_provider_password: openid_insecure"
+
+# Ouch! Would be nice if you could get the 'docker run' command to wait until
+# the database server is ready, or poll somehow until it is.
+echo "Waiting 30 seconds for database server to be ready"
+sleep 30
+
+# Note that the Python 'mysqldb' module is required on the machine Ansible
+# connects to for this playbook. For development deployments that is *your*
+# machine (since we cannot and should not SSH into the Docker container). On
+# Red Hat OSes the package you need is called 'MySQL-python'.
+ansible-playbook database/user_config.yml
+
+echo "You have a container named 'baserock-database' listening on port 3306."
+echo
+echo "Pass '--link baserock-database:mysql' to 'docker run' when starting "
+echo "other containers if you want to give them access to this instance."
+echo
+echo "Run 'docker stop baserock-database; docker rm baserock-database' when "
+echo "you are done with it (all data will then be lost)."
diff --git a/baserock_database/image-config.yml b/baserock_database/image-config.yml
new file mode 100644
index 00000000..a6ba9866
--- /dev/null
+++ b/baserock_database/image-config.yml
@@ -0,0 +1,22 @@
+# System configuration for Baserock database server.
+#
+# Packer runs this playbook inside the system at 'build' time, using the
+# command `sudo ansible-playbook`.
+---
+- hosts: localhost
+ gather_facts: False
+ tasks:
+      - name: enable persistent journal
+ shell: mkdir /var/log/journal
+ args:
+ creates: /var/log/journal
+
+ - name: install lvm2 tools
+ yum: name=lvm2 state=latest
+
+ - name: install MariaDB
+ yum: name={{ item }} state=latest
+ with_items:
+ - mariadb
+ - mariadb-server
+ - MySQL-python
diff --git a/baserock_database/instance-backup-config.yml b/baserock_database/instance-backup-config.yml
new file mode 100644
index 00000000..79e5ff6c
--- /dev/null
+++ b/baserock_database/instance-backup-config.yml
@@ -0,0 +1,26 @@
+# Instance backup configuration for the baserock.org database.
+---
+- hosts: database-mariadb
+ gather_facts: false
+ sudo: yes
+ vars:
+ FRONTEND_IP: 192.168.222.21
+ tasks:
+ - name: backup-snapshot script
+ copy: src=../backup-snapshot dest=/usr/bin/backup-snapshot mode=755
+
+ - name: backup-snapshot config
+ copy: src=backup-snapshot.conf dest=/etc/backup-snapshot.conf
+
+ # We need to give the backup automation 'root' access, because it needs to
+ # manage system services, LVM volumes, and mounts, and because it needs to
+ # be able to read private data. The risk of having the backup key
+ # compromised is mitigated by only allowing it to execute the
+ # 'backup-snapshot' script, and limiting the hosts it can be used from.
+ - name: access for backup SSH key
+ authorized_key:
+ user: root
+ key: "{{ lookup('file', '../keys/backup.key.pub') }}"
+      # Quotes are important in these options; the OpenSSH server will reject
+ # the entry if the 'from' or 'command' values are not quoted.
+ key_options: 'from="{{FRONTEND_IP}}",no-agent-forwarding,no-port-forwarding,no-X11-forwarding,command="/usr/bin/backup-snapshot"'
diff --git a/baserock_database/instance-config.yml b/baserock_database/instance-config.yml
new file mode 100644
index 00000000..6592b394
--- /dev/null
+++ b/baserock_database/instance-config.yml
@@ -0,0 +1,25 @@
+# Instance configuration for Baserock database server.
+#
+# This script expects a volume to be available at /dev/vdb.
+---
+- hosts: database-mariadb
+ gather_facts: False
+ sudo: yes
+ tasks:
+ - name: ensure system up to date
+ yum: name=* state=latest
+
+ # FIXME: the create-data-volume.yml role should handle this... the gotcha
+ # is that this won't work in Baserock systems right now. Once there's an
+ # lvm2-lvmetad.service in Baserock we can move this entry to
+ # create-data-volume.yml.
+ - name: ensure LVM metadata service is running
+ service: name=lvm2-lvmetad enabled=yes state=started
+
+ - include: ../tasks/create-data-volume.yml lv_name=database lv_size=25g mountpoint=/var/lib/mysql
+
+ - name: ensure mysql user owns /var/lib/mysql
+      file: path=/var/lib/mysql owner=mysql group=mysql mode=0700 state=directory
+
+ - name: restart the MariaDB service
+ service: name=mariadb enabled=true state=restarted
diff --git a/baserock_database/instance-mariadb-config.yml b/baserock_database/instance-mariadb-config.yml
new file mode 100644
index 00000000..0febaaf4
--- /dev/null
+++ b/baserock_database/instance-mariadb-config.yml
@@ -0,0 +1,71 @@
+# MariaDB configuration for Baserock database server.
+#
+# The relevant .database_password.yml files will need to be available already.
+# Create these manually and keep them somewhere safe and secret.
+---
+- hosts: database-mariadb
+ gather_facts: False
+ vars_files:
+ - root.database_password.yml
+ - baserock_gerrit.database_password.yml
+ - baserock_openid_provider.database_password.yml
+ - baserock_storyboard.database_password.yml
+ tasks:
+ - name: creating root database user
+ mysql_user: |
+ name=root
+ password={{ root_password }}
+ login_host=127.0.0.1
+ login_user=root
+ login_password={{ root_password }}
+ check_implicit_admin=yes
+
+ - name: remove the MySQL test database
+ mysql_db:
+ name=test state=absent
+ login_host=127.0.0.1
+ login_user=root
+ login_password={{ root_password }}
+
+ # Note that UTF-8 encoding and collation is *not* the default. Don't remove
+ # those lines or you will end up with a horrible disaster of a database.
+ - name: adding databases
+ mysql_db: |
+ name={{ item }}
+ state=present
+ login_host=127.0.0.1
+ login_user=root
+ login_password={{ root_password }}
+ collation=utf8_unicode_ci
+ encoding=utf8
+ with_items:
+ - gerrit
+ - openid_provider
+ - storyboard
+
+ # We could probably restrict the privileges of these users further...
+ #
+ # I feel like setting 'host="%"' (i.e. not enforcing that the account can
+ # only be used by IPs within the cloud's local network, or even a single
+  # known IP address) is kind of bad practice, but since the database server
+ # is not exposed to the internet anyway I don't think it's important right
+ # now.
+ - name: adding other database users
+ mysql_user: |
+ name="{{ item.name }}"
+ host="%"
+ password={{ item.password }}
+ priv={{ item.priv }}
+ login_host=127.0.0.1
+ login_user=root
+ login_password={{ root_password }}
+ with_items:
+ - name: gerrit
+ password: "{{ baserock_gerrit_password }}"
+ priv: gerrit.*:ALL
+ - name: openid
+ password: "{{ baserock_openid_provider_password }}"
+ priv: openid_provider.*:ALL
+ - name: storyboard
+ password: "{{ baserock_storyboard_password }}"
+ priv: storyboard.*:ALL
diff --git a/baserock_database/packer_template.json b/baserock_database/packer_template.json
new file mode 100644
index 00000000..2afd78ef
--- /dev/null
+++ b/baserock_database/packer_template.json
@@ -0,0 +1,57 @@
+{
+ "builders": [
+ {
+ "name": "development",
+ "type": "docker",
+ "image": "fedora:20",
+ "commit": true,
+ "run_command": ["-d", "-i", "-t", "{{.Image}}", "/bin/sh"]
+ },
+ {
+ "name": "production",
+ "type": "openstack",
+ "image_name": "database-mariadb",
+ "flavor": "f0577618-9125-4948-b450-474e225bbc4c",
+ "source_image": "742e0414-c985-4994-b307-4aafade942b3",
+ "networks": ["d079fa3e-2558-4bcb-ad5a-279040c202b5"],
+ "floating_ip": "85.199.252.164",
+ "use_floating_ip": true,
+ "ssh_username": "fedora"
+ }
+ ],
+ "provisioners": [
+ {
+ "type": "shell",
+ "inline": [ "sudo yum install -y ansible"]
+ },
+ {
+ "type": "ansible-local",
+ "playbook_file": "database/image-config.yml",
+ "command": "sudo ansible-playbook"
+ },
+ {
+ "type": "shell",
+ "inline": [
+ "sudo yum install -y libselinux-python",
+ "sudo ansible localhost -m selinux -a state=disabled",
+ "sudo setenforce 0",
+ ],
+ "only": ["production"]
+ },
+ {
+ "type": "shell",
+ "inline": [ "sync; sync; sleep 10; sync" ],
+ "only": ["production"]
+ }
+ ],
+ "post-processors": [
+ [
+ {
+ "type": "docker-tag",
+ "repository": "baserock/database",
+ "tag": "latest",
+ "only": ["development"]
+ }
+ ]
+ ]
+}
diff --git a/baserock_gerrit/All-Projects/groups b/baserock_gerrit/All-Projects/groups
new file mode 100644
index 00000000..da2baa74
--- /dev/null
+++ b/baserock_gerrit/All-Projects/groups
@@ -0,0 +1,16 @@
+# UUID Group Name
+#
+global:Anonymous-Users Anonymous Users
+global:Project-Owners Project Owners
+global:Registered-Users Registered Users
+
+# This file is filled in with the other group IDs by the
+# gerrit-access-config.yml Ansible playbook.
+b660c33b68509db9dbd9578ae00035da90c0d5eb Administrators
+8e467a11f116bb716a65ac85e28bf09ebfeb0d63 Non-Interactive Users
+898d9c4232b8fcac6a3b128f7264c5d4c8b1eead Developers
+b8fc45c681b94669fe3fa965c48d5221a515a3a6 Mergers
+8c788c828285c3dd0a8c1cc152de6735085def9f Mirroring Tools
+a7a9cc6639bd943e47da0d20b39267a08b43cd91 Release Team
+d643abb0ad6e9d5ac33093af5cd3a3d4e484d95d Reviewers
+cea6c19a08e11b74e63a567e050bec2c6eeb14dc Testers
diff --git a/baserock_gerrit/All-Projects/project.config b/baserock_gerrit/All-Projects/project.config
new file mode 100644
index 00000000..e418ac0b
--- /dev/null
+++ b/baserock_gerrit/All-Projects/project.config
@@ -0,0 +1,123 @@
+# Top-level access controls for projects on Baserock Gerrit.
+
+# These can be overridden by a project's own project.config file. They are also
+# overridden by the config of a project's parent repo, if it is set to something
+# other than the default parent project 'All-Projects'.
+
+# Useful references:
+#
+# https://gerrit-documentation.storage.googleapis.com/Documentation/2.11/access-control.html
+# https://git.openstack.org/cgit/openstack-infra/system-config/tree/doc/source/gerrit.rst
+
+# To deploy changes to this file, you need to manually commit it and push it to
+# the 'refs/meta/config' ref of the All-Projects repo in Gerrit.
+
+[project]
+ description = Access inherited by all other projects.
+
+[receive]
+ requireContributorAgreement = false
+ requireSignedOffBy = false
+ requireChangeId = true
+
+[submit]
+ mergeContent = true
+ action = rebase if necessary
+
+[capability]
+ administrateServer = group Administrators
+ priority = batch group Non-Interactive Users
+ streamEvents = group Non-Interactive Users
+
+ createProject = group Mirroring Tools
+
+# Everyone can read everything.
+[access "refs/*"]
+ read = group Administrators
+ read = group Anonymous Users
+
+
+# Developers can propose changes. All 'Registered Users' are 'Developers'.
+[access "refs/for/refs/*"]
+ push = group Developers
+ pushMerge = group Developers
+
+
+[access "refs/heads/*"]
+ forgeAuthor = group Developers
+ rebase = group Developers
+ label-Code-Review = -2..+2 group Mergers
+ submit = group Mergers
+ label-Code-Review = -1..+1 group Reviewers
+# label-Verified = -1..+1 group Testers
+
+ create = group Administrators
+ forgeAuthor = group Administrators
+ forgeCommitter = group Administrators
+ push = group Administrators
+ create = group Project Owners
+ forgeAuthor = group Project Owners
+ forgeCommitter = group Project Owners
+ push = group Project Owners
+ create = group Mergers
+ forgeAuthor = group Mergers
+ push = group Mergers
+
+ create = group Mirroring Tools
+ forgeAuthor = group Mirroring Tools
+ forgeCommitter = group Mirroring Tools
+ push = +force group Mirroring Tools
+
+
+# We allow Lorry to force-push to personal branches, but don't ever let
+# it force-push to master as this may undo merges that Gerrit just did
+# and really confuse things.
+[access "refs/heads/master"]
+ exclusiveGroupPermissions = push
+ push = block +force group Mirroring Tools
+
+
+[access "refs/tags/*"]
+ pushTag = group Release Team
+ pushSignedTag = group Release Team
+
+ pushTag = group Administrators
+ pushSignedTag = group Administrators
+ pushTag = group Project Owners
+ pushSignedTag = group Project Owners
+
+ create = group Mirroring Tools
+ forgeAuthor = group Mirroring Tools
+ forgeCommitter = group Mirroring Tools
+ push = +force group Mirroring Tools
+ pushTag = +force group Mirroring Tools
+ pushSignedTag = +force group Mirroring Tools
+
+
+# Changing project configuration is allowed for Administrators only. (In theory
+# anyone who owns a project can change its permissions, but right now all
+# projects should be owned by the Administrators group).
+[access "refs/meta/config"]
+ exclusiveGroupPermissions = read
+
+ read = group Administrators
+ push = group Administrators
+ read = group Project Owners
+ push = group Project Owners
+
+[label "Code-Review"]
+ function = MaxWithBlock
+ copyMinScore = true
+ value = -2 Do not merge
+ value = -1 This patch needs further work before it can be merged
+ value = 0 No score
+ value = +1 Looks good to me, but someone else must approve
+ value = +2 Looks good to me, approved
+
+# Disabled for now, because there is no automated test tool hooked up to our
+# Gerrit yet.
+#[label "Verified"]
+# function = MaxWithBlock
+# value = -1 Failed
+# value = 0 No score
+# value = +1 Verified
diff --git a/baserock_gerrit/backup-snapshot.conf b/baserock_gerrit/backup-snapshot.conf
new file mode 100644
index 00000000..e8e2f3fc
--- /dev/null
+++ b/baserock_gerrit/backup-snapshot.conf
@@ -0,0 +1,5 @@
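+# Configuration for the backup-snapshot script (installed as
+# /usr/bin/backup-snapshot by instance-backup-config.yml): the services to
+# stop while a snapshot of the LVM volume below is taken.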
+services:
+ - lorry-controller-minion@1.service
+ - gerrit.service
+
+volume: /dev/vg0/gerrit
diff --git a/baserock_gerrit/baserock_gerrit.morph b/baserock_gerrit/baserock_gerrit.morph
new file mode 100644
index 00000000..76a92667
--- /dev/null
+++ b/baserock_gerrit/baserock_gerrit.morph
@@ -0,0 +1,26 @@
+name: baserock_gerrit
+kind: cluster
+
+description: |
+ Deployment .morph for baserock.org Gerrit system.
+
+ Configuration of the system is handled separately, with a series of
+ Ansible playbooks that should be run after an instance of the system
+ is up and running. See the README for instructions.
+
+systems:
+ - morph: systems/gerrit-system-x86_64.morph
+ deploy:
+ gerrit.baserock.org:
+ type: openstack
+ location: https://compute.datacentred.io:5000/v2.0
+
+ # You can use this method to deploy upgrades over SSH, after the
+ # machine is deployed.
+ #type: ssh-rsync
+ #location: root@gerrit.baserock.org
+
+ OPENSTACK_IMAGENAME: baserock_gerrit
+ CLOUD_INIT: yes
+ DISK_SIZE: 3G
+ KERNEL_ARGS: console=tty0 console=ttyS0
diff --git a/baserock_gerrit/gerrit-access-config.yml b/baserock_gerrit/gerrit-access-config.yml
new file mode 100644
index 00000000..3966c928
--- /dev/null
+++ b/baserock_gerrit/gerrit-access-config.yml
@@ -0,0 +1,161 @@
+# Baserock Gerrit access controls, and predefined users, groups and projects.
+#
+# This Ansible playbook requires the ansible-gerrit modules:
+#
+# https://www.github.com/ssssam/ansible-gerrit
+#
+# These modules depend on pygerrit:
+#
+# https://www.github.com/sonyxperiadev/pygerrit/
+#
+# If you want to change the configuration, just edit this script and rerun it,
+# as described in the README.
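+#
+# That is, something like (assuming the 'hosts' inventory file at the top
+# level of this repository):
+#
+#   ansible-playbook -i hosts baserock_gerrit/gerrit-access-config.yml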
+#
+# This script currently doesn't handle committing changes to the access control
+# rules for the 'All-Projects' project. To set up or modify the access control
+# rules, you'll need to manually commit project.config (in the All-Projects
+# subdirectory) to the 'refs/meta/config' ref of the All-Projects repo in
+# Gerrit. The 'groups' file will need to list all the groups referenced in
+# project.config. This script will add the UUIDs of all groups listed below
+# to the All-Projects/groups file, so you don't have to create it manually.
+---
+- hosts: localhost
+ tasks:
+ # System groups:
+ # - Anonymous Users
+ # - Change Owner
+ # - Project Owners
+ # - Registered Users
+
+ # Predefined groups:
+ # - Administrators
+ # - Non-Interactive Users
+
+ - gerrit_group:
+ name: Administrators
+ register: administrators_group
+
+ - gerrit_group:
+ name: Non-Interactive Users
+ register: non_interactive_users_group
+
+ # The 'owner' of a group defines who can modify that group. Users
+ # who are in the 'owner' group for a group 'Groupies' can add and remove
+ # people (and other groups) from 'Groupies' and can change the name,
+ # description and owner of 'Groupies.' Since we don't want the
+ # names, descriptions or owners of these predefined groups being
+ # changed, they are all left owned by the Administrators group.
+
+ - gerrit_group:
+ name: Developers
+ description: Registered users who choose to submit changes for consideration.
+ owner: Administrators
+ included_groups:
+ - Registered Users
+ register: developers_group
+
+ # Right now all Mergers are in the Release Team by default.
+ - gerrit_group:
+ name: Release Team
+ description: Developers who can tag releases.
+ owner: Administrators
+ included_groups:
+ - Mergers
+ register: release_team_group
+
+ - gerrit_group:
+ name: Mergers
+ description: Developers who can trigger the actual merging of a change.
+ owner: Administrators
+ register: mergers_group
+
+ - gerrit_group:
+ name: Mirroring Tools
+ description: Programs that pull changes from external repositories into Gerrit's Git server.
+ owner: Administrators
+ register: mirroring_tools_group
+
+ - gerrit_group:
+ name: Reviewers
+ description: Registered users who choose to give +1 / -1 reviews to proposed changes.
+ owner: Administrators
+ included_groups:
+ - Registered Users
+ register: reviewers_group
+
+ - gerrit_group:
+ name: Testers
+ description: Testers that can give +1 / -1 Verified to proposed changes.
+ owner: Administrators
+ register: testers_group
+
+ # Non-interactive accounts.
+
+ - gerrit_account:
+ username: firehose
+ fullname: Firehose integration bot
+ email: firehose@baserock.org
+ groups:
+ - Non-Interactive Users
+ - Developers
+ #ssh_key: xx
+
+ - gerrit_account:
+ username: lorry
+ fullname: Lorry mirroring service
+ email: lorry@baserock.org
+ groups:
+ - Mirroring Tools
+ - Non-Interactive Users
+ # FIXME: ansible-gerrit module should be able to handle a filename
+ # here, instead of needing this hack to read the contents.
+ ssh_key: "{{ lookup('file', '../keys/lorry-gerrit.key.pub') }}"
+
+ - gerrit_account:
+ username: mason
+ fullname: Mason automated tester
+ email: mason@baserock.org
+ groups:
+ - Non-Interactive Users
+ - Testers
+ #ssh_key: xx
+
+ # It'd make more sense to do this in the mirroring-config.yml file, but
+ # then the admin would need to supply their Gerrit credentials to that
+ # playbook too (which is more tricky, because it doesn't run on
+ # 'localhost').
+ - name: repo to hold Lorry Controller mirroring configuration
+ gerrit_project:
+ name: local-config/lorries
+ description: Configuration for Lorry for mirroring from Trove
+
+ - name: create 'groups' mapping required by Gerrit
+ lineinfile:
+ create: yes
+ dest: All-Projects/groups
+ line: "{{ item.group_info.id }}\t{{ item.group_info.name }}"
+ with_items:
+ - "{{ administrators_group }}"
+ - "{{ non_interactive_users_group }}"
+ - "{{ developers_group }}"
+ - "{{ mergers_group }}"
+ - "{{ mirroring_tools_group }}"
+ - "{{ release_team_group }}"
+ - "{{ reviewers_group }}"
+ - "{{ testers_group }}"
+
+# it'd be nice if this module existed... but it doesn't right now. You'll have
+# to commit the files manually.
+#
+# - name: push access configuration for all repos
+# git_commit_in_branch:
+# repo: ssh://{{ env.GERRIT_ADMIN_USERNAME }}@{{ env.GERRIT_URL}}:29418/All-Projects
+# ref: refs/meta/config
+# source: All-Projects
+# committer_name: Baserock Gerrit configuration scripts
+# committer_email: admin@baserock.org
+# commit_message: >
+# Update global configuration.
+#
+# This commit was made by an Ansible playbook living in
+# git://git.baserock.org/baserock/baserock/infrastructure.
diff --git a/baserock_gerrit/gerrit.config b/baserock_gerrit/gerrit.config
new file mode 100644
index 00000000..3e85b5c7
--- /dev/null
+++ b/baserock_gerrit/gerrit.config
@@ -0,0 +1,49 @@
+# This is the main Gerrit configuration. If you make changes to this
+# file, rerun `ansible-playbook -i hosts baserock_gerrit/instance-config.yml`
+# to deploy them to production.
+
+[gerrit]
+ basePath = git
+ canonicalWebUrl = https://gerrit.baserock.org/
+[database]
+ type = mysql
+ hostname = 192.168.222.30
+ database = gerrit
+ username = gerrit
+[index]
+ type = LUCENE
+[auth]
+ type = OPENID_SSO
+ allowedOpenID = https://openid.baserock.org/
+ trustedOpenID = https://openid.baserock.org/
+ # XRDS is a mechanism for saying 'here are the services I provide'. Gerrit
+ # expects the URL provided here to describe the OpenID provider service
+ # using XRDS.
+ openIdSsoUrl = https://openid.baserock.org/openid/xrds/
+[sendemail]
+ smtpServer = 192.168.222.111
+ # Send mails as '${user} (Code Review) <gerrit@baserock.org>'
+ # The gerrit@baserock.org email comes from the user.email setting
+ # below
+ from = MIXED
+[user]
+ name = Baserock Gerrit
+ email = gerrit@baserock.org
+[sshd]
+ listenAddress = *:29418
+[httpd]
+ listenUrl = proxy-https://*:8080/
+[cache]
+ directory = cache
+[cache "web_sessions"]
+ # Remember user logins for a year (default is 12 hours, which gets a
+ # bit annoying).
+ maxAge = 1 y
+
+# It seems like a bad idea to enable remote administration of plugins, but
+# there is absolutely no information available on how to do 'local'
+# administration of Gerrit plugins, so we can't really avoid it.
+[plugins]
+ allowRemoteAdmin = true
diff --git a/baserock_gerrit/gerrit.service b/baserock_gerrit/gerrit.service
new file mode 100644
index 00000000..478693c3
--- /dev/null
+++ b/baserock_gerrit/gerrit.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Gerrit Code Review Server
+After=network.target
+
+[Service]
+User=gerrit
+Group=gerrit
+Type=simple
+StandardOutput=syslog
+StandardError=syslog
+SyslogIdentifier=gerrit
+ExecStart={{ run_gerrit }} daemon --site-path /srv/gerrit --console-log
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git a/baserock_gerrit/instance-backup-config.yml b/baserock_gerrit/instance-backup-config.yml
new file mode 100644
index 00000000..60434b5d
--- /dev/null
+++ b/baserock_gerrit/instance-backup-config.yml
@@ -0,0 +1,29 @@
+# Instance backup configuration for the baserock.org Gerrit system.
+---
+- hosts: gerrit
+ gather_facts: false
+ vars:
+ FRONTEND_IP: 192.168.222.21
+ tasks:
+ - name: backup-snapshot script
+ copy: src=../backup-snapshot dest=/usr/bin/backup-snapshot mode=755
+
+ - name: backup-snapshot config
+ copy: src=backup-snapshot.conf dest=/etc/backup-snapshot.conf
+
+ # Would be good to limit this to 'backup' user.
+ - name: passwordless sudo
+ lineinfile: dest=/etc/sudoers state=present line='%wheel ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s'
+
+ # We need to give the backup automation 'root' access, because it needs to
+ # manage system services, LVM volumes, and mounts, and because it needs to
+ # be able to read private data. The risk of having the backup key
+ # compromised is mitigated by only allowing it to execute the
+ # 'backup-snapshot' script, and limiting the hosts it can be used from.
+ - name: access for backup SSH key
+ authorized_key:
+ user: root
+ key: "{{ lookup('file', '../keys/backup.key.pub') }}"
+ # Quotes are important in these options: the OpenSSH server will reject
+ # the entry if the 'from' or 'command' values are not quoted.
+ key_options: 'from="{{FRONTEND_IP}}",no-agent-forwarding,no-port-forwarding,no-X11-forwarding,command="/usr/bin/backup-snapshot"'
diff --git a/baserock_gerrit/instance-ca-certificate-config.yml b/baserock_gerrit/instance-ca-certificate-config.yml
new file mode 100644
index 00000000..0424b176
--- /dev/null
+++ b/baserock_gerrit/instance-ca-certificate-config.yml
@@ -0,0 +1,28 @@
+# The CA chain needed for the baserock.org certificate we use is present in
+# the system, but it's not present in the set of trusted root certificates
+# bundled with Java.
+#
+# We need Gerrit to trust the baserock.org certificate so that it will trust
+# https://openid.baserock.org/.
+#
+# This playbook is a hack at present: the second time you run it, the command
+# will fail because the certificate is already present. There is a proposed
+# Ansible module that can do this in a nicer way:
+# <https://github.com/ansible/ansible-modules-extras/pull/286/commits>.
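+#
+# Until then, a manual check avoids the duplicate-import failure (illustrative;
+# keytool stores an import made without -alias under the default alias 'mykey'):
+#
+#   $JRE_DIR/jre/bin/keytool -list -storepass changeit \
+#       -keystore $JRE_DIR/jre/lib/security/cacerts -alias mykey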
+---
+- hosts: gerrit
+ gather_facts: False
+ vars:
+ JRE_DIR: /opt/jdk1.8.0_40
+ tasks:
+ - name: baserock.org SSL certificate with chain of trust
+ copy: src=../certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert dest=/home/gerrit
+
+ - name: install SSL certificate into Java certificate keystore
+ shell: >
+ {{ JRE_DIR }}/jre/bin/keytool
+ -file /home/gerrit/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert
+ -importcert
+ -keystore {{ JRE_DIR }}/jre/lib/security/cacerts
+ -storepass changeit
+ -noprompt
diff --git a/baserock_gerrit/instance-config.yml b/baserock_gerrit/instance-config.yml
new file mode 100644
index 00000000..8a913212
--- /dev/null
+++ b/baserock_gerrit/instance-config.yml
@@ -0,0 +1,97 @@
+# Instance-specific configuration for the baserock.org Gerrit system.
+#
+# You must have the Java SE Runtime Environment binary available in the
+# baserock_gerrit directory when you run this script.
+#
+# Download it from here:
+# <http://www.oracle.com/technetwork/java/javase/downloads/jre8-downloads-2133155.html>
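+#
+# Deploy with the same invocation noted in gerrit.config:
+#
+#   ansible-playbook -i hosts baserock_gerrit/instance-config.yml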
+#
+- hosts: gerrit
+ gather_facts: False
+ vars:
+ GERRIT_VERSION: 2.9.4
+
+ # Download from http://www.oracle.com/technetwork/java/javase/downloads/server-jre8-downloads-2133154.html
+ JRE_FILE: server-jre-8u40-linux-x64.tar.gz
+ # This path should correspond to where the JRE ends up if you extract the
+ # downloaded tarball in /opt.
+ JRE_DIR: /opt/jdk1.8.0_40
+
+ # Download from http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html
+ JCE_FILE: jce_policy-8.zip
+
+ run_gerrit: "{{ JRE_DIR }}/bin/java -jar /opt/gerrit/gerrit-{{ GERRIT_VERSION }}.war"
+ vars_files:
+ - ../database/baserock_gerrit.database_password.yml
+ tasks:
+ - name: add gerrit user
+ user:
+ name: gerrit
+ shell: /bin/false
+ generate_ssh_key: yes
+ ssh_key_comment: gerrit@baserock.org
+
+ - name: unpack the Java Runtime Environment
+ unarchive: src={{ JRE_FILE }} dest=/opt owner=root group=root creates={{ JRE_DIR }}
+
+ # The Java Cryptography Extensions are needed in order to enable all SSH
+ # ciphers, due to US export restrictions.
+ - name: unpack the Java Cryptography Extensions
+ unarchive: src={{ JCE_FILE }} dest=/opt owner=root group=root creates=/opt/UnlimitedJCEPolicyJDK8/
+
+ - name: install the Java Cryptography Extensions
+ file: src=/opt/UnlimitedJCEPolicyJDK8/{{ item }} dest={{ JRE_DIR }}/jre/lib/security/{{ item }} state=link force=yes
+ with_items:
+ - local_policy.jar
+ - US_export_policy.jar
+
+ - name: create /opt/gerrit
+ file: path=/opt/gerrit state=directory
+
+ - name: download Gerrit
+ get_url:
+ url: https://gerrit-releases.storage.googleapis.com/gerrit-{{ GERRIT_VERSION }}.war
+ dest: /opt/gerrit/gerrit-{{ GERRIT_VERSION }}.war
+
+ - include: ../tasks/create-data-volume.yml lv_name=gerrit lv_size=25g mountpoint=/srv/gerrit
+
+ - name: ensure 'gerrit' user owns /srv/gerrit
+ file: path=/srv/gerrit owner=gerrit group=gerrit state=directory
+
+ - name: initialise Gerrit application directory
+ command: "{{ run_gerrit }} init -d /srv/gerrit creates=/srv/gerrit/etc/gerrit.config"
+
+ - name: extract and install some plugins for gerrit
+ shell: unzip /opt/gerrit/gerrit-{{ GERRIT_VERSION }}.war WEB-INF/plugins/{{ item }}.jar -p > /srv/gerrit/plugins/{{ item }}.jar
+ args:
+ creates: /srv/gerrit/plugins/{{ item }}.jar
+ with_items:
+ - replication
+ - download-commands
+
+ - name: download extra Java libraries
+ get_url:
+ url: "{{ item }}"
+ dest: /srv/gerrit/lib
+ with_items:
+ # MySQL Java Connector
+ - http://repo2.maven.org/maven2/mysql/mysql-connector-java/5.1.21/mysql-connector-java-5.1.21.jar
+
+ # Bouncy Castle Crypto APIs for Java. The interactive `gerrit init`
+ # command recommends installing these libraries, and who am I to argue?
+ - http://www.bouncycastle.org/download/bcpkix-jdk15on-149.jar
+ - http://www.bouncycastle.org/download/bcprov-jdk15on-149.jar
+
+ - name: install gerrit.config
+ template: src=gerrit.config dest=/srv/gerrit/etc/gerrit.config
+
+ - name: set database password
+ command: git config -f /srv/gerrit/etc/secure.config database.password "{{ baserock_gerrit_password }}"
+ sudo: yes
+ sudo_user: gerrit
+
+ - name: install gerrit.service
+ template: src=gerrit.service dest=/etc/systemd/system/gerrit.service
+
+ - name: start Gerrit service
+ service: name=gerrit enabled=yes state=restarted
diff --git a/baserock_gerrit/instance-mirroring-config.yml b/baserock_gerrit/instance-mirroring-config.yml
new file mode 100644
index 00000000..19ac76cc
--- /dev/null
+++ b/baserock_gerrit/instance-mirroring-config.yml
@@ -0,0 +1,68 @@
+# This Ansible playbook configures mirroring in and out of Gerrit.
+#
+# To run it, use:
+# ansible-playbook -i hosts baserock_gerrit/instance-mirroring-config.yml
+#
+# It expects the SSH key for the 'lorry' user to exist at
+# ../keys/lorry-gerrit.key.
+#
+# This script currently doesn't handle the lorry-controller.conf file that
+# controls what lorry-controller mirrors into Gerrit. To set up or modify
+# the lorry-controller configuration, you need to commit your changes to the
+# 'local-config/lorries' project in Gerrit.
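+#
+# For example (the account name is illustrative):
+#
+#   git clone ssh://you@gerrit.baserock.org:29418/local-config/lorries
+#   cd lorries
+#   <edit lorry-controller.conf, commit, and push to 'master'>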
+---
+- hosts: gerrit
+ gather_facts: no
+ sudo: yes
+ tasks:
+ - name: Lorry user
+ user: name=lorry comment="Lorry mirroring service"
+
+ # Ansible can generate a new SSH key for Lorry when we add the user,
+ # but it seems tricky to then extract this and add it to the 'lorry' Gerrit
+ # user.
+ - name: SSH private key for Lorry user
+ copy: src=../keys/lorry-gerrit.key dest=~/.ssh/id_rsa mode=600
+ sudo_user: lorry
+
+ - name: SSH public key for Lorry user
+ copy: src=../keys/lorry-gerrit.key.pub dest=~/.ssh/id_rsa.pub mode=644
+ sudo_user: lorry
+
+ - name: directory in /etc for Lorry Controller system configuration
+ file: dest=/etc/lorry-controller state=directory
+
+ - name: Lorry tool configuration
+ copy: src=lorry.conf dest=/etc/lorry.conf
+
+ - name: Lorry Controller system configuration
+ copy:
+ src=lorry-controller/{{ item }}
+ dest=/etc/lorry-controller/{{ item }}
+ with_items:
+ - minion.conf
+ - webapp.conf
+
+ - name: enable and restart core lorry controller services.
+ service: name={{ item }} enabled=yes state=restarted
+ with_items:
+ - lighttpd-lorry-controller-webapp.service
+ - lorry-controller-minion@1.service
+
+ - name: enable lorry-controller scheduled activity timers
+ service: name={{ item }} enabled=yes
+ with_items:
+ - lorry-controller-ls-troves.timer
+ - lorry-controller-readconf.timer
+ - lorry-controller-remove-ghost-jobs.timer
+ - lorry-controller-remove-old-jobs.timer
+ - lorry-controller-status.timer
+
+ - name: gerrit-replication configuration
+ copy: src=replication.config dest=/srv/gerrit/etc
+ notify:
+ - restart gerrit
+
+  handlers:
+    - name: restart gerrit
+      service: name=gerrit state=restarted
diff --git a/baserock_gerrit/lorry-controller.conf b/baserock_gerrit/lorry-controller.conf
new file mode 100644
index 00000000..3f4818fe
--- /dev/null
+++ b/baserock_gerrit/lorry-controller.conf
@@ -0,0 +1,38 @@
+[
+ {
+ "type": "trove",
+
+ "trovehost": "git.baserock.org",
+ "protocol": "http",
+
+ "prefixmap": {
+ "baserock": "baserock",
+ "delta": "delta"
+ },
+
+ "ignore": [
+ "baserock/baserock/documentation",
+ "baserock/baserock/jenkins-config",
+ "baserock/baserock/lorries",
+ "baserock/baserock/morph-cache-server",
+ "baserock/baserock/morphs",
+ "baserock/baserock/remo",
+ "baserock/local-config/mason",
+ "baserock/site/*",
+ "baserock/tests/*",
+ "delta/*"
+ ],
+
+ "ls-interval": "4H",
+ "interval": "2M"
+ },
+
+ {
+ "type": "lorries",
+ "interval": "2M",
+ "prefix": "delta",
+ "globs": [
+ "delta-lorries/*.lorry"
+ ]
+ }
+]
diff --git a/baserock_gerrit/lorry-controller/minion.conf b/baserock_gerrit/lorry-controller/minion.conf
new file mode 100644
index 00000000..99abdba8
--- /dev/null
+++ b/baserock_gerrit/lorry-controller/minion.conf
@@ -0,0 +1,6 @@
+[config]
+log = syslog
+log-level = debug
+webapp-host = localhost
+webapp-port = 12765
+webapp-timeout = 3600
diff --git a/baserock_gerrit/lorry-controller/webapp.conf b/baserock_gerrit/lorry-controller/webapp.conf
new file mode 100644
index 00000000..dde0d0f6
--- /dev/null
+++ b/baserock_gerrit/lorry-controller/webapp.conf
@@ -0,0 +1,13 @@
+[config]
+log = /home/lorry/webapp.log
+log-max = 100M
+log-keep = 0
+log-level = debug
+statedb = /home/lorry/webapp.db
+configuration-directory = /home/lorry/confgit
+status-html = /home/lorry/lc-status.html
+wsgi = yes
+debug-port = 12765
+templates = /usr/share/lorry-controller/templates
+confgit-url = http://localhost:8080/local-config/lorries
+git-server-type = gerrit
diff --git a/baserock_gerrit/lorry.conf b/baserock_gerrit/lorry.conf
new file mode 100644
index 00000000..03c1177b
--- /dev/null
+++ b/baserock_gerrit/lorry.conf
@@ -0,0 +1,8 @@
+[config]
+mirror-base-url-push = ssh://lorry@localhost:29418/
+bundle = never
+tarball = never
+working-area = /home/lorry/working-area
+verbose = yes
+log = /dev/stdout
+log-level = debug
diff --git a/baserock_gerrit/replication.config b/baserock_gerrit/replication.config
new file mode 100644
index 00000000..067acc9b
--- /dev/null
+++ b/baserock_gerrit/replication.config
@@ -0,0 +1,30 @@
+# Configuration for gerrit-replication plugin.
+#
+# This handles pushing changes from gerrit.baserock.org to git.baserock.org.
+#
+# To deploy changes in this file to production, run:
+# ansible-playbook -i hosts baserock_gerrit/instance-mirroring-config.yml
+
+[remote "trove"]
+ url = ssh://git@git.baserock.org/${name}.git
+
+ # Disable force-pushing and only sync 'master' and tags.
+ #
+ # This will probably prove annoying and we'll need to mirror more branches in
+ # future. But right now there are hundreds of personal branches and I want to
+ # avoid potential push errors for branches we don't care about.
+ push = refs/heads/master:refs/heads/master
+ push = refs/tags/*:refs/tags/*
+
+ createMissingRepositories = false
+ replicatePermissions = false
+
+ # What to sync: this is a regexp that must match the whole project name.
+ projects = ^baserock/.*$
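+  # For example, 'baserock/baserock/definitions' matches; 'delta/linux' does not.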
+
+ # If true, gerrit-replication will remove remote branches that are absent in
+ # the trove. This is a bit dangerous, but necessary if we are to make gerrit
+ # the 'master'. Note that if you set 'authGroup', branches that are not
+ # visible to the configured authorisation group will also be removed. So do
+ # not set 'authGroup' to anything.
+ mirror = false
diff --git a/baserock_hosts b/baserock_hosts
new file mode 100644
index 00000000..0d67c3fd
--- /dev/null
+++ b/baserock_hosts
@@ -0,0 +1,40 @@
+# Ansible hosts file for Baserock infrastructure.
+# See: <http://docs.ansible.com/intro_inventory.html>.
+
+# We don't have DNS working for instances in the OpenStack cloud we use, which
+# makes this file a lot more fiddly than it would be otherwise. Access to these
+# machines works because the `ansible.cfg` file in the same directory redirects
+# all SSH access through the frontend machine.
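+#
+# (Typically that means an ssh_args ProxyCommand in ansible.cfg, along the
+# lines of `-o ProxyCommand="ssh -W %h:%p fedora@185.43.218.170"`; see
+# ansible.cfg for the exact settings.)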
+
+[baserock]
+baserock-mason-x86-64 ansible_ssh_host=192.168.222.48
+cache ansible_ssh_host=192.168.222.14
+devel-system-64b ansible_ssh_host=192.168.222.41
+firehose ansible_ssh_host=192.168.222.45
+gerrit ansible_ssh_host=192.168.222.69
+git ansible_ssh_host=192.168.222.58
+irclogs ansible_ssh_host=192.168.222.74
+#mason-armv7lhf ansible_ssh_host=192.168.222.15
+mason-x86-32 ansible_ssh_host=192.168.222.81
+mason-x86-64 ansible_ssh_host=192.168.222.80
+
+[fedora]
+frontend-haproxy ansible_ssh_host=185.43.218.170
+database-mariadb ansible_ssh_host=192.168.222.30
+mail ansible_ssh_host=192.168.222.111
+openid ansible_ssh_host=192.168.222.67
+storyboard ansible_ssh_host=192.168.222.40
+
+[ubuntu]
+paste ansible_ssh_host=192.168.222.6
+#testgerrit ansible_ssh_host=192.168.222.46
+
+
+[baserock:vars]
+ansible_ssh_user=root
+
+[ubuntu:vars]
+ansible_ssh_user=ubuntu
+
+[fedora:vars]
+ansible_ssh_user=fedora
diff --git a/baserock_irclogs/clusters/irclogs.morph b/baserock_irclogs/clusters/irclogs.morph
new file mode 100644
index 00000000..8c899d03
--- /dev/null
+++ b/baserock_irclogs/clusters/irclogs.morph
@@ -0,0 +1,13 @@
+name: irclogs
+kind: cluster
+systems:
+- morph: baserock_irclogs/systems/irclogs-x86_64.morph
+ deploy:
+ irclogs:
+ type: openstack
+ location: http://compute.datacentred.io:5000/v2.0/
+ DISK_SIZE: 4G
+ HOSTNAME: irclogs
+ CLOUD_INIT: yes
+ KERNEL_ARGS: console=ttyS0 console=tty0
+ OPENSTACK_IMAGENAME: irclogs
diff --git a/baserock_irclogs/files/baserock.conf b/baserock_irclogs/files/baserock.conf
new file mode 100644
index 00000000..df54affd
--- /dev/null
+++ b/baserock_irclogs/files/baserock.conf
@@ -0,0 +1,185 @@
+supybot.nick: brlogger
+supybot.nick.alternates: %s` %s_
+supybot.ident: supybot
+supybot.user:
+supybot.networks: freenode
+supybot.networks.freenode.password:
+supybot.networks.freenode.servers: irc.freenode.com:6667
+supybot.networks.freenode.channels: #baserock
+supybot.networks.freenode.channels.key:
+supybot.networks.freenode.ssl: False
+supybot.reply.format.time: %I:%M %p, %B %d, %Y
+supybot.reply.format.time.elapsed.short: False
+supybot.reply.maximumLength: 131072
+supybot.reply.mores: True
+supybot.reply.mores.maximum: 50
+supybot.reply.mores.length: 0
+supybot.reply.mores.instant: 1
+supybot.reply.oneToOne: True
+supybot.reply.whenNotCommand: True
+supybot.reply.error.detailed: False
+supybot.reply.error.inPrivate: False
+supybot.reply.error.withNotice: False
+supybot.reply.error.noCapability: False
+supybot.reply.inPrivate: False
+supybot.reply.withNotice: False
+supybot.reply.withNoticeWhenPrivate: False
+supybot.reply.withNickPrefix: True
+supybot.reply.whenNotAddressed: False
+supybot.reply.requireChannelCommandsToBeSentInChannel: False
+supybot.reply.showSimpleSyntax: False
+supybot.reply.whenAddressedBy.chars:
+supybot.reply.whenAddressedBy.strings:
+supybot.reply.whenAddressedBy.nick: True
+supybot.reply.whenAddressedBy.nick.atEnd: False
+supybot.reply.whenAddressedBy.nicks:
+supybot.followIdentificationThroughNickChanges: False
+supybot.alwaysJoinOnInvite: False
+supybot.replies.success: The operation succeeded.
+supybot.replies.error: An error has occurred and has been logged. Please\
+ contact this bot's administrator for more\
+ information.
+supybot.replies.incorrectAuthentication: Your hostmask doesn't match or your\
+ password is wrong.
+supybot.replies.noUser: I can't find %s in my user database. If you didn't\
+ give a user name, then I might not know what your\
+ user is, and you'll need to identify before this\
+ command might work.
+supybot.replies.notRegistered: You must be registered to use this command.\
+ If you are already registered, you must\
+ either identify (using the identify command)\
+ or add a hostmask matching your current\
+ hostmask (using the "hostmask add" command).
+supybot.replies.noCapability: You don't have the %s capability. If you think\
+ that you should have this capability, be sure\
+ that you are identified before trying again.\
+ The 'whoami' command can tell you if you're\
+ identified.
+supybot.replies.genericNoCapability: You're missing some capability you\
+ need. This could be because you\
+ actually possess the anti-capability\
+ for the capability that's required of\
+ you, or because the channel provides\
+ that anti-capability by default, or\
+ because the global capabilities include\
+ that anti-capability. Or, it could be\
+ because the channel or\
+ supybot.capabilities.default is set to\
+ False, meaning that no commands are\
+ allowed unless explicitly in your\
+ capabilities. Either way, you can't do\
+ what you want to do.
+supybot.replies.requiresPrivacy: That operation cannot be done in a channel.
+supybot.replies.possibleBug: This may be a bug. If you think it is, please\
+ file a bug report at <http://sourceforge.net/tr\
+ acker/?func=add&group_id=58965&atid=489447>.
+supybot.snarfThrottle: 10.0
+supybot.upkeepInterval: 3600
+supybot.flush: True
+supybot.commands.quotes: "
+supybot.commands.nested: True
+supybot.commands.nested.maximum: 10
+supybot.commands.nested.brackets: []
+supybot.commands.nested.pipeSyntax: False
+supybot.commands.defaultPlugins.addcapability: Admin
+supybot.commands.defaultPlugins.capabilities: User
+supybot.commands.defaultPlugins.disable: Owner
+supybot.commands.defaultPlugins.enable: Owner
+supybot.commands.defaultPlugins.help: Misc
+supybot.commands.defaultPlugins.ignore: Admin
+supybot.commands.defaultPlugins.importantPlugins: Plugin Admin Misc User Owner Config Channel
+supybot.commands.defaultPlugins.list: Misc
+supybot.commands.defaultPlugins.reload: Owner
+supybot.commands.defaultPlugins.removecapability: Admin
+supybot.commands.defaultPlugins.unignore: Admin
+supybot.commands.disabled:
+supybot.abuse.flood.command: True
+supybot.abuse.flood.command.maximum: 12
+supybot.abuse.flood.command.punishment: 300
+supybot.abuse.flood.command.invalid: True
+supybot.abuse.flood.command.invalid.maximum: 5
+supybot.abuse.flood.command.invalid.punishment: 600
+supybot.drivers.poll: 1.0
+supybot.drivers.module: default
+supybot.drivers.maxReconnectWait: 300.0
+supybot.directories.conf: /home/supybot/conf
+supybot.directories.data: /home/supybot/data
+supybot.directories.data.tmp: /home/supybot/data/tmp
+supybot.directories.backup: /home/supybot/backup
+supybot.directories.plugins: /home/supybot/plugins
+supybot.directories.log: /home/supybot/logs
+supybot.plugins: Admin ChannelLogger Misc User Owner Config Channel
+supybot.plugins.Admin: True
+supybot.plugins.Admin.public: True
+supybot.plugins.Channel: True
+supybot.plugins.Channel.public: True
+supybot.plugins.Channel.alwaysRejoin: True
+supybot.plugins.ChannelLogger: True
+supybot.plugins.ChannelLogger.public: True
+supybot.plugins.ChannelLogger.enable: True
+supybot.plugins.ChannelLogger.flushImmediately: False
+supybot.plugins.ChannelLogger.stripFormatting: True
+supybot.plugins.ChannelLogger.timestamp: True
+supybot.plugins.ChannelLogger.noLogPrefix: [nolog]
+supybot.plugins.ChannelLogger.rotateLogs: True
+supybot.plugins.ChannelLogger.filenameTimestamp: %Y-%m-%d
+supybot.plugins.ChannelLogger.directories: True
+supybot.plugins.ChannelLogger.directories.network: True
+supybot.plugins.ChannelLogger.directories.channel: True
+supybot.plugins.ChannelLogger.directories.timestamp: False
+supybot.plugins.ChannelLogger.directories.timestamp.format: %B
+supybot.plugins.Config: True
+supybot.plugins.Config.public: True
+supybot.plugins.Misc: True
+supybot.plugins.Misc.public: True
+supybot.plugins.Misc.listPrivatePlugins: True
+supybot.plugins.Misc.timestampFormat: [%H:%M:%S]
+supybot.plugins.Misc.last.nested.includeTimestamp: False
+supybot.plugins.Misc.last.nested.includeNick: False
+supybot.plugins.Owner: True
+supybot.plugins.Owner.public: True
+supybot.plugins.Owner.quitMsg:
+supybot.plugins.User: True
+supybot.plugins.User.public: True
+supybot.plugins.alwaysLoadImportant: True
+supybot.databases:
+supybot.databases.users.filename: users.conf
+supybot.databases.users.timeoutIdentification: 0
+supybot.databases.users.allowUnregistration: False
+supybot.databases.ignores.filename: ignores.conf
+supybot.databases.channels.filename: channels.conf
+supybot.databases.plugins.channelSpecific: True
+supybot.databases.plugins.channelSpecific.link: #
+supybot.databases.plugins.channelSpecific.link.allow: True
+supybot.databases.types.cdb: True
+supybot.databases.types.cdb.maximumModifications: 0.5
+supybot.protocols.irc.banmask: host user
+supybot.protocols.irc.strictRfc: False
+supybot.protocols.irc.umodes:
+supybot.protocols.irc.vhost:
+supybot.protocols.irc.maxHistoryLength: 1000
+supybot.protocols.irc.throttleTime: 1.0
+supybot.protocols.irc.ping: True
+supybot.protocols.irc.ping.interval: 120
+supybot.protocols.irc.queuing.duplicates: False
+supybot.protocols.irc.queuing.rateLimit.join: 0.0
+supybot.protocols.http.peekSize: 4096
+supybot.protocols.http.proxy:
+supybot.defaultIgnore: False
+supybot.externalIP:
+supybot.defaultSocketTimeout: 10
+supybot.pidFile:
+supybot.debug.threadAllCommands: False
+supybot.debug.flushVeryOften: False
+supybot.log.format: %(levelname)s %(asctime)s %(name)s %(message)s
+supybot.log.level: INFO
+supybot.log.timestampFormat: %Y-%m-%dT%H:%M:%S
+supybot.log.stdout: True
+supybot.log.stdout.colorized: False
+supybot.log.stdout.wrap: True
+supybot.log.stdout.format: %(levelname)s %(asctime)s %(message)s
+supybot.log.stdout.level: INFO
+supybot.log.plugins.individualLogfiles: False
+supybot.log.plugins.format: %(levelname)s %(asctime)s %(message)s
+supybot.capabilities: -owner -admin -trusted
+supybot.capabilities.default: True
diff --git a/baserock_irclogs/files/irclogs-generation.service b/baserock_irclogs/files/irclogs-generation.service
new file mode 100644
index 00000000..ac480a15
--- /dev/null
+++ b/baserock_irclogs/files/irclogs-generation.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Irclogs generation
+Requires=supybot.service
+
+[Service]
+Type=oneshot
+User=supybot
+ExecStart=/usr/bin/logs2html -t 'IRC logs for #baserock' -p 'IRC logs for #baserock for ' /home/supybot/logs/ChannelLogger/freenode/#baserock/
+ExecStart=/bin/sh -c "/usr/bin/rsync -a /home/supybot/logs/ChannelLogger/freenode/\#baserock/*html /home/supybot/logs/ChannelLogger/freenode/\#baserock/*css /srv/irclogs/"
+
+[Install]
+WantedBy=multi-user.target
diff --git a/baserock_irclogs/files/irclogs-generation.timer b/baserock_irclogs/files/irclogs-generation.timer
new file mode 100644
index 00000000..c236c3d6
--- /dev/null
+++ b/baserock_irclogs/files/irclogs-generation.timer
@@ -0,0 +1,9 @@
+[Unit]
+Description=Generate HTML IRC logs every 5 minutes
+
+[Timer]
+OnUnitActiveSec=5min
+Unit=irclogs-generation.service
+
+[Install]
+WantedBy=multi-user.target
diff --git a/baserock_irclogs/files/lighttpd-irclogs.conf b/baserock_irclogs/files/lighttpd-irclogs.conf
new file mode 100644
index 00000000..0b40346a
--- /dev/null
+++ b/baserock_irclogs/files/lighttpd-irclogs.conf
@@ -0,0 +1,16 @@
+server.document-root = "/srv/irclogs/"
+
+server.port = 80
+
+server.username = "supybot"
+server.groupname = "supybot"
+
+mimetype.assign = (
+ ".html" => "text/html",
+ ".css" => "text/css",
+ ".txt" => "text/plain",
+ ".jpg" => "image/jpeg",
+ ".png" => "image/png"
+)
+
+index-file.names = ( "index.html" )
diff --git a/baserock_irclogs/files/lighttpd-irclogs.service b/baserock_irclogs/files/lighttpd-irclogs.service
new file mode 100644
index 00000000..1c09b0d9
--- /dev/null
+++ b/baserock_irclogs/files/lighttpd-irclogs.service
@@ -0,0 +1,11 @@
+[Install]
+WantedBy=multi-user.target
+
+[Unit]
+Description=Lighttpd Web Server
+After=network.target
+
+[Service]
+ExecStart=/usr/sbin/lighttpd -f /etc/lighttpd-irclogs.conf -D
+Restart=always
+
diff --git a/baserock_irclogs/files/supybot.service b/baserock_irclogs/files/supybot.service
new file mode 100644
index 00000000..49720f70
--- /dev/null
+++ b/baserock_irclogs/files/supybot.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=Run supybot daemon
+Requires=network-online.target
+After=network-online.target
+# If there's a shared /home or /var subvolume, it must be
+# mounted before this unit runs.
+Requires=local-fs.target
+After=local-fs.target
+
+ConditionPathExists=/home/supybot/supybot-baserock.conf
+
+[Service]
+ExecStart=/usr/bin/supybot /home/supybot/supybot-baserock.conf
+User=supybot
+Restart=always
diff --git a/baserock_irclogs/irclogs.configure b/baserock_irclogs/irclogs.configure
new file mode 100644
index 00000000..8a2421ef
--- /dev/null
+++ b/baserock_irclogs/irclogs.configure
@@ -0,0 +1,45 @@
+#!/bin/sh
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+echo 'supybot:x:1010:1010:Supybot User:/home/supybot:/bin/bash' >> "$ROOT/etc/passwd"
+echo 'supybot:x:1010:' >> "$ROOT/etc/group"
+mkdir -p "$ROOT/home/supybot"
+mkdir -p "$ROOT/srv/irclogs"
+chown -R 1010:1010 "$ROOT/home/supybot"
+chown -R 1010:1010 "$ROOT/srv/irclogs"
+
+# Copy supybot configuration
+install -m 644 -g 1010 -o 1010 baserock_irclogs/files/baserock.conf "$ROOT"/home/supybot/supybot-baserock.conf
+
+
+# Enable all the services needed
+services="irclogs-generation.service \
+irclogs-generation.timer \
+lighttpd-irclogs.service \
+supybot.service"
+
+for service in $services; do
+ cp "baserock_irclogs/files/$service" "$ROOT/etc/systemd/system/$service"
+ ln -sf "/etc/systemd/system/$service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$service"
+done
+
+# Copy lighttpd configuration
+cp baserock_irclogs/files/lighttpd-irclogs.conf "$ROOT"/etc/lighttpd-irclogs.conf
diff --git a/baserock_irclogs/strata/irclogs.morph b/baserock_irclogs/strata/irclogs.morph
new file mode 100644
index 00000000..a2b08fc6
--- /dev/null
+++ b/baserock_irclogs/strata/irclogs.morph
@@ -0,0 +1,16 @@
+name: irclogs
+kind: stratum
+description: Tools to create logs of an IRC channel
+build-depends:
+- morph: strata/python-core.morph
+chunks:
+- name: supybot
+ repo: http://gitorious.org/supybot/supybot.git
+ ref: 27a4ef0ed338a38f34180012cee7ec55a5ae11d9
+ unpetrify-ref: v0.83.4.1
+ build-depends: []
+- name: irclog2html
+ repo: git://github.com/mgedmin/irclog2html
+ ref: 2e399c2bdbe2442794d0ac7aa3a3941f826c74dc
+ unpetrify-ref: 2.14.0
+ build-depends: []
diff --git a/baserock_irclogs/systems/irclogs-x86_64.morph b/baserock_irclogs/systems/irclogs-x86_64.morph
new file mode 100644
index 00000000..07f42119
--- /dev/null
+++ b/baserock_irclogs/systems/irclogs-x86_64.morph
@@ -0,0 +1,33 @@
+name: irclogs-system-x86_64
+kind: system
+description: The set of strata required to have a minimal system for a 64-bit x86
+ system.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: irclogs
+ morph: baserock_irclogs/strata/irclogs.morph
+- name: pcre-utils
+ morph: strata/pcre-utils.morph
+- name: lighttpd-server
+ morph: strata/lighttpd-server.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- cloud-init
+- baserock_irclogs/irclogs
diff --git a/baserock_mail/instance-config.yml b/baserock_mail/instance-config.yml
new file mode 100644
index 00000000..f4cb4927
--- /dev/null
+++ b/baserock_mail/instance-config.yml
@@ -0,0 +1,75 @@
+# Configuration for Baserock mail relay
+#
+# This Ansible playbook expects to be run on a Fedora 21 Cloud image.
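+#
+# Run it the same way as the other playbooks in this repository:
+#
+#   ansible-playbook -i hosts baserock_mail/instance-config.yml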
+---
+- hosts: mail
+ gather_facts: false
+ sudo: yes
+ vars:
+ LOCAL_IP: 192.168.222.111
+ PUBLIC_IP: 185.43.218.171
+ tasks:
+ - name: exim installation
+ yum: name=exim state=installed
+
+ # Fedora provides a default /etc/exim/exim.conf. Rather than copy it and
+ # overwrite it, since we only need to make a few changes, I've used the
+ # lineinfile module to do search-and-replace. It's a bit ugly though. It
+ # may be better to just embed exim.conf.
+
+ # Several restrictions here are also enforced by the internal-mail-relay
+ # security group in firewall.yml, which only opens port 25, and only for
+ # traffic from the local network.
+
+ # This machine is only for sending mail.
+ - name: do not accept any incoming mail
+ lineinfile:
+ regexp: '^domainlist\s+local_domains.*$'
+ line: 'domainlist local_domains = '
+ dest: /etc/exim/exim.conf
+
+ - name: only accept mail from local network
+ lineinfile:
+ regexp: '^hostlist\s+relay_from_hosts.*$'
+ line: 'hostlist relay_from_hosts = 192.168.222.0/24'
+ dest: /etc/exim/exim.conf
+
+ - name: only listen on internal interface
+ lineinfile:
+ regexp: '^#?local_interfaces.*$'
+ line: 'local_interfaces = <; ::1 ; 127.0.0.1 ; {{ LOCAL_IP }}'
+ insertbefore: BOF
+ dest: /etc/exim/exim.conf
+
+ # The automation email addresses like gerrit@baserock.org do have aliases,
+ # but these are currently configured at Pepperfish, where our MX (mail)
+ # records for baserock.org point. So Exim thinks they are not routable
+ # and refuses to send mail from them, unless we disable this. Note that
+ # the address does have to be routable by something, or the receiving mail
+ # server may reject the mail anyway.
+ - name: do not verify that sender is routable within this Exim instance
+ lineinfile:
+ regexp: '^#?\s*require\s+verify\s+=\s+sender.*$'
+ line: '# require verify = sender'
+ dest: /etc/exim/exim.conf
+
+ # We don't have DNS in the internal baserock.org cloud right now, so this
+ # would be pointless.
+ - name: do not try to resolve hosts making SMTP requests
+ lineinfile:
+ regexp: '^#?\s+host_lookup = .*$'
+ line: '# host_lookup = *'
+ dest: /etc/exim/exim.conf
+
+ # The hostname of the machine will be 'mail', which isn't a fully-qualified
+ # domain name so will be rejected by SMTP servers. Ideally we would have
+ # mail.baserock.org set up and pointing to the floating IP of this machine.
+ # For now, we just have the IP.
+ - name: set primary hostname to public IP
+ lineinfile:
+ regexp: '^#?primary_hostname = .*$'
+ line: 'primary_hostname = {{ PUBLIC_IP }}'
+ dest: /etc/exim/exim.conf
+
+ - name: exim service
+ service: name=exim state=started enabled=yes
diff --git a/baserock_mason_x86_32/distbuild.conf b/baserock_mason_x86_32/distbuild.conf
new file mode 100644
index 00000000..7c2722d9
--- /dev/null
+++ b/baserock_mason_x86_32/distbuild.conf
@@ -0,0 +1,20 @@
+# This machine is not allowed to make outgoing network connections outside
+# the local network, so it cannot use DNS. You must use IP addresses instead
+# of hostnames in this file.
+
+CONTROLLERHOST: mason-x86-32
+
+# This is the IP of cache.baserock.org. Note that the shared-artifact-cache
+# secgroup only allows write access with this local IP.
+ARTIFACT_CACHE_SERVER: 192.168.222.14
+
+DISTBUILD_CONTROLLER: true
+DISTBUILD_WORKER: true
+
+# This is the IP of git.baserock.org.
+TROVE_HOST: 192.168.222.58
+TROVE_ID: baserock
+WORKERS: mason-x86-32
+
+# Do not change the following
+WORKER_SSH_KEY: /etc/distbuild/worker.key
diff --git a/baserock_mason_x86_32/mason-x86-32.morph b/baserock_mason_x86_32/mason-x86-32.morph
new file mode 100644
index 00000000..6fe53fcb
--- /dev/null
+++ b/baserock_mason_x86_32/mason-x86-32.morph
@@ -0,0 +1,20 @@
+name: mason-x86-32
+kind: cluster
+description: |
+ Generic x86_32 Mason image.
+systems:
+- morph: systems/build-system-x86_32.morph
+ deploy:
+ mason:
+ type: openstack
+ location: https://compute.datacentred.io:5000/v2.0
+ CLOUD_INIT: yes
+ DISK_SIZE: 3G
+ DISTBUILD_GENERIC: true
+ INSTALL_FILES: distbuild/manifest
+ KERNEL_ARGS: console=ttyS0 console=tty0
+ MASON_GENERIC: true
+ OPENSTACK_IMAGENAME: baserock_mason_x86_32
+ # Note that this *must* match the name of the instance, as
+ # cloud-init will override this hostname.
+ HOSTNAME: mason-x86-32
diff --git a/baserock_mason_x86_32/mason.conf b/baserock_mason_x86_32/mason.conf
new file mode 100644
index 00000000..9f643a83
--- /dev/null
+++ b/baserock_mason_x86_32/mason.conf
@@ -0,0 +1,19 @@
+# This machine is not allowed to make outgoing network connections outside
+# the local network, so it cannot use DNS. You must use IP addresses instead
+# of hostnames in this file.
+
+# This is the IP of cache.baserock.org. Note that the shared-artifact-cache
+# secgroup only allows write access with this local IP.
+ARTIFACT_CACHE_SERVER: 192.168.222.14
+
+MASON_CLUSTER_MORPHOLOGY: clusters/ci.morph
+MASON_DEFINITIONS_REF: master
+MASON_DISTBUILD_ARCH: x86_32
+MASON_TEST_HOST: None
+
+# This is the IP of git.baserock.org.
+TROVE_HOST: 192.168.222.58
+TROVE_ID: baserock
+
+CONTROLLERHOST: mason-x86-32
+TEST_INFRASTRUCTURE_TYPE: none
diff --git a/baserock_mason_x86_64/distbuild.conf b/baserock_mason_x86_64/distbuild.conf
new file mode 100644
index 00000000..92ebc667
--- /dev/null
+++ b/baserock_mason_x86_64/distbuild.conf
@@ -0,0 +1,20 @@
+# This machine is not allowed to make outgoing network connections outside
+# the local network, so it cannot use DNS. You must use IP addresses instead
+# of hostnames in this file.
+
+CONTROLLERHOST: mason-x86-64
+
+# This is the IP of cache.baserock.org. Note that the shared-artifact-cache
+# secgroup only allows write access with this local IP.
+ARTIFACT_CACHE_SERVER: 192.168.222.14
+
+DISTBUILD_CONTROLLER: true
+DISTBUILD_WORKER: true
+
+# This is the IP of git.baserock.org.
+TROVE_HOST: 192.168.222.58
+TROVE_ID: baserock
+WORKERS: mason-x86-64
+
+# Do not change the following
+WORKER_SSH_KEY: /etc/distbuild/worker.key
diff --git a/baserock_mason_x86_64/mason-x86-64.morph b/baserock_mason_x86_64/mason-x86-64.morph
new file mode 100644
index 00000000..ae17716f
--- /dev/null
+++ b/baserock_mason_x86_64/mason-x86-64.morph
@@ -0,0 +1,20 @@
+name: mason-x86-64
+kind: cluster
+description: |
+ Generic x86_64 Mason image.
+systems:
+- morph: systems/build-system-x86_64.morph
+ deploy:
+ mason:
+ type: openstack
+ location: https://compute.datacentred.io:5000/v2.0
+ CLOUD_INIT: yes
+ DISK_SIZE: 3G
+ DISTBUILD_GENERIC: true
+ INSTALL_FILES: distbuild/manifest
+ KERNEL_ARGS: console=ttyS0 console=tty0
+ MASON_GENERIC: true
+ OPENSTACK_IMAGENAME: baserock_mason_x86_64
+ # Note that this *must* match the name of the instance, as
+ # cloud-init will override this hostname.
+ HOSTNAME: mason-x86-64
diff --git a/baserock_mason_x86_64/mason.conf b/baserock_mason_x86_64/mason.conf
new file mode 100644
index 00000000..90ba1b5e
--- /dev/null
+++ b/baserock_mason_x86_64/mason.conf
@@ -0,0 +1,19 @@
+# This machine is not allowed to make outgoing network connections outside
+# the local network, so it cannot use DNS. You must use IP addresses instead
+# of hostnames in this file.
+
+# This is the IP of cache.baserock.org. Note that the shared-artifact-cache
+# secgroup only allows write access with this local IP.
+ARTIFACT_CACHE_SERVER: 192.168.222.14
+
+MASON_CLUSTER_MORPHOLOGY: clusters/ci.morph
+MASON_DEFINITIONS_REF: master
+MASON_DISTBUILD_ARCH: x86_64
+MASON_TEST_HOST: None
+
+# This is the IP of git.baserock.org.
+TROVE_HOST: 192.168.222.58
+TROVE_ID: baserock
+
+CONTROLLERHOST: mason-x86-64
+TEST_INFRASTRUCTURE_TYPE: none
diff --git a/baserock_openid_provider/baserock_openid_provider/__init__.py b/baserock_openid_provider/baserock_openid_provider/__init__.py
new file mode 100644
index 00000000..8dd54d2a
--- /dev/null
+++ b/baserock_openid_provider/baserock_openid_provider/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+import signals
diff --git a/baserock_openid_provider/baserock_openid_provider/forms.py b/baserock_openid_provider/baserock_openid_provider/forms.py
new file mode 100644
index 00000000..dd6a414d
--- /dev/null
+++ b/baserock_openid_provider/baserock_openid_provider/forms.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+from registration.forms import RegistrationForm
+
+from django import forms
+from django.utils.translation import ugettext_lazy as _
+
+
+class RegistrationFormWithNames(RegistrationForm):
+ # I'd rather just have a 'Full name' box, but django.contrib.auth is
+ # already set up to separate first_name and last_name.
+
+ first_name = forms.CharField(label=_("First name(s)"),
+ required=False)
+ last_name = forms.CharField(label=_("Surname"))
diff --git a/baserock_openid_provider/baserock_openid_provider/settings.py b/baserock_openid_provider/baserock_openid_provider/settings.py
new file mode 100644
index 00000000..a7e892ba
--- /dev/null
+++ b/baserock_openid_provider/baserock_openid_provider/settings.py
@@ -0,0 +1,176 @@
+"""
+Django settings for baserock_openid_provider project.
+
+For more information on this file, see
+https://docs.djangoproject.com/en/1.7/topics/settings/
+
+For the full list of settings and their values, see
+https://docs.djangoproject.com/en/1.7/ref/settings/
+"""
+
+import yaml
+
+import os
+
+BASE_DIR = os.path.dirname(os.path.dirname(__file__))
+
+# Quick-start development settings - unsuitable for production
+# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
+
+# SECURITY WARNING: keep the secret key used in production secret!
+secret_key_file = '/etc/baserock_openid_provider.secret_key.yml'
+with open(secret_key_file) as f:
+ data = yaml.load(f)
+ SECRET_KEY = data['baserock_openid_provider_secret_key']
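+# The YAML file is expected to contain a single key (value illustrative):
+#
+#   baserock_openid_provider_secret_key: some-long-random-string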
+
+# SECURITY WARNING: don't run with debug turned on in production!
+DEBUG = False
+
+TEMPLATE_DEBUG = True
+
+ALLOWED_HOSTS = [
+ 'openid.baserock.org',
+]
+
+# All connections for openid.baserock.org are forced through HTTPS by HAProxy.
+# This line is necessary so that the Django code generates https:// rather than
+# http:// URLs for internal redirects.
+#
+# You MUST remove this line if this application is not running behind a proxy
+# that forces all traffic through HTTPS.
+SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
+
+
+# Application definition
+
+INSTALLED_APPS = (
+ 'baserock_openid_provider',
+ 'django.contrib.admin',
+ 'django.contrib.auth',
+ 'django.contrib.contenttypes',
+ 'django.contrib.sessions',
+ 'django.contrib.messages',
+ 'django.contrib.staticfiles',
+ 'openid_provider',
+ 'registration'
+)
+
+MIDDLEWARE_CLASSES = (
+ 'django.contrib.sessions.middleware.SessionMiddleware',
+ 'django.middleware.common.CommonMiddleware',
+ 'django.middleware.csrf.CsrfViewMiddleware',
+ 'django.contrib.auth.middleware.AuthenticationMiddleware',
+ 'django.contrib.messages.middleware.MessageMiddleware',
+ 'django.middleware.clickjacking.XFrameOptionsMiddleware',
+)
+
+ROOT_URLCONF = 'baserock_openid_provider.urls'
+
+WSGI_APPLICATION = 'baserock_openid_provider.wsgi.application'
+
+
+# Logging
+
+LOGGING = {
+ 'version': 1,
+ 'disable_existing_loggers': False,
+ 'formatters': {
+ 'simple': {
+ 'format': '%(asctime)s %(message)s'
+ }
+ },
+ 'handlers': {
+ 'file': {
+ 'level': 'DEBUG',
+ 'formatter': 'simple',
+ 'class': 'logging.handlers.RotatingFileHandler',
+ 'filename': '/var/log/baserock_openid_provider/debug.log',
+ 'maxBytes': 10 * 1024 * 1024,
+ 'backupCount': 0,
+ }
+ },
+ 'loggers': {
+ 'django.request': {
+ 'handlers': ['file'],
+ 'level': 'DEBUG',
+ 'propagate': True,
+ },
+ 'openid_provider.views': {
+ 'handlers': ['file'],
+ 'level': 'DEBUG',
+ 'propagate': True,
+ }
+ }
+}
+
+
+# Database
+# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
+
+DATABASES = {
+ 'default': {
+ 'ENGINE': 'django.db.backends.mysql',
+ 'NAME': 'openid_provider',
+ 'USER': 'openid',
+ 'PORT': '3306',
+
+ # You must change this to the correct IP address when
+ # deploying to production! For development deployments this
+ # gets the IP of the 'baserock-database' container from the
+ # environment, which Docker will have set if you passed it
+ # `--link=baserock-database:db`.
+ 'HOST': os.environ.get('DB_PORT_3306_TCP_ADDR', '192.168.222.30')
+ }
+}
+
+
+pw_file = '/etc/baserock_openid_provider.database_password.yml'
+with open(pw_file) as f:
+ data = yaml.load(f)
+ password = data['baserock_openid_provider_password']
+ DATABASES['default']['PASSWORD'] = password
+
+# Internationalization
+# https://docs.djangoproject.com/en/1.7/topics/i18n/
+
+LANGUAGE_CODE = 'en-us'
+
+TIME_ZONE = 'UTC'
+
+USE_I18N = True
+
+USE_L10N = True
+
+USE_TZ = True
+
+
+# Static files (CSS, JavaScript, Images)
+# https://docs.djangoproject.com/en/1.7/howto/static-files/
+
+STATIC_URL = '/static/'
+
+STATIC_ROOT = '/var/www/static'
+
+TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
+
+
+# Other stuff
+
+LOGIN_REDIRECT_URL = '/'
+
+
+# We get mailed when stuff breaks.
+ADMINS = (
+ ('Sam Thursfield', 'sam.thursfield@codethink.co.uk'),
+)
+
+# FIXME: this email address doesn't actually exist.
+DEFAULT_FROM_EMAIL = 'openid@baserock.org'
+
+EMAIL_HOST = 'localhost'
+EMAIL_PORT = 25
+
+
+# django-registration-redux settings
+
+ACCOUNT_ACTIVATION_DAYS = 3
diff --git a/baserock_openid_provider/baserock_openid_provider/signals.py b/baserock_openid_provider/baserock_openid_provider/signals.py
new file mode 100644
index 00000000..dc2a7f78
--- /dev/null
+++ b/baserock_openid_provider/baserock_openid_provider/signals.py
@@ -0,0 +1,26 @@
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+from django.dispatch import receiver
+import registration.signals
+
+import logging
+
+
+@receiver(registration.signals.user_activated)
+def user_activation_handler(sender, user, request, **kwargs):
+ logging.info('Creating OpenID for user %s' % (user.username))
+ user.openid_set.create(openid=user.username)
diff --git a/baserock_openid_provider/baserock_openid_provider/static/style.css b/baserock_openid_provider/baserock_openid_provider/static/style.css
new file mode 100644
index 00000000..e8237b40
--- /dev/null
+++ b/baserock_openid_provider/baserock_openid_provider/static/style.css
@@ -0,0 +1,268 @@
+/* Baserock-ish stylesheet.
+ Fetched from http://wiki.baserock.org/local.css/ on 2015-01-23. */
+
+/* HTML5 display-role reset for older browsers */
+article, aside, details, figcaption, figure,
+footer, header, hgroup, menu, nav, section {
+ display: block;
+}
+body {
+ line-height: 1;
+}
+ol, ul {
+ padding: 0 0 0 1.5em;
+ margin: 0 0 1.2em;
+}
+li > ul, li > ol {
+ margin: 0;
+}
+ul {
+ list-style: disc;
+}
+ol {
+ list-style: decimal;
+}
+blockquote, q {
+ quotes: none;
+}
+blockquote:before, blockquote:after,
+q:before, q:after {
+ content: '';
+ content: none;
+}
+table {
+ border-collapse: collapse;
+ border-spacing: 0;
+}
+i, em {
+ font-style: italic;
+}
+b, strong {
+ font-weight: bold;
+}
+
+/*
+Main elements
+*/
+
+html, body {
+ font-size: 15px;
+ font-family: 'Open Sans', sans-serif;
+ line-height: 1.6em;
+}
+h1 {
+ color: #58595B;
+ font-size: 1.6em;
+ font-weight: bold;
+ margin: 0 0 0.4em;
+ padding: 1em 0 0.3em;
+}
+h2 {
+ border-bottom: 2px solid #E0E0E0;
+ border-top: 2px solid #E0E0E0;
+ background: #fafafa;
+ color: #58595B;
+ font-size: 1.4em;
+ font-weight: bold;
+ margin: 1.2em 0 0.4em;
+ padding: 0.4em 0;
+}
+h3 {
+ border-bottom: 2px solid #E0E0E0;
+ color: #58595B;
+ font-size: 1.2em;
+ font-weight: bold;
+ margin: 2em 0 0.3em;
+}
+h4 {
+ color: #58595B;
+ font-size: 1.1em;
+ font-weight: bold;
+ margin: 1.7em 0 0.3em;
+}
+h5 {
+ color: #58595B;
+ font-size: 1em;
+ font-weight: bold;
+ margin: 1.7em 0 0.3em;
+}
+a {
+ color: #bf2400;
+}
+p {
+ padding: 0;
+ margin: 0 0 1.2em;
+}
+table {
+ margin-bottom: 1.2em;
+}
+th, td {
+ padding: 0.2em 1em;
+}
+th {
+ font-weight: bold;
+ text-align: left;
+ border-bottom: 1px solid #ddd;
+}
+pre {
+ border: 1px solid #aaa;
+ border-radius: 0.5em;
+ padding: 1em 2em;
+ margin: 0 0 1.2em 2em;
+ background: #faf8f7;
+ font-size: 80%;
+}
+pre, code {
+ font-family: monospace;
+}
+code {
+ background: #faf8f7;
+ padding: 0.2em 0.4em;
+ border: 1px solid #ddd;
+ border-radius: 0.3em;
+ font-size: 0.9em;
+}
+pre > code {
+ background: none;
+ padding: 0;
+ border: none;
+ font-size: 1em;
+}
+blockquote {
+ border: .4em solid #ffaa55;
+ border-left-width: 3em;
+ padding: 0.3em 1em;
+ margin: 1.2em 3em;
+ border-radius: 2.2em 0 0 2.2em;
+}
+blockquote p {
+ margin: 0;
+}
+/*
+*/
+.max960 {
+ max-width: 960px;
+ margin: 0 auto;
+ position: relative;
+ height: 80px;
+}
+input#searchbox {
+ background: url("wikiicons/search-bg.gif") no-repeat scroll 100% 50% #FFFFFF;
+ color: #000000;
+ padding: 0 16px 0 10px;
+ border: solid 1px #CCC;
+ width: 180px;
+ height: 20px;
+ border-radius: 10px;
+}
+#searchform {
+ right: 0 !important;
+}
+.page {
+ max-width: 960px;
+ padding: 0 10px;
+ margin: 0 auto;
+}
+.pageheader {
+ background-color: #FFF;
+ border-bottom:2px solid #E65837;
+ color: #009099;
+ padding: 10px 10px 0 !important;
+ height: 80px;
+ background: #333;
+}
+.pageheader span a {
+ color: #FFF;
+}
+.pageheader span.title {
+ color: #E65837;
+}
+.pageheader .actions ul li {
+ background: none !important;
+ border-color: #28170B;
+ border-style: solid solid none;
+ border-width: 0;
+ margin: 0;
+ width: auto !important;
+ color: #FFF;
+ padding: 0 !important;
+}
+.pageheader li a:hover {
+ background: #E65837;
+ color: #FFF;
+}
+.header span {
+ display: inline-block;
+ padding: 6px 0;
+}
+.header span span {
+ padding: 0;
+}
+.parentlinks {
+ font: 13px 'Open Sans', sans-serif;
+}
+
+.title {
+ font: 13px 'Open Sans', sans-serif;
+ margin-top: 0.2em;
+ display:inline;
+}
+
+#logo a {
+ height: 40px;
+ width: 282px;
+ display: block;
+ padding-bottom: 10px;
+ background: url(logo.png) no-repeat;
+}
+#logo a span {
+ display: none;
+}
+#logo a:hover {
+ text-decoration: none;
+}
+.pageheader .actions {
+ position: static !important;
+ width: auto !important;
+ padding: 0 !important;
+}
+.pageheader .actions ul {
+ position: absolute;
+ right: 0;
+ bottom: 0;
+ height: auto !important;
+ padding: 0 !important;
+}
+.pageheader .actions a {
+ color: #FFF;
+ padding: 5px 0.5em;
+ display: inline-block;
+ background: #666;
+}
+
+div.header {
+ background-repeat: no-repeat;
+ min-width: 282px;
+ padding-top: 0px;
+}
+#pageinfo {
+ border-top: 0;
+}
+
+#content {
+ max-width: 51em;
+}
+#content, #comments, #footer {
+ margin: 1em 2em 1em 0 !important;
+}
+.pagedate {
+ font-size:10px;
+}
+.sidebar {
+ padding: 10px !important;
+ border: solid 1px #CCC !important;
+ background: #F2F2F2 !important;
+ margin: 1em 0 2em 1em !important;
+}
+
+
diff --git a/baserock_openid_provider/baserock_openid_provider/urls.py b/baserock_openid_provider/baserock_openid_provider/urls.py
new file mode 100644
index 00000000..8af8ade5
--- /dev/null
+++ b/baserock_openid_provider/baserock_openid_provider/urls.py
@@ -0,0 +1,12 @@
+from django.conf.urls import patterns, include, url
+from django.contrib import admin
+
+from . import views
+
+urlpatterns = patterns('',
+ url(r'^$', views.index, name='index'),
+
+ url(r'^accounts/', include('registration.backends.default.urls')),
+ url(r'^admin/', include(admin.site.urls)),
+ url(r'^openid/', include('openid_provider.urls')),
+)
diff --git a/baserock_openid_provider/baserock_openid_provider/views.py b/baserock_openid_provider/baserock_openid_provider/views.py
new file mode 100644
index 00000000..14060902
--- /dev/null
+++ b/baserock_openid_provider/baserock_openid_provider/views.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+import registration.backends.default.views
+
+from registration import signals
+from registration.users import UserModel
+
+from django.contrib.auth import authenticate
+from django.contrib.auth import login
+from django.shortcuts import render
+
+from . import forms
+
+
+def index(request):
+ return render(request, '../templates/index.html')
+
+
+class RegistrationViewWithNames(registration.backends.default.views.RegistrationView):
+ # Overrides the django-registration default view so that the extended form
+ # including the full name gets used.
+ form_class = forms.RegistrationFormWithNames
+
+ def register(self, request, **cleaned_data):
+ # Calling the base class first means that we don't have to copy and
+ # paste the contents of the register() function, but it has the
+ # downside that we don't know the user's name when we send the
+ # activation email.
+ superclass = super(RegistrationViewWithNames, self)
+ user = superclass.register(request, **cleaned_data)
+
+ first_name, last_name = cleaned_data['first_name'], cleaned_data['last_name']
+ user.first_name = first_name
+ user.last_name = last_name
+ user.save()
+
+ return user
+
+
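+# urls.py includes 'registration.backends.default.urls', which refers to the
+# stock RegistrationView; replacing the class here makes those URL patterns
+# use the subclass above, so the name fields take effect at registration.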
+registration.backends.default.views.RegistrationView = RegistrationViewWithNames
diff --git a/baserock_openid_provider/baserock_openid_provider/wsgi.py b/baserock_openid_provider/baserock_openid_provider/wsgi.py
new file mode 100644
index 00000000..5993d3e5
--- /dev/null
+++ b/baserock_openid_provider/baserock_openid_provider/wsgi.py
@@ -0,0 +1,14 @@
+"""
+WSGI config for baserock_openid_provider project.
+
+It exposes the WSGI callable as a module-level variable named ``application``.
+
+For more information on this file, see
+https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
+"""
+
+import os
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "baserock_openid_provider.settings")
+
+from django.core.wsgi import get_wsgi_application
+application = get_wsgi_application()
diff --git a/baserock_openid_provider/cherokee.conf b/baserock_openid_provider/cherokee.conf
new file mode 100644
index 00000000..38c4f1fa
--- /dev/null
+++ b/baserock_openid_provider/cherokee.conf
@@ -0,0 +1,300 @@
+# Cherokee configuration to run the Baserock OpenID provider, using
+# uWSGI to run the Django app from /srv/baserock_openid_provider.
+
+config!version = 001002103
+
+# Overall server config
+server!bind!1!port = 80
+server!group = cherokee
+server!keepalive = 1
+server!keepalive_max_requests = 500
+server!panic_action = /usr/bin/cherokee-panic
+server!pid_file = /var/run/cherokee.pid
+server!server_tokens = full
+server!timeout = 15
+server!user = cherokee
+
+# One virtual server which communicates with the uwsgi-django code and
+# also serves static files.
+vserver!1!directory_index = index.html
+vserver!1!document_root = /var/www/cherokee
+vserver!1!error_writer!filename = /var/log/cherokee/error_log
+vserver!1!error_writer!type = file
+vserver!1!logger = combined
+vserver!1!logger!access!buffsize = 16384
+vserver!1!logger!access!filename = /var/log/cherokee/access_log
+vserver!1!logger!access!type = file
+vserver!1!nick = default
+vserver!1!rule!110!document_root = /var/www/static
+vserver!1!rule!110!handler = file
+vserver!1!rule!110!match = directory
+vserver!1!rule!110!match!directory = /static
+vserver!1!rule!10!document_root = /var/www
+vserver!1!rule!10!handler = uwsgi
+vserver!1!rule!10!handler!balancer = round_robin
+vserver!1!rule!10!handler!balancer!source!10 = 1
+vserver!1!rule!10!handler!iocache = 1
+vserver!1!rule!10!match = default
+source!1!env_inherited = 1
+source!1!host = 127.0.0.1:45023
+source!1!interpreter = /usr/sbin/uwsgi --socket 127.0.0.1:45023 --ini=/srv/baserock_openid_provider/uwsgi.ini
+source!1!nick = uwsgi-django
+source!1!type = interpreter
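+
+# The uwsgi.ini referenced above lives with the Django app on the deployed
+# instance. A minimal sketch of what such a file might contain (an
+# assumption, not the actual deployed file):
+#
+#   [uwsgi]
+#   plugins = python
+#   chdir = /srv/baserock_openid_provider
+#   module = baserock_openid_provider.wsgi:application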
+
+# Icons and mime types.
+icons!default = page_white.png
+icons!directory = folder.png
+icons!file!bomb.png = core
+icons!file!page_white_go.png = *README*
+icons!parent_directory = arrow_turn_left.png
+icons!suffix!camera.png = jpg,jpeg,jpe
+icons!suffix!cd.png = iso,nrg,cue
+icons!suffix!color_wheel.png = png,gif,xcf,bmp,pcx,tiff,tif,cdr,psd,xpm,xbm
+icons!suffix!control_play.png = bin,exe,com,msi,out
+icons!suffix!css.png = css
+icons!suffix!cup.png = java,class,jar
+icons!suffix!email.png = eml,mbox,box,email,mbx
+icons!suffix!film.png = avi,mpeg,mpe,mpg,mpeg3,dl,fli,qt,mov,movie,flv,webm
+icons!suffix!font.png = ttf
+icons!suffix!html.png = html,htm
+icons!suffix!music.png = au,snd,mid,midi,kar,mpga,mpega,mp2,mp3,sid,wav,aif,aiff,aifc,gsm,m3u,wma,wax,ra,rm,ram,pls,sd2,ogg
+icons!suffix!package.png = tar,gz,bz2,zip,rar,ace,lha,7z,dmg,cpk
+icons!suffix!page_white_acrobat.png = pdf
+icons!suffix!page_white_c.png = c,h,cpp
+icons!suffix!page_white_office.png = doc,ppt,xls
+icons!suffix!page_white_php.png = php
+icons!suffix!page_white_text.png = txt,text,rtf,sdw
+icons!suffix!printer.png = ps,eps
+icons!suffix!ruby.png = rb
+icons!suffix!script.png = sh,csh,ksh,tcl,tk,py,pl
+mime!application/bzip2!extensions = bz2
+mime!application/gzip!extensions = gz
+mime!application/hta!extensions = hta
+mime!application/java-archive!extensions = jar
+mime!application/java-serialized-object!extensions = ser
+mime!application/java-vm!extensions = class
+mime!application/json!extensions = json
+mime!application/mac-binhex40!extensions = hqx
+mime!application/msaccess!extensions = mdb
+mime!application/msword!extensions = doc,dot
+mime!application/octet-stream!extensions = bin
+mime!application/octetstream!extensions = ace
+mime!application/oda!extensions = oda
+mime!application/ogg!extensions = ogx
+mime!application/pdf!extensions = pdf
+mime!application/pgp-keys!extensions = key
+mime!application/pgp-signature!extensions = pgp
+mime!application/pics-rules!extensions = prf
+mime!application/postscript!extensions = ps,ai,eps
+mime!application/rar!extensions = rar
+mime!application/rdf+xml!extensions = rdf
+mime!application/rss+xml!extensions = rss
+mime!application/smil!extensions = smi,smil
+mime!application/vnd.mozilla.xul+xml!extensions = xul
+mime!application/vnd.ms-excel!extensions = xls,xlb,xlt
+mime!application/vnd.ms-pki.seccat!extensions = cat
+mime!application/vnd.ms-pki.stl!extensions = stl
+mime!application/vnd.ms-powerpoint!extensions = ppt,pps
+mime!application/vnd.oasis.opendocument.chart!extensions = odc
+mime!application/vnd.oasis.opendocument.database!extensions = odb
+mime!application/vnd.oasis.opendocument.formula!extensions = odf
+mime!application/vnd.oasis.opendocument.graphics!extensions = odg
+mime!application/vnd.oasis.opendocument.image!extensions = odi
+mime!application/vnd.oasis.opendocument.presentation!extensions = odp
+mime!application/vnd.oasis.opendocument.spreadsheet!extensions = ods
+mime!application/vnd.oasis.opendocument.text!extensions = odt
+mime!application/vnd.oasis.opendocument.text-master!extensions = odm
+mime!application/vnd.oasis.opendocument.text-web!extensions = oth
+mime!application/vnd.pkg5.info!extensions = p5i
+mime!application/vnd.visio!extensions = vsd
+mime!application/vnd.wap.wbxml!extensions = wbxml
+mime!application/vnd.wap.wmlc!extensions = wmlc
+mime!application/vnd.wap.wmlscriptc!extensions = wmlsc
+mime!application/x-7z-compressed!extensions = 7z
+mime!application/x-abiword!extensions = abw
+mime!application/x-apple-diskimage!extensions = dmg
+mime!application/x-bcpio!extensions = bcpio
+mime!application/x-bittorrent!extensions = torrent
+mime!application/x-cdf!extensions = cdf
+mime!application/x-cpio!extensions = cpio
+mime!application/x-csh!extensions = csh
+mime!application/x-debian-package!extensions = deb,udeb
+mime!application/x-director!extensions = dcr,dir,dxr
+mime!application/x-dvi!extensions = dvi
+mime!application/x-flac!extensions = flac
+mime!application/x-font!extensions = pfa,pfb,gsf,pcf,pcf.Z
+mime!application/x-freemind!extensions = mm
+mime!application/x-gnumeric!extensions = gnumeric
+mime!application/x-gtar!extensions = gtar,tgz,taz
+mime!application/x-gzip!extensions = gz,tgz
+mime!application/x-httpd-php!extensions = phtml,pht,php
+mime!application/x-httpd-php-source!extensions = phps
+mime!application/x-httpd-php3!extensions = php3
+mime!application/x-httpd-php3-preprocessed!extensions = php3p
+mime!application/x-httpd-php4!extensions = php4
+mime!application/x-internet-signup!extensions = ins,isp
+mime!application/x-iphone!extensions = iii
+mime!application/x-iso9660-image!extensions = iso
+mime!application/x-java-jnlp-file!extensions = jnlp
+mime!application/x-javascript!extensions = js
+mime!application/x-kchart!extensions = chrt
+mime!application/x-killustrator!extensions = kil
+mime!application/x-koan!extensions = skp,skd,skt,skm
+mime!application/x-kpresenter!extensions = kpr,kpt
+mime!application/x-kspread!extensions = ksp
+mime!application/x-kword!extensions = kwd,kwt
+mime!application/x-latex!extensions = latex
+mime!application/x-lha!extensions = lha
+mime!application/x-lzh!extensions = lzh
+mime!application/x-lzx!extensions = lzx
+mime!application/x-ms-wmd!extensions = wmd
+mime!application/x-ms-wmz!extensions = wmz
+mime!application/x-msdos-program!extensions = com,exe,bat,dll
+mime!application/x-msi!extensions = msi
+mime!application/x-netcdf!extensions = nc
+mime!application/x-ns-proxy-autoconfig!extensions = pac
+mime!application/x-nwc!extensions = nwc
+mime!application/x-object!extensions = o
+mime!application/x-oz-application!extensions = oza
+mime!application/x-pkcs7-certreqresp!extensions = p7r
+mime!application/x-pkcs7-crl!extensions = crl
+mime!application/x-python-code!extensions = pyc,pyo
+mime!application/x-quicktimeplayer!extensions = qtl
+mime!application/x-redhat-package-manager!extensions = rpm
+mime!application/x-sh!extensions = sh
+mime!application/x-shar!extensions = shar
+mime!application/x-shockwave-flash!extensions = swf,swfl
+mime!application/x-stuffit!extensions = sit,sea
+mime!application/x-sv4cpio!extensions = sv4cpio
+mime!application/x-sv4crc!extensions = sv4crc
+mime!application/x-tar!extensions = tar
+mime!application/x-tcl!extensions = tcl
+mime!application/x-tex-pk!extensions = pk
+mime!application/x-texinfo!extensions = texinfo,texi
+mime!application/x-trash!extensions = ~,bak,old,sik
+mime!application/x-troff!extensions = t,tr,roff
+mime!application/x-troff-man!extensions = man
+mime!application/x-troff-me!extensions = me
+mime!application/x-troff-ms!extensions = ms
+mime!application/x-ustar!extensions = ustar
+mime!application/x-x509-ca-cert!extensions = crt
+mime!application/x-xcf!extensions = xcf
+mime!application/x-xfig!extensions = fig
+mime!application/x-xpinstall!extensions = xpi
+mime!application/xhtml+xml!extensions = xhtml,xht
+mime!application/xml!extensions = xml,xsl
+mime!application/zip!extensions = zip
+mime!audio/basic!extensions = au,snd
+mime!audio/midi!extensions = mid,midi,kar
+mime!audio/mpeg!extensions = mpga,mpega,mp2,mp3,m4a
+mime!audio/ogg!extensions = ogg,oga
+mime!audio/prs.sid!extensions = sid
+mime!audio/x-aiff!extensions = aif,aiff,aifc
+mime!audio/x-gsm!extensions = gsm
+mime!audio/x-mpegurl!extensions = m3u
+mime!audio/x-ms-wax!extensions = wax
+mime!audio/x-ms-wma!extensions = wma
+mime!audio/x-pn-realaudio!extensions = ra,rm,ram
+mime!audio/x-realaudio!extensions = ra
+mime!audio/x-scpls!extensions = pls
+mime!audio/x-sd2!extensions = sd2
+mime!audio/x-wav!extensions = wav
+mime!chemical/x-cache!extensions = cac,cache
+mime!chemical/x-cache-csf!extensions = csf
+mime!chemical/x-cdx!extensions = cdx
+mime!chemical/x-cif!extensions = cif
+mime!chemical/x-cmdf!extensions = cmdf
+mime!chemical/x-cml!extensions = cml
+mime!chemical/x-compass!extensions = cpa
+mime!chemical/x-crossfire!extensions = bsd
+mime!chemical/x-csml!extensions = csml,csm
+mime!chemical/x-ctx!extensions = ctx
+mime!chemical/x-cxf!extensions = cxf,cef
+mime!chemical/x-isostar!extensions = istr,ist
+mime!chemical/x-jcamp-dx!extensions = jdx,dx
+mime!chemical/x-kinemage!extensions = kin
+mime!chemical/x-pdb!extensions = pdb,ent
+mime!chemical/x-swissprot!extensions = sw
+mime!chemical/x-vamas-iso14976!extensions = vms
+mime!chemical/x-vmd!extensions = vmd
+mime!chemical/x-xtel!extensions = xtel
+mime!chemical/x-xyz!extensions = xyz
+mime!image/gif!extensions = gif
+mime!image/jpeg!extensions = jpeg,jpg,jpe
+mime!image/pcx!extensions = pcx
+mime!image/png!extensions = png
+mime!image/svg+xml!extensions = svg,svgz
+mime!image/tiff!extensions = tiff,tif
+mime!image/vnd.djvu!extensions = djvu,djv
+mime!image/vnd.wap.wbmp!extensions = wbmp
+mime!image/x-icon!extensions = ico
+mime!image/x-ms-bmp!extensions = bmp
+mime!image/x-photoshop!extensions = psd
+mime!image/x-portable-anymap!extensions = pnm
+mime!image/x-portable-bitmap!extensions = pbm
+mime!image/x-portable-graymap!extensions = pgm
+mime!image/x-portable-pixmap!extensions = ppm
+mime!image/x-xbitmap!extensions = xbm
+mime!image/x-xpixmap!extensions = xpm
+mime!image/x-xwindowdump!extensions = xwd
+mime!model/iges!extensions = igs,iges
+mime!model/mesh!extensions = msh,mesh,silo
+mime!model/vrml!extensions = wrl,vrml
+mime!text/calendar!extensions = ics,icz
+mime!text/comma-separated-values!extensions = csv
+mime!text/css!extensions = css
+mime!text/h323!extensions = 323
+mime!text/html!extensions = html,htm,shtml
+mime!text/iuls!extensions = uls
+mime!text/mathml!extensions = mml
+mime!text/plain!extensions = asc,txt,text,diff,pot
+mime!text/richtext!extensions = rtx
+mime!text/rtf!extensions = rtf
+mime!text/scriptlet!extensions = sct,wsc
+mime!text/tab-separated-values!extensions = tsv
+mime!text/vnd.sun.j2me.app-descriptor!extensions = jad
+mime!text/vnd.wap.wml!extensions = wml
+mime!text/vnd.wap.wmlscript!extensions = wmls
+mime!text/x-boo!extensions = boo
+mime!text/x-c++hdr!extensions = h++,hpp,hxx,hh
+mime!text/x-c++src!extensions = c++,cpp,cxx,cc
+mime!text/x-chdr!extensions = h
+mime!text/x-csh!extensions = csh
+mime!text/x-csrc!extensions = c
+mime!text/x-dsrc!extensions = d
+mime!text/x-haskell!extensions = hs
+mime!text/x-java!extensions = java
+mime!text/x-literate-haskell!extensions = lhs
+mime!text/x-moc!extensions = moc
+mime!text/x-pascal!extensions = p,pas
+mime!text/x-pcs-gcd!extensions = gcd
+mime!text/x-perl!extensions = pl,pm
+mime!text/x-python!extensions = py
+mime!text/x-setext!extensions = etx
+mime!text/x-sh!extensions = sh
+mime!text/x-tcl!extensions = tcl,tk
+mime!text/x-tex!extensions = tex,ltx,sty,cls
+mime!text/x-vcalendar!extensions = vcs
+mime!text/x-vcard!extensions = vcf
+mime!video/dl!extensions = dl
+mime!video/dv!extensions = dif,dv
+mime!video/fli!extensions = fli
+mime!video/gl!extensions = gl
+mime!video/mp4!extensions = mp4
+mime!video/mpeg!extensions = mpeg,mpg,mpe
+mime!video/ogg!extensions = ogv
+mime!video/quicktime!extensions = qt,mov
+mime!video/vnd.mpegurl!extensions = mxu
+mime!video/webm!extensions = webm
+mime!video/x-flv!extensions = flv
+mime!video/x-la-asf!extensions = lsf,lsx
+mime!video/x-mng!extensions = mng
+mime!video/x-ms-asf!extensions = asf,asx
+mime!video/x-ms-wm!extensions = wm
+mime!video/x-ms-wmv!extensions = wmv
+mime!video/x-ms-wmx!extensions = wmx
+mime!video/x-ms-wvx!extensions = wvx
+mime!video/x-msvideo!extensions = avi
+mime!video/x-sgi-movie!extensions = movie
+mime!x-conference/x-cooltalk!extensions = ice
+mime!x-world/x-vrml!extensions = vrm,vrml,wrl
diff --git a/baserock_openid_provider/develop.sh b/baserock_openid_provider/develop.sh
new file mode 100755
index 00000000..534a1333
--- /dev/null
+++ b/baserock_openid_provider/develop.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+# Set up a development environment in a container.
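+#
+# This assumes the 'baserock/openid-provider' image already exists locally.
+# It is produced by the 'development' builder in packer_template.json, with
+# something like (run from the repository root; exact invocation may vary):
+#
+#     packer build -only=development baserock_openid_provider/packer_template.json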
+
+exec docker run -i -t --rm \
+ --name=baserock-openid-provider \
+ --link=baserock-database:db \
+ --publish=127.0.0.1:80:80 \
+ --volume=`pwd`:/srv/test-baserock-infrastructure \
+ baserock/openid-provider
+
diff --git a/baserock_openid_provider/image-config.yml b/baserock_openid_provider/image-config.yml
new file mode 100644
index 00000000..3c0d8bb5
--- /dev/null
+++ b/baserock_openid_provider/image-config.yml
@@ -0,0 +1,53 @@
+# Image configuration for Baserock OpenID provider.
+#
+# This playbook is run at image-creation time by Packer.
+---
+- hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: enable persistent journal
+ shell: mkdir /var/log/journal
+ args:
+ creates: /var/log/journal
+
+ - name: install Cherokee web server
+ yum: name=cherokee state=latest
+
+ - name: install PIP package manager
+ yum: name=python-pip state=latest
+
+ - name: install Sendmail mail transfer agent
+ yum: name=sendmail state=latest
+
+ - name: install uWSGI application container server and Python plugin
+ yum: name=uwsgi-plugin-python state=latest
+
+ - name: install Django
+ pip: name=django
+
+ # This is a fork of django-registration which supports Django 1.7.
+ # Source: https://github.com/macropin/django-registration
+ # The original django-registration (which seems to be abandoned) lives at:
+ # https://bitbucket.org/ubernostrum/django-registration/
+ - name: install django-registration-redux
+ pip: name=django-registration-redux
+
+ - name: install python-openid
+ pip: name=python-openid
+
+ # Install the MySQL-python package from Yum, because if it's installed from
+ # PyPI you need to have the mariadb-devel package installed to build the C
+ # code and that's an extra 21MB of dependencies or so. Note that this driver
+ # doesn't support Python 3, but there is a fork available which does, see:
+ # https://docs.djangoproject.com/en/dev/ref/databases/#mysql-db-api-drivers
+ - name: install MySQL-python
+ yum: name=MySQL-python state=latest
+
+ - name: install Cherokee configuration
+ file: src=/srv/baserock_openid_provider/cherokee.conf dest=/etc/cherokee/cherokee.conf state=link force=yes
+
+ - name: create log directory for baserock_openid_provider
+ file: path=/var/log/baserock_openid_provider owner=cherokee group=cherokee state=directory
+
+ - name: create directory for static content
+ file: path=/var/www/static owner=cherokee group=cherokee state=directory
diff --git a/baserock_openid_provider/instance-config.yml b/baserock_openid_provider/instance-config.yml
new file mode 100644
index 00000000..7eac185d
--- /dev/null
+++ b/baserock_openid_provider/instance-config.yml
@@ -0,0 +1,36 @@
+# Instance configuration for Baserock OpenID provider.
+#
+# This playbook should be run after starting an instance of the Baserock
+# OpenID Provider image.
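+#
+# For example, assuming an inventory file that puts the new instance in the
+# 'openid' group (hypothetical invocation):
+#
+#     ansible-playbook -i hosts baserock_openid_provider/instance-config.yml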
+---
+- hosts: openid
+ gather_facts: False
+ sudo: yes
+ tasks:
+ - name: ensure system up to date
+ yum: name=* state=latest
+
+ - name: install database password
+ copy: src=../database/baserock_openid_provider.database_password.yml dest=/etc owner=cherokee group=cherokee mode=400
+
+ - name: install Django secret key
+ copy: src=baserock_openid_provider.secret_key.yml dest=/etc owner=cherokee group=cherokee mode=400
+
+ # This step could be part of image creation, except that the secret key
+ # file wouldn't be available at that time, so the 'manage.py' script would
+ # fail to run.
+ - name: install static content
+ django_manage: app_path=/srv/baserock_openid_provider command=collectstatic
+ sudo_user: cherokee
+
+ - name: run database migrations
+ django_manage: app_path=/srv/baserock_openid_provider command=migrate
+ sudo_user: cherokee
+
+ # Default configuration of Sendmail in Fedora is to only accept connections from
+ # localhost. This is what we want, so no extra config required.
+ - name: enable and start sendmail service
+ service: name=sendmail enabled=yes state=started
+
+ - name: enable and start Cherokee service
+ service: name=cherokee enabled=yes state=restarted
diff --git a/baserock_openid_provider/manage.py b/baserock_openid_provider/manage.py
new file mode 100644
index 00000000..924662bf
--- /dev/null
+++ b/baserock_openid_provider/manage.py
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+import os
+import sys
+
+if __name__ == "__main__":
+ os.environ.setdefault("DJANGO_SETTINGS_MODULE", "baserock_openid_provider.settings")
+
+ from django.core.management import execute_from_command_line
+
+ execute_from_command_line(sys.argv)
diff --git a/baserock_openid_provider/openid_provider/__init__.py b/baserock_openid_provider/openid_provider/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/__init__.py
diff --git a/baserock_openid_provider/openid_provider/admin.py b/baserock_openid_provider/openid_provider/admin.py
new file mode 100644
index 00000000..0d1b62aa
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/admin.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+# vim: set ts=4 sw=4 :
+
+from django.contrib import admin
+
+from openid_provider.models import TrustedRoot, OpenID
+
+class TrustedRootInline(admin.TabularInline):
+ model = TrustedRoot
+
+class OpenIDAdmin(admin.ModelAdmin):
+ list_display = ['openid', 'user', 'default']
+ inlines = [TrustedRootInline, ]
+ raw_id_fields = ("user",)
+ search_fields = ('user__email',)
+
+admin.site.register(OpenID, OpenIDAdmin)
diff --git a/baserock_openid_provider/openid_provider/conf.py b/baserock_openid_provider/openid_provider/conf.py
new file mode 100644
index 00000000..7355c840
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/conf.py
@@ -0,0 +1,27 @@
+import os
+from django.conf import settings
+
+STORE = getattr(settings, 'OPENID_PROVIDER_STORE',
+ 'openid.store.filestore.FileOpenIDStore')
+
+if STORE == 'openid.store.filestore.FileOpenIDStore':
+ import tempfile
+ tempdir = tempfile.gettempdir()
+
+ FILESTORE_PATH = getattr(settings, 'OPENID_PROVIDER_FILESTORE_PATH',
+ os.path.join(tempdir, 'openid-filestore'))
+
+SREG_DATA_CALLBACK = getattr(settings, 'OPENID_PROVIDER_SREG_DATA_CALLBACK',
+ 'openid_provider.utils.get_default_sreg_data')
+
+AX_DATA_CALLBACK = getattr(settings, 'OPENID_PROVIDER_AX_DATA_CALLBACK',
+ 'openid_provider.utils.get_default_ax_data')
+
+AX_EXTENSION = getattr(settings, 'OPENID_PROVIDER_AX_EXTENSION', False)
+
+AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
+
+# RPs without a relying party verification mechanism are redirected to the
+# decide page every time; set this to True to disable that behaviour:
+FAILED_DISCOVERY_AS_VALID = getattr(
+ settings, 'OPENID_FAILED_DISCOVERY_AS_VALID', False)
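+
+# Each of these can be overridden from the project's settings.py; for
+# example (hypothetical values):
+#
+#   OPENID_PROVIDER_AX_EXTENSION = True
+#   OPENID_PROVIDER_FILESTORE_PATH = '/var/lib/openid-filestore'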
diff --git a/baserock_openid_provider/openid_provider/migrations/0001_initial.py b/baserock_openid_provider/openid_provider/migrations/0001_initial.py
new file mode 100644
index 00000000..1857f59a
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/migrations/0001_initial.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+import datetime
+from south.db import db
+from south.v2 import SchemaMigration
+from django.db import models
+
+
+class Migration(SchemaMigration):
+
+ def forwards(self, orm):
+ # Adding model 'OpenID'
+ db.create_table('openid_provider_openid', (
+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
+ ('openid', self.gf('django.db.models.fields.CharField')(unique=True, max_length=200, blank=True)),
+ ('default', self.gf('django.db.models.fields.BooleanField')(default=False)),
+ ))
+ db.send_create_signal('openid_provider', ['OpenID'])
+
+ # Adding model 'TrustedRoot'
+ db.create_table('openid_provider_trustedroot', (
+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('openid', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['openid_provider.OpenID'])),
+ ('trust_root', self.gf('django.db.models.fields.CharField')(max_length=200)),
+ ))
+ db.send_create_signal('openid_provider', ['TrustedRoot'])
+
+
+ def backwards(self, orm):
+ # Deleting model 'OpenID'
+ db.delete_table('openid_provider_openid')
+
+ # Deleting model 'TrustedRoot'
+ db.delete_table('openid_provider_trustedroot')
+
+
+ models = {
+ 'auth.group': {
+ 'Meta': {'object_name': 'Group'},
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
+ 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
+ },
+ 'auth.permission': {
+ 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
+ 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
+ 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
+ },
+ 'auth.user': {
+ 'Meta': {'object_name': 'User'},
+ 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
+ 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
+ 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
+ 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
+ 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
+ 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
+ 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
+ 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
+ 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
+ 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
+ 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
+ },
+ 'contenttypes.contenttype': {
+ 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
+ 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
+ },
+ 'openid_provider.openid': {
+ 'Meta': {'ordering': "['openid']", 'object_name': 'OpenID'},
+ 'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'openid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'}),
+ 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
+ },
+ 'openid_provider.trustedroot': {
+ 'Meta': {'object_name': 'TrustedRoot'},
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'openid': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['openid_provider.OpenID']"}),
+ 'trust_root': ('django.db.models.fields.CharField', [], {'max_length': '200'})
+ }
+ }
+
+ complete_apps = ['openid_provider']
\ No newline at end of file
diff --git a/baserock_openid_provider/openid_provider/migrations/__init__.py b/baserock_openid_provider/openid_provider/migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/migrations/__init__.py
diff --git a/baserock_openid_provider/openid_provider/models.py b/baserock_openid_provider/openid_provider/models.py
new file mode 100644
index 00000000..bad24d9a
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/models.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+# vim: set ts=4 sw=4 :
+
+from django.utils.translation import ugettext_lazy as _
+from django.db import models
+
+from openid_provider.conf import AUTH_USER_MODEL
+from openid_provider.utils import get_username
+
+class OpenID(models.Model):
+ user = models.ForeignKey(AUTH_USER_MODEL)
+ openid = models.CharField(max_length=200, blank=True, unique=True)
+ default = models.BooleanField(default=False)
+
+ class Meta:
+ verbose_name = _('OpenID')
+ verbose_name_plural = _('OpenIDs')
+ ordering = ['openid']
+
+ def __unicode__(self):
+ return u"%s|%s" % (get_username(self.user), self.openid)
+
+ def save(self, *args, **kwargs):
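+ # If no OpenID string was supplied, generate a random URL-safe one:
+ # a SHA-1 of the username plus a random number, base64-encoded with
+ # the characters '/', '+' and '=' stripped out.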
+ if self.openid in ['', u'', None]:
+ from hashlib import sha1
+ import random, base64
+ sha = sha1()
+ sha.update(unicode(get_username(self.user)).encode('utf-8'))
+ sha.update(str(random.random()))
+ value = str(base64.b64encode(sha.digest()))
+ value = value.replace('/', '').replace('+', '').replace('=', '')
+ self.openid = value
+ super(OpenID, self).save(*args, **kwargs)
+ if self.default:
+ self.user.openid_set.exclude(pk=self.pk).update(default=False)
+
+class TrustedRoot(models.Model):
+ openid = models.ForeignKey(OpenID)
+ trust_root = models.CharField(max_length=200)
+
+ def __unicode__(self):
+ return unicode(self.trust_root)
diff --git a/baserock_openid_provider/openid_provider/templates/openid_provider/base.html b/baserock_openid_provider/openid_provider/templates/openid_provider/base.html
new file mode 100644
index 00000000..94d9808c
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/templates/openid_provider/base.html
@@ -0,0 +1 @@
+{% extends "base.html" %}
diff --git a/baserock_openid_provider/openid_provider/templates/openid_provider/decide.html b/baserock_openid_provider/openid_provider/templates/openid_provider/decide.html
new file mode 100644
index 00000000..5b87f824
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/templates/openid_provider/decide.html
@@ -0,0 +1,41 @@
+{% extends "openid_provider/base.html" %}
+
+{% block content %}
+{% ifequal trust_root_valid "Valid" %}
+ <!-- Trust root has been validated by OpenID 2 mechanism. -->
+ <p>The site <tt>{{ trust_root|escape }}</tt> has requested verification
+ of your OpenID.</p>
+{% endifequal %}
+{% ifequal trust_root_valid "Invalid" %}
+<div class="error">
+ <p>This request claims to be from {{ trust_root|escape }} but I have
+ determined that <em>it is a pack of lies</em>. Beware, if you release
+ information to them, they are likely to do unconscionable things with it,
+ being the lying liars that they are.</p>
+ <p>Please tell the <em>real</em> {{ trust_root|escape }} that someone is
+ trying to abuse your trust in their good name.</p>
+</div>
+{% endifequal %}
+{% ifequal trust_root_valid "Unreachable" %}
+ <p>The site <tt>{{ trust_root|escape }}</tt> has requested verification
+ of your OpenID. I have failed to reach it and thus cannot vouch for its
+ authenticity. Perhaps it is on your local network.</p>
+{% endifequal %}
+{% ifequal trust_root_valid "DISCOVERY_FAILED" %}
+ <p>The site <tt>{{ trust_root|escape }}</tt> has requested verification
+ of your OpenID. However, <tt>{{ trust_root|escape }}</tt> does not
+ implement OpenID 2.0's relying party verification mechanism. Please use
+ extra caution in deciding whether to release information to this party,
+ and ask <tt>{{ trust_root|escape }}</tt> to implement relying party
+ verification for your future transactions.</p>
+ <p>You will return to <tt>{{ return_to|escape }}</tt></p>
+{% endifequal %}
+
+<form method="post">{% csrf_token %}
+Verify your identity to the relying party?
+<br/>
+<input type="hidden" name="decide_page" value="True" />
+<input type="submit" value="Yes (Allow)" name="allow" />
+<input type="submit" value="No (Cancel)" name="cancel" />
+</form>
+{% endblock %}
diff --git a/baserock_openid_provider/openid_provider/templates/openid_provider/error.html b/baserock_openid_provider/openid_provider/templates/openid_provider/error.html
new file mode 100644
index 00000000..11b77b21
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/templates/openid_provider/error.html
@@ -0,0 +1,6 @@
+{% extends "openid_provider/base.html" %}
+
+{% block content %}
+<h1>{{ title }}</h1>
+{{ msg }}
+{% endblock %}
diff --git a/baserock_openid_provider/openid_provider/templates/openid_provider/response.html b/baserock_openid_provider/openid_provider/templates/openid_provider/response.html
new file mode 100644
index 00000000..5f7e46fa
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/templates/openid_provider/response.html
@@ -0,0 +1,12 @@
+{% extends "openid_provider/base.html" %}
+
+{% block content %}
+<div id="openid-body">
+ {{ body|safe }}
+</div>
+<script type="text/javascript">
+ // The URL is too long (> 2047 chars) to be submitted via GET, so it must be
+ // POSTed. The user shouldn't have to click the "Continue" button manually,
+ // so we submit the form via JavaScript.
+ document.getElementById('openid-body').getElementsByTagName('form')[0].submit();
+</script>
+{% endblock %}
diff --git a/baserock_openid_provider/openid_provider/templates/openid_provider/server.html b/baserock_openid_provider/openid_provider/templates/openid_provider/server.html
new file mode 100644
index 00000000..80615157
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/templates/openid_provider/server.html
@@ -0,0 +1,9 @@
+{% extends "openid_provider/base.html" %}
+
+{% block extrahead %}{{ block.super }}
+<meta http-equiv="x-xrds-location" content="{{ xrds_location }}">
+{% endblock %}
+
+{% block content %}
+This is an OpenID server.
+{% endblock %}
diff --git a/baserock_openid_provider/openid_provider/templates/openid_provider/xrds.xml b/baserock_openid_provider/openid_provider/templates/openid_provider/xrds.xml
new file mode 100644
index 00000000..960685b0
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/templates/openid_provider/xrds.xml
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<xrds:XRDS xmlns:xrds="xri://$xrds" xmlns="xri://$xrd*($v*2.0)">
+ <XRD>
+ <Service priority="0">{% for uri in types %}
+ <Type>{{ uri|escape }}</Type>
+ {% endfor %}{% for endpoint in endpoints %}
+ <URI>{{ endpoint }}</URI>
+ {% endfor %}</Service>
+ </XRD>
+</xrds:XRDS>
diff --git a/baserock_openid_provider/openid_provider/urls.py b/baserock_openid_provider/openid_provider/urls.py
new file mode 100644
index 00000000..33f79ce7
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/urls.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# vim: set ts=4 sw=4 :
+
+try:
+ from django.conf.urls import patterns, url
+except ImportError: # Django < 1.4
+ from django.conf.urls.defaults import patterns, url
+
+urlpatterns = patterns('openid_provider.views',
+ url(r'^$', 'openid_server', name='openid-provider-root'),
+ url(r'^decide/$', 'openid_decide', name='openid-provider-decide'),
+ url(r'^xrds/$', 'openid_xrds', name='openid-provider-xrds'),
+ url(r'^(?P<id>.*)/$', 'openid_xrds', {'identity': True}, name='openid-provider-identity'),
+)
diff --git a/baserock_openid_provider/openid_provider/utils.py b/baserock_openid_provider/openid_provider/utils.py
new file mode 100644
index 00000000..ae704001
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/utils.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*- vim: set et ts=4 sw=4 :
+# some code from http://www.djangosnippets.org/snippets/310/ by simon
+# and from examples/djopenid from python-openid-2.2.4
+from hashlib import sha1
+from openid_provider import conf
+from openid.extensions import ax, sreg
+from openid.server.server import Server, BROWSER_REQUEST_MODES
+from openid.server.trustroot import verifyReturnTo
+from openid.yadis.discover import DiscoveryFailure
+from openid.fetchers import HTTPFetchingError
+
+from django.core.exceptions import ImproperlyConfigured
+from django.core.urlresolvers import reverse
+from django.http import HttpResponse
+from django.shortcuts import render_to_response
+# RequestContext is needed by prep_response() below.
+from django.template import RequestContext
+from django.utils.importlib import import_module
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+def import_module_attr(path):
+ package, module = path.rsplit('.', 1)
+ return getattr(import_module(package), module)
+
+def get_username(u):
+ if hasattr(u, 'get_username'):
+ return u.get_username()
+ return u.username
+
+def get_default_sreg_data(request, orequest):
+ return {
+ 'email': request.user.email,
+ 'nickname': get_username(request.user),
+ 'fullname': request.user.get_full_name(),
+ }
+
+def get_default_ax_data(request, orequest):
+ return {
+ 'http://axschema.org/contact/email': request.user.email,
+ 'http://axschema.org/namePerson': request.user.get_full_name(),
+ 'http://axschema.org/namePerson/friendly': get_username(request.user),
+ 'http://axschema.org/namePerson/first': request.user.first_name,
+ 'http://axschema.org/namePerson/last': request.user.last_name,
+ }
+
+def add_sreg_data(request, orequest, oresponse):
+ callback = get_sreg_callback()
+ if callback is None or not callable(callback):
+ return
+ sreg_data = callback(request, orequest)
+ sreg_req = sreg.SRegRequest.fromOpenIDRequest(orequest)
+ sreg_resp = sreg.SRegResponse.extractResponse(sreg_req, sreg_data)
+ oresponse.addExtension(sreg_resp)
+
+def add_ax_data(request, orequest, oresponse):
+ callback = get_ax_callback()
+ if callback is None or not callable(callback):
+ return
+ ax_data = callback(request, orequest)
+ ax_req = ax.FetchRequest.fromOpenIDRequest(orequest)
+ ax_resp = ax.FetchResponse(ax_req)
+ if ax_req is not None:
+ for attr in ax_req.getRequiredAttrs():
+ value = ax_data.get(attr, None)
+ if value is not None:
+ ax_resp.addValue(attr, value)
+ oresponse.addExtension(ax_resp)
+
+def get_sreg_callback():
+ try:
+ return import_module_attr(conf.SREG_DATA_CALLBACK)
+ except (ImportError, AttributeError):
+ return None
+
+def get_ax_callback():
+ try:
+ return import_module_attr(conf.AX_DATA_CALLBACK)
+ except (ImportError, AttributeError):
+ return None
+
+def get_store(request):
+ try:
+ store_class = import_module_attr(conf.STORE)
+ except ImportError:
+ raise ImproperlyConfigured(
+ "OpenID store %r could not be imported" % conf.STORE)
+ # The FileOpenIDStore requires a path to save the user files.
+ if conf.STORE == 'openid.store.filestore.FileOpenIDStore':
+ return store_class(conf.FILESTORE_PATH)
+ return store_class()
+
+def trust_root_validation(orequest):
+ """
+ OpenID specs 9.2.1: using realm for return url verification
+ """
+ try:
+ return verifyReturnTo(
+ orequest.trust_root, orequest.return_to) and "Valid" or "Invalid"
+ except HTTPFetchingError:
+ return "Unreachable"
+ except DiscoveryFailure:
+ return "DISCOVERY_FAILED"
+
+def get_trust_session_key(orequest):
+ return 'OPENID_' + sha1(
+ orequest.trust_root + orequest.return_to).hexdigest()
+
+def prep_response(request, orequest, oresponse, server=None):
+ # Convert a webresponse from the OpenID library in to a Django HttpResponse
+
+ if not server:
+ server = Server(get_store(request),
+ op_endpoint=request.build_absolute_uri(
+ reverse('openid-provider-root')))
+ webresponse = server.encodeResponse(oresponse)
+ if webresponse.code == 200 and orequest.mode in BROWSER_REQUEST_MODES:
+ response = render_to_response('openid_provider/response.html', {
+ 'body': webresponse.body,
+ }, context_instance=RequestContext(request))
+ logger.debug('rendering browser response')
+ else:
+ response = HttpResponse(webresponse.body)
+ response.status_code = webresponse.code
+ for key, value in webresponse.headers.items():
+ response[key] = value
+ logger.debug('rendering raw response')
+ return response
+
diff --git a/baserock_openid_provider/openid_provider/views.py b/baserock_openid_provider/openid_provider/views.py
new file mode 100644
index 00000000..2633abf0
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/views.py
@@ -0,0 +1,317 @@
+# -*- coding: utf-8 -*-
+# some code from http://www.djangosnippets.org/snippets/310/ by simon
+# and from examples/djopenid from python-openid-2.2.4
+import urlparse
+import logging
+from urllib import urlencode, quote
+
+from django.conf import settings
+from django.core.urlresolvers import reverse
+from django.http import HttpResponse, HttpResponseRedirect, QueryDict
+from django.shortcuts import render_to_response
+from django.template import RequestContext
+from django.utils.translation import ugettext as _
+
+from django.utils.encoding import smart_str
+try:
+ from django.views.decorators.csrf import csrf_exempt
+except ImportError:
+ from django.contrib.csrf.middleware import csrf_exempt
+
+from django.contrib.auth import REDIRECT_FIELD_NAME
+
+from openid.association import default_negotiator, encrypted_negotiator
+from openid.consumer.discover import OPENID_IDP_2_0_TYPE, OPENID_2_0_TYPE
+from openid.extensions import sreg, ax
+from openid.server.server import Server, BROWSER_REQUEST_MODES
+from openid.yadis.constants import YADIS_CONTENT_TYPE
+
+from openid_provider import conf
+from openid_provider.utils import add_sreg_data, add_ax_data, get_store, \
+ trust_root_validation, get_trust_session_key, prep_response
+from openid_provider.models import TrustedRoot
+
+logger = logging.getLogger(__name__)
+
+
+# Special URL which means 'let the user choose whichever identity'.
+IDENTIFIER_SELECT_URL = 'http://specs.openid.net/auth/2.0/identifier_select'
+
+
+@csrf_exempt
+def openid_server(request):
+ """
+ This view is the actual OpenID server - running at the URL pointed to by
+ the <link rel="openid.server"> tag.
+ """
+ logger.debug('server request %s: %s',
+ request.method, request.POST or request.GET)
+ server = openid_get_server(request)
+
+ if not request.is_secure():
+ # if request is not secure allow only encrypted association sessions
+ server.negotiator = encrypted_negotiator
+
+ # Clear AuthorizationInfo session var, if it is set
+ if request.session.get('AuthorizationInfo', None):
+ del request.session['AuthorizationInfo']
+
+ querydict = dict(request.REQUEST.items())
+ orequest = server.decodeRequest(querydict)
+ if not orequest:
+ orequest = server.decodeRequest(request.session.get('OPENID_REQUEST', None))
+ if orequest:
+ # remove session stored data:
+ del request.session['OPENID_REQUEST']
+ else:
+ # not request, render info page:
+ data = {
+ 'host': request.build_absolute_uri('/'),
+ 'xrds_location': request.build_absolute_uri(
+ reverse('openid-provider-xrds')),
+ }
+ logger.debug('invalid request, sending info: %s', data)
+ return render_to_response('openid_provider/server.html',
+ data,
+ context_instance=RequestContext(request))
+
+ if orequest.mode in BROWSER_REQUEST_MODES:
+ if not request.user.is_authenticated():
+ logger.debug('no local authentication, sending landing page')
+ return landing_page(request, orequest)
+
+ openid = openid_is_authorized(request, orequest.identity,
+ orequest.trust_root)
+
+ # verify return_to:
+ trust_root_valid = trust_root_validation(orequest)
+ validated = False
+
+ if conf.FAILED_DISCOVERY_AS_VALID:
+ if trust_root_valid == 'DISCOVERY_FAILED':
+ validated = True
+ else:
+ # if in decide already took place, set as valid:
+ if request.session.get(get_trust_session_key(orequest), False):
+ validated = True
+
+ if openid is not None and (validated or trust_root_valid == 'Valid'):
+ if orequest.identity == IDENTIFIER_SELECT_URL:
+ id_url = request.build_absolute_uri(
+ reverse('openid-provider-identity', args=[openid.openid]))
+ else:
+ # We must return exactly the identity URL that was requested,
+ # otherwise the openid.server module raises an error.
+ id_url = orequest.identity
+
+ oresponse = orequest.answer(True, identity=id_url)
+ logger.debug('orequest.answer(True, identity="%s")', id_url)
+ elif orequest.immediate:
+ logger.debug('checkid_immediate mode not supported')
+ raise Exception('checkid_immediate mode not supported')
+ else:
+ request.session['OPENID_REQUEST'] = orequest.message.toPostArgs()
+ request.session['OPENID_TRUSTROOT_VALID'] = trust_root_valid
+ logger.debug(
+ 'Set OPENID_REQUEST to %s in session %s',
+ request.session['OPENID_REQUEST'], request.session)
+ logger.debug(
+ 'Set OPENID_TRUSTROOT_VALID to %s in session %s',
+ request.session['OPENID_TRUSTROOT_VALID'], request.session)
+ logger.debug('redirecting to decide page')
+ return HttpResponseRedirect(reverse('openid-provider-decide'))
+ else:
+ oresponse = server.handleRequest(orequest)
+ if request.user.is_authenticated():
+ add_sreg_data(request, orequest, oresponse)
+ if conf.AX_EXTENSION:
+ add_ax_data(request, orequest, oresponse)
+
+ return prep_response(request, orequest, oresponse, server)
+
+def openid_xrds(request, identity=False, id=None):
+ if identity:
+ types = [OPENID_2_0_TYPE]
+ else:
+ types = [OPENID_IDP_2_0_TYPE, sreg.ns_uri]
+ if conf.AX_EXTENSION:
+ types.append(ax.AXMessage.ns_uri)
+ endpoints = [request.build_absolute_uri(reverse('openid-provider-root'))]
+ return render_to_response('openid_provider/xrds.xml', {
+ 'host': request.build_absolute_uri('/'),
+ 'types': types,
+ 'endpoints': endpoints,
+ }, context_instance=RequestContext(request), content_type=YADIS_CONTENT_TYPE)
+
+
+def url_for_openid(request, openid):
+ return request.build_absolute_uri(
+ reverse('openid-provider-identity', args=[openid.openid]))
+
+
+def openid_not_found_error_message(request, identity_url):
+ ids = request.user.openid_set
+ if ids.count() == 0:
+ message = "You have no OpenIDs configured. Contact the administrator."
+ else:
+ id_urls = [url_for_openid(request, id) for id in ids.iterator()]
+ id_urls = ', '.join(id_urls)
+ if ids.count() != 1:
+ message = "You somehow have multiple OpenIDs: " + id_urls
+ else:
+ message = "Your OpenID URL is: " + id_urls
+ return "You do not have the OpenID '%s'. %s" % (identity_url, message)
+
+
+def openid_decide(request):
+ """
+ The page that asks the user if they really want to sign in to the site,
+ and lets them add the consumer to their trusted whitelist.
+
+ If the user is logged in, ask whether they want to trust this trust_root.
+ If they are not logged in, show the landing page.
+ """
+ server = openid_get_server(request)
+ orequest = server.decodeRequest(request.session.get('OPENID_REQUEST'))
+ trust_root_valid = request.session.get('OPENID_TRUSTROOT_VALID')
+
+ logger.debug('Got OPENID_REQUEST %s, OPENID_TRUSTROOT_VALID %s from '
+ 'session %s', orequest, trust_root_valid, request.session)
+
+ if not request.user.is_authenticated():
+ return landing_page(request, orequest)
+
+ if orequest is None:
+ # This isn't normal, but can occur if the user uses the 'back' button
+ # or if the session data is otherwise lost for some reason.
+ return error_page(
+ request, "I've lost track of your session now. Sorry! Please go "
+ "back to the site you are logging in to with a Baserock "
+ "OpenID and, if you're not yet logged in, try again.")
+
+ openid = openid_get_identity(request, orequest.identity)
+ if openid is None:
+ # User should only ever have one OpenID, created for them when they
+ # registered.
+ message = openid_not_found_error_message(request, orequest.identity)
+ return error_page(request, message)
+
+ if request.method == 'POST' and request.POST.get('decide_page', False):
+ if request.POST.get('allow', False):
+ TrustedRoot.objects.get_or_create(
+ openid=openid, trust_root=orequest.trust_root)
+ if not conf.FAILED_DISCOVERY_AS_VALID:
+ request.session[get_trust_session_key(orequest)] = True
+ return HttpResponseRedirect(reverse('openid-provider-root'))
+
+ oresponse = orequest.answer(False)
+ logger.debug('orequest.answer(False)')
+ return prep_response(request, orequest, oresponse)
+
+ return render_to_response('openid_provider/decide.html', {
+ 'title': _('Trust this site?'),
+ 'trust_root': orequest.trust_root,
+ 'trust_root_valid': trust_root_valid,
+ 'return_to': orequest.return_to,
+ 'identity': orequest.identity,
+ }, context_instance=RequestContext(request))
+
+def error_page(request, msg):
+ return render_to_response('openid_provider/error.html', {
+ 'title': _('Error'),
+ 'msg': msg,
+ }, context_instance=RequestContext(request))
+
+class SafeQueryDict(QueryDict):
+ """
+ A custom QueryDict class that implements a urlencode method
+ knowing how to exempt some characters as safe.
+
+ Backported from Django 1.3
+ """
+ def urlencode(self, safe=None):
+ output = []
+ if safe:
+ encode = lambda k, v: '%s=%s' % (quote(k, safe), quote(v, safe))
+ else:
+ encode = lambda k, v: urlencode({k: v})
+ for k, list_ in self.lists():
+ k = smart_str(k, self.encoding)
+ output.extend([encode(k, smart_str(v, self.encoding))
+ for v in list_])
+ return '&'.join(output)
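+
+# For example (hypothetical values): landing_page() below builds the 'next'
+# query string with urlencode(safe='/'), so a path like '/openid/decide/'
+# stays readable instead of being escaped to '%2Fopenid%2Fdecide%2F'.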
+
+def landing_page(request, orequest, login_url=None,
+ redirect_field_name=REDIRECT_FIELD_NAME):
+ """
+ The page shown when the user attempts to sign in somewhere using OpenID
+ but is not authenticated with the site. For idproxy.net, a message telling
+ them to log in manually is displayed.
+ """
+ request.session['OPENID_REQUEST'] = orequest.message.toPostArgs()
+ logger.debug(
+ 'Set OPENID_REQUEST to %s in session %s',
+ request.session['OPENID_REQUEST'], request.session)
+ if not login_url:
+ login_url = settings.LOGIN_URL
+ path = request.get_full_path()
+ login_url_parts = list(urlparse.urlparse(login_url))
+ if redirect_field_name:
+ querystring = SafeQueryDict(login_url_parts[4], mutable=True)
+ querystring[redirect_field_name] = path
+ login_url_parts[4] = querystring.urlencode(safe='/')
+ return HttpResponseRedirect(urlparse.urlunparse(login_url_parts))
+
+def openid_is_authorized(request, identity_url, trust_root):
+ """
+ Check that they own the given identity URL, and that the trust_root is
+ in their whitelist of trusted sites.
+ """
+ if not request.user.is_authenticated():
+ return None
+
+ openid = openid_get_identity(request, identity_url)
+ if openid is None:
+ return None
+
+ if openid.trustedroot_set.filter(trust_root=trust_root).count() < 1:
+ return None
+
+ return openid
+
+
+def url_is_equivalent(a, b):
+ """
+ Test if two URLs are equivalent OpenIDs.
+ """
+ return a.rstrip('/') == b.rstrip('/')
+
+
+def openid_get_identity(request, identity_url):
+ """
+ Select an OpenID based on the claimed identity_url.
+
+ If none was claimed, identity_url will be
+ 'http://specs.openid.net/auth/2.0/identifier_select'; in that case
+ return the user's default OpenID, or any of their OpenIDs if no
+ default is set. Otherwise return None unless the claimed URL matches
+ one of the user's OpenIDs.
+ """
+ logger.debug('Looking for %s in user %s set of OpenIDs %s',
+ identity_url, request.user, request.user.openid_set)
+ for openid in request.user.openid_set.iterator():
+ if url_is_equivalent(identity_url, url_for_openid(request, openid)):
+ return openid
+ if identity_url == IDENTIFIER_SELECT_URL:
+ # no claim was made, choose user default openid:
+ openids = request.user.openid_set.filter(default=True)
+ if openids.count() == 1:
+ return openids[0]
+ if request.user.openid_set.count() > 0:
+ return request.user.openid_set.all()[0]
+ return None
+
+
+def openid_get_server(request):
+ return Server(
+ get_store(request),
+ op_endpoint=request.build_absolute_uri(
+ reverse('openid-provider-root')))
diff --git a/baserock_openid_provider/packer_template.json b/baserock_openid_provider/packer_template.json
new file mode 100644
index 00000000..0de9bc84
--- /dev/null
+++ b/baserock_openid_provider/packer_template.json
@@ -0,0 +1,77 @@
+{
+ "builders": [
+ {
+ "name": "development",
+ "type": "docker",
+ "image": "fedora:20",
+ "commit": true,
+ "run_command": ["-d", "-i", "-t", "{{.Image}}", "/bin/sh"]
+ },
+ {
+ "name": "production",
+ "type": "openstack",
+ "image_name": "baserock_openid_provider",
+ "flavor": "f0577618-9125-4948-b450-474e225bbc4c",
+ "source_image": "742e0414-c985-4994-b307-4aafade942b3",
+ "networks": ["d079fa3e-2558-4bcb-ad5a-279040c202b5"],
+ "floating_ip": "185.43.218.169",
+ "use_floating_ip": true,
+ "ssh_username": "fedora"
+ }
+ ],
+ "provisioners": [
+ {
+ "type": "shell",
+ "inline": [
+ "sudo chown fedora:fedora /srv"
+ ],
+ "only": ["production"]
+ },
+ {
+ "type": "file",
+ "source": "baserock_openid_provider",
+ "destination": "/srv",
+ "only": ["production"]
+ },
+ {
+ "type": "shell",
+ "inline": [ "sudo yum install -y ansible"]
+ },
+ {
+ "type": "ansible-local",
+ "playbook_file": "baserock_openid_provider/image-config.yml",
+ "command": "sudo ansible-playbook"
+ },
+ {
+ "type": "shell",
+ "inline": [
+ "ln -s /srv/test-baserock-infrastructure/baserock_openid_provider /srv"
+ ],
+ "only": ["development"]
+ },
+ {
+ "type": "shell",
+ "inline": [
+ "sudo yum install -y libselinux-python",
+ "sudo ansible localhost -m selinux -a state=disabled",
+ "sudo setenforce 0"
+ ],
+ "only": ["production"]
+ },
+ {
+ "type": "shell",
+ "inline": [ "sync; sync; sleep 10; sync" ],
+ "only": ["production"]
+ }
+ ],
+ "post-processors": [
+ [
+ {
+ "type": "docker-tag",
+ "repository": "baserock/openid-provider",
+ "tag": "latest",
+ "only": ["development"]
+ }
+ ]
+ ]
+}
diff --git a/baserock_openid_provider/templates/base.html b/baserock_openid_provider/templates/base.html
new file mode 100644
index 00000000..6b1a4bc0
--- /dev/null
+++ b/baserock_openid_provider/templates/base.html
@@ -0,0 +1,38 @@
+{% load i18n %}
+<!DOCTYPE html>
+<html lang="en">
+
+<head>
+ <link rel="stylesheet" href="{{ STATIC_URL }}style.css" />
+ <title>{% block title %}User test{% endblock %}</title>
+</head>
+
+<body>
+ <div id="header">
+ {% block header %}
+ <a href="{% url 'index' %}">{% trans "Home" %}</a> |
+
+ {% if user.is_authenticated %}
+ {% trans "Logged in" %}: {{ user.username }}
+ (<a href="{% url 'auth_logout' %}">{% trans "Log out" %}</a> |
+ <a href="{% url 'auth_password_change' %}">{% trans "Change password" %}</a>)
+ {% else %}
+ <a href="{% url 'auth_login' %}">{% trans "Log in" %}</a> |
+ <a href="{% url 'registration_register' %}">{% trans "Register" %}</a>
+ {% endif %}
+ <hr />
+ {% endblock %}
+ </div>
+
+ <div id="content">
+ {% block content %}{% endblock %}
+ </div>
+
+ <div id="footer">
+ {% block footer %}
+ <hr />
+ {% endblock %}
+ </div>
+</body>
+
+</html>
diff --git a/baserock_openid_provider/templates/index.html b/baserock_openid_provider/templates/index.html
new file mode 100644
index 00000000..1cb4bf73
--- /dev/null
+++ b/baserock_openid_provider/templates/index.html
@@ -0,0 +1,15 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<p>This is the Baserock OpenID provider.</p>
+
+{% if user.is_authenticated %}
+ <p>You are registered as {{ user.get_full_name }}.</p>
+
+ <p>Your OpenID is:
+ <a href="https://openid.baserock.org/openid/{{ user.username }}/">https://openid.baserock.org/openid/{{ user.username }}/</a>
+ </p>
+{% endif %}
+
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/activate.html b/baserock_openid_provider/templates/registration/activate.html
new file mode 100644
index 00000000..8deb01c8
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/activate.html
@@ -0,0 +1,8 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+
+<p>{% trans "Account activation failed" %}</p>
+
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/activation_complete.html b/baserock_openid_provider/templates/registration/activation_complete.html
new file mode 100644
index 00000000..a0a268ca
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/activation_complete.html
@@ -0,0 +1,10 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<p>{% trans "Your account is now activated." %}</p>
+
+<p>Your OpenID is:
+<a href="http://openid.baserock.org/openid/{{ user.username }}/">http://openid.baserock.org/openid/{{ user.username }}/</a>
+</p>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/activation_email.txt b/baserock_openid_provider/templates/registration/activation_email.txt
new file mode 100644
index 00000000..bfa784d9
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/activation_email.txt
@@ -0,0 +1,6 @@
+{% load i18n %}
+{% trans "Activate account at" %} {{ site.name }}:
+
+https://{{ site.domain }}{% url 'registration_activate' activation_key %}
+
+{% blocktrans %}Link is valid for {{ expiration_days }} days.{% endblocktrans %}
diff --git a/baserock_openid_provider/templates/registration/activation_email_subject.txt b/baserock_openid_provider/templates/registration/activation_email_subject.txt
new file mode 100644
index 00000000..24f477cb
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/activation_email_subject.txt
@@ -0,0 +1 @@
+{% load i18n %}{% trans "Account activation on" %} {{ site.name }}
diff --git a/baserock_openid_provider/templates/registration/login.html b/baserock_openid_provider/templates/registration/login.html
new file mode 100644
index 00000000..9b245989
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/login.html
@@ -0,0 +1,15 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<form method="post" action=".">
+ {% csrf_token %}
+ {{ form.as_p }}
+
+ <input type="submit" value="{% trans 'Log in' %}" />
+ <input type="hidden" name="next" value="{{ next }}" />
+</form>
+
+<p>{% trans "Forgot password" %}? <a href="{% url 'auth_password_reset' %}">{% trans "Reset it" %}</a>!</p>
+<p>{% trans "Not a member" %}? <a href="{% url 'registration_register' %}">{% trans "Register" %}</a>!</p>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/logout.html b/baserock_openid_provider/templates/registration/logout.html
new file mode 100644
index 00000000..f8da51fa
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/logout.html
@@ -0,0 +1,6 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<p>{% trans "Logged out" %}</p>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/password_change_done.html b/baserock_openid_provider/templates/registration/password_change_done.html
new file mode 100644
index 00000000..659be0a4
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/password_change_done.html
@@ -0,0 +1,6 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<p>{% trans "Password changed" %}</p>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/password_change_form.html b/baserock_openid_provider/templates/registration/password_change_form.html
new file mode 100644
index 00000000..10b1fc13
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/password_change_form.html
@@ -0,0 +1,11 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<form method="post" action=".">
+ {% csrf_token %}
+ {{ form.as_p }}
+
+ <input type="submit" value="{% trans 'Submit' %}" />
+</form>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/password_reset_complete.html b/baserock_openid_provider/templates/registration/password_reset_complete.html
new file mode 100644
index 00000000..55993e85
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/password_reset_complete.html
@@ -0,0 +1,10 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+
+<p>{% trans "Password reset successfully" %}</p>
+
+<p><a href="{% url 'auth_login' %}">{% trans "Log in" %}</a></p>
+
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/password_reset_confirm.html b/baserock_openid_provider/templates/registration/password_reset_confirm.html
new file mode 100644
index 00000000..33bd276a
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/password_reset_confirm.html
@@ -0,0 +1,21 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+
+{% if validlink %}
+
+<form method="post" action=".">
+ {% csrf_token %}
+ {{ form.as_p }}
+
+ <input type="submit" value="{% trans 'Submit' %}" />
+</form>
+
+{% else %}
+
+<p>{% trans "Password reset failed" %}</p>
+
+{% endif %}
+
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/password_reset_done.html b/baserock_openid_provider/templates/registration/password_reset_done.html
new file mode 100644
index 00000000..6057ccbe
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/password_reset_done.html
@@ -0,0 +1,6 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<p>{% trans "An email with password reset instructions has been sent." %}</p>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/password_reset_email.html b/baserock_openid_provider/templates/registration/password_reset_email.html
new file mode 100644
index 00000000..c78893ed
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/password_reset_email.html
@@ -0,0 +1,5 @@
+{% load i18n %}
+{% blocktrans %}Reset password at {{ site_name }}{% endblocktrans %}:
+{% block reset_link %}
+{{ protocol }}://{{ domain }}{% url 'auth_password_reset_confirm' uid token %}
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/password_reset_form.html b/baserock_openid_provider/templates/registration/password_reset_form.html
new file mode 100644
index 00000000..10b1fc13
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/password_reset_form.html
@@ -0,0 +1,11 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<form method="post" action=".">
+ {% csrf_token %}
+ {{ form.as_p }}
+
+ <input type="submit" value="{% trans 'Submit' %}" />
+</form>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/registration_closed.html b/baserock_openid_provider/templates/registration/registration_closed.html
new file mode 100644
index 00000000..c73cfacc
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/registration_closed.html
@@ -0,0 +1,6 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+ <p>{% trans "Registration is currently closed." %}</p>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/registration_complete.html b/baserock_openid_provider/templates/registration/registration_complete.html
new file mode 100644
index 00000000..6bc06bc0
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/registration_complete.html
@@ -0,0 +1,11 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<p>You are now registered. An activation email has been sent to you with
+a link that you will need to click to activate your account.</p>
+
+<p>The mail should arrive within 15 minutes, depending on your mail provider's
+use of <a href="https://en.wikipedia.org/wiki/Greylisting">greylisting</a>.</p>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/registration_form.html b/baserock_openid_provider/templates/registration/registration_form.html
new file mode 100644
index 00000000..6d0854d6
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/registration_form.html
@@ -0,0 +1,11 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<form method="post" action=".">
+ {% csrf_token %}
+ {{ form.as_p }}
+
+ <input type="submit" value="{% trans 'Submit' %}" />
+</form>
+{% endblock %}
diff --git a/baserock_openid_provider/uwsgi.ini b/baserock_openid_provider/uwsgi.ini
new file mode 100644
index 00000000..0849096d
--- /dev/null
+++ b/baserock_openid_provider/uwsgi.ini
@@ -0,0 +1,22 @@
+# Configuration for uWSGI web application gateway for Baserock OpenID provider.
+#
+# System-wide configuration should live in /etc/uwsgi.ini.
+#
+# Some good reading for uWSGI:
+# - http://uwsgi-docs.readthedocs.org/en/latest/ThingsToKnow.html
+# - http://uwsgi-docs.readthedocs.org/en/latest/Configuration.html
+
+[uwsgi]
+need-plugin = python
+
+# This slightly weird setup seems to be the only way to avoid
+# django.ImproperlyConfigured exceptions.
+pythonpath = /srv/baserock_openid_provider
+chdir = /srv/baserock_openid_provider/baserock_openid_provider
+wsgi = wsgi
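+
+# An alternative that may also work (untested here) is naming the WSGI
+# module explicitly and setting DJANGO_SETTINGS_MODULE, e.g.:
+#   module = baserock_openid_provider.wsgi
+#   env = DJANGO_SETTINGS_MODULE=baserock_openid_provider.settings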
+
+# These numbers are rough initial guesses, not tuned values. Testing
+# should be done to find good values.
+processes = 1
+
+buffer-size = 32768
diff --git a/baserock_storyboard/develop.sh b/baserock_storyboard/develop.sh
new file mode 100755
index 00000000..79def875
--- /dev/null
+++ b/baserock_storyboard/develop.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+# Set up a development environment in a container.
+
+exec docker run -i -t --rm \
+ --publish=127.0.0.1:80:80 \
+ --volume=`pwd`:/src/test-baserock-infrastructure \
+ baserock/storyboard
+
diff --git a/baserock_storyboard/packer_template.json b/baserock_storyboard/packer_template.json
new file mode 100644
index 00000000..dc7cc95a
--- /dev/null
+++ b/baserock_storyboard/packer_template.json
@@ -0,0 +1,62 @@
+{
+ "builders": [
+ {
+ "name": "development",
+ "type": "docker",
+ "image": "fedora:20",
+ "commit": true,
+ "run_command": ["-d", "-i", "-t", "{{.Image}}", "/bin/sh"]
+ },
+ {
+ "name": "production",
+ "type": "openstack",
+ "image_name": "baserock_storyboard",
+ "flavor": "f0577618-9125-4948-b450-474e225bbc4c",
+ "source_image": "742e0414-c985-4994-b307-4aafade942b3",
+ "networks": ["d079fa3e-2558-4bcb-ad5a-279040c202b5"],
+ "floating_ip": "85.199.252.159",
+ "use_floating_ip": true,
+ "ssh_username": "fedora"
+ }
+ ],
+ "provisioners": [
+ {
+ "type": "shell",
+ "inline": [
+ "sudo yum install -y ruby gem",
+ "sudo gem install puppet"
+ ]
+ },
+ {
+ "type": "shell",
+ "inline": [
+ "sudo chown fedora:fedora /srv"
+ ],
+ "only": ["production"]
+ },
+ {
+ "type": "file",
+ "source": "baserock_storyboard",
+ "destination": "/srv",
+ "only": ["production"]
+ },
+ { "type": "shell",
+ "inline": [
+ "cd /srv/baserock_storyboard",
+ "sudo /usr/local/bin/puppet module build puppet-storyboard",
+ "sudo /usr/local/bin/puppet module install puppet-storyboard/pkg/openstack-storyboard-*.*.*.tar.gz",
+ "sudo /usr/local/bin/puppet module install camptocamp-openssl"
+ ]
+ }
+ ],
+ "post-processors": [
+ [
+ {
+ "type": "docker-tag",
+ "repository": "baserock/storyboard",
+ "tag": "latest",
+ "only": ["development"]
+ }
+ ]
+ ]
+}
diff --git a/baserock_storyboard/projects.yaml b/baserock_storyboard/projects.yaml
new file mode 100644
index 00000000..91dd75b5
--- /dev/null
+++ b/baserock_storyboard/projects.yaml
@@ -0,0 +1,31 @@
+# Projects defined for Baserock Storyboard
+
+# This file lives in <http://git.baserock.org/baserock/baserock/infrastructure>.
+# This is a temporary version for the work-in-progress storyboard.
+
+# If you update this list, you'll need to log into storyboard.baserock.org and
+# run the following:
+#
+# sudo -u apache storyboard-db-manage \
+# --config-file /etc/storyboard/storyboard.conf \
+# load_projects ./projects.yaml
+
+- project: baserock/definitions
+ description: Baserock reference system definitions
+ use-storyboard: true
+
+- project: baserock/import
+ description: Baserock Import Tool
+ use-storyboard: true
+
+- project: baserock/morph
+ description: Morph build tool
+ use-storyboard: true
+
+- project: baserock/infrastructure
+ description: baserock.org infrastructure
+ use-storyboard: true
+
+- project: baserock/wiki
+ description: Baserock Wiki at http://wiki.baserock.org/
+ use-storyboard: true
diff --git a/baserock_storyboard/puppet-storyboard b/baserock_storyboard/puppet-storyboard
new file mode 160000
+Subproject cf3e84f0dfa52e57bc9892b1ee1147889b154e5
diff --git a/baserock_storyboard/site.pp b/baserock_storyboard/site.pp
new file mode 100644
index 00000000..32323877
--- /dev/null
+++ b/baserock_storyboard/site.pp
@@ -0,0 +1,46 @@
+node default {
+ group { 'ssl-cert':
+ ensure => 'present'
+ }
+
+ # This directory doesn't seem to exist by default in Fedora
+ file { '/etc/ssl/private':
+    ensure => directory,
+ before => Class['storyboard::cert']
+ }
+
+ # TEMPORARY SSL private key
+ openssl::certificate::x509 { 'storyboard_dummy':
+ country => 'UK',
+ organization => 'The Baserock Project',
+ commonname => 'baserock.org',
+ base_dir => '/tmp/',
+ password => 'insecure',
+ before => Class['storyboard::cert']
+ }
+
+ class { 'storyboard::cert':
+ ssl_cert_file => '/tmp/storyboard_dummy.crt',
+ ssl_key_file => '/tmp/storyboard_dummy.key',
+ ssl_ca_file => '/etc/ssl/certs/ca-bundle.crt'
+ }
+
+ # need class storyboard::rabbitmq too
+
+ class { 'storyboard::application':
+ openid_url => 'https://openid.baserock.org/',
+
+ mysql_host => '192.168.222.30',
+ mysql_database => 'storyboard',
+ mysql_user => 'storyboard',
+ # FIXME: need to read this from a file in /var/lib
+ mysql_user_password => 'storyboard_insecure',
+
+ rabbitmq_host => 'localhost',
+ rabbitmq_port => 5672,
+ rabbitmq_vhost => '/',
+ rabbitmq_user => 'storyboard',
+ # FIXME: need to read this from a file in /var/lib
+ rabbitmq_user_password => 'storyboard_insecure'
+ }
+}
diff --git a/certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert b/certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert
new file mode 100644
index 00000000..78a80f5e
--- /dev/null
+++ b/certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert
@@ -0,0 +1,118 @@
+-----BEGIN CERTIFICATE-----
+MIIGkTCCBXmgAwIBAgIHBv5yWci2CjANBgkqhkiG9w0BAQsFADCBjDELMAkGA1UE
+BhMCSUwxFjAUBgNVBAoTDVN0YXJ0Q29tIEx0ZC4xKzApBgNVBAsTIlNlY3VyZSBE
+aWdpdGFsIENlcnRpZmljYXRlIFNpZ25pbmcxODA2BgNVBAMTL1N0YXJ0Q29tIENs
+YXNzIDIgUHJpbWFyeSBJbnRlcm1lZGlhdGUgU2VydmVyIENBMB4XDTE1MDMyNjIz
+MjEyM1oXDTE3MDMyNzA5MjcxOFowgZUxCzAJBgNVBAYTAkdCMRMwEQYDVQQIEwpN
+YW5jaGVzdGVyMRMwEQYDVQQHEwpNYW5jaGVzdGVyMRswGQYDVQQKExJEYW5pZWwg
+U2lsdmVyc3RvbmUxFzAVBgNVBAMUDiouYmFzZXJvY2sub3JnMSYwJAYJKoZIhvcN
+AQkBFhdwb3N0bWFzdGVyQGJhc2Vyb2NrLm9yZzCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAM4nR/R7lmJ9wdtp9AqG3AWjCPrKcZp1JSnCe6K5y4tqvT+A
+GJ6FyJHLzEaiYTpvTIALTQQEhqj/POG3wVZunj9MUJLuXRtFfyEmcHwmKDYhfxsW
+KrqI3N4K5WYZWA/W8Ly8shltp968ub0KP6PW9kPDgtAZ2Ds96T2bqTiVZkrS/pwq
+8mNtdIhxUs3L5j/8zGY5gpmFcDQiRTsHf6qwpce0xzK425WhPjdjeSIf/LTEEebI
+jdausyhssM1TSA3obfV5pfovRG/tr2FOt38WoH7qeImDWFW8s5l5089XbgD4nXiZ
+9RG4FIQnlaIYyBOIXw56ZjeOsQASNE8Z5J1icssCAwEAAaOCAuswggLnMAkGA1Ud
+EwQCMAAwCwYDVR0PBAQDAgOoMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcD
+ATAdBgNVHQ4EFgQU74kXy6Zm5IHkNEdJpyROwvWGBBUwHwYDVR0jBBgwFoAUEdsj
+Rf1UzGpxb4SKA9e+9wEvJoYwJwYDVR0RBCAwHoIOKi5iYXNlcm9jay5vcmeCDGJh
+c2Vyb2NrLm9yZzCCAVYGA1UdIASCAU0wggFJMAgGBmeBDAECAjCCATsGCysGAQQB
+gbU3AQIDMIIBKjAuBggrBgEFBQcCARYiaHR0cDovL3d3dy5zdGFydHNzbC5jb20v
+cG9saWN5LnBkZjCB9wYIKwYBBQUHAgIwgeowJxYgU3RhcnRDb20gQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwAwIBARqBvlRoaXMgY2VydGlmaWNhdGUgd2FzIGlzc3Vl
+ZCBhY2NvcmRpbmcgdG8gdGhlIENsYXNzIDIgVmFsaWRhdGlvbiByZXF1aXJlbWVu
+dHMgb2YgdGhlIFN0YXJ0Q29tIENBIHBvbGljeSwgcmVsaWFuY2Ugb25seSBmb3Ig
+dGhlIGludGVuZGVkIHB1cnBvc2UgaW4gY29tcGxpYW5jZSBvZiB0aGUgcmVseWlu
+ZyBwYXJ0eSBvYmxpZ2F0aW9ucy4wNQYDVR0fBC4wLDAqoCigJoYkaHR0cDovL2Ny
+bC5zdGFydHNzbC5jb20vY3J0Mi1jcmwuY3JsMIGOBggrBgEFBQcBAQSBgTB/MDkG
+CCsGAQUFBzABhi1odHRwOi8vb2NzcC5zdGFydHNzbC5jb20vc3ViL2NsYXNzMi9z
+ZXJ2ZXIvY2EwQgYIKwYBBQUHMAKGNmh0dHA6Ly9haWEuc3RhcnRzc2wuY29tL2Nl
+cnRzL3N1Yi5jbGFzczIuc2VydmVyLmNhLmNydDAjBgNVHRIEHDAahhhodHRwOi8v
+d3d3LnN0YXJ0c3NsLmNvbS8wDQYJKoZIhvcNAQELBQADggEBAI8iIwqDTd673Dvk
+JNon+bcUoTfWAOasthSAbE646Xly3G4o29egj+FciVRk37arEXU2tJYOt+ypBEgZ
+zWHwdf/uZaUFnxZCPbb1KuAeNnKMS2OWWrQGSwqw5aGiKR2ianDpNXsmNtroTjFM
+5rXCs0s5WWTBE2Jgw7shGG0GD1xaHUlMQg0vRQeRRFd/NHvEuKC7Ry8zKlMHRIbu
+Osr+lwq95GsC96vcXteL8ELnIuPUrWrixnqMPyMvi/01YA2P/r3BWlEmWTphVBrQ
+VoE7IHAL9DUzw0nxVDO/tvyqbAgpckat1zsIYQIAgo75ExXo+kJznJllT9BsDsfD
+5D3fiF8=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIGNDCCBBygAwIBAgIBGjANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
+Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwHhcNMDcxMDI0MjA1NzA5WhcNMTcxMDI0MjA1NzA5WjCB
+jDELMAkGA1UEBhMCSUwxFjAUBgNVBAoTDVN0YXJ0Q29tIEx0ZC4xKzApBgNVBAsT
+IlNlY3VyZSBEaWdpdGFsIENlcnRpZmljYXRlIFNpZ25pbmcxODA2BgNVBAMTL1N0
+YXJ0Q29tIENsYXNzIDIgUHJpbWFyeSBJbnRlcm1lZGlhdGUgU2VydmVyIENBMIIB
+IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4k85L6GMmoWtCA4IPlfyiAEh
+G5SpbOK426oZGEY6UqH1D/RujOqWjJaHeRNAUS8i8gyLhw9l33F0NENVsTUJm9m8
+H/rrQtCXQHK3Q5Y9upadXVACHJuRjZzArNe7LxfXyz6CnXPrB0KSss1ks3RVG7RL
+hiEs93iHMuAW5Nq9TJXqpAp+tgoNLorPVavD5d1Bik7mb2VsskDPF125w2oLJxGE
+d2H2wnztwI14FBiZgZl1Y7foU9O6YekO+qIw80aiuckfbIBaQKwn7UhHM7BUxkYa
+8zVhwQIpkFR+ZE3EMFICgtffziFuGJHXuKuMJxe18KMBL47SLoc6PbQpZ4rEAwID
+AQABo4IBrTCCAakwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
+VR0OBBYEFBHbI0X9VMxqcW+EigPXvvcBLyaGMB8GA1UdIwQYMBaAFE4L7xqkQFul
+F2mHMMo0aEPQQa7yMGYGCCsGAQUFBwEBBFowWDAnBggrBgEFBQcwAYYbaHR0cDov
+L29jc3Auc3RhcnRzc2wuY29tL2NhMC0GCCsGAQUFBzAChiFodHRwOi8vd3d3LnN0
+YXJ0c3NsLmNvbS9zZnNjYS5jcnQwWwYDVR0fBFQwUjAnoCWgI4YhaHR0cDovL3d3
+dy5zdGFydHNzbC5jb20vc2ZzY2EuY3JsMCegJaAjhiFodHRwOi8vY3JsLnN0YXJ0
+c3NsLmNvbS9zZnNjYS5jcmwwgYAGA1UdIAR5MHcwdQYLKwYBBAGBtTcBAgEwZjAu
+BggrBgEFBQcCARYiaHR0cDovL3d3dy5zdGFydHNzbC5jb20vcG9saWN5LnBkZjA0
+BggrBgEFBQcCARYoaHR0cDovL3d3dy5zdGFydHNzbC5jb20vaW50ZXJtZWRpYXRl
+LnBkZjANBgkqhkiG9w0BAQUFAAOCAgEAnQfh7pB2MWcWRXCMy4SLS1doRKWJwfJ+
+yyiL9edwd9W29AshYKWhdHMkIoDW2LqNomJdCTVCKfs5Y0ULpLA4Gmj0lRPM4EOU
+7Os5GuxXKdmZbfWEzY5zrsncavqenRZkkwjHHMKJVJ53gJD2uSl26xNnSFn4Ljox
+uMnTiOVfTtIZPUOO15L/zzi24VuKUx3OrLR2L9j3QGPV7mnzRX2gYsFhw3XtsntN
+rCEnME5ZRmqTF8rIOS0Bc2Vb6UGbERecyMhK76F2YC2uk/8M1TMTn08Tzt2G8fz4
+NVQVqFvnhX76Nwn/i7gxSZ4Nbt600hItuO3Iw/G2QqBMl3nf/sOjn6H0bSyEd6Si
+BeEX/zHdmvO4esNSwhERt1Axin/M51qJzPeGmmGSTy+UtpjHeOBiS0N9PN7WmrQQ
+oUCcSyrcuNDUnv3xhHgbDlePaVRCaHvqoO91DweijHOZq1X1BwnSrzgDapADDC+P
+4uhDwjHpb62H5Y29TiyJS1HmnExUdsASgVOb7KD8LJzaGJVuHjgmQid4YAjff20y
+6NjAbx/rJnWfk/x7G/41kNxTowemP4NVCitOYoIlzmYwXSzg+RkbdbmdmFamgyd6
+0Y+NWZP8P3PXLrQsldiL98l+x/ydrHIEH9LMF/TtNGCbnkqXBP7dcg5XVFEGcE3v
+qhykguAzx/Q=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
+Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9
+MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
+U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
+cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
+pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
+OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
+Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
+Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
+HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
+Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
+Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
+26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
+AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE
+FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j
+ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js
+LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM
+BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0
+Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy
+dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh
+cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh
+YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg
+dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp
+bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ
+YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT
+TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ
+9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8
+jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW
+FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz
+ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1
+ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L
+EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu
+L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq
+yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC
+O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V
+um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh
+NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14=
+-----END CERTIFICATE-----
diff --git a/clusters/mason-system-x86_64-openstack-deploy.morph b/clusters/mason-system-x86_64-openstack-deploy.morph
new file mode 100644
index 00000000..a03435d3
--- /dev/null
+++ b/clusters/mason-system-x86_64-openstack-deploy.morph
@@ -0,0 +1,55 @@
+name: mason-system-x86_64-openstack-deploy
+kind: cluster
+description: |
+ An example cluster morphology demonstrating how to deploy a Mason
+ instance into an OpenStack Cloud.
+
+ Mason consists of a distbuild system and Zuul/turbo-hipster to run
+ tests. In the future, we should make it possible to deploy a Mason
+ instance which uses an existing distbuild network for builds,
+ rather than setting up its own.
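+
+  Deploying it is then a matter of running something like the
+  following (illustrative) from this repository:
+
+      morph deploy clusters/mason-system-x86_64-openstack-deploy.morph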
+
+systems:
+- morph: systems/mason-system-x86_64-generic.morph
+ deploy-defaults:
+ # The following are all needed for extensions other than mason.configure
+ ARTIFACT_CACHE_SERVER: cache.baserock.org
+ CONTROLLERHOST: mason-system-x86-64
+ DISTBUILD_CONTROLLER: true
+ DISTBUILD_WORKER: true
+ RAM_SIZE: 8G
+ DISK_SIZE: 4G
+ VCPUS: 2
+ TROVE_HOST: git.baserock.org
+ TROVE_ID: baserock
+ WORKERS: mason-system-x86-64
+ WORKER_SSH_KEY: ssh-keys/worker.key
+ HOSTNAME: mason-system-x86-64
+ CLOUD_INIT: yes
+ KERNEL_ARGS: console=ttyS0 console=tty0
+ INSTALL_FILES: distbuild/manifest
+ # It could be worthwhile setting these in your environment instead of here
+ OPENSTACK_AUTH_URL: auth-url
+ OPENSTACK_IMAGENAME: mason-system-x86_64
+ OPENSTACK_NETWORK_ID: network-id
+ OPENSTACK_USER: openstack-username
+ OPENSTACK_TENANT: openstack-tenant
+ deploy:
+ mason:
+ type: openstack
+ location: auth-url
+ MASON_OPENSTACK_AUTH_URL: auth-url
+ MASON_OPENSTACK_NETWORK_ID: network-id
+ MASON_OPENSTACK_USER: mason
+ MASON_OPENSTACK_TENANT: mason-tenant
+ GERRIT_USER: mason
+ GERRIT_HOSTNAME: gerrit.example.com
+ GERRIT_SSH_KEY: ssh-keys/worker.key
+ GERRIT_SSH_KEY_PATH: /root/.ssh/id_rsa
+ GEARMAN_HOST: 127.0.0.1
+ MASON_CLUSTER_MORPHOLOGY: clusters/ci.morph
+ MASON_ARCHITECTURE: x86_64
+ MASON_TEST_HOSTS: "x86_64:user@auth-url"
+ MASON_DISTBUILD_CONTROLLERS: "x86_64:not-yet-used"
+ MASON_TEST_INFRASTRUCTURE_TYPE: openstack
+ MASON_UPSTREAM_TROVE: some-writeable-artifact-cache
diff --git a/firewall.yaml b/firewall.yaml
new file mode 100644
index 00000000..64c9200c
--- /dev/null
+++ b/firewall.yaml
@@ -0,0 +1,403 @@
+# OpenStack firewall setup for baserock.org
+#
+# This rather ugly and verbose Ansible script defines the firewall
+# configuration for the baserock.org cloud.
+#
+# OpenStack security group rules are all ACCEPT rules, and an instance
+# can be in multiple security groups.
+#
+# Note that many systems don't have a floating IP assigned and thus are
+# isolated from the internet. Requests to them are proxied by the
+# frontend-haproxy system.
+#
+# This playbook requires the 'neutron_sec_group' module, available in
+# <https://github.com/openstack-ansible/openstack-ansible-modules/>.
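+#
+# Run it with your OpenStack credentials loaded into the environment and
+# the module on Ansible's library path, for example (paths illustrative):
+#
+#   . ./openrc
+#   ANSIBLE_LIBRARY=../openstack-ansible-modules ansible-playbook firewall.yaml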
+
+- hosts: localhost
+ tasks:
+ - name: default security group
+ neutron_sec_group:
+ name: default
+ description: Allow all outgoing traffic, and allow incoming ICMP (ping) and SSH connections
+ state: present
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+
+ rules:
+ - direction: egress
+ port_range_min: 0
+ port_range_max: 255
+ ethertype: IPv4
+ protocol: icmp
+ remote_ip_prefix: 0.0.0.0/0
+
+ - direction: egress
+ port_range_min: 1
+ port_range_max: 65535
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ - direction: egress
+ port_range_min: 1
+ port_range_max: 65535
+ ethertype: IPv4
+ protocol: udp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # ICMP: allow ping!
+ - direction: ingress
+ port_range_min: 0
+ port_range_max: 255
+ ethertype: IPv4
+ protocol: icmp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 22: Allow SSH access to all instances.
+ - direction: ingress
+ port_range_min: 22
+ port_range_max: 22
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # This group is an alternative to 'default' for machines that should be
+ # prevented from connecting to the outside world.
+ - name: internal-only security group
+ neutron_sec_group:
+ name: internal-only
+ description: Allow outgoing connections only to local network, and incoming ICMP and SSH
+ state: present
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+
+ rules:
+ - direction: egress
+ port_range_min: 0
+ port_range_max: 255
+ ethertype: IPv4
+ protocol: icmp
+ remote_ip_prefix: 192.168.222.0/24
+
+ - direction: egress
+ port_range_min: 1
+ port_range_max: 65535
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 192.168.222.0/24
+
+ - direction: egress
+ port_range_min: 1
+ port_range_max: 65535
+ ethertype: IPv4
+ protocol: udp
+ remote_ip_prefix: 192.168.222.0/24
+
+ # ICMP: allow ping!
+ - direction: ingress
+ port_range_min: 0
+ port_range_max: 255
+ ethertype: IPv4
+ protocol: icmp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 22: Allow SSH access to all instances.
+ - direction: ingress
+ port_range_min: 22
+ port_range_max: 22
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ - name: open security group
+ neutron_sec_group:
+ name: open
+ description: Allow inbound traffic on all ports. DO NOT USE EXCEPT FOR TESTING!!!
+ state: present
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+
+ rules:
+ - direction: ingress
+ port_range_min: 1
+ port_range_max: 65535
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ - direction: ingress
+ port_range_min: 1
+ port_range_max: 65535
+ ethertype: IPv4
+ protocol: udp
+ remote_ip_prefix: 0.0.0.0/0
+
+ - name: database-mysql security group
+ neutron_sec_group:
+ name: database-mysql
+ description: Allow internal machines to access MariaDB database.
+ state: present
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+ rules:
+ # 3306: MariaDB
+ - direction: ingress
+ port_range_min: 3306
+ port_range_max: 3306
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ - name: gerrit security group
+ neutron_sec_group:
+ name: gerrit
+ description: Allow access to Gerrit SSH daemon port 29418, plus HTTP, HTTPS and Git protocol.
+ state: present
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+ rules:
+ # 80: HTTP, for browsing repos with cgit, and Git-over-HTTP.
+ - direction: ingress
+ port_range_min: 80
+ port_range_max: 80
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 443: HTTPS, for browsing repos with cgit, and Git-over-HTTPS.
+ - direction: ingress
+ port_range_min: 443
+ port_range_max: 443
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 8080: HTTP, for Gerrit web frontend
+ - direction: ingress
+ port_range_min: 8080
+ port_range_max: 8080
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 9418: Git.
+ - direction: ingress
+ port_range_min: 9418
+ port_range_max: 9418
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 29418: Gerrit SSH daemon.
+ - direction: ingress
+ port_range_min: 29418
+ port_range_max: 29418
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ - name: git-server security group
+ neutron_sec_group:
+ name: git-server
+ description: Allow inbound SSH, HTTP, HTTPS and Git.
+ state: present
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+ rules:
+ # 22: SSH, for Git-over-SSH access.
+ - direction: ingress
+ port_range_min: 22
+ port_range_max: 22
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 80: HTTP, for browsing repos with cgit, and Git-over-HTTP.
+ - direction: ingress
+ port_range_min: 80
+ port_range_max: 80
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 443: HTTPS, for browsing repos with cgit, and Git-over-HTTPS.
+ - direction: ingress
+ port_range_min: 443
+ port_range_max: 443
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 9418: Git.
+ - direction: ingress
+ port_range_min: 9418
+ port_range_max: 9418
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ - name: internal mail relay security group
+ neutron_sec_group:
+ name: internal-mail-relay
+ description: Allow receiving internal-only connections on port 25 for SMTP
+ state: present
+
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+
+ rules:
+ # 25: SMTP, for sending emails.
+ - direction: ingress
+ port_range_min: 25
+ port_range_max: 25
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 192.168.222.0/24
+
+ - name: Mason x86 security group
+ neutron_sec_group:
+ name: mason-x86
+      description: Allow inbound HTTP and HTTPS, and artifact fetches by the cache server on port 8080.
+ state: present
+
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+
+ rules:
+ # 80: HTTP
+ - direction: ingress
+ port_range_min: 80
+ port_range_max: 80
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 443: HTTPS
+ - direction: ingress
+ port_range_min: 443
+ port_range_max: 443
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 7878: morph distbuild controller port. This rule allows a devel
+ # system inside the Baserock cloud to use the Mason instances for
+ # distbuilding, which is useful when building a Baserock release
+ # among other things.
+ - direction: ingress
+ port_range_min: 7878
+ port_range_max: 7878
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 192.168.222.0/24
+
+ # 8080: morph-cache-server server port. The x86 Masons use
+ # cache.baserock.org as the cache server for their distbuild
+ # networks. So cache.baserock.org needs to be able to connect to
+ # them on this port to fetch artifacts.
+ - direction: ingress
+ port_range_min: 8080
+ port_range_max: 8080
+ ethertype: IPv4
+ protocol: tcp
+        remote_ip_prefix: 185.43.218.0/24
+ # It'd be nice to limit access by security group, but it doesn't
+ # seem to actually work. Perhaps because we use external IP to
+ # access instead of internal IP.
+ #remote_group_id: "{{ default_group.sec_group.id }}"
+
+ - name: shared-artifact-cache security group
+ neutron_sec_group:
+ name: shared-artifact-cache
+ description: Allow inbound HTTP, HTTPS and read-only Morph artifact cache access. Allow writable Morph artifact cache access from internal IPs.
+ state: present
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+ rules:
+ # 80: HTTP for cache server web frontend (at the time of writing, this
+ # is a useless and empty cgit page, but we may improve it in future).
+ - direction: ingress
+ port_range_min: 80
+ port_range_max: 80
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 443: HTTPS.
+ - direction: ingress
+ port_range_min: 443
+ port_range_max: 443
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 8080: Read-only Morph artifact cache server.
+ - direction: ingress
+ port_range_min: 8080
+ port_range_max: 8080
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 8081: 'writable cache server' port. Anyone who can connect
+ # to this port can delete or overwrite cached artifacts.
+ #
+ # FIXME: because the Masons use cache.baserock.org instead of
+ # 192.168.0.16 to access the shared artifact cache, we need to
+ # permit traffic from our public IP range. This provides a
+      # theoretical attack vector from other tenancies, so we should
+ # fix the Masons and remove this rule.
+ - direction: ingress
+ port_range_min: 8081
+ port_range_max: 8081
+ ethertype: IPv4
+ protocol: tcp
+        remote_ip_prefix: 185.43.218.0/24
+ # It'd be nice to limit access by security group, but it doesn't
+ # seem to actually work. Perhaps because we use external IP to
+ # access instead of internal IP.
+ #remote_group_id: "{{ default_group.sec_group.id }}"
+
+ - name: web-server security group
+ neutron_sec_group:
+ name: web-server
+ description: Allow inbound HTTP and HTTPS.
+ state: present
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+ rules:
+ # 80: HTTP
+ - direction: ingress
+ port_range_min: 80
+ port_range_max: 80
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 443: HTTPS
+ - direction: ingress
+ port_range_min: 443
+ port_range_max: 443
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
diff --git a/frontend/haproxy.cfg b/frontend/haproxy.cfg
new file mode 100644
index 00000000..5ebbc031
--- /dev/null
+++ b/frontend/haproxy.cfg
@@ -0,0 +1,97 @@
+# HAProxy configuration for Baserock Project front-end proxy.
+
+global
+ maxconn 4000
+
+ daemon
+ pidfile /var/run/haproxy.pid
+ user haproxy
+ group haproxy
+
+ log /dev/log local0
+ stats socket /var/lib/haproxy/stats
+
+ # Maximum number of bits used when generating temporary
+ # keys for DHE key exchange. Higher values involve more CPU
+ # usage, lower values are less secure. HAProxy's default is
+ # 1024, which is too low and HAProxy actually warns if you use
+ # the default.
+ tune.ssl.default-dh-param 2048
+
+defaults
+ mode http
+ timeout connect 5000ms
+ timeout client 50000ms
+ timeout server 50000ms
+
+ log global
+ option httplog
+
+frontend http-in
+ # All HTTP traffic is redirected to HTTPS using the '301 Moved' HTTP code.
+ bind *:80
+ redirect scheme https code 301
+
+frontend https-in
+ # We do 'SSL termination' with HAProxy. So secure requests are received in
+ # the frontend, then decrypted and sent over HTTP on the internal network.
+ # This means we only need to have the certificate in one place, and the
+ # configuration of the other instances is simpler. It does mean that we
+ # need to avoid having any insecure machines in the cloud.
+ bind *:443 ssl crt /etc/pki/tls/private/baserock.pem
+ reqadd X-Forwarded-Proto:\ https
+
+ # Rules below here implement the URL-based forwarding to the
+ # appropriate instance. The hdr(host) call means 'extract the
+ # first Host header from the HTTP request or response', the '-m beg'
+ # switch means 'match against the beginning of it' and the '-i' flag
+ # makes the match case-insensitive.
+ #
+ # See <https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#7>
+ # for more documentation than you ever dreamed of.
+
+ acl host_gerrit hdr(host) -m beg -i gerrit
+ use_backend baserock_gerrit_http if host_gerrit
+
+ acl host_irclogs hdr(host) -m beg -i irclogs
+ use_backend baserock_irclogs_http if host_irclogs
+
+ acl host_mason_x86_32 hdr(host) -m beg -i mason-x86-32
+ use_backend baserock_mason_x86_32_http if host_mason_x86_32
+
+ acl host_mason_x86_64 hdr(host) -m beg -i mason-x86-64
+ use_backend baserock_mason_x86_64_http if host_mason_x86_64
+
+ use_backend baserock_openid_provider_http if { hdr(host) -m beg -i openid }
+
+frontend ssh-in
+ # FIXME: it'd be better if we could limit traffic on port 29418 to
+ # gerrit.baserock.org. There's no way of knowing from an SSH request
+ # which subdomain the user tried to connect to, so for now they can
+    # clone repos from 'ssh://openid.baserock.org:29418' and the like.
+ # For this reason it's probably worth pointing gerrit.baserock.org to
+ # a different floating IP that serves only the gerrit instance.
+ mode tcp
+ bind *:29418
+ default_backend baserock_gerrit_ssh
+
+# Entries here locate each server backend.
+
+backend baserock_gerrit_http
+ server baserock_gerrit 192.168.222.69:8080
+
+backend baserock_gerrit_ssh
+ mode tcp
+ server baserock_gerrit 192.168.222.69:29418
+
+backend baserock_irclogs_http
+ server baserock_irclogs 192.168.222.74:80
+
+backend baserock_mason_x86_32_http
+ server baserock_mason_x86_32 192.168.222.81:80
+
+backend baserock_mason_x86_64_http
+ server baserock_mason_x86_64 192.168.222.80:80
+
+backend baserock_openid_provider_http
+ server baserock_openid_provider 192.168.222.67:80
diff --git a/frontend/instance-backup-config.yml b/frontend/instance-backup-config.yml
new file mode 100644
index 00000000..8f7ca550
--- /dev/null
+++ b/frontend/instance-backup-config.yml
@@ -0,0 +1,23 @@
+# Instance backup configuration for the baserock.org frontend system.
+#
+# We don't need to back anything up from this system, but the backup
+# SSH key needs access to it in order to SSH to the other systems on the
+# internal network.
+---
+- hosts: frontend-haproxy
+ gather_facts: false
+ sudo: yes
+ vars:
+ # The 'backup' key cannot be used to SSH into the 'frontend' machine except
+ # from this IP.
+ PERMITTED_BACKUP_HOSTS: 82.70.136.246/32
+ tasks:
+ - name: backup user
+ user:
+ name: backup
+
+ - name: authorize backup public key
+ authorized_key:
+ user: backup
+ key: "{{ lookup('file', '../keys/backup.key.pub') }}"
+ key_options: 'from="{{ PERMITTED_BACKUP_HOSTS }}",no-agent-forwarding,no-X11-forwarding'
diff --git a/frontend/instance-config.yml b/frontend/instance-config.yml
new file mode 100644
index 00000000..5484c344
--- /dev/null
+++ b/frontend/instance-config.yml
@@ -0,0 +1,18 @@
+# Instance configuration for Baserock HAProxy instance.
+#
+# This playbook should be run after starting an instance of the Baserock
+# frontend image.
+---
+- hosts: frontend-haproxy
+ gather_facts: false
+ sudo: yes
+ tasks:
+ - name: ensure system up to date
+ yum: name=* state=latest
+
+ # To create the .pem file, simply concatenate
+ # certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert with
+ # the private key for that certificate (which is not committed to Git, of
+ # course).
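+  #
+  # For example (the private key filename here is illustrative):
+  #
+  #   cat certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert \
+  #       private/baserock.org-ssl-certificate.key \
+  #       > private/baserock.org-ssl-certificate-temporary-dsilverstone.pem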
+  - name: install SSL certificate
+    copy: src=../private/baserock.org-ssl-certificate-temporary-dsilverstone.pem dest=/etc/pki/tls/private/baserock.pem owner=haproxy mode=400
diff --git a/frontend/packer_template.json b/frontend/packer_template.json
new file mode 100644
index 00000000..79e398a2
--- /dev/null
+++ b/frontend/packer_template.json
@@ -0,0 +1,60 @@
+{
+ "builders": [
+ {
+ "name": "development",
+ "type": "docker",
+ "image": "fedora:20",
+ "commit": true,
+ "run_command": ["-d", "-i", "-t", "{{.Image}}", "/bin/sh"]
+ },
+ {
+ "name": "production",
+ "type": "openstack",
+ "image_name": "frontend-haproxy",
+ "flavor": 2,
+ "source_image": "742e0414-c985-4994-b307-4aafade942b3",
+ "networks": ["d079fa3e-2558-4bcb-ad5a-279040c202b5"],
+ "floating_ip": "85.199.252.162",
+ "use_floating_ip": true,
+ "ssh_username": "fedora"
+ }
+ ],
+ "provisioners": [
+ {
+ "type": "file",
+ "source": "frontend/haproxy.cfg",
+ "destination": "/var/tmp/haproxy.cfg"
+ },
+ {
+ "type": "shell",
+ "inline": [
+ "sudo yum install -y haproxy nc",
+ "sudo mv /var/tmp/haproxy.cfg /etc/haproxy/haproxy.cfg",
+ "sudo mkdir /var/log/journal"
+ ]
+ },
+ {
+ "type": "shell",
+ "inline": [
+ "sudo systemctl enable haproxy.service",
+ "sudo systemctl start haproxy.service"
+ ],
+ "only": ["production"]
+ },
+ {
+ "type": "shell",
+ "inline": [ "sync; sync; sleep 10; sync" ],
+ "only": ["production"]
+ }
+ ],
+ "post-processors": [
+ [
+ {
+ "type": "docker-tag",
+ "repository": "baserock/frontend",
+ "tag": "latest",
+ "only": ["development"]
+ }
+ ]
+ ]
+}
diff --git a/mason.configure b/mason.configure
index 1198ebd0..e0be6b2e 100644
--- a/mason.configure
+++ b/mason.configure
@@ -14,21 +14,6 @@
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# This is a "morph deploy" configuration extension to fully configure
-# a Mason instance at deployment time. It uses the following variables
-# from the environment:
-#
-# * ARTIFACT_CACHE_SERVER
-# * MASON_CLUSTER_MORPHOLOGY
-# * MASON_DEFINITIONS_REF
-# * MASON_DISTBUILD_ARCH
-# * MASON_TEST_HOST
-# * OPENSTACK_NETWORK_ID
-# * TEST_INFRASTRUCTURE_TYPE
-# * TROVE_HOST
-# * TROVE_ID
-# * CONTROLLERHOST
set -e
@@ -39,27 +24,21 @@ set -e
ROOT="$1"
mkdir -p "$ROOT"/usr/lib/mason
-cp mason/mason.sh "$ROOT"/usr/lib/mason/mason.sh
-cp mason/mason-report.sh "$ROOT"/usr/lib/mason/mason-report.sh
cp mason/os-init-script "$ROOT"/usr/lib/mason/os-init-script
-cp mason/mason.timer "$ROOT"/etc/systemd/system/mason.timer
-
-cp mason/mason.service "$ROOT"/etc/systemd/system/mason.service
+cp mason/mason-setup.service "$ROOT"/usr/lib/systemd/system/mason-setup.service
+cp mason/zuul-server.service "$ROOT"/usr/lib/systemd/system/zuul-server.service
+cp mason/zuul-merger.service "$ROOT"/usr/lib/systemd/system/zuul-merger.service
+cp mason/turbo-hipster.service "$ROOT"/usr/lib/systemd/system/turbo-hipster.service
+cp mason/lighttpd.service "$ROOT"/usr/lib/systemd/system/lighttpd.service
##########################################################################
-# Set up httpd web server
+# Create required directories
##########################################################################
-cp mason/httpd.service "$ROOT"/etc/systemd/system/httpd.service
-
-mkdir -p "$ROOT"/srv/mason
-
-cat >>"$ROOT"/etc/httpd.conf <<EOF
-.log:text/plain
-EOF
-
-mkdir -p "$ROOT"/var/mason
+mkdir -p "$ROOT"/var/www/logs
+mkdir -p "$ROOT"/var/lib/zuul
+mkdir -p "$ROOT"/var/log/zuul
##########################################################################
# Copy files needed for Ansible configuration
@@ -70,7 +49,6 @@ mkdir -p "$ROOT/usr/lib/mason-setup"
cp mason/share/* "$ROOT/usr/share/mason-setup"
cp -r mason/ansible "$ROOT/usr/lib/mason-setup/"
-cp mason/mason-setup.service "$ROOT"/etc/systemd/system/mason-setup.service
ln -s ../mason-setup.service "$ROOT"/etc/systemd/system/multi-user.target.wants/mason-setup.service
@@ -84,26 +62,64 @@ if [ -n "$MASON_GENERIC" ]; then
fi
if [ -z "$MASON_CLUSTER_MORPHOLOGY" -a \
- -z "$MASON_DEFINITIONS_REF" -a \
- -z "$MASON_DISTBUILD_ARCH" -a \
- -z "$MASON_TEST_HOST" ]; then
+ -z "$MASON_ARCHITECTURE" -a \
+ -z "$MASON_TEST_HOSTS" -a \
+ -z "$MASON_DISTBUILD_CONTROLLERS" -a \
+ -z "$MASON_TEST_INFRASTRUCTURE_TYPE" -a \
+ -z "$MASON_UPSTREAM_TROVE" ]; then
# No Mason options defined, do nothing.
exit 0
fi
-if [ -z "$ARTIFACT_CACHE_SERVER" -o \
- -z "$CONTROLLERHOST" -o \
- -z "$MASON_CLUSTER_MORPHOLOGY" -o \
- -z "$MASON_DEFINITIONS_REF" -o \
- -z "$MASON_DISTBUILD_ARCH" -o \
- -z "$MASON_TEST_HOST" -o \
+if [ -z "$GERRIT_HTTP_PORT" ]; then
+    GERRIT_HTTP_PORT="8080"
+fi
+
+if [ -z "$GERRIT_GIT_PORT" ]; then
+    GERRIT_GIT_PORT="29418"
+fi
+
+if [ -z "$GEARMAN_PORT" ]; then
+    GEARMAN_PORT="4730"
+fi
+
+if [ -z "$START_GEARMAN" ]; then
+    START_GEARMAN="yes"
+fi
+
+if [ -z "$TROVE_ID" -o \
-z "$TROVE_HOST" -o \
- -z "$TROVE_ID" ]; then
+ -z "$ARTIFACT_CACHE_SERVER" -o \
+ -z "$GERRIT_USER" -o \
+ -z "$GERRIT_HOSTNAME" -o \
+ -z "$GERRIT_HTTP_PORT" -o \
+ -z "$GERRIT_GIT_PORT" -o \
+ -z "$GERRIT_SSH_KEY" -o \
+ -z "$GERRIT_SSH_KEY_PATH" -o \
+ -z "$GEARMAN_HOST" -o \
+ -z "$GEARMAN_PORT" -o \
+ -z "$START_GEARMAN" -o \
+ -z "$MASON_CLUSTER_MORPHOLOGY" -o \
+ -z "$MASON_ARCHITECTURE" -o \
+ -z "$MASON_TEST_HOSTS" -o \
+ -z "$MASON_DISTBUILD_CONTROLLERS" -o \
+ -z "$MASON_TEST_INFRASTRUCTURE_TYPE" -o \
+ -z "$MASON_UPSTREAM_TROVE" ]; then
echo Some options required for Mason were defined, but not all.
exit 1
fi
##########################################################################
+# Copy SSH keys into the system
+##########################################################################
+
+ssh_dir=$(dirname "$ROOT$GERRIT_SSH_KEY_PATH")
+mkdir -p "$ssh_dir"
+cp -a "$GERRIT_SSH_KEY" "$ROOT$GERRIT_SSH_KEY_PATH"
+cp -a "$GERRIT_SSH_KEY".pub "$ROOT$GERRIT_SSH_KEY_PATH".pub
+cp -a mason/ssh-config "$ssh_dir"/config
+
+##########################################################################
# Generate config variable shell snippet
##########################################################################
@@ -114,40 +130,40 @@ python <<'EOF' >"$MASON_DATA/mason.conf"
import os, sys, yaml
mason_configuration={
- 'ARTIFACT_CACHE_SERVER': os.environ['ARTIFACT_CACHE_SERVER'],
- 'MASON_CLUSTER_MORPHOLOGY': os.environ['MASON_CLUSTER_MORPHOLOGY'],
- 'MASON_DEFINITIONS_REF': os.environ['MASON_DEFINITIONS_REF'],
- 'MASON_DISTBUILD_ARCH': os.environ['MASON_DISTBUILD_ARCH'],
- 'MASON_TEST_HOST': os.environ['MASON_TEST_HOST'],
- 'OPENSTACK_NETWORK_ID': os.environ['OPENSTACK_NETWORK_ID'],
- 'TEST_INFRASTRUCTURE_TYPE': os.environ['TEST_INFRASTRUCTURE_TYPE'],
'TROVE_ID': os.environ['TROVE_ID'],
'TROVE_HOST': os.environ['TROVE_HOST'],
- 'CONTROLLERHOST': os.environ['CONTROLLERHOST'],
+ 'ARTIFACT_CACHE_SERVER': os.environ['ARTIFACT_CACHE_SERVER'],
+ 'GERRIT_USER': os.environ['GERRIT_USER'],
+ 'GERRIT_HOSTNAME': os.environ['GERRIT_HOSTNAME'],
+ 'GERRIT_HTTP_PORT': os.environ['GERRIT_HTTP_PORT'],
+ 'GERRIT_GIT_PORT': os.environ['GERRIT_GIT_PORT'],
+ 'GERRIT_SSH_KEY_PATH': os.environ['GERRIT_SSH_KEY_PATH'],
+ 'GEARMAN_HOST': os.environ['GEARMAN_HOST'],
+ 'GEARMAN_PORT': os.environ['GEARMAN_PORT'],
+ 'START_GEARMAN': os.environ['START_GEARMAN'],
+ 'MASON_CLUSTER_MORPHOLOGY': os.environ['MASON_CLUSTER_MORPHOLOGY'],
+ 'MASON_ARCHITECTURE': os.environ['MASON_ARCHITECTURE'],
+ 'MASON_TEST_HOSTS': os.environ['MASON_TEST_HOSTS'],
+ 'MASON_DISTBUILD_CONTROLLERS': os.environ['MASON_DISTBUILD_CONTROLLERS'],
+ 'MASON_TEST_INFRASTRUCTURE_TYPE': os.environ['MASON_TEST_INFRASTRUCTURE_TYPE'],
+ 'MASON_UPSTREAM_TROVE': os.environ['MASON_UPSTREAM_TROVE'],
}
yaml.dump(mason_configuration, sys.stdout, default_flow_style=False)
EOF
-if [ "$TEST_INFRASTRUCTURE_TYPE" = "openstack" ]; then
+if [ "$MASON_TEST_INFRASTRUCTURE_TYPE" = "openstack" ]; then
python <<'EOF' >>"$MASON_DATA/mason.conf"
import os, sys, yaml
openstack_credentials={
- 'OS_USERNAME': os.environ['OPENSTACK_USER'],
- 'OS_TENANT_NAME': os.environ['OPENSTACK_TENANT'],
- 'OS_TENANT_ID': os.environ['OPENSTACK_TENANT_ID'],
- 'OS_AUTH_URL': os.environ['OPENSTACK_AUTH_URL'],
- 'OS_PASSWORD': os.environ['OPENSTACK_PASSWORD'],
+ 'OPENSTACK_NETWORK_ID': os.environ['MASON_OPENSTACK_NETWORK_ID'],
+ 'OS_USERNAME': os.environ['MASON_OPENSTACK_USER'],
+ 'OS_TENANT_NAME': os.environ['MASON_OPENSTACK_TENANT'],
+ 'OS_AUTH_URL': os.environ['MASON_OPENSTACK_AUTH_URL'],
+ 'OS_PASSWORD': os.environ['MASON_OPENSTACK_PASSWORD'],
}
yaml.dump(openstack_credentials, sys.stdout, default_flow_style=False)
EOF
fi
-
-##########################################################################
-# Enable services
-##########################################################################
-
-ln -s ../mason.timer "$ROOT"/etc/systemd/system/multi-user.target.wants/mason.timer
-ln -s ../httpd.service "$ROOT"/etc/systemd/system/multi-user.target.wants/httpd.service
diff --git a/mason.configure.help b/mason.configure.help
new file mode 100644
index 00000000..d7b9926d
--- /dev/null
+++ b/mason.configure.help
@@ -0,0 +1,127 @@
+help: |
+ This is a "morph deploy" configuration extension to fully configure
+ a Mason instance at deployment time. It uses the following variables
+ from the environment:
+
+ * TROVE_ID
+ * TROVE_HOST
+ * ARTIFACT_CACHE_SERVER
+
+ * GERRIT_USER
+ * GERRIT_HOSTNAME
+ * GERRIT_HTTP_PORT
+ * GERRIT_GIT_PORT
+ * GERRIT_SSH_KEY
+ * GERRIT_SSH_KEY_PATH
+
+ * GEARMAN_HOST
+ * GEARMAN_PORT
+ * START_GEARMAN
+
+ * MASON_CLUSTER_MORPHOLOGY
+ * MASON_ARCHITECTURE
+ * MASON_TEST_HOSTS
+ * MASON_DISTBUILD_CONTROLLERS
+ * MASON_TEST_INFRASTRUCTURE_TYPE
+ * MASON_UPSTREAM_TROVE
+
+ * MASON_OPENSTACK_NETWORK_ID
+ * MASON_OPENSTACK_USER
+ * MASON_OPENSTACK_PASSWORD
+  * MASON_OPENSTACK_TENANT
+ * MASON_OPENSTACK_AUTH_URL
+
+ The variables are described in more detail below.
+
+ A Mason deployment needs to know the following things:
+
+ * The ID and public name of the upstream Trove it should
+ use with morph.
+ * The hostname of the Gerrit instance to be monitored.
+ * A username to use on the Gerrit instance to be monitored,
+ and an ssh key with which to authenticate.
+
+ These, amongst some other configuration for Mason, are provided
+ by the variables described in this help.
+
+ * TROVE_ID: the same as for distbuild, this is the short ID of
+ your upstream Trove.
+ * TROVE_HOST: the same as for distbuild, this is the FQDN or
+ IP address of your upstream Trove.
+ * ARTIFACT_CACHE_SERVER: this is the FQDN or IP address of the
+ artifact cache you wish to use - normally the same as
+ TROVE_HOST.
+
+ * GERRIT_USER: the username of the Gerrit account Zuul should
+ use to look for and report on changes.
+ * GERRIT_HOSTNAME: the FQDN or IP address of the Gerrit instance
+ to be used.
+ * GERRIT_HTTP_PORT: the port used to access the HTTP frontend of
+ Gerrit - normally 8080.
+ * GERRIT_GIT_PORT: the port used to access Gerrit's git interface
+    (over SSH) - normally 29418.
+ * GERRIT_SSH_KEY: the location of the SSH key to be added to the
+ system for use when accessing Gerrit. If this is not the same
+    as WORKER_SSH_KEY, then GERRIT_SSH_KEY_PATH should *not* be
+    "/root/.ssh/id_rsa".
+ * GERRIT_SSH_KEY_PATH: the location to put the SSH key for Gerrit
+ in the system. Normally, "/root/.ssh/id_rsa" is sufficient,
+ unless WORKER_SSH_KEY and GERRIT_SSH_KEY are not the same.
+
+ * GEARMAN_HOST: the FQDN or IP address of the Gearman server. If
+    START_GEARMAN is yes, then this should be 127.0.0.1.
+ * GEARMAN_PORT: the port used for accessing the Gearman server.
+ This is normally 4730.
+ * START_GEARMAN: yes or no. If yes, then a Gearman server is run
+ on the Mason instance by Zuul.
+
+ * MASON_ARCHITECTURE: this is currently used to determine which
+ artifacts need to be uploaded. Artifacts from systems in
+ MASON_CLUSTER_MORPHOLOGY with arch: MASON_ARCHITECTURE are
+ uploaded.
+ * MASON_CLUSTER_MORPHOLOGY: this is the cluster morphology which
+ contains the systems to be tested.
+ * MASON_DISTBUILD_CONTROLLERS: this is a comma separated list of
+ pairs of the form "architecture:ip" which defines the distbuild
+ networks available for Mason. It is not used by the tests at
+    the moment, however.
+ * MASON_TEST_HOSTS: this is a comma separated list of pairs of the
+ form "architecture:user@url" which defines the places to deploy
+ test systems to. If MASON_TEST_INFRASTRUCTURE_TYPE is "openstack"
+ then use your OpenStack auth URL.
+ * MASON_TEST_INFRASTRUCTURE_TYPE: this can be 'kvm' or 'openstack',
+   depending on whether you are deploying your test instances to a
+   kvm host or an OpenStack cloud.
+ * MASON_UPSTREAM_TROVE: this is the Trove to which artifacts are
+ uploaded on success by Mason.
+
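+ As an illustration only - hostnames, paths and key locations here are
+ hypothetical - the pair-valued variables and key paths above might be
+ set as follows:
+
+   MASON_DISTBUILD_CONTROLLERS: x86_64:distbuild.example.com
+   MASON_TEST_HOSTS: x86_64:user@openstack.example.com:5000/v2.0/
+   GERRIT_SSH_KEY: ssh_keys/gerrit.key
+   GERRIT_SSH_KEY_PATH: /root/.ssh/gerrit_id_rsa
+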
+ The following are only required if MASON_TEST_INFRASTRUCTURE_TYPE is
+ openstack:
+
+ * MASON_OPENSTACK_AUTH_URL: the auth url of the OpenStack instance
+ test systems are deployed to.
+ * MASON_OPENSTACK_NETWORK_ID: the network ID that test systems will
+ be connected to.
+ * MASON_OPENSTACK_USER: the username used by Mason to deploy test
+ systems with Glance.
+ * MASON_OPENSTACK_TENANT_ID, MASON_OPENSTACK_TENANT_NAME: the ID and
+   name of the tenancy to deploy test systems in.
+ * MASON_OPENSTACK_PASSWORD: the password of the Mason user on
+   OpenStack. Note that this will be stored in plaintext on the
+   Mason instance; consider passing it on the command line at
+   deployment time instead.
+
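+ Again, with purely illustrative values:
+
+   MASON_OPENSTACK_AUTH_URL: http://openstack.example.com:5000/v2.0/
+   MASON_OPENSTACK_NETWORK_ID: 12345678-90ab-cdef-1234-567890abcdef
+   MASON_OPENSTACK_USER: mason
+   MASON_OPENSTACK_TENANT_NAME: mason-tests
+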
+ It is possible to deploy a generic Mason which you can then boot
+ and give configuration to at a later date:
+
+ MASON_GENERIC: yes
+
+ To configure a generic Mason, add a YAML file at /etc/mason/mason.conf
+ containing all the required variables, in the form:
+
+ VARIABLE: value
+ VARIABLE: value
+ ... ...
+
+ However, you will still need to provide ARTIFACT_CACHE_SERVER,
+ TROVE_ID and TROVE_HOST as they are also used by distbuild.configure.
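+
+ For example, a sketch of a minimal /etc/mason/mason.conf covering the
+ variables the setup playbook requires - every value is a placeholder
+ to adapt to your own deployment:
+
+   TROVE_ID: my-trove
+   TROVE_HOST: trove.example.com
+   ARTIFACT_CACHE_SERVER: trove.example.com
+   GERRIT_USER: mason
+   GERRIT_HOSTNAME: gerrit.example.com
+   GERRIT_HTTP_PORT: 8080
+   GERRIT_GIT_PORT: 29418
+   GERRIT_SSH_KEY_PATH: /root/.ssh/id_rsa
+   GEARMAN_HOST: 127.0.0.1
+   GEARMAN_PORT: 4730
+   START_GEARMAN: yes
+   MASON_CLUSTER_MORPHOLOGY: clusters/ci.morph
+   MASON_ARCHITECTURE: x86_64
+   MASON_TEST_INFRASTRUCTURE_TYPE: kvm
+   MASON_TEST_HOSTS: x86_64:user@kvmhost.example.com
+   MASON_DISTBUILD_CONTROLLERS: x86_64:distbuild.example.com
+   MASON_UPSTREAM_TROVE: trove.example.com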
+
diff --git a/mason/README b/mason/README
new file mode 100644
index 00000000..22ece93b
--- /dev/null
+++ b/mason/README
@@ -0,0 +1,120 @@
+Mason with Zuul/Gearman/turbo-hipster
+=====================================
+
+Configuration
+-------------
+
+Some of the configuration is done by Ansible. This is documented in
+clusters/mason-system-x86_64-openstack-deploy.morph. The config files
+which are dealt with in this way are mason/share/zuul.conf and
+mason/share/turbo-hipster-config.yaml. You will probably want to
+edit the default configuration in mason/share/zuul-layout.yaml.
+
+### mason/share/zuul-layout.yaml
+
+Full documentation of the configuration of Zuul can be found [here][0].
+
+The projects section in this file contains a list of projects and the
+tests to run on them. The "name" field should be the name of the
+project on Gerrit. You then define the jobs to be run for each pipeline
+on that project. In the example,
+
+projects:
+ - name: baserock/baserock/definitions
+ check:
+ - build:
+ - build_test
+
+there exists some pipeline named check. The above sets up Zuul to run
+the job "build" when the check pipeline gets a change. If the build job
+is successful, then Zuul will run the build_test job. On completion,
+Zuul will report in the way defined in the pipeline.
+
+Not all tests have dependencies. The following is also valid:
+
+projects:
+ - name: baserock/baserock/definitions
+ check:
+ - build
+ - some-other-test
+
+[0]: http://ci.openstack.org/zuul/zuul.html#layout-yaml
+
+Debugging
+---------
+
+Once you've deployed and configured a Mason, you may need to look at
+its logs or restart its services. If you change /etc/zuul.conf or
+/etc/zuul-layout.yaml, you'll need to run:
+
+ systemctl restart zuul-server
+ systemctl restart zuul-merger
+
+And if you change /etc/turbo-hipster-config.yaml you will also need
+to do:
+
+ systemctl restart turbo-hipster
+
+The logs created by the current plugins are in /var/www/logs/. These
+will be useful if a change causes a build to fail and you need to see why.
+They are also accessible by going to "http://<mason-ip>/logs/" in a web
+browser.
+
+Zuul saves its logs in /var/log/zuul/debug.log and /var/log/zuul/zuul.log.
+The most useful of these for discovering why something has gone wrong is
+debug.log.
+
+Turbo-hipster saves its logs in /var/log/turbo-hipster/debug.log. It's
+quite messy, but can be useful for determining which part of a plugin
+went wrong.
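+
+To follow both logs at once while reproducing a failure, something like
+this works:
+
+    tail -f /var/log/zuul/debug.log /var/log/turbo-hipster/debug.log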
+
+The plugins are in "/usr/share/system-tests/mason/tests/". If you change
+them, you will need to run
+
+ systemctl restart turbo-hipster
+
+in order for turbo-hipster to use the right plugin code.
+
+Issues
+------
+
+### "--verified" is not a valid option
+
+This suggests that either your Gerrit instance has not been configured
+to have a "Verified" label, or your Mason user on Gerrit cannot change
+the value of said label.
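+
+As a sketch, the label is normally defined in Gerrit's All-Projects
+project.config along these lines (the group name here is illustrative,
+and your access sections will differ):
+
+    [label "Verified"]
+        function = MaxWithBlock
+        value = -1 Fails
+        value = 0 No score
+        value = +1 Verified
+
+    [access "refs/heads/*"]
+        label-Verified = -1..+1 group Testers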
+
+### project "foo/bar" is not found
+
+This means that a Gerrit event has occurred on a project not listed in
+the projects section of /etc/zuul-layout.yaml. If that event was a
+change you wanted testing, you'll need to amend /etc/zuul-layout.yaml.
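+
+For example, to run the build job on changes to "foo/bar" (the name
+here mirrors the error message, not a real project):
+
+    projects:
+      - name: foo/bar
+        check:
+          - build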
+
+### failing to deploy test systems
+
+Check your OpenStack credentials. Look in /var/log/morph.log to see
+where the deployment failed, if one was attempted; if not, check your
+config. The turbo-hipster logs may also be useful for finding the
+exact error with the deployment. Does the test plugin in
+/etc/turbo-hipster-config.yaml have "deployment-host" set correctly?
+The format is ["architecture:user@url"]; for OpenStack on x86_64 this
+could be "x86_64:user@openstack.example.com:5000/v2.0/" (the username
+is not used for OpenStack). Is the deployment host reachable from
+your machine?
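+
+As a quick reachability check for a kvm test host (the hostname is an
+example), try:
+
+    ssh user@kvmhost.example.com true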
+
+Useful commands
+---------------
+
+Run these on the machine running Zuul.
+
+Show the running jobs:
+
+ zuul -c /etc/zuul.conf show running-jobs
+
+Force Zuul to reassess a change:
+
+ zuul -c /etc/zuul.conf enqueue --trigger gerrit --pipeline check --project baserock/baserock/definitions --change 71,1
+
+where 71 is the change number and 1 is the patchset number.
+
+Full docs of the Zuul client are [here][1].
+
+[1]: http://ci.openstack.org/zuul/client.html
diff --git a/mason/ansible/mason-setup.yml b/mason/ansible/mason-setup.yml
index d1528dbb..66d38d81 100644
--- a/mason/ansible/mason-setup.yml
+++ b/mason/ansible/mason-setup.yml
@@ -4,7 +4,6 @@
- "/etc/mason/mason.conf"
tasks:
-
- fail: msg='TROVE_ID is mandatory'
when: TROVE_ID is not defined
@@ -14,70 +13,104 @@
- fail: msg='ARTIFACT_CACHE_SERVER is mandatory'
when: ARTIFACT_CACHE_SERVER is not defined
+ - fail: msg='GERRIT_USER is mandatory'
+ when: GERRIT_USER is not defined
+
+ - fail: msg='GERRIT_HOSTNAME is mandatory'
+ when: GERRIT_HOSTNAME is not defined
+
+ - fail: msg='GERRIT_HTTP_PORT is mandatory'
+ when: GERRIT_HTTP_PORT is not defined
+
+ - fail: msg='GERRIT_GIT_PORT is mandatory'
+ when: GERRIT_GIT_PORT is not defined
+
+ - fail: msg='GERRIT_SSH_KEY_PATH is mandatory'
+ when: GERRIT_SSH_KEY_PATH is not defined
+
+ - fail: msg='GEARMAN_HOST is mandatory'
+ when: GEARMAN_HOST is not defined
+
+ - fail: msg='GEARMAN_PORT is mandatory'
+ when: GEARMAN_PORT is not defined
+
+ - fail: msg='START_GEARMAN is mandatory'
+ when: START_GEARMAN is not defined
+
- fail: msg='MASON_CLUSTER_MORPHOLOGY is mandatory'
when: MASON_CLUSTER_MORPHOLOGY is not defined
- - fail: msg='MASON_DEFINITIONS_REF is mandatory'
- when: MASON_DEFINITIONS_REF is not defined
-
- - fail: msg='MASON_DISTBUILD_ARCH is mandatory'
- when: MASON_DISTBUILD_ARCH is not defined
+ - fail: msg='MASON_ARCHITECTURE is mandatory'
+ when: MASON_ARCHITECTURE is not defined
- - fail: msg='MASON_TEST_HOST is mandatory'
- when: MASON_TEST_HOST is not defined
+ - fail: msg='MASON_TEST_HOSTS is mandatory'
+ when: MASON_TEST_HOSTS is not defined
- - fail: msg='CONTROLLERHOST is mandatory'
- when: CONTROLLERHOST is not defined
+ - fail: msg='MASON_DISTBUILD_CONTROLLERS is mandatory'
+ when: MASON_DISTBUILD_CONTROLLERS is not defined
- - fail: msg='TEST_INFRASTRUCTURE_TYPE is mandatory'
- when: TEST_INFRASTRUCTURE_TYPE is not defined
+ - fail: msg='MASON_TEST_INFRASTRUCTURE_TYPE is mandatory'
+ when: MASON_TEST_INFRASTRUCTURE_TYPE is not defined
- - fail: msg='OPENSTACK_NETWORK_ID is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
- when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OPENSTACK_NETWORK_ID is not defined
+ - fail: msg='MASON_UPSTREAM_TROVE is mandatory'
+ when: MASON_UPSTREAM_TROVE is not defined
- - fail: msg='OS_USERNAME is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
- when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_USERNAME is not defined
+ - fail: msg='OPENSTACK_NETWORK_ID is mandatory when MASON_TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: MASON_TEST_INFRASTRUCTURE_TYPE == "openstack" and OPENSTACK_NETWORK_ID is not defined
- - fail: msg='OS_PASSWORD is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
- when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_PASSWORD is not defined
+ - fail: msg='OS_USERNAME is mandatory when MASON_TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: MASON_TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_USERNAME is not defined
- - fail: msg='OS_TENANT_ID is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
- when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_TENANT_ID is not defined
+ - fail: msg='OS_PASSWORD is mandatory when MASON_TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: MASON_TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_PASSWORD is not defined
- - fail: msg='OS_TENANT_NAME is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
- when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_TENANT_NAME is not defined
+ - fail: msg='OS_TENANT_NAME is mandatory when MASON_TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: MASON_TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_TENANT_NAME is not defined
- - fail: msg='OS_AUTH_URL is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
- when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_AUTH_URL is not defined
+ - fail: msg='OS_AUTH_URL is mandatory when MASON_TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: MASON_TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_AUTH_URL is not defined
- - name: Create the Mason configuration file
+ - name: Create required configuration files
template: src=/usr/share/mason-setup/{{ item }} dest=/etc/{{ item }}
with_items:
- - mason.conf
+ - zuul.conf
+ - turbo-hipster-config.yaml
+ - lighttpd.conf
+ - zuul-layout.yaml
+ - zuul-logging.conf
- name: Create the OpenStack credentials file
template: src=/usr/share/mason-setup/{{ item }} dest=/etc/{{ item }}
with_items:
- os.conf
- when: TEST_INFRASTRUCTURE_TYPE == "openstack"
-
- - name: Enable the mason service
- service: name=mason.service enabled=yes
- register: mason_service
- - name: Restart the mason service
- service: name=mason.service state=restarted
- when: mason_service|changed
-
- - name: Enable the mason timer
- service: name=mason.timer enabled=yes
- register: mason_timer
- - name: Restart the mason timer
- service: name=mason.timer state=restarted
- when: mason_timer|changed
-
- - name: Enable the httpd service
- service: name=httpd.service enabled=yes
- register: httpd_service
- - name: Restart the httpd service
- service: name=httpd state=restarted
- when: httpd_service|changed
+ when: MASON_TEST_INFRASTRUCTURE_TYPE == "openstack"
+
+ - name: Enable the zuul-server service
+ service: name=zuul-server.service enabled=yes
+ register: zuul_server_service
+ - name: Restart the zuul-server service
+ service: name=zuul-server.service state=restarted
+ when: zuul_server_service|changed
+
+ - name: Enable the zuul-merger service
+ service: name=zuul-merger.service enabled=yes
+ register: zuul_merger_service
+ - name: Restart the zuul-merger service
+ service: name=zuul-merger.service state=restarted
+ when: zuul_merger_service|changed
+
+ - name: Enable the turbo-hipster service
+ service: name=turbo-hipster.service enabled=yes
+ register: turbo_hipster_service
+ - name: Restart the turbo-hipster service
+ service: name=turbo-hipster.service state=restarted
+ when: turbo_hipster_service|changed
+
+ - user: name=www comment="Lighttpd user"
+
+ - name: Enable the lighttpd service
+ service: name=lighttpd.service enabled=yes
+ register: lighttpd_service
+ - name: Restart the lighttpd service
+ service: name=lighttpd.service state=restarted
+ when: lighttpd_service|changed
diff --git a/mason/httpd.service b/mason/lighttpd.service
index 7572b732..ffc1b9a0 100644
--- a/mason/httpd.service
+++ b/mason/lighttpd.service
@@ -4,7 +4,7 @@ After=network.target
[Service]
User=root
-ExecStart=/usr/sbin/httpd -f -p 80 -h /srv/mason
+ExecStart=/usr/sbin/lighttpd -D -f /etc/lighttpd.conf
[Install]
WantedBy=multi-user.target
diff --git a/mason/mason-generator.sh b/mason/mason-generator.sh
deleted file mode 100755
index 187db72c..00000000
--- a/mason/mason-generator.sh
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/bin/sh
-
-set -e
-
-if [ "$#" -lt 5 -o "$#" -gt 6 -o "$1" == "-h" -o "$1" == "--help" ]; then
- cat <<EOF
-Usage:
- `basename $0` HOST_PREFIX UPSTREAM_TROVE_HOSTNAME VM_USER VM_HOST VM_PATH [HOST_POSTFIX]
-
-Where:
- HOST_PREFIX -- Name of your Mason instance
- e.g. "my-mason" to produce hostnames:
- my-mason-trove and my-mason-controller
- UPSTREAM_TROVE_HOSTNAME -- Upstream trove's hostname
- VM_USER -- User on VM host for VM deployment
- VM_HOST -- VM host for VM deployment
- VM_PATH -- Path to store VM images in on VM host
- HOST_POSTFIX -- e.g. ".example.com" to get
- my-mason-trove.example.com
-
-This script makes deploying a Mason system simpler by automating
-the generation of keys for the systems to use, building of the
-systems, filling out the mason deployment cluster morphology
-template with useful values, and finally deploying the systems.
-
-To ensure that the deployed system can deploy test systems, you
-must supply an ssh key to the VM host. Do so with the following
-command:
- ssh-copy-id -i ssh_keys-HOST_PREFIX/worker.key.pub VM_USER@VM_HOST
-
-To ensure that the mason can upload artifacts to the upstream trove,
-you must supply an ssh key to the upstream trove. Do so with the
-following command:
- ssh-copy-id -i ssh_keys-HOST_PREFIX/id_rsa.key.pub root@UPSTREAM_TROVE_HOSTNAME
-
-EOF
- exit 0
-fi
-
-
-HOST_PREFIX="$1"
-UPSTREAM_TROVE="$2"
-VM_USER="$3"
-VM_HOST="$4"
-VM_PATH="$5"
-HOST_POSTFIX="$6"
-
-sedescape() {
- # Escape all non-alphanumeric characters
- printf "%s\n" "$1" | sed -e 's/\W/\\&/g'
-}
-
-
-##############################################################################
-# Key generation
-##############################################################################
-
-mkdir -p "ssh_keys-${HOST_PREFIX}"
-cd "ssh_keys-${HOST_PREFIX}"
-test -e mason.key || ssh-keygen -t rsa -b 2048 -f mason.key -C mason@TROVE_HOST -N ''
-test -e lorry.key || ssh-keygen -t rsa -b 2048 -f lorry.key -C lorry@TROVE_HOST -N ''
-test -e worker.key || ssh-keygen -t rsa -b 2048 -f worker.key -C worker@TROVE_HOST -N ''
-test -e id_rsa || ssh-keygen -t rsa -b 2048 -f id_rsa -C trove-admin@TROVE_HOST -N ''
-cd ../
-
-
-##############################################################################
-# Mason setup
-##############################################################################
-
-cp clusters/mason.morph mason-${HOST_PREFIX}.morph
-
-sed -i "s/red-box-v1/$(sedescape "$HOST_PREFIX")/g" "mason-$HOST_PREFIX.morph"
-sed -i "s/ssh_keys/ssh_keys-$(sedescape "$HOST_PREFIX")/g" "mason-$HOST_PREFIX.morph"
-sed -i "s/upstream-trove/$(sedescape "$UPSTREAM_TROVE")/" "mason-$HOST_PREFIX.morph"
-sed -i "s/vm-user/$(sedescape "$VM_USER")/g" "mason-$HOST_PREFIX.morph"
-sed -i "s/vm-host/$(sedescape "$VM_HOST")/g" "mason-$HOST_PREFIX.morph"
-sed -i "s/vm-path/$(sedescape "$VM_PATH")/g" "mason-$HOST_PREFIX.morph"
-sed -i "s/\.example\.com/$(sedescape "$HOST_POSTFIX")/g" "mason-$HOST_PREFIX.morph"
-
-
-##############################################################################
-# System building
-##############################################################################
-
-morph build systems/trove-system-x86_64.morph
-morph build systems/build-system-x86_64.morph
-
-
-##############################################################################
-# System deployment
-##############################################################################
-
-morph deploy mason-${HOST_PREFIX}.morph
-
-
-##############################################################################
-# Cleanup
-##############################################################################
-
-rm mason-${HOST_PREFIX}.morph
diff --git a/mason/mason-report.sh b/mason/mason-report.sh
deleted file mode 100755
index 9c20b65b..00000000
--- a/mason/mason-report.sh
+++ /dev/null
@@ -1,252 +0,0 @@
-#!/bin/bash
-
-set -x
-
-. /etc/mason.conf
-
-REPORT_PATH=/var/mason/report.html
-SERVER_PATH=/srv/mason
-
-sed_escape() {
- printf "%s\n" "$1" | sed -e 's/\W/\\&/g'
-}
-
-create_report() {
-cat > $REPORT_PATH <<'EOF'
-<html>
-<head>
-<meta charset="UTF-8">
-<meta http-equiv="refresh" content="60">
-<style>
-html, body {
- margin: 0;
- padding: 0;
-}
-p.branding {
- background: black;
- color: #fff;
- padding: 0.4em;
- margin: 0;
- font-weight: bold;
-}
-h1 {
- background: #225588;
- color: white;
- margin: 0;
- padding: 0.6em;
-}
-table {
- width: 90%;
- margin: 1em auto 6em auto;
- border: 1px solid black;
- border-spacing: 0;
-}
-table tr.headings {
- background: #555;
- color: white;
-}
-table tr.pass {
- background: #aaffaa;
-}
-table tr.pass:hover {
- background: #bbffbb;
-}
-table tr.fail {
- background: #ffaaaa;
-}
-table tr.fail:hover {
- background: #ffbbbb;
-}
-table tr.nonet {
- background: #ffdd99;
-}
-table tr.nonet:hover {
- background: #ffeeaa;
-}
-table tr.headings th {
- font-weight: bold;
- text-align: left;
- padding: 3px 2px;
-}
-table td {
- padding: 2px;
-}
-td.result {
- font-weight: bold;
- text-transform: uppercase;
-}
-td.result a {
- text-decoration: none;
-}
-td.result a:before {
- content: "➫ ";
-}
-tr.pass td.result a {
- color: #252;
-}
-tr.pass td.result a:hover {
- color: #373;
-}
-tr.fail td.result a {
- color: #622;
-}
-tr.fail td.result a:hover {
- color: #933;
-}
-tr.nonet td.result a {
- color: #641;
-}
-tr.nonet td.result a:hover {
- color: #962;
-}
-td.ref {
- font-family: monospace;
-}
-td.ref a {
- color: #333;
-}
-td.ref a:hover {
- color: #555;
-}
-table tr.pass td, table tr.fail td {
- border-top: solid white 1px;
-}
-p {
- margin: 1.3em;
-}
-code {
- padding: 0.3em 0.5em;
- background: #eee;
- border: 1px solid #bbb;
- border-radius: 1em;
-}
-#footer {
- margin: 0;
- background: #aaa;
- color: #222;
- border-top: #888 1px solid;
- font-size: 80%;
- padding: 0;
- position: fixed;
- bottom: 0;
- width: 100%;
- display: table;
-}
-#footer p {
- padding: 1.3em;
- display: table-cell;
-}
-#footer p code {
- font-size: 110%;
-}
-#footer p.about {
- text-align: right;
-}
-</style>
-</head>
-<body>
-<p class="branding">Mason</p>
-<h1>Baserock: Continuous Delivery</h1>
-<p>Build log of changes to <code>BRANCH</code> from <code>TROVE</code>. Most recent first.</p>
-<table>
-<tr class="headings">
- <th>Started</th>
- <th>Ref</th>
- <th>Duration</th>
- <th>Result</th>
-</tr>
-<!--INSERTION POINT-->
-</table>
-<div id="footer">
-<p>Last checked for updates at: <code>....-..-.. ..:..:..</code></p>
-<p class="about">Generated by Mason | Powered by Baserock</p>
-</div>
-</body>
-</html>
-EOF
-
- sed -i 's/BRANCH/'"$(sed_escape "$1")"'/' $REPORT_PATH
- sed -i 's/TROVE/'"$(sed_escape "$2")"'/' $REPORT_PATH
-}
-
-update_report() {
- # Give function params sensible names
- build_start_time="$1"
- build_trove_host="$2"
- build_ref="$3"
- build_sha1="$4"
- build_duration="$5"
- build_result="$6"
-
- # Generate template if report file is not there
- if [ ! -f $REPORT_PATH ]; then
- create_report $build_ref $build_trove_host
- fi
-
- # Build table row for insertion into report file
- if [ "$build_result" = nonet ]; then
- msg='<tr class="'"${build_result}"'"><td>'"${build_start_time}"'</td><td class="ref">Failed to contact '"${build_trove_host}"'</a></td><td>'"${build_duration}s"'</td><td class="result"><a href="log/'"${build_sha1}"'--'"${build_start_time}"'.log">'"${build_result}"'</a></td></tr>'
- else
- msg='<tr class="'"${build_result}"'"><td>'"${build_start_time}"'</td><td class="ref"><a href="http://'"${build_trove_host}"'/cgi-bin/cgit.cgi/baserock/baserock/definitions.git/commit/?h='"${build_ref}"'&id='"${build_sha1}"'">'"${build_sha1}"'</a></td><td>'"${build_duration}s"'</td><td class="result"><a href="log/'"${build_sha1}"'--'"${build_start_time}"'.log">'"${build_result}"'</a></td></tr>'
- fi
-
- # Insert report line, newest at top
- sed -i 's/<!--INSERTION POINT-->/<!--INSERTION POINT-->\n'"$(sed_escape "$msg")"'/' $REPORT_PATH
-}
-
-update_report_time() {
- # Give function params sensible names
- build_start_time="$1"
-
- # If the report file exists, update the last-checked-for-updates time
- if [ -f $REPORT_PATH ]; then
- sed -i 's/<code>....-..-.. ..:..:..<\/code>/<code>'"$(sed_escape "$build_start_time")"'<\/code>/' $REPORT_PATH
- fi
-}
-
-START_TIME=`date +%Y-%m-%d\ %T`
-
-update_report_time "$START_TIME"
-cp "$REPORT_PATH" "$SERVER_PATH/index.html"
-
-logfile="$(mktemp)"
-/usr/lib/mason/mason.sh 2>&1 | tee "$logfile"
-case "${PIPESTATUS[0]}" in
-0)
- RESULT=pass
- ;;
-33)
- RESULT=skip
- ;;
-42)
- RESULT=nonet
- ;;
-*)
- RESULT=fail
- ;;
-esac
-
-# TODO: Update page with last executed time
-if [ "$RESULT" = skip ]; then
- rm "$logfile"
- exit 0
-fi
-
-DURATION=$(( $(date +%s) - $(date --date="$START_TIME" +%s) ))
-SHA1="$(cd "ws/$DEFINITIONS_REF/$UPSTREAM_TROVE_ADDRESS/baserock/baserock/definitions" && git rev-parse HEAD)"
-
-update_report "$START_TIME" \
- "$UPSTREAM_TROVE_ADDRESS" \
- "$DEFINITIONS_REF" \
- "$SHA1" \
- "$DURATION" \
- "$RESULT"
-
-
-#
-# Copy report into server directory
-#
-
-cp "$REPORT_PATH" "$SERVER_PATH/index.html"
-mkdir "$SERVER_PATH/log"
-mv "$logfile" "$SERVER_PATH/log/$SHA1--$START_TIME.log"
diff --git a/mason/mason.service b/mason/mason.service
deleted file mode 100644
index a864d610..00000000
--- a/mason/mason.service
+++ /dev/null
@@ -1,10 +0,0 @@
-[Unit]
-Description=Mason: Continuous Delivery Service
-After=mason-setup.service
-
-[Service]
-User=root
-ExecStart=/usr/lib/mason/mason-report.sh
-
-[Install]
-WantedBy=multi-user.target
diff --git a/mason/mason.sh b/mason/mason.sh
deleted file mode 100755
index dba99dfa..00000000
--- a/mason/mason.sh
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/bin/sh
-
-# Load OpenStack credentials
-if [ -f "/etc/os.conf" ]; then
- . /etc/os.conf
-fi
-
-set -e
-set -x
-
-# Load our deployment config
-. /etc/mason.conf
-
-if [ ! -e ws ]; then
- morph init ws
-fi
-cd ws
-
-definitions_repo="$DEFINITIONS_REF"/"$UPSTREAM_TROVE_ADDRESS"/baserock/baserock/definitions
-if [ ! -e "$definitions_repo" ]; then
- morph checkout git://"$UPSTREAM_TROVE_ADDRESS"/baserock/baserock/definitions "$DEFINITIONS_REF"
- cd "$definitions_repo"
- git config user.name "$TROVE_ID"-mason
- git config user.email "$TROVE_ID"-mason@$(hostname)
-else
- cd "$definitions_repo"
- SHA1_PREV="$(git rev-parse HEAD)"
-fi
-
-if ! git remote update origin; then
- echo ERROR: Unable to contact trove
- exit 42
-fi
-git clean -fxd
-git reset --hard origin/"$DEFINITIONS_REF"
-
-SHA1="$(git rev-parse HEAD)"
-
-if [ -f "$HOME/success" ] && [ "$SHA1" = "$SHA1_PREV" ]; then
- echo INFO: No changes to "$DEFINITIONS_REF", nothing to do
- exit 33
-fi
-
-rm -f "$HOME/success"
-
-echo INFO: Mason building: $DEFINITIONS_REF at $SHA1
-
-if ! "scripts/release-build" --no-default-configs \
- --trove-host "$UPSTREAM_TROVE_ADDRESS" \
- --artifact-cache-server "http://$ARTIFACT_CACHE_SERVER:8080/" \
- --controllers "$DISTBUILD_ARCH:$DISTBUILD_CONTROLLER_ADDRESS" \
- "$BUILD_CLUSTER_MORPHOLOGY"; then
- echo ERROR: Failed to build release images
- echo Build logs for chunks:
- find builds -type f -exec echo {} \; -exec cat {} \;
- exit 1
-fi
-
-releases_made="$(cd release && ls | wc -l)"
-if [ "$releases_made" = 0 ]; then
- echo ERROR: No release images created
- exit 1
-else
- echo INFO: Created "$releases_made" release images
-fi
-
-if [ "$TEST_INFRASTRUCTURE_TYPE" = "openstack" ]; then
- "scripts/release-test-os" \
- --deployment-host "$DISTBUILD_ARCH":"$MASON_TEST_HOST" \
- --trove-host "$UPSTREAM_TROVE_ADDRESS" \
- --trove-id "$TROVE_ID" \
- --net-id "$OPENSTACK_NETWORK_ID" \
- "$BUILD_CLUSTER_MORPHOLOGY"
-elif [ "$TEST_INFRASTRUCTURE_TYPE" = "kvmhost" ]; then
- "scripts/release-test" \
- --deployment-host "$DISTBUILD_ARCH":"$MASON_TEST_HOST" \
- --trove-host "$UPSTREAM_TROVE_ADDRESS" \
- --trove-id "$TROVE_ID" \
- "$BUILD_CLUSTER_MORPHOLOGY"
-fi
-
-"scripts/release-upload" --build-trove-host "$ARTIFACT_CACHE_SERVER" \
- --arch "$DISTBUILD_ARCH" \
- --log-level=debug --log="$HOME"/release-upload.log \
- --public-trove-host "$UPSTREAM_TROVE_ADDRESS" \
- --public-trove-username root \
- --public-trove-artifact-dir /home/cache/artifacts \
- --no-upload-release-artifacts \
- "$BUILD_CLUSTER_MORPHOLOGY"
-
-echo INFO: Artifact upload complete for $DEFINITIONS_REF at $SHA1
-
-touch "$HOME/success"
diff --git a/mason/mason.timer b/mason/mason.timer
deleted file mode 100644
index 107dff97..00000000
--- a/mason/mason.timer
+++ /dev/null
@@ -1,10 +0,0 @@
-[Unit]
-Description=Runs Mason continually with 1 min between calls
-
-[Timer]
-#Time between Mason finishing and calling it again
-OnUnitActiveSec=1min
-Unit=mason.service
-
-[Install]
-WantedBy=multi-user.target
diff --git a/mason/share/lighttpd.conf b/mason/share/lighttpd.conf
new file mode 100644
index 00000000..2e9f6350
--- /dev/null
+++ b/mason/share/lighttpd.conf
@@ -0,0 +1,21 @@
+server.document-root = "/var/www/"
+
+server.port = 80
+
+server.username = "www"
+server.groupname = "www"
+
+mimetype.assign = (
+ ".html" => "text/html",
+ ".txt" => "text/plain",
+ ".log" => "text/plain",
+ ".jpg" => "image/jpeg",
+ ".png" => "image/png"
+)
+
+static-file.exclude-extensions = ( ".fcgi", ".php", ".rb", "~", ".inc" )
+index-file.names = ( "index.html" )
+
+$HTTP["url"] =~ "^/logs/" {
+ dir-listing.activate = "enable"
+}
diff --git a/mason/share/mason.conf b/mason/share/mason.conf
deleted file mode 100644
index 1295ce84..00000000
--- a/mason/share/mason.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-# This file is generarated by the mason-setup systemd unit.
-# If you want to change the configuration, change the configuration
-# in /etc/mason/mason.conf and restart the service.
-
-ARTIFACT_CACHE_SERVER={{ ARTIFACT_CACHE_SERVER|quote }}
-UPSTREAM_TROVE_ADDRESS={{ TROVE_HOST|quote }}
-DEFINITIONS_REF={{ MASON_DEFINITIONS_REF|quote }}
-DISTBUILD_ARCH={{ MASON_DISTBUILD_ARCH|quote }}
-DISTBUILD_CONTROLLER_ADDRESS={{ CONTROLLERHOST|quote }}
-TROVE_ID={{ TROVE_ID|quote }}
-BUILD_CLUSTER_MORPHOLOGY={{ MASON_CLUSTER_MORPHOLOGY|quote }}
-MASON_TEST_HOST={{ MASON_TEST_HOST|quote }}
-TEST_INFRASTRUCTURE_TYPE={{ TEST_INFRASTRUCTURE_TYPE|quote }}
-{% if OPENSTACK_NETWORK_ID is defined %}OPENSTACK_NETWORK_ID={{ OPENSTACK_NETWORK_ID|quote }}{% endif %}
diff --git a/mason/share/os.conf b/mason/share/os.conf
index 21ef398c..1acfff79 100644
--- a/mason/share/os.conf
+++ b/mason/share/os.conf
@@ -18,7 +18,6 @@ export OS_AUTH_URL={{ OS_AUTH_URL|quote }}
# With the addition of Keystone we have standardized on the term **tenant**
# as the entity that owns the resources.
-export OS_TENANT_ID={{ OS_TENANT_ID|quote }}
export OS_TENANT_NAME={{ OS_TENANT_NAME|quote }}
# In addition to the owning entity (tenant), openstack stores the entity
diff --git a/mason/share/turbo-hipster-config.yaml b/mason/share/turbo-hipster-config.yaml
new file mode 100644
index 00000000..8ae8133d
--- /dev/null
+++ b/mason/share/turbo-hipster-config.yaml
@@ -0,0 +1,47 @@
+zuul_server:
+ gerrit_site: "http://{{ GERRIT_HOSTNAME }}:{{ GERRIT_HTTP_PORT }}"
+ git_origin: "git://{{ GERRIT_HOSTNAME }}:{{ GERRIT_GIT_PORT }}"
+ gearman_host: "{{ GEARMAN_HOST }}"
+ gearman_port: "{{ GEARMAN_PORT }}"
+
+debug_log: /var/log/turbo-hipster/debug.log
+jobs_working_dir: /var/lib/turbo-hipster/jobs
+git_working_dir: /var/lib/turbo-hipster/git
+pip_download_cache: /var/cache/pip
+
+plugins:
+ - name: build
+ function: build:build
+ import-path: mason.tests.build
+ location: /usr/share/system-tests/
+ config:
+ trove-host: "{{ TROVE_HOST }}"
+ artifact-cache-server: "{{ ARTIFACT_CACHE_SERVER }}"
+ controllers: [ "{{ MASON_DISTBUILD_CONTROLLERS }}" ]
+ cluster-morphology: "{{ MASON_CLUSTER_MORPHOLOGY }}"
+ - name: build-test
+ function: build:build_test
+ import-path: mason.tests.build_test
+ location: /usr/share/system-tests/
+ config:
+ trove-host: "{{ TROVE_HOST }}"
+ cluster-morphology: "{{ MASON_CLUSTER_MORPHOLOGY }}"
+ test-infrastructure-type: "{{ MASON_TEST_INFRASTRUCTURE_TYPE }}"
+ deployment-host: [ "{{ MASON_TEST_HOSTS }}" ]
+ trove-id: "{{ TROVE_ID }}"
+ openstack-network-id: "{{ OPENSTACK_NETWORK_ID }}"
+ - name: artifact-upload
+ function: build:artifact_upload
+ import-path: mason.tests.artifact_upload
+ location: /usr/share/system-tests/
+ config:
+ artifact-cache-server: "{{ ARTIFACT_CACHE_SERVER }}"
+ cluster-morphology: "{{ MASON_CLUSTER_MORPHOLOGY }}"
+ architecture: "{{ MASON_ARCHITECTURE }}"
+ upstream-trove: "{{ MASON_UPSTREAM_TROVE }}"
+ upload-release-artifacts: False
+
+publish_logs:
+ type: local
+ path: /var/log/
+ prepend_url: http://localhost/logs
diff --git a/mason/share/zuul-layout.yaml b/mason/share/zuul-layout.yaml
new file mode 100644
index 00000000..a845a62f
--- /dev/null
+++ b/mason/share/zuul-layout.yaml
@@ -0,0 +1,22 @@
+pipelines:
+ - name: check
+ manager: IndependentPipelineManager
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ verified: 1
+ failure:
+ gerrit:
+ verified: -1
+
+jobs:
+ - name: ^.*-merge$
+ failure-message: Unable to merge change, please rebase and try again.
+
+projects:
+ - name: baserock/baserock/definitions
+ check:
+ - build:
+ - build_test
diff --git a/mason/share/zuul-logging.conf b/mason/share/zuul-logging.conf
new file mode 100644
index 00000000..8b76da26
--- /dev/null
+++ b/mason/share/zuul-logging.conf
@@ -0,0 +1,44 @@
+[loggers]
+keys=root,zuul,gerrit
+
+[handlers]
+keys=console,debug,normal
+
+[formatters]
+keys=simple
+
+[logger_root]
+level=WARNING
+handlers=console
+
+[logger_zuul]
+level=DEBUG
+handlers=debug,normal
+qualname=zuul
+
+[logger_gerrit]
+level=DEBUG
+handlers=debug,normal
+qualname=gerrit
+
+[handler_console]
+level=WARNING
+class=StreamHandler
+formatter=simple
+args=(sys.stdout,)
+
+[handler_debug]
+level=DEBUG
+class=logging.handlers.TimedRotatingFileHandler
+formatter=simple
+args=('/var/log/zuul/debug.log', 'midnight', 1, 30,)
+
+[handler_normal]
+level=INFO
+class=logging.handlers.TimedRotatingFileHandler
+formatter=simple
+args=('/var/log/zuul/zuul.log', 'midnight', 1, 30,)
+
+[formatter_simple]
+format=%(asctime)s %(levelname)s %(name)s: %(message)s
+datefmt=
diff --git a/mason/share/zuul.conf b/mason/share/zuul.conf
new file mode 100644
index 00000000..21066e70
--- /dev/null
+++ b/mason/share/zuul.conf
@@ -0,0 +1,26 @@
+[gearman]
+server={{ GEARMAN_HOST }}
+port={{ GEARMAN_PORT }}
+
+[gearman_server]
+start={{ START_GEARMAN }}
+
+[gerrit]
+server={{ GERRIT_HOSTNAME }}
+port={{ GERRIT_GIT_PORT }}
+baseurl=http://{{ GERRIT_HOSTNAME }}:{{ GERRIT_HTTP_PORT }}
+user={{ GERRIT_USER }}
+sshkey={{ GERRIT_SSH_KEY_PATH }}
+
+[zuul]
+log_config=/etc/zuul-logging.conf
+pidfile=/var/run/zuul/zuul.pid
+state_dir=/var/lib/zuul
+git_dir=/var/lib/zuul/git
+status_url=http://127.0.0.1/logs
+
+[merger]
+git_dir=/var/lib/zuul/git
+git_user_email={{ GERRIT_USER }}@mason
+git_user_name={{ GERRIT_USER }}
+zuul_url=ssh://{{ GERRIT_USER }}@{{ GERRIT_HOSTNAME }}:{{ GERRIT_GIT_PORT }}
diff --git a/mason/ssh-config b/mason/ssh-config
new file mode 100644
index 00000000..f30d239b
--- /dev/null
+++ b/mason/ssh-config
@@ -0,0 +1,2 @@
+Host *
+ StrictHostKeyChecking no
diff --git a/mason/turbo-hipster.service b/mason/turbo-hipster.service
new file mode 100644
index 00000000..20cde2ad
--- /dev/null
+++ b/mason/turbo-hipster.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=turbo-hipster: Test runner for Zuul
+After=zuul-server.service
+
+[Service]
+User=root
+ExecStart=/bin/sh -c ". /etc/os.conf && /usr/bin/turbo-hipster -c /etc/turbo-hipster-config.yaml"
+
+[Install]
+WantedBy=multi-user.target
diff --git a/mason/zuul-merger.service b/mason/zuul-merger.service
new file mode 100644
index 00000000..a3aa0ca5
--- /dev/null
+++ b/mason/zuul-merger.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=Zuul Merger: Handles automated merging of changes
+After=zuul-server.service
+
+[Service]
+User=root
+ExecStart=/usr/bin/zuul-merger -d -c /etc/zuul.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/mason/zuul-server.service b/mason/zuul-server.service
new file mode 100644
index 00000000..dfc6436f
--- /dev/null
+++ b/mason/zuul-server.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=Zuul: CI orchestration and Gatekeeper
+After=mason-setup.service
+
+[Service]
+User=root
+ExecStart=/usr/bin/zuul-server -d -c /etc/zuul.conf -l /etc/zuul-layout.yaml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/scripts/cycle.sh b/scripts/cycle.sh
deleted file mode 100755
index c0e2aa67..00000000
--- a/scripts/cycle.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/sh
-# Copyright (C) 2014 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-usage() {
- echo "Usage: cycle.sh some-system some-cluster [newversion]"
- echo
- echo "This builds and deploys the current checked out version of"
- echo "some-system, applying it as a self-upgrade to the system you"
- echo "are working in, using configuration from some-cluster."
- echo "The upgrade is labelled TEST by default, or [newversion] if"
- echo "specified, and is set to be the default for next boot."
-}
-
-if [ -z "$1" ] || [ -z "$2" ] || [ ! -z "$4" ] ; then
- usage
- exit 1
-fi
-
-newversion=TEST
-if [ ! -z "$3" ] ; then
- newversion=$3
- if (echo "$newversion" | grep ' ' > /dev/null 2>&1) ; then
- echo 'Version label must not contain spaces.'
- exit 1
- fi
-fi
-
-if system-version-manager get-running | grep -q "^$newversion$"; then
- echo "You are currently running the $newversion system."
- echo "Maybe you want to boot into a different system version?"
- exit 1
-fi
-
-set -e
-set -v
-
-runningversion=`system-version-manager get-running`
-system-version-manager set-default $runningversion
-if system-version-manager list | grep -q "^$newversion$"; then
- system-version-manager remove $newversion
-fi
-
-morph gc
-morph build "$1"
-
-sed -i "s|^- morph: .*$|- morph: $1|" "$2"
-morph deploy --upgrade "$2" self.HOSTNAME=$(hostname) self.VERSION_LABEL=$newversion
-system-version-manager list
diff --git a/scripts/licensecheck.pl b/scripts/licensecheck.pl
deleted file mode 100644
index 5b6d0d33..00000000
--- a/scripts/licensecheck.pl
+++ /dev/null
@@ -1,604 +0,0 @@
-#!/usr/bin/perl
-# This script was originally based on the script of the same name from
-# the KDE SDK (by dfaure@kde.org)
-#
-# This version is
-# Copyright (C) 2007, 2008 Adam D. Barratt
-# Copyright (C) 2012 Francesco Poli
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <https://www.gnu.org/licenses/>.
-
-=head1 NAME
-
-licensecheck - simple license checker for source files
-
-=head1 SYNOPSIS
-
-B<licensecheck> B<--help>|B<--version>
-
-B<licensecheck> [B<--no-conf>] [B<--verbose>] [B<--copyright>]
-[B<-l>|B<--lines=>I<N>] [B<-i>|B<--ignore=>I<regex>] [B<-c>|B<--check=>I<regex>]
-[B<-m>|B<--machine>] [B<-r>|B<--recursive>]
-I<list of files and directories to check>
-
-=head1 DESCRIPTION
-
-B<licensecheck> attempts to determine the license that applies to each file
-passed to it, by searching the start of the file for text belonging to
-various licenses.
-
-If any of the arguments passed are directories, B<licensecheck> will add
-the files contained within to the list of files to process.
-
-=head1 OPTIONS
-
-=over 4
-
-=item B<--verbose>, B<--no-verbose>
-
-Specify whether to output the text being processed from each file before
-the corresponding license information.
-
-Default is to be quiet.
-
-=item B<-l=>I<N>, B<--lines=>I<N>
-
-Specify the number of lines of each file's header which should be parsed
-for license information. (Default is 60).
-
-=item B<-i=>I<regex>, B<--ignore=>I<regex>
-
-When processing the list of files and directories, the regular
-expression specified by this option will be used to indicate those which
-should not be considered (e.g. backup files, VCS metadata).
-
-=item B<-r>, B<--recursive>
-
-Specify that the contents of directories should be added
-recursively.
-
-=item B<-c=>I<regex>, B<--check=>I<regex>
-
-Specify a pattern against which filenames will be matched in order to
-decide which files to check the license of.
-
-The default includes common source files.
-
-=item B<--copyright>
-
-Also display copyright text found within the file
-
-=item B<-m>, B<--machine>
-
-Display the information in a machine readable way, i.e. in the form
-<file><tab><license>[<tab><copyright>] so that it can be easily sorted
-and/or filtered, e.g. with the B<awk> and B<sort> commands.
-Note that using the B<--verbose> option will kill the readability.
-
-=item B<--no-conf>, B<--noconf>
-
-Do not read any configuration files. This can only be used as the first
-option given on the command line.
-
-=back
-
-=head1 CONFIGURATION VARIABLES
-
-The two configuration files F</etc/devscripts.conf> and
-F<~/.devscripts> are sourced by a shell in that order to set
-configuration variables. Command line options can be used to override
-configuration file settings. Environment variable settings are
-ignored for this purpose. The currently recognised variables are:
-
-=over 4
-
-=item B<LICENSECHECK_VERBOSE>
-
-If this is set to I<yes>, then it is the same as the B<--verbose> command
-line parameter being used. The default is I<no>.
-
-=item B<LICENSECHECK_PARSELINES>
-
-If this is set to a positive number then the specified number of lines
-at the start of each file will be read whilst attempting to determine
-the license(s) in use. This is equivalent to the B<--lines> command line
-option.
-
-=back
-
-=head1 LICENSE
-
-This code is copyright by Adam D. Barratt <I<adam@adam-barratt.org.uk>>,
-all rights reserved; based on a script of the same name from the KDE
-SDK, which is copyright by <I<dfaure@kde.org>>.
-This program comes with ABSOLUTELY NO WARRANTY.
-You are free to redistribute this code under the terms of the GNU
-General Public License, version 2 or later.
-
-=head1 AUTHOR
-
-Adam D. Barratt <adam@adam-barratt.org.uk>
-
-=cut
-
-use strict;
-use warnings;
-use Getopt::Long qw(:config gnu_getopt);
-use File::Basename;
-
-my $progname = basename($0);
-
-# From dpkg-source
-my $default_ignore_regex = '
-# Ignore general backup files
-(?:^|/).*~$|
-# Ignore emacs recovery files
-(?:^|/)\.#.*$|
-# Ignore vi swap files
-(?:^|/)\..*\.swp$|
-# Ignore baz-style junk files or directories
-(?:^|/),,.*(?:$|/.*$)|
-# File-names that should be ignored (never directories)
-(?:^|/)(?:DEADJOE|\.cvsignore|\.arch-inventory|\.bzrignore|\.gitignore)$|
-# File or directory names that should be ignored
-(?:^|/)(?:CVS|RCS|\.pc|\.deps|\{arch\}|\.arch-ids|\.svn|\.hg|_darcs|\.git|
-\.shelf|_MTN|\.bzr(?:\.backup|tags)?)(?:$|/.*$)
-';
-
-# Take out comments and newlines
-$default_ignore_regex =~ s/^#.*$//mg;
-$default_ignore_regex =~ s/\n//sg;
-
-my $default_check_regex = '\.(c(c|pp|xx)?|h(h|pp|xx)?|f(77|90)?|go|p(l|m)|xs|sh|php|py(|x)|rb|java|js|vala|el|sc(i|e)|cs|pas|inc|dtd|xsl|mod|m|tex|mli?|(c|l)?hs)$';
-
-my $modified_conf_msg;
-
-my %OPT=(
- verbose => '',
- lines => '',
- noconf => '',
- ignore => '',
- check => '',
- recursive => 0,
- copyright => 0,
- machine => 0,
-);
-
-my $def_lines = 60;
-
-# Read configuration files and then command line
-# This is boilerplate
-
-if (@ARGV and $ARGV[0] =~ /^--no-?conf$/) {
- $modified_conf_msg = " (no configuration files read)";
- shift;
-} else {
- my @config_files = ('/etc/devscripts.conf', '~/.devscripts');
- my %config_vars = (
- 'LICENSECHECK_VERBOSE' => 'no',
- 'LICENSECHECK_PARSELINES' => $def_lines,
- );
- my %config_default = %config_vars;
-
- my $shell_cmd;
- # Set defaults
- foreach my $var (keys %config_vars) {
- $shell_cmd .= qq[$var="$config_vars{$var}";\n];
- }
- $shell_cmd .= 'for file in ' . join(" ", @config_files) . "; do\n";
- $shell_cmd .= '[ -f $file ] && . $file; done;' . "\n";
- # Read back values
- foreach my $var (keys %config_vars) { $shell_cmd .= "echo \$$var;\n" }
- my $shell_out = `/bin/bash -c '$shell_cmd'`;
- @config_vars{keys %config_vars} = split /\n/, $shell_out, -1;
-
- # Check validity
- $config_vars{'LICENSECHECK_VERBOSE'} =~ /^(yes|no)$/
- or $config_vars{'LICENSECHECK_VERBOSE'} = 'no';
- $config_vars{'LICENSECHECK_PARSELINES'} =~ /^[1-9][0-9]*$/
- or $config_vars{'LICENSECHECK_PARSELINES'} = $def_lines;
-
- foreach my $var (sort keys %config_vars) {
- if ($config_vars{$var} ne $config_default{$var}) {
- $modified_conf_msg .= " $var=$config_vars{$var}\n";
- }
- }
- $modified_conf_msg ||= " (none)\n";
- chomp $modified_conf_msg;
-
- $OPT{'verbose'} = $config_vars{'LICENSECHECK_VERBOSE'} eq 'yes' ? 1 : 0;
- $OPT{'lines'} = $config_vars{'LICENSECHECK_PARSELINES'};
-}
-
-GetOptions(\%OPT,
- "help|h",
- "check|c=s",
- "copyright",
- "ignore|i=s",
- "lines|l=i",
- "machine|m",
- "noconf|no-conf",
- "recursive|r",
- "verbose!",
- "version|v",
-) or die "Usage: $progname [options] filelist\nRun $progname --help for more details\n";
-
-$OPT{'lines'} = $def_lines if $OPT{'lines'} !~ /^[1-9][0-9]*$/;
-$OPT{'ignore'} = $default_ignore_regex if ! length $OPT{'ignore'};
-$OPT{'check'} = $default_check_regex if ! length $OPT{'check'};
-
-if ($OPT{'noconf'}) {
- fatal("--no-conf is only acceptable as the first command-line option!");
-}
-if ($OPT{'help'}) { help(); exit 0; }
-if ($OPT{'version'}) { version(); exit 0; }
-
-die "Usage: $progname [options] filelist\nRun $progname --help for more details\n" unless @ARGV;
-
-$OPT{'lines'} = $def_lines if not defined $OPT{'lines'};
-
-my @files = ();
-my @find_args = ();
-my $files_count = @ARGV;
-
-push @find_args, qw(-maxdepth 1) unless $OPT{'recursive'};
-push @find_args, qw(-follow -type f -print);
-
-while (@ARGV) {
- my $file = shift @ARGV;
-
- if (-d $file) {
- open my $FIND, '-|', 'find', $file, @find_args
- or die "$progname: couldn't exec find: $!\n";
-
- while (<$FIND>) {
- chomp;
- next unless m%$OPT{'check'}%;
- # Skip empty files
- next if (-z $_);
- push @files, $_ unless m%$OPT{'ignore'}%;
- }
- close $FIND;
- } else {
- next unless ($files_count == 1) or $file =~ m%$OPT{'check'}%;
- push @files, $file unless $file =~ m%$OPT{'ignore'}%;
- }
-}
-
-while (@files) {
- my $file = shift @files;
- my $content = '';
- my $copyright_match;
- my $copyright = '';
- my $license = '';
- my %copyrights;
-
- open (my $F, '<' ,$file) or die "Unable to access $file\n";
- while (<$F>) {
- last if ($. > $OPT{'lines'});
- $content .= $_;
- $copyright_match = parse_copyright($_);
- if ($copyright_match) {
- $copyrights{lc("$copyright_match")} = "$copyright_match";
- }
- }
- close($F);
-
- $copyright = join(" / ", reverse sort values %copyrights);
-
- print qq(----- $file header -----\n$content----- end header -----\n\n)
- if $OPT{'verbose'};
-
- $license = parselicense(clean_comments($content));
-
- if ($OPT{'machine'}) {
- print "$file\t$license";
- print "\t" . ($copyright or "*No copyright*") if $OPT{'copyright'};
- print "\n";
- } else {
- print "$file: ";
- print "*No copyright* " unless $copyright;
- print $license . "\n";
- print " [Copyright: " . $copyright . "]\n"
- if $copyright and $OPT{'copyright'};
- print "\n" if $OPT{'copyright'};
- }
-}
-
-sub parse_copyright {
- my $copyright = '';
- my $match;
-
- my $copyright_indicator_regex = '
- (?:copyright # The full word
- |copr\. # Legally-valid abbreviation
- |\x{00a9} # Unicode character COPYRIGHT SIGN
- |\xc2\xa9 # Unicode copyright sign encoded in iso8859
- |\(c\) # Legally-null representation of sign
- )';
- my $copyright_disindicator_regex = '
- \b(?:info(?:rmation)? # Discussing copyright information
- |(notice|statement|claim|string)s? # Discussing the notice
- |and|or|is|in|to # Part of a sentence
- |(holder|owner)s? # Part of a sentence
- |ownership # Part of a sentence
- )\b';
- my $copyright_predisindicator_regex = '(
- ^[#]define\s+.*\(c\) # #define foo(c) -- not copyright
- )';
-
- if ( ! m%$copyright_predisindicator_regex%ix) {
-
- if (m%$copyright_indicator_regex(?::\s*|\s+)(\S.*)$%ix) {
- $match = $1;
-
- # Ignore lines matching "see foo for copyright information" etc.
- if ($match !~ m%^\s*$copyright_disindicator_regex%ix) {
- # De-cruft
- $match =~ s/([,.])?\s*$//;
- $match =~ s/$copyright_indicator_regex//igx;
- $match =~ s/^\s+//;
- $match =~ s/\s{2,}/ /g;
- $match =~ s/\\@/@/g;
- $copyright = $match;
- }
- }
- }
-
- return $copyright;
-}
-
-sub clean_comments {
- local $_ = shift or return q{};
-
- # Remove generic comments: look for 4 or more lines beginning with
- # regular comment pattern and trim it. Fall back to old algorithm
- # if no such pattern found.
- my @matches = m/^\s*([^a-zA-Z0-9\s]{1,3})\s\w/mg;
- if (@matches >= 4) {
- my $comment_re = qr/\s*[\Q$matches[0]\E]{1,3}\s*/;
- s/^$comment_re//mg;
- }
-
- # Remove Fortran comments
- s/^[cC] //gm;
- tr/\t\r\n/ /;
-
- # Remove C / C++ comments
- s#(\*/|/[/*])##g;
- tr% A-Za-z.,@;0-9\(\)/-%%cd;
- tr/ //s;
-
- return $_;
-}
-
-sub help {
- print <<"EOF";
-Usage: $progname [options] filename [filename ...]
-Valid options are:
- --help, -h Display this message
- --version, -v Display version and copyright info
- --no-conf, --noconf Don't read devscripts config files; must be
- the first option given
- --verbose Display the header of each file before its
- license information
- --lines, -l Specify how many lines of the file header
- should be parsed for license information
- (Default: $def_lines)
- --check, -c Specify a pattern indicating which files should
- be checked
- (Default: '$default_check_regex')
- --machine, -m Display in a machine readable way (good for awk)
- --recursive, -r Add the contents of directories recursively
- --copyright Also display the file's copyright
- --ignore, -i Specify that files / directories matching the
- regular expression should be ignored when
- checking files
- (Default: '$default_ignore_regex')
-
-Default settings modified by devscripts configuration files:
-$modified_conf_msg
-EOF
-}
-
-sub version {
- print <<"EOF";
-This is $progname, from the Debian devscripts package, version ###VERSION###
-Copyright (C) 2007, 2008 by Adam D. Barratt <adam\@adam-barratt.org.uk>; based
-on a script of the same name from the KDE SDK by <dfaure\@kde.org>.
-
-This program comes with ABSOLUTELY NO WARRANTY.
-You are free to redistribute this code under the terms of the
-GNU General Public License, version 2, or (at your option) any
-later version.
-EOF
-}
-
-sub parselicense {
- my ($licensetext) = @_;
-
- my $gplver = "";
- my $extrainfo = "";
- my $license = "";
-
- if ($licensetext =~ /version ([^, ]+?)[.,]? (?:\(?only\)?.? )?(?:of the GNU (Affero )?(Lesser |Library )?General Public License )?(as )?published by the Free Software Foundation/i or
- $licensetext =~ /GNU (?:Affero )?(?:Lesser |Library )?General Public License (?:as )?published by the Free Software Foundation[;,] version ([^, ]+?)[.,]? /i) {
-
- $gplver = " (v$1)";
- } elsif ($licensetext =~ /GNU (?:Affero )?(?:Lesser |Library )?General Public License, version (\d+(?:\.\d+)?)[ \.]/) {
- $gplver = " (v$1)";
- } elsif ($licensetext =~ /either version ([^ ]+)(?: of the License)?, or \(at your option\) any later version/) {
- $gplver = " (v$1 or later)";
- } elsif ($licensetext =~ /either version ([^ ]+)(?: of the License)?, or \(at your option\) version (\d(?:[\.-]\d+)*)/) {
- $gplver = " (v$1 or v$2)";
- }
-
- if ($licensetext =~ /(?:675 Mass Ave|59 Temple Place|51 Franklin Steet|02139|02111-1307)/i) {
- $extrainfo = " (with incorrect FSF address)$extrainfo";
- }
-
- if ($licensetext =~ /permission (?:is (also granted|given))? to link (the code of )?this program with (any edition of )?(Qt|the Qt library)/i) {
- $extrainfo = " (with Qt exception)$extrainfo"
- }
-
- if ($licensetext =~ /(All changes made in this file will be lost|DO NOT (EDIT|delete this file)|Generated (automatically|by|from)|generated.*file)/i) {
- $license = "GENERATED FILE";
- }
-
- if ($licensetext =~ /((is free software.? )?you can redistribute (it|them) and\/or modify (it|them)|is licensed) under the terms of (version [^ ]+ of )?the (GNU (Library |Lesser )General Public License|LGPL)/i) {
- $license = "LGPL$gplver$extrainfo $license";
- }
-
- if ($licensetext =~ /is free software.? you can redistribute (it|them) and\/or modify (it|them) under the terms of the (GNU Affero General Public License|AGPL)/i) {
- $license = "AGPL$gplver$extrainfo $license";
- }
-
- if ($licensetext =~ /(is free software.? )?you (can|may) redistribute (it|them) and\/or modify (it|them) under the terms of (?:version [^ ]+ (?:\(?only\)? )?of )?the GNU General Public License/i) {
- $license = "GPL$gplver$extrainfo $license";
- }
-
- if ($licensetext =~ /is distributed under the terms of the GNU General Public License,/
- and length $gplver) {
- $license = "GPL$gplver$extrainfo $license";
- }
-
- if ($licensetext =~ /is distributed.*terms.*GPL/) {
- $license = "GPL (unversioned/unknown version) $license";
- }
-
- if ($licensetext =~ /This file is part of the .*Qt GUI Toolkit. This file may be distributed under the terms of the Q Public License as defined/) {
- $license = "QPL (part of Qt) $license";
- } elsif ($licensetext =~ /may (be distributed|redistribute it) under the terms of the Q Public License/) {
- $license = "QPL $license";
- }
-
- if ($licensetext =~ /opensource\.org\/licenses\/mit-license\.php/) {
- $license = "MIT/X11 (BSD like) $license";
- } elsif ($licensetext =~ /Permission is hereby granted, free of charge, to any person obtaining a copy of this software and(\/or)? associated documentation files \(the (Software|Materials)\), to deal in the (Software|Materials)/) {
- $license = "MIT/X11 (BSD like) $license";
- } elsif ($licensetext =~ /Permission is hereby granted, without written agreement and without license or royalty fees, to use, copy, modify, and distribute this software and its documentation for any purpose/) {
- $license = "MIT/X11 (BSD like) $license";
- }
-
- if ($licensetext =~ /Permission to use, copy, modify, and(\/or)? distribute this software for any purpose with or without fee is hereby granted, provided.*copyright notice.*permission notice.*all copies/) {
- $license = "ISC $license";
- }
-
- if ($licensetext =~ /THIS SOFTWARE IS PROVIDED .*AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY/) {
- if ($licensetext =~ /All advertising materials mentioning features or use of this software must display the following acknowledge?ment.*This product includes software developed by/i) {
- $license = "BSD (4 clause) $license";
- } elsif ($licensetext =~ /(The name(?:\(s\))? .*? may not|Neither the (names? .*?|authors?) nor the names of( (its|their|other|any))? contributors may) be used to endorse or promote products derived from this software/i) {
- $license = "BSD (3 clause) $license";
- } elsif ($licensetext =~ /Redistributions of source code must retain the above copyright notice/i) {
- $license = "BSD (2 clause) $license";
- } else {
- $license = "BSD $license";
- }
- }
-
- if ($licensetext =~ /Mozilla Public License,? (Version|v\.) (\d+(?:\.\d+)?)/) {
- $license = "MPL (v$2) $license";
- }
-
- if ($licensetext =~ /Released under the terms of the Artistic License ([^ ]+)/) {
- $license = "Artistic (v$1) $license";
- }
-
- if ($licensetext =~ /is free software under the Artistic [Ll]icense/) {
- $license = "Artistic $license";
- }
-
- if ($licensetext =~ /This program is free software; you can redistribute it and\/or modify it under the same terms as Perl itself/) {
- $license = "Perl $license";
- }
-
- if ($licensetext =~ /under the Apache License, Version ([^ ]+)/) {
- $license = "Apache (v$1) $license";
- }
-
- if ($licensetext =~ /(THE BEER-WARE LICENSE)/i) {
- $license = "Beerware $license";
- }
-
- if ($licensetext =~ /This source file is subject to version ([^ ]+) of the PHP license/) {
- $license = "PHP (v$1) $license";
- }
-
- if ($licensetext =~ /under the terms of the CeCILL /) {
- $license = "CeCILL $license";
- }
-
- if ($licensetext =~ /under the terms of the CeCILL-([^ ]+) /) {
- $license = "CeCILL-$1 $license";
- }
-
- if ($licensetext =~ /under the SGI Free Software License B/) {
- $license = "SGI Free Software License B $license";
- }
-
- if ($licensetext =~ /is in the public domain/i) {
- $license = "Public domain $license";
- }
-
- if ($licensetext =~ /terms of the Common Development and Distribution License(, Version ([^(]+))? \(the License\)/) {
- $license = "CDDL " . ($1 ? "(v$2) " : '') . $license;
- }
-
- if ($licensetext =~ /Microsoft Permissive License \(Ms-PL\)/) {
- $license = "Ms-PL $license";
- }
-
- if ($licensetext =~ /Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license \(the \"Software\"\)/ or
- $licensetext =~ /Boost Software License([ ,-]+Version ([^ ]+)?(\.))/i) {
- $license = "BSL " . ($1 ? "(v$2) " : '') . $license;
- }
-
- if ($licensetext =~ /PYTHON SOFTWARE FOUNDATION LICENSE (VERSION ([^ ]+))/i) {
- $license = "PSF " . ($1 ? "(v$2) " : '') . $license;
- }
-
- if ($licensetext =~ /The origin of this software must not be misrepresented.*Altered source versions must be plainly marked as such.*This notice may not be removed or altered from any source distribution/ or
- $licensetext =~ /see copyright notice in zlib\.h/) {
- $license = "zlib/libpng $license";
- } elsif ($licensetext =~ /This code is released under the libpng license/) {
- $license = "libpng $license";
- }
-
- if ($licensetext =~ /Do What The Fuck You Want To Public License, Version ([^, ]+)/i) {
- $license = "WTFPL (v$1) $license";
- }
-
- if ($licensetext =~ /Do what The Fuck You Want To Public License/i) {
- $license = "WTFPL $license";
- }
-
- if ($licensetext =~ /(License WTFPL|Under (the|a) WTFPL)/i) {
- $license = "WTFPL $license";
- }
-
- $license = "UNKNOWN" if (!length($license));
-
- # Remove trailing spaces.
- $license =~ s/\s+$//;
-
- return $license;
-}
-
-sub fatal {
- my ($pack,$file,$line);
- ($pack,$file,$line) = caller();
- (my $msg = "$progname: fatal error at line $line:\n@_\n") =~ tr/\0//d;
- $msg =~ s/\n\n$/\n/;
- die $msg;
-}
diff --git a/scripts/licensecheck.sh b/scripts/licensecheck.sh
deleted file mode 100755
index a57b2f76..00000000
--- a/scripts/licensecheck.sh
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/bin/sh
-
-# Copyright (C) 2013 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-set -e
-
-usage() {
- echo "Usage: license-check your-system"
- echo
- echo "This checks license info for all the chunks in your-system"
- echo "It's re-runnable, and does morph edit to get each chunk."
- echo "The process can take a while."
-}
-
-
-if [ -z "$1" ]; then
- usage
- exit 1
-fi
-
-workspace="$PWD"/../../..
-system="$1"
-
-gplv3_chunks="\
-autoconf \
-automake \
-bash \
-binutils \
-bison \
-ccache \
-cmake \
-flex \
-gawk \
-gcc \
-gdbm \
-gettext \
-gperf \
-groff \
-libtool \
-m4 \
-make \
-nano \
-patch \
-rsync \
-texinfo-tarball"
-
-gplv3_repos=""
-
-
-for f in strata/*.morph; do
- cp "$f" "$f.bak"
-done
-
-
-strata=`grep "morph.*: *" "$system" | cut -d: -f2-`
-for stratum in $strata; do
- chunks=`grep -E -- "-? +name.*: *" "$stratum" | cut -d: -f2-`
- for chunk in $chunks; do
- if ! (echo $gplv3_chunks | grep -wq "$chunk"); then
- morph edit $chunk 1>&2
- else
- repo=`grep "name.*: *$chunk" "$stratum" -A1 | \
- tail -n1 | cut -d: -f3-`
- gplv3_repos="$gplv3_repos $repo"
- fi
- done
-done
-
-
-repos=`for stratum in $strata; do
- grep "repo.*: *" "$stratum" | cut -d: -f3-
- done | sort -u`
-
-
-for repo in $repos; do
- if ! (echo $gplv3_repos | grep -wq "$repo") && \
- [ -d "$workspace/upstream/$repo" ] ; then
- echo "$repo"
- perl scripts/licensecheck.pl -r "$workspace/upstream/$repo" | \
- cut -d: -f2- | sort -u
- echo
- fi
-done
-
-
-for f in strata/*.morph.bak; do
- mv "$f" "${f%.bak}"
-done
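licensecheck.sh above walks every stratum of a system, runs `morph edit` on each non-GPLv3 chunk, and then runs licensecheck.pl over each checked-out upstream repo. A rough Python sketch of that final per-repo step, assuming licensecheck.pl's usual 'path: licence' output format:

    import subprocess

    def report_repo_licenses(workspace, repo, script='scripts/licensecheck.pl'):
        # Equivalent of the shell pipeline:
        #   licensecheck.pl -r .../upstream/REPO | cut -d: -f2- | sort -u
        out = subprocess.check_output(
            ['perl', script, '-r', '%s/upstream/%s' % (workspace, repo)],
            universal_newlines=True)
        licenses = sorted(set(line.split(':', 1)[1].strip()
                              for line in out.splitlines() if ':' in line))
        print(repo)
        for name in licenses:
            print('  %s' % name)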
diff --git a/scripts/organize-morphologies.py b/scripts/organize-morphologies.py
deleted file mode 100755
index abc8c739..00000000
--- a/scripts/organize-morphologies.py
+++ /dev/null
@@ -1,266 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2014 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-import json
-import morphlib
-import os
-import subprocess
-import sys
-import urllib
-import urllib2
-import urlparse
-import yaml
-import re
-import errno
-
-''' organize-morphologies.py:
-Tool for organizing morphologies in definitions.
-
-This script will move:
- - cluster morphologies into clusters directory
- - system morphologies into systems directory
- - stratum morphologies into strata directory
-
-This script will download the chunk morphologies for every stratum
-and place them into the strata/stratum_which_the_chunk_belongs_to directory.
-
-It also updates any morphology fields which point to a morphology
-that has been moved.
-'''
-
-
-# NOTE: The following reimplements part of morphlib's remote repo cache stuff
-def parse_repo_alias(repo):
- domain, path = repo.split(':')
- if domain == 'baserock':
- repo = 'ssh://git@git.baserock.org/baserock/%s' % path
- elif domain == 'upstream':
- repo = 'ssh://git@git.baserock.org/delta/%s' % path
- else:
- raise Exception("I don't know how to parse the repo-alias \"%s\"" % repo)
- return repo
-
-def make_request(path):
- server_url = 'http://git.baserock.org:8080/'
- url = urlparse.urljoin(server_url, '/1.0/%s' % path)
- handle = urllib2.urlopen(url)
- return handle.read()
-
-def quote(*args):
- return tuple(urllib.quote(string) for string in args)
-
-def cat_file(repo, ref, filename):
- return make_request('files?repo=%s&ref=%s&filename=%s' %
- quote(repo, ref, filename))
-
-# NOTE: This function reimplements part of morphlib's loader
-def sanitise_morphology_path(morph_field, morph_kind, belongs_to='None'):
-    '''This function receives the name or the morph field of one morphology
-    and returns the path of the morphology, depending on its name, its kind
-    and whether it belongs to another morphology.
- '''
-    # Dictionary which maps a morphology's kind to its
-    # directory in definitions.git
- morph_dir = { 'chunk': 'chunks', 'stratum': 'strata',
- 'system':'systems', 'cluster': 'clusters'}
-    # For chunk morphologies we need to know which stratum
-    # the chunk belongs to.
- if morph_kind == 'chunk':
- if belongs_to == '':
-            raise morphlib.Error('Chunk morphologies need the stratum name '
-                                 'to create the path. Please add the stratum '
-                                 'to which this morphology belongs')
-        # Get the name of the chunk, which we assume is the last
-        # component of the morph field's path
- if '/' in morph_field:
- morph_field = os.path.basename(morph_field)
-
- # Add the stratum name to the chunk name
- morph_field = os.path.join(belongs_to, morph_field)
-
-        # Reset the kind to stratum because the chunk path contains
-        # the stratum name.
- morph_kind = 'stratum'
-
- # Add the morphology path to the morph field.
- if not morph_field.startswith(morph_dir[morph_kind]):
- morph_field = os.path.join(morph_dir[morph_kind], morph_field)
-
-    # Add the morphology suffix if it is missing.
- if not morph_field.endswith('.morph'):
- morph_field = morph_field + '.morph'
-
- return morph_field
-
-def create_directory(name, path):
- directory = os.path.join(path, name)
- try:
- os.makedirs(directory)
- except OSError as err:
- if err.errno != errno.EEXIST:
- raise err
- else:
- pass
- return directory
-
-def move_file(morph, directory, path, loader):
- if not morph.filename.startswith(directory):
- filename = os.path.basename(morph.filename)
- new_location = os.path.join(path, filename)
- print '\nMoving %s into %s' % (filename, new_location)
- subprocess.call(['git', 'mv', morph.filename, new_location])
- morph.filename = new_location
- loader.unset_defaults(morph)
- loader.save_to_file(morph.filename, morph)
-
-def load_and_fix_chunk(chunk_str, loader, name):
- try:
- chunk_morph = loader.load_from_string(chunk_str)
- except morphlib.morphloader.InvalidFieldError as err:
- if "comments" in str(err):
-            # This error is caused by old morphologies which contain the
-            # field "comments" instead of "description". Replacing it
-            # with "description" allows the morphology to pass the
-            # parse_morphology_text check, ready to be written to a file.
- fixed_chunk = loader.parse_morphology_text(chunk_str, name)
- fixed_chunk['description'] = fixed_chunk.pop('comments')
- print "WARNING: Invalid 'comments' field in " \
- "%s corrected to 'description'" % name
- chunk_morph = load_and_fix_chunk(str(fixed_chunk), loader, name)
- elif "buildsystem" in str(err):
-            # This error is caused by a typo in a morphology which
-            # has a field "buildsystem" instead of "build-system".
- fixed_chunk = loader.parse_morphology_text(chunk_str, name)
- fixed_chunk['build-system'] = fixed_chunk.pop('buildsystem')
- print "WARNING: Invalid 'buildsystem' field in %s" \
- "corrected to 'build-system'" % name
- chunk_morph = load_and_fix_chunk(str(fixed_chunk), loader, name)
- else:
- print "ERROR: %s in chunk %s" %(err, name)
- raise err
- except morphlib.morphloader.MorphologyNotYamlError as err:
- print "WARNING: %s in chunk %s is not valid YAML, " \
- "attempting to fix..." %(err, name)
-        # This error is caused by old morphologies written in JSON which
-        # contain '\t' characters. When trying to load this kind of
-        # morphology, load_from_string fails in parse_morphology_text.
-        # Removing these characters lets load_from_string load the
-        # morphology and translate it into correct YAML.
- fixed_chunk = chunk_str.replace('\t','')
- print "INFO: %s successfully fixed" % name
- chunk_morph = load_and_fix_chunk(fixed_chunk, loader, name)
- return chunk_morph
-
-def move_clusters(morphs, path, loader):
- kind = 'system'
- directory = 'clusters'
-    # Move cluster morphologies to the clusters folder, fixing references
-    # to their dependent morphologies, which are systems.
- full_path = create_directory(directory, path)
- for morph in morphs:
- all_systems = morph['systems'][:]
- for system in morph['systems']:
- all_systems.extend(system.get('subsystems', []))
- # Add the correct path to the morph fields for systems and subsystems
- for field in all_systems:
- field['morph'] = sanitise_morphology_path(field['morph'], kind)
- move_file(morph, directory, full_path, loader)
-
-def move_systems(morphs, path, loader):
- kind = 'stratum'
- directory = 'systems'
-    # Move system morphologies to the systems folder, fixing references
-    # to their dependent morphologies, which are strata.
- full_path = create_directory(directory, path)
- for morph in morphs:
-        # Add the name field and the correct stratum path to the morph
-        # fields in strata.
- for field in morph['strata']:
- field['name'] = os.path.basename(field['morph'])
- field['morph'] = sanitise_morphology_path(field['morph'], kind)
- move_file(morph, directory, full_path, loader)
-
-def download_chunks(morph, loader):
-    # Download the chunk morphologies defined in the stratum and
-    # add them to the directory tree.
- for chunk in morph['chunks']:
- name = chunk['name'] + '.morph'
- try:
- chunk['morph'] = sanitise_morphology_path(chunk['morph'], 'chunk', morph['name'])
- except KeyError as err:
- if 'morph' in str(err):
- chunk['morph'] = sanitise_morphology_path(chunk['name'], 'chunk', morph['name'])
- else:
- raise err
- ref = chunk['ref']
- repo = parse_repo_alias(chunk['repo'])
- try:
- print "\nDownloading %s from %s into %s" %(name, repo, chunk['morph'])
- chunk_str = cat_file(repo, ref, name)
- except urllib2.HTTPError as err:
-            # If there is no morphology in the repository we assume the
-            # build system will be autodetected, so we need not create one
-            # unless we disable the autodetection (the fallback system).
- if err.code == 404:
-                print 'INFO: Morph will fall back to build-time ' \
-                      'autodetection for %s' % (name)
- # Remove morph field from autodetected chunks
- del chunk['morph']
- else:
- loaded_chunk = load_and_fix_chunk(chunk_str, loader, name)
- loader.unset_defaults(loaded_chunk)
- loader.save_to_file(chunk['morph'], loaded_chunk)
-
-def move_strata(morphs, path, loader):
- # Create strata directory
- strata_dir = 'strata/'
- strata_path = create_directory(strata_dir, path)
- for morph in morphs:
-        # Create the stratum directory into which its chunks are downloaded.
- stratum_path = strata_path + morph['name']
- stratum_dir = create_directory(stratum_path, path)
-
-        # Download chunks which belong to the stratum
- download_chunks(morph, loader)
-
- # Add to build-depends the correct path to the dependent stratum morphologies.
- for build_depends in morph['build-depends']:
- build_depends['morph'] = sanitise_morphology_path(build_depends['morph'], 'stratum')
- # Move stratum morphologies to strata
- move_file(morph, strata_dir, strata_path, loader)
-
-def main():
- # Load all morphologies in the definitions repo
- sb = morphlib.sysbranchdir.open_from_within('.')
- loader = morphlib.morphloader.MorphologyLoader()
- morphs = [m for m in sb.load_all_morphologies(loader)]
-
-    # Classify the morphologies according to their kind field
- morphologies = { kind: [m for m in morphs if m['kind'] == kind]
- for kind in ('chunk', 'stratum', 'system', 'cluster') }
-
- for kind, morphs in morphologies.iteritems():
- print 'There are: %d %s.\n' %(len(morphs), kind)
-
- # Get the path from definitions repo
- definitions_repo = sb.get_git_directory_name(sb.root_repository_url)
-
-    # Move the morphologies to their directories
- move_clusters(morphologies['cluster'], definitions_repo, loader)
- move_systems(morphologies['system'], definitions_repo, loader)
- move_strata(morphologies['stratum'], definitions_repo, loader)
-
-main()
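For reference, the directory layout that organize-morphologies.py enforces via sanitise_morphology_path can be restated compactly; the following sketch (hypothetical names, same rules) shows the expected results:

    import os

    MORPH_DIRS = {'chunk': 'chunks', 'stratum': 'strata',
                  'system': 'systems', 'cluster': 'clusters'}

    def expected_path(name, kind, stratum=None):
        # Condensed restatement of sanitise_morphology_path's rules.
        if kind == 'chunk':
            # Chunks live under the stratum they belong to.
            name = os.path.join(stratum, os.path.basename(name))
            kind = 'stratum'
        if not name.startswith(MORPH_DIRS[kind]):
            name = os.path.join(MORPH_DIRS[kind], name)
        if not name.endswith('.morph'):
            name += '.morph'
        return name

    assert expected_path('bash', 'chunk', stratum='core') == 'strata/core/bash.morph'
    assert expected_path('core', 'stratum') == 'strata/core.morph'
    assert expected_path('release', 'cluster') == 'clusters/release.morph'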
diff --git a/scripts/release-build b/scripts/release-build
deleted file mode 100755
index 5525e9e9..00000000
--- a/scripts/release-build
+++ /dev/null
@@ -1,175 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2014 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-
-import cliapp
-import morphlib
-import os
-import subprocess
-import sys
-import time
-
-
-class Build(object):
- '''A single distbuild instance.'''
-
- def __init__(self, name, arch, app):
- self.system_name = name
- self.controller = app.controllers[arch]
- self.command = [
- 'morph', 'distbuild-morphology',
- '--controller-initiator-address=%s' % self.controller,
- 'baserock:baserock/definitions', app.ref, self.system_name]
-
- def start(self):
- self.process = subprocess.Popen(self.command)
-
- def completed(self):
- return (self.process.poll() is not None)
-
-
-class ReleaseApp(cliapp.Application):
-
- '''Cliapp app that handles distbuilding and deploying a cluster.'''
-
- def add_settings(self):
- self.settings.string_list(['controllers'],
- 'a list of distbuild controllers and their '
- 'architecture')
-
- self.settings.string(['trove-host'],
- 'hostname of Trove instance')
-
- self.settings.string(['artifact-cache-server'],
- 'server to fetch artifacts from', default=None)
-
- self.settings.string(['release-number'],
- 'Baserock version of the systems being built',
- default='yy.ww')
-
- def process_args(self, args):
- '''Process the command line'''
- self.controllers = {}
- controllers_list = self.settings['controllers']
- for item in controllers_list:
- arch, controller = item.split(':')
- self.controllers[arch] = controller
-
- self.ref = cliapp.runcmd(['git', 'rev-parse', 'HEAD']).strip()
-
- sb = morphlib.sysbranchdir.open_from_within('.')
- definitions = sb.get_git_directory_name(sb.root_repository_url)
- defs_repo = morphlib.gitdir.GitDirectory(definitions)
- self.loader = morphlib.morphloader.MorphologyLoader()
- self.finder = morphlib.morphologyfinder.MorphologyFinder(defs_repo)
-
- cluster_name = args[0]
- cluster, cluster_path = self.load_morphology(cluster_name)
-
- builds = self.prepare_builds(cluster)
- if not os.path.exists('builds'):
- os.mkdir('builds')
- os.chdir('builds')
- for build in builds:
- build.start()
-
- while not all(build.completed() for build in builds):
- time.sleep(1)
-
- fail = False
- for build in builds:
- if build.process.returncode != 0:
- fail = True
- sys.stderr.write(
- 'Building failed for %s\n' % build.system_name)
- if fail:
- raise cliapp.AppException('Building of systems failed')
-
- os.chdir('..')
- if not os.path.exists('release'):
- os.mkdir('release')
- self.deploy_images(cluster, cluster_path)
-
- def load_morphology(self, name, kind=None):
- path = morphlib.util.sanitise_morphology_path(name)
- morph = self.loader.load_from_string(
- self.finder.read_morphology(path))
- if kind:
- assert morph['kind'] == kind
- return morph, path
-
- def iterate_systems(self, system_list):
- for system in system_list:
- yield system['morph']
- if 'subsystems' in system:
- for subsystem in self.iterate_systems(system['subsystems']):
- yield subsystem
-
- def prepare_builds(self, cluster):
- '''Prepare a list of builds'''
- systems = set(self.iterate_systems(cluster['systems']))
- builds = []
- for system_name in systems:
- system, _ = self.load_morphology(system_name)
- if system['arch'] in self.controllers:
- builds.append(Build(system_name, system['arch'], self))
- return builds
-
- def deploy_images(self, cluster, cluster_path):
- version_label = 'baserock-%s' % self.settings['release-number']
- outputs = {}
-
- for system in cluster['systems']:
- morphology_name = system['morph']
- morphology = self.load_morphology(morphology_name)[0]
- if morphology['arch'] not in self.controllers:
- continue
-
- for deployment_name, deployment_info in system['deploy'].iteritems():
-            # The release.morph cluster must specify the basename of the
-            # file, i.e. its name plus extension. This script knows the
-            # name, but it can't find out the appropriate file extension
-            # without second-guessing the behaviour of write extensions.
- basename = deployment_info['location']
-
- if '/' in basename or basename.startswith(version_label):
- raise cliapp.AppException(
- 'In %s: system %s.location should be just the base name, '
- 'e.g. "%s.img"' % (cluster_path, deployment_name, deployment_name))
-
- filename = os.path.join('release', '%s-%s' % (version_label, basename))
- if os.path.exists(filename):
- self.output.write('Reusing existing deployment of %s\n' % filename)
- else:
- self.output.write('Creating %s from release.morph\n' % filename)
- self.deploy_single_image(cluster_path, deployment_name, filename, version_label)
-
- def deploy_single_image(self, cluster_path, name, location, version_label):
- deploy_command = [
- 'morph', 'deploy', cluster_path, name,
- '--trove-host=%s' % self.settings['trove-host']]
- artifact_server = self.settings['artifact-cache-server']
- if artifact_server is not None:
- deploy_command.append('--artifact-cache-server=%s' % artifact_server)
- deploy_command.extend((
- '%s.location=%s' % (name, location),
- '%s.VERSION_LABEL=%s' % (name, version_label)
- ))
-
- cliapp.runcmd(deploy_command, stdout=sys.stdout)
-
-
-ReleaseApp().run()
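release-build's Build class starts one `morph distbuild-morphology` per system and polls them all until completion; any non-zero exit code fails the release. The underlying start/poll pattern, as a self-contained sketch:

    import subprocess
    import time

    def run_in_parallel(commands):
        # Launch every command, then poll until all have exited,
        # mirroring Build.start() and Build.completed() above.
        procs = [subprocess.Popen(cmd) for cmd in commands]
        while any(p.poll() is None for p in procs):
            time.sleep(1)
        return [p.returncode for p in procs]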
diff --git a/scripts/release-build.test.conf b/scripts/release-build.test.conf
deleted file mode 100644
index 50083352..00000000
--- a/scripts/release-build.test.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-[config]
-trove-host = ct-mcr-1.ducie.codethink.co.uk
-controllers = x86_64:ct-mcr-1-distbuild-x86-64-majikthise-controller.dyn.ducie.codethink.co.uk,
- x86_32:ct-mcr-1-distbuild-x86-32-majikthise-controller.dyn.ducie.codethink.co.uk,
- armv7lhf:ct-mcr-1-distbuild-armv7lhf-jetson.dyn.ducie.codethink.co.uk
-release-number = 14.29
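The controllers value above is a comma-separated list of ARCH:HOST entries; release-build splits each entry on the first ':' to build its arch-to-controller map (the real script leaves the config-file parsing to cliapp). A sketch of reading the same file by hand, assuming this conf layout:

    try:
        from configparser import ConfigParser  # Python 3
    except ImportError:
        from ConfigParser import ConfigParser  # Python 2

    parser = ConfigParser()
    parser.read('scripts/release-build.test.conf')
    raw = parser.get('config', 'controllers')
    controllers = dict(item.strip().split(':', 1)
                       for item in raw.split(',') if item.strip())
    # controllers['x86_64'] is then the x86_64 distbuild controller host.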
diff --git a/scripts/release-test b/scripts/release-test
deleted file mode 100755
index a1611721..00000000
--- a/scripts/release-test
+++ /dev/null
@@ -1,401 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2014 Codethink Ltd
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-'''release-test
-
-This script deploys the set of systems in the cluster morphology it is
-instructed to read, to test that they work correctly.
-
-'''
-
-import cliapp
-import os
-import pipes
-import shlex
-import shutil
-import socket
-import tempfile
-import time
-import uuid
-
-import morphlib
-
-
-class MorphologyHelper(object):
-
- def __init__(self):
- self.sb = sb = morphlib.sysbranchdir.open_from_within('.')
- defs_repo_path = sb.get_git_directory_name(sb.root_repository_url)
- self.defs_repo = morphlib.gitdir.GitDirectory(defs_repo_path)
- self.loader = morphlib.morphloader.MorphologyLoader()
- self.finder = morphlib.morphologyfinder.MorphologyFinder(self.defs_repo)
-
- def load_morphology(self, path):
- text = self.finder.read_morphology(path)
- return self.loader.load_from_string(text)
-
- @classmethod
- def iterate_systems(cls, systems_list):
- for system in systems_list:
- yield morphlib.util.sanitise_morphology_path(system['morph'])
- if 'subsystems' in system:
- for subsystem in cls.iterate_systems(system['subsystems']):
- yield subsystem
-
-    def iterate_cluster_deployments(self, cluster_morph):
- for system in cluster_morph['systems']:
- path = morphlib.util.sanitise_morphology_path(system['morph'])
- defaults = system.get('deploy-defaults', {})
- for name, options in system['deploy'].iteritems():
- config = dict(defaults)
- config.update(options)
- yield path, name, config
-
- def load_cluster_systems(self, cluster_morph):
- for system_path in set(self.iterate_systems(cluster_morph['systems'])):
- system_morph = self.load_morphology(system_path)
- yield system_path, system_morph
-
-
-class TimeoutError(cliapp.AppException):
-
- """Error to be raised when a connection waits too long"""
-
- def __init__(self, msg):
- super(TimeoutError, self).__init__(msg)
-
-
-class VMHost(object):
-
- def __init__(self, user, address, disk_path):
- self.user = user
- self.address = address
- self.disk_path = disk_path
-
- @property
- def ssh_host(self):
- return '{user}@{address}'.format(user=self.user, address=self.address)
-
- def runcmd(self, *args, **kwargs):
- cliapp.ssh_runcmd(self.ssh_host, *args, **kwargs)
-
- def virsh(self, *args, **kwargs):
- self.runcmd(['virsh', '-c', 'qemu:///system'] + list(args), **kwargs)
-
-
-class DeployedSystemInstance(object):
-
- def __init__(self, deployment, config, host_machine, vm_id, rootfs_path):
- self.deployment = deployment
- self.config = config
- # TODO: Stop assuming test machine can DHCP and be assigned its
- # hostname in the deployer's resolve search path.
- self.ip_address = self.config['HOSTNAME']
- self.host_machine = host_machine
- self.vm_id = vm_id
- self.rootfs_path = rootfs_path
-
- @property
- def ssh_host(self):
- # TODO: Stop assuming we ssh into test instances as root
- return 'root@{host}'.format(host=self.ip_address)
-
- def runcmd(self, argv, chdir='.', **kwargs):
- ssh_cmd = ['ssh', '-o', 'StrictHostKeyChecking=no',
- '-o', 'UserKnownHostsFile=/dev/null', self.ssh_host]
- cmd = ['sh', '-c', 'cd "$1" && shift && exec "$@"', '-', chdir]
- cmd += argv
- ssh_cmd.append(' '.join(map(pipes.quote, cmd)))
- return cliapp.runcmd(ssh_cmd, **kwargs)
-
- def _wait_for_dhcp(self, timeout):
- '''Block until given hostname resolves successfully.
-
- Raises TimeoutError if the hostname has not appeared in 'timeout'
- seconds.
-
- '''
- start_time = time.time()
- while True:
- try:
- socket.gethostbyname(self.ip_address)
- return
- except socket.gaierror:
- pass
- if time.time() > start_time + timeout:
- raise TimeoutError("Host %s did not appear after %i seconds" %
- (self.ip_address, timeout))
- time.sleep(0.5)
-
- def _wait_for_ssh(self, timeout):
- """Wait until the deployed VM is responding via SSH"""
- start_time = time.time()
- while True:
- try:
- self.runcmd(['true'], stdin=None, stdout=None, stderr=None)
- return
- except cliapp.AppException:
- # TODO: Stop assuming the ssh part of the command is what failed
- if time.time() > start_time + timeout:
- raise TimeoutError("%s sshd did not start after %i seconds"
- % (self.ip_address, timeout))
- time.sleep(0.5)
-
- def wait_until_online(self, timeout=10):
- self._wait_for_dhcp(timeout)
- self._wait_for_ssh(timeout)
-
- def delete(self):
- # Stop and remove VM
- try:
- self.host_machine.virsh('destroy', self.vm_id)
- except cliapp.AppException as e:
- # TODO: Stop assuming that destroy failed because it wasn't running
- pass
- try:
- self.host_machine.virsh('undefine', self.vm_id, '--remove-all-storage')
- except cliapp.AppException as e:
- # TODO: Stop assuming that undefine failed because it was
- # already removed
- pass
-
-
-class Deployment(object):
-
- def __init__(self, cluster_path, name, deployment_config, host_machine):
- self.cluster_path = cluster_path
- self.name = name
- self.deployment_config = deployment_config
- self.host_machine = host_machine
-
- @staticmethod
- def _ssh_host_key_exists(hostname):
- """Check if an ssh host key exists in known_hosts"""
- if not os.path.exists('/root/.ssh/known_hosts'):
- return False
- with open('/root/.ssh/known_hosts', 'r') as known_hosts:
- return any(line.startswith(hostname) for line in known_hosts)
-
- def _update_known_hosts(self):
- if not self._ssh_host_key_exists(self.host_machine.address):
- with open('/root/.ssh/known_hosts', 'a') as known_hosts:
- cliapp.runcmd(['ssh-keyscan', self.host_machine.address],
- stdout=known_hosts)
-
- @staticmethod
- def _generate_sshkey_config(tempdir, config):
- manifest = os.path.join(tempdir, 'manifest')
- with open(manifest, 'w') as f:
- f.write('0040700 0 0 /root/.ssh\n')
- f.write('overwrite 0100600 0 0 /root/.ssh/authorized_keys\n')
- authkeys = os.path.join(tempdir, 'root', '.ssh', 'authorized_keys')
- os.makedirs(os.path.dirname(authkeys))
- with open(authkeys, 'w') as auth_f:
- with open('/root/.ssh/id_rsa.pub', 'r') as key_f:
- shutil.copyfileobj(key_f, auth_f)
-
- install_files = shlex.split(config.get('INSTALL_FILES', ''))
- install_files.append(manifest)
- yield 'INSTALL_FILES', ' '.join(pipes.quote(f) for f in install_files)
-
- def deploy(self):
- self._update_known_hosts()
-
- hostname = str(uuid.uuid4())
- vm_id = hostname
- image_base = self.host_machine.disk_path
- rootpath = '{image_base}/{hostname}.img'.format(image_base=image_base,
- hostname=hostname)
- loc = 'kvm+ssh://{ssh_host}/{id}/{path}'.format(
- ssh_host=self.host_machine.ssh_host, id=vm_id, path=rootpath)
-
- options = {
- 'type': 'kvm',
- 'location': loc,
- 'AUTOSTART': 'True',
- 'HOSTNAME': hostname,
- 'DISK_SIZE': '20G',
- 'RAM_SIZE': '2G',
- 'VERSION_LABEL': 'release-test',
- }
-
- tempdir = tempfile.mkdtemp()
- try:
- options.update(
- self._generate_sshkey_config(tempdir,
- self.deployment_config))
-
- args = ['morph', 'deploy', self.cluster_path, self.name]
- for k, v in options.iteritems():
- args.append('%s.%s=%s' % (self.name, k, v))
- cliapp.runcmd(args, stdin=None, stdout=None, stderr=None)
-
- config = dict(self.deployment_config)
- config.update(options)
-
- return DeployedSystemInstance(self, config, self.host_machine,
- vm_id, rootpath)
- finally:
- shutil.rmtree(tempdir)
-
-
-class ReleaseApp(cliapp.Application):
-
- """Cliapp application which handles automatic builds and tests"""
-
- def add_settings(self):
- """Add the command line options needed"""
- group_main = 'Program Options'
- self.settings.string_list(['deployment-host'],
- 'ARCH:HOST:PATH that VMs can be deployed to',
- default=None,
- group=group_main)
- self.settings.string(['trove-host'],
- 'Address of Trove for test systems to build from',
- default=None,
- group=group_main)
- self.settings.string(['trove-id'],
- 'ID of Trove for test systems to build from',
- default=None,
- group=group_main)
- self.settings.string(['build-ref-prefix'],
- 'Prefix of build branches for test systems',
- default=None,
- group=group_main)
-
- @staticmethod
- def _run_tests(instance, system_path, system_morph,
- (trove_host, trove_id, build_ref_prefix),
- morph_helper, systems):
- instance.wait_until_online()
-
- tests = []
- def baserock_build_test(instance):
- instance.runcmd(['git', 'config', '--global', 'user.name',
- 'Test Instance of %s' % instance.deployment.name])
- instance.runcmd(['git', 'config', '--global', 'user.email',
- 'ci-test@%s' % instance.config['HOSTNAME']])
- instance.runcmd(['mkdir', '-p', '/src/ws', '/src/cache',
- '/src/tmp'])
- def morph_cmd(*args, **kwargs):
- # TODO: decide whether to use cached artifacts or not by
- # adding --artifact-cache-server= --cache-server=
- argv = ['morph', '--log=/src/morph.log', '--cachedir=/src/cache',
- '--tempdir=/src/tmp', '--log-max=100M',
- '--trove-host', trove_host, '--trove-id', trove_id,
- '--build-ref-prefix', build_ref_prefix]
- argv.extend(args)
- instance.runcmd(argv, **kwargs)
-
- repo = morph_helper.sb.root_repository_url
- ref = morph_helper.defs_repo.HEAD
- sha1 = morph_helper.defs_repo.resolve_ref_to_commit(ref)
- morph_cmd('init', '/src/ws')
- chdir = '/src/ws'
-
- morph_cmd('checkout', repo, ref, chdir=chdir)
- # TODO: Add a morph subcommand that gives the path to the root repository.
- repo_path = os.path.relpath(
- morph_helper.sb.get_git_directory_name(repo),
- morph_helper.sb.root_directory)
- chdir = os.path.join(chdir, ref, repo_path)
-
- instance.runcmd(['git', 'reset', '--hard', sha1], chdir=chdir)
- print 'Building test systems for {sys}'.format(sys=system_path)
- for to_build_path, to_build_morph in systems.iteritems():
- if to_build_morph['arch'] == system_morph['arch']:
- print 'Test building {path}'.format(path=to_build_path)
- morph_cmd('build', to_build_path, chdir=chdir,
- stdin=None, stdout=None, stderr=None)
- print 'Finished Building test systems'
-
- def python_smoke_test(instance):
- instance.runcmd(['python', '-c', 'print "Hello World"'])
-
- # TODO: Come up with a better way of determining which tests to run
- if 'devel' in system_path:
- tests.append(baserock_build_test)
- else:
- tests.append(python_smoke_test)
-
- for test in tests:
- test(instance)
-
- def deploy_and_test_systems(self, cluster_path,
- deployment_hosts, build_test_config):
- """Run the deployments and tests"""
-
- version = 'release-test'
-
- morph_helper = MorphologyHelper()
- cluster_morph = morph_helper.load_morphology(cluster_path)
- systems = dict(morph_helper.load_cluster_systems(cluster_morph))
-
- for system_path, deployment_name, deployment_config in \
- morph_helper.iterate_cluster_deployments(cluster_morph):
-
- system_morph = systems[system_path]
- # We can only test systems in KVM that have a BSP
- if not any('bsp' in si['morph'] for si in system_morph['strata']):
- continue
-
- # We can only test systems in KVM that we have a host for
- if system_morph['arch'] not in deployment_hosts:
- continue
- host_machine = deployment_hosts[system_morph['arch']]
- deployment = Deployment(cluster_path, deployment_name,
- deployment_config, host_machine)
-
- instance = deployment.deploy()
- try:
- self._run_tests(instance, system_path, system_morph,
- build_test_config, morph_helper, systems)
- finally:
- instance.delete()
-
- def process_args(self, args):
- """Process the command line args and kick off the builds/tests"""
- if self.settings['build-ref-prefix'] is None:
- self.settings['build-ref-prefix'] = (
- os.path.join(self.settings['trove-id'], 'builds'))
- for setting in ('deployment-host', 'trove-host',
- 'trove-id', 'build-ref-prefix'):
- self.settings.require(setting)
-
- deployment_hosts = {}
- for host_config in self.settings['deployment-host']:
- arch, address = host_config.split(':', 1)
- user, address = address.split('@', 1)
- address, disk_path = address.split(':', 1)
- if user == '':
- user = 'root'
- # TODO: Don't assume root is the user with deploy access
- deployment_hosts[arch] = VMHost(user, address, disk_path)
-
- build_test_config = (self.settings['trove-host'],
- self.settings['trove-id'],
- self.settings['build-ref-prefix'])
-
- if len(args) != 1:
- raise cliapp.AppException('Usage: release-test CLUSTER')
- cluster_path = morphlib.util.sanitise_morphology_path(args[0])
- self.deploy_and_test_systems(cluster_path, deployment_hosts,
- build_test_config)
-
-
-if __name__ == '__main__':
- ReleaseApp().run()
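release-test's _wait_for_dhcp and _wait_for_ssh both implement the same poll-with-deadline loop. Generalised into one helper (a sketch, not part of the deleted script):

    import time

    def wait_until(check, timeout, what, interval=0.5):
        # Poll check() until it returns True, or raise once
        # 'timeout' seconds have elapsed.
        start = time.time()
        while not check():
            if time.time() > start + timeout:
                raise RuntimeError('%s did not happen after %i seconds'
                                   % (what, timeout))
            time.sleep(interval)

Used as, for example, wait_until(lambda: host_resolves(hostname), 10, 'DHCP for %s' % hostname), where host_resolves is whatever readiness probe the caller supplies.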
diff --git a/scripts/release-test-os b/scripts/release-test-os
deleted file mode 100755
index a886300e..00000000
--- a/scripts/release-test-os
+++ /dev/null
@@ -1,526 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2014 Codethink Ltd
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-'''release-test-os
-
-This script deploys the set of systems in the cluster morphology it is
-instructed to read, to test that they work correctly.
-
-'''
-
-import cliapp
-import os
-import pipes
-import shlex
-import shutil
-import socket
-import tempfile
-import time
-import uuid
-
-import morphlib
-
-
-class NovaList:
- def __init__(self):
- self.output = []
- self.lines = []
- self.instance = []
-
- def update(self):
- self.output = cliapp.runcmd(['nova', 'list'])
- self.lines = self.output.split('\n')
- self.lines = self.lines[3:-2]
-
- def get_nova_details_for_instance(self, name):
- self.update()
-
- for line in self.lines:
- entries = line.split('|')
- stripped_line = [entry.strip() for entry in entries]
- if stripped_line.count(name) == 1:
- self.instance = stripped_line
-
- def get_nova_state_for_instance(self, name):
- self.get_nova_details_for_instance(name)
- if not self.instance:
- return
- return self.instance[3]
-
- def get_nova_ip_for_instance(self, name):
- self.get_nova_details_for_instance(name)
- if not self.instance:
- return
-
- if self.get_nova_state_for_instance(name) != 'ACTIVE':
- return
-
- return self.instance[6]
-
- def get_nova_ip_for_instance_timeout(self, name, timeout=120):
- start_time = time.time()
-
- while self.get_nova_state_for_instance(name) != 'ACTIVE':
-
- if time.time() > start_time + timeout:
- print "%s not ACTIVE after %i seconds" % (name, timeout)
- return
-
- time.sleep(1)
-
- ip_addr = self.get_nova_ip_for_instance(name)
- if not ip_addr:
- return
-
- if ip_addr.count('=') == 0:
- return
-
- ip_addr = ip_addr[ip_addr.find('=') + 1:]
-
- if ip_addr.count(',') == 0:
- return ip_addr
-
- return ip_addr[:ip_addr.find(',')]
-
-
-
-class MorphologyHelper(object):
-
- def __init__(self):
- self.sb = sb = morphlib.sysbranchdir.open_from_within('.')
- defs_repo_path = sb.get_git_directory_name(sb.root_repository_url)
- self.defs_repo = morphlib.gitdir.GitDirectory(defs_repo_path)
- self.loader = morphlib.morphloader.MorphologyLoader()
- self.finder = morphlib.morphologyfinder.MorphologyFinder(self.defs_repo)
-
- def load_morphology(self, path):
- text = self.finder.read_morphology(path)
- return self.loader.load_from_string(text)
-
- @classmethod
- def iterate_systems(cls, systems_list):
- for system in systems_list:
- yield morphlib.util.sanitise_morphology_path(system['morph'])
- if 'subsystems' in system:
- for subsystem in cls.iterate_systems(system['subsystems']):
- yield subsystem
-
-    def iterate_cluster_deployments(self, cluster_morph):
- for system in cluster_morph['systems']:
- path = morphlib.util.sanitise_morphology_path(system['morph'])
- defaults = system.get('deploy-defaults', {})
- for name, options in system['deploy'].iteritems():
- config = dict(defaults)
- config.update(options)
- yield path, name, config
-
- def load_cluster_systems(self, cluster_morph):
- for system_path in set(self.iterate_systems(cluster_morph['systems'])):
- system_morph = self.load_morphology(system_path)
- yield system_path, system_morph
-
-
-class TimeoutError(cliapp.AppException):
-
- """Error to be raised when a connection waits too long"""
-
- def __init__(self, msg):
- super(TimeoutError, self).__init__(msg)
-
-
-class VMHost(object):
-
- def __init__(self, user, address, disk_path):
- self.user = user
- self.address = address
- self.disk_path = disk_path
-
- @property
- def ssh_host(self):
- return '{user}@{address}'.format(user=self.user, address=self.address)
-
- def runcmd(self, *args, **kwargs):
- cliapp.ssh_runcmd(self.ssh_host, *args, **kwargs)
-
-
-class DeployedSystemInstance(object):
-
- def __init__(self, deployment, config, host_machine, vm_id, rootfs_path,
- ip_addr, hostname):
- self.deployment = deployment
- self.config = config
- self.ip_address = ip_addr
- self.host_machine = host_machine
- self.vm_id = vm_id
- self.rootfs_path = rootfs_path
- self.hostname = hostname
-
- @property
- def ssh_host(self):
- # TODO: Stop assuming we ssh into test instances as root
- return 'root@{host}'.format(host=self.ip_address)
-
- def runcmd(self, argv, chdir='.', **kwargs):
- ssh_cmd = ['ssh', '-o', 'StrictHostKeyChecking=no',
- '-o', 'UserKnownHostsFile=/dev/null', self.ssh_host]
- cmd = ['sh', '-c', 'cd "$1" && shift && exec "$@"', '-', chdir]
- cmd += argv
- ssh_cmd.append(' '.join(map(pipes.quote, cmd)))
- return cliapp.runcmd(ssh_cmd, **kwargs)
-
- def _wait_for_dhcp(self, timeout):
- '''Block until given hostname resolves successfully.
-
- Raises TimeoutError if the hostname has not appeared in 'timeout'
- seconds.
-
- '''
- start_time = time.time()
- while True:
- try:
- socket.gethostbyname(self.ip_address)
- return
- except socket.gaierror:
- pass
- if time.time() > start_time + timeout:
- raise TimeoutError("Host %s did not appear after %i seconds" %
- (self.ip_address, timeout))
- time.sleep(0.5)
-
- def _wait_for_ssh(self, timeout):
- """Wait until the deployed VM is responding via SSH"""
- start_time = time.time()
- while True:
- try:
- self.runcmd(['true'], stdin=None, stdout=None, stderr=None)
- return
- except cliapp.AppException:
- # TODO: Stop assuming the ssh part of the command is what failed
- if time.time() > start_time + timeout:
- raise TimeoutError("%s sshd did not start after %i seconds"
- % (self.ip_address, timeout))
- time.sleep(0.5)
-
- def _wait_for_cloud_init(self, timeout):
- """Wait until cloud init has resized the disc"""
- start_time = time.time()
- while True:
- try:
- out = self.runcmd(['sh', '-c',
- 'test -e "$1" && echo exists || echo does not exist',
- '-',
- '/root/cloud-init-finished'])
- except:
- import traceback
- traceback.print_exc()
- raise
- if out.strip() == 'exists':
- return
- if time.time() > start_time + timeout:
- raise TimeoutError("Disc size not increased after %i seconds"
- % (timeout))
- time.sleep(3)
-
- def wait_until_online(self, timeout=120):
- self._wait_for_dhcp(timeout)
- self._wait_for_ssh(timeout)
- self._wait_for_cloud_init(timeout)
- print "Test system %s ready to run tests." % (self.hostname)
-
- def delete(self):
- # Stop and remove VM
- print "Deleting %s test instance" % (self.hostname)
- try:
- cliapp.runcmd(['nova', 'delete', self.hostname])
- except cliapp.AppException as e:
- # TODO: Stop assuming that delete failed because the instance
- # wasn't running
- print "- Failed"
- pass
- print "Deleting %s test disc image" % (self.hostname)
- try:
- cliapp.runcmd(['nova', 'image-delete', self.hostname])
- except cliapp.AppException as e:
- # TODO: Stop assuming that image-delete failed because it was
- # already removed
- print "- Failed"
- pass
-
-
-class Deployment(object):
-
- def __init__(self, cluster_path, name, deployment_config,
- host_machine, net_id):
- self.cluster_path = cluster_path
- self.name = name
- self.deployment_config = deployment_config
- self.host_machine = host_machine
- self.net_id = net_id
-
- @staticmethod
- def _ssh_host_key_exists(hostname):
- """Check if an ssh host key exists in known_hosts"""
- if not os.path.exists('/root/.ssh/known_hosts'):
- return False
- with open('/root/.ssh/known_hosts', 'r') as known_hosts:
- return any(line.startswith(hostname) for line in known_hosts)
-
- def _update_known_hosts(self):
- if not self._ssh_host_key_exists(self.host_machine.address):
- with open('/root/.ssh/known_hosts', 'a') as known_hosts:
- cliapp.runcmd(['ssh-keyscan', self.host_machine.address],
- stdout=known_hosts)
-
- @staticmethod
- def _generate_sshkey_config(tempdir, config):
- manifest = os.path.join(tempdir, 'manifest')
- with open(manifest, 'w') as f:
- f.write('0040700 0 0 /root/.ssh\n')
- f.write('overwrite 0100600 0 0 /root/.ssh/authorized_keys\n')
- authkeys = os.path.join(tempdir, 'root', '.ssh', 'authorized_keys')
- os.makedirs(os.path.dirname(authkeys))
- with open(authkeys, 'w') as auth_f:
- with open('/root/.ssh/id_rsa.pub', 'r') as key_f:
- shutil.copyfileobj(key_f, auth_f)
-
- install_files = shlex.split(config.get('INSTALL_FILES', ''))
- install_files.append(manifest)
- yield 'INSTALL_FILES', ' '.join(pipes.quote(f) for f in install_files)
-
- def deploy(self):
- self._update_known_hosts()
-
- hostname = str(uuid.uuid4())
- vm_id = hostname
- image_base = self.host_machine.disk_path
- rootpath = '{image_base}/{hostname}.img'.format(image_base=image_base,
- hostname=hostname)
- loc = 'http://{ssh_host}:5000/v2.0'.format(
- ssh_host=self.host_machine.ssh_host, id=vm_id, path=rootpath)
-
- options = {
- 'type': 'openstack',
- 'location': loc,
- 'HOSTNAME': hostname,
- 'DISK_SIZE': '5G',
- 'RAM_SIZE': '2G',
- 'VERSION_LABEL': 'release-test',
- 'OPENSTACK_USER': os.environ['OS_USERNAME'],
- 'OPENSTACK_TENANT': os.environ['OS_TENANT_NAME'],
- 'OPENSTACK_PASSWORD': os.environ['OS_PASSWORD'],
- 'OPENSTACK_IMAGENAME': hostname,
- 'CLOUD_INIT': 'yes',
- 'KERNEL_ARGS': 'console=tty0 console=ttyS0',
- }
-
- tempdir = tempfile.mkdtemp()
- try:
- options.update(
- self._generate_sshkey_config(tempdir,
- self.deployment_config))
-
- # Deploy the image to openstack
- args = ['morph', 'deploy', self.cluster_path, self.name]
- for k, v in options.iteritems():
- args.append('%s.%s=%s' % (self.name, k, v))
- cliapp.runcmd(args, stdin=None, stdout=None, stderr=None)
-
- config = dict(self.deployment_config)
- config.update(options)
-
- # Boot an instance from the image
- args = ['nova', 'boot',
- '--flavor', 'm1.medium',
- '--image', hostname,
- '--user-data', '/usr/lib/mason/os-init-script',
- '--nic', "net-id=%s" % (self.net_id),
- hostname]
- output = cliapp.runcmd(args)
-
- # Print nova boot output, with adminPass line removed
- output_lines = output.split('\n')
-            for line in output_lines:
-                if line.find('adminPass') != -1:
-                    output_lines.remove(line)
-                    break
- output = '\n'.join(output_lines)
- print output
-
- # Get ip address from nova list
- nl = NovaList()
- ip_addr = nl.get_nova_ip_for_instance_timeout(hostname)
- print "IP address for instance %s: %s" % (hostname, ip_addr)
-
- return DeployedSystemInstance(self, config, self.host_machine,
- vm_id, rootpath, ip_addr, hostname)
- finally:
- shutil.rmtree(tempdir)
-
-
-class ReleaseApp(cliapp.Application):
-
- """Cliapp application which handles automatic builds and tests"""
-
- def add_settings(self):
- """Add the command line options needed"""
- group_main = 'Program Options'
- self.settings.string_list(['deployment-host'],
- 'ARCH:HOST:PATH that VMs can be deployed to',
- default=None,
- group=group_main)
- self.settings.string(['trove-host'],
- 'Address of Trove for test systems to build from',
- default=None,
- group=group_main)
- self.settings.string(['trove-id'],
- 'ID of Trove for test systems to build from',
- default=None,
- group=group_main)
- self.settings.string(['build-ref-prefix'],
- 'Prefix of build branches for test systems',
- default=None,
- group=group_main)
- self.settings.string(['net-id'],
- 'Openstack network ID',
- default=None,
- group=group_main)
-
- @staticmethod
- def _run_tests(instance, system_path, system_morph,
- (trove_host, trove_id, build_ref_prefix),
- morph_helper, systems):
- instance.wait_until_online()
-
- tests = []
- def baserock_build_test(instance):
- instance.runcmd(['git', 'config', '--global', 'user.name',
- 'Test Instance of %s' % instance.deployment.name])
- instance.runcmd(['git', 'config', '--global', 'user.email',
- 'ci-test@%s' % instance.config['HOSTNAME']])
- instance.runcmd(['mkdir', '-p', '/src/ws', '/src/cache',
- '/src/tmp'])
- def morph_cmd(*args, **kwargs):
- # TODO: decide whether to use cached artifacts or not by
- # adding --artifact-cache-server= --cache-server=
- argv = ['morph', '--log=/src/morph.log', '--cachedir=/src/cache',
- '--tempdir=/src/tmp', '--log-max=100M',
- '--trove-host', trove_host, '--trove-id', trove_id,
- '--build-ref-prefix', build_ref_prefix]
- argv.extend(args)
- instance.runcmd(argv, **kwargs)
-
- repo = morph_helper.sb.root_repository_url
- ref = morph_helper.defs_repo.HEAD
- sha1 = morph_helper.defs_repo.resolve_ref_to_commit(ref)
- morph_cmd('init', '/src/ws')
- chdir = '/src/ws'
-
- morph_cmd('checkout', repo, ref, chdir=chdir)
- # TODO: Add a morph subcommand that gives the path to the root repository.
- repo_path = os.path.relpath(
- morph_helper.sb.get_git_directory_name(repo),
- morph_helper.sb.root_directory)
- chdir = os.path.join(chdir, ref, repo_path)
-
- instance.runcmd(['git', 'reset', '--hard', sha1], chdir=chdir)
- print 'Building test systems for {sys}'.format(sys=system_path)
- for to_build_path, to_build_morph in systems.iteritems():
- if to_build_morph['arch'] == system_morph['arch']:
- print 'Test building {path}'.format(path=to_build_path)
- morph_cmd('build', to_build_path, chdir=chdir,
- stdin=None, stdout=None, stderr=None)
- print 'Finished Building test systems'
-
- def python_smoke_test(instance):
- instance.runcmd(['python', '-c', 'print "Hello World"'])
-
- # TODO: Come up with a better way of determining which tests to run
- if 'devel' in system_path:
- tests.append(baserock_build_test)
- else:
- tests.append(python_smoke_test)
-
- for test in tests:
- test(instance)
-
- def deploy_and_test_systems(self, cluster_path,
- deployment_hosts, build_test_config,
- net_id):
- """Run the deployments and tests"""
-
- version = 'release-test'
-
- morph_helper = MorphologyHelper()
- cluster_morph = morph_helper.load_morphology(cluster_path)
- systems = dict(morph_helper.load_cluster_systems(cluster_morph))
-
- for system_path, deployment_name, deployment_config in \
- morph_helper.iterate_cluster_deployments(cluster_morph):
-
- system_morph = systems[system_path]
- # We can only test systems in KVM that have a BSP
- if not any('bsp' in si['morph'] for si in system_morph['strata']):
- continue
-
- # We can only test systems in KVM that we have a host for
- if system_morph['arch'] not in deployment_hosts:
- continue
- host_machine = deployment_hosts[system_morph['arch']]
- deployment = Deployment(cluster_path, deployment_name,
- deployment_config, host_machine,
- net_id)
-
- instance = deployment.deploy()
- try:
- self._run_tests(instance, system_path, system_morph,
- build_test_config, morph_helper, systems)
- finally:
- instance.delete()
-
- def process_args(self, args):
- """Process the command line args and kick off the builds/tests"""
- if self.settings['build-ref-prefix'] is None:
- self.settings['build-ref-prefix'] = (
- os.path.join(self.settings['trove-id'], 'builds'))
- for setting in ('deployment-host', 'trove-host',
- 'trove-id', 'build-ref-prefix', 'net-id'):
- self.settings.require(setting)
-
- deployment_hosts = {}
- for host_config in self.settings['deployment-host']:
- arch, address = host_config.split(':', 1)
- user, address = address.split('@', 1)
- address, disk_path = address.split(':', 1)
- if user == '':
- user = 'root'
- # TODO: Don't assume root is the user with deploy access
- deployment_hosts[arch] = VMHost(user, address, disk_path)
-
- build_test_config = (self.settings['trove-host'],
- self.settings['trove-id'],
- self.settings['build-ref-prefix'])
-
- if len(args) != 1:
-            raise cliapp.AppException('Usage: release-test-os CLUSTER')
- cluster_path = morphlib.util.sanitise_morphology_path(args[0])
- self.deploy_and_test_systems(cluster_path, deployment_hosts,
- build_test_config,
- self.settings['net-id'])
-
-
-if __name__ == '__main__':
- ReleaseApp().run()
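NovaList above screen-scrapes `nova list`, relying on the first three lines of output being the table border plus header and the last two being the closing border and a trailing blank. The same row parsing, as a standalone sketch under those assumptions:

    def parse_nova_table(output):
        # Drop the header (3 lines) and footer (2 lines) of the ASCII
        # table, then split each remaining row on '|'. The leading and
        # trailing cells are empty, so e.g. row[3] is the Status column.
        return [[cell.strip() for cell in line.split('|')]
                for line in output.split('\n')[3:-2]]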
diff --git a/scripts/release-upload b/scripts/release-upload
deleted file mode 100755
index 273f9ed5..00000000
--- a/scripts/release-upload
+++ /dev/null
@@ -1,473 +0,0 @@
-#!/usr/bin/python
-# Copyright (C) 2014 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-
-'''Upload and publish Baserock binaries for a release.
-
-This utility is used for the Baserock release process. See
-http://wiki.baserock.org/guides/release-process/ for details on the
-release process.
-
-This utility uploads two sets of binaries:
-
-* The build artifacts (built chunks and strata) used to construct the
- systems being released. The systems are found in `release.morph` and
- the artifacts from the Trove used to prepare the release. They get
- uploaded to a public Trove (by default git.baserock.org). If they're
- the same Trove, then nothing happens.
-
-* The released system images (disk images, tar archives, etc)
- specified in `release.morph` get uploaded to a download server (by
- default download.baserock.org).
-
-'''
-
-
-import json
-import logging
-import os
-import pwd
-import shutil
-import sys
-import urllib
-import urllib2
-import urlparse
-
-import cliapp
-import yaml
-
-import morphlib
-
-class ReleaseUploader(cliapp.Application):
-
- def add_settings(self):
- group = 'Release upload settings'
-
- local_username = self.get_local_username()
-
- self.settings.string(
- ['build-trove-host'],
- 'get build artifacts from Trove at ADDRESS',
- metavar='ADDRESS',
- group=group)
-
- self.settings.string(
- ['public-trove-host'],
- 'publish build artifacts on Trove at ADDRESS',
- metavar='ADDRESS',
- default='git.baserock.org',
- group=group)
-
- self.settings.string(
- ['public-trove-username'],
- 'log into public trove as USER',
- metavar='USER',
- default=local_username,
- group=group)
-
- self.settings.string(
- ['public-trove-artifact-dir'],
- 'put published artifacts into DIR',
- metavar='DIR',
- default='/home/cache/artifacts',
- group=group)
-
- self.settings.string(
- ['release-artifact-dir'],
- 'get release artifacts from DIR (all files from there)',
- metavar='DIR',
- default='.',
- group=group)
-
- self.settings.string(
- ['download-server-address'],
- 'publish release artifacts on server at ADDRESS',
- metavar='ADDRESS',
- default='download.baserock.org',
- group=group)
-
- self.settings.string(
- ['download-server-username'],
- 'log into download server as USER',
- metavar='USER',
- default=local_username,
- group=group)
-
- self.settings.string(
- ['download-server-private-dir'],
- 'use DIR as the temporary location for uploaded release '
- 'artifacts',
- metavar='DIR',
- default='/srv/download.baserock.org/baserock/.publish-temp',
- group=group)
-
- self.settings.string(
- ['download-server-public-dir'],
- 'put published release artifacts in DIR',
- metavar='DIR',
- default='/srv/download.baserock.org/baserock',
- group=group)
-
- self.settings.string(
- ['local-build-artifacts-dir'],
- 'keep build artifacts to be uploaded temporarily in DIR',
- metavar='DIR',
- default='build-artifacts',
- group=group)
-
- self.settings.string(
- ['morph-cmd'],
- 'run FILE to invoke morph',
- metavar='FILE',
- default='morph',
- group=group)
-
- self.settings.string_list(
- ['arch'],
- 'Upload files from morphologies of ARCH',
- metavar='ARCH',
- default=[],
- group=group)
-
- self.settings.boolean(
- ['upload-build-artifacts'],
- 'upload build artifacts?',
- default=True)
-
- self.settings.boolean(
- ['upload-release-artifacts'],
- 'upload release artifacts (disk images etc)?',
- default=True)
-
- def get_local_username(self):
- uid = os.getuid()
- return pwd.getpwuid(uid)[0]
-
- def process_args(self, args):
- if len(args) != 1:
- raise cliapp.AppException('Usage: release-upload CLUSTER')
- cluster_morphology_path = args[0]
- self.status(msg='Uploading and publishing Baserock release')
-
- if self.settings['upload-build-artifacts']:
- self.publish_build_artifacts(cluster_morphology_path)
- else:
- self.status(
- msg='Not uploading build artifacts '
-                    '(upload-build-artifacts set to false)')
-
- if self.settings['upload-release-artifacts']:
- self.publish_release_artifacts()
- else:
- self.status(
- msg='Not uploading release artifacts '
-                '(upload-release-artifacts set to false)')
-
- def publish_build_artifacts(self, cluster_morphology_path):
- publisher = BuildArtifactPublisher(self.settings, self.status)
- publisher.publish_build_artifacts(cluster_morphology_path)
- self.status(msg='Build artifacts have been published')
-
- def publish_release_artifacts(self):
- publisher = ReleaseArtifactPublisher(self.settings, self.status)
- publisher.publish_release_artifacts()
- self.status(msg='Release artifacts have been published')
-
- def status(self, msg, **kwargs):
- formatted = msg.format(**kwargs)
- logging.info(formatted)
- sys.stdout.write(formatted + '\n')
- sys.stdout.flush()
-
-
-class BuildArtifactPublisher(object):
-
- '''Publish build artifacts related to the release.'''
-
- def __init__(self, settings, status):
- self.settings = settings
- self.status = status
-
- def publish_build_artifacts(self, cluster_path):
- artifact_basenames = self.list_build_artifacts_for_release(cluster_path)
- self.status(
- msg='Found {count} build artifact files in release',
- count=len(artifact_basenames))
-
- to_be_uploaded = self.filter_away_build_artifacts_on_public_trove(
- artifact_basenames)
-
-        logging.debug('List of artifacts (basenames) to upload (excluding those already uploaded):')
- for i, basename in enumerate(to_be_uploaded):
- logging.debug(' {0}: {1}'.format(i, basename))
- logging.debug('End of artifact list (to_be_uploaded)')
-
- self.status(
- msg='Need to fetch locally, then upload {count} build artifacts',
- count=len(to_be_uploaded))
-
- self.upload_build_artifacts_to_public_trove(to_be_uploaded)
-
- def list_build_artifacts_for_release(self, cluster_morphology_path):
- self.status(msg='Find build artifacts included in release')
-
-        # FIXME: These are hardcoded for simplicity. It would be
-        # possible to deduce them automatically from the workspace,
-        # but that can happen later.
- repo = 'file://%s' % os.path.abspath('.')
- ref = 'HEAD'
-
- argv = [self.settings['morph-cmd'], 'list-artifacts', '--quiet', repo, ref]
- argv += self.find_system_morphologies(cluster_morphology_path)
- output = cliapp.runcmd(argv)
- basenames = output.splitlines()
- logging.debug('List of build artifacts in release:')
- for basename in basenames:
- logging.debug(' {0}'.format(basename))
- logging.debug('End of list of build artifacts in release')
-
- return basenames
-
- def find_system_morphologies(self, cluster_morphology_path):
- cluster = self.load_cluster_morphology(cluster_morphology_path)
- system_dicts = self.find_systems_in_parsed_cluster_morphology(cluster)
- if self.settings['arch']:
- system_dicts = self.choose_systems_for_wanted_architectures(
- system_dicts, self.settings['arch'])
- return [sd['morph'] for sd in system_dicts]
-
- def load_cluster_morphology(self, pathname):
- with open(pathname) as f:
- return yaml.load(f)
-
- def find_systems_in_parsed_cluster_morphology(self, cluster):
- return cluster['systems']
-
- def choose_systems_for_wanted_architectures(self, system_dicts, archs):
- return [
- sd
- for sd in system_dicts
- if self.system_is_for_wanted_arch(sd, archs)]
-
- def system_is_for_wanted_arch(self, system_dict, archs):
- morph = self.load_system_morphology(system_dict)
- return morph['arch'] in archs
-
- def load_system_morphology(self, system_dict):
- pathname = morphlib.util.sanitise_morphology_path(system_dict['morph'])
- return self.load_morphology_from_named_file(pathname)
-
- def load_morphology_from_named_file(self, pathname):
- finder = self.get_morphology_finder_for_root_repository()
- morphology_text = finder.read_morphology(pathname)
- loader = morphlib.morphloader.MorphologyLoader()
- return loader.load_from_string(morphology_text)
-
- def get_morphology_finder_for_root_repository(self):
- sb = morphlib.sysbranchdir.open_from_within('.')
- definitions = sb.get_git_directory_name(sb.root_repository_url)
- definitions_repo = morphlib.gitdir.GitDirectory(definitions)
- return morphlib.morphologyfinder.MorphologyFinder(definitions_repo)
-
- def filter_away_build_artifacts_on_public_trove(self, basenames):
- result = []
- logging.debug('Filtering away already existing artifacts:')
- for basename, exists in self.query_public_trove_for_artifacts(basenames):
- logging.debug(' {0}: {1}'.format(basename, exists))
- if not exists:
- result.append(basename)
- logging.debug('End of filtering away')
- return result
-
- def query_public_trove_for_artifacts(self, basenames):
- host = self.settings['public-trove-host']
-
- # FIXME: This could use
-        # contextlib.closing(urllib2.urlopen(url, data=data)) instead
- # of explicit closing.
- url = 'http://{host}:8080/1.0/artifacts'.format(host=host)
- data = json.dumps(basenames)
- f = urllib2.urlopen(url, data=data)
- obj = json.load(f)
- return obj.items()
-
- def upload_build_artifacts_to_public_trove(self, basenames):
- self.download_artifacts_locally(basenames)
- self.upload_artifacts_to_public_trove(basenames)
-
- def download_artifacts_locally(self, basenames):
- dirname = self.settings['local-build-artifacts-dir']
- self.create_directory_if_missing(dirname)
- for i, basename in enumerate(basenames):
- url = self.construct_artifact_url(basename)
- pathname = os.path.join(dirname, basename)
- if not os.path.exists(pathname):
- self.status(
- msg='Downloading {i}/{total} {basename}',
- basename=repr(basename), i=i, total=len(basenames))
- self.download_from_url(url, dirname, pathname)
-
- def create_directory_if_missing(self, dirname):
- if not os.path.exists(dirname):
- os.makedirs(dirname)
-
- def construct_artifact_url(self, basename):
- scheme = 'http'
- netloc = '{host}:8080'.format(host=self.settings['build-trove-host'])
- path = '/1.0/artifacts'
- query = 'filename={0}'.format(urllib.quote_plus(basename))
- fragment = ''
- components = (scheme, netloc, path, query, fragment)
- return urlparse.urlunsplit(components)
-
- def download_from_url(self, url, dirname, pathname):
- logging.info(
- 'Downloading {url} to {pathname}'.format(
- url=url, pathname=pathname))
- with open(pathname, 'wb') as output:
- try:
- incoming = urllib2.urlopen(url)
- shutil.copyfileobj(incoming, output)
- incoming.close()
- except urllib2.HTTPError as e:
- if pathname.endswith('.meta'):
- return
- self.status(
- msg="ERROR: Can't download {url}: {explanation}",
- url=url,
- explanation=str(e))
- os.remove(pathname)
- raise
-
- def upload_artifacts_to_public_trove(self, basenames):
- self.status(
- msg='Upload build artifacts to {trove}',
- trove=self.settings['public-trove-host'])
- rsync_files_to_server(
- self.settings['local-build-artifacts-dir'],
- basenames,
- self.settings['public-trove-username'],
- self.settings['public-trove-host'],
- self.settings['public-trove-artifact-dir'])
- set_permissions_on_server(
- self.settings['public-trove-username'],
- self.settings['public-trove-host'],
- self.settings['public-trove-artifact-dir'],
- basenames)
-
-class ReleaseArtifactPublisher(object):
-
- '''Publish release artifacts for a release.'''
-
- def __init__(self, settings, status):
- self.settings = settings
- self.status = status
-
- def publish_release_artifacts(self):
- files = self.list_release_artifacts()
- if files:
- self.upload_release_artifacts_to_private_dir(files)
- self.move_release_artifacts_to_public_dir(files)
- self.create_symlinks_to_new_release_artifacts(files)
-
- def list_release_artifacts(self):
- self.status(msg='Find release artifacts to publish')
- return os.listdir(self.settings['release-artifact-dir'])
-
- def upload_release_artifacts_to_private_dir(self, files):
- self.status(msg='Upload release artifacts to private directory')
- path = self.settings['download-server-private-dir']
- self.create_directory_on_download_server(path)
- self.rsync_files_to_download_server(files, path)
-
- def create_directory_on_download_server(self, path):
- user = self.settings['download-server-username']
- host = self.settings['download-server-address']
- self.status(msg='Create {host}:{path}', host=host, path=path)
- target = '{user}@{host}'.format(user=user, host=host)
- cliapp.ssh_runcmd(target, ['mkdir', '-p', path])
-
- def rsync_files_to_download_server(self, files, path):
- self.status(msg='Upload release artifacts to download server')
- rsync_files_to_server(
- self.settings['release-artifact-dir'],
- files,
- self.settings['download-server-username'],
- self.settings['download-server-address'],
- path)
- set_permissions_on_server(
- self.settings['download-server-username'],
- self.settings['download-server-address'],
- path,
- files)
-
- def move_release_artifacts_to_public_dir(self, files):
- self.status(msg='Move release artifacts to public directory')
- private_dir = self.settings['download-server-private-dir']
- public_dir = self.settings['download-server-public-dir']
- self.create_directory_on_download_server(public_dir)
-
-        # Move just the contents of the private dir, not the dir
-        # itself ('-mindepth 1'). Avoid overwriting existing files
-        # ('mv -n').
- argv = ['find', private_dir, '-mindepth', '1',
- '-exec', 'mv', '-n', '{}', public_dir + '/.', ';']
-
- target = '{user}@{host}'.format(
- user=self.settings['download-server-username'],
- host=self.settings['download-server-address'])
- cliapp.ssh_runcmd(target, argv)
-
- def create_symlinks_to_new_release_artifacts(self, files):
-        self.status(msg='FIXME: Create symlinks to new release artifacts')
-
-
-def rsync_files_to_server(
- source_dir, source_filenames, user, host, target_dir):
-
- if not source_filenames:
- return
-
- argv = [
- 'rsync',
- '-a',
- '--progress',
- '--partial',
- '--human-readable',
- '--sparse',
- '--protect-args',
- '-0',
- '--files-from=-',
- source_dir,
- '{user}@{host}:{path}'.format(user=user, host=host, path=target_dir),
- ]
-
-    files_list = '\0'.join(source_filenames)
- cliapp.runcmd(argv, feed_stdin=files_list, stdout=None, stderr=None)
-
-
-def set_permissions_on_server(user, host, target_dir, filenames):
- # If we have no files, we can't form a valid command to run on the server
- if not filenames:
- return
- target = '{user}@{host}'.format(user=user, host=host)
- argv = ['xargs', '-0', 'chmod', '0644']
- files_list = ''.join(
- '{0}\0'.format(os.path.join(target_dir, filename)) for filename in filenames)
- cliapp.ssh_runcmd(target, argv, feed_stdin=files_list, stdout=None, stderr=None)
-
-
-ReleaseUploader(description=__doc__).run()
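
The FIXME in query_public_trove_for_artifacts() points at the cleaner form:
wrap the urllib2 response in contextlib.closing() so it is closed even if
json.load() fails. A minimal sketch of that variant, assuming the same
Python 2 / urllib2 environment the script targets (the standalone helper
and its names are illustrative, not part of the script):

    import contextlib
    import json
    import urllib2

    def query_trove_for_artifacts(host, basenames):
        # POST the JSON-encoded list of basenames; the Trove replies
        # with a {basename: exists} mapping. closing() guarantees the
        # response is closed even if json.load() raises.
        url = 'http://{host}:8080/1.0/artifacts'.format(host=host)
        data = json.dumps(basenames)
        with contextlib.closing(urllib2.urlopen(url, data=data)) as f:
            return json.load(f).items()
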
diff --git a/scripts/release-upload.test.conf b/scripts/release-upload.test.conf
deleted file mode 100644
index 13227983..00000000
--- a/scripts/release-upload.test.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-[config]
-download-server-address = localhost
-download-server-private-dir = /tmp/private
-download-server-public-dir = /tmp/public
-build-trove-host = ct-mcr-1.ducie.codethink.co.uk
-public-trove-host = localhost
-public-trove-username = root
-public-trove-artifact-dir = /tmp/artifacts
-release-artifact-dir = t.release-files
-morph-cmd = /home/root/git-morph
diff --git a/strata/baserock-ci-tests.morph b/strata/baserock-ci-tests.morph
new file mode 100644
index 00000000..574a3333
--- /dev/null
+++ b/strata/baserock-ci-tests.morph
@@ -0,0 +1,14 @@
+name: baserock-ci-tests
+kind: stratum
+description: |
+ This stratum contains chunks which provide tests to be run by the
+ Baserock CI Pipeline (Mason).
+build-depends:
+- morph: strata/morph-utils.morph
+chunks:
+- name: system-tests
+ morph: strata/baserock-ci-tests/system-tests.morph
+ repo: baserock:baserock/system-tests
+ ref: 7fb68b7f377583dac40634338870583baaa2fe65
+ unpetrify-ref: baserock/mason-v2
+ build-depends: []
diff --git a/strata/baserock-ci-tests/system-tests.morph b/strata/baserock-ci-tests/system-tests.morph
new file mode 100644
index 00000000..2f356e19
--- /dev/null
+++ b/strata/baserock-ci-tests/system-tests.morph
@@ -0,0 +1,5 @@
+name: system-tests
+kind: chunk
+install-commands:
+- mkdir -p "$DESTDIR$PREFIX"/share/system-tests
+- cp -a mason "$DESTDIR$PREFIX"/share/system-tests/mason
diff --git a/strata/gerrit-tools.morph b/strata/gerrit-tools.morph
new file mode 100644
index 00000000..69aa493b
--- /dev/null
+++ b/strata/gerrit-tools.morph
@@ -0,0 +1,10 @@
+name: gerrit-tools
+kind: stratum
+description: Tools for use with Gerrit
+build-depends:
+- morph: strata/python-common.morph
+chunks:
+- name: pygerrit
+ repo: github:sonyxperiadev/pygerrit
+ ref: daad8e23d9bbcd8a2bc565497be50a3cc29dae6b
+ unpetrify-ref: master
diff --git a/strata/python-paramiko.morph b/strata/python-paramiko.morph
new file mode 100644
index 00000000..9edc2a35
--- /dev/null
+++ b/strata/python-paramiko.morph
@@ -0,0 +1,24 @@
+name: python-paramiko
+kind: stratum
+description: A stratum with Paramiko and its dependencies.
+build-depends:
+- morph: strata/core.morph
+chunks:
+- name: ecdsa
+ repo: upstream:python-packages/ecdsa
+ ref: 36e9cfa80fcf8b53119adc787e54a5892ec1eb2c
+ unpetrify-ref: python-ecdsa-0.11
+ build-depends: []
+- name: pycrypto
+ morph: strata/python-paramiko/pycrypto.morph
+ repo: upstream:python-packages/pycrypto
+ ref: 7fd528d03b5eae58eef6fd219af5d9ac9c83fa50
+ unpetrify-ref: v2.6.1
+ build-depends: []
+- name: paramiko
+ repo: upstream:paramiko
+ ref: 951faed80b017e553a27c4cb98f210df44341f8f
+ unpetrify-ref: baserock/morph
+ build-depends:
+ - ecdsa
+ - pycrypto
diff --git a/strata/python-paramiko/pycrypto.morph b/strata/python-paramiko/pycrypto.morph
new file mode 100644
index 00000000..51cc00f0
--- /dev/null
+++ b/strata/python-paramiko/pycrypto.morph
@@ -0,0 +1,3 @@
+name: pycrypto
+kind: chunk
+build-system: python-distutils
diff --git a/strata/zuul-ci.morph b/strata/zuul-ci.morph
new file mode 100644
index 00000000..18e3011f
--- /dev/null
+++ b/strata/zuul-ci.morph
@@ -0,0 +1,137 @@
+name: zuul-ci
+kind: stratum
+description: |
+ This stratum contains Zuul, turbo-hipster, and their dependencies.
+
+  Zuul is a project gating system which orchestrates the testing of patches
+  and merges them when certain conditions are met (for example, when all of
+  the tests Zuul schedules pass). See http://ci.openstack.org/zuul/ for
+  full documentation of Zuul.
+
+  turbo-hipster is a Zuul worker which handles the running of tests. When it
+  starts, turbo-hipster tells its Zuul instance which tests it can run; Zuul
+  then asks it to run those tests against given patches, depending on Zuul's
+  configuration. See http://turbo-hipster.readthedocs.org/ for full
+  documentation of turbo-hipster.
+build-depends:
+- morph: strata/python-paramiko.morph
+- morph: strata/morph-utils.morph
+- morph: strata/openstack-clients.morph
+chunks:
+- name: paste
+ repo: upstream:python-packages/paste
+ ref: 4c177fce89fee925f0f4fbfde00ce2e1252562c0
+ unpetrify-ref: master
+ build-depends: []
+- name: webob
+ repo: upstream:python-packages/webob
+ ref: 48f3cf1f3d9d194d3c6dc2afb1c890cf7cf5f075
+ unpetrify-ref: 1.2.3
+ build-depends: []
+- name: smmap
+ repo: upstream:python-packages/smmap
+ ref: eb40b44ce4a6e646aabf7b7091d876738336c42f
+ unpetrify-ref: master
+ build-depends: []
+- name: gitdb
+ repo: upstream:python-packages/gitdb
+ ref: ab4520683ab325046f2a9fe6ebf127dbbab60dfe
+ unpetrify-ref: master
+ build-depends:
+ - smmap
+- name: gitpython
+ repo: upstream:python-packages/gitpython
+ ref: 87756520b17aa0cb79a881d9aba7b4c73bddbcae
+ unpetrify-ref: baserock/zuul-ci
+ build-depends:
+ - gitdb
+- name: lockfile
+ repo: upstream:python-packages/lockfile
+ ref: 777758cdf4520271370b3338b86b5c66f9b104f0
+ unpetrify-ref: master
+ build-depends: []
+- name: ordereddict
+ repo: upstream:python-packages/ordereddict-tarball
+ ref: 332cd0213829089dd827a32e7c5e64c41ce79cbc
+ unpetrify-ref: master
+ build-depends: []
+- name: python-daemon
+ repo: upstream:python-packages/python-daemon
+ ref: afcc4ea312255a2545f9c67d7c34ffefb00c80c0
+ unpetrify-ref: master
+ build-depends: []
+- name: extras
+ repo: upstream:python-packages/python-test-extras
+ ref: cdeb596f01241e9c779332e86f6edcd0c2e8e9f0
+ unpetrify-ref: master
+ build-depends: []
+- name: mock
+ repo: upstream:python-packages/mock
+ ref: 35b35f7ad239005a950f870af57b44dbdc99d66b
+ unpetrify-ref: master
+ build-depends: []
+- name: nose
+ repo: upstream:python-packages/nose
+ ref: 908b2cda43eff9ab7a5045b6f6dfe3a718fb9afd
+ unpetrify-ref: release_1.2.1
+ build-depends: []
+- name: flake8
+ repo: upstream:python-packages/flake8
+ ref: 32a273fbdb64500cb8d7f0bcae7059078524f86d
+ unpetrify-ref: 1.7.0
+ build-depends: []
+- name: pystatsd
+ repo: upstream:python-packages/pystatsd
+ ref: 97cc0e10b0f86a23096c1197aeef74e3be5da75c
+ unpetrify-ref: v2.1.2
+ build-depends:
+ - mock
+ - nose
+ - flake8
+- name: voluptuous
+ repo: upstream:python-packages/voluptuous
+ ref: edcdd9a696eb16a565842aeefc3072a713755d84
+ unpetrify-ref: 0.8.6
+ build-depends: []
+- name: gear
+ repo: upstream:python-packages/gear
+ ref: 7bad8c1b9275bfacc47e7d5b8ba5abb6204a0c21
+ unpetrify-ref: master
+ build-depends: []
+- name: apscheduler
+ repo: upstream:python-packages/apscheduler
+ ref: 398ec4dee4b6be200cfbc0acedd9d9db454fefed
+ unpetrify-ref: v2.1.2
+ build-depends: []
+- name: python-magic
+ repo: upstream:python-packages/python-magic
+ ref: 3664686df1e8e202bc449083d68ff0035ece63ea
+ unpetrify-ref: master
+ build-depends: []
+- name: zuul
+ repo: upstream:openstack/zuul
+ ref: 1f4f8e136ec33b8babf58c0f43a83860fa329e52
+ unpetrify-ref: master
+ build-depends:
+ - paste
+ - webob
+ - gitpython
+ - lockfile
+ - ordereddict
+ - python-daemon
+ - extras
+ - pystatsd
+ - voluptuous
+ - gear
+ - apscheduler
+- name: turbo-hipster
+ repo: upstream:openstack/turbo-hipster
+ ref: 479a1010f3066dd15e8e23d7deedebad7db69100
+ unpetrify-ref: baserock/allow-external-plugins
+ build-depends:
+ - python-magic
+ - lockfile
+ - gear
+ - python-daemon
+ - extras
+ - gitpython
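
The stratum description says turbo-hipster advertises the tests it can run
and then runs whatever Zuul dispatches; that conversation happens over the
Gearman protocol, which is why the 'gear' chunk is built here. A minimal
sketch of a worker registering one function and answering a single request,
assuming a Gearman server on localhost; the function name and result format
are illustrative, not turbo-hipster's actual plugin API:

    import json

    import gear

    worker = gear.Worker('example-worker')
    worker.addServer('127.0.0.1')       # Zuul's geard listens on 4730
    worker.registerFunction('build:example-test')

    job = worker.getJob()               # blocks until a job is dispatched
    params = json.loads(job.arguments)  # Zuul sends job parameters as JSON
    # ... fetch and test the patch described by params here ...
    job.sendWorkComplete(json.dumps({'result': 'SUCCESS'}))
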
diff --git a/systems/gerrit-system-x86_64.morph b/systems/gerrit-system-x86_64.morph
new file mode 100644
index 00000000..62c99821
--- /dev/null
+++ b/systems/gerrit-system-x86_64.morph
@@ -0,0 +1,66 @@
+name: gerrit-system-x86_64
+kind: system
+description: |
+ System for running Gerrit on Baserock.
+
+ Note this system doesn't contain Gerrit or Java: the Baserock reference
+ definitions don't have any support for Java yet. Instead, Java and Gerrit
+ are downloaded from the web and installed on first-boot by the configuration
+ management scripts.
+
+ So this system is really just a Baserock base system plus
+ Ansible and some extras.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+
+- name: lvm
+ morph: strata/lvm.morph
+
+- name: python-common
+ morph: strata/python-common.morph
+- name: ansible
+ morph: strata/ansible.morph
+
+# lighttpd is needed for Lorry Controller. Gerrit uses its own web server.
+#
+# pcre-utils is a dependency of lighttpd.
+# python-cliapp and python-wsgi are needed for lorry-controller.
+- name: pcre-utils
+ morph: strata/pcre-utils.morph
+- name: lighttpd-server
+ morph: strata/lighttpd-server.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: lorry-controller
+ morph: strata/lorry-controller.morph
+
+- name: gerrit-tools
+ morph: strata/gerrit-tools.morph
+
+# FIXME: 'tools' is needed mainly because rsync lives there and we need
+# rsync for upgrades using system-version-manager.
+- name: tools
+ morph: strata/tools.morph
+
+configuration-extensions:
+- set-hostname
+- add-config-files
+- nfsboot
+- install-files
+- cloud-init
diff --git a/systems/mason-system-x86_64-generic.morph b/systems/mason-system-x86_64-generic.morph
new file mode 100644
index 00000000..c96ef85d
--- /dev/null
+++ b/systems/mason-system-x86_64-generic.morph
@@ -0,0 +1,58 @@
+name: mason-system-x86_64-generic
+kind: system
+description: |
+  A system which contains Zuul and turbo-hipster, for setting up a CI pipeline.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: morph-utils
+ morph: strata/morph-utils.morph
+- name: openstack-clients
+ morph: strata/openstack-clients.morph
+- name: openstack-common
+ morph: strata/openstack-common.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: pcre-utils
+ morph: strata/pcre-utils.morph
+- name: lighttpd-server
+ morph: strata/lighttpd-server.morph
+- name: webtools
+ morph: strata/webtools.morph
+- name: ruby
+ morph: strata/ruby.morph
+- name: nodejs
+ morph: strata/nodejs.morph
+- name: python-tools
+ morph: strata/python-tools.morph
+- name: python-paramiko
+ morph: strata/python-paramiko.morph
+- name: ansible
+ morph: strata/ansible.morph
+- name: zuul-ci
+ morph: strata/zuul-ci.morph
+- name: baserock-ci-tests
+ morph: strata/baserock-ci-tests.morph
+- name: python-core
+ morph: strata/python-core.morph
+- name: python-common
+ morph: strata/python-common.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- simple-network
+- nfsboot
+- install-files
+- distbuild
+- fstab
+- mason
+- cloud-init
diff --git a/tasks/create-data-volume.yml b/tasks/create-data-volume.yml
new file mode 100644
index 00000000..05b07afe
--- /dev/null
+++ b/tasks/create-data-volume.yml
@@ -0,0 +1,26 @@
+# Format a volume for data storage
+#
+# The pattern is to create an LVM volume group on the device, with a
+# single logical volume inside it. Snapshots of the data LV can be taken
+# very quickly, allowing us to take backup copies without requiring
+# long periods of downtime for the relevant services.
+---
+
+- name: ensure LVM metadata service is running
+ service: name=lvm2-lvmetad enabled=yes state=started
+
+- name: LVM logical volume group on /dev/vdb
+ lvg: vg=vg0 pvs=/dev/vdb
+
+- name: logical volume for {{ lv_name }}
+ lvol: vg=vg0 lv={{ lv_name }} size={{ lv_size }}
+
+# This will NEVER overwrite an existing filesystem. Unless you add
+# 'force=yes' to the arguments. So don't do that. See:
+# http://docs.ansible.com/filesystem_module.html.
+- name: ext4 filesystem on /dev/vg0/{{ lv_name }}
+ filesystem: fstype=ext4 dev=/dev/vg0/{{ lv_name }}
+
+- name: mount {{ lv_name }} logical volume
+ mount: src=/dev/vg0/{{ lv_name }} name={{ mountpoint }} fstype=ext4 state=mounted
+
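
The comment at the top of this task file explains the point of the LVM
layout: snapshots of the data LV allow backup copies to be taken with
almost no downtime. A minimal sketch of that backup pattern, assuming the
vg0/{{ lv_name }} layout created above; the snapshot name and size, the
mountpoint and the backup destination are all illustrative, and the real
backup tooling may differ:

    import subprocess

    def backup_via_snapshot(lv_name, mountpoint='/mnt/backup-snapshot'):
        '''Copy data out of a short-lived snapshot of /dev/vg0/<lv_name>.'''
        snap = '{0}-snapshot'.format(lv_name)
        origin = '/dev/vg0/{0}'.format(lv_name)
        # Take a copy-on-write snapshot; writers pause only for an instant.
        subprocess.check_call(
            ['lvcreate', '--snapshot', '--size', '1G', '--name', snap, origin])
        try:
            subprocess.check_call(['mkdir', '-p', mountpoint])
            subprocess.check_call(
                ['mount', '/dev/vg0/{0}'.format(snap), mountpoint])
            try:
                # Copy the frozen view while the live LV keeps serving.
                subprocess.check_call(
                    ['rsync', '-a', mountpoint + '/',
                     '/srv/backup/{0}/'.format(lv_name)])
            finally:
                subprocess.check_call(['umount', mountpoint])
        finally:
            # COW snapshots fill as the origin changes; remove promptly.
            subprocess.check_call(
                ['lvremove', '--force', '/dev/vg0/{0}'.format(snap)])
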