-rw-r--r-- .gitreview | 2
-rw-r--r-- README | 59
-rw-r--r-- README.mdwn | 797
-rw-r--r-- admin/cve-2015-0235-ghost.c | 40
-rw-r--r-- admin/sshd_config.yaml | 8
-rw-r--r-- admin/test-cve-2015-0235-ghost.yml | 18
-rw-r--r-- ansible.cfg | 5
-rwxr-xr-x backup-snapshot | 249
-rw-r--r-- baserock-ops-team.cloud-config | 16
-rwxr-xr-x baserock_backup/backup.sh | 27
-rw-r--r-- baserock_backup/crond.service | 18
-rw-r--r-- baserock_backup/instance-config.yml | 38
-rw-r--r-- baserock_backup/ssh_config | 4
-rw-r--r-- baserock_database/backup-snapshot.conf | 4
-rw-r--r-- baserock_database/image-config.yml | 46
-rw-r--r-- baserock_database/instance-backup-config.yml | 29
-rw-r--r-- baserock_database/instance-config.yml | 15
-rw-r--r-- baserock_database/instance-mariadb-config.yml | 71
-rw-r--r-- baserock_frontend/haproxy.cfg | 122
-rw-r--r-- baserock_frontend/image-config.yml | 34
-rw-r--r-- baserock_frontend/instance-backup-config.yml | 29
-rw-r--r-- baserock_frontend/instance-config.yml | 18
-rw-r--r-- baserock_gerrit/All-Projects/groups | 16
-rw-r--r-- baserock_gerrit/All-Projects/project.config | 125
-rw-r--r-- baserock_gerrit/backup-snapshot.conf | 5
-rw-r--r-- baserock_gerrit/baserock_gerrit.morph | 26
-rw-r--r-- baserock_gerrit/branding/GerritSite.css | 15
-rw-r--r-- baserock_gerrit/branding/GerritSiteHeader.html | 1
-rw-r--r-- baserock_gerrit/branding/baserock-logo.png | bin 0 -> 13765 bytes
-rw-r--r-- baserock_gerrit/branding/openstack-page-bkg.jpg | bin 0 -> 3738 bytes
-rw-r--r-- baserock_gerrit/gerrit-access-config.yml | 159
-rw-r--r-- baserock_gerrit/gerrit.config | 54
-rw-r--r-- baserock_gerrit/gerrit.service | 16
-rw-r--r-- baserock_gerrit/instance-backup-config.yml | 29
-rw-r--r-- baserock_gerrit/instance-ca-certificate-config.yml | 28
-rw-r--r-- baserock_gerrit/instance-config.yml | 133
-rw-r--r-- baserock_gerrit/instance-mirroring-config.yml | 68
-rw-r--r-- baserock_gerrit/lorry-controller.conf | 38
-rw-r--r-- baserock_gerrit/lorry-controller/minion.conf | 6
-rw-r--r-- baserock_gerrit/lorry-controller/webapp.conf | 13
-rw-r--r-- baserock_gerrit/lorry.conf | 8
-rw-r--r-- baserock_gerrit/replication.config | 30
-rw-r--r-- baserock_hosts | 40
-rw-r--r-- baserock_irclogs/clusters/irclogs.morph | 17
-rw-r--r-- baserock_irclogs/files/baserock.conf | 185
-rw-r--r-- baserock_irclogs/files/irclogs-generation.service | 15
-rw-r--r-- baserock_irclogs/files/irclogs-generation.timer | 9
-rw-r--r-- baserock_irclogs/files/lighttpd-irclogs.conf | 16
-rw-r--r-- baserock_irclogs/files/lighttpd-irclogs.service | 11
-rw-r--r-- baserock_irclogs/files/supybot.service | 15
-rw-r--r-- baserock_irclogs/irclogs.configure | 45
-rw-r--r-- baserock_irclogs/strata/irclogs.morph | 18
-rw-r--r-- baserock_irclogs/systems/irclogs-x86_64.morph | 33
-rw-r--r-- baserock_mail/image-config.yml | 22
-rw-r--r-- baserock_mail/instance-config.yml | 72
-rw-r--r-- baserock_mason_x86_32/distbuild.conf | 19
-rw-r--r-- baserock_mason_x86_32/mason-x86-32.morph | 26
-rw-r--r-- baserock_mason_x86_32/mason.conf | 18
-rw-r--r-- baserock_mason_x86_64/distbuild.conf | 19
-rw-r--r-- baserock_mason_x86_64/mason-x86-64.morph | 27
-rw-r--r-- baserock_mason_x86_64/mason.conf | 18
-rw-r--r-- baserock_opengrok/baserock-export.service | 11
-rw-r--r-- baserock_opengrok/baserock-export.timer | 10
-rw-r--r-- baserock_opengrok/clone-and-index.service | 11
-rw-r--r-- baserock_opengrok/clone-and-index.sh | 15
-rw-r--r-- baserock_opengrok/clone-and-index.timer | 10
-rw-r--r-- baserock_opengrok/export.sh | 38
-rw-r--r-- baserock_opengrok/index.jsp | 3
-rw-r--r-- baserock_opengrok/instance-config.yml | 163
-rw-r--r-- baserock_openid_provider/baserock_openid_provider/__init__.py | 17
-rw-r--r-- baserock_openid_provider/baserock_openid_provider/forms.py | 29
-rw-r--r-- baserock_openid_provider/baserock_openid_provider/settings.py | 174
-rw-r--r-- baserock_openid_provider/baserock_openid_provider/signals.py | 26
-rw-r--r-- baserock_openid_provider/baserock_openid_provider/static/style.css | 268
-rw-r--r-- baserock_openid_provider/baserock_openid_provider/urls.py | 12
-rw-r--r-- baserock_openid_provider/baserock_openid_provider/views.py | 53
-rw-r--r-- baserock_openid_provider/baserock_openid_provider/wsgi.py | 14
-rw-r--r-- baserock_openid_provider/cherokee.conf | 300
-rw-r--r-- baserock_openid_provider/image-config.yml | 77
-rw-r--r-- baserock_openid_provider/instance-config.yml | 36
-rw-r--r-- baserock_openid_provider/manage.py | 10
-rw-r--r-- baserock_openid_provider/openid_provider/__init__.py | 0
-rw-r--r-- baserock_openid_provider/openid_provider/admin.py | 17
-rw-r--r-- baserock_openid_provider/openid_provider/conf.py | 27
-rw-r--r-- baserock_openid_provider/openid_provider/models.py | 42
-rw-r--r-- baserock_openid_provider/openid_provider/south_migrations/0001_initial.py | 89
-rw-r--r-- baserock_openid_provider/openid_provider/south_migrations/__init__.py | 0
-rw-r--r-- baserock_openid_provider/openid_provider/templates/openid_provider/base.html | 1
-rw-r--r-- baserock_openid_provider/openid_provider/templates/openid_provider/decide.html | 41
-rw-r--r-- baserock_openid_provider/openid_provider/templates/openid_provider/error.html | 6
-rw-r--r-- baserock_openid_provider/openid_provider/templates/openid_provider/response.html | 12
-rw-r--r-- baserock_openid_provider/openid_provider/templates/openid_provider/server.html | 9
-rw-r--r-- baserock_openid_provider/openid_provider/templates/openid_provider/xrds.xml | 10
-rw-r--r-- baserock_openid_provider/openid_provider/urls.py | 14
-rw-r--r-- baserock_openid_provider/openid_provider/utils.py | 130
-rw-r--r-- baserock_openid_provider/openid_provider/views.py | 323
-rw-r--r-- baserock_openid_provider/templates/base.html | 38
-rw-r--r-- baserock_openid_provider/templates/index.html | 15
-rw-r--r-- baserock_openid_provider/templates/registration/activate.html | 8
-rw-r--r-- baserock_openid_provider/templates/registration/activation_complete.html | 6
-rw-r--r-- baserock_openid_provider/templates/registration/activation_email.txt | 6
-rw-r--r-- baserock_openid_provider/templates/registration/activation_email_subject.txt | 1
-rw-r--r-- baserock_openid_provider/templates/registration/login.html | 15
-rw-r--r-- baserock_openid_provider/templates/registration/logout.html | 6
-rw-r--r-- baserock_openid_provider/templates/registration/password_change_done.html | 6
-rw-r--r-- baserock_openid_provider/templates/registration/password_change_form.html | 11
-rw-r--r-- baserock_openid_provider/templates/registration/password_reset_complete.html | 10
-rw-r--r-- baserock_openid_provider/templates/registration/password_reset_confirm.html | 21
-rw-r--r-- baserock_openid_provider/templates/registration/password_reset_done.html | 6
-rw-r--r-- baserock_openid_provider/templates/registration/password_reset_email.html | 5
-rw-r--r-- baserock_openid_provider/templates/registration/password_reset_form.html | 11
-rw-r--r-- baserock_openid_provider/templates/registration/registration_closed.html | 6
-rw-r--r-- baserock_openid_provider/templates/registration/registration_complete.html | 11
-rw-r--r-- baserock_openid_provider/templates/registration/registration_form.html | 11
-rw-r--r-- baserock_openid_provider/uwsgi.ini | 22
-rw-r--r-- baserock_storyboard/ansible-galaxy-roles.yaml | 4
-rw-r--r-- baserock_storyboard/backup-snapshot.conf | 4
-rw-r--r-- baserock_storyboard/instance-backup-config.yml | 26
-rw-r--r-- baserock_storyboard/instance-config.yml | 35
-rw-r--r-- baserock_storyboard/instance-storyboard-config.yml | 14
-rw-r--r-- baserock_storyboard/projects.yaml | 47
-rw-r--r-- baserock_storyboard/storyboard-vars.yml | 50
-rw-r--r-- baserock_storyboard/users.yaml | 4
-rw-r--r-- baserock_trove/baserock_trove.morph | 25
-rw-r--r-- baserock_trove/configure-trove.yml | 51
-rw-r--r-- baserock_trove/instance-config.yml | 28
-rw-r--r-- baserock_trove/trove.conf | 14
-rw-r--r-- baserock_webserver/README.mdwn | 15
-rw-r--r-- baserock_webserver/etc/cherokee/cherokee.conf | 332
-rw-r--r-- baserock_webserver/etc/fstab | 11
-rw-r--r-- baserock_webserver/etc/selinux/config | 12
-rw-r--r-- baserock_webserver/etc/systemd/system/generate-docs.baserock.org.service | 11
-rw-r--r-- baserock_webserver/etc/systemd/system/generate-docs.baserock.org.timer | 8
-rwxr-xr-x baserock_webserver/generate-docs.baserock.org | 36
-rw-r--r-- baserock_webserver/image-config.yml | 24
-rw-r--r-- baserock_webserver/instance-docs.baserock.org-config.yml | 43
-rw-r--r-- certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert | 118
-rw-r--r-- certs/startcom-class2-ca-chain-certificate.cert | 78
-rw-r--r-- firewall.yaml | 403
l--------- hosts | 1
-rwxr-xr-x scripts/cycle.sh | 61
-rw-r--r-- scripts/licensecheck.pl | 604
-rwxr-xr-x scripts/organize-morphologies.py | 255
-rwxr-xr-x scripts/release-build | 192
-rw-r--r-- scripts/release-build.test.conf | 6
-rwxr-xr-x scripts/release-test | 400
-rwxr-xr-x scripts/release-test-os | 526
-rwxr-xr-x scripts/release-upload | 472
-rw-r--r-- scripts/release-upload.test.conf | 10
-rw-r--r-- strata/docutils.morph | 2
-rw-r--r-- strata/gerrit-tools.morph | 11
-rw-r--r-- strata/gnome.morph | 46
-rw-r--r-- strata/input-common.morph | 2
-rw-r--r-- systems/gerrit-system-x86_64.morph | 70
-rw-r--r-- tasks/create-data-volume.yml | 26
155 files changed, 6743 insertions(+), 2591 deletions(-)
diff --git a/.gitreview b/.gitreview
index 5da687ee..bfd98b91 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,5 +1,5 @@
[gerrit]
host=gerrit.baserock.org
port=29418
-project=baserock/baserock/definitions
+project=baserock/baserock/infrastructure
defaultbranch=master
diff --git a/README b/README
deleted file mode 100644
index ec38f9df..00000000
--- a/README
+++ /dev/null
@@ -1,59 +0,0 @@
-Baserock reference system definitions
-=====================================
-
-Baserock is a system for developing embedded and appliance Linux systems. For
-more information, see <http://wiki.baserock.org>.
-
-These are some example definitions for use with Baserock tooling. You can fork
-this repo and develop your own systems directly within it, or use it as a
-reference point when developing your own set of definitions.
-
-These definitions follow the Baserock definitions format, which is defined in
-spec.git repository (http://git.baserock.org/cgit/baserock/baserock/spec.git).
-
-The spec is readable online at <http://docs.baserock.org/spec>.
-
-The systems listed in the systems/ directory are example systems
-that build and run at some point. The only ones we can be sure
-that still build in current master of definitions are the ones that
-we keep building in our ci system; they are listed in
-http://git.baserock.org/cgit/baserock/baserock/definitions.git/tree/clusters/ci.morph
-
-Keeping up to date
-------------------
-
-The Baserock definitions format is evolving. A set of automated migrations is
-provided in the migrations/ directory of spec.git, for use when the format has
-changed and you want to bring your definitions up to date.
-
-Before running the migrations, you can use the 'migrations/indent' tool to
-format the definitions in the specific style that the migrations expect.
-The migrations use the 'ruamel.yaml' Python library for editing the .morph
-files. This library preserves comments, ordering and some of the formatting
-when it rewrites a .morph file. However, it does impose a certain line width
-and indent style.
-
-It makes a lot of sense to run the migrations with a *clean Git working tree*,
-so you can clearly see what changes they made, and can then choose to either
-commit them, tweak them, or revert them with `git reset --hard` and write an
-angry email.
-
-The suggested workflow is to run this from within your definitions.git clone:
-
- git clone git://git.baserock.org/baserock/baserock/spec ../spec.git
-
- git status # ensure a clean Git tree
- ../spec/migrations/indent
- git diff # check for any spurious changes
- git commit -a -m "Fix formatting"
- ../spec/migrations/run-all
- git diff # check the results
- git commit -a -m "Migrate to version xx of Baserock definitions format"
-
-If you are working in a fork of the Baserock definitions.git repo, you can
-also keep to date with using changes in 'master' using `git merge`. In general,
-we recommend first running the migrations, committing any changes they make,
-*then* merging in changes using `git merge`. This should minimise the number of
-merge conflicts, although merge conflicts are still possible.
-
-See migrations/GUIDELINES for information on how to write new migrations.
diff --git a/README.mdwn b/README.mdwn
new file mode 100644
index 00000000..65a0a7ad
--- /dev/null
+++ b/README.mdwn
@@ -0,0 +1,797 @@
+Baserock project public infrastructure
+======================================
+
+This repository contains the definitions for all of the Baserock Project's
+infrastructure. This includes every service used by the project, except for
+the mailing lists (hosted by [Pepperfish]) and the wiki (hosted by
+[Branchable]).
+
+Some of these systems are Baserock systems. Others are Ubuntu or Fedora based.
+Eventually we want to move all of these to being Baserock systems.
+
+The infrastructure is set up in a way that parallels the preferred Baserock
+approach to deployment. All files necessary for (re)deploying the systems
+should be contained in this Git repository, with the exception of certain
+private tokens (which should be simple to inject at deploy time).
+
+[Pepperfish]: http://listmaster.pepperfish.net/cgi-bin/mailman/listinfo
+[Branchable]: http://www.branchable.com/
+
+
+General notes
+-------------
+
+When instantiating a machine that will be public, remember to give shell
+access to everyone on the ops team. This can be done using a post-creation
+customisation script that injects all of their SSH keys. The SSH public
+keys of the Baserock Operations team are collected in
+`baserock-ops-team.cloud-config`.
+
+Ensure SSH password login is disabled in all systems you deploy! See:
+<https://testbit.eu/is-ssh-insecure/> for why. The Ansible playbook
+`admin/sshd_config.yaml` can ensure that all systems have password login
+disabled.
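+
+To apply it across every machine in the inventory (the same pattern as the
+ad-hoc commands below):
+
+    ansible-playbook -i hosts admin/sshd_config.yaml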
+
+
+Administration
+--------------
+
+You can use [Ansible] to automate tasks on the baserock.org systems.
+
+To run a playbook:
+
+ ansible-playbook -i hosts $PLAYBOOK.yaml
+
+To run an ad-hoc command (upgrading, for example):
+
+ ansible -i hosts fedora -m command -a 'sudo yum update -y'
+ ansible -i hosts ubuntu -m command -a 'sudo apt-get update -y'
+
+[Ansible]: http://www.ansible.com
+
+
+Security updates
+----------------
+
+Fedora security updates can be watched here:
+<https://bodhi.fedoraproject.org/updates/?type=security>. Ubuntu issues
+security advisories here: <http://www.ubuntu.com/usn/>.
+The Baserock reference systems don't have such a service. The [LWN
+Alerts](https://lwn.net/Alerts/) service gives you info from all major Linux
+distributions.
+
+If there is a vulnerability discovered in some software we use, we might need
+to upgrade all of the systems that use that component at baserock.org.
+
+Bear in mind some systems are not accessible except via the frontend-haproxy
+system. Those are usually less at risk than those that face the web directly.
+Also bear in mind we use OpenStack security groups to block most ports.
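+
+You can review the security groups and their rules with the same generation
+of OpenStack CLI used elsewhere in this document:
+
+    nova secgroup-list
+    nova secgroup-list-rules default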
+
+### Prepare the patch for Baserock systems
+
+First, you need to update the Baserock reference system definitions with a
+fixed version of the component. Build that and test that it works. Submit
+the patch to gerrit.baserock.org, get it reviewed, and merged. Then cherry
+pick that patch into infrastructure.git.
+
+This is a long-winded process. There are shortcuts you can take, although
+someone still has to complete the process described above at some point.
+
+* You can modify the infrastructure.git definitions directly and start rebuilding
+ the infrastructure systems right away, to avoid waiting for the Baserock patch
+ review process.
+
+* You can add the new version of the component as a stratum that sits above
+ everything else in the build graph. For example, to do a 'hot-fix' for GLIBC,
+ add a 'glibc-hotfix' stratum containing the new version to all of the systems
+ you need to upgrade. Rebuilding them will be quick because you just need to
+ build GLIBC, and can reuse the cached artifacts for everything else. The new
+ GLIBC will overwrite the one that is lower down in the build graph in the
+ resulting filesystem. Of course, if the new version of the component is not
+ ABI compatible then this approach will break things. Be careful.
+
+### Check the inventory
+
+Make sure the Ansible inventory file is up to date, and that you have access to
+all machines. Run this:
+
+ ansible \* -i ./hosts -m ping
+
+You should see lots of this sort of output:
+
+ mail | success >> {
+ "changed": false,
+ "ping": "pong"
+ }
+
+ frontend-haproxy | success >> {
+ "changed": false,
+ "ping": "pong"
+ }
+
+You may find some host key errors like this:
+
+ paste | FAILED => SSH Error: Host key verification failed.
+ It is sometimes useful to re-run the command using -vvvv, which prints SSH debug output to help diagnose the issue.
+
+If you have a host key problem, that could be because somebody redeployed
+the system since the last time you connected to it with SSH, and did not
+transfer the SSH host keys from the old system to the new system. Check with
+other ops team members about this. If you are sure the new host keys can
+be trusted, you can remove the old ones with `ssh-keygen -R 192.168.x.y`,
+where 192.168.x.y is the internal IP address of the machine. You'll then be
+prompted to accept the new ones when you run Ansible again.
+
+Once all machines respond to the Ansible 'ping' module, double check that
+every machine you can see in the OpenStack Horizon dashboard has a
+corresponding entry in the 'hosts' file, to ensure the next steps operate
+on all of the machines.
+
+### Check and upgrade Fedora systems
+
+> Bear in mind that only the latest 2 versions of Fedora receive security
+updates. If any machines are not running the latest version of Fedora,
+you should redeploy them with the latest version. See the instructions below
+on how to (re)deploy each machine. You should deploy a new instance of a system
+and test it *before* terminating the existing instance. Switching over should
+be a matter of changing either its floating IP address or the IP address in
+baserock_frontend/haproxy.cfg.
+
+You can find out what version of Fedora is in use with this command:
+
+ ansible fedora -i hosts -m setup -a 'filter=ansible_distribution_version'
+
+Check what version of a package is in use with this command (using GLIBC as an
+example). You can compare this against Fedora package changelogs at
+[Koji](https://koji.fedoraproject.org).
+
+ ansible fedora -i hosts -m command -a 'rpm -q glibc --qf "%{VERSION}.%{RELEASE}\n"'
+
+You can see what updates are available using the `dnf updateinfo info` command.
+
+ ansible -i hosts fedora -m command -a 'dnf updateinfo info glibc'
+
+You can then use `dnf upgrade -y` to install all available updates. Or give the
+name of a package to update just that package. Be aware that DNF is quite slow,
+and if you forget to pass `-y` then it will hang forever waiting for input.
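+
+For example, to upgrade just GLIBC on all the Fedora machines:
+
+    ansible -i hosts fedora -m command -a 'sudo dnf upgrade -y glibc'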
+
+You will then need to restart services. The `dnf needs-restarting` command might be
+useful, but rebooting the whole machine is probably easiest.
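+
+If you do want the list of affected services, you can run it remotely too
+(this assumes the relevant DNF plugin is installed on the targets):
+
+    ansible -i hosts fedora -m command -a 'sudo dnf needs-restarting'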
+
+### Check and upgrade Ubuntu systems
+
+> Bear in mind that only the latest release and the latest LTS release of
+Ubuntu receive security updates.
+
+Find out what version of Ubuntu is in use with this command:
+
+ ansible ubuntu -i hosts -m setup -a 'filter=ansible_distribution_version'
+
+Check what version of a given package is in use with this command (using GLIBC
+as an example).
+
+ ansible -i hosts ubuntu -m command -a 'dpkg-query --show libc6'
+
+Check for available updates, and what they contain:
+
+ ansible -i hosts ubuntu -m command -a 'apt-cache policy libc6'
+ ansible -i hosts ubuntu -m command -a 'apt-get changelog libc6' | head -n 20
+
+You can update all the packages with:
+
+ ansible -i hosts ubuntu -m command -a 'apt-get upgrade -y' --sudo
+
+You will then need to restart services. Rebooting the machine is probably
+easiest.
+
+### Check and upgrade Baserock systems
+
+Check what version of a given package is in use with this command (using GLIBC
+as an example). Ideally Baserock reference systems would have a query tool for
+this info, but for now we have to look at the JSON metadata file directly.
+
+ ansible -i hosts baserock -m command \
+ -a "grep '\"\(sha1\|repo\|original_ref\)\":' /baserock/glibc-bins.meta"
+
+The default Baserock machine layout uses Btrfs for the root filesystem. Filling
+up a Btrfs disk results in unpredictable behaviour. Before deploying any system
+upgrades, check that each machine has enough free disk space to hold an
+upgrade. Allow for at least 4GB free space, to be safe.
+
+ ansible -i hosts baserock -m command -a "df -h /"
+
+A good way to free up space is to remove old system-versions using the
+`system-version-manager` tool. There may be other things that are
+unnecessarily taking up space in the root file system, too.
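+
+A sketch, run on the machine itself (this assumes the `remove` subcommand of
+`system-version-manager`; version labels will differ):
+
+    system-version-manager list
+    system-version-manager remove <some-old-version>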
+
+Ideally, at this point you've prepared a patch for definitions.git to fix
+the security issue in the Baserock reference systems, and it has been merged.
+In that case, pull from the reference systems into infrastructure.git, using
+`git pull git://git.baserock.org/baserock/baserock/definitions master`.
+
+If the necessary patch isn't merged in definitions.git, it's still best to
+merge 'master' from there into infrastructure.git, and then cherry-pick the
+patch from Gerrit on top.
+
+You then need to build and upgrade the systems one by one. Do this from the
+'devel-system' machine in the same OpenStack cloud that hosts the
+infrastructure. Baserock upgrades currently involve transferring the whole
+multi-gigabyte system image, so you *must* have a fast connection to the
+target.
+
+Each Baserock system has its own deployment instructions. Each should have
+a deployment .morph file that you can pass to `morph upgrade`. For example,
+to deploy an upgrade to git.baserock.org:
+
+ morph upgrade --local-changes=ignore \
+ baserock_trove/baserock_trove.morph gbo.VERSION_LABEL=2016-02-19
+
+Once this completes successfully, rebooting the system should bring up the
+new system. You may want to check that the new `/etc` is correct; you can
+do this inside the machine by mounting `/dev/vda` and looking in
+`systems/$VERSION_LABEL/run/etc`.
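+
+A minimal sketch, run on the machine itself:
+
+    mount /dev/vda /mnt
+    ls /mnt/systems/$VERSION_LABEL/run/etc
+    umount /mnt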
+
+If you want to revert the upgrade, use `system-version-manager list` and
+`system-version-manager set-default <old-version>` to set the previous
+version as the default, then reboot. If the system doesn't boot at all,
+reboot it while you have the graphical console open in Horizon, and you
+should be able to press `ESC` fast enough to get the boot menu open. This
+will allow booting into previous versions of the system. (You shouldn't
+have any problems though since of course we test everything regularly).
+
+Beware of <https://storyboard.baserock.org/#!/story/77>.
+
+For cache.baserock.org, you can reuse the deployment instructions for
+git.baserock.org. Try:
+
+    morph upgrade --local-changes=ignore \
+        baserock_trove/baserock_trove.morph \
+        gbo.update-location=root@cache.baserock.org \
+        gbo.VERSION_LABEL=2016-02-19
+
+Deployment to OpenStack
+-----------------------
+
+The intention is that all of the systems defined here are deployed to an
+OpenStack cloud. The instructions here hardcode some details about the specific
+tenancy at [DataCentred](http://www.datacentred.io) that the Baserock project
+uses. It should be easy to adapt them for other OpenStack hosts, though.
+
+### Credentials
+
+The instructions below assume you have the following environment variables set
+according to the OpenStack host you are deploying to:
+
+ - `OS_AUTH_URL`
+ - `OS_TENANT_NAME`
+ - `OS_USERNAME`
+ - `OS_PASSWORD`
+
+When using `morph deploy` to deploy to OpenStack, you will need to set these
+variables, because currently Morph does not honour the standard ones. See:
+<https://storyboard.baserock.org/#!/story/35>.
+
+ - `OPENSTACK_USER=$OS_USERNAME`
+ - `OPENSTACK_PASSWORD=$OS_PASSWORD`
+ - `OPENSTACK_TENANT=$OS_TENANT_NAME`
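+
+In a POSIX shell, the mapping is simply:
+
+    export OPENSTACK_USER="$OS_USERNAME"
+    export OPENSTACK_PASSWORD="$OS_PASSWORD"
+    export OPENSTACK_TENANT="$OS_TENANT_NAME"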
+
+The `location` field in the deployment .morph file will also need to point to
+the correct `$OS_AUTH_URL`.
+
+### Firewall / Security Groups
+
+The instructions assume the presence of a set of security groups. You can
+create these by running the following Ansible playbook. You'll need the
+OpenStack Ansible modules cloned from
+`https://github.com/openstack-ansible/openstack-ansible-modules/`.
+
+ ANSIBLE_LIBRARY=../openstack-ansible-modules ansible-playbook -i hosts \
+ firewall.yaml
+
+### Placeholders
+
+The commands below use a couple of placeholders like $network_id. You can set
+them in your environment to allow you to copy and paste the commands below
+as-is.
+
+ - `export fedora_image_id=...` (find this with `glance image-list`)
+ - `export ubuntu_image_id=...` (likewise; needed for the StoryBoard system)
+ - `export network_id=...` (find this with `neutron net-list`)
+ - `export keyname=...` (find this with `nova keypair-list`)
+
+The `$fedora_image_id` should reference a Fedora Cloud image. You can import
+these from <http://www.fedoraproject.org/>. At the time of writing, these
+instructions were tested with Fedora Cloud 23 for x86_64.
+
+Backups
+-------
+
+Backups of git.baserock.org's data volume are run by, and stored on, a
+Codethink-managed machine named 'access'. They will need to migrate off this
+system before long. The backups are taken without pausing services or
+snapshotting the data, so they will not be 100% clean. The current
+git.baserock.org data volume does not use LVM and cannot be easily snapshotted.
+
+Backups of 'gerrit' and 'database' are handled by the
+'baserock_backup/backup.sh' script. This currently runs on an instance in
+Codethink's internal OpenStack cloud.
+
+Instances themselves are not backed up. In the event of a crisis we will
+redeploy them from the infrastructure.git repository. There should be nothing
+valuable stored outside of the data volumes that are backed up.
+
+To prepare the infrastructure to run the backup scripts you will need to run
+the following playbooks:
+
+ ansible-playbook -i hosts baserock_frontend/instance-backup-config.yml
+ ansible-playbook -i hosts baserock_database/instance-backup-config.yml
+ ansible-playbook -i hosts baserock_gerrit/instance-backup-config.yml
+
+NOTE: to run these playbooks you need to have the public SSH key of the backup
+instance in `keys/backup.key.pub`.
+
+
+Systems
+-------
+
+### Front-end
+
+The front-end provides a reverse proxy, to allow more flexible routing than
+simply pointing each subdomain to a different instance using separate public
+IPs. It also provides a starting point for future load-balancing and failover
+configuration.
+
+To deploy this system:
+
+ nova boot frontend-haproxy \
+ --key-name=$keyname \
+ --flavor=dc1.1x0 \
+ --image=$fedora_image_id \
+ --nic="net-id=$network_id" \
+ --security-groups default,gerrit,web-server \
+ --user-data ./baserock-ops-team.cloud-config
+ ansible-playbook -i hosts baserock_frontend/image-config.yml
+ ansible-playbook -i hosts baserock_frontend/instance-config.yml
+ ansible-playbook -i hosts baserock_frontend/instance-backup-config.yml
+
+ ansible -i hosts -m service -a 'name=haproxy enabled=true state=started' \
+ --sudo frontend-haproxy
+
+The baserock_frontend system is stateless.
+
+Full HAProxy 1.5 documentation: <https://cbonte.github.io/haproxy-dconv/configuration-1.5.html>.
+
+If you want to add a new service to the Baserock Project infrastructure via
+the frontend, do the following:
+
+- request a subdomain that points at 185.43.218.170 (frontend)
+- alter the haproxy.cfg file in the baserock_frontend/ directory in this repo
+ as necessary to proxy requests to the real instance
+- run the baserock_frontend/instance-config.yml playbook
+- run `ansible -i hosts -m service -a 'name=haproxy enabled=true
+ state=restarted' --sudo frontend-haproxy`
+
+OpenStack doesn't provide any kind of internal DNS service, so you must use
+the fixed IP address of each instance in the configuration.
+
+The internal IP address of this machine is hardcoded in some places (beyond the
+usual haproxy.cfg file), use 'git grep' to find all of them. You'll need to
+update all the relevant config files. We really need some internal DNS system
+to avoid this hassle.
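+
+For example, to find every file that mentions an internal IP address:
+
+    git grep '192\.168\.222\.'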
+
+### Database
+
+Baserock infrastructure uses a shared [MariaDB] database. MariaDB was chosen
+because Storyboard only supports MariaDB.
+
+To deploy this system to production:
+
+ nova boot database-mariadb \
+ --key-name=$keyname \
+ --flavor dc1.1x1 \
+ --image=$fedora_image_id \
+ --nic="net-id=$network_id,v4-fixed-ip=192.168.222.146" \
+ --security-groups default,database-mysql \
+ --user-data ./baserock-ops-team.cloud-config
+ nova volume-create \
+ --display-name database-volume \
+ --display-description 'Database volume' \
+ --volume-type Ceph \
+ 100
+ nova volume-attach database-mariadb <volume ID> /dev/vdb
+
+ ansible-playbook -i hosts baserock_database/image-config.yml
+ ansible-playbook -i hosts baserock_database/instance-config.yml
+ ansible-playbook -i hosts baserock_database/instance-backup-config.yml
+
+At this point, if you are restoring from a backup, rsync the data across
+from your backup server on the instance, then start the mariadb service and you
+are done.
+
+ sudo --preserve-env -- rsync --archive --chown mysql:mysql --hard-links \
+ --info=progress2 --partial --sparse \
+ root@backupserver:/srv/backup/database/* /var/lib/mysql
+ sudo systemctl enable mariadb.service
+ sudo systemctl start mariadb.service
+
+NOTE: If you see the following message in the journal:
+
+ The datadir located at /var/lib/mysql needs to be upgraded using 'mysql_upgrade' tool. This can be done using the following steps
+
+This is because the backup you are importing is from an older version of
+MariaDB. To fix this, as the message says, you only need to run:
+
+ sudo -u mysql mysql_upgrade -u root -p
+
+If you are starting from scratch, you need to prepare the system by adding
+the required users and databases. Run the following playbook, which can
+be altered and rerun whenever you need to add more users or databases, or
+you want to check the database configuration matches what you expect.
+
+ ansible -i hosts -m service -a 'name=mariadb enabled=true state=started'
+ ansible-playbook -i hosts baserock_database/instance-mariadb-config.yml
+
+The internal IP address of this machine is hardcoded in some places (beyond the
+usual haproxy.cfg file), use 'git grep' to find all of them. You'll need to
+update all the relevant config files. We really need some internal DNS system
+to avoid this hassle.
+
+[MariaDB]: https://www.mariadb.org
+
+### Mail relay
+
+The mail relay is currently a Fedora Cloud 23 image running Exim.
+
+It is configured to only listen on its internal IP. It's not intended to
+receive mail, or relay mail sent by systems outside the baserock.org cloud.
+
+To deploy it:
+
+ nova boot mail \
+ --key-name $keyname \
+ --flavor dc1.1x0 \
+ --image $fedora_image_id \
+ --nic "net-id=$network_id,v4-fixed-ip=192.168.222.145" \
+ --security-groups default,internal-mail-relay \
+ --user-data ./baserock-ops-team.cloud-config
+
+ ansible-playbook -i hosts baserock_mail/image-config.yml
+ ansible-playbook -i hosts baserock_mail/instance-config.yml
+
+The mail relay machine is stateless.
+
+The internal IP address of this machine is hardcoded in some places (beyond the
+usual haproxy.cfg file), use 'git grep' to find all of them. You'll need to
+update all the relevant config files. We really need some internal DNS system
+to avoid this hassle.
+
+### OpenID provider
+
+To deploy this system to production:
+
+ vim baserock_openid_provider/baserock_openid_provider/settings.py
+
+Check the DATABASE_HOST IP, and check the other settings against the [Django
+deployment
+checklist](https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/).
+
+ nova boot openid.baserock.org \
+ --key-name $keyname \
+ --flavor dc1.1x1 \
+ --image $fedora_image_id \
+ --nic "net-id=$network_id,v4-fixed-ip=192.168.222.144" \
+ --security-groups default,web-server \
+ --user-data ./baserock-ops-team.cloud-config
+
+ ansible-playbook -i hosts baserock_openid_provider/image-config.yml
+ ansible-playbook -i hosts baserock_openid_provider/instance-config.yml
+
+The baserock_openid_provider system is stateless.
+
+To change Cherokee configuration, it's usually easiest to use the
+cherokee-admin tool in a running instance. SSH in as normal, but forward port
+9090 to localhost (pass `-L9090:localhost:9090` to SSH). Back up the old
+/etc/cherokee/cherokee.conf file, then run `cherokee-admin`, and log in using
+the credentials it gives you. After changing the configuration, please update
+the cherokee.conf in infrastructure.git to match the changes `cherokee-admin`
+made.
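+
+For example (assuming you log in to the instance as the 'fedora' user, as on
+the other Fedora-based systems):
+
+    ssh -L9090:localhost:9090 fedora@openid.baserock.org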
+
+### Gerrit
+
+To deploy to production, run these commands in a Baserock 'devel'
+or 'build' system.
+
+ nova volume-create \
+ --display-name gerrit-volume \
+ --display-description 'Gerrit volume' \
+ --volume-type Ceph \
+ 100
+
+ git clone git://git.baserock.org/baserock/baserock/infrastructure.git
+ cd infrastructure
+
+ morph build systems/gerrit-system-x86_64.morph
+ morph deploy baserock_gerrit/baserock_gerrit.morph
+
+ nova boot gerrit.baserock.org \
+ --key-name $keyname \
+ --flavor 'dc1.2x4.40' \
+ --image baserock_gerrit \
+ --nic "net-id=$network_id,v4-fixed-ip=192.168.222.69" \
+ --security-groups default,gerrit,git-server,web-server \
+ --user-data baserock-ops-team.cloud-config
+
+ nova volume-attach gerrit.baserock.org <volume-id> /dev/vdb
+
+Accept the license and download the latest Java Runtime Environment from
+http://www.oracle.com/technetwork/java/javase/downloads/server-jre8-downloads-2133154.html
+
+Accept the license and download the latest Java Cryptography Extensions from
+http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html
+
+Save these two files in the baserock_gerrit/ folder. The instance-config.yml
+Ansible playbook will upload them to the new system.
+
+ # Don't copy-paste this! Use the Oracle website instead!
+ wget --no-cookies --no-check-certificate \
+ --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" \
+ "http://download.oracle.com/otn-pub/java/jdk/8u40-b25/server-jre-8u40-linux-x64.tar.gz"
+ wget --no-cookies --no-check-certificate \
+ --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" \
+ "http://download.oracle.com/otn-pub/java/jce/8/jce_policy-8.zip"
+
+ ansible-playbook -i hosts baserock_gerrit/instance-config.yml
+
+For baserock.org Gerrit you will also need to run:
+
+ ansible-playbook -i hosts baserock_gerrit/instance-ca-certificate-config.yml
+
+If you are restoring from a backup, rsync the data across from your
+backup server on the instance, then start the gerrit service.
+
+ systemctl stop gerrit.service
+ rm -r /srv/gerrit/*
+
+ rsync --archive --chown gerrit:gerrit --hard-links \
+ --info=progress2 --partial --sparse \
+ root@backupserver:/srv/backup/gerrit/* /srv/gerrit/
+
+ systemctl start gerrit.service
+
+NOTE: If you are restoring a backup from an older version of Gerrit, you
+might need to run some of the following commands to migrate the database
+schema and the Gerrit data (this was needed to move from 2.9.4 to 2.11.4):
+
+ java -jar /opt/gerrit/gerrit-2.11.3.war init -d /srv/gerrit
+ java -jar /opt/gerrit/gerrit-2.11.3.war reindex -d /srv/gerrit
+
+
+#### Access control
+
+Gerrit should now be up and running and accessible through the web interface.
+By default this is on port 8080. Log into the new Gerrit instance with your
+credentials. Make sure you're the first one to have registered, and you will
+automatically have been added to the Administrators group.
+
+You can add more users into the Administrators group later on using the [gerrit
+set-members] command, or the web interface.
+
+Go to the settings page, 'HTTP Password', and generate an HTTP password for
+yourself. You'll need it in the next step. The password can take a long time to
+appear for some reason, or it might not work at all. Click off the page and
+come back to it and it might suddenly have appeared. I've not investigated why
+this happens.
+
+Generate the SSH keys you need, if you don't have them.
+
+ mkdir -p keys
+ ssh-keygen -t rsa -b 4096 -C 'lorry@gerrit.baserock.org' -N '' -f keys/lorry-gerrit.key
+
+Now set up the Gerrit access configuration. This Ansible playbook requires a
+couple of non-standard packages.
+
+ git clone git://git.baserock.org/delta/python-packages/pygerrit.git
+ git clone git://github.com/ssssam/ansible-gerrit
+ cd ansible-gerrit && make; cd -
+
+    export GERRIT_URL='<Gerrit web URL>'
+    export GERRIT_ADMIN_USERNAME='<your username>'
+    export GERRIT_ADMIN_PASSWORD='<your generated HTTP password>'
+    export GERRIT_ADMIN_REPO=ssh://you@gerrit:29418/All-Projects.git
+
+ ANSIBLE_LIBRARY=./ansible-gerrit PYTHONPATH=./pygerrit \
+ ansible-playbook baserock_gerrit/gerrit-access-config.yml
+
+[gerrit set-members]: https://gerrit-documentation.storage.googleapis.com/Documentation/2.9.4/cmd-set-members.html
+
+#### Mirroring
+
+Run:
+
+ ansible-playbook -i hosts baserock_gerrit/instance-mirroring-config.yml
+
+Now clone Gerrit's lorry-controller configuration repository, commit the
+configuration file to it, and push.
+
+ # FIXME: we could use the git_commit_and_push Ansible module for this now,
+ # instead of doing it manually.
+
+ git clone ssh://$GERRIT_ADMIN_USERNAME@gerrit.baserock.org:29418/local-config/lorries.git /tmp/lorries
+ cp baserock_gerrit/lorry-controller.conf /tmp/lorries
+ cd /tmp/lorries
+ git checkout -b master
+ git add .
+ git commit -m "Add initial Lorry Controller mirroring configuration"
+ git push origin master
+ cd -
+
+Now SSH in as 'root' to gerrit.baserock.org, tunnelling the lorry-controller
+webapp's port to your local machine:
+
+ ssh -L 12765:localhost:12765 root@gerrit.baserock.org
+
+Visit <http://localhost:12765/1.0/status-html>. You should see the
+lorry-controller status page. Click 'Re-read configuration'; if there are any
+errors in the configuration, it'll tell you. If not, it should start mirroring
+stuff from your Trove.
+
+Create a Gitano account on the Trove you want to push changes to for the Gerrit
+user. The `instance-config.yml` Ansible playbook will have generated an SSH
+key. Run these commands on the Gerrit instance:
+
+ ssh git@git.baserock.org user add gerrit "gerrit.baserock.org" gerrit@baserock.org
+ ssh git@git.baserock.org as gerrit sshkey add main < ~gerrit/.ssh/id_rsa.pub
+
+Add the 'gerrit' user to the necessary -writers groups on the Trove, to allow
+the gerrit-replication plugin to push merged changes to 'master' in the Trove.
+
+ ssh git@git.baserock.org group adduser baserock-writers gerrit
+ ssh git@git.baserock.org group adduser local-config-writers gerrit
+
+Add the host key of the remote Trove to the Gerrit system:
+
+ sudo -u gerrit sh -c 'ssh-keyscan git.baserock.org >> ~gerrit/.ssh/known_hosts'
+
+Check the 'gerrit' user's Trove account is working.
+
+ sudo -u gerrit ssh git@git.baserock.org whoami
+
+Now enable the gerrit-replication plugin, check that it's now in the list of
+plugins, and manually start a replication cycle. You should see log output from
+the final SSH command showing any errors.
+
+ ssh $GERRIT_ADMIN_USERNAME@gerrit.baserock.org -p 29418 gerrit plugin enable replication
+ ssh $GERRIT_ADMIN_USERNAME@gerrit.baserock.org -p 29418 gerrit plugin ls
+ ssh $GERRIT_ADMIN_USERNAME@gerrit.baserock.org -p 29418 replication start --all --wait
+
+### StoryBoard
+
+ ansible-galaxy install -r baserock_storyboard/ansible-galaxy-roles.yaml -p ./baserock_storyboard/roles
+
+ nova volume-create \
+ --display-name storyboard-volume \
+ --display-description 'Storyboard volume' \
+ --volume-type Ceph \
+ 100
+
+ nova boot storyboard.baserock.org \
+ --key-name $keyname \
+ --flavor 'dc1.1x1.20' \
+ --image $ubuntu_image_id \
+ --nic "net-id=$network_id,v4-fixed-ip=192.168.222.131" \
+ --security-groups default,web-server \
+ --user-data baserock-ops-team.cloud-config
+
+ nova volume-attach storyboard.baserock.org <volume-id> /dev/vdb
+
+ ansible-playbook -i hosts baserock_storyboard/instance-config.yml
+ ansible-playbook -i hosts baserock_storyboard/instance-backup-config.yml
+ ansible-playbook -i hosts baserock_storyboard/instance-storyboard-config.yml
+
+### Masons
+
+Mason is the name we use for an automated build and test system used in the
+Baserock project. The V2 Mason that runs at <https://mason-x86-32.baserock.org/>
+and <https://mason-x86-64.baserock.org/> lives in definitions.git, and is thus
+available in infrastructure.git too by default.
+
+To build mason-x86-64:
+
+ git clone git://git.baserock.org/baserock/baserock/infrastructure.git
+ cd infrastructure
+
+ morph build systems/build-system-x86_64.morph
+ morph deploy baserock_mason_x86_64/mason-x86-64.morph
+
+ nova boot mason-x86-64.baserock.org \
+ --key-name $keyname \
+ --flavor 'dc1.2x2' \
+ --image baserock_mason_x86_64 \
+ --nic "net-id=$network_id,v4-fixed-ip=192.168.222.80" \
+    --security-groups internal-only,mason-x86 \
+ --user-data baserock-ops-team.cloud-config
+
+The mason-x86-32 system is the same; just substitute '32' for '64' in the
+above commands.
+
+Note that the Masons are NOT in the 'default' security group, they are in
+'internal-only'. This is a way of enforcing the [policy] that the Baserock
+reference system definitions can only use source code hosted on
+git.baserock.org, by making it impossible to fetch code from anywhere else.
+
+[policy]: http://wiki.baserock.org/policies/
+
+### Trove
+
+To deploy to production, run these commands in a Baserock 'devel'
+or 'build' system.
+
+ nova volume-create \
+ --display-name git.baserock.org-home \
+ --display-description '/home partition of git.baserock.org' \
+ --volume-type Ceph \
+ 300
+
+ git clone git://git.baserock.org/baserock/baserock/infrastructure.git
+ cd infrastructure
+
+ morph build systems/trove-system-x86_64.morph
+ morph deploy baserock_trove/baserock_trove.morph
+
+ nova boot git.baserock.org \
+ --key-name $keyname \
+ --flavor 'dc1.8x16' \
+ --image baserock_trove \
+ --nic "net-id=$network_id,v4-fixed-ip=192.168.222.58" \
+ --security-groups default,git-server,web-server,shared-artifact-cache \
+ --user-data baserock-ops-team.cloud-config
+
+ nova volume-attach git.baserock.org <volume-id> /dev/vdb
+
+    # Note: if this floating IP is not available, you will have to update
+    # the DNS records at the DNS provider.
+ nova add-floating-ip git.baserock.org 185.43.218.183
+
+ ansible-playbook -i hosts baserock_trove/instance-config.yml
+
+ # Before configuring the Trove you will need to create some ssh
+ # keys for it. You can also use existing keys.
+
+ mkdir private
+ ssh-keygen -N '' -f private/lorry.key
+ ssh-keygen -N '' -f private/worker.key
+ ssh-keygen -N '' -f private/admin.key
+
+ # Now you can finish the configuration of the Trove with:
+
+ ansible-playbook -i hosts baserock_trove/configure-trove.yml
+
+
+Creating new repos
+------------------
+
+This is a quick guide on how to create a new repo to hold Baserock project
+stuff.
+
+The creation of the repo must first have been proposed on the baserock-dev
+mailing list and have received two +1s.
+
+Ideally, don't create a new repo. We don't want development to be split across
+dozens of different repos, and we don't want Gerrit and
+<git.baserock.org/baserock/baserock> to become full of clutter. If you're
+prototyping something, use a different Git server
+([GitHub](https://www.github.com/), for example). But creating a new repo is
+sometimes necessary.
+
+1. Create repo on git.baserock.org:
+
+ ssh git@git.baserock.org create baserock/baserock/$NAME
+ ssh git@git.baserock.org config baserock/baserock/$NAME \
+ set project.description "$DESCRIPTION"
+
+ The 'lorry-controller' service on gerrit.baserock.org will automatically
+ create the corresponding project in Gerrit whenever it next runs.
+
+2. Add project in Storyboard. First edit `baserock_storyboard/projects.yaml`
+ add the new project to the list, then:
+
+ scp baserock_storyboard/projects.yaml ubuntu@storyboard.baserock.org:
+ ssh ubuntu@storyboard.baserock.org storyboard-db-manage load_projects projects.yaml
+
+3. Submit a patch for infrastructure.git with your changes, and submit to Gerrit.
diff --git a/admin/cve-2015-0235-ghost.c b/admin/cve-2015-0235-ghost.c
new file mode 100644
index 00000000..3615ff57
--- /dev/null
+++ b/admin/cve-2015-0235-ghost.c
@@ -0,0 +1,40 @@
+/* From http://www.openwall.com/lists/oss-security/2015/01/27/9 */
+
+#include <netdb.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#define CANARY "in_the_coal_mine"
+
+struct {
+ char buffer[1024];
+ char canary[sizeof(CANARY)];
+} temp = { "buffer", CANARY };
+
+int main(void) {
+ struct hostent resbuf;
+ struct hostent *result;
+ int herrno;
+ int retval;
+
+ /*** strlen (name) = size_needed - sizeof (*host_addr) - sizeof (*h_addr_ptrs) - 1; ***/
+ size_t len = sizeof(temp.buffer) - 16*sizeof(unsigned char) - 2*sizeof(char *) - 1;
+ char name[sizeof(temp.buffer)];
+ memset(name, '0', len);
+ name[len] = '\0';
+
+ retval = gethostbyname_r(name, &resbuf, temp.buffer, sizeof(temp.buffer), &result, &herrno);
+
+ if (strcmp(temp.canary, CANARY) != 0) {
+ puts("vulnerable");
+ exit(EXIT_SUCCESS);
+ }
+ if (retval == ERANGE) {
+ puts("not vulnerable");
+ exit(EXIT_SUCCESS);
+ }
+ puts("should not happen");
+ exit(EXIT_FAILURE);
+}
diff --git a/admin/sshd_config.yaml b/admin/sshd_config.yaml
new file mode 100644
index 00000000..aba6a9f1
--- /dev/null
+++ b/admin/sshd_config.yaml
@@ -0,0 +1,8 @@
+---
+- hosts: all
+ gather_facts: false
+ tasks:
+ - name: ensure SSH login with password is disabled
+ lineinfile:
+        dest=/etc/ssh/sshd_config
+        regexp='^#?PasswordAuthentication'
+        line='PasswordAuthentication no'
diff --git a/admin/test-cve-2015-0235-ghost.yml b/admin/test-cve-2015-0235-ghost.yml
new file mode 100644
index 00000000..6090eb2b
--- /dev/null
+++ b/admin/test-cve-2015-0235-ghost.yml
@@ -0,0 +1,18 @@
+# Test systems for CVE-2015-0235 GHOST
+#
+# http://www.openwall.com/lists/oss-security/2015/01/27/9
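+#
+# The test program 'cve-2015-0235-ghost-x86-64' is the compiled form of
+# admin/cve-2015-0235-ghost.c; one way to build it (an assumption -- any C
+# compiler targeting x86-64 should do):
+#
+#     gcc -o cve-2015-0235-ghost-x86-64 admin/cve-2015-0235-ghost.c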
+---
+- hosts: all
+ gather_facts: False
+ tasks:
+ - name: copy in the cve-2015-0235-ghost-x86-64 test program
+ copy: src=cve-2015-0235-ghost-x86-64 dest=~ mode=755
+
+ - name: run the test program
+ command: ~/cve-2015-0235-ghost-x86-64
+ register: test_output
+
+ - debug: var=test_output.stdout_lines
+
+ - name: remove test program again
+ file: path=~/cve-2015-0235-ghost-x86-64 state=absent
diff --git a/ansible.cfg b/ansible.cfg
new file mode 100644
index 00000000..b81f6a5d
--- /dev/null
+++ b/ansible.cfg
@@ -0,0 +1,5 @@
+# Proxy SSH connection through the frontend-haproxy machine to access
+# the instances that don't have public IPs.
+[ssh_connection]
+ssh_args = -o ProxyCommand="ssh -q -A fedora@185.43.218.170 'nc %h %p'"
+
diff --git a/backup-snapshot b/backup-snapshot
new file mode 100755
index 00000000..30172c22
--- /dev/null
+++ b/backup-snapshot
@@ -0,0 +1,249 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''Create a temporary backup snapshot of a volume.
+
+This program is intended as a wrapper for `rsync`, to allow copying data out
+of the system with a minimum of service downtime. You can't copy data from a
+volume used by a service like MariaDB or Gerrit while that service is running,
+because the contents will change underneath your feet while you copy them. This
+script assumes the data is stored on an LVM volume, so you can stop the
+services, snapshot the volume, start the services again and then copy the data
+out from the snapshot.
+
+To use it, you need to use the 'command' feature of the .ssh/authorized_keys
+file, which causes OpenSSH to run a given command whenever a given SSH key
+connects (instead of allowing the owner of the key to run any command). This
+ensures that even if the backup key is compromised, all the attacker can do is
+make backups, and only then if they are connecting from the IP listed in 'from'
+
+ command=/usr/bin/backup-snapshot <key details>
+
+You'll need to create a YAML configuration file in /etc/backup-snapshot.conf
+that describes how to create the snapshot. Here's an example:
+
+ services:
+ - lorry-controller-minion@1.service
+ - gerrit.service
+
+ volume: /dev/vg0/gerrit
+
+To test this out, run:
+
+ rsync root@192.168.0.1: /srv/backup --rsync-path="/usr/bin/backup-snapshot"
+
+There is a Perl script named 'rrsync' that does something similar:
+
+ http://git.baserock.org/cgi-bin/cgit.cgi/delta/rsync.git/tree/support/rrsync
+
+'''
+
+
+import contextlib
+import logging
+import os
+import signal
+import shlex
+import subprocess
+import sys
+import tempfile
+import time
+import traceback
+import yaml
+
+
+CONFIG_FILE = '/etc/backup-snapshot.conf'
+
+
+def status(msg, *format):
+ # Messages have to go on stderr because rsync communicates on stdout.
+ logging.info(msg, *format)
+ sys.stderr.write(msg % format + '\n')
+
+
+def run_command(argv):
+ '''Run a command, raising an exception on failure.
+
+ Output on stdout is returned.
+ '''
+ logging.debug("Running: %s", argv)
+ output = subprocess.check_output(argv, close_fds=True)
+
+ logging.debug("Output: %s", output)
+ return output
+
+
+@contextlib.contextmanager
+def pause_services(services):
+ '''Stop a set of systemd services for the duration of a 'with' block.'''
+
+ logging.info("Pausing services: %s", services)
+ try:
+ for service in services:
+ run_command(['systemctl', 'stop', service])
+ yield
+ finally:
+ for service in services:
+ run_command(['systemctl', 'start', service])
+ logging.info("Restarted services: %s", services)
+
+
+def snapshot_volume(volume_path, suffix=None):
+ '''Create a snapshot of an LVM volume.'''
+
+ volume_group_path, volume_name = os.path.split(volume_path)
+
+ if suffix is None:
+ suffix = time.strftime('-backup-%Y-%m-%d')
+ snapshot_name = volume_name + suffix
+
+ logging.info("Snapshotting volume %s as %s", volume_path, snapshot_name)
+    run_command(['lvcreate', '--name', snapshot_name, '--snapshot',
+                 volume_path, '--extents', '100%ORIGIN', '--permission=r'])
+
+ snapshot_path = os.path.join(volume_group_path, snapshot_name)
+ return snapshot_path
+
+
+def delete_volume(volume_path):
+ '''Delete an LVM volume or snapshot.'''
+
+ # Sadly, --force seems necessary, because activation applies to the whole
+ # volume group rather than to the individual volumes so we can't deactivate
+ # only the snapshot before removing it.
+ logging.info("Deleting volume %s", volume_path)
+ run_command(['lvremove', '--force', volume_path])
+
+
+@contextlib.contextmanager
+def mount(block_device, path=None):
+ '''Mount a block device for the duration of 'with' block.'''
+
+ if path is None:
+ path = tempfile.mkdtemp()
+ tempdir = path
+ logging.debug('Created temporary directory %s', tempdir)
+ else:
+ tempdir = None
+
+ try:
+ run_command(['mount', block_device, path])
+ try:
+ yield path
+ finally:
+ run_command(['umount', path])
+ finally:
+ if tempdir is not None:
+ logging.debug('Removed temporary directory %s', tempdir)
+ os.rmdir(tempdir)
+
+
+def load_config(filename):
+ '''Load configuration from a YAML file.'''
+
+ logging.info("Loading config from %s", filename)
+ with open(filename, 'r') as f:
+ config = yaml.safe_load(f)
+
+ logging.debug("Config: %s", config)
+ return config
+
+
+def get_rsync_sender_flag(rsync_commandline):
+ '''Parse an 'rsync --server' commandline to get the --sender ID.
+
+ This parses a remote commandline, so be careful.
+
+ '''
+ args = shlex.split(rsync_commandline)
+ if args[0] != 'rsync':
+ raise RuntimeError("Not passed an rsync commandline.")
+
+ for i, arg in enumerate(args):
+ if arg == '--sender':
+ sender = args[i + 1]
+ return sender
+ else:
+ raise RuntimeError("Did not find --sender flag.")
+
+
+def run_rsync_server(source_path, sender_flag):
+ # Adding '/' to the source_path tells rsync that we want the /contents/
+ # of that directory, not the directory itself.
+ #
+ # You'll have realised that it doesn't actually matter what remote path the
+ # user passes to their local rsync.
+ rsync_command = ['rsync', '--server', '--sender', sender_flag, '.',
+ source_path + '/']
+ logging.debug("Running: %s", rsync_command)
+ subprocess.check_call(rsync_command, stdout=sys.stdout)
+
+
+def main():
+ logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
+ datefmt='%Y-%m-%d %H:%M:%S',
+ filename='/var/log/backup-snapshot.log',
+ level=logging.DEBUG)
+
+ logging.debug("Running as UID %i GID %i", os.getuid(), os.getgid())
+
+ # Ensure that clean up code (various 'finally' blocks in the functions
+ # above) always runs. This is important to ensure we never leave services
+ # stopped if the process is interrupted somehow.
+
+ signal.signal(signal.SIGHUP, signal.default_int_handler)
+
+ config = load_config(CONFIG_FILE)
+
+ # Check commandline early, so we don't stop services just to then
+ # give an error message.
+ rsync_command = os.environ.get('SSH_ORIGINAL_COMMAND', '')
+ logging.info("Original SSH command: %s", rsync_command)
+
+ if len(rsync_command) == 0:
+ # For testing only -- this can only happen if
+ # ~/.ssh/authorized_keys isn't set up as described above.
+ logging.info("Command line: %s", sys.argv)
+ rsync_command = 'rsync ' + ' '.join(sys.argv[1:])
+
+ # We want to ignore as much as possible of the
+ # SSH_ORIGINAL_COMMAND, because it's a potential attack vector.
+ # If an attacker has somehow got hold of the backup SSH key,
+ # they can pass whatever they want, so we hardcode the 'rsync'
+ # commandline here instead of honouring what the user passed
+ # in. We can anticipate everything except the '--sender' flag.
+ sender_flag = get_rsync_sender_flag(rsync_command)
+
+ with pause_services(config['services']):
+ snapshot_path = snapshot_volume(config['volume'])
+
+ try:
+ with mount(snapshot_path) as mount_path:
+ run_rsync_server(mount_path, sender_flag)
+
+ status("rsync server process exited with success.")
+ finally:
+ delete_volume(snapshot_path)
+
+
+try:
+ status('backup-snapshot started')
+ main()
+except RuntimeError as e:
+    sys.stderr.write('ERROR: %s\n' % e)
+    sys.exit(1)
+except Exception as e:
+ logging.debug(traceback.format_exc())
+ raise
diff --git a/baserock-ops-team.cloud-config b/baserock-ops-team.cloud-config
new file mode 100644
index 00000000..275deeae
--- /dev/null
+++ b/baserock-ops-team.cloud-config
@@ -0,0 +1,16 @@
+#cloud-config
+
+# The contents of this cloud-config script should be included in the
+# post-creation 'customisation script' for every instance in the public
+# baserock.org infrastructure. It gives access to all members the Baserock Ops
+# team, so that any member of the team can deploy security updates.
+
+ssh_authorized_keys:
+ # Pedro Alvarez
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPuirtOH8D/6iNAb3DE079FcTmDlDgusVBJ2FC0O/FHSxwAgNwHhUbCxHAcO/N+HICSvDmjp16Ki0ti2ZxfworG88shPiMOGQfuJaRv1X15AV7NsO80Llsqy/x8X+WdA5iwpUyKM011vv/pS/DhSCHJFJ/vQFgox12HQSKZuzGIOupCiZfHES5t5oEPAcoQYCC0hO4ZevyeO0ZixrOGf/iyXHyb2BoQJAehixt28YOfdaW7Z29SssCGf7QvtADYg+vF5Tazln51vp1M+fo1oF0aa/VLN3gYuf+BI6x6sEc4N/ZQaCR5+oBP3/gIVlIwOOftzC9G+l6PBOS4368nZTv pedro.alvarez@codethink.co.uk
+
+ # Gary Perkins
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQvRqmYpSVpff0MJq9aigjNQX22PdMkDiXpcV7EbDWdE3QLk7D818dljMKy2SvmgiEO7e/5jn8K7b9Dr88GF4dM/Oxc2k2yP9fzMoW+cE/drHBH+zDb9Zw1xa+t1AcMtl0XAEZft/hvpgx+Tp2XaEv6t7O9Ogxw1ahKtbkgDprhrnC9cVctu3VJhu8amY4BYZC9hRZUa02pCQl1i0klYq7E61zF8I25hS6HP0fbD/O+hAt5N3VqmkN+4QmCP8kkXSmyjKOurnXcGKPWonpOyB3cwVk3DO7krsw2qIIVoe/9PIK112oHNJxM01UUF+ZiPGEWawQfHRNG8Y03KQJanaf gary@garyp
+
+ # Sam Thursfield
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkDPLAg9ueRzIVdPbfsGboXbnd7HIwQ9oiFh1JP8NvyZ0ZWejmY7m5k6FOTDBp70Rwx1/6+rzLyCsbT5SN+rK0Ywl145A09jODRt2PWZ3ddsUDfXTY7Ycu3MLOsyjIiY5o9zhSmu+/pU90TlvlE16TFUPnyc4PDqI1DALCUf7OxDVihcecn4Fhd0XQI8FBM/c47CjvyD2g+xr2b5Qa7eCfBEFTCqpQegDOQN3Hlq1t1VLLXv+srcQkI+uh4wseJ3GcQ4T/+6w6axlGd6a2v8IjKALxveCKyI5bHirKTMJZg+BCulb+ucoafbRbLcNpmrEVfhUE5O4/ffBExaEiwni1 sam.thursfield@codethink.co.uk
diff --git a/baserock_backup/backup.sh b/baserock_backup/backup.sh
new file mode 100755
index 00000000..a65b1445
--- /dev/null
+++ b/baserock_backup/backup.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+# These aren't normal invocations of rsync: the targets use the
+# 'command' option in /root/.ssh/authorized_keys to force execution of
+# the 'backup-snapshot' script at the remote end, which then starts the
+# rsync server process. So the backup SSH key can only be used to make
+# backups, nothing more.
+
+# Don't make the mistake of trying to run this from a systemd unit. There is
+# some brokenness in systemd that causes the SSH connection forwarding to not
+# work, so you will not be able to connect to the remote machines.
+
+# Database
+/usr/bin/rsync --archive --delete-before --delete-excluded \
+ --hard-links --human-readable --progress --sparse \
+ root@192.168.222.146: /srv/backup/database \
+ && date > /srv/backup/database.timestamp
+
+# Gerrit
+/usr/bin/rsync --archive --delete-before --delete-excluded \
+ --hard-links --human-readable --progress --sparse \
+ --exclude='cache/' --exclude='tmp/' \
+ root@192.168.222.69: /srv/backup/gerrit \
+ && date > /srv/backup/gerrit.timestamp
+
+# FIXME: Storyboard database is not currently backed up, see:
+# <https://storyboard.baserock.org/#!/story/74>.
diff --git a/baserock_backup/crond.service b/baserock_backup/crond.service
new file mode 100644
index 00000000..07efb807
--- /dev/null
+++ b/baserock_backup/crond.service
@@ -0,0 +1,18 @@
+# Busybox Cron is used to run the 'baserock-backup' script.
+#
+# It'd be better to run that script using a systemd timer unit, but the
+# script didn't work when run by systemd when I last tested it. systemd
+# seems to do something to stdin/stdout which breaks the SSH connection
+# forwarding that backup.sh relies on for rsync.
+
+[Unit]
+Description=Busybox Cron daemon
+Requires=local-fs.target
+After=local-fs.target
+
+[Service]
+Type=simple
+ExecStart=/usr/sbin/crond -f
+
+[Install]
+WantedBy=multi-user.target
diff --git a/baserock_backup/instance-config.yml b/baserock_backup/instance-config.yml
new file mode 100644
index 00000000..8ccbb584
--- /dev/null
+++ b/baserock_backup/instance-config.yml
@@ -0,0 +1,38 @@
+# Configuration for a machine that runs data backups of baserock.org.
+#
+# The current backup machine is not a reproducible deployment, but this
+# playbook should be easily adaptable to produce a properly reproducible
+# one.
+---
+- hosts: baserock-backup1
+ gather_facts: false
+ tasks:
+ - name: user for running backups
+ user: name=backup
+
+ # You'll need to copy in the SSH key manually for this user.
+
+ - name: SSH config for backup user
+ copy: src=ssh_config dest=/home/backup/.ssh/config
+
+ - name: backup script
+ copy: src=backup.sh dest=/home/backup/backup.sh mode=755
+
+ # You will need https://github.com/ansible/ansible-modules-core/pull/986
+ # for this to work.
+ - name: backup cron job, runs every day at midnight
+ cron:
+ hour: 00
+ minute: 00
+ job: /home/backup/backup.sh
+ name: baserock.org data backup
+ user: backup
+
+ # As the .service file says, running the backup script via 'cron' is
+ # a workaround for what may be a bug in systemd. I don't have the energy
+ # to investigate the problem right now.
+ - name: .service file for Busybox 'cron' daemon
+ copy: src=crond.service dest=/etc/systemd/system mode=644
+
+ - name: service for Busybox 'cron' daemon
+ service: name=crond enabled=yes state=started
diff --git a/baserock_backup/ssh_config b/baserock_backup/ssh_config
new file mode 100644
index 00000000..e14b38a0
--- /dev/null
+++ b/baserock_backup/ssh_config
@@ -0,0 +1,4 @@
+# SSH configuration to route all requests to baserock.org systems
+# via the frontend system, 185.43.218.170.
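+#
+# With this in place, a connection like 'ssh root@192.168.222.69' is
+# transparently routed through the frontend machine.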
+Host 192.168.222.*
+ ProxyCommand ssh backup@185.43.218.170 -W %h:%p
diff --git a/baserock_database/backup-snapshot.conf b/baserock_database/backup-snapshot.conf
new file mode 100644
index 00000000..cb3a2ff0
--- /dev/null
+++ b/baserock_database/backup-snapshot.conf
@@ -0,0 +1,4 @@
+services:
+ - mariadb.service
+
+volume: /dev/vg0/database
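+
+# The backup-snapshot script stops the services listed above, snapshots the
+# LVM volume, serves the snapshot over rsync, then restarts the services.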
diff --git a/baserock_database/image-config.yml b/baserock_database/image-config.yml
new file mode 100644
index 00000000..7b89e700
--- /dev/null
+++ b/baserock_database/image-config.yml
@@ -0,0 +1,46 @@
+# System configuration for Baserock database server.
+#
+# This Ansible playbook expects to be run on a Fedora 23 Cloud image.
+---
+- hosts: database-mariadb
+ gather_facts: False
+ sudo: True
+ tasks:
+ # See: https://fedoramagazine.org/getting-ansible-working-fedora-23/
+ - name: install Python2 and required deps for Ansible modules
+ raw: dnf install -y python2 python2-dnf libselinux-python
+
+ - name: ensure system up to date
+ dnf: name=* state=latest
+
+ - name: enable persistent journal
+ shell: mkdir /var/log/journal
+ args:
+ creates: /var/log/journal
+
+ - name: install lvm2 tools
+ dnf: name=lvm2 state=latest
+
+ - name: install MariaDB
+ dnf: name={{ item }} state=latest
+ with_items:
+ - mariadb
+ - mariadb-server
+ - MySQL-python
+
+ # By default the 'mysql' user's home directory is /var/lib/mysql, but this
+ # causes a hidden directory (.local/share/systemd) to be created inside it,
+ # which breaks MariaDB: it expects each directory in there to represent a
+ # database. You see this when upgrading:
+ #
+ # Phase 2/6: Fixing views
+ # mysqlcheck: Got error: 1102: Incorrect database name '#mysql50#.local' when selecting the database
+ #
+ - name: fix home directory of MySQL user
+ user: name=mysql home=/
+
+ - name: disable SELinux on subsequent boots
+ selinux: state=disabled
+
+ - name: disable SELinux on current boot
+ command: setenforce 0
diff --git a/baserock_database/instance-backup-config.yml b/baserock_database/instance-backup-config.yml
new file mode 100644
index 00000000..d04e809b
--- /dev/null
+++ b/baserock_database/instance-backup-config.yml
@@ -0,0 +1,29 @@
+# Instance backup configuration for the baserock.org database.
+---
+- hosts: database-mariadb
+ gather_facts: false
+ sudo: yes
+ vars:
+ FRONTEND_IP: 192.168.222.143
+ tasks:
+ - name: pyyaml for Python 2
+ dnf: name=PyYAML state=latest
+
+ - name: backup-snapshot script
+ copy: src=../backup-snapshot dest=/usr/bin/backup-snapshot mode=755
+
+ - name: backup-snapshot config
+ copy: src=backup-snapshot.conf dest=/etc/backup-snapshot.conf
+
+ # We need to give the backup automation 'root' access, because it needs to
+ # manage system services, LVM volumes, and mounts, and because it needs to
+ # be able to read private data. The risk of having the backup key
+ # compromised is mitigated by only allowing it to execute the
+ # 'backup-snapshot' script, and limiting the hosts it can be used from.
+ - name: access for backup SSH key
+ authorized_key:
+ user: root
+ key: "{{ lookup('file', '../keys/backup.key.pub') }}"
+ # Quotes are important in these options; the OpenSSH server will reject
+ # the entry if the 'from' or 'command' values are not quoted.
+ key_options: 'from="{{FRONTEND_IP}}",no-agent-forwarding,no-port-forwarding,no-X11-forwarding,command="/usr/bin/backup-snapshot"'
diff --git a/baserock_database/instance-config.yml b/baserock_database/instance-config.yml
new file mode 100644
index 00000000..b3f6a8c6
--- /dev/null
+++ b/baserock_database/instance-config.yml
@@ -0,0 +1,15 @@
+# Instance configuration for Baserock database server.
+#
+# This script expects a volume to be available at /dev/vdb.
+---
+- hosts: database-mariadb
+ gather_facts: False
+ sudo: yes
+ tasks:
+ - include: ../tasks/create-data-volume.yml lv_name=database lv_size=25g mountpoint=/var/lib/mysql
+
+ - name: ensure mysql user owns /var/lib/mysql
+ file: path=/var/lib/mysql owner=mysql group=mysql mode=700 state=directory
+
+ - name: start MariaDB service
+ service: name=mariadb state=started
diff --git a/baserock_database/instance-mariadb-config.yml b/baserock_database/instance-mariadb-config.yml
new file mode 100644
index 00000000..0febaaf4
--- /dev/null
+++ b/baserock_database/instance-mariadb-config.yml
@@ -0,0 +1,71 @@
+# MariaDB configuration for Baserock database server.
+#
+# The relevant .database_password.yml files will need to be available already.
+# Create these manually and keep them somewhere safe and secret.
+---
+- hosts: database-mariadb
+ gather_facts: False
+ vars_files:
+ - root.database_password.yml
+ - baserock_gerrit.database_password.yml
+ - baserock_openid_provider.database_password.yml
+ - baserock_storyboard.database_password.yml
+ tasks:
+ - name: creating root database user
+ mysql_user: |
+ name=root
+ password={{ root_password }}
+ login_host=127.0.0.1
+ login_user=root
+ login_password={{ root_password }}
+ check_implicit_admin=yes
+
+ - name: remove the MySQL test database
+ mysql_db:
+ name=test state=absent
+ login_host=127.0.0.1
+ login_user=root
+ login_password={{ root_password }}
+
+ # Note that UTF-8 encoding and collation are *not* the default. Don't remove
+ # those lines or you will end up with a horrible disaster of a database.
+ - name: adding databases
+ mysql_db: |
+ name={{ item }}
+ state=present
+ login_host=127.0.0.1
+ login_user=root
+ login_password={{ root_password }}
+ collation=utf8_unicode_ci
+ encoding=utf8
+ with_items:
+ - gerrit
+ - openid_provider
+ - storyboard
+
+ # We could probably restrict the privileges of these users further...
+ #
+ # I feel like setting 'host="%"' (i.e. not enforcing that the account can
+ # only be used by IPs within the cloud's local network, or even a single
+ # known IP address) is kind of bad practice, but since the database server
+ # is not exposed to the internet anyway I don't think it's important right
+ # now.
+ - name: adding other database users
+ mysql_user: |
+ name="{{ item.name }}"
+ host="%"
+ password={{ item.password }}
+ priv={{ item.priv }}
+ login_host=127.0.0.1
+ login_user=root
+ login_password={{ root_password }}
+ with_items:
+ - name: gerrit
+ password: "{{ baserock_gerrit_password }}"
+ priv: gerrit.*:ALL
+ - name: openid
+ password: "{{ baserock_openid_provider_password }}"
+ priv: openid_provider.*:ALL
+ - name: storyboard
+ password: "{{ baserock_storyboard_password }}"
+ priv: storyboard.*:ALL
diff --git a/baserock_frontend/haproxy.cfg b/baserock_frontend/haproxy.cfg
new file mode 100644
index 00000000..bb0120cc
--- /dev/null
+++ b/baserock_frontend/haproxy.cfg
@@ -0,0 +1,122 @@
+# HAProxy configuration for Baserock Project front-end proxy.
+
+global
+ maxconn 4000
+
+ daemon
+ pidfile /var/run/haproxy.pid
+ user haproxy
+ group haproxy
+
+ log /dev/log local0
+ stats socket /var/lib/haproxy/stats
+
+ # Maximum number of bits used when generating temporary
+ # keys for DHE key exchange. Higher values involve more CPU
+ # usage; lower values are less secure. HAProxy's default is
+ # 1024, which is too low, and HAProxy actually warns if you use
+ # the default.
+ tune.ssl.default-dh-param 2048
+
+ ssl-default-bind-ciphers EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH
+
+defaults
+ mode http
+ timeout connect 5000ms
+ timeout client 50000ms
+ timeout server 50000ms
+
+ log global
+ option httplog
+
+frontend http-in
+ # All HTTP traffic is redirected to HTTPS using the '301 Moved' HTTP code.
+ bind *:80
+ redirect scheme https code 301
+
+frontend https-in
+ # We do 'SSL termination' with HAProxy. So secure requests are received in
+ # the frontend, then decrypted and sent over HTTP on the internal network.
+ # This means we only need to have the certificate in one place, and the
+ # configuration of the other instances is simpler. It does mean that we
+ # need to avoid having any insecure machines in the cloud.
+ bind *:443 ssl no-sslv3 crt /etc/pki/tls/private/baserock.pem
+ reqadd X-Forwarded-Proto:\ https
+
+ # Rules below here implement the URL-based forwarding to the
+ # appropriate instance. The hdr(host) call means 'extract the
+ # first Host header from the HTTP request or response', the '-m beg'
+ # switch means 'match against the beginning of it' and the '-i' flag
+ # makes the match case-insensitive.
+ #
+ # See <https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#7>
+ # for more documentation than you ever dreamed of.
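+ #
+ # For example, a request for 'gerrit.baserock.org' matches the
+ # 'host_gerrit' ACL below and is sent to the Gerrit backend.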
+
+ acl host_gerrit hdr(host) -m beg -i gerrit
+ use_backend baserock_gerrit_http if host_gerrit
+
+ acl host_irclogs hdr(host) -m beg -i irclogs
+ use_backend baserock_irclogs_http if host_irclogs
+
+ acl host_mason_x86_32 hdr(host) -m beg -i mason-x86-32
+ use_backend baserock_mason_x86_32_http if host_mason_x86_32
+
+ acl host_mason_x86_64 hdr(host) -m beg -i mason-x86-64
+ use_backend baserock_mason_x86_64_http if host_mason_x86_64
+
+ acl host_download hdr(host) -m beg -i download
+ use_backend baserock_webserver_http if host_download
+
+ acl host_docs hdr(host) -m beg -i docs
+ use_backend baserock_webserver_http if host_docs
+
+ acl host_opengrok hdr(host) -m beg -i opengrok
+ use_backend baserock_opengrok_http if host_opengrok
+
+ use_backend baserock_openid_provider_http if { hdr(host) -m beg -i openid }
+
+frontend ssh-in
+ # FIXME: it'd be better if we could limit traffic on port 29418 to
+ # gerrit.baserock.org. There's no way of knowing from an SSH request
+ # which subdomain the user tried to connect to, so for now they can
+ # clone repos from 'ssh://openid.baserock.org:29418' and such like.
+ # For this reason it's probably worth pointing gerrit.baserock.org to
+ # a different floating IP that serves only the gerrit instance.
+ mode tcp
+ bind *:29418
+ default_backend baserock_gerrit_ssh
+
+ # It's very annoying for 'gerrit stream-events' to be disconnected
+ # after 50 seconds!
+ timeout client 1h
+
+# Entries here locate each server backend.
+
+backend baserock_gerrit_http
+ server baserock_gerrit 192.168.222.69:8080
+
+backend baserock_gerrit_ssh
+ mode tcp
+ server baserock_gerrit 192.168.222.69:29418
+
+ # It's very annoying for 'gerrit stream-events' to be disconnected
+ # after 50 seconds!
+ timeout server 1h
+
+backend baserock_irclogs_http
+ server baserock_irclogs 192.168.222.74:80
+
+backend baserock_mason_x86_32_http
+ server baserock_mason_x86_32 192.168.222.81:80
+
+backend baserock_mason_x86_64_http
+ server baserock_mason_x86_64 192.168.222.80:80
+
+backend baserock_openid_provider_http
+ server baserock_openid_provider 192.168.222.144:80
+
+backend baserock_webserver_http
+ server baserock_webserver 192.168.222.127:80
+
+backend baserock_opengrok_http
+ server baserock_opengrok 192.168.222.149:8080
diff --git a/baserock_frontend/image-config.yml b/baserock_frontend/image-config.yml
new file mode 100644
index 00000000..08a9b64a
--- /dev/null
+++ b/baserock_frontend/image-config.yml
@@ -0,0 +1,34 @@
+# System configuration for Baserock HAProxy instance.
+---
+- hosts: frontend-haproxy
+ gather_facts: false
+ sudo: yes
+ tasks:
+ # See: https://fedoramagazine.org/getting-ansible-working-fedora-23/
+ - name: install Python2 and required deps for Ansible modules
+ raw: dnf install -y python2 python2-dnf
+
+ - name: enable persistent journal
+ shell: mkdir /var/log/journal
+ args:
+ creates: /var/log/journal
+
+ - name: ensure system up to date
+ dnf: name=* state=latest
+
+ - name: HAProxy installed
+ dnf: name=haproxy state=latest
+
+ - name: netcat installed
+ dnf: name=nc state=latest
+
+ # Yes, SELinux prevents HAProxy from working. In this case I think it's
+ # because we ask it to listen on port 29418 for Gerrit's SSH connections.
+ - name: install libselinux-python, so Ansible can control selinux
+ dnf: name=libselinux-python state=latest
+
+ - name: disable SELinux on subsequent boots
+ selinux: state=disabled
+
+ - name: disable SELinux on current boot
+ command: setenforce 0
diff --git a/baserock_frontend/instance-backup-config.yml b/baserock_frontend/instance-backup-config.yml
new file mode 100644
index 00000000..e17cf586
--- /dev/null
+++ b/baserock_frontend/instance-backup-config.yml
@@ -0,0 +1,29 @@
+# Instance backup configuration for the baserock.org frontend system.
+#
+# We don't need to back anything up from this system, but the backup
+# SSH key needs access to it in order to SSH to the other systems on the
+# internal network.
+---
+- hosts: frontend-haproxy
+ gather_facts: false
+ sudo: yes
+ vars:
+ # The 'backup' key cannot be used to SSH into the 'frontend' machine except
+ # from these IPs.
+ #
+ # 82.70.136.246 is Codethink's Zen Internet line.
+ # 86.134.108.84 is a BT Internet IP currently in use by Codethink.
+ #
+ # The backup SSH private key is the main secret needed to connect as the
+ # backup user. This IP restriction is just for a bit of extra safety.
+ PERMITTED_BACKUP_HOSTS: 82.70.136.246/32,86.134.108.84/32
+ tasks:
+ - name: backup user
+ user:
+ name: backup
+
+ - name: authorize backup public key
+ authorized_key:
+ user: backup
+ key: "{{ lookup('file', '../keys/backup.key.pub') }}"
+ key_options: 'from="{{ PERMITTED_BACKUP_HOSTS }}",no-agent-forwarding,no-X11-forwarding'
diff --git a/baserock_frontend/instance-config.yml b/baserock_frontend/instance-config.yml
new file mode 100644
index 00000000..d7ce842b
--- /dev/null
+++ b/baserock_frontend/instance-config.yml
@@ -0,0 +1,18 @@
+# Instance configuration for Baserock HAProxy instance.
+#
+# This playbook should be run after starting an instance of the Baserock
+# frontend image.
+---
+- hosts: frontend-haproxy
+ gather_facts: false
+ sudo: yes
+ tasks:
+ # To create the .pem file, simply concatenate
+ # certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert with
+ # the private key for that certificate (which is not committed to Git, of
+ # course).
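+ # For example (the private-key filename here is illustrative):
+ #
+ # cat certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert \
+ # private/baserock.org.key > baserock.org-ssl-certificate-temporary-dsilverstone.pem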
+ - name: install SSL certificate
+ copy: src=../private/baserock.org-ssl-certificate-temporary-dsilverstone.pem dest=/etc/pki/tls/private/baserock.pem owner=haproxy mode=400
+
+ - name: HAProxy configuration
+ copy: src=haproxy.cfg dest=/etc/haproxy/haproxy.cfg
diff --git a/baserock_gerrit/All-Projects/groups b/baserock_gerrit/All-Projects/groups
new file mode 100644
index 00000000..da2baa74
--- /dev/null
+++ b/baserock_gerrit/All-Projects/groups
@@ -0,0 +1,16 @@
+# UUID Group Name
+#
+global:Anonymous-Users Anonymous Users
+global:Project-Owners Project Owners
+global:Registered-Users Registered Users
+
+# This file is filled in with the other group IDs by the
+# gerrit-access-config.yml Ansible playbook.
+b660c33b68509db9dbd9578ae00035da90c0d5eb Administrators
+8e467a11f116bb716a65ac85e28bf09ebfeb0d63 Non-Interactive Users
+898d9c4232b8fcac6a3b128f7264c5d4c8b1eead Developers
+b8fc45c681b94669fe3fa965c48d5221a515a3a6 Mergers
+8c788c828285c3dd0a8c1cc152de6735085def9f Mirroring Tools
+a7a9cc6639bd943e47da0d20b39267a08b43cd91 Release Team
+d643abb0ad6e9d5ac33093af5cd3a3d4e484d95d Reviewers
+cea6c19a08e11b74e63a567e050bec2c6eeb14dc Testers
diff --git a/baserock_gerrit/All-Projects/project.config b/baserock_gerrit/All-Projects/project.config
new file mode 100644
index 00000000..f3069904
--- /dev/null
+++ b/baserock_gerrit/All-Projects/project.config
@@ -0,0 +1,125 @@
+# Top-level access controls for projects on Baserock Gerrit.
+
+# These can be overridden by a project's own project.config file. They are also
+# overridden by the config of a project's parent repo, if it is set to something
+# other than the default parent project 'All-Projects'.
+
+# Useful references:
+#
+# https://gerrit-documentation.storage.googleapis.com/Documentation/2.11/access-control.html
+# https://git.openstack.org/cgit/openstack-infra/system-config/tree/doc/source/gerrit.rst
+
+# To deploy changes to this file, you need to manually commit it and push it to
+# the 'refs/meta/config' ref of the All-Projects repo in Gerrit.
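+#
+# An illustrative sequence for that (adjust the remote name as needed):
+#
+#   git fetch origin refs/meta/config && git checkout FETCH_HEAD
+#   $EDITOR project.config groups
+#   git commit -am "Update access controls"
+#   git push origin HEAD:refs/meta/config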
+
+[project]
+ description = Access inherited by all other projects.
+
+[receive]
+ requireContributorAgreement = false
+ requireSignedOffBy = false
+ requireChangeId = true
+
+[submit]
+ mergeContent = true
+ action = rebase if necessary
+
+[capability]
+ administrateServer = group Administrators
+ priority = batch group Non-Interactive Users
+ streamEvents = group Non-Interactive Users
+
+ createProject = group Mirroring Tools
+
+# Everyone can read everything.
+[access "refs/*"]
+ read = group Administrators
+ read = group Anonymous Users
+
+
+# Developers can propose changes. All 'Registered Users' are 'Developers'.
+[access "refs/for/refs/*"]
+ push = group Developers
+ pushMerge = group Developers
+
+
+[access "refs/heads/*"]
+ forgeAuthor = group Developers
+ rebase = group Developers
+ label-Code-Review = -2..+2 group Mergers
+ submit = group Mergers
+ label-Code-Review = -1..+1 group Reviewers
+# label-Verified = -1..+1 group Testers
+
+ create = group Administrators
+ forgeAuthor = group Administrators
+ forgeCommitter = group Administrators
+ push = group Administrators
+ create = group Project Owners
+ forgeAuthor = group Project Owners
+ forgeCommitter = group Project Owners
+ push = group Project Owners
+ create = group Mergers
+ forgeAuthor = group Mergers
+ push = +force group Mergers
+
+ create = group Mirroring Tools
+ forgeAuthor = group Mirroring Tools
+ forgeCommitter = group Mirroring Tools
+ push = +force group Mirroring Tools
+
+
+# Nobody should be able to force push to 'master'. In particular, if Lorry
+# can force-push master then it will do so in the course of mirroring from
+# git.baserock.org, and this may undo merges that Gerrit just did and really
+# confuse things.
+[access "refs/heads/master"]
+ exclusiveGroupPermissions = push
+ push = block +force group Mergers
+ push = block +force group Mirroring Tools
+
+
+[access "refs/tags/*"]
+ pushTag = group Release Team
+ pushSignedTag = group Release Team
+
+ pushTag = group Administrators
+ pushSignedTag = group Administrators
+ pushTag = group Project Owners
+ pushSignedTag = group Project Owners
+
+ create = group Mirroring Tools
+ forgeAuthor = group Mirroring Tools
+ forgeCommitter = group Mirroring Tools
+ push = +force group Mirroring Tools
+ pushTag = +force group Mirroring Tools
+ pushSignedTag = +force group Mirroring Tools
+
+
+# Changing project configuration is allowed for Administrators only. (In theory
+# anyone who owns a project can change its permissions, but right now all
+# projects should be owned by the Administrators group).
+[access "refs/meta/config"]
+ exclusiveGroupPermissions = read
+
+ read = group Administrators
+ push = group Administrators
+ read = group Project Owners
+ push = group Project Owners
+
+[label "Code-Review"]
+ function = MaxWithBlock
+ copyMinScore = true
+ value = -2 Do not merge
+ value = -1 This patch needs further work before it can be merged
+ value = 0 No score
+ value = +1 Looks good to me, but someone else must approve
+ value = +2 Looks good to me, approved
+
+# Disabled for now, because there is no automated test tool hooked up to our
+# Gerrit yet.
+#[label "Verified"]
+# function = MaxWithBlock
+# value = -1 Failed
+# value = 0 No score
+# value = +1 Verified
diff --git a/baserock_gerrit/backup-snapshot.conf b/baserock_gerrit/backup-snapshot.conf
new file mode 100644
index 00000000..e8e2f3fc
--- /dev/null
+++ b/baserock_gerrit/backup-snapshot.conf
@@ -0,0 +1,5 @@
+services:
+ - lorry-controller-minion@1.service
+ - gerrit.service
+
+volume: /dev/vg0/gerrit
diff --git a/baserock_gerrit/baserock_gerrit.morph b/baserock_gerrit/baserock_gerrit.morph
new file mode 100644
index 00000000..c9b3b736
--- /dev/null
+++ b/baserock_gerrit/baserock_gerrit.morph
@@ -0,0 +1,26 @@
+name: baserock_gerrit
+kind: cluster
+
+description: |
+ Deployment .morph for baserock.org Gerrit system.
+
+ Configuration of the system is handled separately, with a series of
+ Ansible playbooks that should be run after an instance of the system
+ is up and running. See the README for instructions.
+
+systems:
+- morph: systems/gerrit-system-x86_64.morph
+ deploy:
+ gerrit.baserock.org:
+ type: extensions/openstack
+ location: https://compute.datacentred.io:5000/v2.0
+
+ # You can use this method to deploy upgrades over SSH, after the
+ # machine is deployed.
+ upgrade-type: extensions/ssh-rsync
+ upgrade-location: root@192.168.222.69
+
+ OPENSTACK_IMAGENAME: baserock_gerrit
+ CLOUD_INIT: true
+ DISK_SIZE: 3G
+ KERNEL_ARGS: console=tty0 console=ttyS0
diff --git a/baserock_gerrit/branding/GerritSite.css b/baserock_gerrit/branding/GerritSite.css
new file mode 100644
index 00000000..6a17f43d
--- /dev/null
+++ b/baserock_gerrit/branding/GerritSite.css
@@ -0,0 +1,15 @@
+body {color: #000 !important; background: url("static/openstack-page-bkg.jpg") no-repeat scroll 0 0 white !important; position: static}
+#gerrit_header {display: block !important; position: relative; top: -60px; margin-bottom: -60px; width: 200px; padding-left: 17px}
+#gerrit_header h1 {font-family: 'PT Sans', sans-serif; font-weight: normal; letter-spacing: -1px}
+
+#gerrit_topmenu {background: none; position:relative; top: 0px; left: 220px; margin-right: 220px}
+
+#gerrit_topmenu tbody tr td table {border: 0}
+
+#gerrit_topmenu tbody tr td table.gwt-TabBar {color: #353535; border-bottom: 1px solid #C5E2EA;}
+#gerrit_topmenu .gwt-Button {padding: 3px 6px}
+.gwt-TabBarItem-selected {color: #CF2F19 !important; border-bottom: 3px solid #CF2F19;}
+.gwt-TabBarItem {color: #353535; border-right: 0 !important}
+.gwt-TabBar .gwt-TabBarItem, .gwt-TabBar .gwt-TabBarRest, .gwt-TabPanelBottom {background: 0 !important;}
+
+#gerrit_topmenu .gwt-TextBox {width: 250px}
diff --git a/baserock_gerrit/branding/GerritSiteHeader.html b/baserock_gerrit/branding/GerritSiteHeader.html
new file mode 100644
index 00000000..5ad8d902
--- /dev/null
+++ b/baserock_gerrit/branding/GerritSiteHeader.html
@@ -0,0 +1 @@
+<h2 class="typo3-logo"> <a href="/"><img src="/static/baserock-logo.png" width="200" /></a> </h2>
diff --git a/baserock_gerrit/branding/baserock-logo.png b/baserock_gerrit/branding/baserock-logo.png
new file mode 100644
index 00000000..65811263
--- /dev/null
+++ b/baserock_gerrit/branding/baserock-logo.png
Binary files differ
diff --git a/baserock_gerrit/branding/openstack-page-bkg.jpg b/baserock_gerrit/branding/openstack-page-bkg.jpg
new file mode 100644
index 00000000..f788c41c
--- /dev/null
+++ b/baserock_gerrit/branding/openstack-page-bkg.jpg
Binary files differ
diff --git a/baserock_gerrit/gerrit-access-config.yml b/baserock_gerrit/gerrit-access-config.yml
new file mode 100644
index 00000000..cb8c4fea
--- /dev/null
+++ b/baserock_gerrit/gerrit-access-config.yml
@@ -0,0 +1,159 @@
+# Baserock Gerrit access controls, and predefined users, groups and projects.
+#
+# This Ansible playbook requires the ansible-gerrit modules:
+#
+# https://www.github.com/ssssam/ansible-gerrit
+#
+# These modules depend on pygerrit:
+#
+# https://www.github.com/sonyxperiadev/pygerrit/
+#
+# If you want to change the configuration, just edit this script and rerun it,
+# as described in the README.
+#
+# This script currently doesn't handle committing changes to the access control
+# rules for the 'All-Projects' project. To set up or modify the access control
+# rules, you'll need to manually commit project.config (in the All-Projects
+# subdirectory) to the 'refs/meta/config' ref of the All-Projects repo in
+# Gerrit. The 'groups' file will need to list all the groups referenced in
+# project.config. This script will add the UUIDs of all groups listed below
+# to the All-Projects/groups file, so you don't have to create it manually.
+---
+- hosts: localhost
+ tasks:
+ # System groups:
+ # - Anonymous Users
+ # - Change Owner
+ # - Project Owners
+ # - Registered Users
+
+ # Predefined groups:
+ # - Administrators
+ # - Non-Interactive Users
+
+ - gerrit_group:
+ name: Administrators
+ register: administrators_group
+
+ - gerrit_group:
+ name: Non-Interactive Users
+ register: non_interactive_users_group
+
+ # The 'owner' of a group defines who can modify that group. Users
+ # who are in the 'owner' group for a group 'Groupies' can add and remove
+ # people (and other groups) from 'Groupies' and can change the name,
+ # description and owner of 'Groupies.' Since we don't want the
+ # names, descriptions or owners of these predefined groups being
+ # changed, they are all left owned by the Administrators group.
+
+ - gerrit_group:
+ name: Developers
+ description: Registered users who choose to submit changes for consideration.
+ owner: Administrators
+ included_groups:
+ - Registered Users
+ register: developers_group
+
+ # Right now all Mergers are in the Release Team by default.
+ - gerrit_group:
+ name: Release Team
+ description: Developers who can tag releases
+ owner: Administrators
+ included_groups:
+ - Mergers
+ register: release_team_group
+
+ - gerrit_group:
+ name: Mergers
+ description: Developers who can trigger the actual merging of a change.
+ owner: Administrators
+ register: mergers_group
+
+ - gerrit_group:
+ name: Mirroring Tools
+ description: Programs that pull changes from external repositories into Gerrit's Git server
+ owner: Administrators
+ register: mirroring_tools_group
+
+ - gerrit_group:
+ name: Reviewers
+ description: Registered users who choose to give +1 / -1 reviews to proposed changes.
+ owner: Administrators
+ included_groups:
+ - Registered Users
+ register: reviewers_group
+
+ - gerrit_group:
+ name: Testers
+ description: Testers that can give +1 / -1 Verified to proposed changes.
+ owner: Administrators
+ register: testers_group
+
+ # Non-interactive accounts.
+
+ - gerrit_account:
+ username: firehose
+ fullname: Firehose integration bot
+ email: firehose@baserock.org
+ groups:
+ - Non-Interactive Users
+ - Developers
+ #ssh_key: xx
+
+ - gerrit_account:
+ username: lorry
+ fullname: Lorry mirroring service
+ email: lorry@baserock.org
+ groups:
+ - Mirroring Tools
+ - Non-Interactive Users
+ # FIXME: ansible-gerrit module should be able to handle a filename
+ # here, instead of needing this hack to read the contents.
+ ssh_key: "{{ lookup('file', '../keys/lorry-gerrit.key.pub') }}"
+
+ - gerrit_account:
+ username: mason
+ fullname: Mason automated tester
+ email: mason@baserock.org
+ groups:
+ - Non-Interactive Users
+ - Testers
+ #ssh_key: xx
+
+ # It'd make more sense to do this in the mirroring-config.yml file, but
+ # then the admin would need to supply their Gerrit credentials to that
+ # playbook too (which is more tricky, because it doesn't run on
+ # 'localhost').
+ - name: repo to hold Lorry Controller mirroring configuration
+ gerrit_project:
+ name: local-config/lorries
+ description: Configuration for Lorry for mirroring from Trove
+
+ - name: create 'groups' mapping required by Gerrit
+ lineinfile:
+ create: yes
+ dest: All-Projects/groups
+ line: "{{ item.group_info.id }}\t{{ item.group_info.name }}"
+ with_items:
+ - "{{ administrators_group }}"
+ - "{{ non_interactive_users_group }}"
+ - "{{ developers_group }}"
+ - "{{ mergers_group }}"
+ - "{{ mirroring_tools_group }}"
+ - "{{ release_team_group }}"
+ - "{{ reviewers_group }}"
+ - "{{ testers_group }}"
+
+ - name: push access configuration for all repos
+ git_commit_and_push:
+ repo: "{{ ansible_env.GERRIT_ADMIN_REPO }}"
+ ref: refs/meta/config
+ files:
+ - ./All-Projects/groups
+ - ./All-Projects/project.config
+ strip_path_components: 1
+ commit_message: |
+ Update global project access control rules.
+
+ This commit was made by an Ansible playbook living in
+ git://git.baserock.org/baserock/baserock/infrastructure.
diff --git a/baserock_gerrit/gerrit.config b/baserock_gerrit/gerrit.config
new file mode 100644
index 00000000..e162f052
--- /dev/null
+++ b/baserock_gerrit/gerrit.config
@@ -0,0 +1,54 @@
+# This is the main Gerrit configuration. If you make changes to this
+# file, rerun `ansible-playbook -i hosts baserock_gerrit/instance-config.yml`
+# to deploy them to production.
+
+[gerrit]
+ basePath = git
+ canonicalWebUrl = https://gerrit.baserock.org/
+[database]
+ type = mysql
+ hostname = 192.168.222.30
+ database = gerrit
+ username = gerrit
+[index]
+ type = LUCENE
+[auth]
+ type = OPENID_SSO
+ allowedOpenID = https://openid.baserock.org/
+ trustedOpenID = https://openid.baserock.org/
+ # XRDS is a mechanism for saying 'here are the services I provide'. Gerrit
+ # expects the URL provided here to describe the OpenID provider service
+ # using XRDS.
+ openIdSsoUrl = https://openid.baserock.org/openid/xrds/
+[sendemail]
+ smtpServer = 192.168.222.145
+ # Send mails as '${user} (Code Review) <gerrit.baserock.org>'
+ # The gerrit@baserock.org email comes from the user.email setting
+ # below
+ from = MIXED
+[user]
+ name = Baserock Gerrit
+ email = gerrit@baserock.org
+[sshd]
+ listenAddress = *:29418
+[httpd]
+ listenUrl = proxy-https://*:8080/
+[cache]
+ directory = cache
+[cache "web_sessions"]
+ # Remember user logins for a year (default is 12 hours, which gets a
+ # bit annoying).
+ maxAge = 1 y
+
+# It seems like a bad idea to enable remote administration of plugins, but
+# there is absolutely no information available on how to do 'local'
+# administration of Gerrit plugins, so we can't really avoid it.
+[plugins]
+ allowRemoteAdmin = true
+[container]
+ user = gerrit
+ javaHome = {{ JRE_DIR }}/jre
+[receive]
+ enableSignedPush = false
diff --git a/baserock_gerrit/gerrit.service b/baserock_gerrit/gerrit.service
new file mode 100644
index 00000000..478693c3
--- /dev/null
+++ b/baserock_gerrit/gerrit.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Gerrit Code Review Server
+After=network.target
+
+[Service]
+User=gerrit
+Group=gerrit
+Type=simple
+StandardOutput=syslog
+StandardError=syslog
+SyslogIdentifier=gerrit
+ExecStart={{ run_gerrit }} daemon --site-path /srv/gerrit --console-log
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git a/baserock_gerrit/instance-backup-config.yml b/baserock_gerrit/instance-backup-config.yml
new file mode 100644
index 00000000..cc647285
--- /dev/null
+++ b/baserock_gerrit/instance-backup-config.yml
@@ -0,0 +1,29 @@
+# Instance backup configuration for the baserock.org Gerrit system.
+---
+- hosts: gerrit
+ gather_facts: false
+ vars:
+ FRONTEND_IP: 192.168.222.143
+ tasks:
+ - name: backup-snapshot script
+ copy: src=../backup-snapshot dest=/usr/bin/backup-snapshot mode=755
+
+ - name: backup-snapshot config
+ copy: src=backup-snapshot.conf dest=/etc/backup-snapshot.conf
+
+ # Would be good to limit this to 'backup' user.
+ - name: passwordless sudo
+ lineinfile: dest=/etc/sudoers state=present line='%wheel ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s'
+
+ # We need to give the backup automation 'root' access, because it needs to
+ # manage system services, LVM volumes, and mounts, and because it needs to
+ # be able to read private data. The risk of having the backup key
+ # compromised is mitigated by only allowing it to execute the
+ # 'backup-snapshot' script, and limiting the hosts it can be used from.
+ - name: access for backup SSH key
+ authorized_key:
+ user: root
+ key: "{{ lookup('file', '../keys/backup.key.pub') }}"
+ # Quotes are important in these options; the OpenSSH server will reject
+ # the entry if the 'from' or 'command' values are not quoted.
+ key_options: 'from="{{FRONTEND_IP}}",no-agent-forwarding,no-port-forwarding,no-X11-forwarding,command="/usr/bin/backup-snapshot"'
diff --git a/baserock_gerrit/instance-ca-certificate-config.yml b/baserock_gerrit/instance-ca-certificate-config.yml
new file mode 100644
index 00000000..0424b176
--- /dev/null
+++ b/baserock_gerrit/instance-ca-certificate-config.yml
@@ -0,0 +1,28 @@
+# The CA chain needed for the baserock.org certificate we use is present in
+# the system, but it's not present in the set of trusted root certificates
+# bundled with Java.
+#
+# We need Gerrit to trust the baserock.org certificate so that it will trust
+# https://openid.baserock.org/.
+#
+# This playbook is a hack at present: the second time you run it, the command
+# will fail because the certificate is already present. There is a proposed
+# Ansible module that can do this in a nicer way:
+# <https://github.com/ansible/ansible-modules-extras/pull/286/commits>.
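+#
+# You can check whether the certificate is already present with something
+# like:
+#
+#   <JRE_DIR>/jre/bin/keytool -list \
+#       -keystore <JRE_DIR>/jre/lib/security/cacerts -storepass changeit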
+---
+- hosts: gerrit
+ gather_facts: False
+ vars:
+ JRE_DIR: /opt/jdk1.8.0_40
+ tasks:
+ - name: baserock.org SSL certificate with chain of trust
+ copy: src=../certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert dest=/home/gerrit
+
+ - name: install SSL certificate into Java certificate keystore
+ shell: >
+ {{ JRE_DIR }}/jre/bin/keytool \
+ -file /home/gerrit/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert \
+ -importcert \
+ -keystore {{ JRE_DIR }}/jre/lib/security/cacerts \
+ -storepass changeit \
+ -noprompt
diff --git a/baserock_gerrit/instance-config.yml b/baserock_gerrit/instance-config.yml
new file mode 100644
index 00000000..c4c6030f
--- /dev/null
+++ b/baserock_gerrit/instance-config.yml
@@ -0,0 +1,133 @@
+# Instance-specific configuration for the baserock.org Gerrit system.
+#
+# You must have the Java SE Runtime Environment binary available in the
+# baserock_gerrit directory when you run this script.
+#
+# Download it from here:
+# <http://www.oracle.com/technetwork/java/javase/downloads/jre8-downloads-2133155.html>
+#
+- hosts: gerrit
+ gather_facts: False
+ vars:
+ GERRIT_VERSION: 2.12.2
+
+ # Download from http://www.oracle.com/technetwork/java/javase/downloads/server-jre8-downloads-2133154.html
+ JRE_FILE: server-jre-8u40-linux-x64.tar.gz
+ # This path should correspond to where the JRE ends up if you extract the
+ # downloaded tarball in /opt.
+ JRE_DIR: /opt/jdk1.8.0_40
+
+ # Download from http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html
+ JCE_FILE: jce_policy-8.zip
+
+ run_gerrit: "{{ JRE_DIR }}/bin/java -jar /opt/gerrit/gerrit-{{ GERRIT_VERSION }}.war"
+ vars_files:
+ - ../baserock_database/baserock_gerrit.database_password.yml
+ tasks:
+ - name: add gerrit user
+ user:
+ name: gerrit
+ shell: /bin/false
+ generate_ssh_key: yes
+ ssh_key_comment: gerrit@baserock.org
+
+ - name: unpack the Java Runtime Environment
+ unarchive: src={{ JRE_FILE }} dest=/opt owner=root group=root creates={{ JRE_DIR }}
+
+ # The Java Cryptography Extensions are needed in order to enable all SSH
+ # ciphers, due to US export restrictions.
+ - name: unpack the Java Cryptography Extensions
+ unarchive: src={{ JCE_FILE }} dest=/opt owner=root group=root creates=/opt/UnlimitedJCEPolicyJDK8/
+
+ - name: install the Java Cryptography Extensions
+ file: src=/opt/UnlimitedJCEPolicyJDK8/{{ item }} dest={{ JRE_DIR }}/jre/lib/security/{{ item }} state=link force=yes
+ with_items:
+ - local_policy.jar
+ - US_export_policy.jar
+
+ - name: create /opt/gerrit
+ file: path=/opt/gerrit state=directory
+
+ - name: download Gerrit
+ get_url:
+ url: https://gerrit-releases.storage.googleapis.com/gerrit-{{ GERRIT_VERSION }}.war
+ dest: /opt/gerrit/gerrit-{{ GERRIT_VERSION }}.war
+
+ - include: ../tasks/create-data-volume.yml lv_name=gerrit lv_size=25g mountpoint=/srv/gerrit
+
+ - name: ensure 'gerrit' user owns /srv/gerrit
+ file: path=/srv/gerrit owner=gerrit group=gerrit state=directory
+
+ - name: initialise Gerrit application directory
+ command: "{{ run_gerrit }} init -d /srv/gerrit creates=/srv/gerrit/etc/gerrit.config"
+ sudo: yes
+ sudo_user: gerrit
+
+ - name: extract and install some plugins for gerrit
+ shell: unzip /opt/gerrit/gerrit-{{ GERRIT_VERSION }}.war WEB-INF/plugins/{{ item }}.jar -p > /srv/gerrit/plugins/{{ item }}.jar
+ args:
+ creates: /srv/gerrit/plugins/{{ item }}.jar
+ with_items:
+ - replication
+ - download-commands
+ sudo: yes
+ sudo_user: gerrit
+
+ # WARNING: non-core plugins are not compiled into the gerrit.war file; we
+ # need to download them from somewhere else (https://gerrit-ci.gerritforge.com/
+ # or http://builds.quelltextlich.at/gerrit/nightly/index.html).
+ #
+ # We install them from there, but some of the plugins don't have a stable
+ # branch for a given Gerrit version. Before running this script, check that
+ # this task is pointing to an API-compatible version of the plugin.
+ - name: install non-core plugins for gerrit
+ shell: wget https://gerrit-ci.gerritforge.com/job/plugin-{{ item }}-master/lastBuild/artifact/buck-out/gen/plugins/{{ item }}/{{ item }}.jar -O /srv/gerrit/plugins/{{ item }}.jar
+ args:
+ creates: /srv/gerrit/plugins/{{ item }}.jar
+ with_items:
+ - avatars-gravatar
+ sudo: yes
+ sudo_user: gerrit
+
+ - name: download extra Java libraries
+ get_url:
+ url: "{{ item }}"
+ dest: /srv/gerrit/lib
+ with_items:
+ # MySQL Java Connector
+ - http://repo2.maven.org/maven2/mysql/mysql-connector-java/5.1.21/mysql-connector-java-5.1.21.jar
+
+ # Bouncy Castle Crypto APIs for Java. The interactive `gerrit init`
+ # command recommends installing these libraries, and who am I to argue?
+ - http://www.bouncycastle.org/download/bcpkix-jdk15on-152.jar
+ - http://www.bouncycastle.org/download/bcprov-jdk15on-152.jar
+
+ - name: install gerrit.config
+ template: src=gerrit.config dest=/srv/gerrit/etc/gerrit.config
+
+ - name: install images for branding
+ copy: src=branding/{{ item }} dest=/srv/gerrit/static/{{ item }}
+ with_items:
+ - baserock-logo.png
+ - openstack-page-bkg.jpg
+ sudo: yes
+ sudo_user: gerrit
+
+ - name: install HTML and CSS for branding
+ copy: src=branding/{{ item }} dest=/srv/gerrit/etc/{{ item }}
+ with_items:
+ - GerritSiteHeader.html
+ - GerritSite.css
+ sudo: yes
+ sudo_user: gerrit
+
+ - name: set database password
+ command: git config -f /srv/gerrit/etc/secure.config database.password "{{ baserock_gerrit_password }}"
+ sudo: yes
+ sudo_user: gerrit
+
+ - name: install gerrit.service
+ template: src=gerrit.service dest=/etc/systemd/system/gerrit.service
+
+ - name: start Gerrit service
+ service: name=gerrit enabled=yes state=restarted
diff --git a/baserock_gerrit/instance-mirroring-config.yml b/baserock_gerrit/instance-mirroring-config.yml
new file mode 100644
index 00000000..19ac76cc
--- /dev/null
+++ b/baserock_gerrit/instance-mirroring-config.yml
@@ -0,0 +1,68 @@
+# This Ansible playbook configures mirroring in and out of Gerrit.
+#
+# To run it, use:
+# ansible-playbook -i hosts baserock_gerrit/instance-mirroring-config.yml
+#
+# It expects the SSH key for the 'lorry' user to exist at
+# ../keys/lorry-gerrit.key.
+#
+# This script currently doesn't handle the lorry-controller.conf file that
+# controls what lorry-controller mirrors into Gerrit. To set up or modify
+# lorry-controller configuration you need to commit your changes to the
+# 'local-config/lorries' project on the Gerrit.
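+#
+# (That is: clone the 'local-config/lorries' repo from Gerrit, e.g. from
+# ssh://<user>@gerrit.baserock.org:29418/local-config/lorries, edit the
+# configuration and push the change.)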
+---
+- hosts: gerrit
+ gather_facts: no
+ sudo: yes
+ tasks:
+ - name: Lorry user
+ user: name=lorry comment="Lorry mirroring service"
+
+ # Ansible can generate a new SSH key for Lorry when we add the user,
+ # but it seems tricky to then extract this and add it to the 'lorry' Gerrit
+ # user.
+ - name: SSH private key for Lorry user
+ copy: src=../keys/lorry-gerrit.key dest=~/.ssh/id_rsa mode=600
+ sudo_user: lorry
+
+ - name: SSH public key for Lorry user
+ copy: src=../keys/lorry-gerrit.key.pub dest=~/.ssh/id_rsa.pub mode=644
+ sudo_user: lorry
+
+ - name: directory in /etc for Lorry Controller system configuration
+ file: dest=/etc/lorry-controller state=directory
+
+ - name: Lorry tool configuration
+ copy: src=lorry.conf dest=/etc/lorry.conf
+
+ - name: Lorry Controller system configuration
+ copy:
+ src=lorry-controller/{{ item }}
+ dest=/etc/lorry-controller/{{ item }}
+ with_items:
+ - minion.conf
+ - webapp.conf
+
+ - name: enable and restart core lorry controller services.
+ service: name={{ item }} enabled=yes state=restarted
+ with_items:
+ - lighttpd-lorry-controller-webapp.service
+ - lorry-controller-minion@1.service
+
+ - name: enable lorry-controller scheduled activity timers
+ service: name={{ item }} enabled=yes
+ with_items:
+ - lorry-controller-ls-troves.timer
+ - lorry-controller-readconf.timer
+ - lorry-controller-remove-ghost-jobs.timer
+ - lorry-controller-remove-old-jobs.timer
+ - lorry-controller-status.timer
+
+ - name: gerrit-replication configuration
+ copy: src=replication.config dest=/srv/gerrit/etc
+ notify:
+ - restart gerrit
+
+ handlers:
+ - name: restart gerrit
+ service: name=gerrit state=restarted
diff --git a/baserock_gerrit/lorry-controller.conf b/baserock_gerrit/lorry-controller.conf
new file mode 100644
index 00000000..3f4818fe
--- /dev/null
+++ b/baserock_gerrit/lorry-controller.conf
@@ -0,0 +1,38 @@
+[
+ {
+ "type": "trove",
+
+ "trovehost": "git.baserock.org",
+ "protocol": "http",
+
+ "prefixmap": {
+ "baserock": "baserock",
+ "delta": "delta"
+ },
+
+ "ignore": [
+ "baserock/baserock/documentation",
+ "baserock/baserock/jenkins-config",
+ "baserock/baserock/lorries",
+ "baserock/baserock/morph-cache-server",
+ "baserock/baserock/morphs",
+ "baserock/baserock/remo",
+ "baserock/local-config/mason",
+ "baserock/site/*",
+ "baserock/tests/*",
+ "delta/*"
+ ],
+
+ "ls-interval": "4H",
+ "interval": "2M"
+ },
+
+ {
+ "type": "lorries",
+ "interval": "2M",
+ "prefix": "delta",
+ "globs": [
+ "delta-lorries/*.lorry"
+ ]
+ }
+]
diff --git a/baserock_gerrit/lorry-controller/minion.conf b/baserock_gerrit/lorry-controller/minion.conf
new file mode 100644
index 00000000..99abdba8
--- /dev/null
+++ b/baserock_gerrit/lorry-controller/minion.conf
@@ -0,0 +1,6 @@
+[config]
+log = syslog
+log-level = debug
+webapp-host = localhost
+webapp-port = 12765
+webapp-timeout = 3600
diff --git a/baserock_gerrit/lorry-controller/webapp.conf b/baserock_gerrit/lorry-controller/webapp.conf
new file mode 100644
index 00000000..755dd61e
--- /dev/null
+++ b/baserock_gerrit/lorry-controller/webapp.conf
@@ -0,0 +1,13 @@
+[config]
+log = /home/lorry/webapp.log
+log-max = 100M
+log-keep = 1
+log-level = debug
+statedb = /home/lorry/webapp.db
+configuration-directory = /home/lorry/confgit
+status-html = /home/lorry/lc-status.html
+wsgi = yes
+debug-port = 12765
+templates = /usr/share/lorry-controller/templates
+confgit-url = http://localhost:8080/local-config/lorries
+git-server-type = gerrit
diff --git a/baserock_gerrit/lorry.conf b/baserock_gerrit/lorry.conf
new file mode 100644
index 00000000..03c1177b
--- /dev/null
+++ b/baserock_gerrit/lorry.conf
@@ -0,0 +1,8 @@
+[config]
+mirror-base-url-push = ssh://lorry@localhost:29418/
+bundle = never
+tarball = never
+working-area = /home/lorry/working-area
+verbose = yes
+log = /dev/stdout
+log-level = debug
diff --git a/baserock_gerrit/replication.config b/baserock_gerrit/replication.config
new file mode 100644
index 00000000..067acc9b
--- /dev/null
+++ b/baserock_gerrit/replication.config
@@ -0,0 +1,30 @@
+# Configuration for gerrit-replication plugin.
+#
+# This handles pushing changes from gerrit.baserock.org to git.baserock.org.
+#
+# To deploy changes in this file to production, run:
+# ansible-playbook -i hosts baserock_gerrit/instance-mirroring-config.yml
+
+[remote "trove"]
+ url = ssh://git@git.baserock.org/${name}.git
+
+ # Disable force-pushing and only sync 'master' and tags.
+ #
+ # This will probably prove annoying and we'll need to mirror more branches in
+ # future. But right now there are hundreds of personal branches and I want to
+ # avoid potential push errors for branches we don't care about.
+ push = refs/heads/master:refs/heads/master
+ push = refs/tags/*:refs/tags/*
+
+ createMissingRepositories = false
+ replicatePermissions = false
+
+ # What to sync: this is a regexp that must match the whole project name.
+ projects = ^baserock/.*$
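+ # (so e.g. 'baserock/baserock/definitions' is replicated, while
+ # repositories under 'delta/' are not)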
+
+ # If true, gerrit-replication will remove remote branches that are absent in
+ # the trove. This is a bit dangerous, but necessary if we are to make gerrit
+ # the 'master'. Note that if you set 'authGroup', branches that are not
+ # visible to the configured authorisation group will also be removed. So do
+ # not set 'authGroup' to anything.
+ mirror = false
diff --git a/baserock_hosts b/baserock_hosts
new file mode 100644
index 00000000..8a376921
--- /dev/null
+++ b/baserock_hosts
@@ -0,0 +1,40 @@
+# Ansible hosts file for Baserock infrastructure.
+# See: <http://docs.ansible.com/intro_inventory.html>.
+
+# We don't have DNS working for instances in the OpenStack cloud we use, which
+# makes this file a lot more fiddly than it would be otherwise. Access to these
+# machines works because the `ansible.cfg` file in the same directory redirects
+# all SSH access through the frontend machine.
+
+[baserock]
+cache ansible_ssh_host=192.168.222.14
+devel-system-64b ansible_ssh_host=192.168.222.41
+gerrit ansible_ssh_host=192.168.222.69
+git ansible_ssh_host=192.168.222.58
+irclogs ansible_ssh_host=192.168.222.74
+#mason-armv7lhf ansible_ssh_host=192.168.222.15
+mason-x86-32 ansible_ssh_host=192.168.222.81
+mason-x86-64 ansible_ssh_host=192.168.222.80
+
+[fedora]
+frontend-haproxy ansible_ssh_host=185.43.218.170
+database-mariadb ansible_ssh_host=192.168.222.146
+mail ansible_ssh_host=192.168.222.145
+openid ansible_ssh_host=192.168.222.144
+webserver ansible_ssh_host=192.168.222.127
+opengrok ansible_ssh_host=192.168.222.149
+
+[ubuntu]
+paste ansible_ssh_host=192.168.222.6
+storyboard ansible_ssh_host=192.168.222.131
+#testgerrit ansible_ssh_host=192.168.222.46
+
+
+[baserock:vars]
+ansible_ssh_user=root
+
+[ubuntu:vars]
+ansible_ssh_user=ubuntu
+
+[fedora:vars]
+ansible_ssh_user=fedora
diff --git a/baserock_irclogs/clusters/irclogs.morph b/baserock_irclogs/clusters/irclogs.morph
new file mode 100644
index 00000000..60a0bd07
--- /dev/null
+++ b/baserock_irclogs/clusters/irclogs.morph
@@ -0,0 +1,17 @@
+name: irclogs
+kind: cluster
+systems:
+- morph: baserock_irclogs/systems/irclogs-x86_64.morph
+ deploy:
+ irclogs:
+ type: extensions/openstack
+ location: http://compute.datacentred.io:5000/v2.0/
+
+ upgrade-type: extensions/ssh-rsync
+ upgrade-location: root@192.168.222.74
+
+ DISK_SIZE: 4G
+ HOSTNAME: irclogs
+ CLOUD_INIT: true
+ KERNEL_ARGS: console=ttyS0 console=tty0
+ OPENSTACK_IMAGENAME: irclogs
diff --git a/baserock_irclogs/files/baserock.conf b/baserock_irclogs/files/baserock.conf
new file mode 100644
index 00000000..8fbea935
--- /dev/null
+++ b/baserock_irclogs/files/baserock.conf
@@ -0,0 +1,185 @@
+supybot.nick: brlogger
+supybot.nick.alternates: %s` %s_
+supybot.ident: supybot
+supybot.user:
+supybot.networks: freenode
+supybot.networks.freenode.password:
+supybot.networks.freenode.servers: irc.freenode.com:6667
+supybot.networks.freenode.channels: #automotive #baserock
+supybot.networks.freenode.channels.key:
+supybot.networks.freenode.ssl: False
+supybot.reply.format.time: %I:%M %p, %B %d, %Y
+supybot.reply.format.time.elapsed.short: False
+supybot.reply.maximumLength: 131072
+supybot.reply.mores: True
+supybot.reply.mores.maximum: 50
+supybot.reply.mores.length: 0
+supybot.reply.mores.instant: 1
+supybot.reply.oneToOne: True
+supybot.reply.whenNotCommand: True
+supybot.reply.error.detailed: False
+supybot.reply.error.inPrivate: False
+supybot.reply.error.withNotice: False
+supybot.reply.error.noCapability: False
+supybot.reply.inPrivate: False
+supybot.reply.withNotice: False
+supybot.reply.withNoticeWhenPrivate: False
+supybot.reply.withNickPrefix: True
+supybot.reply.whenNotAddressed: False
+supybot.reply.requireChannelCommandsToBeSentInChannel: False
+supybot.reply.showSimpleSyntax: False
+supybot.reply.whenAddressedBy.chars:
+supybot.reply.whenAddressedBy.strings:
+supybot.reply.whenAddressedBy.nick: True
+supybot.reply.whenAddressedBy.nick.atEnd: False
+supybot.reply.whenAddressedBy.nicks:
+supybot.followIdentificationThroughNickChanges: False
+supybot.alwaysJoinOnInvite: False
+supybot.replies.success: The operation succeeded.
+supybot.replies.error: An error has occurred and has been logged. Please\
+ contact this bot's administrator for more\
+ information.
+supybot.replies.incorrectAuthentication: Your hostmask doesn't match or your\
+ password is wrong.
+supybot.replies.noUser: I can't find %s in my user database. If you didn't\
+ give a user name, then I might not know what your\
+ user is, and you'll need to identify before this\
+ command might work.
+supybot.replies.notRegistered: You must be registered to use this command.\
+ If you are already registered, you must\
+ either identify (using the identify command)\
+ or add a hostmask matching your current\
+ hostmask (using the "hostmask add" command).
+supybot.replies.noCapability: You don't have the %s capability. If you think\
+ that you should have this capability, be sure\
+ that you are identified before trying again.\
+ The 'whoami' command can tell you if you're\
+ identified.
+supybot.replies.genericNoCapability: You're missing some capability you\
+ need. This could be because you\
+ actually possess the anti-capability\
+ for the capability that's required of\
+ you, or because the channel provides\
+ that anti-capability by default, or\
+ because the global capabilities include\
+ that anti-capability. Or, it could be\
+ because the channel or\
+ supybot.capabilities.default is set to\
+ False, meaning that no commands are\
+ allowed unless explicitly in your\
+ capabilities. Either way, you can't do\
+ what you want to do.
+supybot.replies.requiresPrivacy: That operation cannot be done in a channel.
+supybot.replies.possibleBug: This may be a bug. If you think it is, please\
+ file a bug report at <http://sourceforge.net/tr\
+ acker/?func=add&group_id=58965&atid=489447>.
+supybot.snarfThrottle: 10.0
+supybot.upkeepInterval: 3600
+supybot.flush: True
+supybot.commands.quotes: "
+supybot.commands.nested: True
+supybot.commands.nested.maximum: 10
+supybot.commands.nested.brackets: []
+supybot.commands.nested.pipeSyntax: False
+supybot.commands.defaultPlugins.addcapability: Admin
+supybot.commands.defaultPlugins.capabilities: User
+supybot.commands.defaultPlugins.disable: Owner
+supybot.commands.defaultPlugins.enable: Owner
+supybot.commands.defaultPlugins.help: Misc
+supybot.commands.defaultPlugins.ignore: Admin
+supybot.commands.defaultPlugins.importantPlugins: Plugin Admin Misc User Owner Config Channel
+supybot.commands.defaultPlugins.list: Misc
+supybot.commands.defaultPlugins.reload: Owner
+supybot.commands.defaultPlugins.removecapability: Admin
+supybot.commands.defaultPlugins.unignore: Admin
+supybot.commands.disabled:
+supybot.abuse.flood.command: True
+supybot.abuse.flood.command.maximum: 12
+supybot.abuse.flood.command.punishment: 300
+supybot.abuse.flood.command.invalid: True
+supybot.abuse.flood.command.invalid.maximum: 5
+supybot.abuse.flood.command.invalid.punishment: 600
+supybot.drivers.poll: 1.0
+supybot.drivers.module: default
+supybot.drivers.maxReconnectWait: 300.0
+supybot.directories.conf: /home/supybot/conf
+supybot.directories.data: /home/supybot/data
+supybot.directories.data.tmp: /home/supybot/data/tmp
+supybot.directories.backup: /home/supybot/backup
+supybot.directories.plugins: /home/supybot/plugins
+supybot.directories.log: /home/supybot/logs
+supybot.plugins: Admin ChannelLogger Misc User Owner Config Channel
+supybot.plugins.Admin: True
+supybot.plugins.Admin.public: True
+supybot.plugins.Channel: True
+supybot.plugins.Channel.public: True
+supybot.plugins.Channel.alwaysRejoin: True
+supybot.plugins.ChannelLogger: True
+supybot.plugins.ChannelLogger.public: True
+supybot.plugins.ChannelLogger.enable: True
+supybot.plugins.ChannelLogger.flushImmediately: False
+supybot.plugins.ChannelLogger.stripFormatting: True
+supybot.plugins.ChannelLogger.timestamp: True
+supybot.plugins.ChannelLogger.noLogPrefix: [nolog]
+supybot.plugins.ChannelLogger.rotateLogs: True
+supybot.plugins.ChannelLogger.filenameTimestamp: %Y-%m-%d
+supybot.plugins.ChannelLogger.directories: True
+supybot.plugins.ChannelLogger.directories.network: True
+supybot.plugins.ChannelLogger.directories.channel: True
+supybot.plugins.ChannelLogger.directories.timestamp: False
+supybot.plugins.ChannelLogger.directories.timestamp.format: %B
+supybot.plugins.Config: True
+supybot.plugins.Config.public: True
+supybot.plugins.Misc: True
+supybot.plugins.Misc.public: True
+supybot.plugins.Misc.listPrivatePlugins: True
+supybot.plugins.Misc.timestampFormat: [%H:%M:%S]
+supybot.plugins.Misc.last.nested.includeTimestamp: False
+supybot.plugins.Misc.last.nested.includeNick: False
+supybot.plugins.Owner: True
+supybot.plugins.Owner.public: True
+supybot.plugins.Owner.quitMsg:
+supybot.plugins.User: True
+supybot.plugins.User.public: True
+supybot.plugins.alwaysLoadImportant: True
+supybot.databases:
+supybot.databases.users.filename: users.conf
+supybot.databases.users.timeoutIdentification: 0
+supybot.databases.users.allowUnregistration: False
+supybot.databases.ignores.filename: ignores.conf
+supybot.databases.channels.filename: channels.conf
+supybot.databases.plugins.channelSpecific: True
+supybot.databases.plugins.channelSpecific.link: #
+supybot.databases.plugins.channelSpecific.link.allow: True
+supybot.databases.types.cdb: True
+supybot.databases.types.cdb.maximumModifications: 0.5
+supybot.protocols.irc.banmask: host user
+supybot.protocols.irc.strictRfc: False
+supybot.protocols.irc.umodes:
+supybot.protocols.irc.vhost:
+supybot.protocols.irc.maxHistoryLength: 1000
+supybot.protocols.irc.throttleTime: 1.0
+supybot.protocols.irc.ping: True
+supybot.protocols.irc.ping.interval: 120
+supybot.protocols.irc.queuing.duplicates: False
+supybot.protocols.irc.queuing.rateLimit.join: 0.0
+supybot.protocols.http.peekSize: 4096
+supybot.protocols.http.proxy:
+supybot.defaultIgnore: False
+supybot.externalIP:
+supybot.defaultSocketTimeout: 10
+supybot.pidFile:
+supybot.debug.threadAllCommands: False
+supybot.debug.flushVeryOften: False
+supybot.log.format: %(levelname)s %(asctime)s %(name)s %(message)s
+supybot.log.level: INFO
+supybot.log.timestampFormat: %Y-%m-%dT%H:%M:%S
+supybot.log.stdout: True
+supybot.log.stdout.colorized: False
+supybot.log.stdout.wrap: True
+supybot.log.stdout.format: %(levelname)s %(asctime)s %(message)s
+supybot.log.stdout.level: INFO
+supybot.log.plugins.individualLogfiles: False
+supybot.log.plugins.format: %(levelname)s %(asctime)s %(message)s
+supybot.capabilities: -owner -admin -trusted
+supybot.capabilities.default: True
diff --git a/baserock_irclogs/files/irclogs-generation.service b/baserock_irclogs/files/irclogs-generation.service
new file mode 100644
index 00000000..89dd6009
--- /dev/null
+++ b/baserock_irclogs/files/irclogs-generation.service
@@ -0,0 +1,19 @@
+[Unit]
+Description=Irclogs generation
+Requires=supybot.service
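+# Requires= alone does not imply ordering; After= makes sure supybot has
+# started before the logs are rendered.
+After=supybot.service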
+
+[Service]
+Type=oneshot
+User=supybot
+ExecStart=/usr/bin/logs2html -t 'IRC logs for #baserock' -p 'IRC logs for #baserock for ' /home/supybot/logs/ChannelLogger/freenode/#baserock/
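+# The rsync steps run through /bin/sh so that the shell expands the globs;
+# systemd does not expand globs in ExecStart arguments.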
+ExecStart=/bin/sh -c "/usr/bin/rsync -a /home/supybot/logs/ChannelLogger/freenode/\#baserock/*html /home/supybot/logs/ChannelLogger/freenode/\#baserock/*css /srv/irclogs/"
+
+ExecStart=/usr/bin/logs2html -t 'IRC logs for #automotive' -p 'IRC logs for #automotive for ' /home/supybot/logs/ChannelLogger/freenode/#automotive/
+ExecStart=/bin/sh -c "/usr/bin/rsync -a /home/supybot/logs/ChannelLogger/freenode/\#automotive/*html /home/supybot/logs/ChannelLogger/freenode/\#automotive/*css /srv/irclogs/automotive"
+
+[Install]
+WantedBy=multi-user.target
diff --git a/baserock_irclogs/files/irclogs-generation.timer b/baserock_irclogs/files/irclogs-generation.timer
new file mode 100644
index 00000000..c236c3d6
--- /dev/null
+++ b/baserock_irclogs/files/irclogs-generation.timer
@@ -0,0 +1,9 @@
+[Unit]
+Description=Generate the HTML irclogs every 5 minutes
+
+[Timer]
+OnUnitActiveSec=5min
+Unit=irclogs-generation.service
+
+[Install]
+WantedBy=multi-user.target
diff --git a/baserock_irclogs/files/lighttpd-irclogs.conf b/baserock_irclogs/files/lighttpd-irclogs.conf
new file mode 100644
index 00000000..0b40346a
--- /dev/null
+++ b/baserock_irclogs/files/lighttpd-irclogs.conf
@@ -0,0 +1,18 @@
+server.document-root = "/srv/irclogs/"
+
+server.port = 80
+
+server.username = "supybot"
+server.groupname = "supybot"
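+# Run as the supybot user, which owns /srv/irclogs and the files that the
+# irclogs-generation service copies into it.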
+
+mimetype.assign = (
+ ".html" => "text/html",
+ ".css" => "text/css",
+ ".txt" => "text/plain",
+ ".jpg" => "image/jpeg",
+ ".png" => "image/png"
+)
+
+index-file.names = ( "index.html" )
diff --git a/baserock_irclogs/files/lighttpd-irclogs.service b/baserock_irclogs/files/lighttpd-irclogs.service
new file mode 100644
index 00000000..1c09b0d9
--- /dev/null
+++ b/baserock_irclogs/files/lighttpd-irclogs.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Lighttpd Web Server
+After=network.target
+
+[Service]
+ExecStart=/usr/sbin/lighttpd -f /etc/lighttpd-irclogs.conf -D
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/baserock_irclogs/files/supybot.service b/baserock_irclogs/files/supybot.service
new file mode 100644
index 00000000..49720f70
--- /dev/null
+++ b/baserock_irclogs/files/supybot.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=Run supybot daemon
+Requires=network-online.target
+After=network-online.target
+# If there's a shared /home or /var subvolume, it must be
+# mounted before this unit runs.
+Requires=local-fs.target
+After=local-fs.target
+
+ConditionPathExists=/home/supybot/supybot-baserock.conf
+
+[Service]
+ExecStart=/usr/bin/supybot /home/supybot/supybot-baserock.conf
+User=supybot
+Restart=always
diff --git a/baserock_irclogs/irclogs.configure b/baserock_irclogs/irclogs.configure
new file mode 100644
index 00000000..8a2421ef
--- /dev/null
+++ b/baserock_irclogs/irclogs.configure
@@ -0,0 +1,47 @@
+#!/bin/sh
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
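+# Create the supybot user and group with fixed UID/GID 1010, and the
+# directories for the bot's home and the published HTML logs.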
+echo 'supybot:x:1010:1010:Supybot User:/home/supybot:/bin/bash' >> "$ROOT/etc/passwd"
+echo 'supybot:x:1010:' >> "$ROOT/etc/group"
+mkdir -p "$ROOT/home/supybot"
+mkdir -p "$ROOT/srv/irclogs"
+chown -R 1010:1010 "$ROOT/home/supybot"
+chown -R 1010:1010 "$ROOT/srv/irclogs"
+
+# Copy supybot configuration
+install -m 644 -g 1010 -o 1010 baserock_irclogs/files/baserock.conf "$ROOT"/home/supybot/supybot-baserock.conf
+
+
+# Enable all the services needed
+services="irclogs-generation.service \
+irclogs-generation.timer \
+lighttpd-irclogs.service \
+supybot.service"
+
+for service in $services; do
+ cp "baserock_irclogs/files/$service" "$ROOT/etc/systemd/system/$service"
+ ln -sf "/etc/systemd/system/$service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$service"
+done
+
+# Copy lighttpd configuration
+cp baserock_irclogs/files/lighttpd-irclogs.conf "$ROOT"/etc/lighttpd-irclogs.conf
diff --git a/baserock_irclogs/strata/irclogs.morph b/baserock_irclogs/strata/irclogs.morph
new file mode 100644
index 00000000..3dd7081c
--- /dev/null
+++ b/baserock_irclogs/strata/irclogs.morph
@@ -0,0 +1,18 @@
+name: irclogs
+kind: stratum
+description: Tools to create irclogs of an IRC channel
+build-depends:
+- morph: strata/python2-core.morph
+chunks:
+- name: supybot
+ repo: http://gitorious.org/supybot/supybot.git
+ ref: 27a4ef0ed338a38f34180012cee7ec55a5ae11d9
+ unpetrify-ref: v0.83.4.1
+ build-depends: []
+ build-system: python-distutils
+- name: irclog2html
+ repo: git://github.com/mgedmin/irclog2html
+ ref: 2e399c2bdbe2442794d0ac7aa3a3941f826c74dc
+ unpetrify-ref: 2.14.0
+ build-depends: []
+ build-system: python-distutils
diff --git a/baserock_irclogs/systems/irclogs-x86_64.morph b/baserock_irclogs/systems/irclogs-x86_64.morph
new file mode 100644
index 00000000..03eb7409
--- /dev/null
+++ b/baserock_irclogs/systems/irclogs-x86_64.morph
@@ -0,0 +1,33 @@
+name: irclogs-system-x86_64
+kind: system
+description: |
+ The IRC logging system that runs at <http://irclogs.baserock.org>.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: python2-core
+ morph: strata/python2-core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: irclogs
+ morph: baserock_irclogs/strata/irclogs.morph
+- name: pcre-utils
+ morph: strata/pcre-utils.morph
+- name: lighttpd-server
+ morph: strata/lighttpd-server.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+configuration-extensions:
+- extensions/set-hostname
+- extensions/add-config-files
+- extensions/nfsboot
+- extensions/install-files
+- extensions/cloud-init
+- baserock_irclogs/irclogs
diff --git a/baserock_mail/image-config.yml b/baserock_mail/image-config.yml
new file mode 100644
index 00000000..8d65b4f7
--- /dev/null
+++ b/baserock_mail/image-config.yml
@@ -0,0 +1,22 @@
+# System configuration for Baserock mail relay.
+#
+# This Ansible playbook expects to be run on a Fedora 23 Cloud image.
+---
+- hosts: mail
+ gather_facts: false
+ sudo: yes
+ tasks:
+ # See: https://fedoramagazine.org/getting-ansible-working-fedora-23/
+ - name: install Python2 and required deps for Ansible modules
+ raw: dnf install -y python2 python2-dnf libselinux-python
+
+  - name: enable persistent journal
+ shell: mkdir /var/log/journal
+ args:
+ creates: /var/log/journal
+
+ - name: ensure system up to date
+ dnf: name=* state=latest
+
+  - name: install exim
+    dnf: name=exim state=installed
diff --git a/baserock_mail/instance-config.yml b/baserock_mail/instance-config.yml
new file mode 100644
index 00000000..b3cd3999
--- /dev/null
+++ b/baserock_mail/instance-config.yml
@@ -0,0 +1,72 @@
+# Configuration for Baserock mail relay
+#
+# This Ansible playbook expects to be run after the image-config.yml playbook.
+---
+- hosts: mail
+ gather_facts: false
+ sudo: yes
+ vars:
+ LOCAL_IP: 192.168.222.145
+ PUBLIC_DOMAIN_NAME: mail.baserock.org
+ tasks:
+  # Fedora provides a default /etc/exim/exim.conf. Since we only need to make
+  # a few changes, I've used the lineinfile module to search-and-replace
+  # within it, rather than copying in and overwriting the whole file. It's a
+  # bit ugly though; it may be better to just embed exim.conf.
+
+ # Several restrictions here are also enforced by the internal-mail-relay
+ # security group in firewall.yml, which only opens port 25, and only for
+ # traffic from the local network.
+
+ # This machine is only for sending mail.
+ - name: do not accept any incoming mail
+ lineinfile:
+ regexp: '^domainlist\s+local_domains.*$'
+ line: 'domainlist local_domains = '
+ dest: /etc/exim/exim.conf
+
+ - name: only accept mail from local network
+ lineinfile:
+ regexp: '^hostlist\s+relay_from_hosts.*$'
+ line: 'hostlist relay_from_hosts = 192.168.222.0/24'
+ dest: /etc/exim/exim.conf
+
+ - name: only listen on internal interface
+ lineinfile:
+ regexp: '^#?local_interfaces.*$'
+ line: 'local_interfaces = <; ::1 ; 127.0.0.1 ; {{ LOCAL_IP }}'
+ insertbefore: BOF
+ dest: /etc/exim/exim.conf
+
+ # The automation email addresses like gerrit@baserock.org do have aliases,
+ # but these are currently configured at Pepperfish, where our MX (mail)
+ # records for baserock.org point. So Exim thinks they are not routable
+ # and refuses to send mail from them, unless we disable this. Note that
+ # the address does have to be routable by something, or the receiving mail
+ # server may reject the mail anyway.
+ - name: do not verify that sender is routable within this Exim instance
+ lineinfile:
+ regexp: '^#?\s*require\s+verify\s+=\s+sender.*$'
+ line: '# require verify = sender'
+ dest: /etc/exim/exim.conf
+
+ # We don't have DNS in the internal baserock.org cloud right now, so this
+ # would be pointless.
+ - name: do not try to resolve hosts making SMTP requests
+ lineinfile:
+ regexp: '^#?\s+host_lookup = .*$'
+ line: '# host_lookup = *'
+ dest: /etc/exim/exim.conf
+
+  # The hostname of the machine will be 'mail', which isn't a fully-qualified
+  # domain name, so it would be rejected by SMTP servers. Use
+  # mail.baserock.org instead, which should point at the floating IP of this
+  # machine.
+  - name: set primary hostname to public domain name
+ lineinfile:
+ regexp: '^#?\s+primary_hostname =.*$'
+ line: 'primary_hostname = {{ PUBLIC_DOMAIN_NAME }}'
+ dest: /etc/exim/exim.conf
+
+  - name: enable and start exim service
+    service: name=exim state=started enabled=yes
diff --git a/baserock_mason_x86_32/distbuild.conf b/baserock_mason_x86_32/distbuild.conf
new file mode 100644
index 00000000..2b344783
--- /dev/null
+++ b/baserock_mason_x86_32/distbuild.conf
@@ -0,0 +1,19 @@
+# This machine is not allowed to make outgoing network connections outside
+# the local network, so it cannot use DNS. You must use IP addresses instead
+# of hostnames in this file.
+
+CONTROLLERHOST: mason-x86-32
+
+# This is the IP of cache.baserock.org. Note that the shared-artifact-cache
+# secgroup only allows write access with this local IP.
+ARTIFACT_CACHE_SERVER: 192.168.222.14
+
+DISTBUILD_CONTROLLER: true
+DISTBUILD_WORKER: true
+
+TROVE_HOST: git.baserock.org
+TROVE_ID: baserock
+WORKERS: mason-x86-32
+
+# Do not change the following
+WORKER_SSH_KEY: /etc/distbuild/worker.key
diff --git a/baserock_mason_x86_32/mason-x86-32.morph b/baserock_mason_x86_32/mason-x86-32.morph
new file mode 100644
index 00000000..ceeb66da
--- /dev/null
+++ b/baserock_mason_x86_32/mason-x86-32.morph
@@ -0,0 +1,26 @@
+name: mason-x86-32
+kind: cluster
+description: |
+ Generic x86_32 Mason image.
+systems:
+- morph: systems/build-system-x86_32.morph
+ deploy:
+ mason:
+ upgrade-type: extensions/ssh-rsync
+ upgrade-location: root@192.168.222.81
+
+      type: extensions/openstack
+ location: https://compute.datacentred.io:5000/v2.0
+ CLOUD_INIT: true
+ DISK_SIZE: 3G
+ DISTBUILD_GENERIC: true
+ INSTALL_FILES: install-files/distbuild/manifest
+ KERNEL_ARGS: console=ttyS0 console=tty0
+ MASON_GENERIC: true
+ OPENSTACK_IMAGENAME: baserock_mason_x86_32
+ # Note that this *must* match the name of the instance, as
+ # cloud-init will override this hostname.
+ HOSTNAME: mason-x86-32
+      # Set a hosts entry for git.baserock.org. It will only be possible to
+      # reach it using the internal IP address.
+ HOSTS_gbo: 192.168.222.58 git.baserock.org
diff --git a/baserock_mason_x86_32/mason.conf b/baserock_mason_x86_32/mason.conf
new file mode 100644
index 00000000..83625415
--- /dev/null
+++ b/baserock_mason_x86_32/mason.conf
@@ -0,0 +1,18 @@
+# This machine is not allowed to make outgoing network connections outside
+# the local network, so it cannot use DNS. You must use IP addresses instead
+# of hostnames in this file.
+
+# This is the IP of cache.baserock.org. Note that the shared-artifact-cache
+# secgroup only allows write access with this local IP.
+ARTIFACT_CACHE_SERVER: 192.168.222.14
+
+MASON_CLUSTER_MORPHOLOGY: clusters/ci.morph
+MASON_DEFINITIONS_REF: master
+MASON_DISTBUILD_ARCH: x86_32
+MASON_TEST_HOST: None
+
+TROVE_HOST: git.baserock.org
+TROVE_ID: baserock
+
+CONTROLLERHOST: mason-x86-32
+TEST_INFRASTRUCTURE_TYPE: none
diff --git a/baserock_mason_x86_64/distbuild.conf b/baserock_mason_x86_64/distbuild.conf
new file mode 100644
index 00000000..b184e16b
--- /dev/null
+++ b/baserock_mason_x86_64/distbuild.conf
@@ -0,0 +1,19 @@
+# This machine is not allowed to make outgoing network connections outside
+# the local network, so it cannot use DNS. You must use IP addresses instead
+# of hostnames in this file.
+
+CONTROLLERHOST: mason-x86-64
+
+# This is the IP of cache.baserock.org. Note that the shared-artifact-cache
+# secgroup only allows write access with this local IP.
+ARTIFACT_CACHE_SERVER: 192.168.222.14
+
+DISTBUILD_CONTROLLER: true
+DISTBUILD_WORKER: true
+
+TROVE_HOST: git.baserock.org
+TROVE_ID: baserock
+WORKERS: mason-x86-64
+
+# Do not change the following
+WORKER_SSH_KEY: /etc/distbuild/worker.key
diff --git a/baserock_mason_x86_64/mason-x86-64.morph b/baserock_mason_x86_64/mason-x86-64.morph
new file mode 100644
index 00000000..7717f03d
--- /dev/null
+++ b/baserock_mason_x86_64/mason-x86-64.morph
@@ -0,0 +1,27 @@
+name: mason-x86-64
+kind: cluster
+description: |
+ Generic x86_64 Mason image.
+systems:
+- morph: systems/build-system-x86_64.morph
+ deploy:
+ mason:
+ type: extensions/openstack
+ location: https://compute.datacentred.io:5000/v2.0
+
+ upgrade-type: extensions/ssh-rsync
+ upgrade-location: root@192.168.222.80
+
+ CLOUD_INIT: true
+ DISK_SIZE: 3G
+ DISTBUILD_GENERIC: true
+ INSTALL_FILES: install-files/distbuild/manifest
+ KERNEL_ARGS: console=ttyS0 console=tty0
+ MASON_GENERIC: true
+ OPENSTACK_IMAGENAME: baserock_mason_x86_64
+ # Note that this *must* match the name of the instance, as
+ # cloud-init will override this hostname.
+ HOSTNAME: mason-x86-64
+      # Set a hosts entry for git.baserock.org. It will only be possible to
+      # reach it using the internal IP address.
+ HOSTS_gbo: 192.168.222.58 git.baserock.org
diff --git a/baserock_mason_x86_64/mason.conf b/baserock_mason_x86_64/mason.conf
new file mode 100644
index 00000000..4532720f
--- /dev/null
+++ b/baserock_mason_x86_64/mason.conf
@@ -0,0 +1,18 @@
+# This machine is not allowed to make outgoing network connections outside
+# the local network, so it cannot use DNS. You must use IP addresses instead
+# of hostnames in this file.
+
+# This is the IP of cache.baserock.org. Note that the shared-artifact-cache
+# secgroup only allows write access with this local IP.
+ARTIFACT_CACHE_SERVER: 192.168.222.14
+
+MASON_CLUSTER_MORPHOLOGY: clusters/ci.morph
+MASON_DEFINITIONS_REF: master
+MASON_DISTBUILD_ARCH: x86_64
+MASON_TEST_HOST: None
+
+TROVE_HOST: git.baserock.org
+TROVE_ID: baserock
+
+CONTROLLERHOST: mason-x86-64
+TEST_INFRASTRUCTURE_TYPE: none
diff --git a/baserock_opengrok/baserock-export.service b/baserock_opengrok/baserock-export.service
new file mode 100644
index 00000000..5b48152a
--- /dev/null
+++ b/baserock_opengrok/baserock-export.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Baserock Export daemon
+After=local-fs.target network-online.target
+
+[Service]
+User=opengrok
+ExecStart={{ EXPORT_WORKDIR }}/export.sh
+
+
+[Install]
+WantedBy=multi-user.target
diff --git a/baserock_opengrok/baserock-export.timer b/baserock_opengrok/baserock-export.timer
new file mode 100644
index 00000000..89e9647b
--- /dev/null
+++ b/baserock_opengrok/baserock-export.timer
@@ -0,0 +1,10 @@
+[Unit]
+Description=Runs baserock-export with 5 min between calls
+
+[Timer]
+#Time between baserock-export finishing and calling it again
+OnUnitActiveSec=5min
+Unit=baserock-export.service
+
+[Install]
+WantedBy=multi-user.target
diff --git a/baserock_opengrok/clone-and-index.service b/baserock_opengrok/clone-and-index.service
new file mode 100644
index 00000000..ff9db508
--- /dev/null
+++ b/baserock_opengrok/clone-and-index.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=OpenGrok index daemon
+After=local-fs.target network-online.target
+
+[Service]
+User=opengrok
+ExecStart={{ OPENGROK_BASE }}/clone-and-index.sh
+
+
+[Install]
+WantedBy=multi-user.target
diff --git a/baserock_opengrok/clone-and-index.sh b/baserock_opengrok/clone-and-index.sh
new file mode 100644
index 00000000..10a8faac
--- /dev/null
+++ b/baserock_opengrok/clone-and-index.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
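+# Clone the exported-definitions tree produced by export.sh (or update an
+# existing clone), check out its submodules, then run the OpenGrok indexer
+# over the result.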
+
+
+dir={{ OPENGROK_BASE }}/source
+if [ ! -d $dir/.git ]; then
+ git clone /opt/export-workdir/exported-definitions/ $dir
+fi
+
+git --git-dir="$dir/.git" --work-tree="$dir" pull
+(cd $dir && git submodule init)
+(cd $dir && git submodule sync)
+(cd $dir && git submodule update)
+git --git-dir="$dir/.git" --work-tree="$dir" clean -xdff
+
+OPENGROK_INSTANCE_BASE={{ OPENGROK_BASE }} {{ OPENGROK_BASE }}/bin/OpenGrok index {{ OPENGROK_BASE }}/source/
diff --git a/baserock_opengrok/clone-and-index.timer b/baserock_opengrok/clone-and-index.timer
new file mode 100644
index 00000000..e7cc4259
--- /dev/null
+++ b/baserock_opengrok/clone-and-index.timer
@@ -0,0 +1,10 @@
+[Unit]
+Description=Runs OpenGrok index with 5 min between calls
+
+[Timer]
+#Time between clone-and-index finishing and calling it again
+OnUnitActiveSec=5min
+Unit=clone-and-index.service
+
+[Install]
+WantedBy=multi-user.target
diff --git a/baserock_opengrok/export.sh b/baserock_opengrok/export.sh
new file mode 100644
index 00000000..d6a18d46
--- /dev/null
+++ b/baserock_opengrok/export.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
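+# Clone or update the Baserock definitions and morph repositories, then use
+# the baserock-export scripts to publish the minimal system's sources as a
+# git tree with submodules, for clone-and-index.sh to index.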
+
+DEFINITIONS_DIR="{{ EXPORT_WORKDIR }}/definitions"
+DEFINITIONS_URL="git://git.baserock.org/baserock/baserock/definitions"
+
+MORPH_DIR="{{ EXPORT_WORKDIR }}/morph"
+MORPH_URL="git://git.baserock.org/baserock/baserock/morph"
+
+EXPORT_DIR="{{ EXPORT_WORKDIR }}/exported-definitions"
+
+clone_or_pull() {
+ repo=$1
+ dir=$2
+ if [ -d "$dir" ]; then
+ git --git-dir="$dir/.git" --work-tree="$dir" pull
+ else
+ git clone $repo $dir
+ fi
+}
+
+
+clone_or_pull $DEFINITIONS_URL $DEFINITIONS_DIR
+clone_or_pull $MORPH_URL $MORPH_DIR
+
+if [ ! -d "$EXPORT_DIR" ]; then
+ git init "$EXPORT_DIR"
+fi
+
+
+git config --global user.email "export@baserock.com"
+git config --global user.name "Baserock Export Daemon"
+
+PYTHONPATH={{ EXPORT_WORKDIR }}/morph python \
+ {{ BASEROCK_EXPORT }}/baserock-export-git-submodules.py \
+ --git-cache-dir {{ EXPORT_WORKDIR }}/cache \
+ --mode submodule \
+ $DEFINITIONS_DIR/systems/minimal-system-x86_64-generic.morph \
+ "$EXPORT_DIR"
diff --git a/baserock_opengrok/index.jsp b/baserock_opengrok/index.jsp
new file mode 100644
index 00000000..418c98f2
--- /dev/null
+++ b/baserock_opengrok/index.jsp
@@ -0,0 +1,3 @@
+<%
+ response.sendRedirect("/source");
+%>
diff --git a/baserock_opengrok/instance-config.yml b/baserock_opengrok/instance-config.yml
new file mode 100644
index 00000000..836e805b
--- /dev/null
+++ b/baserock_opengrok/instance-config.yml
@@ -0,0 +1,164 @@
+# Configuration for Baserock OpenGrok system image.
+#
+# This expects to be run on a Fedora 23 cloud image.
+---
+- hosts: opengrok
+ gather_facts: false
+ sudo: yes
+ vars:
+ OG_VERSION: 0.12.1.5
+ EXPORT_WORKDIR: /opt/export-workdir
+ BASEROCK_EXPORT: /opt/baserock-export
+ OPENGROK_BASE: /opt/opengrok
+ tasks:
+ # See: https://fedoramagazine.org/getting-ansible-working-fedora-23/
+ - name: install Python2 and required deps for Ansible modules
+ raw: dnf install -y python2 python2-dnf libselinux-python
+
+  - name: enable persistent journal
+ shell: mkdir /var/log/journal
+ args:
+ creates: /var/log/journal
+
+ - name: ensure system up to date
+ dnf: name=* state=latest
+
+ - name: Install Tomcat, wget, git, and ctags packages
+ dnf:
+ name: "{{ item }}"
+ state: latest
+ with_items:
+ - tomcat
+ - wget
+ - git
+ - ctags
+
+  - name: Enable and start Tomcat
+    service:
+      name: tomcat
+      enabled: yes
+      state: started
+
+ - name: add opengrok user
+ user:
+ name: opengrok
+ shell: /bin/false
+
+ - name: create /opt/.. directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: opengrok
+ group: opengrok
+ with_items:
+ - "{{ OPENGROK_BASE }}"
+ - /opt/downloads
+ - "{{ BASEROCK_EXPORT }}"
+ - "{{ EXPORT_WORKDIR }}"
+
+ - name: Download opengrok
+ shell: wget https://java.net/projects/opengrok/downloads/download/opengrok-{{ OG_VERSION }}.tar.gz -O /opt/downloads/opengrok-{{ OG_VERSION }}.tar.gz
+ args:
+ creates: /opt/downloads/opengrok-{{ OG_VERSION }}.tar.gz
+ sudo_user: opengrok
+
+ - name: Unpack opengrok
+ unarchive:
+ src: /opt/downloads/opengrok-{{ OG_VERSION }}.tar.gz
+ copy: no
+ dest: /opt/downloads
+ owner: opengrok
+ group: opengrok
+ creates: /opt/downloads/opengrok-{{ OG_VERSION }}
+ register: opengrok_unpacked
+
+ - name: Copy OpenGrok to the right location
+ shell: cp -r /opt/downloads/opengrok-{{ OG_VERSION }}/* "{{ OPENGROK_BASE }}"
+ when: opengrok_unpacked|changed
+
+ - name: Install morph dependencies
+ pip:
+ name: "{{ item }}"
+ with_items:
+ - fs
+ - pylru
+ - pyyaml
+ - jsonschema
+
+ - name: Downloading baserock-export scripts
+ git:
+ repo: git://github.com/ssssam/baserock-export
+ dest: "{{ BASEROCK_EXPORT }}"
+ accept_hostkey: yes
+ sudo_user: opengrok
+
+ - name: Install baserock-export wrapper script
+ template:
+ src: export.sh
+ dest: "{{ EXPORT_WORKDIR }}/export.sh"
+ mode: 0755
+ owner: opengrok
+ group: opengrok
+
+ - name: Install baserock-export service
+ template:
+ src: baserock-export.service
+ dest: /etc/systemd/system/baserock-export.service
+
+ - name: Install baserock-export timer
+ copy:
+ src: baserock-export.timer
+ dest: /etc/systemd/system/baserock-export.timer
+
+ - name: Enable and start baserock-export services
+ service:
+ name: "{{ item }}"
+ enabled: yes
+ state: started
+ with_items:
+ - baserock-export.timer
+ - baserock-export.service
+
+ - name: Deploy OpenGrok app in Tomcat
+ shell: OPENGROK_TOMCAT_BASE=/var/lib/tomcat "{{ OPENGROK_BASE }}/bin/OpenGrok" deploy
+
+ - name: Create ROOT folder for Tomcat
+ file:
+ path: /var/lib/tomcat/webapps/ROOT
+ state: directory
+ owner: tomcat
+ group: tomcat
+
+ - name: Redirect / to /source (OpenGrok) in Tomcat
+ copy:
+ src: index.jsp
+ dest: /var/lib/tomcat/webapps/ROOT/index.jsp
+ owner: tomcat
+ group: tomcat
+
+ - name: Install clone-and-index wrapper script
+ template:
+ src: clone-and-index.sh
+ dest: "{{ OPENGROK_BASE }}/clone-and-index.sh"
+ mode: 0755
+ owner: opengrok
+ group: opengrok
+
+ - name: Install clone-and-index service
+ template:
+ src: clone-and-index.service
+ dest: /etc/systemd/system/clone-and-index.service
+
+ - name: Install clone-and-index timer
+ copy:
+ src: clone-and-index.timer
+ dest: /etc/systemd/system/clone-and-index.timer
+
+ - name: Enable and start clone-and-index services
+ service:
+ name: "{{ item }}"
+ enabled: yes
+ state: started
+ with_items:
+ - clone-and-index.timer
+ - clone-and-index.service
diff --git a/baserock_openid_provider/baserock_openid_provider/__init__.py b/baserock_openid_provider/baserock_openid_provider/__init__.py
new file mode 100644
index 00000000..8dd54d2a
--- /dev/null
+++ b/baserock_openid_provider/baserock_openid_provider/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+import signals
diff --git a/baserock_openid_provider/baserock_openid_provider/forms.py b/baserock_openid_provider/baserock_openid_provider/forms.py
new file mode 100644
index 00000000..dd6a414d
--- /dev/null
+++ b/baserock_openid_provider/baserock_openid_provider/forms.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+from registration.forms import RegistrationForm
+
+from django import forms
+from django.utils.translation import ugettext_lazy as _
+
+
+class RegistrationFormWithNames(RegistrationForm):
+ # I'd rather just have a 'Full name' box, but django.contrib.auth is
+ # already set up to separate first_name and last_name.
+
+ first_name = forms.CharField(label=_("First name(s)"),
+ required=False)
+ last_name = forms.CharField(label=_("Surname"))
diff --git a/baserock_openid_provider/baserock_openid_provider/settings.py b/baserock_openid_provider/baserock_openid_provider/settings.py
new file mode 100644
index 00000000..b4d38c2c
--- /dev/null
+++ b/baserock_openid_provider/baserock_openid_provider/settings.py
@@ -0,0 +1,178 @@
+"""
+Django settings for baserock_openid_provider project.
+
+For more information on this file, see
+https://docs.djangoproject.com/en/1.7/topics/settings/
+
+For the full list of settings and their values, see
+https://docs.djangoproject.com/en/1.7/ref/settings/
+"""
+
+import yaml
+
+import os
+
+# You must ensure this is the correct IP address!
+DATABASE_HOST = '192.168.222.146'
+
+BASE_DIR = os.path.dirname(os.path.dirname(__file__))
+
+# Quick-start development settings - unsuitable for production
+# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
+
+# SECURITY WARNING: keep the secret key used in production secret!
+secret_key_file = '/etc/baserock_openid_provider.secret_key.yml'
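+# This file is installed by instance-config.yml; it is expected to hold a
+# single key, for example (value illustrative):
+#   baserock_openid_provider_secret_key: some-long-random-string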
+with open(secret_key_file) as f:
+ data = yaml.load(f)
+ SECRET_KEY = data['baserock_openid_provider_secret_key']
+
+# SECURITY WARNING: don't run with debug turned on in production!
+DEBUG = False
+
+TEMPLATE_DEBUG = True
+
+ALLOWED_HOSTS = [
+ 'openid.baserock.org',
+]
+
+# All connections for openid.baserock.org are forced through HTTPS by HAProxy.
+# This line is necessary so that the Django code generates https:// rather than
+# http:// URLs for internal redirects.
+#
+# You MUST remove this line if this application is not running behind a proxy
+# that forces all traffic through HTTPS.
+SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
+
+
+# Application definition
+
+INSTALLED_APPS = (
+ 'baserock_openid_provider',
+ 'django.contrib.admin',
+ 'django.contrib.auth',
+ 'django.contrib.contenttypes',
+ 'django.contrib.sessions',
+ 'django.contrib.messages',
+ 'django.contrib.staticfiles',
+ 'openid_provider',
+ 'registration'
+)
+
+MIDDLEWARE_CLASSES = (
+ 'django.contrib.sessions.middleware.SessionMiddleware',
+ 'django.middleware.common.CommonMiddleware',
+ 'django.middleware.csrf.CsrfViewMiddleware',
+ 'django.contrib.auth.middleware.AuthenticationMiddleware',
+ 'django.contrib.messages.middleware.MessageMiddleware',
+ 'django.middleware.clickjacking.XFrameOptionsMiddleware',
+)
+
+ROOT_URLCONF = 'baserock_openid_provider.urls'
+
+WSGI_APPLICATION = 'baserock_openid_provider.wsgi.application'
+
+
+# Logging
+
+LOGGING = {
+ 'version': 1,
+ 'disable_existing_loggers': False,
+ 'formatters': {
+ 'simple': {
+ 'format': '%(asctime)s %(message)s'
+ }
+ },
+ 'handlers': {
+ 'file': {
+ 'level': 'DEBUG',
+ 'formatter': 'simple',
+ 'class': 'logging.handlers.RotatingFileHandler',
+ 'filename': '/var/log/baserock_openid_provider/debug.log',
+ 'maxBytes': 10 * 1024 * 1024,
+ 'backupCount': 0,
+ }
+ },
+ 'loggers': {
+ 'django.request': {
+ 'handlers': ['file'],
+ 'level': 'DEBUG',
+ 'propagate': True,
+ },
+ 'openid_provider.views': {
+ 'handlers': ['file'],
+ 'level': 'DEBUG',
+ 'propagate': True,
+ }
+ }
+}
+
+
+# Database
+# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
+
+DATABASES = {
+ 'default': {
+ 'ENGINE': 'django.db.backends.mysql',
+ 'NAME': 'openid_provider',
+ 'USER': 'openid',
+ 'PORT': '3306',
+
+ 'HOST': DATABASE_HOST
+ }
+}
+
+
+pw_file = '/etc/baserock_openid_provider.database_password.yml'
+with open(pw_file) as f:
+ data = yaml.load(f)
+ password = data['baserock_openid_provider_password']
+ DATABASES['default']['PASSWORD'] = password
+
+# Internationalization
+# https://docs.djangoproject.com/en/1.7/topics/i18n/
+
+LANGUAGE_CODE = 'en-us'
+
+TIME_ZONE = 'UTC'
+
+USE_I18N = True
+
+USE_L10N = True
+
+USE_TZ = True
+
+
+# Static files (CSS, JavaScript, Images)
+# https://docs.djangoproject.com/en/1.7/howto/static-files/
+
+STATIC_URL = '/static/'
+
+STATIC_ROOT = '/var/www/static'
+
+TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
+
+
+# Other stuff
+
+LOGIN_REDIRECT_URL = '/'
+
+
+# We get mailed when stuff breaks.
+ADMINS = (
+ ('Sam Thursfield', 'sam.thursfield@codethink.co.uk'),
+)
+
+# FIXME: this email address doesn't actually exist.
+DEFAULT_FROM_EMAIL = 'openid@baserock.org'
+
+EMAIL_HOST = 'localhost'
+EMAIL_PORT = 25
+
+
+# django-registration-redux settings
+
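+# Number of days users have to follow the emailed activation link.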
+ACCOUNT_ACTIVATION_DAYS = 3
diff --git a/baserock_openid_provider/baserock_openid_provider/signals.py b/baserock_openid_provider/baserock_openid_provider/signals.py
new file mode 100644
index 00000000..dc2a7f78
--- /dev/null
+++ b/baserock_openid_provider/baserock_openid_provider/signals.py
@@ -0,0 +1,26 @@
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+from django.dispatch import receiver
+import registration.signals
+
+import logging
+
+
+@receiver(registration.signals.user_activated)
+def user_activation_handler(sender, user, request, **kwargs):
+ logging.info('Creating OpenID for user %s' % (user.username))
+ user.openid_set.create(openid=user.username)
diff --git a/baserock_openid_provider/baserock_openid_provider/static/style.css b/baserock_openid_provider/baserock_openid_provider/static/style.css
new file mode 100644
index 00000000..e8237b40
--- /dev/null
+++ b/baserock_openid_provider/baserock_openid_provider/static/style.css
@@ -0,0 +1,268 @@
+/* Baserock-ish stylesheet.
+   Fetched from http://wiki.baserock.org/local.css/ on 2015-01-23. */
+
+/* HTML5 display-role reset for older browsers */
+article, aside, details, figcaption, figure,
+footer, header, hgroup, menu, nav, section {
+ display: block;
+}
+body {
+ line-height: 1;
+}
+ol, ul {
+ padding: 0 0 0 1.5em;
+ margin: 0 0 1.2em;
+}
+li > ul, li > ol {
+ margin: 0;
+}
+ul {
+ list-style: disc;
+}
+ol {
+ list-style: decimal;
+}
+blockquote, q {
+ quotes: none;
+}
+blockquote:before, blockquote:after,
+q:before, q:after {
+ content: '';
+ content: none;
+}
+table {
+ border-collapse: collapse;
+ border-spacing: 0;
+}
+i, em {
+ font-style: italic;
+}
+b, strong {
+ font-weight: bold;
+}
+
+/*
+Main elements
+*/
+
+html, body {
+ font-size: 15px;
+ font-family: 'Open Sans', sans-serif;
+ line-height: 1.6em;
+}
+h1 {
+ color: #58595B;
+ font-size: 1.6em;
+ font-weight: bold;
+ margin: 0 0 0.4em;
+ padding: 1em 0 0.3em;
+}
+h2 {
+ border-bottom: 2px solid #E0E0E0;
+ border-top: 2px solid #E0E0E0;
+ background: #fafafa;
+ color: #58595B;
+ font-size: 1.4em;
+ font-weight: bold;
+ margin: 1.2em 0 0.4em;
+ padding: 0.4em 0;
+}
+h3 {
+ border-bottom: 2px solid #E0E0E0;
+ color: #58595B;
+ font-size: 1.2em;
+ font-weight: bold;
+ margin: 2em 0 0.3em;
+}
+h4 {
+ color: #58595B;
+ font-size: 1.1em;
+ font-weight: bold;
+ margin: 1.7em 0 0.3em;
+}
+h5 {
+ color: #58595B;
+ font-size: 1em;
+ font-weight: bold;
+ margin: 1.7em 0 0.3em;
+}
+a {
+ color: #bf2400;
+}
+p {
+ padding: 0;
+ margin: 0 0 1.2em;
+}
+table {
+ margin-bottom: 1.2em;
+}
+th, td {
+ padding: 0.2em 1em;
+}
+th {
+ font-weight: bold;
+ text-align: left;
+ border-bottom: 1px solid #ddd;
+}
+pre {
+ border: 1px solid #aaa;
+ border-radius: 0.5em;
+ padding: 1em 2em;
+ margin: 0 0 1.2em 2em;
+ background: #faf8f7;
+ font-size: 80%;
+}
+pre, code {
+ font-family: monospace;
+}
+code {
+ background: #faf8f7;
+ padding: 0.2em 0.4em;
+ border: 1px solid #ddd;
+ border-radius: 0.3em;
+ font-size: 0.9em;
+}
+pre > code {
+ background: none;
+ padding: 0;
+ border: none;
+ font-size: 1em;
+}
+blockquote {
+ border: .4em solid #ffaa55;
+ border-left-width: 3em;
+ padding: 0.3em 1em;
+ margin: 1.2em 3em;
+ border-radius: 2.2em 0 0 2.2em;
+}
+blockquote p {
+ margin: 0;
+}
+/*
+*/
+.max960 {
+ max-width: 960px;
+ margin: 0 auto;
+ position: relative;
+ height: 80px;
+}
+input#searchbox {
+ background: url("wikiicons/search-bg.gif") no-repeat scroll 100% 50% #FFFFFF;
+ color: #000000;
+ padding: 0 16px 0 10px;
+ border: solid 1px #CCC;
+ width: 180px;
+ height: 20px;
+ border-radius: 10px;
+}
+#searchform {
+ right: 0 !important;
+}
+.page {
+ max-width: 960px;
+ padding: 0 10px;
+ margin: 0 auto;
+}
+.pageheader {
+ background-color: #FFF;
+ border-bottom:2px solid #E65837;
+ color: #009099;
+ padding: 10px 10px 0 !important;
+ height: 80px;
+ background: #333;
+}
+.pageheader span a {
+ color: #FFF;
+}
+.pageheader span.title {
+ color: #E65837;
+}
+.pageheader .actions ul li {
+ background: none !important;
+ border-color: #28170B;
+ border-style: solid solid none;
+ border-width: 0;
+ margin: 0;
+ width: auto !important;
+ color: #FFF;
+ padding: 0 !important;
+}
+.pageheader li a:hover {
+ background: #E65837;
+ color: #FFF;
+}
+.header span {
+ display: inline-block;
+ padding: 6px 0;
+}
+.header span span {
+ padding: 0;
+}
+.parentlinks {
+ font: 13px 'Open Sans', sans-serif;
+}
+
+.title {
+ font: 13px 'Open Sans', sans-serif;
+ margin-top: 0.2em;
+ display:inline;
+}
+
+#logo a {
+ height: 40px;
+ width: 282px;
+ display: block;
+ padding-bottom: 10px;
+ background: url(logo.png) no-repeat;
+}
+#logo a span {
+ display: none;
+}
+#logo a:hover {
+ text-decoration: none;
+}
+.pageheader .actions {
+ position: static !important;
+ width: auto !important;
+ padding: 0 !important;
+}
+.pageheader .actions ul {
+ position: absolute;
+ right: 0;
+ bottom: 0;
+ height: auto !important;
+ padding: 0 !important;
+}
+.pageheader .actions a {
+ color: #FFF;
+ padding: 5px 0.5em;
+ display: inline-block;
+ background: #666;
+}
+
+div.header {
+ background-repeat: no-repeat;
+ min-width: 282px;
+ padding-top: 0px;
+}
+#pageinfo {
+ border-top: 0;
+}
+
+#content {
+ max-width: 51em;
+}
+#content, #comments, #footer {
+ margin: 1em 2em 1em 0 !important;
+}
+.pagedate {
+ font-size:10px;
+}
+.sidebar {
+ padding: 10px !important;
+ border: solid 1px #CCC !important;
+ background: #F2F2F2 !important;
+ margin: 1em 0 2em 1em !important;
+}
+
+
diff --git a/baserock_openid_provider/baserock_openid_provider/urls.py b/baserock_openid_provider/baserock_openid_provider/urls.py
new file mode 100644
index 00000000..8af8ade5
--- /dev/null
+++ b/baserock_openid_provider/baserock_openid_provider/urls.py
@@ -0,0 +1,12 @@
+from django.conf.urls import patterns, include, url
+from django.contrib import admin
+
+from . import views
+
+urlpatterns = patterns('',
+ url(r'^$', views.index, name='index'),
+
+ url(r'^accounts/', include('registration.backends.default.urls')),
+ url(r'^admin/', include(admin.site.urls)),
+ url(r'^openid/', include('openid_provider.urls')),
+)
diff --git a/baserock_openid_provider/baserock_openid_provider/views.py b/baserock_openid_provider/baserock_openid_provider/views.py
new file mode 100644
index 00000000..d067f66a
--- /dev/null
+++ b/baserock_openid_provider/baserock_openid_provider/views.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+import registration.backends.default.views
+
+from registration import signals
+from registration.users import UserModel
+
+from django.contrib.auth import authenticate
+from django.contrib.auth import login
+from django.shortcuts import render
+
+from . import forms
+
+
+def index(request):
+ return render(request, '../templates/index.html')
+
+
+class RegistrationViewWithNames(registration.backends.default.views.RegistrationView):
+ # Overrides the django-registration default view so that the extended form
+ # including the full name gets used.
+ form_class = forms.RegistrationFormWithNames
+
+ def register(self, form):
+ # Calling the base class first means that we don't have to copy and
+ # paste the contents of the register() function, but it has the
+ # downside that we don't know the user's name when we send the
+ # activation email.
+ superclass = super(RegistrationViewWithNames, self)
+ user = superclass.register(form)
+
+ user.first_name = form.cleaned_data['first_name']
+ user.last_name = form.cleaned_data['last_name']
+ user.save()
+
+ return user
+
+
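+# Replace the stock registration view, so that the URLs included from
+# registration.backends.default.urls pick up the form with the name fields.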
+registration.backends.default.views.RegistrationView = RegistrationViewWithNames
diff --git a/baserock_openid_provider/baserock_openid_provider/wsgi.py b/baserock_openid_provider/baserock_openid_provider/wsgi.py
new file mode 100644
index 00000000..5993d3e5
--- /dev/null
+++ b/baserock_openid_provider/baserock_openid_provider/wsgi.py
@@ -0,0 +1,14 @@
+"""
+WSGI config for baserock_openid_provider project.
+
+It exposes the WSGI callable as a module-level variable named ``application``.
+
+For more information on this file, see
+https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
+"""
+
+import os
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "baserock_openid_provider.settings")
+
+from django.core.wsgi import get_wsgi_application
+application = get_wsgi_application()
diff --git a/baserock_openid_provider/cherokee.conf b/baserock_openid_provider/cherokee.conf
new file mode 100644
index 00000000..38c4f1fa
--- /dev/null
+++ b/baserock_openid_provider/cherokee.conf
@@ -0,0 +1,302 @@
+# Cherokee configuration to run the Baserock OpenID provider, using
+# uWSGI to run the Django app from /srv/baserock_openid_provider.
+
+config!version = 001002103
+
+# Overall server config
+server!bind!1!port = 80
+server!group = cherokee
+server!keepalive = 1
+server!keepalive_max_requests = 500
+server!panic_action = /usr/bin/cherokee-panic
+server!pid_file = /var/run/cherokee.pid
+server!server_tokens = full
+server!timeout = 15
+server!user = cherokee
+
+# One virtual server which communicates with the uwsgi-django code and
+# also serves static files.
+vserver!1!directory_index = index.html
+vserver!1!document_root = /var/www/cherokee
+vserver!1!error_writer!filename = /var/log/cherokee/error_log
+vserver!1!error_writer!type = file
+vserver!1!logger = combined
+vserver!1!logger!access!buffsize = 16384
+vserver!1!logger!access!filename = /var/log/cherokee/access_log
+vserver!1!logger!access!type = file
+vserver!1!nick = default
+vserver!1!rule!110!document_root = /var/www/static
+vserver!1!rule!110!handler = file
+vserver!1!rule!110!match = directory
+vserver!1!rule!110!match!directory = /static
+vserver!1!rule!10!document_root = /var/www
+vserver!1!rule!10!handler = uwsgi
+vserver!1!rule!10!handler!balancer = round_robin
+vserver!1!rule!10!handler!balancer!source!10 = 1
+vserver!1!rule!10!handler!iocache = 1
+vserver!1!rule!10!match = default
+source!1!env_inherited = 1
+source!1!host = 127.0.0.1:45023
+source!1!interpreter = /usr/sbin/uwsgi --socket 127.0.0.1:45023 --ini=/srv/baserock_openid_provider/uwsgi.ini
+source!1!nick = uwsgi-django
+source!1!type = interpreter
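+# Cherokee launches the uWSGI interpreter itself if nothing is listening on
+# 127.0.0.1:45023, then proxies requests to it.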
+
+# Icons and mime types.
+icons!default = page_white.png
+icons!directory = folder.png
+icons!file!bomb.png = core
+icons!file!page_white_go.png = *README*
+icons!parent_directory = arrow_turn_left.png
+icons!suffix!camera.png = jpg,jpeg,jpe
+icons!suffix!cd.png = iso,nrg,cue
+icons!suffix!color_wheel.png = png,gif,xcf,bmp,pcx,tiff,tif,cdr,psd,xpm,xbm
+icons!suffix!control_play.png = bin,exe,com,msi,out
+icons!suffix!css.png = css
+icons!suffix!cup.png = java,class,jar
+icons!suffix!email.png = eml,mbox,box,email,mbx
+icons!suffix!film.png = avi,mpeg,mpe,mpg,mpeg3,dl,fli,qt,mov,movie,flv,webm
+icons!suffix!font.png = ttf
+icons!suffix!html.png = html,htm
+icons!suffix!music.png = au,snd,mid,midi,kar,mpga,mpega,mp2,mp3,sid,wav,aif,aiff,aifc,gsm,m3u,wma,wax,ra,rm,ram,pls,sd2,ogg
+icons!suffix!package.png = tar,gz,bz2,zip,rar,ace,lha,7z,dmg,cpk
+icons!suffix!page_white_acrobat.png = pdf
+icons!suffix!page_white_c.png = c,h,cpp
+icons!suffix!page_white_office.png = doc,ppt,xls
+icons!suffix!page_white_php.png = php
+icons!suffix!page_white_text.png = txt,text,rtf,sdw
+icons!suffix!printer.png = ps,eps
+icons!suffix!ruby.png = rb
+icons!suffix!script.png = sh,csh,ksh,tcl,tk,py,pl
+mime!application/bzip2!extensions = bz2
+mime!application/gzip!extensions = gz
+mime!application/hta!extensions = hta
+mime!application/java-archive!extensions = jar
+mime!application/java-serialized-object!extensions = ser
+mime!application/java-vm!extensions = class
+mime!application/json!extensions = json
+mime!application/mac-binhex40!extensions = hqx
+mime!application/msaccess!extensions = mdb
+mime!application/msword!extensions = doc,dot
+mime!application/octet-stream!extensions = bin
+mime!application/octetstream!extensions = ace
+mime!application/oda!extensions = oda
+mime!application/ogg!extensions = ogx
+mime!application/pdf!extensions = pdf
+mime!application/pgp-keys!extensions = key
+mime!application/pgp-signature!extensions = pgp
+mime!application/pics-rules!extensions = prf
+mime!application/postscript!extensions = ps,ai,eps
+mime!application/rar!extensions = rar
+mime!application/rdf+xml!extensions = rdf
+mime!application/rss+xml!extensions = rss
+mime!application/smil!extensions = smi,smil
+mime!application/vnd.mozilla.xul+xml!extensions = xul
+mime!application/vnd.ms-excel!extensions = xls,xlb,xlt
+mime!application/vnd.ms-pki.seccat!extensions = cat
+mime!application/vnd.ms-pki.stl!extensions = stl
+mime!application/vnd.ms-powerpoint!extensions = ppt,pps
+mime!application/vnd.oasis.opendocument.chart!extensions = odc
+mime!application/vnd.oasis.opendocument.database!extensions = odb
+mime!application/vnd.oasis.opendocument.formula!extensions = odf
+mime!application/vnd.oasis.opendocument.graphics!extensions = odg
+mime!application/vnd.oasis.opendocument.image!extensions = odi
+mime!application/vnd.oasis.opendocument.presentation!extensions = odp
+mime!application/vnd.oasis.opendocument.spreadsheet!extensions = ods
+mime!application/vnd.oasis.opendocument.text!extensions = odt
+mime!application/vnd.oasis.opendocument.text-master!extensions = odm
+mime!application/vnd.oasis.opendocument.text-web!extensions = oth
+mime!application/vnd.pkg5.info!extensions = p5i
+mime!application/vnd.visio!extensions = vsd
+mime!application/vnd.wap.wbxml!extensions = wbxml
+mime!application/vnd.wap.wmlc!extensions = wmlc
+mime!application/vnd.wap.wmlscriptc!extensions = wmlsc
+mime!application/x-7z-compressed!extensions = 7z
+mime!application/x-abiword!extensions = abw
+mime!application/x-apple-diskimage!extensions = dmg
+mime!application/x-bcpio!extensions = bcpio
+mime!application/x-bittorrent!extensions = torrent
+mime!application/x-cdf!extensions = cdf
+mime!application/x-cpio!extensions = cpio
+mime!application/x-csh!extensions = csh
+mime!application/x-debian-package!extensions = deb,udeb
+mime!application/x-director!extensions = dcr,dir,dxr
+mime!application/x-dvi!extensions = dvi
+mime!application/x-flac!extensions = flac
+mime!application/x-font!extensions = pfa,pfb,gsf,pcf,pcf.Z
+mime!application/x-freemind!extensions = mm
+mime!application/x-gnumeric!extensions = gnumeric
+mime!application/x-gtar!extensions = gtar,tgz,taz
+mime!application/x-gzip!extensions = gz,tgz
+mime!application/x-httpd-php!extensions = phtml,pht,php
+mime!application/x-httpd-php-source!extensions = phps
+mime!application/x-httpd-php3!extensions = php3
+mime!application/x-httpd-php3-preprocessed!extensions = php3p
+mime!application/x-httpd-php4!extensions = php4
+mime!application/x-internet-signup!extensions = ins,isp
+mime!application/x-iphone!extensions = iii
+mime!application/x-iso9660-image!extensions = iso
+mime!application/x-java-jnlp-file!extensions = jnlp
+mime!application/x-javascript!extensions = js
+mime!application/x-kchart!extensions = chrt
+mime!application/x-killustrator!extensions = kil
+mime!application/x-koan!extensions = skp,skd,skt,skm
+mime!application/x-kpresenter!extensions = kpr,kpt
+mime!application/x-kspread!extensions = ksp
+mime!application/x-kword!extensions = kwd,kwt
+mime!application/x-latex!extensions = latex
+mime!application/x-lha!extensions = lha
+mime!application/x-lzh!extensions = lzh
+mime!application/x-lzx!extensions = lzx
+mime!application/x-ms-wmd!extensions = wmd
+mime!application/x-ms-wmz!extensions = wmz
+mime!application/x-msdos-program!extensions = com,exe,bat,dll
+mime!application/x-msi!extensions = msi
+mime!application/x-netcdf!extensions = nc
+mime!application/x-ns-proxy-autoconfig!extensions = pac
+mime!application/x-nwc!extensions = nwc
+mime!application/x-object!extensions = o
+mime!application/x-oz-application!extensions = oza
+mime!application/x-pkcs7-certreqresp!extensions = p7r
+mime!application/x-pkcs7-crl!extensions = crl
+mime!application/x-python-code!extensions = pyc,pyo
+mime!application/x-quicktimeplayer!extensions = qtl
+mime!application/x-redhat-package-manager!extensions = rpm
+mime!application/x-sh!extensions = sh
+mime!application/x-shar!extensions = shar
+mime!application/x-shockwave-flash!extensions = swf,swfl
+mime!application/x-stuffit!extensions = sit,sea
+mime!application/x-sv4cpio!extensions = sv4cpio
+mime!application/x-sv4crc!extensions = sv4crc
+mime!application/x-tar!extensions = tar
+mime!application/x-tcl!extensions = tcl
+mime!application/x-tex-pk!extensions = pk
+mime!application/x-texinfo!extensions = texinfo,texi
+mime!application/x-trash!extensions = ~,bak,old,sik
+mime!application/x-troff!extensions = t,tr,roff
+mime!application/x-troff-man!extensions = man
+mime!application/x-troff-me!extensions = me
+mime!application/x-troff-ms!extensions = ms
+mime!application/x-ustar!extensions = ustar
+mime!application/x-x509-ca-cert!extensions = crt
+mime!application/x-xcf!extensions = xcf
+mime!application/x-xfig!extensions = fig
+mime!application/x-xpinstall!extensions = xpi
+mime!application/xhtml+xml!extensions = xhtml,xht
+mime!application/xml!extensions = xml,xsl
+mime!application/zip!extensions = zip
+mime!audio/basic!extensions = au,snd
+mime!audio/midi!extensions = mid,midi,kar
+mime!audio/mpeg!extensions = mpga,mpega,mp2,mp3,m4a
+mime!audio/ogg!extensions = ogg,oga
+mime!audio/prs.sid!extensions = sid
+mime!audio/x-aiff!extensions = aif,aiff,aifc
+mime!audio/x-gsm!extensions = gsm
+mime!audio/x-mpegurl!extensions = m3u
+mime!audio/x-ms-wax!extensions = wax
+mime!audio/x-ms-wma!extensions = wma
+mime!audio/x-pn-realaudio!extensions = ra,rm,ram
+mime!audio/x-realaudio!extensions = ra
+mime!audio/x-scpls!extensions = pls
+mime!audio/x-sd2!extensions = sd2
+mime!audio/x-wav!extensions = wav
+mime!chemical/x-cache!extensions = cac,cache
+mime!chemical/x-cache-csf!extensions = csf
+mime!chemical/x-cdx!extensions = cdx
+mime!chemical/x-cif!extensions = cif
+mime!chemical/x-cmdf!extensions = cmdf
+mime!chemical/x-cml!extensions = cml
+mime!chemical/x-compass!extensions = cpa
+mime!chemical/x-crossfire!extensions = bsd
+mime!chemical/x-csml!extensions = csml,csm
+mime!chemical/x-ctx!extensions = ctx
+mime!chemical/x-cxf!extensions = cxf,cef
+mime!chemical/x-isostar!extensions = istr,ist
+mime!chemical/x-jcamp-dx!extensions = jdx,dx
+mime!chemical/x-kinemage!extensions = kin
+mime!chemical/x-pdb!extensions = pdb,ent
+mime!chemical/x-swissprot!extensions = sw
+mime!chemical/x-vamas-iso14976!extensions = vms
+mime!chemical/x-vmd!extensions = vmd
+mime!chemical/x-xtel!extensions = xtel
+mime!chemical/x-xyz!extensions = xyz
+mime!image/gif!extensions = gif
+mime!image/jpeg!extensions = jpeg,jpg,jpe
+mime!image/pcx!extensions = pcx
+mime!image/png!extensions = png
+mime!image/svg+xml!extensions = svg,svgz
+mime!image/tiff!extensions = tiff,tif
+mime!image/vnd.djvu!extensions = djvu,djv
+mime!image/vnd.wap.wbmp!extensions = wbmp
+mime!image/x-icon!extensions = ico
+mime!image/x-ms-bmp!extensions = bmp
+mime!image/x-photoshop!extensions = psd
+mime!image/x-portable-anymap!extensions = pnm
+mime!image/x-portable-bitmap!extensions = pbm
+mime!image/x-portable-graymap!extensions = pgm
+mime!image/x-portable-pixmap!extensions = ppm
+mime!image/x-xbitmap!extensions = xbm
+mime!image/x-xpixmap!extensions = xpm
+mime!image/x-xwindowdump!extensions = xwd
+mime!model/iges!extensions = igs,iges
+mime!model/mesh!extensions = msh,mesh,silo
+mime!model/vrml!extensions = wrl,vrml
+mime!text/calendar!extensions = ics,icz
+mime!text/comma-separated-values!extensions = csv
+mime!text/css!extensions = css
+mime!text/h323!extensions = 323
+mime!text/html!extensions = html,htm,shtml
+mime!text/iuls!extensions = uls
+mime!text/mathml!extensions = mml
+mime!text/plain!extensions = asc,txt,text,diff,pot
+mime!text/richtext!extensions = rtx
+mime!text/rtf!extensions = rtf
+mime!text/scriptlet!extensions = sct,wsc
+mime!text/tab-separated-values!extensions = tsv
+mime!text/vnd.sun.j2me.app-descriptor!extensions = jad
+mime!text/vnd.wap.wml!extensions = wml
+mime!text/vnd.wap.wmlscript!extensions = wmls
+mime!text/x-boo!extensions = boo
+mime!text/x-c++hdr!extensions = h++,hpp,hxx,hh
+mime!text/x-c++src!extensions = c++,cpp,cxx,cc
+mime!text/x-chdr!extensions = h
+mime!text/x-csh!extensions = csh
+mime!text/x-csrc!extensions = c
+mime!text/x-dsrc!extensions = d
+mime!text/x-haskell!extensions = hs
+mime!text/x-java!extensions = java
+mime!text/x-literate-haskell!extensions = lhs
+mime!text/x-moc!extensions = moc
+mime!text/x-pascal!extensions = p,pas
+mime!text/x-pcs-gcd!extensions = gcd
+mime!text/x-perl!extensions = pl,pm
+mime!text/x-python!extensions = py
+mime!text/x-setext!extensions = etx
+mime!text/x-sh!extensions = sh
+mime!text/x-tcl!extensions = tcl,tk
+mime!text/x-tex!extensions = tex,ltx,sty,cls
+mime!text/x-vcalendar!extensions = vcs
+mime!text/x-vcard!extensions = vcf
+mime!video/dl!extensions = dl
+mime!video/dv!extensions = dif,dv
+mime!video/fli!extensions = fli
+mime!video/gl!extensions = gl
+mime!video/mp4!extensions = mp4
+mime!video/mpeg!extensions = mpeg,mpg,mpe
+mime!video/ogg!extensions = ogv
+mime!video/quicktime!extensions = qt,mov
+mime!video/vnd.mpegurl!extensions = mxu
+mime!video/webm!extensions = webm
+mime!video/x-flv!extensions = flv
+mime!video/x-la-asf!extensions = lsf,lsx
+mime!video/x-mng!extensions = mng
+mime!video/x-ms-asf!extensions = asf,asx
+mime!video/x-ms-wm!extensions = wm
+mime!video/x-ms-wmv!extensions = wmv
+mime!video/x-ms-wmx!extensions = wmx
+mime!video/x-ms-wvx!extensions = wvx
+mime!video/x-msvideo!extensions = avi
+mime!video/x-sgi-movie!extensions = movie
+mime!x-conference/x-cooltalk!extensions = ice
+mime!x-world/x-vrml!extensions = vrm,vrml,wrl
diff --git a/baserock_openid_provider/image-config.yml b/baserock_openid_provider/image-config.yml
new file mode 100644
index 00000000..4aa939f8
--- /dev/null
+++ b/baserock_openid_provider/image-config.yml
@@ -0,0 +1,77 @@
+# Image configuration for Baserock OpenID provider.
+---
+- hosts: openid
+ gather_facts: False
+ sudo: yes
+ tasks:
+ # See: https://fedoramagazine.org/getting-ansible-working-fedora-23/
+ - name: install Python2 and required deps for Ansible modules
+ raw: dnf install -y python2 python2-dnf libselinux-python
+
+  - name: enable persistent journal
+ shell: mkdir /var/log/journal
+ args:
+ creates: /var/log/journal
+
+ - name: ensure system up to date
+ dnf: name=* state=latest
+
+ - name: install Cherokee web server
+ dnf: name=cherokee state=latest
+
+ - name: install Sendmail mail transfer agent
+ dnf: name=sendmail state=latest
+
+ - name: install uWSGI application container server and Python plugin
+ dnf: name=uwsgi-plugin-python state=latest
+
+ - name: install PyYAML
+ dnf: name=PyYAML state=latest
+
+  # Authentication in Gerrit fails if the OpenID provider's clock is not set
+  # correctly.
+ - name: Install ntp
+ dnf: name=ntp
+
+ # All this stuff is installed with Pip, which isn't really necessary except
+ # for django-registration-redux. Fedora packages django-registration but not
+ # the better django-registration-redux (I think).
+ #
+ - name: install Django
+ pip: name=django executable=pip2.7
+
+ - name: install South (Django migrations tool)
+ pip: name=South executable=pip2.7
+
+ # This is a fork of django-registration which supports Django 1.7.
+ # Source: https://github.com/macropin/django-registration
+ # The original django-registration (which seems to be abandoned) lives at:
+ # https://bitbucket.org/ubernostrum/django-registration/
+ - name: install django-registration-redux
+ pip: name=django-registration-redux executable=pip2.7
+
+ - name: install python-openid
+ pip: name=python-openid executable=pip2.7
+
+ # Install the MySQL-python package from DNF, because if it's installed from
+ # PyPI you need to have the mariadb-devel package installed to build the C
+ # code and that's an extra 21MB of dependencies or so. Note that this driver
+ # doesn't support Python 3, but there is a fork available which does, see:
+ # https://docs.djangoproject.com/en/dev/ref/databases/#mysql-db-api-drivers
+ - name: install MySQL-python
+ dnf: name=MySQL-python state=latest
+
+ - name: install Cherokee configuration
+ file: src=/srv/baserock_openid_provider/cherokee.conf dest=/etc/cherokee/cherokee.conf state=link force=yes
+
+ - name: create log directory for baserock_openid_provider
+ file: path=/var/log/baserock_openid_provider owner=cherokee group=cherokee state=directory
+
+ - name: upload application
+ copy: src=. dest=/srv owner=fedora group=fedora
+
+ # Yes, SELinux prevents Cherokee from working.
+ - name: disable SELinux on subsequent boots
+ selinux: state=disabled
+
+ - name: disable SELinux on current boot
+ command: setenforce 0
diff --git a/baserock_openid_provider/instance-config.yml b/baserock_openid_provider/instance-config.yml
new file mode 100644
index 00000000..ffd7b66f
--- /dev/null
+++ b/baserock_openid_provider/instance-config.yml
@@ -0,0 +1,36 @@
+# Instance configuration for Baserock OpenID provider.
+#
+# This playbook should be run after starting an instance of the Baserock
+# OpenID Provider image.
+---
+- hosts: openid
+ gather_facts: False
+ sudo: yes
+ tasks:
+ - name: install database password
+ copy: src=../database/baserock_openid_provider.database_password.yml dest=/etc owner=cherokee group=cherokee mode=400
+
+ - name: install Django secret key
+ copy: src=baserock_openid_provider.secret_key.yml dest=/etc owner=cherokee group=cherokee mode=400
+
+ # This step could be part of image creation, except that the secret key
+ # file wouldn't be available at that time, so the 'manage.py' script would
+ # fail to run.
+ - name: install static content
+ django_manage: app_path=/srv/baserock_openid_provider command=collectstatic
+ sudo_user: cherokee
+
+ - name: run database migrations
+ django_manage: app_path=/srv/baserock_openid_provider command=migrate
+ sudo_user: cherokee
+
+ # The default configuration of Sendmail in Fedora accepts connections only
+ # from localhost. This is what we want, so no extra config is required.
+ - name: enable and start sendmail service
+ service: name=sendmail enabled=yes state=started
+
+ - name: enable and start Cherokee service
+ service: name=cherokee enabled=yes state=restarted
+
+ - name: enable and start ntpd service
+ service: name=ntpd enabled=yes state=restarted
diff --git a/baserock_openid_provider/manage.py b/baserock_openid_provider/manage.py
new file mode 100644
index 00000000..924662bf
--- /dev/null
+++ b/baserock_openid_provider/manage.py
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+import os
+import sys
+
+if __name__ == "__main__":
+ os.environ.setdefault("DJANGO_SETTINGS_MODULE", "baserock_openid_provider.settings")
+
+ from django.core.management import execute_from_command_line
+
+ execute_from_command_line(sys.argv)
diff --git a/baserock_openid_provider/openid_provider/__init__.py b/baserock_openid_provider/openid_provider/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/__init__.py
diff --git a/baserock_openid_provider/openid_provider/admin.py b/baserock_openid_provider/openid_provider/admin.py
new file mode 100644
index 00000000..0d1b62aa
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/admin.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+# vim: set ts=4 sw=4 :
+
+from django.contrib import admin
+
+from openid_provider.models import TrustedRoot, OpenID
+
+class TrustedRootInline(admin.TabularInline):
+ model = TrustedRoot
+
+class OpenIDAdmin(admin.ModelAdmin):
+ list_display = ['openid', 'user', 'default']
+ inlines = [TrustedRootInline, ]
+ raw_id_fields = ("user",)
+ search_fields = ('user__email',)
+
+admin.site.register(OpenID, OpenIDAdmin)
diff --git a/baserock_openid_provider/openid_provider/conf.py b/baserock_openid_provider/openid_provider/conf.py
new file mode 100644
index 00000000..7355c840
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/conf.py
@@ -0,0 +1,27 @@
+import os
+from django.conf import settings
+
+STORE = getattr(settings, 'OPENID_PROVIDER_STORE',
+ 'openid.store.filestore.FileOpenIDStore')
+
+if STORE == 'openid.store.filestore.FileOpenIDStore':
+ import tempfile
+ tempdir = tempfile.gettempdir()
+
+ FILESTORE_PATH = getattr(settings, 'OPENID_PROVIDER_FILESTORE_PATH',
+ os.path.join(tempdir, 'openid-filestore'))
+
+SREG_DATA_CALLBACK = getattr(settings, 'OPENID_PROVIDER_SREG_DATA_CALLBACK',
+ 'openid_provider.utils.get_default_sreg_data')
+
+AX_DATA_CALLBACK = getattr(settings, 'OPENID_PROVIDER_AX_DATA_CALLBACK',
+ 'openid_provider.utils.get_default_ax_data')
+
+AX_EXTENSION = getattr(settings, 'OPENID_PROVIDER_AX_EXTENSION', False)
+
+AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
+
+# RPs without a relying-party verification mechanism are redirected to the
+# decide page every time; set this to True to disable that behaviour:
+FAILED_DISCOVERY_AS_VALID = getattr(
+ settings, 'OPENID_FAILED_DISCOVERY_AS_VALID', False)
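Note: every name above falls back to a default unless the deployment's Django settings override it, via the getattr(settings, ...) pattern. A minimal sketch of such an override, assuming a persistent store path (the path is a made-up example; the setting names come from conf.py above):

    # settings.py fragment (sketch) -- overrides for openid_provider/conf.py
    OPENID_PROVIDER_STORE = 'openid.store.filestore.FileOpenIDStore'
    OPENID_PROVIDER_FILESTORE_PATH = '/var/lib/openid-filestore'  # hypothetical path
    OPENID_PROVIDER_AX_EXTENSION = True  # also serve attribute-exchange data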
diff --git a/baserock_openid_provider/openid_provider/models.py b/baserock_openid_provider/openid_provider/models.py
new file mode 100644
index 00000000..bad24d9a
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/models.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+# vim: set ts=4 sw=4 :
+
+from django.utils.translation import ugettext_lazy as _
+from django.db import models
+
+from openid_provider.conf import AUTH_USER_MODEL
+from openid_provider.utils import get_username
+
+class OpenID(models.Model):
+ user = models.ForeignKey(AUTH_USER_MODEL)
+ openid = models.CharField(max_length=200, blank=True, unique=True)
+ default = models.BooleanField(default=False)
+
+ class Meta:
+ verbose_name = _('OpenID')
+ verbose_name_plural = _('OpenIDs')
+ ordering = ['openid']
+
+ def __unicode__(self):
+ return u"%s|%s" % (get_username(self.user), self.openid)
+
+ def save(self, *args, **kwargs):
+ if self.openid in ['', u'', None]:
+ from hashlib import sha1
+ import random, base64
+ sha = sha1()
+ sha.update(unicode(get_username(self.user)).encode('utf-8'))
+ sha.update(str(random.random()))
+ value = str(base64.b64encode(sha.digest()))
+ value = value.replace('/', '').replace('+', '').replace('=', '')
+ self.openid = value
+ super(OpenID, self).save(*args, **kwargs)
+ if self.default:
+ self.user.openid_set.exclude(pk=self.pk).update(default=False)
+
+class TrustedRoot(models.Model):
+ openid = models.ForeignKey(OpenID)
+ trust_root = models.CharField(max_length=200)
+
+ def __unicode__(self):
+ return unicode(self.trust_root)
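Note: the save() method above assigns an opaque, URL-safe identifier when none is given, and keeps at most one default OpenID per user. A rough sketch of the resulting behaviour (assuming a Django shell and an existing 'user' object):

    from openid_provider.models import OpenID

    first = OpenID.objects.create(user=user, default=True)
    print(first.openid)  # random base64-derived token generated by save()

    # Creating a second default demotes the first one.
    second = OpenID.objects.create(user=user, default=True)
    assert not OpenID.objects.get(pk=first.pk).default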
diff --git a/baserock_openid_provider/openid_provider/south_migrations/0001_initial.py b/baserock_openid_provider/openid_provider/south_migrations/0001_initial.py
new file mode 100644
index 00000000..1857f59a
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/south_migrations/0001_initial.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+import datetime
+from south.db import db
+from south.v2 import SchemaMigration
+from django.db import models
+
+
+class Migration(SchemaMigration):
+
+ def forwards(self, orm):
+ # Adding model 'OpenID'
+ db.create_table('openid_provider_openid', (
+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
+ ('openid', self.gf('django.db.models.fields.CharField')(unique=True, max_length=200, blank=True)),
+ ('default', self.gf('django.db.models.fields.BooleanField')(default=False)),
+ ))
+ db.send_create_signal('openid_provider', ['OpenID'])
+
+ # Adding model 'TrustedRoot'
+ db.create_table('openid_provider_trustedroot', (
+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('openid', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['openid_provider.OpenID'])),
+ ('trust_root', self.gf('django.db.models.fields.CharField')(max_length=200)),
+ ))
+ db.send_create_signal('openid_provider', ['TrustedRoot'])
+
+
+ def backwards(self, orm):
+ # Deleting model 'OpenID'
+ db.delete_table('openid_provider_openid')
+
+ # Deleting model 'TrustedRoot'
+ db.delete_table('openid_provider_trustedroot')
+
+
+ models = {
+ 'auth.group': {
+ 'Meta': {'object_name': 'Group'},
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
+ 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
+ },
+ 'auth.permission': {
+ 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
+ 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
+ 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
+ },
+ 'auth.user': {
+ 'Meta': {'object_name': 'User'},
+ 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
+ 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
+ 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
+ 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
+ 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
+ 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
+ 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
+ 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
+ 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
+ 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
+ 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
+ },
+ 'contenttypes.contenttype': {
+ 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
+ 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
+ },
+ 'openid_provider.openid': {
+ 'Meta': {'ordering': "['openid']", 'object_name': 'OpenID'},
+ 'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'openid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'}),
+ 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
+ },
+ 'openid_provider.trustedroot': {
+ 'Meta': {'object_name': 'TrustedRoot'},
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'openid': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['openid_provider.OpenID']"}),
+ 'trust_root': ('django.db.models.fields.CharField', [], {'max_length': '200'})
+ }
+ }
+
+ complete_apps = ['openid_provider']
\ No newline at end of file
diff --git a/baserock_openid_provider/openid_provider/south_migrations/__init__.py b/baserock_openid_provider/openid_provider/south_migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/south_migrations/__init__.py
diff --git a/baserock_openid_provider/openid_provider/templates/openid_provider/base.html b/baserock_openid_provider/openid_provider/templates/openid_provider/base.html
new file mode 100644
index 00000000..94d9808c
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/templates/openid_provider/base.html
@@ -0,0 +1 @@
+{% extends "base.html" %}
diff --git a/baserock_openid_provider/openid_provider/templates/openid_provider/decide.html b/baserock_openid_provider/openid_provider/templates/openid_provider/decide.html
new file mode 100644
index 00000000..5b87f824
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/templates/openid_provider/decide.html
@@ -0,0 +1,41 @@
+{% extends "openid_provider/base.html" %}
+
+{% block content %}
+{% ifequal trust_root_valid "Valid" %}
+ <!-- Trust root has been validated by the OpenID 2 mechanism. -->
+ <p>The site <tt>{{ trust_root|escape }}</tt> has requested verification
+ of your OpenID.</p>
+{% endifequal %}
+{% ifequal trust_root_valid "Invalid" %}
+<div class="error">
+ <p>This request claims to be from {{ trust_root|escape }} but I have
+ determined that <em>it is a pack of lies</em>. Beware, if you release
+ information to them, they are likely to do unconscionable things with it,
+ being the lying liars that they are.</p>
+ <p>Please tell the <em>real</em> {{ trust_root|escape }} that someone is
+ trying to abuse your trust in their good name.</p>
+</div>
+{% endifequal %}
+{% ifequal trust_root_valid "Unreachable" %}
+ <p>The site <tt>{{ trust_root|escape }}</tt> has requested verification
+ of your OpenID. I have failed to reach it and thus cannot vouch for its
+ authenticity. Perhaps it is on your local network.</p>
+{% endifequal %}
+{% ifequal trust_root_valid "DISCOVERY_FAILED" %}
+ <p>The site <tt>{{ trust_root|escape }}</tt> has requested verification
+ of your OpenID. However, <tt>{{ trust_root|escape }}</tt> does not
+ implement OpenID 2.0's relying party verification mechanism. Please use
+ extra caution in deciding whether to release information to this party,
+ and ask <tt>{{ trust_root|escape }}</tt> to implement relying party
+ verification for your future transactions.</p>
+ <p>You will return to <tt>{{ return_to|escape }}</tt></p>
+{% endifequal %}
+
+<form method="post">{% csrf_token %}
+Verify your identity to the relying party?
+<br/>
+<input type="hidden" name="decide_page" value="True" />
+<input type="submit" value="Yes (Allow)" name="allow" />
+<input type="submit" value="No (Cancel)" name="cancel" />
+</form>
+{% endblock %}
diff --git a/baserock_openid_provider/openid_provider/templates/openid_provider/error.html b/baserock_openid_provider/openid_provider/templates/openid_provider/error.html
new file mode 100644
index 00000000..11b77b21
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/templates/openid_provider/error.html
@@ -0,0 +1,6 @@
+{% extends "openid_provider/base.html" %}
+
+{% block content %}
+<h1>{{ title }}</h1>
+{{ msg }}
+{% endblock %}
diff --git a/baserock_openid_provider/openid_provider/templates/openid_provider/response.html b/baserock_openid_provider/openid_provider/templates/openid_provider/response.html
new file mode 100644
index 00000000..5f7e46fa
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/templates/openid_provider/response.html
@@ -0,0 +1,12 @@
+{% extends "openid_provider/base.html" %}
+
+{% block content %}
+<div id="openid-body">
+ {{ body|safe }}
+</div>
+<script type="text/javascript">
+ // The URL is too long (> 2047 characters) to be submitted via GET, so it
+ // needs to be POSTed. The user should not have to click the "Continue"
+ // button, so we submit the form via JavaScript.
+ document.getElementById('openid-body').getElementsByTagName('form')[0].submit();
+</script>
+{% endblock %}
diff --git a/baserock_openid_provider/openid_provider/templates/openid_provider/server.html b/baserock_openid_provider/openid_provider/templates/openid_provider/server.html
new file mode 100644
index 00000000..80615157
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/templates/openid_provider/server.html
@@ -0,0 +1,9 @@
+{% extends "openid_provider/base.html" %}
+
+{% block extrahead %}{{ block.super }}
+<meta http-equiv="x-xrds-location" content="{{ xrds_location }}">
+{% endblock %}
+
+{% block content %}
+This is an OpenID server.
+{% endblock %}
diff --git a/baserock_openid_provider/openid_provider/templates/openid_provider/xrds.xml b/baserock_openid_provider/openid_provider/templates/openid_provider/xrds.xml
new file mode 100644
index 00000000..960685b0
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/templates/openid_provider/xrds.xml
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<xrds:XRDS xmlns:xrds="xri://$xrds" xmlns="xri://$xrd*($v*2.0)">
+ <XRD>
+ <Service priority="0">{% for uri in types %}
+ <Type>{{ uri|escape }}</Type>
+ {% endfor %}{% for endpoint in endpoints %}
+ <URI>{{ endpoint }}</URI>
+ {% endfor %}</Service>
+ </XRD>
+</xrds:XRDS>
diff --git a/baserock_openid_provider/openid_provider/urls.py b/baserock_openid_provider/openid_provider/urls.py
new file mode 100644
index 00000000..33f79ce7
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/urls.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# vim: set ts=4 sw=4 :
+
+try:
+ from django.conf.urls import patterns, url
+except ImportError: # Django < 1.4
+ from django.conf.urls.defaults import patterns, url
+
+urlpatterns = patterns('openid_provider.views',
+ url(r'^$', 'openid_server', name='openid-provider-root'),
+ url(r'^decide/$', 'openid_decide', name='openid-provider-decide'),
+ url(r'^xrds/$', 'openid_xrds', name='openid-provider-xrds'),
+ url(r'^(?P<id>.*)/$', 'openid_xrds', {'identity': True}, name='openid-provider-identity'),
+)
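Note: these patterns only define the app's internal routes; the project URLconf still has to mount them under a prefix. A sketch, assuming the '/openid/' prefix implied by the identity URLs used elsewhere in this repository (Django 1.7-era syntax):

    # Project urls.py fragment (sketch)
    from django.conf.urls import include, patterns, url

    urlpatterns = patterns('',
        url(r'^openid/', include('openid_provider.urls')),
    )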
diff --git a/baserock_openid_provider/openid_provider/utils.py b/baserock_openid_provider/openid_provider/utils.py
new file mode 100644
index 00000000..dc0c714f
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/utils.py
@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*- vim: set et ts=4 sw=4 :
+# some code from http://www.djangosnippets.org/snippets/310/ by simon
+# and from examples/djopenid from python-openid-2.2.4
+from hashlib import sha1
+from openid_provider import conf
+from openid.extensions import ax, sreg
+from openid.server.server import Server, BROWSER_REQUEST_MODES
+from openid.server.trustroot import verifyReturnTo
+from openid.yadis.discover import DiscoveryFailure
+from openid.fetchers import HTTPFetchingError
+
+from django.core.exceptions import ImproperlyConfigured
+from django.core.urlresolvers import reverse
+from django.http import HttpResponse
+from django.shortcuts import render_to_response
+from django.template import RequestContext
+
+from importlib import import_module
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+def import_module_attr(path):
+ package, module = path.rsplit('.', 1)
+ return getattr(import_module(package), module)
+
+def get_username(u):
+ if hasattr(u, 'get_username'):
+ return u.get_username()
+ return u.username
+
+def get_default_sreg_data(request, orequest):
+ return {
+ 'email': request.user.email,
+ 'nickname': get_username(request.user),
+ 'fullname': request.user.get_full_name(),
+ }
+
+def get_default_ax_data(request, orequest):
+ return {
+ 'http://axschema.org/contact/email': request.user.email,
+ 'http://axschema.org/namePerson': request.user.get_full_name(),
+ 'http://axschema.org/namePerson/friendly': get_username(request.user),
+ 'http://axschema.org/namePerson/first': request.user.first_name,
+ 'http://axschema.org/namePerson/last': request.user.last_name,
+ }
+
+def add_sreg_data(request, orequest, oresponse):
+ callback = get_sreg_callback()
+ if callback is None or not callable(callback):
+ return
+ sreg_data = callback(request, orequest)
+ sreg_req = sreg.SRegRequest.fromOpenIDRequest(orequest)
+ sreg_resp = sreg.SRegResponse.extractResponse(sreg_req, sreg_data)
+ oresponse.addExtension(sreg_resp)
+
+def add_ax_data(request, orequest, oresponse):
+ callback = get_ax_callback()
+ if callback is None or not callable(callback):
+ return
+ ax_data = callback(request, orequest)
+ ax_req = ax.FetchRequest.fromOpenIDRequest(orequest)
+ ax_resp = ax.FetchResponse(ax_req)
+ if ax_req is not None:
+ for attr in ax_req.getRequiredAttrs():
+ value = ax_data.get(attr, None)
+ if value is not None:
+ ax_resp.addValue(attr, value)
+ oresponse.addExtension(ax_resp)
+
+def get_sreg_callback():
+ try:
+ return import_module_attr(conf.SREG_DATA_CALLBACK)
+ except (ImportError, AttributeError):
+ return None
+
+def get_ax_callback():
+ try:
+ return import_module_attr(conf.AX_DATA_CALLBACK)
+ except (ImportError, AttributeError):
+ return None
+
+def get_store(request):
+ try:
+ store_class = import_module_attr(conf.STORE)
+ except ImportError:
+ raise ImproperlyConfigured(
+ "OpenID store %r could not be imported" % conf.STORE)
+ # The FileOpenIDStore requires a path to save the user files.
+ if conf.STORE == 'openid.store.filestore.FileOpenIDStore':
+ return store_class(conf.FILESTORE_PATH)
+ return store_class()
+
+def trust_root_validation(orequest):
+ """
+ OpenID specs 9.2.1: using realm for return url verification
+ """
+ try:
+ return verifyReturnTo(
+ orequest.trust_root, orequest.return_to) and "Valid" or "Invalid"
+ except HTTPFetchingError:
+ return "Unreachable"
+ except DiscoveryFailure:
+ return "DISCOVERY_FAILED"
+
+def get_trust_session_key(orequest):
+ return 'OPENID_' + sha1(
+ orequest.trust_root + orequest.return_to).hexdigest()
+
+def prep_response(request, orequest, oresponse, server=None):
+ # Convert a webresponse from the OpenID library into a Django HttpResponse
+
+ if not server:
+ server = Server(get_store(request),
+ op_endpoint=request.build_absolute_uri(
+ reverse('openid-provider-root')))
+ webresponse = server.encodeResponse(oresponse)
+ if webresponse.code == 200 and orequest.mode in BROWSER_REQUEST_MODES:
+ response = render_to_response('openid_provider/response.html', {
+ 'body': webresponse.body,
+ }, context_instance=RequestContext(request))
+ logger.debug('rendering browser response')
+ else:
+ response = HttpResponse(webresponse.body)
+ response.status_code = webresponse.code
+ for key, value in webresponse.headers.items():
+ response[key] = value
+ logger.debug('rendering raw response')
+ return response
+
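Note: the SREG and AX callbacks are configured as dotted-path strings and resolved at request time via import_module_attr(). A sketch of the mechanism, using the default path from conf.py:

    from openid_provider.utils import import_module_attr

    # Splits on the last '.', imports 'openid_provider.utils' and returns
    # its 'get_default_sreg_data' attribute.
    callback = import_module_attr('openid_provider.utils.get_default_sreg_data')

    # add_sreg_data() then calls callback(request, orequest) and copies the
    # resulting dict into the signed SREG response.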
diff --git a/baserock_openid_provider/openid_provider/views.py b/baserock_openid_provider/openid_provider/views.py
new file mode 100644
index 00000000..1b8ef6d5
--- /dev/null
+++ b/baserock_openid_provider/openid_provider/views.py
@@ -0,0 +1,323 @@
+# -*- coding: utf-8 -*-
+# some code from http://www.djangosnippets.org/snippets/310/ by simon
+# and from examples/djopenid from python-openid-2.2.4
+import urlparse
+import logging
+from urllib import urlencode, quote
+
+from django.conf import settings
+from django.core.urlresolvers import reverse
+from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotAllowed, QueryDict
+from django.shortcuts import render_to_response
+from django.template import RequestContext
+from django.utils.translation import ugettext as _
+
+from django.utils.encoding import smart_str
+try:
+ from django.views.decorators.csrf import csrf_exempt
+except ImportError:
+ from django.contrib.csrf.middleware import csrf_exempt
+
+from django.contrib.auth import REDIRECT_FIELD_NAME
+
+from openid.association import default_negotiator, encrypted_negotiator
+from openid.consumer.discover import OPENID_IDP_2_0_TYPE, OPENID_2_0_TYPE
+from openid.extensions import sreg, ax
+from openid.server.server import Server, BROWSER_REQUEST_MODES
+from openid.yadis.constants import YADIS_CONTENT_TYPE
+
+from openid_provider import conf
+from openid_provider.utils import add_sreg_data, add_ax_data, get_store, \
+ trust_root_validation, get_trust_session_key, prep_response
+from openid_provider.models import TrustedRoot
+
+logger = logging.getLogger(__name__)
+
+
+# Special URL which means 'let the user choose whichever identity'.
+IDENTIFIER_SELECT_URL = 'http://specs.openid.net/auth/2.0/identifier_select'
+
+
+@csrf_exempt
+def openid_server(request):
+ """
+ This view is the actual OpenID server - running at the URL pointed to by
+ the <link rel="openid.server"> tag.
+ """
+ logger.debug('server request %s: %s',
+ request.method, request.POST or request.GET)
+ server = openid_get_server(request)
+
+ if not request.is_secure():
+ # if request is not secure allow only encrypted association sessions
+ server.negotiator = encrypted_negotiator
+
+ # Clear AuthorizationInfo session var, if it is set
+ if request.session.get('AuthorizationInfo', None):
+ del request.session['AuthorizationInfo']
+
+ if request.method == 'GET':
+ querydict = dict(request.GET.items())
+ elif request.method == 'POST':
+ querydict = dict(request.POST.items())
+ else:
+ return HttpResponseNotAllowed(['GET', 'POST'])
+
+ orequest = server.decodeRequest(querydict)
+ if not orequest:
+ orequest = server.decodeRequest(request.session.get('OPENID_REQUEST', None))
+ if orequest:
+ # remove session stored data:
+ del request.session['OPENID_REQUEST']
+ else:
+ # no request, render the info page:
+ data = {
+ 'host': request.build_absolute_uri('/'),
+ 'xrds_location': request.build_absolute_uri(
+ reverse('openid-provider-xrds')),
+ }
+ logger.debug('invalid request, sending info: %s', data)
+ return render_to_response('openid_provider/server.html',
+ data,
+ context_instance=RequestContext(request))
+
+ if orequest.mode in BROWSER_REQUEST_MODES:
+ if not request.user.is_authenticated():
+ logger.debug('no local authentication, sending landing page')
+ return landing_page(request, orequest)
+
+ openid = openid_is_authorized(request, orequest.identity,
+ orequest.trust_root)
+
+ # verify return_to:
+ trust_root_valid = trust_root_validation(orequest)
+ validated = False
+
+ if conf.FAILED_DISCOVERY_AS_VALID:
+ if trust_root_valid == 'DISCOVERY_FAILED':
+ validated = True
+ else:
+ # if the decide page has already approved this trust root, mark as valid:
+ if request.session.get(get_trust_session_key(orequest), False):
+ validated = True
+
+ if openid is not None and (validated or trust_root_valid == 'Valid'):
+ if orequest.identity == IDENTIFIER_SELECT_URL:
+ id_url = request.build_absolute_uri(
+ reverse('openid-provider-identity', args=[openid.openid]))
+ else:
+ # We must return exactly the identity URL that was requested,
+ # otherwise the openid.server module raises an error.
+ id_url = orequest.identity
+
+ oresponse = orequest.answer(True, identity=id_url)
+ logger.debug('orequest.answer(True, identity="%s")', id_url)
+ elif orequest.immediate:
+ logger.debug('checkid_immediate mode not supported')
+ raise Exception('checkid_immediate mode not supported')
+ else:
+ request.session['OPENID_REQUEST'] = orequest.message.toPostArgs()
+ request.session['OPENID_TRUSTROOT_VALID'] = trust_root_valid
+ logger.debug(
+ 'Set OPENID_REQUEST to %s in session %s',
+ request.session['OPENID_REQUEST'], request.session)
+ logger.debug(
+ 'Set OPENID_TRUSTROOT_VALID to %s in session %s',
+ request.session['OPENID_TRUSTROOT_VALID'], request.session)
+ logger.debug('redirecting to decide page')
+ return HttpResponseRedirect(reverse('openid-provider-decide'))
+ else:
+ oresponse = server.handleRequest(orequest)
+ if request.user.is_authenticated():
+ add_sreg_data(request, orequest, oresponse)
+ if conf.AX_EXTENSION:
+ add_ax_data(request, orequest, oresponse)
+
+ return prep_response(request, orequest, oresponse, server)
+
+def openid_xrds(request, identity=False, id=None):
+ if identity:
+ types = [OPENID_2_0_TYPE]
+ else:
+ types = [OPENID_IDP_2_0_TYPE, sreg.ns_uri]
+ if conf.AX_EXTENSION:
+ types.append(ax.AXMessage.ns_uri)
+ endpoints = [request.build_absolute_uri(reverse('openid-provider-root'))]
+ return render_to_response('openid_provider/xrds.xml', {
+ 'host': request.build_absolute_uri('/'),
+ 'types': types,
+ 'endpoints': endpoints,
+ }, context_instance=RequestContext(request), content_type=YADIS_CONTENT_TYPE)
+
+
+def url_for_openid(request, openid):
+ return request.build_absolute_uri(
+ reverse('openid-provider-identity', args=[openid.openid]))
+
+
+def openid_not_found_error_message(request, identity_url):
+ ids = request.user.openid_set
+ if ids.count() == 0:
+ message = "You have no OpenIDs configured. Contact the administrator."
+ else:
+ id_urls = [url_for_openid(request, id) for id in ids.iterator()]
+ id_urls = ', '.join(id_urls)
+ if ids.count() != 1:
+ message = "You somehow have multiple OpenIDs: " + id_urls
+ else:
+ message = "Your OpenID URL is: " + id_urls
+ return "You do not have the OpenID '%s'. %s" % (identity_url, message)
+
+
+def openid_decide(request):
+ """
+ The page that asks the user if they really want to sign in to the site, and
+ lets them add the consumer to their trusted whitelist.
+ If the user is logged in, ask whether they want to trust this trust_root.
+ If they are NOT logged in, show the landing page.
+ """
+ server = openid_get_server(request)
+ orequest = server.decodeRequest(request.session.get('OPENID_REQUEST'))
+ trust_root_valid = request.session.get('OPENID_TRUSTROOT_VALID')
+
+ logger.debug('Got OPENID_REQUEST %s, OPENID_TRUSTROOT_VALID %s from '
+ 'session %s', orequest, trust_root_valid, request.session)
+
+ if not request.user.is_authenticated():
+ return landing_page(request, orequest)
+
+ if orequest is None:
+ # This isn't normal, but can occur if the user uses the 'back' button
+ # or if the session data is otherwise lost for some reason.
+ return error_page(
+ request, "I've lost track of your session now. Sorry! Please go "
+ "back to the site you are logging in to with a Baserock "
+ "OpenID and, if you're not yet logged in, try again.")
+
+ openid = openid_get_identity(request, orequest.identity)
+ if openid is None:
+ # User should only ever have one OpenID, created for them when they
+ # registered.
+ message = openid_not_found_error_message(request, orequest.identity)
+ return error_page(request, message)
+
+ if request.method == 'POST' and request.POST.get('decide_page', False):
+ if request.POST.get('allow', False):
+ TrustedRoot.objects.get_or_create(
+ openid=openid, trust_root=orequest.trust_root)
+ if not conf.FAILED_DISCOVERY_AS_VALID:
+ request.session[get_trust_session_key(orequest)] = True
+ return HttpResponseRedirect(reverse('openid-provider-root'))
+
+ oresponse = orequest.answer(False)
+ logger.debug('orequest.answer(False)')
+ return prep_response(request, orequest, oresponse)
+
+ return render_to_response('openid_provider/decide.html', {
+ 'title': _('Trust this site?'),
+ 'trust_root': orequest.trust_root,
+ 'trust_root_valid': trust_root_valid,
+ 'return_to': orequest.return_to,
+ 'identity': orequest.identity,
+ }, context_instance=RequestContext(request))
+
+def error_page(request, msg):
+ return render_to_response('openid_provider/error.html', {
+ 'title': _('Error'),
+ 'msg': msg,
+ }, context_instance=RequestContext(request))
+
+class SafeQueryDict(QueryDict):
+ """
+ A custom QueryDict class that implements a urlencode method
+ knowing how to exempt some characters as safe.
+
+ Backported from Django 1.3
+ """
+ def urlencode(self, safe=None):
+ output = []
+ if safe:
+ encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
+ else:
+ encode = lambda k, v: urlencode({k: v})
+ for k, list_ in self.lists():
+ k = smart_str(k, self.encoding)
+ output.extend([encode(k, smart_str(v, self.encoding))
+ for v in list_])
+ return '&'.join(output)
+
+def landing_page(request, orequest, login_url=None,
+ redirect_field_name=REDIRECT_FIELD_NAME):
+ """
+ The page shown when the user attempts to sign in somewhere using OpenID
+ but is not authenticated with the site. For idproxy.net, a message telling
+ them to log in manually is displayed.
+ """
+ request.session['OPENID_REQUEST'] = orequest.message.toPostArgs()
+ logger.debug(
+ 'Set OPENID_REQUEST to %s in session %s',
+ request.session['OPENID_REQUEST'], request.session)
+ if not login_url:
+ login_url = settings.LOGIN_URL
+ path = request.get_full_path()
+ login_url_parts = list(urlparse.urlparse(login_url))
+ if redirect_field_name:
+ querystring = SafeQueryDict(login_url_parts[4], mutable=True)
+ querystring[redirect_field_name] = path
+ login_url_parts[4] = querystring.urlencode(safe='/')
+ return HttpResponseRedirect(urlparse.urlunparse(login_url_parts))
+
+def openid_is_authorized(request, identity_url, trust_root):
+ """
+ Check that they own the given identity URL, and that the trust_root is
+ in their whitelist of trusted sites.
+ """
+ if not request.user.is_authenticated():
+ return None
+
+ openid = openid_get_identity(request, identity_url)
+ if openid is None:
+ return None
+
+ if openid.trustedroot_set.filter(trust_root=trust_root).count() < 1:
+ return None
+
+ return openid
+
+
+def url_is_equivalent(a, b):
+ """
+ Test if two URLs are equivalent OpenIDs.
+ """
+ return a.rstrip('/') == b.rstrip('/')
+
+
+def openid_get_identity(request, identity_url):
+ """
+ Select openid based on claim (identity_url).
+ If none was claimed identity_url will be
+ 'http://specs.openid.net/auth/2.0/identifier_select'
+ - in that case return default one
+ - if user has no default one, return any
+ - in other case return None!
+ """
+ logger.debug('Looking for %s in user %s set of OpenIDs %s',
+ identity_url, request.user, request.user.openid_set)
+ for openid in request.user.openid_set.iterator():
+ if url_is_equivalent(identity_url, url_for_openid(request, openid)):
+ return openid
+ if identity_url == IDENTIFIER_SELECT_URL:
+ # no claim was made, choose user default openid:
+ openids = request.user.openid_set.filter(default=True)
+ if openids.count() == 1:
+ return openids[0]
+ if request.user.openid_set.count() > 0:
+ return request.user.openid_set.all()[0]
+ return None
+
+
+def openid_get_server(request):
+ return Server(
+ get_store(request),
+ op_endpoint=request.build_absolute_uri(
+ reverse('openid-provider-root')))
diff --git a/baserock_openid_provider/templates/base.html b/baserock_openid_provider/templates/base.html
new file mode 100644
index 00000000..25a6135d
--- /dev/null
+++ b/baserock_openid_provider/templates/base.html
@@ -0,0 +1,38 @@
+{% load i18n %}
+<!DOCTYPE html>
+<html lang="en">
+
+<head>
+ <link rel="stylesheet" href="{{ STATIC_URL }}style.css" />
+ <title>{% block title %}Baserock OpenID Provider{% endblock %}</title>
+</head>
+
+<body>
+ <div id="header">
+ {% block header %}
+ <a href="{% url 'index' %}">{% trans "Home" %}</a> |
+
+ {% if user.is_authenticated %}
+ {% trans "Logged in" %}: {{ user.username }}
+ (<a href="{% url 'auth_logout' %}">{% trans "Log out" %}</a> |
+ <a href="{% url 'auth_password_change' %}">{% trans "Change password" %}</a>)
+ {% else %}
+ <a href="{% url 'auth_login' %}">{% trans "Log in" %}</a> |
+ <a href="{% url 'registration_register' %}">{% trans "Register" %}</a>
+ {% endif %}
+ <hr />
+ {% endblock %}
+ </div>
+
+ <div id="content">
+ {% block content %}{% endblock %}
+ </div>
+
+ <div id="footer">
+ {% block footer %}
+ <hr />
+ {% endblock %}
+ </div>
+</body>
+
+</html>
diff --git a/baserock_openid_provider/templates/index.html b/baserock_openid_provider/templates/index.html
new file mode 100644
index 00000000..1cb4bf73
--- /dev/null
+++ b/baserock_openid_provider/templates/index.html
@@ -0,0 +1,15 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<p>This is the Baserock OpenID provider.</p>
+
+{% if user.is_authenticated %}
+ <p>You are registered as {{ user.get_full_name }}.</p>
+
+ <p>Your OpenID is:
+ <a href="https://openid.baserock.org/openid/{{ user.username }}/">https://openid.baserock.org/openid/{{ user.username }}/</a>
+ </p>
+{% endif %}
+
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/activate.html b/baserock_openid_provider/templates/registration/activate.html
new file mode 100644
index 00000000..8deb01c8
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/activate.html
@@ -0,0 +1,8 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+
+<p>{% trans "Account activation failed" %}</p>
+
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/activation_complete.html b/baserock_openid_provider/templates/registration/activation_complete.html
new file mode 100644
index 00000000..df2efd55
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/activation_complete.html
@@ -0,0 +1,6 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<p>{% trans "Your account is now activated. Please log in." %}</p>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/activation_email.txt b/baserock_openid_provider/templates/registration/activation_email.txt
new file mode 100644
index 00000000..bfa784d9
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/activation_email.txt
@@ -0,0 +1,6 @@
+{% load i18n %}
+{% trans "Activate account at" %} {{ site.name }}:
+
+https://{{ site.domain }}{% url 'registration_activate' activation_key %}
+
+{% blocktrans %}Link is valid for {{ expiration_days }} days.{% endblocktrans %}
diff --git a/baserock_openid_provider/templates/registration/activation_email_subject.txt b/baserock_openid_provider/templates/registration/activation_email_subject.txt
new file mode 100644
index 00000000..24f477cb
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/activation_email_subject.txt
@@ -0,0 +1 @@
+{% load i18n %}{% trans "Account activation on" %} {{ site.name }}
diff --git a/baserock_openid_provider/templates/registration/login.html b/baserock_openid_provider/templates/registration/login.html
new file mode 100644
index 00000000..9b245989
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/login.html
@@ -0,0 +1,15 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<form method="post" action=".">
+ {% csrf_token %}
+ {{ form.as_p }}
+
+ <input type="submit" value="{% trans 'Log in' %}" />
+ <input type="hidden" name="next" value="{{ next }}" />
+</form>
+
+<p>{% trans "Forgot password" %}? <a href="{% url 'auth_password_reset' %}">{% trans "Reset it" %}</a>!</p>
+<p>{% trans "Not a member" %}? <a href="{% url 'registration_register' %}">{% trans "Register" %}</a>!</p>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/logout.html b/baserock_openid_provider/templates/registration/logout.html
new file mode 100644
index 00000000..f8da51fa
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/logout.html
@@ -0,0 +1,6 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<p>{% trans "Logged out" %}</p>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/password_change_done.html b/baserock_openid_provider/templates/registration/password_change_done.html
new file mode 100644
index 00000000..659be0a4
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/password_change_done.html
@@ -0,0 +1,6 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<p>{% trans "Password changed" %}</p>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/password_change_form.html b/baserock_openid_provider/templates/registration/password_change_form.html
new file mode 100644
index 00000000..10b1fc13
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/password_change_form.html
@@ -0,0 +1,11 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<form method="post" action=".">
+ {% csrf_token %}
+ {{ form.as_p }}
+
+ <input type="submit" value="{% trans 'Submit' %}" />
+</form>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/password_reset_complete.html b/baserock_openid_provider/templates/registration/password_reset_complete.html
new file mode 100644
index 00000000..55993e85
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/password_reset_complete.html
@@ -0,0 +1,10 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+
+<p>{% trans "Password reset successfully" %}</p>
+
+<p><a href="{% url 'auth_login' %}">{% trans "Log in" %}</a></p>
+
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/password_reset_confirm.html b/baserock_openid_provider/templates/registration/password_reset_confirm.html
new file mode 100644
index 00000000..33bd276a
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/password_reset_confirm.html
@@ -0,0 +1,21 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+
+{% if validlink %}
+
+<form method="post" action=".">
+ {% csrf_token %}
+ {{ form.as_p }}
+
+ <input type="submit" value="{% trans 'Submit' %}" />
+</form>
+
+{% else %}
+
+<p>{% trans "Password reset failed" %}</p>
+
+{% endif %}
+
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/password_reset_done.html b/baserock_openid_provider/templates/registration/password_reset_done.html
new file mode 100644
index 00000000..6057ccbe
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/password_reset_done.html
@@ -0,0 +1,6 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<p>{% trans "Email with password reset instructions has been sent." %}</p>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/password_reset_email.html b/baserock_openid_provider/templates/registration/password_reset_email.html
new file mode 100644
index 00000000..c78893ed
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/password_reset_email.html
@@ -0,0 +1,5 @@
+{% load i18n %}
+{% blocktrans %}Reset password at {{ site_name }}{% endblocktrans %}:
+{% block reset_link %}
+{{ protocol }}://{{ domain }}{% url 'auth_password_reset_confirm' uid token %}
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/password_reset_form.html b/baserock_openid_provider/templates/registration/password_reset_form.html
new file mode 100644
index 00000000..10b1fc13
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/password_reset_form.html
@@ -0,0 +1,11 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<form method="post" action=".">
+ {% csrf_token %}
+ {{ form.as_p }}
+
+ <input type="submit" value="{% trans 'Submit' %}" />
+</form>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/registration_closed.html b/baserock_openid_provider/templates/registration/registration_closed.html
new file mode 100644
index 00000000..c73cfacc
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/registration_closed.html
@@ -0,0 +1,6 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+ <p>{% trans "Registration is currently closed." %}</p>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/registration_complete.html b/baserock_openid_provider/templates/registration/registration_complete.html
new file mode 100644
index 00000000..757bd50c
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/registration_complete.html
@@ -0,0 +1,11 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<p>You are now registered. An activation email has been sent to you with
+a link that you will need to click to activate your account.</p>
+
+<p>The mail should arrive within 15 minutes, depending on your mail provider's
+use of <a href="https://en.wikipedia.org/wiki/Greylisting">greylisting</a>.</p>
+{% endblock %}
diff --git a/baserock_openid_provider/templates/registration/registration_form.html b/baserock_openid_provider/templates/registration/registration_form.html
new file mode 100644
index 00000000..6d0854d6
--- /dev/null
+++ b/baserock_openid_provider/templates/registration/registration_form.html
@@ -0,0 +1,11 @@
+{% extends "base.html" %}
+{% load i18n %}
+
+{% block content %}
+<form method="post" action=".">
+ {% csrf_token %}
+ {{ form.as_p }}
+
+ <input type="submit" value="{% trans 'Submit' %}" />
+</form>
+{% endblock %}
diff --git a/baserock_openid_provider/uwsgi.ini b/baserock_openid_provider/uwsgi.ini
new file mode 100644
index 00000000..0849096d
--- /dev/null
+++ b/baserock_openid_provider/uwsgi.ini
@@ -0,0 +1,22 @@
+# Configuration for uWSGI web application gateway for Baserock OpenID provider.
+#
+# System-wide configuration should live in /etc/uwsgi.ini.
+#
+# Some good reading for uWSGI:
+# - http://uwsgi-docs.readthedocs.org/en/latest/ThingsToKnow.html
+# - http://uwsgi-docs.readthedocs.org/en/latest/Configuration.html
+
+[uwsgi]
+need-plugin = python
+
+# This slightly weird setup seems to be the only way to avoid
+# django.ImproperlyConfigured exceptions.
+pythonpath = /srv/baserock_openid_provider
+chdir = /srv/baserock_openid_provider/baserock_openid_provider
+wsgi = wsgi
+
+# These numbers are completely arbitrary; testing should be done to find
+# good values.
+processes = 1
+
+buffer-size = 32768
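Note: 'wsgi = wsgi' makes uWSGI import a module named 'wsgi' from the chdir directory above. That module is not part of this diff; presumably it is the stock Django entry point, along the lines of:

    # baserock_openid_provider/baserock_openid_provider/wsgi.py (assumed stock Django file)
    import os
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "baserock_openid_provider.settings")

    from django.core.wsgi import get_wsgi_application
    application = get_wsgi_application()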
diff --git a/baserock_storyboard/ansible-galaxy-roles.yaml b/baserock_storyboard/ansible-galaxy-roles.yaml
new file mode 100644
index 00000000..ffd7b6c3
--- /dev/null
+++ b/baserock_storyboard/ansible-galaxy-roles.yaml
@@ -0,0 +1,4 @@
+# Ansible Galaxy roles needed
+- palvarez89.storyboard
+- Mayeu.RabbitMQ,1.4.0
+- geerlingguy.mysql,1.5.0
diff --git a/baserock_storyboard/backup-snapshot.conf b/baserock_storyboard/backup-snapshot.conf
new file mode 100644
index 00000000..8a5dd8d3
--- /dev/null
+++ b/baserock_storyboard/backup-snapshot.conf
@@ -0,0 +1,4 @@
+services:
+ - mysql.service
+
+volume: /dev/vg0/database-storyboard
diff --git a/baserock_storyboard/instance-backup-config.yml b/baserock_storyboard/instance-backup-config.yml
new file mode 100644
index 00000000..0d011f8d
--- /dev/null
+++ b/baserock_storyboard/instance-backup-config.yml
@@ -0,0 +1,26 @@
+# Instance backup configuration for the baserock.org StoryBoard database.
+---
+- hosts: storyboard
+ gather_facts: false
+ sudo: yes
+ vars:
+ FRONTEND_IP: 192.168.222.143
+ tasks:
+ - name: backup-snapshot script
+ copy: src=../backup-snapshot dest=/usr/bin/backup-snapshot mode=755
+
+ - name: backup-snapshot config
+ copy: src=backup-snapshot.conf dest=/etc/backup-snapshot.conf
+
+ # We need to give the backup automation 'root' access, because it needs to
+ # manage system services, LVM volumes, and mounts, and because it needs to
+ # be able to read private data. The risk of having the backup key
+ # compromised is mitigated by only allowing it to execute the
+ # 'backup-snapshot' script, and limiting the hosts it can be used from.
+ - name: access for backup SSH key
+ authorized_key:
+ user: root
+ key: "{{ lookup('file', '../keys/backup.key.pub') }}"
+ # Quotes are important in these options; the OpenSSH server will reject
+ # the entry if the 'from' or 'command' values are not quoted.
+ key_options: 'from="{{FRONTEND_IP}}",no-agent-forwarding,no-port-forwarding,no-X11-forwarding,command="/usr/bin/backup-snapshot"'
diff --git a/baserock_storyboard/instance-config.yml b/baserock_storyboard/instance-config.yml
new file mode 100644
index 00000000..6a1b2cf5
--- /dev/null
+++ b/baserock_storyboard/instance-config.yml
@@ -0,0 +1,35 @@
+# Instance configuration for the Baserock MySQL database used by the StoryBoard host
+#
+# This script expects a volume to be available at /dev/vdb.
+---
+- hosts: storyboard
+ gather_facts: False
+ sudo: yes
+ vars:
+ - lv_size: 25g
+ - mountpoint: /var/lib/mysql
+ - lv_name: database-storyboard
+ tasks:
+ - name: install lvm2 tools
+ apt: name=lvm2 state=latest
+
+ - name: LVM logical volume group on /dev/vdb
+ lvg: vg=vg0 pvs=/dev/vdb
+
+# The following tasks are duplicated from:
+#- include: ../tasks/create-data-volume.yml lv_name=database-storyboard lv_size=25g mountpoint=/var/lib/mysql
+# because that playbook is not Ubuntu-compatible.
+
+ - name: logical volume for {{ lv_name }}
+ lvol: vg=vg0 lv={{ lv_name }} size={{ lv_size }}
+
+# This will NEVER overwrite an existing filesystem. Unless you add
+# 'force=yes' to the arguments. So don't do that. See:
+# http://docs.ansible.com/filesystem_module.html.
+#
+ - name: ext4 filesystem on /dev/vg0/{{ lv_name }}
+ filesystem: fstype=ext4 dev=/dev/vg0/{{ lv_name }}
+
+ - name: mount {{ lv_name }} logical volume
+ mount: src=/dev/vg0/{{ lv_name }} name={{ mountpoint }} fstype=ext4 state=mounted
+# End of duplication
diff --git a/baserock_storyboard/instance-storyboard-config.yml b/baserock_storyboard/instance-storyboard-config.yml
new file mode 100644
index 00000000..cf74f551
--- /dev/null
+++ b/baserock_storyboard/instance-storyboard-config.yml
@@ -0,0 +1,14 @@
+# Instance-specific configuration for the baserock.org StoryBoard instance.
+---
+- hosts: storyboard
+ vars_files:
+ - ../baserock_database/baserock_storyboard.database_password.yml
+ - ../baserock_database/root.database_password.yml
+ - storyboard-vars.yml
+ sudo: yes
+ roles:
+ # We are using a separate MySQL database here because StoryBoard is not yet
+ # compatible with MariaDB.
+ - { role: geerlingguy.mysql }
+ - { role: Mayeu.RabbitMQ }
+ - { role: palvarez89.storyboard }
diff --git a/baserock_storyboard/projects.yaml b/baserock_storyboard/projects.yaml
new file mode 100644
index 00000000..b70a333e
--- /dev/null
+++ b/baserock_storyboard/projects.yaml
@@ -0,0 +1,47 @@
+# Projects defined for Baserock Storyboard
+
+# This file lives in <http://git.baserock.org/baserock/baserock/infrastructure>.
+# This is a temporary version for the work-in-progress storyboard.
+
+# If you update this list, you'll need to log into storyboard.baserock.org and
+# run the following:
+#
+# sudo -u apache storyboard-db-manage \
+# --config-file /etc/storyboard/storyboard.conf \
+# load_projects ./projects.yaml
+
+- project: baserock/definitions
+ description: Baserock reference system definitions
+ use-storyboard: true
+
+- project: baserock/firehose
+ description: Firehose automated integration tool
+ use-storyboard: true
+
+- project: baserock/import
+ description: Baserock Import Tool
+ use-storyboard: true
+
+- project: baserock/lorry
+ description: Lorry mirroring tool
+ use-storyboard: true
+
+- project: baserock/lorry-controller
+ description: Lorry Controller scheduling and management tool
+ use-storyboard: true
+
+- project: baserock/morph
+ description: Morph build tool
+ use-storyboard: true
+
+- project: baserock/infrastructure
+ description: baserock.org infrastructure
+ use-storyboard: true
+
+- project: baserock/spec
+ description: Specification for Baserock definitions format
+ use-storyboard: true
+
+- project: baserock/wiki
+ description: Baserock Wiki at http://wiki.baserock.org/
+ use-storyboard: true
diff --git a/baserock_storyboard/storyboard-vars.yml b/baserock_storyboard/storyboard-vars.yml
new file mode 100644
index 00000000..1b7f3663
--- /dev/null
+++ b/baserock_storyboard/storyboard-vars.yml
@@ -0,0 +1,50 @@
+# For rabbitmq role
+rabbitmq_host: localhost
+rabbitmq_port: 5672
+rabbitmq_vhost: '/'
+rabbitmq_user: storyboard
+rabbitmq_user_password: storyboard
+rabbitmq_ssl: false
+rabbitmq_vhost_definitions:
+ - name: "{{ rabbitmq_vhost }}"
+rabbitmq_users_definitions:
+ - vhost: "{{ rabbitmq_vhost }}"
+ user: "{{ rabbitmq_user }}"
+ password: "{{ rabbitmq_user_password }}"
+rabbitmq_conf_tcp_listeners_address: '127.0.0.1'
+
+# For mysql role
+mysql_host: localhost
+mysql_port: 3306
+mysql_database: storyboard
+mysql_user: storyboard
+mysql_user_password: "{{ baserock_storyboard_password }}"
+mysql_root_password: "{{ root_password }}"
+mysql_databases:
+ - name: "{{ mysql_database }}"
+mysql_users:
+ - name: "{{ mysql_user }}"
+ host: "{{ mysql_host }}"
+ password: "{{ mysql_user_password }}"
+ priv: "{{ mysql_database }}.*:ALL"
+mysql_packages:
+ - mysql-server-5.6
+ - python-mysqldb
+
+storyboard_enable_email: 'True'
+storyboard_email_sender: StoryBoard (Do Not Reply) <do_not_reply@baserock.org>
+storyboard_email_smtp_host: 192.168.222.145
+storyboard_email_smtp_timeout: 10
+
+storyboard_fqdn: storyboard.baserock.org
+storyboard_openid_url: https://openid.baserock.org/openid/
+
+storyboard_projects: projects.yaml
+storyboard_superusers: users.yaml
+storyboard_mysql_user_password: "{{ baserock_storyboard_password }}"
+
+storyboard_ssl_cert: ../certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert
+storyboard_ssl_key: ../private/baserock.org-ssl-certificate-temporary-dsilverstone.pem
+storyboard_resolved_ssl_ca: ../certs/startcom-class2-ca-chain-certificate.cert
+
+storyboard_access_token_ttl: 31622400
diff --git a/baserock_storyboard/users.yaml b/baserock_storyboard/users.yaml
new file mode 100644
index 00000000..b42efca9
--- /dev/null
+++ b/baserock_storyboard/users.yaml
@@ -0,0 +1,4 @@
+- openid: https://openid.baserock.org/openid/pedroalvarez/
+ email: pedro.alvarez@codethink.co.uk
+- openid: https://openid.baserock.org/openid/samthursfield/
+ email: sam.thursfield@codethink.co.uk
diff --git a/baserock_trove/baserock_trove.morph b/baserock_trove/baserock_trove.morph
new file mode 100644
index 00000000..ed93896f
--- /dev/null
+++ b/baserock_trove/baserock_trove.morph
@@ -0,0 +1,25 @@
+name: trove.baserock.org-upgrade
+kind: cluster
+description: |
+ Deployment .morph for baserock.org Trove system.
+
+ Configuration of the system is handled separately, with a series of
+ Ansible playbooks that should be run after an instance of the system
+ is up and running. See the README for instructions.
+
+systems:
+- morph: systems/trove-system-x86_64.morph
+ deploy-defaults:
+ deploy:
+ gbo:
+ type: extensions/openstack
+ location: https://compute.datacentred.io:5000/v2.0
+
+ upgrade-type: extensions/ssh-rsync
+ upgrade-location: root@git.baserock.org
+
+ TROVE_GENERIC: yes
+ OPENSTACK_IMAGENAME: baserock_trove
+ CLOUD_INIT: true
+ DISK_SIZE: 3G
+ KERNEL_ARGS: console=tty0 console=ttyS0
diff --git a/baserock_trove/configure-trove.yml b/baserock_trove/configure-trove.yml
new file mode 100644
index 00000000..f832e810
--- /dev/null
+++ b/baserock_trove/configure-trove.yml
@@ -0,0 +1,51 @@
+# Instance configuration for Baserock Trove server.
+#
+# This configuration could easily be done using the 'TROVE_' variables of the
+# trove.configure extension, but it's better to deploy the Trove as
+# 'TROVE_GENERIC' and configure it later using this playbook, for two reasons:
+#
+# - Making upgrades easier. After initial deployment and post-deployment configuration,
+# you will only need to deploy a generic Trove as an upgrade.
+#
+# - Not storing private data in images in OpenStack. We have shared our images with
+# other tenants by mistake in the past, and I'd like to avoid this possibility.
+---
+- hosts: git
+ gather_facts: False
+ sudo: yes
+ tasks:
+
+ # To create the .pem file, simply concatenate
+ # certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert with
+ # the private key for that certificate (which is not committed to Git, of
+ # course).
+ - name: Install SSL certificate
+ copy:
+ src: ../private/baserock.org-ssl-certificate-temporary-dsilverstone.pem
+ dest: /etc/trove/baserock.pem
+ mode: 400
+
+ - name: Install CA chain certificate
+ copy:
+ src: ../certs/startcom-class2-ca-chain-certificate.cert
+ dest: /etc/trove/startcom-ca.pem
+
+ - name: Install trove.conf configuration file
+ copy:
+ src: trove.conf
+ dest: /etc/trove/trove.conf
+
+ - name: Copy ssh keys
+ copy:
+ src: ../private/{{ item }}
+ dest: /etc/trove/{{ item }}
+ with_items:
+ - admin.key.pub
+ - lorry.key
+ - lorry.key.pub
+ - worker.key.pub
+
+ - name: Restart the trove-setup service to configure the trove
+ service:
+ name: trove-setup
+ state: restarted
diff --git a/baserock_trove/instance-config.yml b/baserock_trove/instance-config.yml
new file mode 100644
index 00000000..d8006fc2
--- /dev/null
+++ b/baserock_trove/instance-config.yml
@@ -0,0 +1,28 @@
+# Instance configuration for Baserock Trove server.
+#
+# This script expects a volume to be available at /dev/vdb.
+---
+- hosts: git
+ gather_facts: False
+ sudo: yes
+ tasks:
+
+ - name: Create an ext4 filesystem in /dev/vdb
+ filesystem:
+ fstype: ext4
+ dev: /dev/vdb
+ opts: -L home
+
+ - name: mount home volume
+ mount:
+ src: LABEL=home
+ name: /home
+ fstype: ext4
+ state: mounted
+ register: home_mounted
+
+# Ansible doesn't remount /home properly, so reboot the system here.
+ - name: Reboot server to mount everything properly
+ shell: reboot
+ when: home_mounted|changed
+ ignore_errors: true
diff --git a/baserock_trove/trove.conf b/baserock_trove/trove.conf
new file mode 100644
index 00000000..4de7cd19
--- /dev/null
+++ b/baserock_trove/trove.conf
@@ -0,0 +1,14 @@
+HOSTNAME: firehose1
+LORRY_CONTROLLER_MINIONS: '4'
+LORRY_SSH_KEY: /etc/trove/lorry.key
+LORRY_SSH_PUBKEY: /etc/trove/lorry.key.pub
+TROVE_ADMIN_EMAIL: daniel.silverstone@codethink.co.uk
+TROVE_ADMIN_NAME: Daniel Silverstone
+TROVE_ADMIN_SSH_PUBKEY: /etc/trove/admin.key.pub
+TROVE_ADMIN_USER: danielsilverstone
+TROVE_COMPANY: Baserock
+TROVE_HOSTNAME: git.baserock.org
+TROVE_ID: baserock
+WORKER_SSH_PUBKEY: /etc/trove/worker.key.pub
+TROVE_SSL_PEMFILE: /etc/trove/baserock.pem
+TROVE_SSL_CA_FILE: /etc/trove/startcom-ca.pem
diff --git a/baserock_webserver/README.mdwn b/baserock_webserver/README.mdwn
new file mode 100644
index 00000000..29b62419
--- /dev/null
+++ b/baserock_webserver/README.mdwn
@@ -0,0 +1,15 @@
+# baserock.org - webserver
+
+Currently being used to host files for download.baserock.org.
+
+* Webserver config file:
+    - etc/cherokee/cherokee.conf
+
+* SELinux needs to be tweaked:
+    - etc/selinux/config
+
+* An attached volume which holds all the download files is mounted at
+  /srv/download.baserock.org:
+    - etc/fstab
+
diff --git a/baserock_webserver/etc/cherokee/cherokee.conf b/baserock_webserver/etc/cherokee/cherokee.conf
new file mode 100644
index 00000000..987f22ea
--- /dev/null
+++ b/baserock_webserver/etc/cherokee/cherokee.conf
@@ -0,0 +1,332 @@
+config!version = 001002103
+server!bind!1!port = 80
+server!collector = rrd
+server!group = cherokee
+server!keepalive = 1
+server!keepalive_max_requests = 500
+server!panic_action = /usr/bin/cherokee-panic
+server!pid_file = /var/run/cherokee.pid
+server!server_tokens = full
+server!timeout = 15
+server!user = cherokee
+vserver!1!collector!enabled = 1
+vserver!1!directory_index = index.html
+vserver!1!document_root = /var/www/cherokee
+vserver!1!error_writer!filename = /var/log/cherokee/error_log
+vserver!1!error_writer!type = file
+vserver!1!logger = combined
+vserver!1!logger!access!buffsize = 16384
+vserver!1!logger!access!filename = /var/log/cherokee/access_log
+vserver!1!logger!access!type = file
+vserver!1!nick = default
+vserver!1!rule!5!encoder!gzip = allow
+vserver!1!rule!5!handler = server_info
+vserver!1!rule!5!handler!type = just_about
+vserver!1!rule!5!match = directory
+vserver!1!rule!5!match!directory = /about
+vserver!1!rule!4!document_root = /usr/lib/cgi-bin
+vserver!1!rule!4!handler = cgi
+vserver!1!rule!4!match = directory
+vserver!1!rule!4!match!directory = /cgi-bin
+vserver!1!rule!3!document_root = /usr/share/cherokee/themes
+vserver!1!rule!3!handler = file
+vserver!1!rule!3!match = directory
+vserver!1!rule!3!match!directory = /cherokee_themes
+vserver!1!rule!2!document_root = /usr/share/cherokee/icons
+vserver!1!rule!2!handler = file
+vserver!1!rule!2!match = directory
+vserver!1!rule!2!match!directory = /cherokee_icons
+vserver!1!rule!1!handler = common
+vserver!1!rule!1!handler!iocache = 1
+vserver!1!rule!1!match = default
+vserver!2!collector!enabled = 1
+vserver!2!document_root = /srv/download.baserock.org
+vserver!2!nick = download.baserock.org
+vserver!2!rule!103!handler = common
+vserver!2!rule!103!handler!allow_dirlist = 1
+vserver!2!rule!103!handler!allow_pathinfo = 1
+vserver!2!rule!103!handler!theme = white
+vserver!2!rule!103!match = directory
+vserver!2!rule!103!match!directory = /baserock
+vserver!2!rule!3!document_root = /usr/share/cherokee/themes
+vserver!2!rule!3!handler = common
+vserver!2!rule!3!match = directory
+vserver!2!rule!3!match!directory = /cherokee_themes
+vserver!2!rule!2!document_root = /usr/share/cherokee/icons
+vserver!2!rule!2!handler = file
+vserver!2!rule!2!match = directory
+vserver!2!rule!2!match!directory = /cherokee_icons
+vserver!2!rule!1!handler = redir
+vserver!2!rule!1!handler!rewrite!10!regex = ^.*$
+vserver!2!rule!1!handler!rewrite!10!show = 1
+vserver!2!rule!1!handler!rewrite!10!substring = /baserock/
+vserver!2!rule!1!match = default
+vserver!3!directory_index = index.html
+vserver!3!document_root = /srv/docs.baserock.org
+vserver!3!nick = docs.baserock.org
+vserver!3!rule!3!document_root = /usr/share/cherokee/themes
+vserver!3!rule!3!handler = file
+vserver!3!rule!3!match = directory
+vserver!3!rule!3!match!directory = /cherokee_themes
+vserver!3!rule!2!document_root = /usr/share/cherokee/icons
+vserver!3!rule!2!handler = file
+vserver!3!rule!2!match = directory
+vserver!3!rule!2!match!directory = /cherokee_icons
+vserver!3!rule!1!handler = common
+vserver!3!rule!1!match = default
+icons!default = page_white.png
+icons!directory = folder.png
+icons!file!bomb.png = core
+icons!file!page_white_go.png = *README*
+icons!parent_directory = arrow_turn_left.png
+icons!suffix!camera.png = jpg,jpeg,jpe
+icons!suffix!cd.png = iso,ngr,cue
+icons!suffix!color_wheel.png = png,gif,xcf,bmp,pcx,tiff,tif,cdr,psd,xpm,xbm
+icons!suffix!control_play.png = bin,exe,com,msi,out
+icons!suffix!css.png = css
+icons!suffix!cup.png = java,class,jar
+icons!suffix!email.png = eml,mbox,box,email,mbx
+icons!suffix!film.png = avi,mpeg,mpe,mpg,mpeg3,dl,fli,qt,mov,movie,flv,webm
+icons!suffix!font.png = ttf
+icons!suffix!html.png = html,htm
+icons!suffix!music.png = au,snd,mid,midi,kar,mpga,mpega,mp2,mp3,sid,wav,aif,aiff,aifc,gsm,m3u,wma,wax,ra,rm,ram,pls,sd2,ogg
+icons!suffix!package.png = tar,gz,bz2,zip,rar,ace,lha,7z,dmg,cpk
+icons!suffix!page_white_acrobat.png = pdf
+icons!suffix!page_white_c.png = c,h,cpp
+icons!suffix!page_white_office.png = doc,ppt,xls
+icons!suffix!page_white_php.png = php
+icons!suffix!page_white_text.png = txt,text,rtf,sdw
+icons!suffix!printer.png = ps,eps
+icons!suffix!ruby.png = rb
+icons!suffix!script.png = sh,csh,ksh,tcl,tk,py,pl
+mime!application/bzip2!extensions = bz2
+mime!application/gzip!extensions = gz
+mime!application/hta!extensions = hta
+mime!application/java-archive!extensions = jar
+mime!application/java-serialized-object!extensions = ser
+mime!application/java-vm!extensions = class
+mime!application/json!extensions = json
+mime!application/mac-binhex40!extensions = hqx
+mime!application/msaccess!extensions = mdb
+mime!application/msword!extensions = doc,dot
+mime!application/octet-stream!extensions = bin
+mime!application/octetstream!extensions = ace
+mime!application/oda!extensions = oda
+mime!application/ogg!extensions = ogx
+mime!application/pdf!extensions = pdf
+mime!application/pgp-keys!extensions = key
+mime!application/pgp-signature!extensions = pgp
+mime!application/pics-rules!extensions = prf
+mime!application/postscript!extensions = ps,ai,eps
+mime!application/rar!extensions = rar
+mime!application/rdf+xml!extensions = rdf
+mime!application/rss+xml!extensions = rss
+mime!application/smil!extensions = smi,smil
+mime!application/vnd.mozilla.xul+xml!extensions = xul
+mime!application/vnd.ms-excel!extensions = xls,xlb,xlt
+mime!application/vnd.ms-pki.seccat!extensions = cat
+mime!application/vnd.ms-pki.stl!extensions = stl
+mime!application/vnd.ms-powerpoint!extensions = ppt,pps
+mime!application/vnd.oasis.opendocument.chart!extensions = odc
+mime!application/vnd.oasis.opendocument.database!extensions = odb
+mime!application/vnd.oasis.opendocument.formula!extensions = odf
+mime!application/vnd.oasis.opendocument.graphics!extensions = odg
+mime!application/vnd.oasis.opendocument.image!extensions = odi
+mime!application/vnd.oasis.opendocument.presentation!extensions = odp
+mime!application/vnd.oasis.opendocument.spreadsheet!extensions = ods
+mime!application/vnd.oasis.opendocument.text!extensions = odt
+mime!application/vnd.oasis.opendocument.text-master!extensions = odm
+mime!application/vnd.oasis.opendocument.text-web!extensions = oth
+mime!application/vnd.pkg5.info!extensions = p5i
+mime!application/vnd.visio!extensions = vsd
+mime!application/vnd.wap.wbxml!extensions = wbxml
+mime!application/vnd.wap.wmlc!extensions = wmlc
+mime!application/vnd.wap.wmlscriptc!extensions = wmlsc
+mime!application/x-7z-compressed!extensions = 7z
+mime!application/x-abiword!extensions = abw
+mime!application/x-apple-diskimage!extensions = dmg
+mime!application/x-bcpio!extensions = bcpio
+mime!application/x-bittorrent!extensions = torrent
+mime!application/x-cdf!extensions = cdf
+mime!application/x-cpio!extensions = cpio
+mime!application/x-csh!extensions = csh
+mime!application/x-debian-package!extensions = deb,udeb
+mime!application/x-director!extensions = dcr,dir,dxr
+mime!application/x-dvi!extensions = dvi
+mime!application/x-flac!extensions = flac
+mime!application/x-font!extensions = pfa,pfb,gsf,pcf,pcf.Z
+mime!application/x-freemind!extensions = mm
+mime!application/x-gnumeric!extensions = gnumeric
+mime!application/x-gtar!extensions = gtar,tgz,taz
+mime!application/x-gzip!extensions = gz,tgz
+mime!application/x-httpd-php!extensions = phtml,pht,php
+mime!application/x-httpd-php-source!extensions = phps
+mime!application/x-httpd-php3!extensions = php3
+mime!application/x-httpd-php3-preprocessed!extensions = php3p
+mime!application/x-httpd-php4!extensions = php4
+mime!application/x-internet-signup!extensions = ins,isp
+mime!application/x-iphone!extensions = iii
+mime!application/x-iso9660-image!extensions = iso
+mime!application/x-java-jnlp-file!extensions = jnlp
+mime!application/x-javascript!extensions = js
+mime!application/x-kchart!extensions = chrt
+mime!application/x-killustrator!extensions = kil
+mime!application/x-koan!extensions = skp,skd,skt,skm
+mime!application/x-kpresenter!extensions = kpr,kpt
+mime!application/x-kspread!extensions = ksp
+mime!application/x-kword!extensions = kwd,kwt
+mime!application/x-latex!extensions = latex
+mime!application/x-lha!extensions = lha
+mime!application/x-lzh!extensions = lzh
+mime!application/x-lzx!extensions = lzx
+mime!application/x-ms-wmd!extensions = wmd
+mime!application/x-ms-wmz!extensions = wmz
+mime!application/x-msdos-program!extensions = com,exe,bat,dll
+mime!application/x-msi!extensions = msi
+mime!application/x-netcdf!extensions = nc
+mime!application/x-ns-proxy-autoconfig!extensions = pac
+mime!application/x-nwc!extensions = nwc
+mime!application/x-object!extensions = o
+mime!application/x-oz-application!extensions = oza
+mime!application/x-pkcs7-certreqresp!extensions = p7r
+mime!application/x-pkcs7-crl!extensions = crl
+mime!application/x-python-code!extensions = pyc,pyo
+mime!application/x-quicktimeplayer!extensions = qtl
+mime!application/x-redhat-package-manager!extensions = rpm
+mime!application/x-sh!extensions = sh
+mime!application/x-shar!extensions = shar
+mime!application/x-shockwave-flash!extensions = swf,swfl
+mime!application/x-stuffit!extensions = sit,sea
+mime!application/x-sv4cpio!extensions = sv4cpio
+mime!application/x-sv4crc!extensions = sv4crc
+mime!application/x-tar!extensions = tar
+mime!application/x-tcl!extensions = tcl
+mime!application/x-tex-pk!extensions = pk
+mime!application/x-texinfo!extensions = texinfo,texi
+mime!application/x-trash!extensions = ~,bak,old,sik
+mime!application/x-troff!extensions = t,tr,roff
+mime!application/x-troff-man!extensions = man
+mime!application/x-troff-me!extensions = me
+mime!application/x-troff-ms!extensions = ms
+mime!application/x-ustar!extensions = ustar
+mime!application/x-x509-ca-cert!extensions = crt
+mime!application/x-xcf!extensions = xcf
+mime!application/x-xfig!extensions = fig
+mime!application/x-xpinstall!extensions = xpi
+mime!application/xhtml+xml!extensions = xhtml,xht
+mime!application/xml!extensions = xml,xsl
+mime!application/zip!extensions = zip
+mime!audio/basic!extensions = au,snd
+mime!audio/midi!extensions = mid,midi,kar
+mime!audio/mpeg!extensions = mpga,mpega,mp2,mp3,m4a
+mime!audio/ogg!extensions = ogg,oga
+mime!audio/prs.sid!extensions = sid
+mime!audio/x-aiff!extensions = aif,aiff,aifc
+mime!audio/x-gsm!extensions = gsm
+mime!audio/x-mpegurl!extensions = m3u
+mime!audio/x-ms-wax!extensions = wax
+mime!audio/x-ms-wma!extensions = wma
+mime!audio/x-pn-realaudio!extensions = ra,rm,ram
+mime!audio/x-realaudio!extensions = ra
+mime!audio/x-scpls!extensions = pls
+mime!audio/x-sd2!extensions = sd2
+mime!audio/x-wav!extensions = wav
+mime!chemical/x-cache!extensions = cac,cache
+mime!chemical/x-cache-csf!extensions = csf
+mime!chemical/x-cdx!extensions = cdx
+mime!chemical/x-cif!extensions = cif
+mime!chemical/x-cmdf!extensions = cmdf
+mime!chemical/x-cml!extensions = cml
+mime!chemical/x-compass!extensions = cpa
+mime!chemical/x-crossfire!extensions = bsd
+mime!chemical/x-csml!extensions = csml,csm
+mime!chemical/x-ctx!extensions = ctx
+mime!chemical/x-cxf!extensions = cxf,cef
+mime!chemical/x-isostar!extensions = istr,ist
+mime!chemical/x-jcamp-dx!extensions = jdx,dx
+mime!chemical/x-kinemage!extensions = kin
+mime!chemical/x-pdb!extensions = pdb,ent
+mime!chemical/x-swissprot!extensions = sw
+mime!chemical/x-vamas-iso14976!extensions = vms
+mime!chemical/x-vmd!extensions = vmd
+mime!chemical/x-xtel!extensions = xtel
+mime!chemical/x-xyz!extensions = xyz
+mime!image/gif!extensions = gif
+mime!image/jpeg!extensions = jpeg,jpg,jpe
+mime!image/pcx!extensions = pcx
+mime!image/png!extensions = png
+mime!image/svg+xml!extensions = svg,svgz
+mime!image/tiff!extensions = tiff,tif
+mime!image/vnd.djvu!extensions = djvu,djv
+mime!image/vnd.wap.wbmp!extensions = wbmp
+mime!image/x-icon!extensions = ico
+mime!image/x-ms-bmp!extensions = bmp
+mime!image/x-photoshop!extensions = psd
+mime!image/x-portable-anymap!extensions = pnm
+mime!image/x-portable-bitmap!extensions = pbm
+mime!image/x-portable-graymap!extensions = pgm
+mime!image/x-portable-pixmap!extensions = ppm
+mime!image/x-xbitmap!extensions = xbm
+mime!image/x-xpixmap!extensions = xpm
+mime!image/x-xwindowdump!extensions = xwd
+mime!model/iges!extensions = igs,iges
+mime!model/mesh!extensions = msh,mesh,silo
+mime!model/vrml!extensions = wrl,vrml
+mime!text/calendar!extensions = ics,icz
+mime!text/comma-separated-values!extensions = csv
+mime!text/css!extensions = css
+mime!text/h323!extensions = 323
+mime!text/html!extensions = html,htm,shtml
+mime!text/iuls!extensions = uls
+mime!text/mathml!extensions = mml
+mime!text/plain!extensions = asc,txt,text,diff,pot
+mime!text/richtext!extensions = rtx
+mime!text/rtf!extensions = rtf
+mime!text/scriptlet!extensions = sct,wsc
+mime!text/tab-separated-values!extensions = tsv
+mime!text/vnd.sun.j2me.app-descriptor!extensions = jad
+mime!text/vnd.wap.wml!extensions = wml
+mime!text/vnd.wap.wmlscript!extensions = wmls
+mime!text/x-boo!extensions = boo
+mime!text/x-c++hdr!extensions = h++,hpp,hxx,hh
+mime!text/x-c++src!extensions = c++,cpp,cxx,cc
+mime!text/x-chdr!extensions = h
+mime!text/x-csh!extensions = csh
+mime!text/x-csrc!extensions = c
+mime!text/x-dsrc!extensions = d
+mime!text/x-haskell!extensions = hs
+mime!text/x-java!extensions = java
+mime!text/x-literate-haskell!extensions = lhs
+mime!text/x-moc!extensions = moc
+mime!text/x-pascal!extensions = p,pas
+mime!text/x-pcs-gcd!extensions = gcd
+mime!text/x-perl!extensions = pl,pm
+mime!text/x-python!extensions = py
+mime!text/x-setext!extensions = etx
+mime!text/x-sh!extensions = sh
+mime!text/x-tcl!extensions = tcl,tk
+mime!text/x-tex!extensions = tex,ltx,sty,cls
+mime!text/x-vcalendar!extensions = vcs
+mime!text/x-vcard!extensions = vcf
+mime!video/dl!extensions = dl
+mime!video/dv!extensions = dif,dv
+mime!video/fli!extensions = fli
+mime!video/gl!extensions = gl
+mime!video/mp4!extensions = mp4
+mime!video/mpeg!extensions = mpeg,mpg,mpe
+mime!video/ogg!extensions = ogv
+mime!video/quicktime!extensions = qt,mov
+mime!video/vnd.mpegurl!extensions = mxu
+mime!video/webm!extensions = webm
+mime!video/x-flv!extensions = flv
+mime!video/x-la-asf!extensions = lsf,lsx
+mime!video/x-mng!extensions = mng
+mime!video/x-ms-asf!extensions = asf,asx
+mime!video/x-ms-wm!extensions = wm
+mime!video/x-ms-wmv!extensions = wmv
+mime!video/x-ms-wmx!extensions = wmx
+mime!video/x-ms-wvx!extensions = wvx
+mime!video/x-msvideo!extensions = avi
+mime!video/x-sgi-movie!extensions = movie
+mime!x-conference/x-cooltalk!extensions = ice
+mime!x-world/x-vrml!extensions = vrm,vrml,wrl
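
One way to sanity-check the download.baserock.org virtual server above: its
default rule (rule!1) rewrites every other path to /baserock/ as a visible
redirect (show = 1), while rule!103 serves directory listings under /baserock.
From any machine, something like:

    curl -sI http://download.baserock.org/ | head -n 3
    # should show an HTTP redirect whose Location points at /baserock/
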
diff --git a/baserock_webserver/etc/fstab b/baserock_webserver/etc/fstab
new file mode 100644
index 00000000..981c22b7
--- /dev/null
+++ b/baserock_webserver/etc/fstab
@@ -0,0 +1,11 @@
+
+#
+# /etc/fstab
+# Created by anaconda on Thu May 21 23:58:09 2015
+#
+# Accessible filesystems, by reference, are maintained under '/dev/disk'
+# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
+#
+UUID=01bc7316-b1f4-45c9-a23a-00c5a2336ef2 / ext4 defaults,noatime 1 1
+UUID=4f098a3a-24ed-4df1-839c-2413f29f9c00 /srv/download.baserock.org ext4 defaults,noatime 1 1
+
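
Once the volume is attached and this fstab is installed, the new entry can be
picked up without rebooting, for example:

    sudo mount -a                          # mount anything in fstab not yet mounted
    findmnt /srv/download.baserock.org     # confirm the volume is where we expect
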
diff --git a/baserock_webserver/etc/selinux/config b/baserock_webserver/etc/selinux/config
new file mode 100644
index 00000000..f6f1adbd
--- /dev/null
+++ b/baserock_webserver/etc/selinux/config
@@ -0,0 +1,12 @@
+# This file controls the state of SELinux on the system.
+# SELINUX= can take one of these three values:
+# enforcing - SELinux security policy is enforced.
+# permissive - SELinux prints warnings instead of enforcing.
+# disabled - No SELinux policy is loaded.
+#SELINUX=enforcing
+SELINUX=permissive
+# SELINUXTYPE= can take one of these three values:
+# targeted - Targeted processes are protected,
+# minimum - Modification of targeted policy. Only selected processes are protected.
+# mls - Multi Level Security protection.
+SELINUXTYPE=targeted
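
The SELINUX= setting above only takes effect at boot; to put the running
system into permissive mode immediately and confirm it:

    sudo setenforce 0    # 0 = permissive, 1 = enforcing
    getenforce           # should print "Permissive"
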
diff --git a/baserock_webserver/etc/systemd/system/generate-docs.baserock.org.service b/baserock_webserver/etc/systemd/system/generate-docs.baserock.org.service
new file mode 100644
index 00000000..93ec6e7f
--- /dev/null
+++ b/baserock_webserver/etc/systemd/system/generate-docs.baserock.org.service
@@ -0,0 +1,11 @@
+[Unit]
+Description = Generate docs.baserock.org content
+
+[Service]
+Type = oneshot
+ExecStart = /home/fedora/generate-docs.baserock.org
+User = fedora
+Group = fedora
+
+[Install]
+WantedBy = default.target
diff --git a/baserock_webserver/etc/systemd/system/generate-docs.baserock.org.timer b/baserock_webserver/etc/systemd/system/generate-docs.baserock.org.timer
new file mode 100644
index 00000000..d78f677b
--- /dev/null
+++ b/baserock_webserver/etc/systemd/system/generate-docs.baserock.org.timer
@@ -0,0 +1,8 @@
+[Unit]
+Description = Generate docs.baserock.org content
+
+[Timer]
+OnUnitActiveSec = 10min
+
+[Install]
+WantedBy = default.target
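
Note that a timer which only sets OnUnitActiveSec= has no reference point
until the matching service has been activated once (the service unit's
WantedBy on default.target covers this at boot). To kick off the first run by
hand and check the timer is ticking:

    sudo systemctl start generate-docs.baserock.org.service
    systemctl list-timers generate-docs.baserock.org.timer
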
diff --git a/baserock_webserver/generate-docs.baserock.org b/baserock_webserver/generate-docs.baserock.org
new file mode 100755
index 00000000..581ab5de
--- /dev/null
+++ b/baserock_webserver/generate-docs.baserock.org
@@ -0,0 +1,36 @@
+#!/bin/sh
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+# Generator script for docs.baserock.org website.
+#
+# This can be run as a cron job or Git post-receive hook, to get auto-updating
+# documentation built from Git repos.
+
+
+set -e
+
+cd ~
+if [ ! -d spec.git ]; then
+ git clone git://git.baserock.org/baserock/baserock/spec.git spec.git
+ cd spec.git
+else
+ cd spec.git
+ git checkout master
+ git pull origin master
+fi
+
+~/.local/bin/mkdocs build
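
The systemd service above runs this script as the 'fedora' user from
/home/fedora, so it can be exercised by hand the same way:

    sudo -u fedora /home/fedora/generate-docs.baserock.org
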
diff --git a/baserock_webserver/image-config.yml b/baserock_webserver/image-config.yml
new file mode 100644
index 00000000..1244faac
--- /dev/null
+++ b/baserock_webserver/image-config.yml
@@ -0,0 +1,24 @@
+# Configuration for Baserock webserver system image.
+#
+# This expects to be run on a Fedora 22 cloud image.
+---
+- hosts: webserver
+ gather_facts: false
+ sudo: yes
+ tasks:
+ - name: enable persistent journal
+ shell: mkdir /var/log/journal
+ args:
+ creates: /var/log/journal
+
+ - name: ensure system up to date
+ dnf: name=* state=latest
+
+ - name: SELinux configuration (setting it to 'permissive' mode)
+ copy: src=etc/selinux/config dest=/etc/selinux/
+
+ - name: Cherokee webserver package
+ dnf: name=cherokee state=latest
+
+ - name: Cherokee configuration
+ copy: src=etc/cherokee/cherokee.conf dest=/etc/cherokee/
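
A sketch of applying the image configuration, run against a fresh Fedora 22
cloud instance in the 'webserver' inventory group:

    ansible-playbook -i hosts baserock_webserver/image-config.yml
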
diff --git a/baserock_webserver/instance-docs.baserock.org-config.yml b/baserock_webserver/instance-docs.baserock.org-config.yml
new file mode 100644
index 00000000..e5910579
--- /dev/null
+++ b/baserock_webserver/instance-docs.baserock.org-config.yml
@@ -0,0 +1,43 @@
+# Configuration for docs.baserock.org site.
+#
+# This expects to be run after image-config.yml.
+---
+- hosts: webserver
+ gather_facts: False
+ tasks:
+ - name: /srv/docs.baserock.org/
+ file: path=/srv/docs.baserock.org/ owner=fedora state=directory
+
+ - name: git
+ dnf: name=git state=latest
+ sudo: yes
+
+ - name: mkdocs documentation generator
+ pip: name=mkdocs executable=pip3.4 extra_args="--user"
+
+ # A lot of the mkdocs themes are totally broken without JavaScript, which
+ # is stupid. This one looks a little bit ugly without JavaScript, but it
+ # is mostly usable.
+ - name: mkdocs 'material' theme
+ pip: name=mkdocs-material executable=pip3.4 extra_args="--user"
+
+ - name: generate-docs.baserock.org script
+ copy: src=generate-docs.baserock.org dest=/home/fedora/ mode=755
+
+ - name: generate-docs.baserock.org systemd unit
+ copy: src=etc/systemd/system/generate-docs.baserock.org.service dest=/etc/systemd/system/
+ sudo: yes
+
+ - name: generate-docs.baserock.org systemd timer
+ copy: src=etc/systemd/system/generate-docs.baserock.org.timer dest=/etc/systemd/system/
+ sudo: yes
+
+ # FIXME: it would be much cooler to monitor the output of `gerrit
+ # stream-events`, or have a git post-receive hook installed on
+ # git.baserock.org to trigger this.
+ - name: enable generate-docs.baserock.org timer
+ service: name=generate-docs.baserock.org.timer state=started enabled=yes
+ sudo: yes
+
+ - name: enable generate-docs.baserock.org service
+ service: name=generate-docs.baserock.org.service enabled=yes
+ sudo: yes
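
This playbook is applied the same way, after image-config.yml:

    ansible-playbook -i hosts baserock_webserver/instance-docs.baserock.org-config.yml
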
diff --git a/certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert b/certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert
new file mode 100644
index 00000000..78a80f5e
--- /dev/null
+++ b/certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert
@@ -0,0 +1,118 @@
+-----BEGIN CERTIFICATE-----
+MIIGkTCCBXmgAwIBAgIHBv5yWci2CjANBgkqhkiG9w0BAQsFADCBjDELMAkGA1UE
+BhMCSUwxFjAUBgNVBAoTDVN0YXJ0Q29tIEx0ZC4xKzApBgNVBAsTIlNlY3VyZSBE
+aWdpdGFsIENlcnRpZmljYXRlIFNpZ25pbmcxODA2BgNVBAMTL1N0YXJ0Q29tIENs
+YXNzIDIgUHJpbWFyeSBJbnRlcm1lZGlhdGUgU2VydmVyIENBMB4XDTE1MDMyNjIz
+MjEyM1oXDTE3MDMyNzA5MjcxOFowgZUxCzAJBgNVBAYTAkdCMRMwEQYDVQQIEwpN
+YW5jaGVzdGVyMRMwEQYDVQQHEwpNYW5jaGVzdGVyMRswGQYDVQQKExJEYW5pZWwg
+U2lsdmVyc3RvbmUxFzAVBgNVBAMUDiouYmFzZXJvY2sub3JnMSYwJAYJKoZIhvcN
+AQkBFhdwb3N0bWFzdGVyQGJhc2Vyb2NrLm9yZzCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAM4nR/R7lmJ9wdtp9AqG3AWjCPrKcZp1JSnCe6K5y4tqvT+A
+GJ6FyJHLzEaiYTpvTIALTQQEhqj/POG3wVZunj9MUJLuXRtFfyEmcHwmKDYhfxsW
+KrqI3N4K5WYZWA/W8Ly8shltp968ub0KP6PW9kPDgtAZ2Ds96T2bqTiVZkrS/pwq
+8mNtdIhxUs3L5j/8zGY5gpmFcDQiRTsHf6qwpce0xzK425WhPjdjeSIf/LTEEebI
+jdausyhssM1TSA3obfV5pfovRG/tr2FOt38WoH7qeImDWFW8s5l5089XbgD4nXiZ
+9RG4FIQnlaIYyBOIXw56ZjeOsQASNE8Z5J1icssCAwEAAaOCAuswggLnMAkGA1Ud
+EwQCMAAwCwYDVR0PBAQDAgOoMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcD
+ATAdBgNVHQ4EFgQU74kXy6Zm5IHkNEdJpyROwvWGBBUwHwYDVR0jBBgwFoAUEdsj
+Rf1UzGpxb4SKA9e+9wEvJoYwJwYDVR0RBCAwHoIOKi5iYXNlcm9jay5vcmeCDGJh
+c2Vyb2NrLm9yZzCCAVYGA1UdIASCAU0wggFJMAgGBmeBDAECAjCCATsGCysGAQQB
+gbU3AQIDMIIBKjAuBggrBgEFBQcCARYiaHR0cDovL3d3dy5zdGFydHNzbC5jb20v
+cG9saWN5LnBkZjCB9wYIKwYBBQUHAgIwgeowJxYgU3RhcnRDb20gQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwAwIBARqBvlRoaXMgY2VydGlmaWNhdGUgd2FzIGlzc3Vl
+ZCBhY2NvcmRpbmcgdG8gdGhlIENsYXNzIDIgVmFsaWRhdGlvbiByZXF1aXJlbWVu
+dHMgb2YgdGhlIFN0YXJ0Q29tIENBIHBvbGljeSwgcmVsaWFuY2Ugb25seSBmb3Ig
+dGhlIGludGVuZGVkIHB1cnBvc2UgaW4gY29tcGxpYW5jZSBvZiB0aGUgcmVseWlu
+ZyBwYXJ0eSBvYmxpZ2F0aW9ucy4wNQYDVR0fBC4wLDAqoCigJoYkaHR0cDovL2Ny
+bC5zdGFydHNzbC5jb20vY3J0Mi1jcmwuY3JsMIGOBggrBgEFBQcBAQSBgTB/MDkG
+CCsGAQUFBzABhi1odHRwOi8vb2NzcC5zdGFydHNzbC5jb20vc3ViL2NsYXNzMi9z
+ZXJ2ZXIvY2EwQgYIKwYBBQUHMAKGNmh0dHA6Ly9haWEuc3RhcnRzc2wuY29tL2Nl
+cnRzL3N1Yi5jbGFzczIuc2VydmVyLmNhLmNydDAjBgNVHRIEHDAahhhodHRwOi8v
+d3d3LnN0YXJ0c3NsLmNvbS8wDQYJKoZIhvcNAQELBQADggEBAI8iIwqDTd673Dvk
+JNon+bcUoTfWAOasthSAbE646Xly3G4o29egj+FciVRk37arEXU2tJYOt+ypBEgZ
+zWHwdf/uZaUFnxZCPbb1KuAeNnKMS2OWWrQGSwqw5aGiKR2ianDpNXsmNtroTjFM
+5rXCs0s5WWTBE2Jgw7shGG0GD1xaHUlMQg0vRQeRRFd/NHvEuKC7Ry8zKlMHRIbu
+Osr+lwq95GsC96vcXteL8ELnIuPUrWrixnqMPyMvi/01YA2P/r3BWlEmWTphVBrQ
+VoE7IHAL9DUzw0nxVDO/tvyqbAgpckat1zsIYQIAgo75ExXo+kJznJllT9BsDsfD
+5D3fiF8=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIGNDCCBBygAwIBAgIBGjANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
+Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwHhcNMDcxMDI0MjA1NzA5WhcNMTcxMDI0MjA1NzA5WjCB
+jDELMAkGA1UEBhMCSUwxFjAUBgNVBAoTDVN0YXJ0Q29tIEx0ZC4xKzApBgNVBAsT
+IlNlY3VyZSBEaWdpdGFsIENlcnRpZmljYXRlIFNpZ25pbmcxODA2BgNVBAMTL1N0
+YXJ0Q29tIENsYXNzIDIgUHJpbWFyeSBJbnRlcm1lZGlhdGUgU2VydmVyIENBMIIB
+IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4k85L6GMmoWtCA4IPlfyiAEh
+G5SpbOK426oZGEY6UqH1D/RujOqWjJaHeRNAUS8i8gyLhw9l33F0NENVsTUJm9m8
+H/rrQtCXQHK3Q5Y9upadXVACHJuRjZzArNe7LxfXyz6CnXPrB0KSss1ks3RVG7RL
+hiEs93iHMuAW5Nq9TJXqpAp+tgoNLorPVavD5d1Bik7mb2VsskDPF125w2oLJxGE
+d2H2wnztwI14FBiZgZl1Y7foU9O6YekO+qIw80aiuckfbIBaQKwn7UhHM7BUxkYa
+8zVhwQIpkFR+ZE3EMFICgtffziFuGJHXuKuMJxe18KMBL47SLoc6PbQpZ4rEAwID
+AQABo4IBrTCCAakwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
+VR0OBBYEFBHbI0X9VMxqcW+EigPXvvcBLyaGMB8GA1UdIwQYMBaAFE4L7xqkQFul
+F2mHMMo0aEPQQa7yMGYGCCsGAQUFBwEBBFowWDAnBggrBgEFBQcwAYYbaHR0cDov
+L29jc3Auc3RhcnRzc2wuY29tL2NhMC0GCCsGAQUFBzAChiFodHRwOi8vd3d3LnN0
+YXJ0c3NsLmNvbS9zZnNjYS5jcnQwWwYDVR0fBFQwUjAnoCWgI4YhaHR0cDovL3d3
+dy5zdGFydHNzbC5jb20vc2ZzY2EuY3JsMCegJaAjhiFodHRwOi8vY3JsLnN0YXJ0
+c3NsLmNvbS9zZnNjYS5jcmwwgYAGA1UdIAR5MHcwdQYLKwYBBAGBtTcBAgEwZjAu
+BggrBgEFBQcCARYiaHR0cDovL3d3dy5zdGFydHNzbC5jb20vcG9saWN5LnBkZjA0
+BggrBgEFBQcCARYoaHR0cDovL3d3dy5zdGFydHNzbC5jb20vaW50ZXJtZWRpYXRl
+LnBkZjANBgkqhkiG9w0BAQUFAAOCAgEAnQfh7pB2MWcWRXCMy4SLS1doRKWJwfJ+
+yyiL9edwd9W29AshYKWhdHMkIoDW2LqNomJdCTVCKfs5Y0ULpLA4Gmj0lRPM4EOU
+7Os5GuxXKdmZbfWEzY5zrsncavqenRZkkwjHHMKJVJ53gJD2uSl26xNnSFn4Ljox
+uMnTiOVfTtIZPUOO15L/zzi24VuKUx3OrLR2L9j3QGPV7mnzRX2gYsFhw3XtsntN
+rCEnME5ZRmqTF8rIOS0Bc2Vb6UGbERecyMhK76F2YC2uk/8M1TMTn08Tzt2G8fz4
+NVQVqFvnhX76Nwn/i7gxSZ4Nbt600hItuO3Iw/G2QqBMl3nf/sOjn6H0bSyEd6Si
+BeEX/zHdmvO4esNSwhERt1Axin/M51qJzPeGmmGSTy+UtpjHeOBiS0N9PN7WmrQQ
+oUCcSyrcuNDUnv3xhHgbDlePaVRCaHvqoO91DweijHOZq1X1BwnSrzgDapADDC+P
+4uhDwjHpb62H5Y29TiyJS1HmnExUdsASgVOb7KD8LJzaGJVuHjgmQid4YAjff20y
+6NjAbx/rJnWfk/x7G/41kNxTowemP4NVCitOYoIlzmYwXSzg+RkbdbmdmFamgyd6
+0Y+NWZP8P3PXLrQsldiL98l+x/ydrHIEH9LMF/TtNGCbnkqXBP7dcg5XVFEGcE3v
+qhykguAzx/Q=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
+Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9
+MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
+U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
+cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
+pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
+OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
+Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
+Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
+HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
+Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
+Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
+26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
+AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE
+FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j
+ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js
+LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM
+BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0
+Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy
+dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh
+cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh
+YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg
+dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp
+bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ
+YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT
+TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ
+9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8
+jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW
+FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz
+ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1
+ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L
+EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu
+L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq
+yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC
+O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V
+um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh
+NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14=
+-----END CERTIFICATE-----
diff --git a/certs/startcom-class2-ca-chain-certificate.cert b/certs/startcom-class2-ca-chain-certificate.cert
new file mode 100644
index 00000000..b1e8a06e
--- /dev/null
+++ b/certs/startcom-class2-ca-chain-certificate.cert
@@ -0,0 +1,78 @@
+-----BEGIN CERTIFICATE-----
+MIIF2TCCA8GgAwIBAgIHHKs2Ry2cUTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQG
+EwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERp
+Z2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDcxMDE0MjA1NzA5WhcNMjIxMDE0MjA1
+NzA5WjCBjDELMAkGA1UEBhMCSUwxFjAUBgNVBAoTDVN0YXJ0Q29tIEx0ZC4xKzAp
+BgNVBAsTIlNlY3VyZSBEaWdpdGFsIENlcnRpZmljYXRlIFNpZ25pbmcxODA2BgNV
+BAMTL1N0YXJ0Q29tIENsYXNzIDIgUHJpbWFyeSBJbnRlcm1lZGlhdGUgU2VydmVy
+IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4k85L6GMmoWtCA4I
+PlfyiAEhG5SpbOK426oZGEY6UqH1D/RujOqWjJaHeRNAUS8i8gyLhw9l33F0NENV
+sTUJm9m8H/rrQtCXQHK3Q5Y9upadXVACHJuRjZzArNe7LxfXyz6CnXPrB0KSss1k
+s3RVG7RLhiEs93iHMuAW5Nq9TJXqpAp+tgoNLorPVavD5d1Bik7mb2VsskDPF125
+w2oLJxGEd2H2wnztwI14FBiZgZl1Y7foU9O6YekO+qIw80aiuckfbIBaQKwn7UhH
+M7BUxkYa8zVhwQIpkFR+ZE3EMFICgtffziFuGJHXuKuMJxe18KMBL47SLoc6PbQp
+Z4rEAwIDAQABo4IBTDCCAUgwEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8E
+BAMCAQYwHQYDVR0OBBYEFBHbI0X9VMxqcW+EigPXvvcBLyaGMB8GA1UdIwQYMBaA
+FE4L7xqkQFulF2mHMMo0aEPQQa7yMGkGCCsGAQUFBwEBBF0wWzAnBggrBgEFBQcw
+AYYbaHR0cDovL29jc3Auc3RhcnRzc2wuY29tL2NhMDAGCCsGAQUFBzAChiRodHRw
+Oi8vYWlhLnN0YXJ0c3NsLmNvbS9jZXJ0cy9jYS5jcnQwMgYDVR0fBCswKTAnoCWg
+I4YhaHR0cDovL2NybC5zdGFydHNzbC5jb20vc2ZzY2EuY3JsMEMGA1UdIAQ8MDow
+OAYEVR0gADAwMC4GCCsGAQUFBwIBFiJodHRwOi8vd3d3LnN0YXJ0c3NsLmNvbS9w
+b2xpY3kucGRmMA0GCSqGSIb3DQEBCwUAA4ICAQBSyb3zvcv566LEMsqGcvzPv6cw
+tf2R99WB4SEErQBM/+mLJ9r/8iTN/B8Pf9LR5YGSI3gW7msDLp0ASE+ugmUuh2/u
+agdfS1Zu95ZGQebd/kW5Yiqainbprb3Wc7O8MSvQLNVsa7xqOiWHqailDdeF8Wxs
+BQ70wWjLuyqBWKU+mcSf9x+EjqB60U3buAGcDYE0yoL+I2JNP22kUsBMXvJpSLHy
+36xEZGmwRinHrfDywJ1oI4qoZ3EiF77OiXp2vlRsk1yL8Bpuru2OrsIFrhNX5rnn
+cMgzuJ79SjDjmNQTa+5Ouebs387qoJ52apeq6t80RUL12k3Wh3Zt/85phnqBX9uy
+T86w4GdgOUSwRRCFZZcSed/Ul9h4IQyEmM67T2sPGdqFaZFBbBccxrn2FK7yoYB6
+4umV7yKKzP842/whVuyA/W2ihZEpA+qrA70sYESCADXnFGx2O0CDVdVc38coo1nV
+iXg+D+AG/dVXiiQcp2I4HYWTS/mTf/NE+mOYnu0miZ32/vhDbCX/B/kSPJ4RsNOA
+7uyrOwykcgOSFDbpvuaKOpGLrQwGqLODgm+p9TY5giMMjur9XH7TS1wz02dIz07u
+y2NwYWdV67vcnAt6QxRISap5RbaPviyQZxz4nFaSlTAwHoPaW1yuVS11tmsROMlR
+RNvbaAxIU4U67YaZSw==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
+Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9
+MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
+U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
+cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
+pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
+OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
+Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
+Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
+HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
+Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
+Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
+26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
+AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE
+FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j
+ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js
+LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM
+BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0
+Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy
+dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh
+cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh
+YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg
+dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp
+bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ
+YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT
+TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ
+9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8
+jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW
+FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz
+ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1
+ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L
+EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu
+L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq
+yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC
+O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V
+um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh
+NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14=
+-----END CERTIFICATE-----
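
A quick way to check that the CA chain above validates the server certificate
(the chain file includes the StartCom root, so -CAfile alone is enough; expect
an "OK" on success):

    openssl verify \
        -CAfile certs/startcom-class2-ca-chain-certificate.cert \
        certs/baserock.org-ssl-certificate-temporary-dsilverstone.full.cert
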
diff --git a/firewall.yaml b/firewall.yaml
new file mode 100644
index 00000000..64c9200c
--- /dev/null
+++ b/firewall.yaml
@@ -0,0 +1,403 @@
+# OpenStack firewall setup for baserock.org
+#
+# This rather ugly and verbose Ansible script defines the firewall
+# configuration for the baserock.org cloud.
+#
+# OpenStack security group rules are all ACCEPT rules, and an instance
+# can be in multiple security groups.
+#
+# Note that many systems don't have a floating IP assigned and thus are
+# isolated from the internet. Requests to them are proxied by the
+# frontend-haproxy system.
+#
+# This playbook requires the 'neutron_sec_group' module, available in
+# <https://github.com/openstack-ansible/openstack-ansible-modules/>.
+
+- hosts: localhost
+ tasks:
+ - name: default security group
+ neutron_sec_group:
+ name: default
+ description: Allow all outgoing traffic, and allow incoming ICMP (ping) and SSH connections
+ state: present
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+
+ rules:
+ - direction: egress
+ port_range_min: 0
+ port_range_max: 255
+ ethertype: IPv4
+ protocol: icmp
+ remote_ip_prefix: 0.0.0.0/0
+
+ - direction: egress
+ port_range_min: 1
+ port_range_max: 65535
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ - direction: egress
+ port_range_min: 1
+ port_range_max: 65535
+ ethertype: IPv4
+ protocol: udp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # ICMP: allow ping!
+ - direction: ingress
+ port_range_min: 0
+ port_range_max: 255
+ ethertype: IPv4
+ protocol: icmp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 22: Allow SSH access to all instances.
+ - direction: ingress
+ port_range_min: 22
+ port_range_max: 22
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # This group is an alternative to 'default' for machines that should be
+ # prevented from connecting to the outside world.
+ - name: internal-only security group
+ neutron_sec_group:
+ name: internal-only
+ description: Allow outgoing connections only to local network, and incoming ICMP and SSH
+ state: present
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+
+ rules:
+ - direction: egress
+ port_range_min: 0
+ port_range_max: 255
+ ethertype: IPv4
+ protocol: icmp
+ remote_ip_prefix: 192.168.222.0/24
+
+ - direction: egress
+ port_range_min: 1
+ port_range_max: 65535
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 192.168.222.0/24
+
+ - direction: egress
+ port_range_min: 1
+ port_range_max: 65535
+ ethertype: IPv4
+ protocol: udp
+ remote_ip_prefix: 192.168.222.0/24
+
+ # ICMP: allow ping!
+ - direction: ingress
+ port_range_min: 0
+ port_range_max: 255
+ ethertype: IPv4
+ protocol: icmp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 22: Allow SSH access to all instances.
+ - direction: ingress
+ port_range_min: 22
+ port_range_max: 22
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ - name: open security group
+ neutron_sec_group:
+ name: open
+ description: Allow inbound traffic on all ports. DO NOT USE EXCEPT FOR TESTING!!!
+ state: present
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+
+ rules:
+ - direction: ingress
+ port_range_min: 1
+ port_range_max: 65535
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ - direction: ingress
+ port_range_min: 1
+ port_range_max: 65535
+ ethertype: IPv4
+ protocol: udp
+ remote_ip_prefix: 0.0.0.0/0
+
+ - name: database-mysql security group
+ neutron_sec_group:
+ name: database-mysql
+ description: Allow internal machines to access MariaDB database.
+ state: present
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+ rules:
+ # 3306: MariaDB
+ - direction: ingress
+ port_range_min: 3306
+ port_range_max: 3306
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ - name: gerrit security group
+ neutron_sec_group:
+ name: gerrit
+ description: Allow access to Gerrit SSH daemon port 29418, plus HTTP, HTTPS and Git protocol.
+ state: present
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+ rules:
+ # 80: HTTP, for browsing repos with cgit, and Git-over-HTTP.
+ - direction: ingress
+ port_range_min: 80
+ port_range_max: 80
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 443: HTTPS, for browsing repos with cgit, and Git-over-HTTPS.
+ - direction: ingress
+ port_range_min: 443
+ port_range_max: 443
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 8080: HTTP, for Gerrit web frontend
+ - direction: ingress
+ port_range_min: 8080
+ port_range_max: 8080
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 9418: Git.
+ - direction: ingress
+ port_range_min: 9418
+ port_range_max: 9418
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 29418: Gerrit SSH daemon.
+ - direction: ingress
+ port_range_min: 29418
+ port_range_max: 29418
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ - name: git-server security group
+ neutron_sec_group:
+ name: git-server
+ description: Allow inbound SSH, HTTP, HTTPS and Git.
+ state: present
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+ rules:
+ # 22: SSH, for Git-over-SSH access.
+ - direction: ingress
+ port_range_min: 22
+ port_range_max: 22
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 80: HTTP, for browsing repos with cgit, and Git-over-HTTP.
+ - direction: ingress
+ port_range_min: 80
+ port_range_max: 80
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 443: HTTPS, for browsing repos with cgit, and Git-over-HTTPS.
+ - direction: ingress
+ port_range_min: 443
+ port_range_max: 443
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 9418: Git.
+ - direction: ingress
+ port_range_min: 9418
+ port_range_max: 9418
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ - name: internal mail relay security group
+ neutron_sec_group:
+ name: internal-mail-relay
+ description: Allow receiving internal-only connections on port 25 for SMTP
+ state: present
+
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+
+ rules:
+ # 25: SMTP, for sending emails.
+ - direction: ingress
+ port_range_min: 25
+ port_range_max: 25
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 192.168.222.0/24
+
+ - name: Mason x86 security group
+ neutron_sec_group:
+ name: mason-x86
+ description: Allow inbound HTTP and HTTPS, and cache server fetches from port 8080.
+ state: present
+
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+
+ rules:
+ # 80: HTTP
+ - direction: ingress
+ port_range_min: 80
+ port_range_max: 80
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 443: HTTPS
+ - direction: ingress
+ port_range_min: 443
+ port_range_max: 443
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 7878: morph distbuild controller port. This rule allows a devel
+ # system inside the Baserock cloud to use the Mason instances for
+ # distbuilding, which is useful when building a Baserock release
+ # among other things.
+ - direction: ingress
+ port_range_min: 7878
+ port_range_max: 7878
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 192.168.222.0/24
+
+ # 8080: morph-cache-server server port. The x86 Masons use
+ # cache.baserock.org as the cache server for their distbuild
+ # networks. So cache.baserock.org needs to be able to connect to
+ # them on this port to fetch artifacts.
+ - direction: ingress
+ port_range_min: 8080
+ port_range_max: 8080
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 185.43.218.0/24
+ # It'd be nice to limit access by security group, but it doesn't
+ # seem to actually work. Perhaps this is because we access via the
+ # external IP instead of the internal one.
+ #remote_group_id: "{{ default_group.sec_group.id }}"
+
+ - name: shared-artifact-cache security group
+ neutron_sec_group:
+ name: shared-artifact-cache
+ description: Allow inbound HTTP, HTTPS and read-only Morph artifact cache access. Allow writable Morph artifact cache access from internal IPs.
+ state: present
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+ rules:
+ # 80: HTTP for cache server web frontend (at the time of writing, this
+ # is a useless and empty cgit page, but we may improve it in future).
+ - direction: ingress
+ port_range_min: 80
+ port_range_max: 80
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 443: HTTPS.
+ - direction: ingress
+ port_range_min: 443
+ port_range_max: 443
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 8080: Read-only Morph artifact cache server.
+ - direction: ingress
+ port_range_min: 8080
+ port_range_max: 8080
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 8081: 'writable cache server' port. Anyone who can connect
+ # to this port can delete or overwrite cached artifacts.
+ #
+ # FIXME: because the Masons use cache.baserock.org instead of
+ # 192.168.0.16 to access the shared artifact cache, we need to
+ # permit traffic from our public IP range. This provides a
+ # theoretical attack vector from other tenancies, so we should
+ # fix the Masons and remove this rule.
+ - direction: ingress
+ port_range_min: 8081
+ port_range_max: 8081
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 185.43.218.0/24
+ # It'd be nice to limit access by security group, but it doesn't
+ # seem to actually work. Perhaps this is because we access via the
+ # external IP instead of the internal one.
+ #remote_group_id: "{{ default_group.sec_group.id }}"
+
+ - name: web-server security group
+ neutron_sec_group:
+ name: web-server
+ description: Allow inbound HTTP and HTTPS.
+ state: present
+ auth_url: "{{ ansible_env.OS_AUTH_URL }}"
+ login_username: "{{ ansible_env.OS_USERNAME }}"
+ login_password: "{{ ansible_env.OS_PASSWORD }}"
+ login_tenant_name: "{{ ansible_env.OS_TENANT_NAME }}"
+ rules:
+ # 80: HTTP
+ - direction: ingress
+ port_range_min: 80
+ port_range_max: 80
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
+
+ # 443: HTTPS
+ - direction: ingress
+ port_range_min: 443
+ port_range_max: 443
+ ethertype: IPv4
+ protocol: tcp
+ remote_ip_prefix: 0.0.0.0/0
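
The playbook reads OpenStack credentials from the environment (the OS_*
variables referenced in each task), so a typical run sources a credentials
file first; with the neutron_sec_group module on Ansible's module path,
roughly:

    . ./openrc.sh    # hypothetical name for your OpenStack credentials file
    ansible-playbook -i hosts firewall.yaml
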
diff --git a/hosts b/hosts
new file mode 120000
index 00000000..c6d24b13
--- /dev/null
+++ b/hosts
@@ -0,0 +1 @@
+baserock_hosts \ No newline at end of file
diff --git a/scripts/cycle.sh b/scripts/cycle.sh
deleted file mode 100755
index c0e2aa67..00000000
--- a/scripts/cycle.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/sh
-# Copyright (C) 2014 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-usage() {
- echo "Usage: cycle.sh some-system some-cluster [newversion]"
- echo
- echo "This builds and deploys the current checked out version of"
- echo "some-system, applying it as a self-upgrade to the system you"
- echo "are working in, using configuration from some-cluster."
- echo "The upgrade is labelled TEST by default, or [newversion] if"
- echo "specified, and is set to be the default for next boot."
-}
-
-if [ -z "$1" ] || [ -z "$2" ] || [ ! -z "$4" ] ; then
- usage
- exit 1
-fi
-
-newversion=TEST
-if [ ! -z "$3" ] ; then
- newversion=$3
- if (echo "$newversion" | grep ' ' > /dev/null 2>&1) ; then
- echo 'Version label must not contain spaces.'
- exit 1
- fi
-fi
-
-if system-version-manager get-running | grep -q "^$newversion$"; then
- echo "You are currently running the $newversion system."
- echo "Maybe you want to boot into a different system version?"
- exit 1
-fi
-
-set -e
-set -v
-
-runningversion=`system-version-manager get-running`
-system-version-manager set-default $runningversion
-if system-version-manager list | grep -q "^$newversion$"; then
- system-version-manager remove $newversion
-fi
-
-morph gc
-morph build "$1"
-
-sed -i "s|^- morph: .*$|- morph: $1|" "$2"
-morph deploy --upgrade "$2" self.HOSTNAME=$(hostname) self.VERSION_LABEL=$newversion
-system-version-manager list
diff --git a/scripts/licensecheck.pl b/scripts/licensecheck.pl
deleted file mode 100644
index 5b6d0d33..00000000
--- a/scripts/licensecheck.pl
+++ /dev/null
@@ -1,604 +0,0 @@
-#!/usr/bin/perl
-# This script was originally based on the script of the same name from
-# the KDE SDK (by dfaure@kde.org)
-#
-# This version is
-# Copyright (C) 2007, 2008 Adam D. Barratt
-# Copyright (C) 2012 Francesco Poli
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <https://www.gnu.org/licenses/>.
-
-=head1 NAME
-
-licensecheck - simple license checker for source files
-
-=head1 SYNOPSIS
-
-B<licensecheck> B<--help>|B<--version>
-
-B<licensecheck> [B<--no-conf>] [B<--verbose>] [B<--copyright>]
-[B<-l>|B<--lines=>I<N>] [B<-i>|B<--ignore=>I<regex>] [B<-c>|B<--check=>I<regex>]
-[B<-m>|B<--machine>] [B<-r>|B<--recursive>]
-I<list of files and directories to check>
-
-=head1 DESCRIPTION
-
-B<licensecheck> attempts to determine the license that applies to each file
-passed to it, by searching the start of the file for text belonging to
-various licenses.
-
-If any of the arguments passed are directories, B<licensecheck> will add
-the files contained within to the list of files to process.
-
-=head1 OPTIONS
-
-=over 4
-
-=item B<--verbose>, B<--no-verbose>
-
-Specify whether to output the text being processed from each file before
-the corresponding license information.
-
-Default is to be quiet.
-
-=item B<-l=>I<N>, B<--lines=>I<N>
-
-Specify the number of lines of each file's header which should be parsed
-for license information. (Default is 60).
-
-=item B<-i=>I<regex>, B<--ignore=>I<regex>
-
-When processing the list of files and directories, the regular
-expression specified by this option will be used to indicate those which
-should not be considered (e.g. backup files, VCS metadata).
-
-=item B<-r>, B<--recursive>
-
-Specify that the contents of directories should be added
-recursively.
-
-=item B<-c=>I<regex>, B<--check=>I<regex>
-
-Specify a pattern against which filenames will be matched in order to
-decide which files to check the license of.
-
-The default includes common source files.
-
-=item B<--copyright>
-
-Also display copyright text found within the file
-
-=item B<-m>, B<--machine>
-
-Display the information in a machine readable way, i.e. in the form
-<file><tab><license>[<tab><copyright>] so that it can be easily sorted
-and/or filtered, e.g. with the B<awk> and B<sort> commands.
-Note that using the B<--verbose> option will kill the readability.
-
-=item B<--no-conf>, B<--noconf>
-
-Do not read any configuration files. This can only be used as the first
-option given on the command line.
-
-=back
-
-=head1 CONFIGURATION VARIABLES
-
-The two configuration files F</etc/devscripts.conf> and
-F<~/.devscripts> are sourced by a shell in that order to set
-configuration variables. Command line options can be used to override
-configuration file settings. Environment variable settings are
-ignored for this purpose. The currently recognised variables are:
-
-=over 4
-
-=item B<LICENSECHECK_VERBOSE>
-
-If this is set to I<yes>, then it is the same as the B<--verbose> command
-line parameter being used. The default is I<no>.
-
-=item B<LICENSECHECK_PARSELINES>
-
-If this is set to a positive number then the specified number of lines
-at the start of each file will be read whilst attempting to determine
-the license(s) in use. This is equivalent to the B<--lines> command line
-option.
-
-=back
-
-=head1 LICENSE
-
-This code is copyright by Adam D. Barratt <I<adam@adam-barratt.org.uk>>,
-all rights reserved; based on a script of the same name from the KDE
-SDK, which is copyright by <I<dfaure@kde.org>>.
-This program comes with ABSOLUTELY NO WARRANTY.
-You are free to redistribute this code under the terms of the GNU
-General Public License, version 2 or later.
-
-=head1 AUTHOR
-
-Adam D. Barratt <adam@adam-barratt.org.uk>
-
-=cut
-
-use strict;
-use warnings;
-use Getopt::Long qw(:config gnu_getopt);
-use File::Basename;
-
-my $progname = basename($0);
-
-# From dpkg-source
-my $default_ignore_regex = '
-# Ignore general backup files
-(?:^|/).*~$|
-# Ignore emacs recovery files
-(?:^|/)\.#.*$|
-# Ignore vi swap files
-(?:^|/)\..*\.swp$|
-# Ignore baz-style junk files or directories
-(?:^|/),,.*(?:$|/.*$)|
-# File-names that should be ignored (never directories)
-(?:^|/)(?:DEADJOE|\.cvsignore|\.arch-inventory|\.bzrignore|\.gitignore)$|
-# File or directory names that should be ignored
-(?:^|/)(?:CVS|RCS|\.pc|\.deps|\{arch\}|\.arch-ids|\.svn|\.hg|_darcs|\.git|
-\.shelf|_MTN|\.bzr(?:\.backup|tags)?)(?:$|/.*$)
-';
-
-# Take out comments and newlines
-$default_ignore_regex =~ s/^#.*$//mg;
-$default_ignore_regex =~ s/\n//sg;
-
-my $default_check_regex = '\.(c(c|pp|xx)?|h(h|pp|xx)?|f(77|90)?|go|p(l|m)|xs|sh|php|py(|x)|rb|java|js|vala|el|sc(i|e)|cs|pas|inc|dtd|xsl|mod|m|tex|mli?|(c|l)?hs)$';
-
-my $modified_conf_msg;
-
-my %OPT=(
- verbose => '',
- lines => '',
- noconf => '',
- ignore => '',
- check => '',
- recursive => 0,
- copyright => 0,
- machine => 0,
-);
-
-my $def_lines = 60;
-
-# Read configuration files and then command line
-# This is boilerplate
-
-if (@ARGV and $ARGV[0] =~ /^--no-?conf$/) {
- $modified_conf_msg = " (no configuration files read)";
- shift;
-} else {
- my @config_files = ('/etc/devscripts.conf', '~/.devscripts');
- my %config_vars = (
- 'LICENSECHECK_VERBOSE' => 'no',
- 'LICENSECHECK_PARSELINES' => $def_lines,
- );
- my %config_default = %config_vars;
-
- my $shell_cmd;
- # Set defaults
- foreach my $var (keys %config_vars) {
- $shell_cmd .= qq[$var="$config_vars{$var}";\n];
- }
- $shell_cmd .= 'for file in ' . join(" ", @config_files) . "; do\n";
- $shell_cmd .= '[ -f $file ] && . $file; done;' . "\n";
- # Read back values
- foreach my $var (keys %config_vars) { $shell_cmd .= "echo \$$var;\n" }
- my $shell_out = `/bin/bash -c '$shell_cmd'`;
- @config_vars{keys %config_vars} = split /\n/, $shell_out, -1;
-
- # Check validity
- $config_vars{'LICENSECHECK_VERBOSE'} =~ /^(yes|no)$/
- or $config_vars{'LICENSECHECK_VERBOSE'} = 'no';
- $config_vars{'LICENSECHECK_PARSELINES'} =~ /^[1-9][0-9]*$/
- or $config_vars{'LICENSECHECK_PARSELINES'} = $def_lines;
-
- foreach my $var (sort keys %config_vars) {
- if ($config_vars{$var} ne $config_default{$var}) {
- $modified_conf_msg .= " $var=$config_vars{$var}\n";
- }
- }
- $modified_conf_msg ||= " (none)\n";
- chomp $modified_conf_msg;
-
- $OPT{'verbose'} = $config_vars{'LICENSECHECK_VERBOSE'} eq 'yes' ? 1 : 0;
- $OPT{'lines'} = $config_vars{'LICENSECHECK_PARSELINES'};
-}
-
-GetOptions(\%OPT,
- "help|h",
- "check|c=s",
- "copyright",
- "ignore|i=s",
- "lines|l=i",
- "machine|m",
- "noconf|no-conf",
- "recursive|r",
- "verbose!",
- "version|v",
-) or die "Usage: $progname [options] filelist\nRun $progname --help for more details\n";
-
-$OPT{'lines'} = $def_lines if $OPT{'lines'} !~ /^[1-9][0-9]*$/;
-$OPT{'ignore'} = $default_ignore_regex if ! length $OPT{'ignore'};
-$OPT{'check'} = $default_check_regex if ! length $OPT{'check'};
-
-if ($OPT{'noconf'}) {
- fatal("--no-conf is only acceptable as the first command-line option!");
-}
-if ($OPT{'help'}) { help(); exit 0; }
-if ($OPT{'version'}) { version(); exit 0; }
-
-die "Usage: $progname [options] filelist\nRun $progname --help for more details\n" unless @ARGV;
-
-$OPT{'lines'} = $def_lines if not defined $OPT{'lines'};
-
-my @files = ();
-my @find_args = ();
-my $files_count = @ARGV;
-
-push @find_args, qw(-maxdepth 1) unless $OPT{'recursive'};
-push @find_args, qw(-follow -type f -print);
-
-while (@ARGV) {
- my $file = shift @ARGV;
-
- if (-d $file) {
- open my $FIND, '-|', 'find', $file, @find_args
- or die "$progname: couldn't exec find: $!\n";
-
- while (<$FIND>) {
- chomp;
- next unless m%$OPT{'check'}%;
- # Skip empty files
- next if (-z $_);
- push @files, $_ unless m%$OPT{'ignore'}%;
- }
- close $FIND;
- } else {
- next unless ($files_count == 1) or $file =~ m%$OPT{'check'}%;
- push @files, $file unless $file =~ m%$OPT{'ignore'}%;
- }
-}
-
-while (@files) {
- my $file = shift @files;
- my $content = '';
- my $copyright_match;
- my $copyright = '';
- my $license = '';
- my %copyrights;
-
- open (my $F, '<' ,$file) or die "Unable to access $file\n";
- while (<$F>) {
- last if ($. > $OPT{'lines'});
- $content .= $_;
- $copyright_match = parse_copyright($_);
- if ($copyright_match) {
- $copyrights{lc("$copyright_match")} = "$copyright_match";
- }
- }
- close($F);
-
- $copyright = join(" / ", reverse sort values %copyrights);
-
- print qq(----- $file header -----\n$content----- end header -----\n\n)
- if $OPT{'verbose'};
-
- $license = parselicense(clean_comments($content));
-
- if ($OPT{'machine'}) {
- print "$file\t$license";
- print "\t" . ($copyright or "*No copyright*") if $OPT{'copyright'};
- print "\n";
- } else {
- print "$file: ";
- print "*No copyright* " unless $copyright;
- print $license . "\n";
- print " [Copyright: " . $copyright . "]\n"
- if $copyright and $OPT{'copyright'};
- print "\n" if $OPT{'copyright'};
- }
-}
-
-sub parse_copyright {
- my $copyright = '';
- my $match;
-
- my $copyright_indicator_regex = '
- (?:copyright # The full word
- |copr\. # Legally-valid abbreviation
- |\x{00a9} # Unicode character COPYRIGHT SIGN
- |\xc2\xa9 # UTF-8 encoding of the copyright sign
- |\(c\) # Legally-null representation of sign
- )';
- my $copyright_disindicator_regex = '
- \b(?:info(?:rmation)? # Discussing copyright information
- |(notice|statement|claim|string)s? # Discussing the notice
- |and|or|is|in|to # Part of a sentence
- |(holder|owner)s? # Part of a sentence
- |ownership # Part of a sentence
- )\b';
- my $copyright_predisindicator_regex = '(
- ^[#]define\s+.*\(c\) # #define foo(c) -- not copyright
- )';
-
- if ( ! m%$copyright_predisindicator_regex%ix) {
-
- if (m%$copyright_indicator_regex(?::\s*|\s+)(\S.*)$%ix) {
- $match = $1;
-
- # Ignore lines matching "see foo for copyright information" etc.
- if ($match !~ m%^\s*$copyright_disindicator_regex%ix) {
- # De-cruft
- $match =~ s/([,.])?\s*$//;
- $match =~ s/$copyright_indicator_regex//igx;
- $match =~ s/^\s+//;
- $match =~ s/\s{2,}/ /g;
- $match =~ s/\\@/@/g;
- $copyright = $match;
- }
- }
- }
-
- return $copyright;
-}
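-# Worked example (editor's illustration, hypothetical input): with $_ set to
-#   "# Copyright (C) 2008 Jane Doe <jane@example.org>"
-# parse_copyright() captures the text after the copyright indicator, strips
-# the "Copyright"/"(C)" tokens and trailing punctuation, and returns
-#   "2008 Jane Doe <jane@example.org>"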
-
-sub clean_comments {
- local $_ = shift or return q{};
-
- # Remove generic comments: look for 4 or more lines beginning with
- # regular comment pattern and trim it. Fall back to old algorithm
- # if no such pattern found.
- my @matches = m/^\s*([^a-zA-Z0-9\s]{1,3})\s\w/mg;
- if (@matches >= 4) {
- my $comment_re = qr/\s*[\Q$matches[0]\E]{1,3}\s*/;
- s/^$comment_re//mg;
- }
-
- # Remove Fortran comments
- s/^[cC] //gm;
- tr/\t\r\n/ /;
-
- # Remove C / C++ comments
- s#(\*/|/[/*])##g;
- tr% A-Za-z.,@;0-9\(\)/-%%cd;
- tr/ //s;
-
- return $_;
-}
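-# Example (editor's illustration): clean_comments() turns a header line such as
-#   "/* This program is free software; you can redistribute it */"
-# into roughly
-#   " This program is free software; you can redistribute it "
-# i.e. the comment markers are removed and whitespace is squeezed before the
-# text is handed to parselicense().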
-
-sub help {
- print <<"EOF";
-Usage: $progname [options] filename [filename ...]
-Valid options are:
- --help, -h Display this message
- --version, -v Display version and copyright info
- --no-conf, --noconf Don't read devscripts config files; must be
- the first option given
- --verbose Display the header of each file before its
- license information
- --lines, -l Specify how many lines of the file header
- should be parsed for license information
- (Default: $def_lines)
- --check, -c Specify a pattern indicating which files should
- be checked
- (Default: '$default_check_regex')
- --machine, -m Display in a machine readable way (good for awk)
- --recursive, -r Add the contents of directories recursively
- --copyright Also display the file's copyright
- --ignore, -i Specify that files / directories matching the
- regular expression should be ignored when
- checking files
- (Default: '$default_ignore_regex')
-
-Default settings modified by devscripts configuration files:
-$modified_conf_msg
-EOF
-}
-
-sub version {
- print <<"EOF";
-This is $progname, from the Debian devscripts package, version ###VERSION###
-Copyright (C) 2007, 2008 by Adam D. Barratt <adam\@adam-barratt.org.uk>; based
-on a script of the same name from the KDE SDK by <dfaure\@kde.org>.
-
-This program comes with ABSOLUTELY NO WARRANTY.
-You are free to redistribute this code under the terms of the
-GNU General Public License, version 2, or (at your option) any
-later version.
-EOF
-}
-
-sub parselicense {
- my ($licensetext) = @_;
-
- my $gplver = "";
- my $extrainfo = "";
- my $license = "";
-
- if ($licensetext =~ /version ([^, ]+?)[.,]? (?:\(?only\)?.? )?(?:of the GNU (Affero )?(Lesser |Library )?General Public License )?(as )?published by the Free Software Foundation/i or
- $licensetext =~ /GNU (?:Affero )?(?:Lesser |Library )?General Public License (?:as )?published by the Free Software Foundation[;,] version ([^, ]+?)[.,]? /i) {
-
- $gplver = " (v$1)";
- } elsif ($licensetext =~ /GNU (?:Affero )?(?:Lesser |Library )?General Public License, version (\d+(?:\.\d+)?)[ \.]/) {
- $gplver = " (v$1)";
- } elsif ($licensetext =~ /either version ([^ ]+)(?: of the License)?, or \(at your option\) any later version/) {
- $gplver = " (v$1 or later)";
- } elsif ($licensetext =~ /either version ([^ ]+)(?: of the License)?, or \(at your option\) version (\d(?:[\.-]\d+)*)/) {
- $gplver = " (v$1 or v$2)";
- }
-
- if ($licensetext =~ /(?:675 Mass Ave|59 Temple Place|51 Franklin Steet|02139|02111-1307)/i) {
- $extrainfo = " (with incorrect FSF address)$extrainfo";
- }
-
- if ($licensetext =~ /permission (?:is (also granted|given))? to link (the code of )?this program with (any edition of )?(Qt|the Qt library)/i) {
- $extrainfo = " (with Qt exception)$extrainfo"
- }
-
- if ($licensetext =~ /(All changes made in this file will be lost|DO NOT (EDIT|delete this file)|Generated (automatically|by|from)|generated.*file)/i) {
- $license = "GENERATED FILE";
- }
-
- if ($licensetext =~ /((is free software.? )?you can redistribute (it|them) and\/or modify (it|them)|is licensed) under the terms of (version [^ ]+ of )?the (GNU (Library |Lesser )General Public License|LGPL)/i) {
- $license = "LGPL$gplver$extrainfo $license";
- }
-
- if ($licensetext =~ /is free software.? you can redistribute (it|them) and\/or modify (it|them) under the terms of the (GNU Affero General Public License|AGPL)/i) {
- $license = "AGPL$gplver$extrainfo $license";
- }
-
- if ($licensetext =~ /(is free software.? )?you (can|may) redistribute (it|them) and\/or modify (it|them) under the terms of (?:version [^ ]+ (?:\(?only\)? )?of )?the GNU General Public License/i) {
- $license = "GPL$gplver$extrainfo $license";
- }
-
- if ($licensetext =~ /is distributed under the terms of the GNU General Public License,/
- and length $gplver) {
- $license = "GPL$gplver$extrainfo $license";
- }
-
- if ($licensetext =~ /is distributed.*terms.*GPL/) {
- $license = "GPL (unversioned/unknown version) $license";
- }
-
- if ($licensetext =~ /This file is part of the .*Qt GUI Toolkit. This file may be distributed under the terms of the Q Public License as defined/) {
- $license = "QPL (part of Qt) $license";
- } elsif ($licensetext =~ /may (be distributed|redistribute it) under the terms of the Q Public License/) {
- $license = "QPL $license";
- }
-
- if ($licensetext =~ /opensource\.org\/licenses\/mit-license\.php/) {
- $license = "MIT/X11 (BSD like) $license";
- } elsif ($licensetext =~ /Permission is hereby granted, free of charge, to any person obtaining a copy of this software and(\/or)? associated documentation files \(the (Software|Materials)\), to deal in the (Software|Materials)/) {
- $license = "MIT/X11 (BSD like) $license";
- } elsif ($licensetext =~ /Permission is hereby granted, without written agreement and without license or royalty fees, to use, copy, modify, and distribute this software and its documentation for any purpose/) {
- $license = "MIT/X11 (BSD like) $license";
- }
-
- if ($licensetext =~ /Permission to use, copy, modify, and(\/or)? distribute this software for any purpose with or without fee is hereby granted, provided.*copyright notice.*permission notice.*all copies/) {
- $license = "ISC $license";
- }
-
- if ($licensetext =~ /THIS SOFTWARE IS PROVIDED .*AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY/) {
- if ($licensetext =~ /All advertising materials mentioning features or use of this software must display the following acknowledge?ment.*This product includes software developed by/i) {
- $license = "BSD (4 clause) $license";
- } elsif ($licensetext =~ /(The name(?:\(s\))? .*? may not|Neither the (names? .*?|authors?) nor the names of( (its|their|other|any))? contributors may) be used to endorse or promote products derived from this software/i) {
- $license = "BSD (3 clause) $license";
- } elsif ($licensetext =~ /Redistributions of source code must retain the above copyright notice/i) {
- $license = "BSD (2 clause) $license";
- } else {
- $license = "BSD $license";
- }
- }
-
- if ($licensetext =~ /Mozilla Public License,? (Version|v\.) (\d+(?:\.\d+)?)/) {
- $license = "MPL (v$2) $license";
- }
-
- if ($licensetext =~ /Released under the terms of the Artistic License ([^ ]+)/) {
- $license = "Artistic (v$1) $license";
- }
-
- if ($licensetext =~ /is free software under the Artistic [Ll]icense/) {
- $license = "Artistic $license";
- }
-
- if ($licensetext =~ /This program is free software; you can redistribute it and\/or modify it under the same terms as Perl itself/) {
- $license = "Perl $license";
- }
-
- if ($licensetext =~ /under the Apache License, Version ([^ ]+)/) {
- $license = "Apache (v$1) $license";
- }
-
- if ($licensetext =~ /(THE BEER-WARE LICENSE)/i) {
- $license = "Beerware $license";
- }
-
- if ($licensetext =~ /This source file is subject to version ([^ ]+) of the PHP license/) {
- $license = "PHP (v$1) $license";
- }
-
- if ($licensetext =~ /under the terms of the CeCILL /) {
- $license = "CeCILL $license";
- }
-
- if ($licensetext =~ /under the terms of the CeCILL-([^ ]+) /) {
- $license = "CeCILL-$1 $license";
- }
-
- if ($licensetext =~ /under the SGI Free Software License B/) {
- $license = "SGI Free Software License B $license";
- }
-
- if ($licensetext =~ /is in the public domain/i) {
- $license = "Public domain $license";
- }
-
- if ($licensetext =~ /terms of the Common Development and Distribution License(, Version ([^(]+))? \(the License\)/) {
- $license = "CDDL " . ($1 ? "(v$2) " : '') . $license;
- }
-
- if ($licensetext =~ /Microsoft Permissive License \(Ms-PL\)/) {
- $license = "Ms-PL $license";
- }
-
- if ($licensetext =~ /Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license \(the \"Software\"\)/ or
- $licensetext =~ /Boost Software License([ ,-]+Version ([^ ]+)?(\.))/i) {
- $license = "BSL " . ($1 ? "(v$2) " : '') . $license;
- }
-
- if ($licensetext =~ /PYTHON SOFTWARE FOUNDATION LICENSE (VERSION ([^ ]+))/i) {
- $license = "PSF " . ($1 ? "(v$2) " : '') . $license;
- }
-
- if ($licensetext =~ /The origin of this software must not be misrepresented.*Altered source versions must be plainly marked as such.*This notice may not be removed or altered from any source distribution/ or
- $licensetext =~ /see copyright notice in zlib\.h/) {
- $license = "zlib/libpng $license";
- } elsif ($licensetext =~ /This code is released under the libpng license/) {
- $license = "libpng $license";
- }
-
- if ($licensetext =~ /Do What The Fuck You Want To Public License, Version ([^, ]+)/i) {
- $license = "WTFPL (v$1) $license";
- }
-
- if ($licensetext =~ /Do what The Fuck You Want To Public License/i) {
- $license = "WTFPL $license";
- }
-
- if ($licensetext =~ /(License WTFPL|Under (the|a) WTFPL)/i) {
- $license = "WTFPL $license";
- }
-
- $license = "UNKNOWN" if (!length($license));
-
- # Remove trailing spaces.
- $license =~ s/\s+$//;
-
- return $license;
-}
-
-sub fatal {
- my ($pack,$file,$line);
- ($pack,$file,$line) = caller();
- (my $msg = "$progname: fatal error at line $line:\n@_\n") =~ tr/\0//d;
- $msg =~ s/\n\n$/\n/;
- die $msg;
-}
diff --git a/scripts/organize-morphologies.py b/scripts/organize-morphologies.py
deleted file mode 100755
index 3072c8f8..00000000
--- a/scripts/organize-morphologies.py
+++ /dev/null
@@ -1,255 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2014-2016 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-import json
-import morphlib
-import os
-import subprocess
-import sys
-import urllib
-import urllib2
-import urlparse
-import yaml
-import re
-import errno
-
-# scriptslib (assumed to be the sibling helper module in scripts/ that
-# provides parse_repo_alias) is used by download_chunks() below; without
-# this import that call would raise a NameError.
-import scriptslib
-
-''' organize-morphologies.py:
-Tool for organizing morphologies in definitions.
-
-This script will move:
- - cluster morphologies into clusters directory
- - system morphologies into systems directory
- - stratum morphologies into strata directory
-
-This script will download the chunk morphologies for every stratum
-and place them into the strata/stratum_which_the_chunk_belongs_to
-directory.
-
-It also updates any morphology field that points to a morphology
-which has been moved.
-'''
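-# For example (editor's note, hypothetical file names), a definitions tree
-# that previously kept all morphologies at the top level would be
-# rearranged roughly like this:
-#   release.morph              -> clusters/release.morph
-#   devel-system-x86_64.morph  -> systems/devel-system-x86_64.morph
-#   build-essential.morph      -> strata/build-essential.morph
-#   chunks of build-essential  -> strata/build-essential/<chunk>.morph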
-
-
-def make_request(path):
- server_url = 'http://git.baserock.org:8080/'
- url = urlparse.urljoin(server_url, '/1.0/%s' % path)
- handle = urllib2.urlopen(url)
- return handle.read()
-
-def quote(*args):
- return tuple(urllib.quote(string) for string in args)
-
-def cat_file(repo, ref, filename):
- return make_request('files?repo=%s&ref=%s&filename=%s' %
- quote(repo, ref, filename))
-
-# NOTE: This function reimplements part of morphlib's loader
-def sanitise_morphology_path(morph_field, morph_kind, belongs_to='None'):
- '''Return the path of a morphology given its name or its morph field.
- The path depends on the morphology's kind and, for chunks, on the
- stratum the chunk belongs to.
- '''
- # Dictionary mapping a morphology's kind to its
- # directory in definitions.git
- morph_dir = { 'chunk': 'chunks', 'stratum': 'strata',
- 'system':'systems', 'cluster': 'clusters'}
- # For chunk morphologies we need to know which stratum
- # the chunk belongs to.
- if morph_kind == 'chunk':
- if belongs_to == '':
- raise morphlib.Error('Chunk morphologies need the stratum name '
- 'to create the path. Please add the stratum '
- 'this morphology belongs to')
- # Get the name of the chunk, which we assume is the last
- # component of the morph field
- if '/' in morph_field:
- morph_field = os.path.basename(morph_field)
-
- # Add the stratum name to the chunk name
- morph_field = os.path.join(belongs_to, morph_field)
-
- # Reset the kind to stratum because the chunk's path
- # contains the stratum name.
- morph_kind = 'stratum'
-
- # Add the morphology path to the morph field.
- if not morph_field.startswith(morph_dir[morph_kind]):
- morph_field = os.path.join(morph_dir[morph_kind], morph_field)
-
- # Add the .morph suffix if it is missing.
- if not morph_field.endswith('.morph'):
- morph_field = morph_field + '.morph'
-
- return morph_field
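-# Editor's sketch of the resulting mappings (hypothetical names):
-#   sanitise_morphology_path('base-system', 'system')
-#       -> 'systems/base-system.morph'
-#   sanitise_morphology_path('gcc', 'chunk', belongs_to='core')
-#       -> 'strata/core/gcc.morph'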
-
-def create_directory(name, path):
- directory = os.path.join(path, name)
- try:
- os.makedirs(directory)
- except OSError as err:
- if err.errno != errno.EEXIST:
- raise err
- else:
- pass
- return directory
-
-def move_file(morph, directory, path, loader):
- if not morph.filename.startswith(directory):
- filename = os.path.basename(morph.filename)
- new_location = os.path.join(path, filename)
- print '\nMoving %s into %s' % (filename, new_location)
- subprocess.call(['git', 'mv', morph.filename, new_location])
- morph.filename = new_location
- loader.unset_defaults(morph)
- loader.save_to_file(morph.filename, morph)
-
-def load_and_fix_chunk(chunk_str, loader, name):
- try:
- chunk_morph = loader.load_from_string(chunk_str)
- except morphlib.morphloader.InvalidFieldError as err:
- if "comments" in str(err):
- # This error occurs because some old morphologies contain the
- # field "comments" instead of "description". Replacing the
- # "comments" field with "description" allows the morphology to
- # pass the parse_morphology_text check, ready to be written to a file.
- fixed_chunk = loader.parse_morphology_text(chunk_str, name)
- fixed_chunk['description'] = fixed_chunk.pop('comments')
- print "WARNING: Invalid 'comments' field in " \
- "%s corrected to 'description'" % name
- chunk_morph = load_and_fix_chunk(str(fixed_chunk), loader, name)
- elif "buildsystem" in str(err):
- # This error is caused by a typo in a morphology which
- # has a field "buildsystem" instead of "build-system".
- fixed_chunk = loader.parse_morphology_text(chunk_str, name)
- fixed_chunk['build-system'] = fixed_chunk.pop('buildsystem')
- print "WARNING: Invalid 'buildsystem' field in %s" \
- "corrected to 'build-system'" % name
- chunk_morph = load_and_fix_chunk(str(fixed_chunk), loader, name)
- else:
- print "ERROR: %s in chunk %s" %(err, name)
- raise err
- except morphlib.morphloader.MorphologyNotYamlError as err:
- print "WARNING: %s in chunk %s is not valid YAML, " \
- "attempting to fix..." %(err, name)
- # This error occurs because some old morphologies are written
- # in JSON and contain '\t' characters. When trying to load such a
- # morphology, load_from_string fails in parse_morphology_text.
- # Removing these characters lets load_from_string load the
- # morphology and translate it into correct YAML.
- fixed_chunk = chunk_str.replace('\t','')
- print "INFO: %s successfully fixed" % name
- chunk_morph = load_and_fix_chunk(fixed_chunk, loader, name)
- return chunk_morph
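-# Editor's illustration of the 'comments' repair above (hypothetical input):
-# a chunk morphology containing
-#   name: foo
-#   kind: chunk
-#   comments: an old-style chunk
-# is reloaded with the 'comments' field renamed to 'description' before
-# being written back out.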
-
-def move_clusters(morphs, path, loader):
- kind = 'system'
- directory = 'clusters'
- # Move cluster morphologies to clusters folder fixing their dependent
- # morphologies which are systems.
- full_path = create_directory(directory, path)
- for morph in morphs:
- all_systems = morph['systems'][:]
- for system in morph['systems']:
- all_systems.extend(system.get('subsystems', []))
- # Add the correct path to the morph fields for systems and subsystems
- for field in all_systems:
- field['morph'] = sanitise_morphology_path(field['morph'], kind)
- move_file(morph, directory, full_path, loader)
-
-def move_systems(morphs, path, loader):
- kind = 'stratum'
- directory = 'systems'
- # Move system morphologies to systems folder fixing their dependent
- # morphologies which are strata.
- full_path = create_directory(directory, path)
- for morph in morphs:
- # Add name field and the correct path to the stratum on the morph
- # fields in strata.
- for field in morph['strata']:
- field['name'] = os.path.basename(field['morph'])
- field['morph'] = sanitise_morphology_path(field['morph'], kind)
- move_file(morph, directory, full_path, loader)
-
-def download_chunks(morph, loader):
- # Download chunks morphologies defined on the stratum and
- # add them to the directory tree.
- for chunk in morph['chunks']:
- name = chunk['name'] + '.morph'
- try:
- chunk['morph'] = sanitise_morphology_path(chunk['morph'], 'chunk', morph['name'])
- except KeyError as err:
- if 'morph' in str(err):
- chunk['morph'] = sanitise_morphology_path(chunk['name'], 'chunk', morph['name'])
- else:
- raise err
- ref = chunk['ref']
- repo = scriptslib.parse_repo_alias(chunk['repo'])
- try:
- print "\nDownloading %s from %s into %s" %(name, repo, chunk['morph'])
- chunk_str = cat_file(repo, ref, name)
- except urllib2.HTTPError as err:
- # If there is no morphology in the repository we assume that the
- # build system will be autodetected, so we don't have to create a
- # new morphology unless autodetection (the fallback system) is
- # disabled.
- if err.code == 404:
- print 'INFO: Morph will fall back to build-time ' \
- 'autodetection for %s' %(name)
- # Remove morph field from autodetected chunks
- del chunk['morph']
- else:
- loaded_chunk = load_and_fix_chunk(chunk_str, loader, name)
- loader.unset_defaults(loaded_chunk)
- loader.save_to_file(chunk['morph'], loaded_chunk)
-
-def move_strata(morphs, path, loader):
- # Create strata directory
- strata_dir = 'strata/'
- strata_path = create_directory(strata_dir, path)
- for morph in morphs:
- # Create the stratum directory into which its chunks are downloaded.
- stratum_path = strata_path + morph['name']
- stratum_dir = create_directory(stratum_path, path)
-
- # Download the chunks which belong to the stratum
- download_chunks(morph, loader)
-
- # Add the correct path to the dependent stratum morphologies in build-depends.
- for build_depends in morph['build-depends']:
- build_depends['morph'] = sanitise_morphology_path(build_depends['morph'], 'stratum')
- # Move stratum morphologies to strata
- move_file(morph, strata_dir, strata_path, loader)
-
-def main():
- # Load all morphologies in the definitions repo
- sb = morphlib.sysbranchdir.open_from_within('.')
- loader = morphlib.morphloader.MorphologyLoader()
- morphs = [m for m in sb.load_all_morphologies(loader)]
-
- # Classify the morphologies according to their kind field
- morphologies = { kind: [m for m in morphs if m['kind'] == kind]
- for kind in ('chunk', 'stratum', 'system', 'cluster') }
-
- for kind, morphs in morphologies.iteritems():
- print 'There are: %d %s.\n' %(len(morphs), kind)
-
- # Get the path from definitions repo
- definitions_repo = sb.get_git_directory_name(sb.root_repository_url)
-
- # Move the morphologies to their directories
- move_clusters(morphologies['cluster'], definitions_repo, loader)
- move_systems(morphologies['system'], definitions_repo, loader)
- move_strata(morphologies['stratum'], definitions_repo, loader)
-
-main()
diff --git a/scripts/release-build b/scripts/release-build
deleted file mode 100755
index cb62f661..00000000
--- a/scripts/release-build
+++ /dev/null
@@ -1,192 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2014 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-
-import cliapp
-import morphlib
-import os
-import subprocess
-import sys
-import time
-
-
-class Build(object):
- '''A single distbuild instance.'''
-
- def __init__(self, name, arch, app):
- self.system_name = name
-
- controller_netloc = app.controllers[arch].split(':')
- controller_args = [
- '--controller-initiator-address=%s' % controller_netloc[0],
- ]
- if len(controller_netloc) > 1:
- controller_args.append(
- '--controller-initiator-port=%s' % controller_netloc[1])
-
- self.command = ['morph', 'distbuild', '--local-changes=ignore']
- self.command += controller_args + [self.system_name]
-
- def start(self):
- self.process = subprocess.Popen(self.command)
-
- def completed(self):
- return (self.process.poll() is not None)
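-# Editor's sketch: with controllers {'x86_64': 'distbuild.example.com:7878'}
-# (a hypothetical host), Build('systems/devel.morph', 'x86_64', app).command
-# would be:
-#   ['morph', 'distbuild', '--local-changes=ignore',
-#    '--controller-initiator-address=distbuild.example.com',
-#    '--controller-initiator-port=7878',
-#    'systems/devel.morph']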
-
-
-class ReleaseApp(cliapp.Application):
-
- '''Cliapp app that handles distbuilding and deploying a cluster.'''
-
- def add_settings(self):
- self.settings.string_list(['controllers'],
- 'a list of distbuild controllers and their '
- 'architecture')
-
- self.settings.string(['trove-host'],
- 'hostname of Trove instance')
-
- self.settings.string(['artifact-cache-server'],
- 'server to fetch artifacts from', default=None)
-
- self.settings.string(['release-number'],
- 'Baserock version of the systems being built',
- default='yy.ww')
-
- def error(self, message):
- raise cliapp.AppException(message)
-
- def check_args(self, args):
- if len(args) == 0:
- self.error(
- "Please pass the name of the release cluster (e.g. "
- "clusters/release.morph)")
-
- if len(args) > 1:
- self.error("Too many arguments given.")
-
- def process_args(self, args):
- '''Process the command line'''
- self.controllers = {}
- controllers_list = self.settings['controllers']
-
- for item in controllers_list:
- arch, controller = item.split(':', 1)
- self.controllers[arch] = controller
-
- defs_repo = morphlib.definitions_repo.open(
- '.', search_for_root=True)
- self.loader = defs_repo.get_morphology_loader()
- self.finder = morphlib.morphologyfinder.MorphologyFinder(defs_repo)
-
- self.check_args(args)
-
- cluster_name = args[0]
- cluster, cluster_path = self.load_morphology(cluster_name)
-
- builds = self.prepare_builds(cluster)
- for build in builds:
- build.start()
-
- while not all(build.completed() for build in builds):
- time.sleep(1)
-
- fail = False
- for build in builds:
- if build.process.returncode != 0:
- fail = True
- sys.stderr.write(
- 'Building failed for %s\n' % build.system_name)
- if fail:
- raise cliapp.AppException('Building of systems failed')
-
- if not os.path.exists('release'):
- os.mkdir('release')
- self.deploy_images(cluster, cluster_path)
-
- def load_morphology(self, name, kind=None):
- path = morphlib.util.sanitise_morphology_path(name)
- morph = self.loader.load_from_string(
- self.finder.read_file(path))
- if kind:
- assert morph['kind'] == kind
- return morph, path
-
- def iterate_systems(self, system_list):
- for system in system_list:
- yield system['morph']
- if 'subsystems' in system:
- for subsystem in self.iterate_systems(system['subsystems']):
- yield subsystem
-
- def prepare_builds(self, cluster):
- '''Prepare a list of builds'''
- systems = set(self.iterate_systems(cluster['systems']))
- builds = []
- for system_name in systems:
- system, _ = self.load_morphology(system_name)
- if system['arch'] in self.controllers:
- builds.append(Build(system_name, system['arch'], self))
- else:
- print("Unable to build %s: no %s distbuild available" %
- (system_name, system['arch']))
- return builds
-
- def deploy_images(self, cluster, cluster_path):
- version_label = 'baserock-%s' % self.settings['release-number']
- outputs = {}
-
- for system in cluster['systems']:
- morphology_name = system['morph']
- morphology = self.load_morphology(morphology_name)[0]
- if morphology['arch'] not in self.controllers:
- continue
-
- for deployment_name, deployment_info in system['deploy'].iteritems():
- # The release.morph cluster must specify the file's basename,
- # i.e. name plus extension. This script knows the name, but it
- # can't find out the appropriate file extension without second-
- # guessing the behaviour of the write extensions.
- basename = deployment_info['location']
-
- if '/' in basename or basename.startswith(version_label):
- raise cliapp.AppException(
- 'In %s: system %s.location should be just the base name, '
- 'e.g. "%s.img"' % (cluster_path, deployment_name, deployment_name))
-
- filename = os.path.join('release', '%s-%s' % (version_label, basename))
- if os.path.exists(filename):
- self.output.write('Reusing existing deployment of %s\n' % filename)
- else:
- self.output.write('Creating %s from release.morph\n' % filename)
- self.deploy_single_image(cluster_path, deployment_name, filename, version_label)
-
- def deploy_single_image(self, cluster_path, name, location, version_label):
- deploy_command = [
- 'morph', 'deploy', cluster_path, name,
- '--trove-host=%s' % self.settings['trove-host']]
- artifact_server = self.settings['artifact-cache-server']
- if artifact_server is not None:
- deploy_command.append('--artifact-cache-server=%s' % artifact_server)
- deploy_command.extend((
- '%s.location=%s' % (name, location),
- '%s.VERSION_LABEL=%s' % (name, version_label)
- ))
-
- cliapp.runcmd(deploy_command, stdout=sys.stdout)
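-# Editor's sketch of the resulting command (hypothetical values):
-#   morph deploy clusters/release.morph my-system \
-#       --trove-host=trove.example.com \
-#       --artifact-cache-server=cache.example.com \
-#       my-system.location=release/baserock-14.29-my-system.img \
-#       my-system.VERSION_LABEL=baserock-14.29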
-
-
-ReleaseApp().run()
diff --git a/scripts/release-build.test.conf b/scripts/release-build.test.conf
deleted file mode 100644
index 50083352..00000000
--- a/scripts/release-build.test.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-[config]
-trove-host = ct-mcr-1.ducie.codethink.co.uk
-controllers = x86_64:ct-mcr-1-distbuild-x86-64-majikthise-controller.dyn.ducie.codethink.co.uk,
- x86_32:ct-mcr-1-distbuild-x86-32-majikthise-controller.dyn.ducie.codethink.co.uk,
- armv7lhf:ct-mcr-1-distbuild-armv7lhf-jetson.dyn.ducie.codethink.co.uk
-release-number = 14.29
diff --git a/scripts/release-test b/scripts/release-test
deleted file mode 100755
index 4dcc6f76..00000000
--- a/scripts/release-test
+++ /dev/null
@@ -1,400 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2014 Codethink Ltd
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-'''release-test
-
-This script deploys the set of systems in the cluster morphology it is
-instructed to read, to test that they work correctly.
-
-'''
-
-import cliapp
-import os
-import pipes
-import shlex
-import shutil
-import socket
-import tempfile
-import time
-import uuid
-
-import morphlib
-
-
-class MorphologyHelper(object):
-
- def __init__(self):
- self.defs_repo = morphlib.definitions_repo.open(
- '.', search_for_root=True)
- self.loader = morphlib.morphloader.MorphologyLoader()
- self.finder = morphlib.morphologyfinder.MorphologyFinder(self.defs_repo)
-
- def load_morphology(self, path):
- text = self.finder.read_file(path)
- return self.loader.load_from_string(text)
-
- @classmethod
- def iterate_systems(cls, systems_list):
- for system in systems_list:
- yield morphlib.util.sanitise_morphology_path(system['morph'])
- if 'subsystems' in system:
- for subsystem in cls.iterate_systems(system['subsystems']):
- yield subsystem
-
- def iterate_cluster_deployments(cls, cluster_morph):
- for system in cluster_morph['systems']:
- path = morphlib.util.sanitise_morphology_path(system['morph'])
- defaults = system.get('deploy-defaults', {})
- for name, options in system['deploy'].iteritems():
- config = dict(defaults)
- config.update(options)
- yield path, name, config
-
- def load_cluster_systems(self, cluster_morph):
- for system_path in set(self.iterate_systems(cluster_morph['systems'])):
- system_morph = self.load_morphology(system_path)
- yield system_path, system_morph
-
-
-class TimeoutError(cliapp.AppException):
-
- """Error to be raised when a connection waits too long"""
-
- def __init__(self, msg):
- super(TimeoutError, self).__init__(msg)
-
-
-class VMHost(object):
-
- def __init__(self, user, address, disk_path):
- self.user = user
- self.address = address
- self.disk_path = disk_path
-
- @property
- def ssh_host(self):
- return '{user}@{address}'.format(user=self.user, address=self.address)
-
- def runcmd(self, *args, **kwargs):
- cliapp.ssh_runcmd(self.ssh_host, *args, **kwargs)
-
- def virsh(self, *args, **kwargs):
- self.runcmd(['virsh', '-c', 'qemu:///system'] + list(args), **kwargs)
-
-
-class DeployedSystemInstance(object):
-
- def __init__(self, deployment, config, host_machine, vm_id, rootfs_path):
- self.deployment = deployment
- self.config = config
- # TODO: Stop assuming test machine can DHCP and be assigned its
- # hostname in the deployer's resolve search path.
- self.ip_address = self.config['HOSTNAME']
- self.host_machine = host_machine
- self.vm_id = vm_id
- self.rootfs_path = rootfs_path
-
- @property
- def ssh_host(self):
- # TODO: Stop assuming we ssh into test instances as root
- return 'root@{host}'.format(host=self.ip_address)
-
- def runcmd(self, argv, chdir='.', **kwargs):
- ssh_cmd = ['ssh', '-o', 'StrictHostKeyChecking=no',
- '-o', 'UserKnownHostsFile=/dev/null', self.ssh_host]
- cmd = ['sh', '-c', 'cd "$1" && shift && exec "$@"', '-', chdir]
- cmd += argv
- ssh_cmd.append(' '.join(map(pipes.quote, cmd)))
- return cliapp.runcmd(ssh_cmd, **kwargs)
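-# Editor's illustration: runcmd(['ls'], chdir='/srv') ssh-es into the
-# instance and runs (roughly)
-#   sh -c 'cd "$1" && shift && exec "$@"' - /srv ls
-# i.e. the command is executed after changing to the requested directory.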
-
- def _wait_for_dhcp(self, timeout):
- '''Block until given hostname resolves successfully.
-
- Raises TimeoutError if the hostname has not appeared in 'timeout'
- seconds.
-
- '''
- start_time = time.time()
- while True:
- try:
- socket.gethostbyname(self.ip_address)
- return
- except socket.gaierror:
- pass
- if time.time() > start_time + timeout:
- raise TimeoutError("Host %s did not appear after %i seconds" %
- (self.ip_address, timeout))
- time.sleep(0.5)
-
- def _wait_for_ssh(self, timeout):
- """Wait until the deployed VM is responding via SSH"""
- start_time = time.time()
- while True:
- try:
- self.runcmd(['true'], stdin=None, stdout=None, stderr=None)
- return
- except cliapp.AppException:
- # TODO: Stop assuming the ssh part of the command is what failed
- if time.time() > start_time + timeout:
- raise TimeoutError("%s sshd did not start after %i seconds"
- % (self.ip_address, timeout))
- time.sleep(0.5)
-
- def wait_until_online(self, timeout=10):
- self._wait_for_dhcp(timeout)
- self._wait_for_ssh(timeout)
-
- def delete(self):
- # Stop and remove VM
- try:
- self.host_machine.virsh('destroy', self.vm_id)
- except cliapp.AppException as e:
- # TODO: Stop assuming that destroy failed because it wasn't running
- pass
- try:
- self.host_machine.virsh('undefine', self.vm_id, '--remove-all-storage')
- except cliapp.AppException as e:
- # TODO: Stop assuming that undefine failed because it was
- # already removed
- pass
-
-
-class Deployment(object):
-
- def __init__(self, cluster_path, name, deployment_config, host_machine):
- self.cluster_path = cluster_path
- self.name = name
- self.deployment_config = deployment_config
- self.host_machine = host_machine
-
- @staticmethod
- def _ssh_host_key_exists(hostname):
- """Check if an ssh host key exists in known_hosts"""
- if not os.path.exists('/root/.ssh/known_hosts'):
- return False
- with open('/root/.ssh/known_hosts', 'r') as known_hosts:
- return any(line.startswith(hostname) for line in known_hosts)
-
- def _update_known_hosts(self):
- if not self._ssh_host_key_exists(self.host_machine.address):
- with open('/root/.ssh/known_hosts', 'a') as known_hosts:
- cliapp.runcmd(['ssh-keyscan', self.host_machine.address],
- stdout=known_hosts)
-
- @staticmethod
- def _generate_sshkey_config(tempdir, config):
- manifest = os.path.join(tempdir, 'manifest')
- with open(manifest, 'w') as f:
- f.write('0040700 0 0 /root/.ssh\n')
- f.write('overwrite 0100600 0 0 /root/.ssh/authorized_keys\n')
- authkeys = os.path.join(tempdir, 'root', '.ssh', 'authorized_keys')
- os.makedirs(os.path.dirname(authkeys))
- with open(authkeys, 'w') as auth_f:
- with open('/root/.ssh/id_rsa.pub', 'r') as key_f:
- shutil.copyfileobj(key_f, auth_f)
-
- install_files = shlex.split(config.get('INSTALL_FILES', ''))
- install_files.append(manifest)
- yield 'INSTALL_FILES', ' '.join(pipes.quote(f) for f in install_files)
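-# Editor's note: the manifest written above installs /root/.ssh (mode 0700)
-# and /root/.ssh/authorized_keys (mode 0600) on the deployed system from the
-# staging directory, so the test harness's /root/.ssh/id_rsa.pub key grants
-# it ssh access to the instance. The resulting INSTALL_FILES value might
-# look like (hypothetical):
-#   INSTALL_FILES='/tmp/tmpXYZ/manifest'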
-
- def deploy(self):
- self._update_known_hosts()
-
- hostname = str(uuid.uuid4())
- vm_id = hostname
- image_base = self.host_machine.disk_path
- rootpath = '{image_base}/{hostname}.img'.format(image_base=image_base,
- hostname=hostname)
- loc = 'kvm+ssh://{ssh_host}/{id}/{path}'.format(
- ssh_host=self.host_machine.ssh_host, id=vm_id, path=rootpath)
-
- options = {
- 'type': 'kvm',
- 'location': loc,
- 'AUTOSTART': 'True',
- 'HOSTNAME': hostname,
- 'DISK_SIZE': '20G',
- 'RAM_SIZE': '2G',
- 'VERSION_LABEL': 'release-test',
- }
-
- tempdir = tempfile.mkdtemp()
- try:
- options.update(
- self._generate_sshkey_config(tempdir,
- self.deployment_config))
-
- args = ['morph', 'deploy', self.cluster_path, self.name]
- for k, v in options.iteritems():
- args.append('%s.%s=%s' % (self.name, k, v))
- cliapp.runcmd(args, stdin=None, stdout=None, stderr=None)
-
- config = dict(self.deployment_config)
- config.update(options)
-
- return DeployedSystemInstance(self, config, self.host_machine,
- vm_id, rootpath)
- finally:
- shutil.rmtree(tempdir)
-
-
-class ReleaseApp(cliapp.Application):
-
- """Cliapp application which handles automatic builds and tests"""
-
- def add_settings(self):
- """Add the command line options needed"""
- group_main = 'Program Options'
- self.settings.string_list(['deployment-host'],
- 'ARCH:HOST:PATH that VMs can be deployed to',
- default=None,
- group=group_main)
- self.settings.string(['trove-host'],
- 'Address of Trove for test systems to build from',
- default=None,
- group=group_main)
- self.settings.string(['trove-id'],
- 'ID of Trove for test systems to build from',
- default=None,
- group=group_main)
- self.settings.string(['build-ref-prefix'],
- 'Prefix of build branches for test systems',
- default=None,
- group=group_main)
-
- @staticmethod
- def _run_tests(instance, system_path, system_morph,
- (trove_host, trove_id, build_ref_prefix),
- morph_helper, systems):
- instance.wait_until_online()
-
- tests = []
- def baserock_build_test(instance):
- instance.runcmd(['git', 'config', '--global', 'user.name',
- 'Test Instance of %s' % instance.deployment.name])
- instance.runcmd(['git', 'config', '--global', 'user.email',
- 'ci-test@%s' % instance.config['HOSTNAME']])
- instance.runcmd(['mkdir', '-p', '/src/ws', '/src/cache',
- '/src/tmp'])
- def morph_cmd(*args, **kwargs):
- # TODO: decide whether to use cached artifacts or not by
- # adding --artifact-cache-server= --cache-server=
- argv = ['morph', '--log=/src/morph.log', '--cachedir=/src/cache',
- '--tempdir=/src/tmp', '--log-max=100M',
- '--trove-host', trove_host, '--trove-id', trove_id,
- '--build-ref-prefix', build_ref_prefix]
- argv.extend(args)
- instance.runcmd(argv, **kwargs)
-
- repo = morph_helper.sb.root_repository_url
- ref = morph_helper.defs_repo.HEAD
- sha1 = morph_helper.defs_repo.resolve_ref_to_commit(ref)
- morph_cmd('init', '/src/ws')
- chdir = '/src/ws'
-
- morph_cmd('checkout', repo, ref, chdir=chdir)
- # TODO: Add a morph subcommand that gives the path to the root repository.
- repo_path = os.path.relpath(
- morph_helper.sb.get_git_directory_name(repo),
- morph_helper.sb.root_directory)
- chdir = os.path.join(chdir, ref, repo_path)
-
- instance.runcmd(['git', 'reset', '--hard', sha1], chdir=chdir)
- print 'Building test systems for {sys}'.format(sys=system_path)
- for to_build_path, to_build_morph in systems.iteritems():
- if to_build_morph['arch'] == system_morph['arch']:
- print 'Test building {path}'.format(path=to_build_path)
- morph_cmd('build', to_build_path, chdir=chdir,
- stdin=None, stdout=None, stderr=None)
- print 'Finished Building test systems'
-
- def python_smoke_test(instance):
- instance.runcmd(['python', '-c', 'print "Hello World"'])
-
- # TODO: Come up with a better way of determining which tests to run
- if 'devel' in system_path:
- tests.append(baserock_build_test)
- else:
- tests.append(python_smoke_test)
-
- for test in tests:
- test(instance)
-
- def deploy_and_test_systems(self, cluster_path,
- deployment_hosts, build_test_config):
- """Run the deployments and tests"""
-
- version = 'release-test'
-
- morph_helper = MorphologyHelper()
- cluster_morph = morph_helper.load_morphology(cluster_path)
- systems = dict(morph_helper.load_cluster_systems(cluster_morph))
-
- for system_path, deployment_name, deployment_config in \
- morph_helper.iterate_cluster_deployments(cluster_morph):
-
- system_morph = systems[system_path]
- # We can only test systems in KVM that have a BSP
- if not any('bsp' in si['morph'] for si in system_morph['strata']):
- continue
-
- # We can only test systems in KVM that we have a host for
- if system_morph['arch'] not in deployment_hosts:
- continue
- host_machine = deployment_hosts[system_morph['arch']]
- deployment = Deployment(cluster_path, deployment_name,
- deployment_config, host_machine)
-
- instance = deployment.deploy()
- try:
- self._run_tests(instance, system_path, system_morph,
- build_test_config, morph_helper, systems)
- finally:
- instance.delete()
-
- def process_args(self, args):
- """Process the command line args and kick off the builds/tests"""
- if self.settings['build-ref-prefix'] is None:
- self.settings['build-ref-prefix'] = (
- os.path.join(self.settings['trove-id'], 'builds'))
- for setting in ('deployment-host', 'trove-host',
- 'trove-id', 'build-ref-prefix'):
- self.settings.require(setting)
-
- deployment_hosts = {}
- for host_config in self.settings['deployment-host']:
- arch, address = host_config.split(':', 1)
- user, address = address.split('@', 1)
- address, disk_path = address.split(':', 1)
- if user == '':
- user = 'root'
- # TODO: Don't assume root is the user with deploy access
- deployment_hosts[arch] = VMHost(user, address, disk_path)
-
- build_test_config = (self.settings['trove-host'],
- self.settings['trove-id'],
- self.settings['build-ref-prefix'])
-
- if len(args) != 1:
- raise cliapp.AppException('Usage: release-test CLUSTER')
- cluster_path = morphlib.util.sanitise_morphology_path(args[0])
- self.deploy_and_test_systems(cluster_path, deployment_hosts,
- build_test_config)
-
-
-if __name__ == '__main__':
- ReleaseApp().run()
diff --git a/scripts/release-test-os b/scripts/release-test-os
deleted file mode 100755
index 06e01daf..00000000
--- a/scripts/release-test-os
+++ /dev/null
@@ -1,526 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2014 Codethink Ltd
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-'''release-test-os
-
-This script deploys the set of systems in the cluster morphology it is
-instructed to read, to test that they work correctly.
-
-'''
-
-import cliapp
-import os
-import pipes
-import shlex
-import shutil
-import socket
-import tempfile
-import time
-import uuid
-
-import morphlib
-
-
-class NovaList:
- def __init__(self):
- self.output = []
- self.lines = []
- self.instance = []
-
- def update(self):
- self.output = cliapp.runcmd(['nova', 'list'])
- self.lines = self.output.split('\n')
- self.lines = self.lines[3:-2]
-
- def get_nova_details_for_instance(self, name):
- self.update()
-
- for line in self.lines:
- entries = line.split('|')
- stripped_line = [entry.strip() for entry in entries]
- if stripped_line.count(name) == 1:
- self.instance = stripped_line
-
- def get_nova_state_for_instance(self, name):
- self.get_nova_details_for_instance(name)
- if not self.instance:
- return
- return self.instance[3]
-
- def get_nova_ip_for_instance(self, name):
- self.get_nova_details_for_instance(name)
- if not self.instance:
- return
-
- if self.get_nova_state_for_instance(name) != 'ACTIVE':
- return
-
- return self.instance[6]
-
- def get_nova_ip_for_instance_timeout(self, name, timeout=120):
- start_time = time.time()
-
- while self.get_nova_state_for_instance(name) != 'ACTIVE':
-
- if time.time() > start_time + timeout:
- print "%s not ACTIVE after %i seconds" % (name, timeout)
- return
-
- time.sleep(1)
-
- ip_addr = self.get_nova_ip_for_instance(name)
- if not ip_addr:
- return
-
- if ip_addr.count('=') == 0:
- return
-
- ip_addr = ip_addr[ip_addr.find('=') + 1:]
-
- if ip_addr.count(',') == 0:
- return ip_addr
-
- return ip_addr[:ip_addr.find(',')]
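-# Editor's illustration: a `nova list` Networks column entry such as
-#   "private=10.0.0.2, 192.168.1.5"   (hypothetical addresses)
-# yields "10.0.0.2": the text after '=' up to the first ','.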
-
-
-
-class MorphologyHelper(object):
-
- def __init__(self):
- self.sb = sb = morphlib.sysbranchdir.open_from_within('.')
- defs_repo_path = sb.get_git_directory_name(sb.root_repository_url)
- self.defs_repo = morphlib.gitdir.GitDirectory(defs_repo_path)
- self.loader = morphlib.morphloader.MorphologyLoader()
- self.finder = morphlib.morphologyfinder.MorphologyFinder(self.defs_repo)
-
- def load_morphology(self, path):
- text = self.finder.read_file(path)
- return self.loader.load_from_string(text)
-
- @classmethod
- def iterate_systems(cls, systems_list):
- for system in systems_list:
- yield morphlib.util.sanitise_morphology_path(system['morph'])
- if 'subsystems' in system:
- for subsystem in cls.iterate_systems(system['subsystems']):
- yield subsystem
-
- def iterate_cluster_deployments(cls, cluster_morph):
- for system in cluster_morph['systems']:
- path = morphlib.util.sanitise_morphology_path(system['morph'])
- defaults = system.get('deploy-defaults', {})
- for name, options in system['deploy'].iteritems():
- config = dict(defaults)
- config.update(options)
- yield path, name, config
-
- def load_cluster_systems(self, cluster_morph):
- for system_path in set(self.iterate_systems(cluster_morph['systems'])):
- system_morph = self.load_morphology(system_path)
- yield system_path, system_morph
-
-
-class TimeoutError(cliapp.AppException):
-
- """Error to be raised when a connection waits too long"""
-
- def __init__(self, msg):
- super(TimeoutError, self).__init__(msg)
-
-
-class VMHost(object):
-
- def __init__(self, user, address, disk_path):
- self.user = user
- self.address = address
- self.disk_path = disk_path
-
- @property
- def ssh_host(self):
- return '{user}@{address}'.format(user=self.user, address=self.address)
-
- def runcmd(self, *args, **kwargs):
- cliapp.ssh_runcmd(self.ssh_host, *args, **kwargs)
-
-
-class DeployedSystemInstance(object):
-
- def __init__(self, deployment, config, host_machine, vm_id, rootfs_path,
- ip_addr, hostname):
- self.deployment = deployment
- self.config = config
- self.ip_address = ip_addr
- self.host_machine = host_machine
- self.vm_id = vm_id
- self.rootfs_path = rootfs_path
- self.hostname = hostname
-
- @property
- def ssh_host(self):
- # TODO: Stop assuming we ssh into test instances as root
- return 'root@{host}'.format(host=self.ip_address)
-
- def runcmd(self, argv, chdir='.', **kwargs):
- ssh_cmd = ['ssh', '-o', 'StrictHostKeyChecking=no',
- '-o', 'UserKnownHostsFile=/dev/null', self.ssh_host]
- cmd = ['sh', '-c', 'cd "$1" && shift && exec "$@"', '-', chdir]
- cmd += argv
- ssh_cmd.append(' '.join(map(pipes.quote, cmd)))
- return cliapp.runcmd(ssh_cmd, **kwargs)
-
- def _wait_for_dhcp(self, timeout):
- '''Block until given hostname resolves successfully.
-
- Raises TimeoutError if the hostname has not appeared in 'timeout'
- seconds.
-
- '''
- start_time = time.time()
- while True:
- try:
- socket.gethostbyname(self.ip_address)
- return
- except socket.gaierror:
- pass
- if time.time() > start_time + timeout:
- raise TimeoutError("Host %s did not appear after %i seconds" %
- (self.ip_address, timeout))
- time.sleep(0.5)
-
- def _wait_for_ssh(self, timeout):
- """Wait until the deployed VM is responding via SSH"""
- start_time = time.time()
- while True:
- try:
- self.runcmd(['true'], stdin=None, stdout=None, stderr=None)
- return
- except cliapp.AppException:
- # TODO: Stop assuming the ssh part of the command is what failed
- if time.time() > start_time + timeout:
- raise TimeoutError("%s sshd did not start after %i seconds"
- % (self.ip_address, timeout))
- time.sleep(0.5)
-
- def _wait_for_cloud_init(self, timeout):
- """Wait until cloud init has resized the disc"""
- start_time = time.time()
- while True:
- try:
- out = self.runcmd(['sh', '-c',
- 'test -e "$1" && echo exists || echo does not exist',
- '-',
- '/root/cloud-init-finished'])
- except:
- import traceback
- traceback.print_exc()
- raise
- if out.strip() == 'exists':
- return
- if time.time() > start_time + timeout:
- raise TimeoutError("Disc size not increased after %i seconds"
- % (timeout))
- time.sleep(3)
-
- def wait_until_online(self, timeout=120):
- self._wait_for_dhcp(timeout)
- self._wait_for_ssh(timeout)
- self._wait_for_cloud_init(timeout)
- print "Test system %s ready to run tests." % (self.hostname)
-
- def delete(self):
- # Stop and remove VM
- print "Deleting %s test instance" % (self.hostname)
- try:
- cliapp.runcmd(['nova', 'delete', self.hostname])
- except cliapp.AppException as e:
- # TODO: Stop assuming that delete failed because the instance
- # wasn't running
- print "- Failed"
- pass
- print "Deleting %s test disc image" % (self.hostname)
- try:
- cliapp.runcmd(['nova', 'image-delete', self.hostname])
- except cliapp.AppException as e:
- # TODO: Stop assuming that image-delete failed because it was
- # already removed
- print "- Failed"
- pass
-
-
-class Deployment(object):
-
- def __init__(self, cluster_path, name, deployment_config,
- host_machine, net_id):
- self.cluster_path = cluster_path
- self.name = name
- self.deployment_config = deployment_config
- self.host_machine = host_machine
- self.net_id = net_id
-
- @staticmethod
- def _ssh_host_key_exists(hostname):
- """Check if an ssh host key exists in known_hosts"""
- if not os.path.exists('/root/.ssh/known_hosts'):
- return False
- with open('/root/.ssh/known_hosts', 'r') as known_hosts:
- return any(line.startswith(hostname) for line in known_hosts)
-
- def _update_known_hosts(self):
- if not self._ssh_host_key_exists(self.host_machine.address):
- with open('/root/.ssh/known_hosts', 'a') as known_hosts:
- cliapp.runcmd(['ssh-keyscan', self.host_machine.address],
- stdout=known_hosts)
-
- @staticmethod
- def _generate_sshkey_config(tempdir, config):
- manifest = os.path.join(tempdir, 'manifest')
- with open(manifest, 'w') as f:
- f.write('0040700 0 0 /root/.ssh\n')
- f.write('overwrite 0100600 0 0 /root/.ssh/authorized_keys\n')
- authkeys = os.path.join(tempdir, 'root', '.ssh', 'authorized_keys')
- os.makedirs(os.path.dirname(authkeys))
- with open(authkeys, 'w') as auth_f:
- with open('/root/.ssh/id_rsa.pub', 'r') as key_f:
- shutil.copyfileobj(key_f, auth_f)
-
- install_files = shlex.split(config.get('INSTALL_FILES', ''))
- install_files.append(manifest)
- yield 'INSTALL_FILES', ' '.join(pipes.quote(f) for f in install_files)
-
- def deploy(self):
- self._update_known_hosts()
-
- hostname = str(uuid.uuid4())
- vm_id = hostname
- image_base = self.host_machine.disk_path
- rootpath = '{image_base}/{hostname}.img'.format(image_base=image_base,
- hostname=hostname)
- loc = 'http://{ssh_host}:5000/v2.0'.format(
- ssh_host=self.host_machine.ssh_host, id=vm_id, path=rootpath)
-
- options = {
- 'type': 'openstack',
- 'location': loc,
- 'HOSTNAME': hostname,
- 'DISK_SIZE': '5G',
- 'RAM_SIZE': '2G',
- 'VERSION_LABEL': 'release-test',
- 'OPENSTACK_USER': os.environ['OS_USERNAME'],
- 'OPENSTACK_TENANT': os.environ['OS_TENANT_NAME'],
- 'OPENSTACK_PASSWORD': os.environ['OS_PASSWORD'],
- 'OPENSTACK_IMAGENAME': hostname,
- 'CLOUD_INIT': 'yes',
- 'KERNEL_ARGS': 'console=tty0 console=ttyS0',
- }
-
- tempdir = tempfile.mkdtemp()
- try:
- options.update(
- self._generate_sshkey_config(tempdir,
- self.deployment_config))
-
- # Deploy the image to openstack
- args = ['morph', 'deploy', self.cluster_path, self.name]
- for k, v in options.iteritems():
- args.append('%s.%s=%s' % (self.name, k, v))
- cliapp.runcmd(args, stdin=None, stdout=None, stderr=None)
-
- config = dict(self.deployment_config)
- config.update(options)
-
- # Boot an instance from the image
- args = ['nova', 'boot',
- '--flavor', 'm1.medium',
- '--image', hostname,
- '--user-data', '/usr/lib/mason/os-init-script',
- '--nic', "net-id=%s" % (self.net_id),
- hostname]
- output = cliapp.runcmd(args)
-
- # Print nova boot output, with adminPass line removed
- output_lines = output.split('\n')
- for line in output_lines:
- if line.find('adminPass') != -1:
- password_line = line
- output_lines.remove(password_line)
- output = '\n'.join(output_lines)
- print output
-
- # Get ip address from nova list
- nl = NovaList()
- ip_addr = nl.get_nova_ip_for_instance_timeout(hostname)
- print "IP address for instance %s: %s" % (hostname, ip_addr)
-
- return DeployedSystemInstance(self, config, self.host_machine,
- vm_id, rootpath, ip_addr, hostname)
- finally:
- shutil.rmtree(tempdir)
-
-
-class ReleaseApp(cliapp.Application):
-
- """Cliapp application which handles automatic builds and tests"""
-
- def add_settings(self):
- """Add the command line options needed"""
- group_main = 'Program Options'
- self.settings.string_list(['deployment-host'],
- 'ARCH:HOST:PATH that VMs can be deployed to',
- default=None,
- group=group_main)
- self.settings.string(['trove-host'],
- 'Address of Trove for test systems to build from',
- default=None,
- group=group_main)
- self.settings.string(['trove-id'],
- 'ID of Trove for test systems to build from',
- default=None,
- group=group_main)
- self.settings.string(['build-ref-prefix'],
- 'Prefix of build branches for test systems',
- default=None,
- group=group_main)
- self.settings.string(['net-id'],
- 'Openstack network ID',
- default=None,
- group=group_main)
-
- @staticmethod
- def _run_tests(instance, system_path, system_morph,
- (trove_host, trove_id, build_ref_prefix),
- morph_helper, systems):
- instance.wait_until_online()
-
- tests = []
- def baserock_build_test(instance):
- instance.runcmd(['git', 'config', '--global', 'user.name',
- 'Test Instance of %s' % instance.deployment.name])
- instance.runcmd(['git', 'config', '--global', 'user.email',
- 'ci-test@%s' % instance.config['HOSTNAME']])
- instance.runcmd(['mkdir', '-p', '/src/ws', '/src/cache',
- '/src/tmp'])
- def morph_cmd(*args, **kwargs):
- # TODO: decide whether to use cached artifacts or not by
- # adding --artifact-cache-server= --cache-server=
- argv = ['morph', '--log=/src/morph.log', '--cachedir=/src/cache',
- '--tempdir=/src/tmp', '--log-max=100M',
- '--trove-host', trove_host, '--trove-id', trove_id,
- '--build-ref-prefix', build_ref_prefix]
- argv.extend(args)
- instance.runcmd(argv, **kwargs)
-
- repo = morph_helper.sb.root_repository_url
- ref = morph_helper.defs_repo.HEAD
- sha1 = morph_helper.defs_repo.resolve_ref_to_commit(ref)
- morph_cmd('init', '/src/ws')
- chdir = '/src/ws'
-
- morph_cmd('checkout', repo, ref, chdir=chdir)
- # TODO: Add a morph subcommand that gives the path to the root repository.
- repo_path = os.path.relpath(
- morph_helper.sb.get_git_directory_name(repo),
- morph_helper.sb.root_directory)
- chdir = os.path.join(chdir, ref, repo_path)
-
- instance.runcmd(['git', 'reset', '--hard', sha1], chdir=chdir)
- print 'Building test systems for {sys}'.format(sys=system_path)
- for to_build_path, to_build_morph in systems.iteritems():
- if to_build_morph['arch'] == system_morph['arch']:
- print 'Test building {path}'.format(path=to_build_path)
- morph_cmd('build', to_build_path, chdir=chdir,
- stdin=None, stdout=None, stderr=None)
- print 'Finished Building test systems'
-
- def python_smoke_test(instance):
- instance.runcmd(['python', '-c', 'print "Hello World"'])
-
- # TODO: Come up with a better way of determining which tests to run
- if 'devel' in system_path:
- tests.append(baserock_build_test)
- else:
- tests.append(python_smoke_test)
-
- for test in tests:
- test(instance)
-
- def deploy_and_test_systems(self, cluster_path,
- deployment_hosts, build_test_config,
- net_id):
- """Run the deployments and tests"""
-
- version = 'release-test'
-
- morph_helper = MorphologyHelper()
- cluster_morph = morph_helper.load_morphology(cluster_path)
- systems = dict(morph_helper.load_cluster_systems(cluster_morph))
-
- for system_path, deployment_name, deployment_config in \
- morph_helper.iterate_cluster_deployments(cluster_morph):
-
- system_morph = systems[system_path]
- # We can only test systems in KVM that have a BSP
- if not any('bsp' in si['morph'] for si in system_morph['strata']):
- continue
-
- # We can only test systems in KVM that we have a host for
- if system_morph['arch'] not in deployment_hosts:
- continue
- host_machine = deployment_hosts[system_morph['arch']]
- deployment = Deployment(cluster_path, deployment_name,
- deployment_config, host_machine,
- net_id)
-
- instance = deployment.deploy()
- try:
- self._run_tests(instance, system_path, system_morph,
- build_test_config, morph_helper, systems)
- finally:
- instance.delete()
-
- def process_args(self, args):
- """Process the command line args and kick off the builds/tests"""
- if self.settings['build-ref-prefix'] is None:
- self.settings['build-ref-prefix'] = (
- os.path.join(self.settings['trove-id'], 'builds'))
- for setting in ('deployment-host', 'trove-host',
- 'trove-id', 'build-ref-prefix', 'net-id'):
- self.settings.require(setting)
-
- deployment_hosts = {}
- for host_config in self.settings['deployment-host']:
- arch, address = host_config.split(':', 1)
- user, address = address.split('@', 1)
- address, disk_path = address.split(':', 1)
- if user == '':
- user = 'root'
- # TODO: Don't assume root is the user with deploy access
- deployment_hosts[arch] = VMHost(user, address, disk_path)
-
- build_test_config = (self.settings['trove-host'],
- self.settings['trove-id'],
- self.settings['build-ref-prefix'])
-
- if len(args) != 1:
- raise cliapp.AppException('Usage: release-test-os CLUSTER')
- cluster_path = morphlib.util.sanitise_morphology_path(args[0])
- self.deploy_and_test_systems(cluster_path, deployment_hosts,
- build_test_config,
- self.settings['net-id'])
-
-
-if __name__ == '__main__':
- ReleaseApp().run()
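
For reference, each deployment-host value parsed above packs four fields
into one string, ARCH:USER@ADDRESS:DISK_PATH, with the user defaulting to
root when left empty. A minimal standalone sketch of that parsing (the
helper name is hypothetical, not part of the script above):

    def parse_deployment_host(host_config):
        # 'x86_64:@10.0.0.5:/var/vms' ->
        # ('x86_64', 'root', '10.0.0.5', '/var/vms')
        arch, rest = host_config.split(':', 1)
        user, rest = rest.split('@', 1)
        address, disk_path = rest.split(':', 1)
        return arch, user or 'root', address, disk_path
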
diff --git a/scripts/release-upload b/scripts/release-upload
deleted file mode 100755
index 106a6e49..00000000
--- a/scripts/release-upload
+++ /dev/null
@@ -1,472 +0,0 @@
-#!/usr/bin/python
-# Copyright (C) 2014 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-
-'''Upload and publish Baserock binaries for a release.
-
-This utility is used for the Baserock release process. See
-http://wiki.baserock.org/guides/release-process/ for details on the
-release process.
-
-This utility uploads two sets of binaries:
-
-* The build artifacts (built chunks and strata) used to construct the
- systems being released. The systems are found in `release.morph` and
- the artifacts come from the Trove used to prepare the release. They get
- uploaded to a public Trove (by default git.baserock.org); if the build
- Trove and the public Trove are the same, nothing happens.
-
-* The released system images (disk images, tar archives, etc)
- specified in `release.morph` get uploaded to a download server (by
- default download.baserock.org).
-
-'''
-
-
-import json
-import logging
-import os
-import pwd
-import shutil
-import sys
-import urllib
-import urllib2
-import urlparse
-
-import cliapp
-import yaml
-
-import morphlib
-
-class ReleaseUploader(cliapp.Application):
-
- def add_settings(self):
- group = 'Release upload settings'
-
- local_username = self.get_local_username()
-
- self.settings.string(
- ['build-trove-host'],
- 'get build artifacts from Trove at ADDRESS',
- metavar='ADDRESS',
- group=group)
-
- self.settings.string(
- ['public-trove-host'],
- 'publish build artifacts on Trove at ADDRESS',
- metavar='ADDRESS',
- default='git.baserock.org',
- group=group)
-
- self.settings.string(
- ['public-trove-username'],
- 'log into public trove as USER',
- metavar='USER',
- default=local_username,
- group=group)
-
- self.settings.string(
- ['public-trove-artifact-dir'],
- 'put published artifacts into DIR',
- metavar='DIR',
- default='/home/cache/artifacts',
- group=group)
-
- self.settings.string(
- ['release-artifact-dir'],
- 'get release artifacts from DIR (all files from there)',
- metavar='DIR',
- default='.',
- group=group)
-
- self.settings.string(
- ['download-server-address'],
- 'publish release artifacts on server at ADDRESS',
- metavar='ADDRESS',
- default='download.baserock.org',
- group=group)
-
- self.settings.string(
- ['download-server-username'],
- 'log into download server as USER',
- metavar='USER',
- default=local_username,
- group=group)
-
- self.settings.string(
- ['download-server-private-dir'],
- 'use DIR as the temporary location for uploaded release '
- 'artifacts',
- metavar='DIR',
- default='/srv/download.baserock.org/baserock/.publish-temp',
- group=group)
-
- self.settings.string(
- ['download-server-public-dir'],
- 'put published release artifacts in DIR',
- metavar='DIR',
- default='/srv/download.baserock.org/baserock',
- group=group)
-
- self.settings.string(
- ['local-build-artifacts-dir'],
- 'keep build artifacts to be uploaded temporarily in DIR',
- metavar='DIR',
- default='build-artifacts',
- group=group)
-
- self.settings.string(
- ['morph-cmd'],
- 'run FILE to invoke morph',
- metavar='FILE',
- default='morph',
- group=group)
-
- self.settings.string_list(
- ['arch'],
- 'upload files from morphologies of ARCH',
- metavar='ARCH',
- default=[],
- group=group)
-
- self.settings.boolean(
- ['upload-build-artifacts'],
- 'upload build artifacts?',
- default=True)
-
- self.settings.boolean(
- ['upload-release-artifacts'],
- 'upload release artifacts (disk images etc)?',
- default=True)
-
- def get_local_username(self):
- uid = os.getuid()
- return pwd.getpwuid(uid)[0]
-
- def process_args(self, args):
- if len(args) != 1:
- raise cliapp.AppException('Usage: release-upload CLUSTER')
- cluster_morphology_path = args[0]
- self.status(msg='Uploading and publishing Baserock release')
-
- if self.settings['upload-build-artifacts']:
- self.publish_build_artifacts(cluster_morphology_path)
- else:
- self.status(
- msg='Not uploading build artifacts '
- '(upload-build-artifacts set to false)')
-
- if self.settings['upload-release-artifacts']:
- self.publish_release_artifacts()
- else:
- self.status(
- msg='Not uploading release artifacts '
- '(upload-release-artifacts set to false)')
-
- def publish_build_artifacts(self, cluster_morphology_path):
- publisher = BuildArtifactPublisher(self.settings, self.status)
- publisher.publish_build_artifacts(cluster_morphology_path)
- self.status(msg='Build artifacts have been published')
-
- def publish_release_artifacts(self):
- publisher = ReleaseArtifactPublisher(self.settings, self.status)
- publisher.publish_release_artifacts()
- self.status(msg='Release artifacts have been published')
-
- def status(self, msg, **kwargs):
- formatted = msg.format(**kwargs)
- logging.info(formatted)
- sys.stdout.write(formatted + '\n')
- sys.stdout.flush()
-
-
-class BuildArtifactPublisher(object):
-
- '''Publish build artifacts related to the release.'''
-
- def __init__(self, settings, status):
- self.settings = settings
- self.status = status
-
- def publish_build_artifacts(self, cluster_path):
- artifact_basenames = self.list_build_artifacts_for_release(cluster_path)
- self.status(
- msg='Found {count} build artifact files in release',
- count=len(artifact_basenames))
-
- to_be_uploaded = self.filter_away_build_artifacts_on_public_trove(
- artifact_basenames)
-
- logging.debug('List of artifacts (basenames) to upload, excluding those already uploaded:')
- for i, basename in enumerate(to_be_uploaded):
- logging.debug(' {0}: {1}'.format(i, basename))
- logging.debug('End of artifact list (to_be_uploaded)')
-
- self.status(
- msg='Need to fetch locally, then upload {count} build artifacts',
- count=len(to_be_uploaded))
-
- self.upload_build_artifacts_to_public_trove(to_be_uploaded)
-
- def list_build_artifacts_for_release(self, cluster_morphology_path):
- self.status(msg='Find build artifacts included in release')
-
- # FIXME: These are hardcoded for simplicity. They would be
- # possible to deduce automatically from the workspace, but
- # that can happen later.
- repo = 'file://%s' % os.path.abspath('.')
- ref = 'HEAD'
-
- argv = [self.settings['morph-cmd'], 'list-artifacts', '--quiet', repo, ref]
- argv += self.find_system_morphologies(cluster_morphology_path)
- output = cliapp.runcmd(argv)
- basenames = output.splitlines()
- logging.debug('List of build artifacts in release:')
- for basename in basenames:
- logging.debug(' {0}'.format(basename))
- logging.debug('End of list of build artifacts in release')
-
- return basenames
-
- def find_system_morphologies(self, cluster_morphology_path):
- cluster = self.load_cluster_morphology(cluster_morphology_path)
- system_dicts = self.find_systems_in_parsed_cluster_morphology(cluster)
- if self.settings['arch']:
- system_dicts = self.choose_systems_for_wanted_architectures(
- system_dicts, self.settings['arch'])
- return [sd['morph'] for sd in system_dicts]
-
- def load_cluster_morphology(self, pathname):
- with open(pathname) as f:
- return yaml.safe_load(f)
-
- def find_systems_in_parsed_cluster_morphology(self, cluster):
- return cluster['systems']
-
- def choose_systems_for_wanted_architectures(self, system_dicts, archs):
- return [
- sd
- for sd in system_dicts
- if self.system_is_for_wanted_arch(sd, archs)]
-
- def system_is_for_wanted_arch(self, system_dict, archs):
- morph = self.load_system_morphology(system_dict)
- return morph['arch'] in archs
-
- def load_system_morphology(self, system_dict):
- pathname = morphlib.util.sanitise_morphology_path(system_dict['morph'])
- return self.load_morphology_from_named_file(pathname)
-
- def load_morphology_from_named_file(self, pathname):
- finder = self.get_morphology_finder_for_root_repository()
- morphology_text = finder.read_file(pathname)
- loader = morphlib.morphloader.MorphologyLoader()
- return loader.load_from_string(morphology_text)
-
- def get_morphology_finder_for_root_repository(self):
- definitions_repo = morphlib.definitions_repo.open(
- '.', search_for_root=True)
- return morphlib.morphologyfinder.MorphologyFinder(definitions_repo)
-
- def filter_away_build_artifacts_on_public_trove(self, basenames):
- result = []
- logging.debug('Filtering away already existing artifacts:')
- for basename, exists in self.query_public_trove_for_artifacts(basenames):
- logging.debug(' {0}: {1}'.format(basename, exists))
- if not exists:
- result.append(basename)
- logging.debug('End of filtering away')
- return result
-
- def query_public_trove_for_artifacts(self, basenames):
- host = self.settings['public-trove-host']
-
- # FIXME: This could use
- # contextlib.closing(urllib2.urlopen(url, data=data)) instead
- # of explicit closing.
- url = 'http://{host}:8080/1.0/artifacts'.format(host=host)
- data = json.dumps(basenames)
- f = urllib2.urlopen(url, data=data)
- obj = json.load(f)
- return obj.items()
-
- def upload_build_artifacts_to_public_trove(self, basenames):
- self.download_artifacts_locally(basenames)
- self.upload_artifacts_to_public_trove(basenames)
-
- def download_artifacts_locally(self, basenames):
- dirname = self.settings['local-build-artifacts-dir']
- self.create_directory_if_missing(dirname)
- for i, basename in enumerate(basenames, 1):
- url = self.construct_artifact_url(basename)
- pathname = os.path.join(dirname, basename)
- if not os.path.exists(pathname):
- self.status(
- msg='Downloading {i}/{total} {basename}',
- basename=repr(basename), i=i, total=len(basenames))
- self.download_from_url(url, dirname, pathname)
-
- def create_directory_if_missing(self, dirname):
- if not os.path.exists(dirname):
- os.makedirs(dirname)
-
- def construct_artifact_url(self, basename):
- scheme = 'http'
- netloc = '{host}:8080'.format(host=self.settings['build-trove-host'])
- path = '/1.0/artifacts'
- query = 'filename={0}'.format(urllib.quote_plus(basename))
- fragment = ''
- components = (scheme, netloc, path, query, fragment)
- return urlparse.urlunsplit(components)
-
- def download_from_url(self, url, dirname, pathname):
- logging.info(
- 'Downloading {url} to {pathname}'.format(
- url=url, pathname=pathname))
- with open(pathname, 'wb') as output:
- try:
- incoming = urllib2.urlopen(url)
- shutil.copyfileobj(incoming, output)
- incoming.close()
- except urllib2.HTTPError as e:
- if pathname.endswith('.meta'):
- return
- self.status(
- msg="ERROR: Can't download {url}: {explanation}",
- url=url,
- explanation=str(e))
- os.remove(pathname)
- raise
-
- def upload_artifacts_to_public_trove(self, basenames):
- self.status(
- msg='Upload build artifacts to {trove}',
- trove=self.settings['public-trove-host'])
- rsync_files_to_server(
- self.settings['local-build-artifacts-dir'],
- basenames,
- self.settings['public-trove-username'],
- self.settings['public-trove-host'],
- self.settings['public-trove-artifact-dir'])
- set_permissions_on_server(
- self.settings['public-trove-username'],
- self.settings['public-trove-host'],
- self.settings['public-trove-artifact-dir'],
- basenames)
-
-class ReleaseArtifactPublisher(object):
-
- '''Publish release artifacts for a release.'''
-
- def __init__(self, settings, status):
- self.settings = settings
- self.status = status
-
- def publish_release_artifacts(self):
- files = self.list_release_artifacts()
- if files:
- self.upload_release_artifacts_to_private_dir(files)
- self.move_release_artifacts_to_public_dir(files)
- self.create_symlinks_to_new_release_artifacts(files)
-
- def list_release_artifacts(self):
- self.status(msg='Find release artifacts to publish')
- return os.listdir(self.settings['release-artifact-dir'])
-
- def upload_release_artifacts_to_private_dir(self, files):
- self.status(msg='Upload release artifacts to private directory')
- path = self.settings['download-server-private-dir']
- self.create_directory_on_download_server(path)
- self.rsync_files_to_download_server(files, path)
-
- def create_directory_on_download_server(self, path):
- user = self.settings['download-server-username']
- host = self.settings['download-server-address']
- self.status(msg='Create {host}:{path}', host=host, path=path)
- target = '{user}@{host}'.format(user=user, host=host)
- cliapp.ssh_runcmd(target, ['mkdir', '-p', path])
-
- def rsync_files_to_download_server(self, files, path):
- self.status(msg='Upload release artifacts to download server')
- rsync_files_to_server(
- self.settings['release-artifact-dir'],
- files,
- self.settings['download-server-username'],
- self.settings['download-server-address'],
- path)
- set_permissions_on_server(
- self.settings['download-server-username'],
- self.settings['download-server-address'],
- path,
- files)
-
- def move_release_artifacts_to_public_dir(self, files):
- self.status(msg='Move release artifacts to public directory')
- private_dir = self.settings['download-server-private-dir']
- public_dir = self.settings['download-server-public-dir']
- self.create_directory_on_download_server(public_dir)
-
- # Move just the contents of the private dir, not the dir
- # itself (-mindepth). Avoid overwriting existing files (mv
- # -n).
- argv = ['find', private_dir, '-mindepth', '1',
- '-exec', 'mv', '-n', '{}', public_dir + '/.', ';']
-
- target = '{user}@{host}'.format(
- user=self.settings['download-server-username'],
- host=self.settings['download-server-address'])
- cliapp.ssh_runcmd(target, argv)
-
- def create_symlinks_to_new_release_artifacts(self, files):
- self.status(msg='FIXME: Create symlinks to new release artifacts')
-
-
-def rsync_files_to_server(
- source_dir, source_filenames, user, host, target_dir):
-
- if not source_filenames:
- return
-
- argv = [
- 'rsync',
- '-a',
- '--progress',
- '--partial',
- '--human-readable',
- '--sparse',
- '--protect-args',
- '-0',
- '--files-from=-',
- source_dir,
- '{user}@{host}:{path}'.format(user=user, host=host, path=target_dir),
- ]
-
- files_list = '\0'.join(source_filenames)
- cliapp.runcmd(argv, feed_stdin=files_list, stdout=None, stderr=None)
-
-
-def set_permissions_on_server(user, host, target_dir, filenames):
- # If we have no files, we can't form a valid command to run on the server
- if not filenames:
- return
- target = '{user}@{host}'.format(user=user, host=host)
- argv = ['xargs', '-0', 'chmod', '0644']
- files_list = ''.join(
- '{0}\0'.format(os.path.join(target_dir, filename)) for filename in filenames)
- cliapp.ssh_runcmd(target, argv, feed_stdin=files_list, stdout=None, stderr=None)
-
-
-ReleaseUploader(description=__doc__).run()
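
The Trove artifact cache that filter_away_build_artifacts_on_public_trove
talks to above exposes a small HTTP API on port 8080: POST a JSON list of
artifact basenames to /1.0/artifacts and it answers with a JSON object
mapping each basename to whether the cache already holds it. A standalone
sketch of that exchange (host name and basename are placeholders):

    import json
    import urllib2

    def artifacts_present(trove_host, basenames):
        # POST the basenames as a JSON list; the reply maps each
        # basename to True/False for presence in the artifact cache.
        url = 'http://{host}:8080/1.0/artifacts'.format(host=trove_host)
        f = urllib2.urlopen(url, data=json.dumps(basenames))
        try:
            return json.load(f)
        finally:
            f.close()

    missing = [name for name, exists
               in artifacts_present('git.baserock.org', ['foo.chunk']).items()
               if not exists]
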
diff --git a/scripts/release-upload.test.conf b/scripts/release-upload.test.conf
deleted file mode 100644
index 13227983..00000000
--- a/scripts/release-upload.test.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-[config]
-download-server-address = localhost
-download-server-private-dir = /tmp/private
-download-server-public-dir = /tmp/public
-build-trove-host = ct-mcr-1.ducie.codethink.co.uk
-public-trove-host = localhost
-public-trove-username = root
-public-trove-artifact-dir = /tmp/artifacts
-release-artifact-dir = t.release-files
-morph-cmd = /home/root/git-morph
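
cliapp-based scripts such as release-upload read ini-style configuration
files like this one, with a [config] section holding one key per long
option name. A sketch of inspecting such a file directly, assuming plain
ConfigParser rather than cliapp's own loader:

    import ConfigParser

    parser = ConfigParser.RawConfigParser()
    parser.read('scripts/release-upload.test.conf')
    for key, value in parser.items('config'):
        print '{0} = {1}'.format(key, value)
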
diff --git a/strata/docutils.morph b/strata/docutils.morph
index e76cc7a2..f6b202da 100644
--- a/strata/docutils.morph
+++ b/strata/docutils.morph
@@ -15,6 +15,7 @@ chunks:
ref: c8f0ce32a8075e9ab21e5cf734fb96195455264d
unpetrify-ref: docbook-xml-4.5
build-depends:
+
# This is a little awkward, we don't really build-depend on xml-catalog,
# but if we don't include it as a build dependency
# then we won't have the existing xml catalog in our staging area
@@ -31,6 +32,7 @@ chunks:
ref: 802da9dd5d4bc18f46a916eedc0c5c1980a15e59
unpetrify-ref: docbook-xsl-1.78.1
build-depends:
+
# Same issue as above, except this time we don't want to overwrite
# the catalog that now contains the XML DTDs.
- docbook-xml
diff --git a/strata/gerrit-tools.morph b/strata/gerrit-tools.morph
new file mode 100644
index 00000000..94a4575f
--- /dev/null
+++ b/strata/gerrit-tools.morph
@@ -0,0 +1,11 @@
+name: gerrit-tools
+kind: stratum
+description: Tools for use with Gerrit
+build-depends:
+- morph: strata/python-common.morph
+chunks:
+- name: pygerrit
+ repo: github:sonyxperiadev/pygerrit
+ ref: daad8e23d9bbcd8a2bc565497be50a3cc29dae6b
+ unpetrify-ref: master
+ build-system: python-distutils
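
pygerrit, the only chunk in this stratum, is a Python client for Gerrit's
REST API and SSH stream-events interface. A minimal sketch of the REST
client, assuming pygerrit's GerritRestAPI class and a hypothetical Gerrit
URL:

    from pygerrit.rest import GerritRestAPI

    # List the ten most recent open changes on a (hypothetical) instance.
    rest = GerritRestAPI(url='https://gerrit.example.org')
    for change in rest.get('/changes/?q=status:open&n=10'):
        print change['subject']
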
diff --git a/strata/gnome.morph b/strata/gnome.morph
index 3a8e0f64..84d89e0e 100644
--- a/strata/gnome.morph
+++ b/strata/gnome.morph
@@ -70,17 +70,17 @@ chunks:
repo: upstream:yelp-xsl
ref: 2dcf168dde754d18152b008473269a803d6e650b
unpetrify-ref: master
+ build-system: autotools
build-depends:
- itstool
- build-system: autotools
- name: yelp-tools
repo: upstream:yelp-tools
ref: 6df6bf24e385bbc1ac0b800be8a6e878f18a775c
unpetrify-ref: master
+ build-system: autotools
build-depends:
- itstool
- yelp-xsl
- build-system: autotools
- name: iso-codes
repo: upstream:iso-codes
ref: 18ac112b4b84c8f03743565fde3512a637820761
@@ -223,9 +223,9 @@ chunks:
repo: upstream:zenity
ref: d7135db2bdfdca812c0165f90e4611af9c7c6a50
unpetrify-ref: master
+ build-system: autotools
build-depends:
- yelp-tools
- build-system: autotools
- name: mozjs24
morph: strata/gnome/mozjs24.morph
repo: upstream:mozilla/mozjs24
@@ -235,9 +235,9 @@ chunks:
repo: upstream:gjs
ref: 5d480f519e2b7e95cf1fad2f9f1a8e798fd5fe49
unpetrify-ref: master
+ build-system: autotools
build-depends:
- mozjs24
- build-system: autotools
- name: telepathy-glib
morph: strata/gnome/telepathy-glib.morph
repo: upstream:telepathy-glib
@@ -262,6 +262,25 @@ chunks:
ref: 07237ff25d6171e1b548118442ddba4259a53ba5
unpetrify-ref: master
build-system: autotools
+- name: libtasn1
+ repo: upstream:libtasn1
+ ref: 7f3a9c2294cdebd9e63fe007150c181b980865ef
+ unpetrify-ref: baserock/gnome
+ build-system: autotools
+- name: p11-kit
+ morph: strata/gnome/p11-kit.morph
+ repo: upstream:p11-kit
+ ref: ec9e2450bafa1cda47525b38a28c8f981f43c1e1
+ unpetrify-ref: 0.23.1
+ build-depends:
+ - libtasn1
+- name: gcr
+ repo: upstream:gnome/gcr
+ ref: 289ba4859fd170285f51e6bbfe91f2aac3b9170d
+ unpetrify-ref: 3.18.0
+ build-system: autotools
+ build-depends:
+ - p11-kit
- name: libxkbcommon-x11
repo: upstream:xorg-lib-libxkbcommon
ref: c43c3c866eb9d52cd8f61e75cbef1c30d07f3a28
@@ -286,9 +305,21 @@ chunks:
repo: upstream:libxklavier
ref: 65c132a65e90a42e898f07243ef544109ada53c9
unpetrify-ref: master
+ build-system: autotools
build-depends:
- iso-codes
+- name: m4-common
+ repo: upstream:m4-common
+ ref: 4b704704f5e826b73b79ccfddaf215a510c104c4
+ unpetrify-ref: baserock/morph
+ build-system: autotools
+- name: libgee
+ repo: upstream:gnome/libgee
+ ref: 4301ab58efc217409c588a5527f68990b4e3d220
+ unpetrify-ref: master
+ build-system: autotools
+ build-depends:
+ - m4-common
- build-system: autotools
- name: caribou
morph: strata/gnome/caribou.morph
repo: upstream:caribou
diff --git a/strata/input-common.morph b/strata/input-common.morph
index a2df41cd..3d75536e 100644
--- a/strata/input-common.morph
+++ b/strata/input-common.morph
@@ -42,4 +42,4 @@ chunks:
morph: strata/input-common/libxkbcommon-no-x11.morph
unpetrify-ref: xkbcommon-0.5.0
build-depends:
- - xkeyboard-config
+ - xkeyboard-config
diff --git a/systems/gerrit-system-x86_64.morph b/systems/gerrit-system-x86_64.morph
new file mode 100644
index 00000000..aec06eea
--- /dev/null
+++ b/systems/gerrit-system-x86_64.morph
@@ -0,0 +1,70 @@
+name: gerrit-system-x86_64
+kind: system
+description: |
+ System for running Gerrit on Baserock.
+
+ Note this system doesn't contain Gerrit or Java: the Baserock reference
+ definitions don't have any support for Java yet. Instead, Java and Gerrit
+ are downloaded from the web and installed on first boot by the configuration
+ management scripts.
+
+ So this system is really just a Baserock base system plus
+ Ansible and some extras.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: glib-common
+ morph: strata/glib-common.morph
+- name: python2-core
+ morph: strata/python2-core.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+
+- name: lvm
+ morph: strata/lvm.morph
+
+- name: python-common
+ morph: strata/python-common.morph
+- name: ansible
+ morph: strata/ansible.morph
+
+# lighttpd is needed for Lorry Controller. Gerrit uses its own web server.
+#
+# pcre-utils is a dependency of lighttpd.
+# python-cliapp and python-wsgi are needed for lorry-controller.
+- name: pcre-utils
+ morph: strata/pcre-utils.morph
+- name: lighttpd-server
+ morph: strata/lighttpd-server.morph
+- name: python-cliapp
+ morph: strata/python-cliapp.morph
+- name: python-wsgi
+ morph: strata/python-wsgi.morph
+- name: lorry
+ morph: strata/lorry.morph
+- name: lorry-controller
+ morph: strata/lorry-controller.morph
+
+- name: gerrit-tools
+ morph: strata/gerrit-tools.morph
+
+# FIXME: 'tools' is needed mainly because rsync lives there and we need
+# rsync for upgrades using system-version-manager.
+- name: tools
+ morph: strata/tools.morph
+
+configuration-extensions:
+- extensions/set-hostname
+- extensions/add-config-files
+- extensions/nfsboot
+- extensions/install-files
+- extensions/fstab
+- extensions/cloud-init
+- extensions/install-essential-files
diff --git a/tasks/create-data-volume.yml b/tasks/create-data-volume.yml
new file mode 100644
index 00000000..05b07afe
--- /dev/null
+++ b/tasks/create-data-volume.yml
@@ -0,0 +1,26 @@
+# Format a volume for data storage
+#
+# The pattern is to create an LVM volume group on the volume, with
+# one logical volume set up. Snapshots can be taken of the data LV
+# very quickly, allowing us to take backup copies without requiring
+# long periods of downtime for the relevant services.
+---
+
+- name: ensure LVM metadata service is running
+ service: name=lvm2-lvmetad enabled=yes state=started
+
+- name: LVM logical volume group on /dev/vdb
+ lvg: vg=vg0 pvs=/dev/vdb
+
+- name: logical volume for {{ lv_name }}
+ lvol: vg=vg0 lv={{ lv_name }} size={{ lv_size }}
+
+# This will NEVER overwrite an existing filesystem unless you add
+# 'force=yes' to the arguments, so don't do that. See:
+# http://docs.ansible.com/filesystem_module.html.
+- name: ext4 filesystem on /dev/vg0/{{ lv_name }}
+ filesystem: fstype=ext4 dev=/dev/vg0/{{ lv_name }}
+
+- name: mount {{ lv_name }} logical volume
+ mount: src=/dev/vg0/{{ lv_name }} name={{ mountpoint }} fstype=ext4 state=mounted
+