imfreedom/bamboo-terraform
Add a new role "cache" which sets up apt-cacher-ng and add support in the worker instances for it
--- a/.hgignore Mon May 27 07:37:23 2019 -0500
+++ b/.hgignore Tue Jun 18 05:45:16 2019 -0500
@@ -8,6 +8,8 @@
\.terraform\.tfstate\.lock\.info
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible.tf Tue Jun 18 05:45:16 2019 -0500
@@ -0,0 +1,6 @@
+# Use the archive_file data source to detect changes in the ansible scripts
+data "archive_file" "ansible_scripts" {
+ source_dir = "ansible/"
+ output_path = "ansible.zip"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible/cache.yml Tue Jun 18 05:45:16 2019 -0500
@@ -0,0 +1,6 @@
--- a/ansible/roles/pidgin.bamboo-agent/tasks/main.yml Mon May 27 07:37:23 2019 -0500
+++ b/ansible/roles/pidgin.bamboo-agent/tasks/main.yml Tue Jun 18 05:45:16 2019 -0500
@@ -50,9 +50,18 @@
+# if the dependencies changed, make sure we reconfigure qemu-user-static so
+# that our binfmt handlers are registered
+- name: Reconfigure qemu-user-static
+  command: dpkg-reconfigure qemu-user-static
+#  when: dependencies.changed
- name: create bamboo group
@@ -122,6 +131,15 @@
+# make sure everything under agent-home belongs to bamboo
+    path: "{{bamboo_agent_home}}"
+    owner: "{{bamboo_user}}"
+    group: "{{bamboo_group}}"
# now make sure the agent is running
- name: start bamboo-agent
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible/roles/rwgrim.cache-client/defaults/main.yml Tue Jun 18 05:45:16 2019 -0500
@@ -0,0 +1,3 @@
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible/roles/rwgrim.cache-client/tasks/main.yml Tue Jun 18 05:45:16 2019 -0500
@@ -0,0 +1,16 @@
+- name: add apt-cacher proxy
+  dest: /etc/apt/apt.conf.d/90aptcacher
+  Acquire::http { Proxy "http://{{cache_hostname}}:3142"; }
+- name: remove apt-cache proxy
+  path: /etc/apt/apt.conf.d/90aptcacher
+  when: not cache_enabled
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible/roles/rwgrim.cache/tasks/main.yml Tue Jun 18 05:45:16 2019 -0500
@@ -0,0 +1,15 @@
+- name: set debconf selections for apt-cacher-ng
+  question: apt-cacher-ng/tunnelenable
+- name: install apt-cacher-ng
--- a/ansible/worker.yml Mon May 27 07:37:23 2019 -0500
+++ b/ansible/worker.yml Tue Jun 18 05:45:16 2019 -0500
@@ -3,11 +3,12 @@
+ - role: rwgrim.cache-client
     convey_sha256_checksum: c5f1d5c7d5da5ec61d325659e77e74a205f62929f2abca82b949c22164f2e5b6
- role: pidgin.bamboo-agent
bamboo_server: https://bamboo.pidgin.im/
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/instance-cache.tf Tue Jun 18 05:45:16 2019 -0500
@@ -0,0 +1,105 @@
+variable "cache_enabled" { default = true }
+variable "cache_disk_size" { default = "107374182400" } # 100gb
+variable "cache_memory" { default="1024" } # 1gb
+variable "cache_cpu" { default="1" }
+resource "libvirt_volume" "cache" {
+ count = "${var.cache_enabled ? 1 : 0 }"
+ name = "${format("%s-cache-root", var.node_name)}"
+ base_volume_id = "${libvirt_volume.debian_base.id}"
+ size = "${var.cache_disk_size}"
+ pool = "${var.volume_pool}"
+data "template_file" "cache_user_data" {
+ count = "${var.cache_enabled ? 1 : 0 }"
+ ignore_growroot_disabled: false
+ admin_ssh_pubkey = "${var.admin_ssh_pubkey}"
+ fqdn = "${format("%s-cache", var.node_name)}"
+resource "libvirt_cloudinit_disk" "cache" {
+ count = "${var.cache_enabled ? 1 : 0 }"
+ name = "${format("%s-cache-init.iso", var.node_name)}"
+ user_data = "${data.template_file.cache_user_data.rendered}"
+resource "libvirt_domain" "cache" {
+ count = "${var.cache_enabled ? 1 : 0 }"
+ name = "${format("%s-cache", var.node_name)}"
+ memory = "${var.cache_memory}"
+ vcpu = "${var.cache_cpu}"
+ cloudinit = "${libvirt_cloudinit_disk.cache.id}"
+ volume_id = "${libvirt_volume.cache.id}"
+ network_name = "default"
+# Create the variables file for the cache (used by the worker)
+resource "local_file" "cache_variables" {
+ content = "${join("\n", list("---", format("cache_enabled: %s", var.cache_enabled ? "true" : "false"), var.cache_enabled ? format("cache_hostname: %s-cache", var.node_name) : ""))}"
+ filename="${path.module}/cache.variables"
+# Create the ansible inventory
+resource "local_file" "cache_hosts" {
+ count = "${var.cache_enabled ? 1 : 0 }"
+ content = "${join("\n", formatlist("%s ansible_ssh_common_args='-o ProxyJump=%s -o StrictHostKeyChecking=off' ansible_user=root ansible_host=%s", libvirt_domain.cache.*.name, replace(replace(var.libvirt_uri, "qemu+ssh://", ""), "/system", ""), flatten(libvirt_domain.cache.*.network_interface.0.addresses)))}"
+ filename = "${path.module}/hosts.cache"
+# Run ansible against all the workers
+resource "null_resource" "cache_ansible" {
+ count = "${var.cache_enabled ? 1 : 0 }"
+ hosts = "${sha1(local_file.cache_hosts.content)}"
+ ansible = "${data.archive_file.ansible_scripts.output_sha}"
+ provisioner "local-exec" {
+ command = "ansible-playbook -i ${path.module}/hosts.cache -b ansible/cache.yml --vault-password-file=secrets/ansible_vault_password.txt"
+# Output the ip of the cache
+ value = "${flatten(libvirt_domain.cache.*.network_interface.0.addresses)}"
--- a/instance-registry-cache.tf Mon May 27 07:37:23 2019 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,73 +0,0 @@
-variable "registry_cache_enabled" { default = true }
-variable "registry_cache_disk_size" { default = "53687091200" } # 50gb
-variable "registry_cache_memory" { default="1024" } # 1gb
-variable "registry_cache_cpu" { default="1" }
-resource "libvirt_volume" "registry_cache" {
- count = "${var.registry_cache_enabled ? 1 : 0 }"
- name = "${format("%s-registry-cache-root", var.node_name)}"
- base_volume_id = "${libvirt_volume.debian_base.id}"
- size = "${var.registry_cache_disk_size}"
- pool = "${var.volume_pool}"
-data "template_file" "registry_cache_user_data" {
- count = "${var.registry_cache_enabled ? 1 : 0 }"
- ignore_growroot_disabled: false
- admin_ssh_pubkey = "${var.admin_ssh_pubkey}"
- fqdn = "${format("%s-registry-cache", var.node_name)}"
-resource "libvirt_cloudinit_disk" "registry_cache" {
- count = "${var.registry_cache_enabled ? 1 : 0 }"
- name = "${format("%s-registry-cache-init.iso", var.node_name)}"
- user_data = "${data.template_file.registry_cache_user_data.rendered}"
-resource "libvirt_domain" "registry_cache" {
- count = "${var.registry_cache_enabled ? 1 : 0 }"
- name = "${format("%s-registry-cache", var.node_name)}"
- memory = "${var.registry_cache_memory}"
- vcpu = "${var.registry_cache_cpu}"
- cloudinit = "${libvirt_cloudinit_disk.registry_cache.id}"
- volume_id = "${libvirt_volume.registry_cache.id}"
- network_name = "default"
--- a/instance-worker.tf Mon May 27 07:37:23 2019 -0500
+++ b/instance-worker.tf Tue Jun 18 05:45:16 2019 -0500
@@ -1,6 +1,6 @@
variable "worker_count" { default = "1" }
variable "worker_disk_size" { default = "53687091200" } # 50gb
-variable "worker_memory" { default="1024" } # 1gb
+variable "worker_memory" { default="2048" } # 2gb
variable "worker_cpu" { default="1" }
resource "libvirt_volume" "worker" {
@@ -87,22 +87,18 @@
filename = "${path.module}/hosts.workers"
-# Use the archive_file data source to detect changes in the ansible scripts
-data "archive_file" "ansible_scripts" {
- source_dir = "ansible/"
- output_path = "ansible.zip"
# Run ansible against all the workers
resource "null_resource" "worker_ansible" {
+ count = "${var.worker_count > 0 ? 1 : 0 }"
  hosts = "${sha1(local_file.worker_hosts.content)}"
ansible = "${data.archive_file.ansible_scripts.output_sha}"
+ cache_vars = "${sha1(local_file.cache_variables.content)}"
  provisioner "local-exec" {
- command = "ansible-playbook -i ${path.module}/hosts.workers -b ansible/worker.yml --vault-password-file=secrets/ansible_vault_password.txt"
+ command = "ansible-playbook -i ${path.module}/hosts.workers -b ansible/worker.yml --vault-password-file=secrets/ansible_vault_password.txt --extra-vars @${local_file.cache_variables.filename}"