imfreedom/bamboo-terraform

Parents 435e16a05f1c
Children 5f5d1226d876
Add a new "cache" role that sets up apt-cacher-ng, and add support for it in the worker instances
--- a/.hgignore Mon May 27 07:37:23 2019 -0500
+++ b/.hgignore Tue Jun 18 05:45:16 2019 -0500
@@ -8,6 +8,8 @@
\.terraform\.tfstate\.lock\.info
^ansible\.zip$
^ansible\/.+\.retry$
+^cache\.variables$
+^hosts\.cache$
^hosts\.workers$
^nodes\/
^secrets\/
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible.tf Tue Jun 18 05:45:16 2019 -0500
@@ -0,0 +1,6 @@
+# Use the archive_file data source to detect changes in the ansible scripts
+data "archive_file" "ansible_scripts" {
+ type = "zip"
+ source_dir = "ansible/"
+ output_path = "ansible.zip"
+}
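
The archive_file data source above zips the ansible/ tree and exposes an output_sha that changes whenever anything under ansible/ changes. A minimal sketch of how that hash drives re-provisioning (the same pattern the cache_ansible and worker_ansible resources below rely on):

    # Sketch: any edit under ansible/ changes ansible.zip, which changes
    # output_sha, which invalidates the trigger and re-runs the resource.
    resource "null_resource" "example" {
      triggers = {
        ansible = "${data.archive_file.ansible_scripts.output_sha}"
      }
    }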
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible/cache.yml Tue Jun 18 05:45:16 2019 -0500
@@ -0,0 +1,6 @@
+---
+- hosts: all
+ vars_files:
+ - vars/secrets.yml
+ roles:
+ - role: rwgrim.cache
--- a/ansible/roles/pidgin.bamboo-agent/tasks/main.yml Mon May 27 07:37:23 2019 -0500
+++ b/ansible/roles/pidgin.bamboo-agent/tasks/main.yml Tue Jun 18 05:45:16 2019 -0500
@@ -50,9 +50,18 @@
- git
- openjdk-8-jre-headless
- qemu-user-static
+ - binfmt-support
state: present
install_recommends: no
update_cache: yes
+ register: dependencies
+
+# if the dependencies changed, make sure we reconfigure qemu-user-static so
+# that our binfmt handlers are registered
+- name: Reconfigure qemu-user-static
+ command: dpkg-reconfigure qemu-user-static
+ become: yes
+ when: dependencies.changed
# create the group
- name: create bamboo group
@@ -122,6 +131,15 @@
name: "{{bamboo_jar}}"
state: absent
+# make sure everything under agent-home belongs to bamboo
+- name: fix permissions
+ file:
+ path: "{{bamboo_agent_home}}"
+ owner: "{{bamboo_user}}"
+ group: "{{bamboo_group}}"
+ recurse: yes
+ state: directory
+
# now make sure the agent is running
- name: start bamboo-agent
systemd:
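
For context on the qemu-user-static change above: dpkg-reconfigure re-registers the binfmt_misc handlers that let the kernel hand foreign-architecture binaries to qemu. An illustrative check on a worker (assumed handler names, not part of the playbook):

    # List the registered binfmt handlers; with qemu-user-static and
    # binfmt-support installed, entries such as qemu-arm should appear.
    ls /proc/sys/fs/binfmt_misc/
    # Inspect one handler; its first line should read "enabled".
    cat /proc/sys/fs/binfmt_misc/qemu-arm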
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible/roles/rwgrim.cache-client/defaults/main.yml Tue Jun 18 05:45:16 2019 -0500
@@ -0,0 +1,3 @@
+---
+cache_enabled: false
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible/roles/rwgrim.cache-client/tasks/main.yml Tue Jun 18 05:45:16 2019 -0500
@@ -0,0 +1,16 @@
+---
+- name: add apt-cacher proxy
+ copy:
+ dest: /etc/apt/apt.conf.d/90aptcacher
+ content: |
+ Acquire::http { Proxy "http://{{cache_hostname}}:3142"; }
+ when: cache_enabled
+- name: remove apt-cacher proxy
+ file:
+ path: /etc/apt/apt.conf.d/90aptcacher
+ state: absent
+ when: not cache_enabled
+- name: update apt
+ apt:
+ update_cache: yes
+
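
When cache_enabled is true, the copy task above renders a one-line apt configuration. Assuming a hypothetical cache_hostname of build0-cache, /etc/apt/apt.conf.d/90aptcacher would contain:

    Acquire::http { Proxy "http://build0-cache:3142"; }

Port 3142 is apt-cacher-ng's default listening port, so every apt fetch on the worker is routed through the cache instance.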
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible/roles/rwgrim.cache/tasks/main.yml Tue Jun 18 05:45:16 2019 -0500
@@ -0,0 +1,15 @@
+---
+- name: set debconf selections for apt-cacher-ng
+ debconf:
+ name: apt-cacher-ng
+ question: apt-cacher-ng/tunnelenable
+ value: true
+ vtype: boolean
+- name: install apt-cacher-ng
+ become: yes
+ apt:
+ name:
+ - apt-cacher-ng
+ state: present
+ install_recommends: no
+ update_cache: yes
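
The debconf task preseeds apt-cacher-ng's one interactive question so the install never prompts. Done by hand, the equivalent would be roughly (a sketch, not part of the role):

    # Preseed the tunnelenable question, then install non-interactively.
    echo 'apt-cacher-ng apt-cacher-ng/tunnelenable boolean true' | debconf-set-selections
    apt-get install --no-install-recommends apt-cacher-ng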
--- a/ansible/worker.yml Mon May 27 07:37:23 2019 -0500
+++ b/ansible/worker.yml Tue Jun 18 05:45:16 2019 -0500
@@ -3,11 +3,12 @@
vars_files:
- vars/secrets.yml
roles:
+ - role: rwgrim.cache-client
- role: rwgrim.docker
- role: rwgrim.convey
convey_version: 0.13.1
convey_sha256_checksum: c5f1d5c7d5da5ec61d325659e77e74a205f62929f2abca82b949c22164f2e5b6
- role: pidgin.bamboo-agent
bamboo_server: https://bamboo.pidgin.im/
- bamboo_version: 6.8.0
+ bamboo_version: 6.9.1
bamboo_groups: docker
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/instance-cache.tf Tue Jun 18 05:45:16 2019 -0500
@@ -0,0 +1,105 @@
+variable "cache_enabled" { default = true }
+variable "cache_disk_size" { default = "107374182400" } # 100gb
+variable "cache_memory" { default="1024" } # 1gb
+variable "cache_cpu" { default="1" }
+
+resource "libvirt_volume" "cache" {
+ count = "${var.cache_enabled ? 1 : 0 }"
+ name = "${format("%s-cache-root", var.node_name)}"
+
+ format = "qcow2"
+ base_volume_id = "${libvirt_volume.debian_base.id}"
+ size = "${var.cache_disk_size}"
+ pool = "${var.volume_pool}"
+}
+
+data "template_file" "cache_user_data" {
+ count = "${var.cache_enabled ? 1 : 0 }"
+ template = <<EOF
+#cloud-config
+fqdn: $${fqdn}
+users:
+ - name: root
+ ssh_authorized_keys:
+ - $${admin_ssh_pubkey}
+growpart:
+ mode: auto
+ devices: ['/']
+ ignore_growroot_disabled: false
+EOF
+
+ vars {
+ admin_ssh_pubkey = "${var.admin_ssh_pubkey}"
+ fqdn = "${format("%s-cache", var.node_name)}"
+ }
+}
+
+resource "libvirt_cloudinit_disk" "cache" {
+ count = "${var.cache_enabled ? 1 : 0 }"
+ name = "${format("%s-cache-init.iso", var.node_name)}"
+
+ user_data = "${data.template_file.cache_user_data.rendered}"
+}
+
+resource "libvirt_domain" "cache" {
+ count = "${var.cache_enabled ? 1 : 0 }"
+ name = "${format("%s-cache", var.node_name)}"
+
+ memory = "${var.cache_memory}"
+ vcpu = "${var.cache_cpu}"
+ autostart = true
+
+ cloudinit = "${libvirt_cloudinit_disk.cache.id}"
+
+ console {
+ type = "pty"
+ target_port = "0"
+ target_type = "serial"
+ }
+
+ disk {
+ volume_id = "${libvirt_volume.cache.id}"
+ }
+
+ network_interface {
+ network_name = "default"
+ wait_for_lease = true
+ }
+
+ boot_device {
+ dev = ["hd"]
+ }
+}
+
+# Create the variables file for the cache (used by the worker)
+resource "local_file" "cache_variables" {
+ content = "${join("\n", list("---", format("cache_enabled: %s", var.cache_enabled ? "true" : "false"), var.cache_enabled ? format("cache_hostname: %s-cache", var.node_name) : ""))}"
+ filename="${path.module}/cache.variables"
+}
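
For a hypothetical node_name of build0 with the cache enabled, the generated cache.variables file would contain:

    ---
    cache_enabled: true
    cache_hostname: build0-cache

With the cache disabled, only cache_enabled: false is written, so the cache-client role removes the proxy configuration on the workers.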
+
+# Create the ansible inventory
+resource "local_file" "cache_hosts" {
+ count = "${var.cache_enabled ? 1 : 0 }"
+
+ content = "${join("\n", formatlist("%s ansible_ssh_common_args='-o ProxyJump=%s -o StrictHostKeyChecking=off' ansible_user=root ansible_host=%s", libvirt_domain.cache.*.name, replace(replace(var.libvirt_uri, "qemu+ssh://", ""), "/system", ""), flatten(libvirt_domain.cache.*.network_interface.0.addresses)))}"
+ filename = "${path.module}/hosts.cache"
+}
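
The inventory is one line per cache instance. Assuming a hypothetical libvirt_uri of qemu+ssh://root@kvm1.example.com/system and a leased address of 192.168.122.10, the rendered hosts.cache would read:

    build0-cache ansible_ssh_common_args='-o ProxyJump=root@kvm1.example.com -o StrictHostKeyChecking=off' ansible_user=root ansible_host=192.168.122.10

The nested replace() calls strip the qemu+ssh:// scheme and the /system suffix from libvirt_uri, leaving the user@host portion to use as the SSH jump host.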
+
+# Run ansible against the cache
+resource "null_resource" "cache_ansible" {
+ count = "${var.cache_enabled ? 1 : 0 }"
+
+ triggers = {
+ hosts = "${sha1(local_file.cache_hosts.content)}"
+ ansible = "${data.archive_file.ansible_scripts.output_sha}"
+ }
+
+ provisioner "local-exec" {
+ command = "ansible-playbook -i ${path.module}/hosts.cache -b ansible/cache.yml --vault-password-file=secrets/ansible_vault_password.txt"
+ }
+}
+
+# Output the IP of the cache
+output "cache_ip" {
+ value = "${flatten(libvirt_domain.cache.*.network_interface.0.addresses)}"
+}
--- a/instance-registry-cache.tf Mon May 27 07:37:23 2019 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,73 +0,0 @@
-variable "registry_cache_enabled" { default = true }
-variable "registry_cache_disk_size" { default = "53687091200" } # 50gb
-variable "registry_cache_memory" { default="1024" } # 1gb
-variable "registry_cache_cpu" { default="1" }
-
-resource "libvirt_volume" "registry_cache" {
- count = "${var.registry_cache_enabled ? 1 : 0 }"
- name = "${format("%s-registry-cache-root", var.node_name)}"
-
- format = "qcow2"
- base_volume_id = "${libvirt_volume.debian_base.id}"
- size = "${var.registry_cache_disk_size}"
- pool = "${var.volume_pool}"
-}
-
-data "template_file" "registry_cache_user_data" {
- count = "${var.registry_cache_enabled ? 1 : 0 }"
- template = <<EOF
-#cloud-config
-fqdn: $${fqdn}
-users:
- - name: root
- ssh_authorized_keys:
- - $${admin_ssh_pubkey}
-growpart:
- mode: auto
- devices: ['/']
- ignore_growroot_disabled: false
-EOF
-
- vars {
- admin_ssh_pubkey = "${var.admin_ssh_pubkey}"
- fqdn = "${format("%s-registry-cache", var.node_name)}"
- }
-}
-
-resource "libvirt_cloudinit_disk" "registry_cache" {
- count = "${var.registry_cache_enabled ? 1 : 0 }"
- name = "${format("%s-registry-cache-init.iso", var.node_name)}"
-
- user_data = "${data.template_file.registry_cache_user_data.rendered}"
-}
-
-resource "libvirt_domain" "registry_cache" {
- count = "${var.registry_cache_enabled ? 1 : 0 }"
- name = "${format("%s-registry-cache", var.node_name)}"
-
- memory = "${var.registry_cache_memory}"
- vcpu = "${var.registry_cache_cpu}"
- autostart = true
-
- cloudinit = "${libvirt_cloudinit_disk.registry_cache.id}"
-
- console {
- type = "pty"
- target_port = "0"
- target_type = "serial"
- }
-
- disk {
- volume_id = "${libvirt_volume.registry_cache.id}"
- }
-
- network_interface {
- network_name = "default"
- wait_for_lease = true
- }
-
- boot_device {
- dev = ["hd"]
- }
-}
-
--- a/instance-worker.tf Mon May 27 07:37:23 2019 -0500
+++ b/instance-worker.tf Tue Jun 18 05:45:16 2019 -0500
@@ -1,6 +1,6 @@
variable "worker_count" { default = "1" }
variable "worker_disk_size" { default = "53687091200" } # 50gb
-variable "worker_memory" { default="1024" } # 1gb
+variable "worker_memory" { default="2048" } # 1gb
variable "worker_cpu" { default="1" }
resource "libvirt_volume" "worker" {
@@ -87,22 +87,18 @@
filename = "${path.module}/hosts.workers"
}
-# Use the archive_file data source to detect changes in the ansible scripts
-data "archive_file" "ansible_scripts" {
- type = "zip"
- source_dir = "ansible/"
- output_path = "ansible.zip"
-}
-
# Run ansible against all the workers
resource "null_resource" "worker_ansible" {
+ count = "${var.worker_count > 0 ? 1 : 0 }"
+
triggers = {
hosts = "${sha1(local_file.worker_hosts.content)}"
ansible = "${data.archive_file.ansible_scripts.output_sha}"
+ cache_vars = "${sha1(local_file.cache_variables.content)}"
}
provisioner "local-exec" {
- command = "ansible-playbook -i ${path.module}/hosts.workers -b ansible/worker.yml --vault-password-file=secrets/ansible_vault_password.txt"
+ command = "ansible-playbook -i ${path.module}/hosts.workers -b ansible/worker.yml --vault-password-file=secrets/ansible_vault_password.txt --extra-vars @${local_file.cache_variables.filename}"
}
}
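
With the extra-vars hook in place, the rendered local-exec command looks like this (path.module shown as . for illustration):

    ansible-playbook -i ./hosts.workers -b ansible/worker.yml \
        --vault-password-file=secrets/ansible_vault_password.txt \
        --extra-vars @./cache.variables

This is how the workers pick up cache_enabled and cache_hostname from Terraform without hard-coding them in the playbook.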