imfreedom/bamboo-terraform
Lots of work on the ansible side, still not fully working though
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible/roles/pidgin.bamboo-agent/defaults/main.yml Sat May 25 21:22:40 2019 -0500
@@ -0,0 +1,5 @@
+bamboo_home: /home/bamboo --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible/roles/pidgin.bamboo-agent/tasks/main.yml Sat May 25 21:22:40 2019 -0500
@@ -0,0 +1,104 @@
+- name: precondition - bamboo_server + fail: msg="ERROR -required variable 'bamboo_server' missing." + when: bamboo_server is not defined +- name: precondition - bamboo_version + fail: msg="ERROR -required variable 'bamboo_version' missing." + when: bamboo_version is not defined +- name: precondition - bamboo_token + fail: msg="ERROR -required variable 'bamboo_token' missing." + when: bamboo_token is not defined +- name: set internal var for bamboo_jar + bamboo_jar: "atlassian-bamboo-agent-installer-{{bamboo_version}}.jar" +- name: set internal var for bamboo_jar_url + bamboo_jar_url: "{{bamboo_server}}agentServer/agentInstaller/{{bamboo_jar}}" +- name: set bamboo agent home + bamboo_agent_home: "{{bamboo_home}}/agent-home" +# create the systemd unit +- name: install bamboo systemd service + dest: /etc/systemd/system/bamboo-agent.service + Description=Atlassian Bamboo Agent + After=syslog.target network.target + ExecStart={{bamboo_agent_home}}/bin/bamboo-agent.sh start + ExecStop={{bamboo_agent_home}}/bin/bamboo-agent.sh stop + PIDFile={{bamboo_agent_home}}/bin/bamboo-agent.pid + WantedBy=multi-user.target +# install our dependencies +- name: Install Dependencies + - openjdk-8-jre-headless +- name: create bamboo group + name: "{{bamboo_group}}" + name: "{{bamboo_user}}" + group: "{{bamboo_group}}" + groups: "{{bamboo_groups}}" + home: "{{bamboo_home}}" +- name: get bamboo agent installer (as jar) + url: "{{bamboo_jar_url}}" + dest: "{{bamboo_home}}/{{bamboo_jar}}" +# stop the agent if it's already running +- name: run bamboo installer + command: java -Dbamboo.home={{bamboo_agent_home}} -jar {{bamboo_jar}} {{bamboo_server}}/agentServer/ install -t {{bamboo_token}} + chdir: "{{bamboo_home}}" +- name: fix bamboo permissions + owner: "{{bamboo_user}}" + group: "{{bamboo_group}}" + path: "{{bamboo_home}}" +- name: start bamboo-agent --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible/roles/pidgin.bamboo-agent/templates/bamboo-agent.service Sat May 25 21:22:40 2019 -0500
@@ -0,0 +1,15 @@
+Description=Atlassian Bamboo Agent +After=syslog.target network.target +ExecStart={{bamboo_agent_home}}/bin/bamboo-agent.sh start +ExecStop={{bamboo_agent_home}}/bin/bamboo-agent.sh stop +PIDFile={{bamboo_agent_home}}/bin/bamboo-agent.pid +WantedBy=multi-user.target --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible/roles/rwgrim.convey/tasks/main.yml Sat May 25 21:22:40 2019 -0500
@@ -0,0 +1,27 @@
+- name: precondition - convey_version + fail: msg="ERROR -required variable 'convey_version' missing." + when: convey_version is not defined +- name: precondition - convey_sha256_checksum + fail: msg="ERROR -required variable 'convey_sha256_checksum' missing." + when: convey_sha256_checksum is not defined +- name: set internal var for convey_filename + convey_filename: "convey-{{convey_version}}-linux-amd64" +- name: set internal var for convey_url + convey_url: "https://bitbucket.org/rw_grim/convey/downloads/{{convey_filename}}" +- name: set install directory + convey_install_dir: "/usr/local/bin" + dest: "{{convey_install_dir}}/convey" + sha256sum: "{{convey_sha256_checksum}}" --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible/roles/rwgrim.docker/tasks/main.yml Sat May 25 21:22:40 2019 -0500
@@ -0,0 +1,26 @@
+- name: Install Dependencies +- name: Add Docker apt-key + url: https://download.docker.com/linux/ubuntu/gpg + id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88 +- name: Add Docker repository + repo: "deb [arch=amd64] https://download.docker.com/linux/debian buster stable" --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible/vars/vars.yml Sat May 25 21:22:40 2019 -0500
@@ -0,0 +1,3 @@
+bamboo_token: "{{ vault_bamboo_token }}" --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible/vars/vault Sat May 25 21:22:40 2019 -0500
@@ -0,0 +1,9 @@
+$ANSIBLE_VAULT;1.1;AES256 +36373762363335306333323435306331323432343462336634373130313634616464323563363634 +3735323433333033306462653431373735323162626138390a346264386232623065633965346262 +33353931313037386438363765666264343066306134626338633432383434366563353732653462 +6633373464383635640a643839656639353564376233386531313730616134326633643764666134 +33396466613066383233306661643461613537653464313765346662323638363765363635333135 +35393336643631306237666162636130363237623866646336343665303536386565653164666364 +31316430393632363839663630663963373965313031316261396337376637396265373337373737 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ansible/worker.yml Sat May 25 21:22:40 2019 -0500
@@ -0,0 +1,15 @@
+ convey_sha256_checksum: c5f1d5c7d5da5ec61d325659e77e74a205f62929f2abca82b949c22164f2e5b6 + - role: pidgin.bamboo-agent + bamboo_server: https://bamboo.pidgin.im/ + bamboo_token: "{{ vault_bamboo_token }}" --- a/common.tf Wed Feb 06 23:37:56 2019 -0600
+++ b/common.tf Sat May 25 21:22:40 2019 -0500
@@ -1,2 +1,4 @@
variable "admin_ssh_pubkey" {}
+variable "network_name" { default = "default" } variable "node_name" {default = "bamboo" }
+variable "volume_pool" { default = "default" } --- a/instance-registry-cache.tf Wed Feb 06 23:37:56 2019 -0600
+++ b/instance-registry-cache.tf Sat May 25 21:22:40 2019 -0500
@@ -1,8 +1,10 @@
+variable "registry_cache_enabled" { default = true } variable "registry_cache_disk_size" { default = "53687091200" } # 50gb
variable "registry_cache_memory" { default="1024" } # 1gb
variable "registry_cache_cpu" { default="1" }
resource "libvirt_volume" "registry_cache" {
+ count = "${var.registry_cache_enabled ? 1 : 0 }" name = "${format("%s-registry-cache-root", var.node_name)}"
@@ -12,6 +14,7 @@
data "template_file" "registry_cache_user_data" {
+ count = "${var.registry_cache_enabled ? 1 : 0 }" @@ -32,11 +35,14 @@
resource "libvirt_cloudinit_disk" "registry_cache" {
+ count = "${var.registry_cache_enabled ? 1 : 0 }" name = "${format("%s-registry-cache-init.iso", var.node_name)}"
user_data = "${data.template_file.registry_cache_user_data.rendered}"
resource "libvirt_domain" "registry_cache" {
+ count = "${var.registry_cache_enabled ? 1 : 0 }" name = "${format("%s-registry-cache", var.node_name)}"
memory = "${var.registry_cache_memory}"
@@ -65,7 +71,3 @@
-output "registry_cache_ip" {
- value = "${libvirt_domain.registry_cache.network_interface.0.addresses}"
--- a/instance-worker.tf Wed Feb 06 23:37:56 2019 -0600
+++ b/instance-worker.tf Sat May 25 21:22:40 2019 -0500
@@ -1,12 +1,12 @@
variable "worker_count" { default = "1" }
-variable "worker_disk_size" { default = "21474836480" } # 20gb
+variable "worker_disk_size" { default = "53687091200" } # 50gb variable "worker_memory" { default="1024" } # 1gb
variable "worker_cpu" { default="1" }
resource "libvirt_volume" "worker" {
count = "${var.worker_count}"
- name = "${format("%s-worker-%03d-root", var.node_name, count.index)}"
+ name = "${format("%s-worker-%03d-root.qcow2", var.node_name, count.index)}" base_volume_id = "${libvirt_volume.debian_base.id}"
@@ -14,6 +14,8 @@
pool = "${var.volume_pool}"
+# Build the cloud init config file. It sets the hostname, expands the root +# disk, and sets which ssh key to allow root to log in. data "template_file" "worker_user_data" {
count = "${var.worker_count > 0 ? var.worker_count : 0}"
@@ -36,6 +38,8 @@
+# Setup the cloudinit disk itself. We create one per worker because it sets resource "libvirt_cloudinit_disk" "worker" {
count = "${var.worker_count}"
@@ -43,6 +47,7 @@
user_data = "${element(data.template_file.worker_user_data.*.rendered, count.index)}"
+# Create the instance itself resource "libvirt_domain" "worker" {
count = "${var.worker_count}"
name = "${format("%s-worker-%03d", var.node_name, count.index)}"
@@ -64,7 +69,7 @@
- network_name = "default"
+ network_name = "${var.network_name}" @@ -73,7 +78,35 @@
- value = "${libvirt_domain.worker.*.network_interface.0.addresses}"
+# Create the ansible inventory +resource "local_file" "worker_hosts" { + count = "${var.worker_count > 0 ? 1 : 0 }" + content = "${join("\n", formatlist("%s ansible_ssh_common_args='-J %s -o StrictHostKeyChecking=off' ansible_user=root ansible_host=%s", libvirt_domain.worker.*.name, replace(replace(var.libvirt_uri, "qemu+ssh://", ""), "/system", ""), flatten(libvirt_domain.worker.*.network_interface.0.addresses)))}" + filename = "${path.module}/hosts.workers" +# Use the archive_file data source to detect changes in the ansible scripts +data "archive_file" "ansible_scripts" { + source_dir = "ansible/" + output_path = "ansible.zip" +# Run ansible against all the workers +resource "null_resource" "worker_ansible" { + hosts = "${sha1(local_file.worker_hosts.content)}" + ansible = "${data.archive_file.ansible_scripts.output_sha}" + provisioner "local-exec" { + command = "ansible-playbook -i ${path.module}/hosts.workers -b ansible/worker.yml --vault-password-file=secrets/ansible_vault_password.txt" +# Output the ip's of the workers + value = "${flatten(libvirt_domain.worker.*.network_interface.0.addresses)}" --- a/volume-base.tf Wed Feb 06 23:37:56 2019 -0600
+++ b/volume-base.tf Sat May 25 21:22:40 2019 -0500
@@ -1,7 +1,4 @@
-variable "volume_pool" { default = "default" }
-variable "base_image_url" { default = "https://cdimage.debian.org/cdimage/openstack/9.7.0/debian-9.7.0-openstack-amd64.qcow2" }
+variable "base_image_url" { default = "https://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2" } resource "libvirt_volume" "debian_base" {