variable "worker_count" { default = "1" }
variable "worker_disk_size" { default = "53687091200" } # 50gb
variable "worker_memory" { default="2048" } # 2gb
variable "worker_cpu" { default="1" }
resource "libvirt_volume" "worker" {
count = "${var.worker_count}"
name = "${format("%s-worker-%03d-root.qcow2", var.node_name, count.index)}"
format = "qcow2"
base_volume_id = "${libvirt_volume.debian_base.id}"
size = "${var.worker_disk_size}"
pool = "${var.volume_pool}"
}
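
# As a worked example of the naming scheme, assuming var.node_name is
# "bamboo" (a hypothetical value), the format() call above names the first
# worker's root volume "bamboo-worker-000-root.qcow2"; %03d zero-pads
# count.index to three digits.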
# Build the cloud-init config file. It sets the hostname, expands the root
# disk, and sets which SSH key is allowed to log in as root.
data "template_file" "worker_user_data" {
count = "${var.worker_count > 0 ? var.worker_count : 0}"
template = <<EOF
#cloud-config
fqdn: $${fqdn}
users:
- name: root
ssh_authorized_keys:
- $${admin_ssh_pubkey}
growpart:
mode: auto
devices: ['/']
ignore_growroot_disabled: false
EOF
vars {
admin_ssh_pubkey = "${var.admin_ssh_pubkey}"
fqdn = "${format("%s-worker-%03d", var.node_name, count.index)}"
}
}
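
# Rendered, the template above yields per-worker cloud-config along these
# lines (the hostname and key are hypothetical):
#
#   #cloud-config
#   fqdn: bamboo-worker-000
#
#   users:
#     - name: root
#       ssh_authorized_keys:
#         - ssh-ed25519 AAAA... admin@example.com
#
#   growpart:
#     mode: auto
#     devices: ['/']
#     ignore_growroot_disabled: false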
# Set up the cloud-init disk itself. We create one per worker because it
# sets the hostname.
resource "libvirt_cloudinit_disk" "worker" {
count = "${var.worker_count}"
pool = "${var.volume_pool}"
name = "${format("%s-worker-%03d-init.iso", var.node_name, count.index)}"
user_data = "${element(data.template_file.worker_user_data.*.rendered, count.index)}"
}
# Create the worker instances themselves
resource "libvirt_domain" "worker" {
count = "${var.worker_count}"
name = "${format("%s-worker-%03d", var.node_name, count.index)}"
cpu = {
mode = "host-passthrough"
}
memory = "${var.worker_memory}"
vcpu = "${var.worker_cpu}"
autostart = true
cloudinit = "${element(libvirt_cloudinit_disk.worker.*.id, count.index)}"
console {
type = "pty"
target_port = "0"
target_type = "serial"
}
disk {
volume_id = "${element(libvirt_volume.worker.*.id, count.index)}"
}
network_interface {
network_name = "${var.network_name}"
wait_for_lease = true
}
boot_device {
dev = ["hd"]
}
}
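
# A quick sanity-check sketch, assuming virsh access to the same libvirt
# host (the URI below is hypothetical, matching var.libvirt_uri in form):
#
#   virsh --connect qemu+ssh://host.example.com/system list --all
#   virsh --connect qemu+ssh://host.example.com/system console bamboo-worker-000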
# Create the Ansible inventory
resource "local_file" "worker_hosts" {
  count    = "${var.worker_count > 0 ? 1 : 0}"
  content  = "${join("\n", formatlist("%s ansible_ssh_common_args='-o ProxyJump=%s -o StrictHostKeyChecking=off' ansible_user=root ansible_host=%s", libvirt_domain.worker.*.name, replace(replace(var.libvirt_uri, "qemu+ssh://", ""), "/system", ""), flatten(libvirt_domain.worker.*.network_interface.0.addresses)))}"
  filename = "${path.module}/hosts.workers"
}
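
# For illustration, with var.libvirt_uri set to
# "qemu+ssh://host.example.com/system" (hypothetical), the nested replace()
# calls strip the scheme and the "/system" path, leaving "host.example.com"
# as the ProxyJump host, so a rendered inventory line looks roughly like:
#
#   bamboo-worker-000 ansible_ssh_common_args='-o ProxyJump=host.example.com -o StrictHostKeyChecking=off' ansible_user=root ansible_host=192.0.2.10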
# Run Ansible against all of the workers
resource "null_resource" "worker_ansible" {
  count = "${var.worker_count > 0 ? 1 : 0}"

  triggers = {
    hosts      = "${sha1(local_file.worker_hosts.content)}"
    ansible    = "${data.archive_file.ansible_scripts.output_sha}"
    cache_vars = "${sha1(local_file.cache_variables.content)}"
  }

  depends_on = [
    "null_resource.cache_ansible",
  ]

  provisioner "local-exec" {
    command = "ansible-playbook -i ${path.module}/hosts.workers -b ansible/worker.yml --vault-password-file=secrets/ansible_vault_password.txt --extra-vars @${local_file.cache_variables.filename}"
  }
}
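
# To force the playbook to re-run without changing any of the triggers
# above, the resource can be tainted (a sketch; counted resources may need
# an index suffix such as .0 depending on the Terraform version):
#
#   terraform taint null_resource.worker_ansible
#   terraform apply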
# Output the IPs of the workers
output "worker_ips" {
  value = "${flatten(libvirt_domain.worker.*.network_interface.0.addresses)}"
}
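
# After an apply, the addresses can be read back at any time with:
#
#   terraform output worker_ips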