imfreedom/bamboo-terraform
Lots of doc updates
draft · 2019-05-27 · Gary Kramlich · 435e16a05f1c
variable "worker_count" {
  default = "1"
}

variable "worker_disk_size" {
  default = "53687091200" # 50 GiB
}

variable "worker_memory" {
  default = "1024" # 1 GiB
}

variable "worker_cpu" {
  default = "1"
}
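# As a rough sketch (the values below are illustrative, not part of this
# repo), these defaults could be overridden from a terraform.tfvars file or
# with -var flags on the command line:
#
#   worker_count     = "3"
#   worker_memory    = "2048"          # 2 GiB
#   worker_cpu       = "2"
#   worker_disk_size = "107374182400"  # 100 GiB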
resource "libvirt_volume" "worker" {
  count = "${var.worker_count}"

  name           = "${format("%s-worker-%03d-root.qcow2", var.node_name, count.index)}"
  format         = "qcow2"
  base_volume_id = "${libvirt_volume.debian_base.id}"
  size           = "${var.worker_disk_size}"
  pool           = "${var.volume_pool}"
}
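# Note: libvirt_volume.debian_base is assumed to be defined elsewhere in this
# configuration. A minimal sketch of what such a base volume might look like
# (the source URL is a placeholder, not from this repo):
#
#   resource "libvirt_volume" "debian_base" {
#     name   = "debian-base.qcow2"
#     pool   = "${var.volume_pool}"
#     source = "https://example.org/debian-cloud.qcow2" # placeholder
#     format = "qcow2"
#   }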
# Build the cloud-init config file. It sets the hostname, expands the root
# disk, and sets which ssh key is allowed to log in as root.
data "template_file" "worker_user_data" {
  count = "${var.worker_count > 0 ? var.worker_count : 0}"

  template = <<EOF
#cloud-config
fqdn: $${fqdn}
users:
  - name: root
    ssh_authorized_keys:
      - $${admin_ssh_pubkey}
growpart:
  mode: auto
  devices: ['/']
  ignore_growroot_disabled: false
EOF

  vars {
    admin_ssh_pubkey = "${var.admin_ssh_pubkey}"
    fqdn             = "${format("%s-worker-%03d", var.node_name, count.index)}"
  }
}
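# For a sense of what this renders to, assuming node_name = "bamboo" and a
# placeholder ssh key (both illustrative), worker 0 would get roughly:
#
#   #cloud-config
#   fqdn: bamboo-worker-000
#   users:
#     - name: root
#       ssh_authorized_keys:
#         - ssh-ed25519 AAAA... admin@example
#   growpart:
#     mode: auto
#     devices: ['/']
#     ignore_growroot_disabled: false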
# Set up the cloud-init disk itself. We create one per worker because each
# disk sets that worker's hostname.
resource "libvirt_cloudinit_disk" "worker" {
  count = "${var.worker_count}"

  pool      = "${var.volume_pool}"
  name      = "${format("%s-worker-%03d-init.iso", var.node_name, count.index)}"
  user_data = "${element(data.template_file.worker_user_data.*.rendered, count.index)}"
}
# Create the instance itself
resource "libvirt_domain" "worker" {
  count = "${var.worker_count}"

  name      = "${format("%s-worker-%03d", var.node_name, count.index)}"
  memory    = "${var.worker_memory}"
  vcpu      = "${var.worker_cpu}"
  autostart = true
  cloudinit = "${element(libvirt_cloudinit_disk.worker.*.id, count.index)}"

  console {
    type        = "pty"
    target_port = "0"
    target_type = "serial"
  }

  disk {
    volume_id = "${element(libvirt_volume.worker.*.id, count.index)}"
  }

  network_interface {
    network_name   = "${var.network_name}"
    wait_for_lease = true
  }

  boot_device {
    dev = ["hd"]
  }
}
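# If only the worker domains need to be (re)created, something like the
# following should work; this is just a usage sketch:
#
#   terraform apply -target=libvirt_domain.worker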
# Create the ansible inventory
resource "local_file" "worker_hosts" {
  count = "${var.worker_count > 0 ? 1 : 0}"

  content  = "${join("\n", formatlist("%s ansible_ssh_common_args='-o ProxyJump=%s -o StrictHostKeyChecking=off' ansible_user=root ansible_host=%s", libvirt_domain.worker.*.name, replace(replace(var.libvirt_uri, "qemu+ssh://", ""), "/system", ""), flatten(libvirt_domain.worker.*.network_interface.0.addresses)))}"
  filename = "${path.module}/hosts.workers"
}
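# Each line of the generated hosts.workers file should look roughly like the
# following, assuming libvirt_uri is something like
# "qemu+ssh://user@kvmhost/system" (hostname, jump host, and IP below are
# illustrative):
#
#   bamboo-worker-000 ansible_ssh_common_args='-o ProxyJump=user@kvmhost -o StrictHostKeyChecking=off' ansible_user=root ansible_host=192.0.2.10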
# Use the archive_file data source to detect changes in the ansible scripts
data "archive_file" "ansible_scripts" {
  type        = "zip"
  source_dir  = "ansible/"
  output_path = "ansible.zip"
}
# Run ansible against all the workers
resource "null_resource" "worker_ansible" {
  triggers = {
    hosts   = "${sha1(local_file.worker_hosts.content)}"
    ansible = "${data.archive_file.ansible_scripts.output_sha}"
  }

  provisioner "local-exec" {
    command = "ansible-playbook -i ${path.module}/hosts.workers -b ansible/worker.yml --vault-password-file=secrets/ansible_vault_password.txt"
  }
}
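# The playbook only re-runs when the inventory or the ansible/ directory
# changes. To force a re-run without changing either, tainting this resource
# should work (a usage sketch, not something this repo documents):
#
#   terraform taint null_resource.worker_ansible
#   terraform apply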
# Output the IPs of the workers
output "worker_ips" {
  value = "${flatten(libvirt_domain.worker.*.network_interface.0.addresses)}"
}
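# After an apply, the worker addresses can be read back with, for example:
#
#   terraform output worker_ips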