adapt domain creation to controller and worker scenario
parent ac33fb466f
commit 6fbedaa474

Makefile
@@ -41,3 +41,7 @@ stack_init:
 		-backend-config="cacert_path=$$HOME/virtualization/kubernetes-the-hard-way/certs/ca.pem" \
 		-backend-config="cert_path=$$HOME/virtualization/kubernetes-the-hard-way/certs/kubernetes.pem" \
 		-backend-config="key_path=$$HOME/virtualization/kubernetes-the-hard-way/certs/kubernetes-key.pem"
+
+stack_test:
+	cd applications/ && \
+	ansible -m debug -i stack_address.toml all -a "msg=test"
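The new stack_test target is a quick reachability check for the generated inventory: once Terraform has written applications/stack_address.toml, running make stack_test should have every controller and worker host answer Ansible's debug module with the message "test".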
@@ -1,11 +1,11 @@
-# two first domain is controller domain
+# controller domain
 [controller]
-compute-0 ansible_host=100.64.0.146 index=0
-compute-1 ansible_host=100.64.0.214 index=1
-# next domain is worker domain
+compute-0 ansible_host=100.64.0.216 index=0
+# worker domain
 [worker]
-compute-2 ansible_host=100.64.0.79 index=0
-compute-3 ansible_host=100.64.0.86 index=1
+compute-1 ansible_host=100.64.0.14 index=0
+compute-2 ansible_host=100.64.0.26 index=1
+compute-3 ansible_host=100.64.0.34 index=2
 
 [all:children]
 controller
@@ -3,7 +3,7 @@ data "template_file" "user_data" {
   vars = {
     hostname = format("%s-%s", var.hostname, count.index)
   }
-  count = var.number_domain
+  count = local.number_domain
 }
 
 data "template_file" "meta_data" {
@@ -11,7 +11,7 @@ data "template_file" "meta_data" {
   vars = {
     instance_id = count.index
   }
-  count = var.number_domain
+  count = local.number_domain
 }
 
 # Use CloudInit to add the instance
@@ -21,5 +21,5 @@ resource "libvirt_cloudinit_disk" "commoninit" {
   user_data = data.template_file.user_data[count.index].rendered
   meta_data = data.template_file.meta_data[count.index].rendered
 
-  count = var.number_domain
+  count = local.number_domain
 }
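The three count changes above all point at a new local introduced later in this commit (see the variables hunk below). A minimal sketch of what they now evaluate to, assuming the new defaults of one controller and three workers:

# number_domain is no longer an input variable; it is derived in locals
# (defined in the variables hunk further down) from the two new inputs:
locals {
  number_domain = var.number_controller + var.number_worker  # 1 + 3 = 4 with the defaults
}

# so the user_data/meta_data templates and the cloud-init disk are still
# created once per domain, covering controllers and workers alike:
#   data.template_file.user_data[0..3], data.template_file.meta_data[0..3],
#   libvirt_cloudinit_disk.commoninit[0..3]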
@@ -2,14 +2,14 @@
 
 # Define KVM domain to create
 
-resource "libvirt_domain" "domains" {
+resource "libvirt_domain" "controllers" {
   name = format("db%s", count.index)
-  memory = "1024"
-  vcpu = 1
+  memory = "2048"
+  vcpu = 2
   running = "true"
   autostart = "true"
 
-  count = var.number_domain
+  count = var.number_controller
   qemu_agent = true
 
   boot_device {
@@ -43,3 +43,45 @@ resource "libvirt_domain" "domains" {
     autoport = "true"
   }
 }
+
+resource "libvirt_domain" "workers" {
+  name = format("db%s", var.number_controller + count.index)
+  memory = "1024"
+  vcpu = 1
+  running = "true"
+  autostart = "true"
+
+  count = var.number_worker
+  qemu_agent = true
+
+  boot_device {
+    dev = ["hd", "network"]
+  }
+
+  network_interface {
+    network_id = libvirt_network.private_network.id
+    hostname = format("compute-%s", var.number_controller + count.index)
+  }
+
+  disk {
+    volume_id = libvirt_volume.root_debian[var.number_controller + count.index].id
+  }
+
+  disk {
+    volume_id = libvirt_volume.external_disk[var.number_controller + count.index].id
+  }
+
+  cloudinit = libvirt_cloudinit_disk.commoninit[var.number_controller + count.index].id
+
+  console {
+    type = "pty"
+    target_type = "serial"
+    target_port = "0"
+  }
+
+  graphics {
+    type = "vnc"
+    listen_type = "address"
+    autoport = "true"
+  }
+}
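The new workers resource reuses the root_debian, external_disk and commoninit lists that are sized by local.number_domain, offsetting every lookup by var.number_controller so the two domain resources never claim the same element. With the defaults (1 controller, 3 workers) the intended layout is sketched below; the controller side assumes the untouched remainder of that resource still indexes with plain count.index, as the old combined resource did:

# sketch of the index layout, not part of the commit:
#   libvirt_domain.controllers[0] -> root_debian[0] / external_disk[0] / commoninit[0] -> hostname compute-0
#   libvirt_domain.workers[0]     -> shared index 1 + 0 = 1                            -> hostname compute-1
#   libvirt_domain.workers[1]     -> shared index 1 + 1 = 2                            -> hostname compute-2
#   libvirt_domain.workers[2]     -> shared index 1 + 2 = 3                            -> hostname compute-3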
@@ -33,37 +33,45 @@ provider "libvirt" {
   uri = "qemu+tcp://dx30.localdomain/system"
 }
 
-resource "null_resource" "delay_10s" {
+resource "null_resource" "delay_controllers_10s" {
   provisioner "local-exec" {
-    command = "sleep 120"
+    command = "sleep 60"
   }
   triggers = {
-    # trigger after last domain created
-    "after" = libvirt_domain.domains[var.number_domain - 1].id
+    # trigger after the last controller domain is created
+    "after" = libvirt_domain.controllers[var.number_controller - 1].id
+  }
+}
+resource "null_resource" "delay_workers_10s" {
+  provisioner "local-exec" {
+    command = "sleep 60"
+  }
+  triggers = {
+    # trigger after the last worker domain is created
+    "after" = libvirt_domain.workers[var.number_worker - 1].id
   }
 }
 
 # Output Server IP
 output "ip" {
-  value = libvirt_domain.domains.*.network_interface
-  depends_on = [null_resource.delay_10s]
+  value = concat(libvirt_domain.controllers.*.network_interface, libvirt_domain.workers.*.network_interface)
+  depends_on = [null_resource.delay_controllers_10s, null_resource.delay_workers_10s]
 }
 
 resource "local_file" "write_address" {
 
   content = <<-EOT
-    # two first domain is controller domain
+    # controller domain
     [controller]
-    %{ for idx, s in slice(libvirt_domain.domains, 0, 2) ~}
-    %{ if length(s.network_interface.0.addresses) > 0 ~}
-    ${s.network_interface.0.hostname} ansible_host=${s.network_interface.0.addresses.0} index=${idx}
+    %{ for idx, s in libvirt_domain.controllers.*.network_interface.0 ~}
+    %{ if length(s.addresses) > 0 ~}
+    ${s.hostname} ansible_host=${s.addresses.0} index=${idx}
     %{ endif ~}
     %{ endfor ~}
-    # next domain is worker domain
+    # worker domain
     [worker]
-    %{ for idx, s in slice(libvirt_domain.domains, 2, length(libvirt_domain.domains)) ~}
-    %{ if length(s.network_interface.0.addresses) > 0 ~}
-    ${s.network_interface.0.hostname} ansible_host=${s.network_interface.0.addresses.0} index=${idx}
+    %{ for idx, s in libvirt_domain.workers.*.network_interface.0 ~}
+    %{ if length(s.addresses) > 0 ~}
+    ${s.hostname} ansible_host=${s.addresses.0} index=${idx}
     %{ endif ~}
     %{ endfor ~}
 
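As a sanity check: with one controller and three workers that all report an address, the heredoc above renders an inventory of exactly the shape shown in the stack_address.toml hunk at the top of this commit; the addresses are simply whatever leases the domains obtained at that apply, so the values below are illustrative and taken from the committed file:

# controller domain
[controller]
compute-0 ansible_host=100.64.0.216 index=0
# worker domain
[worker]
compute-1 ansible_host=100.64.0.14 index=0
compute-2 ansible_host=100.64.0.26 index=1
compute-3 ansible_host=100.64.0.34 index=2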
@@ -4,10 +4,16 @@ variable "ip" {
   description = "fixed ip address for compute"
 }
 
-variable "number_domain" {
-  type = string
-  default = "4"
-  description = "number of domain"
+variable "number_controller" {
+  type = number
+  default = 1
+  description = "number of controller domain"
+}
+
+variable "number_worker" {
+  type = number
+  default = 3
+  description = "number of worker domain"
 }
 
 variable "hostname" {
@@ -29,6 +35,7 @@ variable "pool_1" {
 }
 
 locals {
+  number_domain = var.number_controller + var.number_worker
   debian_buster_qcow2 = "debian-10.7.0-with-docker.qcow2"
   centos7_qcow2 = "centos7.qcow2"
 }
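number_controller and number_worker are now proper numbers, and the old number_domain only survives as the derived local above (1 + 3 = 4 with the defaults). Scaling the cluster therefore means overriding the two inputs, for example via a terraform.tfvars along these lines (values are purely illustrative):

# hypothetical terraform.tfvars for a larger cluster; local.number_domain becomes 8
number_controller = 3
number_worker = 5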
@@ -14,7 +14,7 @@ resource "libvirt_volume" "root_debian" {
   name = format("root-debian-%s", count.index)
   pool = var.pool_1
   base_volume_name = local.debian_buster_qcow2
-  count = var.number_domain
+  count = local.number_domain
 }
 
 resource "libvirt_volume" "external_disk" {
@@ -22,5 +22,5 @@ resource "libvirt_volume" "external_disk" {
   # 10Gb
   size = 10737418240
   pool = var.pool_1
-  count = var.number_domain
+  count = local.number_domain
 }