From 0543e3011817211445da32afe1eaa60ded44c673 Mon Sep 17 00:00:00 2001
From: David Runge
Date: Wed, 16 Dec 2020 21:24:26 +0100
Subject: Add kubernetes example using libvirt provider

k8s_cluster/*: Add a terraform-provider-libvirt based example to
semi-automatically create a k8s cluster for testing.
---
 k8s_cluster/README.rst                             | 116 +++++++++++++
 k8s_cluster/cloud-init/network_config.yaml         |  33 ++++
 k8s_cluster/k8s/controller0.yml                    |  76 +++++++++
 k8s_cluster/k8s/controller1.yml                    |  28 ++++
 k8s_cluster/k8s/worker0.yml                        |  19 +++
 k8s_cluster/k8s/worker1.yml                        |  19 +++
 k8s_cluster/main.tf                                | 183 +++++++++++++++++++++
 .../systemd-networkd/30-libvirt-bridge.netdev      |   3 +
 .../systemd-networkd/30-libvirt-bridge.network     |   9 +
 k8s_cluster/templates/cloud_init.cfg.tpl           |  56 +++++++
 .../templates/cloud_init_load_balancer.cfg.tpl     |  62 +++++++
 k8s_cluster/terraform.tfvars                       |  15 ++
 k8s_cluster/variables.tf                           |  34 ++++
 13 files changed, 653 insertions(+)
 create mode 100644 k8s_cluster/README.rst
 create mode 100644 k8s_cluster/cloud-init/network_config.yaml
 create mode 100644 k8s_cluster/k8s/controller0.yml
 create mode 100644 k8s_cluster/k8s/controller1.yml
 create mode 100644 k8s_cluster/k8s/worker0.yml
 create mode 100644 k8s_cluster/k8s/worker1.yml
 create mode 100644 k8s_cluster/main.tf
 create mode 100644 k8s_cluster/systemd-networkd/30-libvirt-bridge.netdev
 create mode 100644 k8s_cluster/systemd-networkd/30-libvirt-bridge.network
 create mode 100644 k8s_cluster/templates/cloud_init.cfg.tpl
 create mode 100644 k8s_cluster/templates/cloud_init_load_balancer.cfg.tpl
 create mode 100644 k8s_cluster/terraform.tfvars
 create mode 100644 k8s_cluster/variables.tf

diff --git a/k8s_cluster/README.rst b/k8s_cluster/README.rst
new file mode 100644
index 0000000..0a733fd
--- /dev/null
+++ b/k8s_cluster/README.rst
@@ -0,0 +1,116 @@
+===========
+k8s_cluster
+===========
+
+This |terraform| setup uses the |terraform-provider-libvirt| to set up a local
+|kubernetes| cluster on |libvirt| for testing purposes.
+
+**DO NOT USE THIS SETUP IN PRODUCTION! IT HARDCODES TOKENS AND ENCRYPTION KEYS!**
+
+Requirements
+------------
+
+A bridge device has to be set up for the cluster to use (see the
+`systemd-networkd `_ examples), and the host firewall has to allow forwarding
+of traffic from that bridge to an internet-facing device, as well as from the
+bridge to the bridge itself.
+
+If |nftables| is used as the firewall, the following snippet can be adapted to
+allow the required traffic between the bridge and the outside world (see the
+upstream documentation for full examples!):
+
+   .. code:: bash
+
+      # example of a simple forward chain
+      chain forward {
+        type filter hook forward priority 0; policy drop;
+
+        # Allow established and related connections.
+        ct state {established, related} accept
+
+        # Allow connections from virbr0 to eth0 (internet facing device).
+        iifname virbr0 oifname eth0 accept
+
+        # Allow connections from virbr0 to virbr0.
+        iifname virbr0 oifname virbr0 accept
+      }
+
+Furthermore, the terraform setup uses a local |postgresql| database to persist
+the state of the infrastructure.
+It requires a local database called `terraform_local`, writable by the user
+`terraform_local`.
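+
+The database and role can be created, for example, with the |postgresql|
+client tools (a sketch, assuming a local instance and access to the `postgres`
+superuser; depending on the local `pg_hba.conf`, passwordless access over
+localhost may additionally have to be allowed for the user):
+
+   .. code:: bash
+
+      # create the role and a database owned by it
+      sudo -u postgres createuser terraform_local
+      sudo -u postgres createdb -O terraform_local terraform_local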
+
+Usage
+-----
+
+To initialize the database connection and the terraform provider for this
+setup:
+
+   .. code:: bash
+
+      terraform init
+
+To see what would be done:
+
+   .. code:: bash
+
+      terraform plan
+
+To apply the changes:
+
+   .. code:: bash
+
+      terraform apply -auto-approve
+
+To destroy the infrastructure again:
+
+   .. code:: bash
+
+      terraform destroy -auto-approve
+
+Bootstrap the cluster
+---------------------
+
+Copy the kubeadm configs from `k8s `_ to the respective hosts. Connect to the
+`controller0` host and initialize the cluster (as root):
+
+   .. code:: bash
+
+      kubeadm init --upload-certs=true --config controller0.yml
+
+Connect to the second control-plane node `controller1` and make it join the
+cluster:
+
+   .. code:: bash
+
+      kubeadm join --config controller1.yml
+
+Afterwards connect to the worker nodes (e.g. `worker0`) and make them join the
+cluster:
+
+   .. code:: bash
+
+      kubeadm join --config worker0.yml
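+
+Once all nodes have joined, the cluster can be inspected from `controller0`
+using the admin kubeconfig written by `kubeadm init` (a quick check, assuming
+the default kubeadm paths):
+
+   .. code:: bash
+
+      export KUBECONFIG=/etc/kubernetes/admin.conf
+      kubectl get nodes -o wide
+
+Note that the nodes will report as `NotReady` until a pod network add-on (CNI
+plugin) has been deployed to the cluster.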
+
+.. |terraform| raw:: html
+
+   terraform
+
+.. |terraform-provider-libvirt| raw:: html
+
+   terraform-provider-libvirt
+
+.. |kubernetes| raw:: html
+
+   kubernetes
+
+.. |libvirt| raw:: html
+
+   libvirt
+
+.. |nftables| raw:: html
+
+   nftables
+
+.. |postgresql| raw:: html
+
+   postgresql
diff --git a/k8s_cluster/cloud-init/network_config.yaml b/k8s_cluster/cloud-init/network_config.yaml
new file mode 100644
index 0000000..0c36cb7
--- /dev/null
+++ b/k8s_cluster/cloud-init/network_config.yaml
@@ -0,0 +1,33 @@
+---
+version: 2
+ethernets:
+  controller0:
+    match:
+      macaddress: AA:BB:CC:11:11:00
+    dhcp4: true
+    addresses:
+      - 10.10.66.200/24
+  controller1:
+    match:
+      macaddress: AA:BB:CC:11:11:01
+    dhcp4: true
+    addresses:
+      - 10.10.66.201/24
+  worker0:
+    match:
+      macaddress: AA:BB:CC:11:22:00
+    dhcp4: true
+    addresses:
+      - 10.10.66.100/24
+  worker1:
+    match:
+      macaddress: AA:BB:CC:11:22:01
+    dhcp4: true
+    addresses:
+      - 10.10.66.101/24
+  load-balancer0:
+    match:
+      macaddress: AA:BB:CC:11:33:00
+    dhcp4: true
+    addresses:
+      - 10.10.66.50/24
diff --git a/k8s_cluster/k8s/controller0.yml b/k8s_cluster/k8s/controller0.yml
new file mode 100644
index 0000000..db8a847
--- /dev/null
+++ b/k8s_cluster/k8s/controller0.yml
@@ -0,0 +1,76 @@
+---
+# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2#InitConfiguration
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: InitConfiguration
+bootstrapTokens:
+  - token: 783bdf.3f89s0fje9f38fhf
+    description: "kubeadm bootstrap token for controller1"
+    usages:
+      - authentication
+      - signing
+    groups:
+      - system:bootstrappers:kubeadm:default-node-token
+    ttl: 1h
+  - token: 9a08jv.c0izixklcxtmnze7
+    description: "kubeadm bootstrap token for worker0"
+    usages:
+      - authentication
+      - signing
+    groups:
+      - system:bootstrappers:kubeadm:default-node-token
+    ttl: 1h
+  - token: 783bde.3f89s0fje9f38fhf
+    description: "kubeadm bootstrap token for worker1"
+    usages:
+      - authentication
+      - signing
+    groups:
+      - system:bootstrappers:kubeadm:default-node-token
+    ttl: 1h
+nodeRegistration:
+  name: controller0
+  criSocket: /run/crio/crio.sock
+  taints:
+    - key: kubeadmNode
+      value: master
+      effect: NoSchedule
+  kubeletExtraArgs:
+    cgroup-driver: systemd
+localAPIEndpoint:
+  advertiseAddress: 0.0.0.0
+  bindPort: 6443
+certificateKey: e6a2eb8581237ab72a4f494f30285ec12a9694d750b9785706a83bfcbbbd2204
+---
+# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2#ClusterConfiguration
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: ClusterConfiguration
+kubernetesVersion: v1.20.0
+controlPlaneEndpoint: k8s:6443
+certificatesDir: /etc/kubernetes/pki
+imageRepository: k8s.gcr.io
+useHyperKubeImage: false
+clusterName: cluster0
+networking:
+  serviceSubnet: 10.96.0.0/12
+  podSubnet: 10.85.0.0/16
+  dnsDomain: cluster.local
+etcd:
+apiServer:
+  certSANs:
+    - k8s
+    - controller0
+  extraArgs:
+    authorization-mode: Node,RBAC
+  timeoutForControlPlane: 4m0s
+controllerManager:
+scheduler:
+---
+# https://godoc.org/k8s.io/kubelet/config/v1beta1#KubeletConfiguration
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+cgroupDriver: systemd
+---
+# https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+clusterCIDR: 10.85.0.0/16
diff --git a/k8s_cluster/k8s/controller1.yml b/k8s_cluster/k8s/controller1.yml
new file mode 100644
index 0000000..c99129b
--- /dev/null
+++ b/k8s_cluster/k8s/controller1.yml
@@ -0,0 +1,28 @@
+---
+# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2#JoinConfiguration
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: JoinConfiguration
+nodeRegistration:
+  name: controller1
+  criSocket: /run/crio/crio.sock
+  taints:
+    - key: kubeadmNode
+      value: master
+      effect: NoSchedule
+  kubeletExtraArgs:
+    cgroup-driver: systemd
+discovery:
+  bootstrapToken:
+    token: 783bdf.3f89s0fje9f38fhf
+    apiServerEndpoint: k8s:6443
+    unsafeSkipCAVerification: true
+controlPlane:
+  localAPIEndpoint:
+    advertiseAddress: 0.0.0.0
+    bindPort: 6443
+  certificateKey: e6a2eb8581237ab72a4f494f30285ec12a9694d750b9785706a83bfcbbbd2204
+---
+# https://godoc.org/k8s.io/kubelet/config/v1beta1#KubeletConfiguration
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+cgroupDriver: systemd
diff --git a/k8s_cluster/k8s/worker0.yml b/k8s_cluster/k8s/worker0.yml
new file mode 100644
index 0000000..dc9ff0e
--- /dev/null
+++ b/k8s_cluster/k8s/worker0.yml
@@ -0,0 +1,19 @@
+---
+# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2#JoinConfiguration
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: JoinConfiguration
+nodeRegistration:
+  name: worker0
+  criSocket: /run/crio/crio.sock
+  kubeletExtraArgs:
+    cgroup-driver: systemd
+discovery:
+  bootstrapToken:
+    token: 9a08jv.c0izixklcxtmnze7
+    apiServerEndpoint: k8s:6443
+    unsafeSkipCAVerification: true
+---
+# https://godoc.org/k8s.io/kubelet/config/v1beta1#KubeletConfiguration
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+cgroupDriver: systemd
diff --git a/k8s_cluster/k8s/worker1.yml b/k8s_cluster/k8s/worker1.yml
new file mode 100644
index 0000000..8ce8a81
--- /dev/null
+++ b/k8s_cluster/k8s/worker1.yml
@@ -0,0 +1,19 @@
+---
+# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2#JoinConfiguration
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: JoinConfiguration
+nodeRegistration:
+  name: worker1
+  criSocket: /run/crio/crio.sock
+  kubeletExtraArgs:
+    cgroup-driver: systemd
+discovery:
+  bootstrapToken:
+    token: 783bde.3f89s0fje9f38fhf
+    apiServerEndpoint: k8s:6443
+    unsafeSkipCAVerification: true
+---
+# https://godoc.org/k8s.io/kubelet/config/v1beta1#KubeletConfiguration
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+cgroupDriver: systemd
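
The bootstrap tokens and the certificate key in the kubeadm configs above are
hardcoded on purpose for this test setup (see the warning in the README). For
anything less disposable, fresh values should be generated and substituted in
all of the configs, for example with kubeadm itself (a sketch, assuming
kubeadm >= 1.20; on older releases these commands live under `kubeadm alpha`):

   .. code:: bash

      # generate a new bootstrap token (one per joining node)
      kubeadm token generate
      # generate a new key for encrypting the uploaded control-plane certificates
      kubeadm certs certificate-key
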
diff --git a/k8s_cluster/main.tf b/k8s_cluster/main.tf
new file mode 100644
index 0000000..6e0c0a3
--- /dev/null
+++ b/k8s_cluster/main.tf
@@ -0,0 +1,183 @@
+terraform {
+  required_providers {
+    libvirt = {
+      source  = "dmacvicar/libvirt"
+      version = ">= 0.6.2"
+    }
+  }
+  backend "pg" {
+    conn_str = "postgres://terraform_local@localhost/terraform_local?sslmode=disable"
+  }
+}
+
+provider "libvirt" {
+  uri = var.libvirt_provider_uri
+}
+
+resource "libvirt_volume" "base_volume" {
+  name   = "vm_base_volume"
+  source = var.vm_image_source
+}
+
+resource "libvirt_volume" "control_plane_volume" {
+  name           = "control_plane_${count.index}.qcow2"
+  base_volume_id = libvirt_volume.base_volume.id
+  count          = var.control_plane_count
+}
+
+resource "libvirt_volume" "node_volume" {
+  name           = "worker_${count.index}.qcow2"
+  base_volume_id = libvirt_volume.base_volume.id
+  count          = var.node_count
+}
+
+resource "libvirt_volume" "load_balancer_volume" {
+  name           = "load_balancer_${count.index}.qcow2"
+  base_volume_id = libvirt_volume.base_volume.id
+  count          = var.load_balancer_count
+}
+
+# Public ssh key for vm (it is directly injected in cloud-init configuration) #
+data "template_file" "public_ssh_key" {
+  template = file("${var.vm_ssh_private_key}.pub")
+}
+
+# Cloud-init configuration template #
+data "template_file" "cloud_init_tpl" {
+  template = file("templates/cloud_init.cfg.tpl")
+
+  vars = {
+    ssh_public_key = data.template_file.public_ssh_key.rendered
+  }
+}
+
+# Cloud-init configuration template for load balancer #
+data "template_file" "cloud_init_load_balancer_tpl" {
+  template = file("templates/cloud_init_load_balancer.cfg.tpl")
+
+  vars = {
+    ssh_public_key = data.template_file.public_ssh_key.rendered
+  }
+}
+
+# Creates cloud-init configuration file from template for node #
+resource "local_file" "cloud_init_node_file" {
+  content  = data.template_file.cloud_init_tpl.rendered
+  filename = "config/cloud_init.cfg"
+}
+
+# Creates cloud-init configuration file from template for load balancer #
+resource "local_file" "cloud_init_load_balancer_file" {
+  content  = data.template_file.cloud_init_load_balancer_tpl.rendered
+  filename = "config/cloud_init_load_balancer.cfg"
+}
+
+data "template_file" "network_config" {
+  template = file("${path.module}/cloud-init/network_config.yaml")
+}
+
+resource "libvirt_cloudinit_disk" "cloud_init_k8s" {
+  name           = "cloud_init_k8s.iso"
+  user_data      = data.template_file.cloud_init_tpl.rendered
+  network_config = data.template_file.network_config.rendered
+}
+
+resource "libvirt_cloudinit_disk" "cloud_init_load_balancer" {
+  name           = "cloud_init_load_balancer.iso"
+  user_data      = data.template_file.cloud_init_load_balancer_tpl.rendered
+  network_config = data.template_file.network_config.rendered
+}
+
+resource "libvirt_network" "vm_net" {
+  name      = "vm_net"
+  mode      = "bridge"
+  bridge    = "virbr0"
+  autostart = true
+}
+
+resource "libvirt_domain" "load_balancer" {
+  name        = "load_balancer${count.index}"
+  description = "Kubernetes Load Balancer ${count.index}"
+  cpu = {
+    mode = "host-passthrough"
+  }
+  vcpu      = 1
+  memory    = "1024"
+  cloudinit = libvirt_cloudinit_disk.cloud_init_load_balancer.id
+  disk {
+    volume_id = libvirt_volume.load_balancer_volume[count.index].id
+  }
+  qemu_agent = true
+  autostart  = true
+  running    = true
+  count      = var.load_balancer_count
+
+  network_interface {
+    network_id     = libvirt_network.vm_net.id
+    hostname       = "load-balancer${count.index}"
+    mac            = "AA:BB:CC:11:33:0${count.index}"
+    wait_for_lease = true
+  }
+}
+
+resource "libvirt_domain" "control_plane" {
+  name        = "controller${count.index}"
+  description = "Kubernetes control plane ${count.index}"
+  cpu = {
+    mode = "host-passthrough"
+  }
+  vcpu      = 2
+  memory    = "2048"
+  cloudinit = libvirt_cloudinit_disk.cloud_init_k8s.id
+  disk {
+    volume_id = libvirt_volume.control_plane_volume[count.index].id
+  }
+  network_interface {
+    network_id     = libvirt_network.vm_net.id
+    hostname       = "controller${count.index}"
+    mac            = "AA:BB:CC:11:11:0${count.index}"
+    wait_for_lease = true
+  }
+  qemu_agent = true
+  autostart  = true
+  running    = true
+  count      = var.control_plane_count
+}
+
+resource "libvirt_domain" "node" {
+  name        = "worker${count.index}"
+  description = "Kubernetes Node ${count.index}"
+  cpu = {
+    mode = "host-passthrough"
+  }
+  vcpu      = 2
+  memory    = "2048"
+  cloudinit = libvirt_cloudinit_disk.cloud_init_k8s.id
+  disk {
+    volume_id = libvirt_volume.node_volume[count.index].id
+  }
+  qemu_agent = true
+  autostart  = true
+  running    = true
+  count      = var.node_count
+
+  network_interface {
+    network_id     = libvirt_network.vm_net.id
+    hostname       = "worker${count.index}"
+    mac            = "AA:BB:CC:11:22:0${count.index}"
+    wait_for_lease = true
+  }
+}
+
+output "load_balancer_ip_addresses" {
+  value = libvirt_domain.load_balancer.*.network_interface.0.addresses
+}
+
+output "control_plane_ip_addresses" {
+  value = libvirt_domain.control_plane.*.network_interface.0.addresses
+}
+
+output "node_ip_addresses" {
+  value = libvirt_domain.node.*.network_interface.0.addresses
+}
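
The output blocks above expose the addresses the guests acquire, so after
`terraform apply` the machines can be looked up and reached over SSH as the
`arch` user created by the cloud-init templates (a usage sketch; 10.10.66.200
is the address assigned to `controller0` in `cloud-init/network_config.yaml`):

   .. code:: bash

      terraform output control_plane_ip_addresses
      ssh arch@10.10.66.200
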
"Kubernetes Node ${count.index}" + cpu = { + mode = "host-passthrough" + } + vcpu = 2 + memory = "2048" + cloudinit = libvirt_cloudinit_disk.cloud_init_k8s.id + disk { + volume_id = libvirt_volume.node_volume[count.index].id + } + qemu_agent = true + autostart = true + running = true + count = var.node_count + + network_interface { + network_id = libvirt_network.vm_net.id + hostname = "worker${count.index}" + mac = "AA:BB:CC:11:22:0${count.index}" + wait_for_lease = true + } +} + +output "load_balancer_ip_addresses" { + value = libvirt_domain.load_balancer.*.network_interface.0.addresses +} + +output "control_plane_ip_addresses" { + value = libvirt_domain.control_plane.*.network_interface.0.addresses +} + +output "node_ip_addresses" { + value = libvirt_domain.node.*.network_interface.0.addresses +} diff --git a/k8s_cluster/systemd-networkd/30-libvirt-bridge.netdev b/k8s_cluster/systemd-networkd/30-libvirt-bridge.netdev new file mode 100644 index 0000000..df1c80e --- /dev/null +++ b/k8s_cluster/systemd-networkd/30-libvirt-bridge.netdev @@ -0,0 +1,3 @@ +[NetDev] +Name=virbr0 +Kind=bridge diff --git a/k8s_cluster/systemd-networkd/30-libvirt-bridge.network b/k8s_cluster/systemd-networkd/30-libvirt-bridge.network new file mode 100644 index 0000000..542b4bd --- /dev/null +++ b/k8s_cluster/systemd-networkd/30-libvirt-bridge.network @@ -0,0 +1,9 @@ +[Match] +Name=virbr0 + +[Network] +Address=10.10.66.1/24 +IPForward=ipv4 +IPv6AcceptRA=no +DHCPServer=yes +IPMasquerade=True diff --git a/k8s_cluster/templates/cloud_init.cfg.tpl b/k8s_cluster/templates/cloud_init.cfg.tpl new file mode 100644 index 0000000..6299f40 --- /dev/null +++ b/k8s_cluster/templates/cloud_init.cfg.tpl @@ -0,0 +1,56 @@ +#cloud-config + +users: + - name: arch + groups: + - wheel + - users + lock_passwd: false + passwd: $6$KHqF91TQzQNJECRV$/zMRa92BqMB5juUpKfIHbOpOdUPyNlLJTpymplTOP.uqoonD/AZzNRnA/XACZTuqyxAktuohSjXhyfskbRjbO1 + ssh_authorized_keys: + - ${ssh_public_key} + sudo: ALL=(ALL) NOPASSWD:ALL +packages: + - bash-completion + - cri-o + - kubeadm + - kubectl + - kubelet + - man + - man-pages + - qemu-guest-agent + - ripgrep + - tmux + - tree + - vim +write_files: + - content: | + 10.10.66.50 k8s + 10.10.66.100 worker0 + 10.10.66.101 worker1 + 10.10.66.200 controller0 + 10.10.66.201 controller1 + path: /etc/hosts + append: true + - content: | + # added by cloud-init + [crio] + storage_driver = "btrfs" + path: /etc/crio/crio.conf.d/10-btrfs.conf + - content: | + # added by cloud-init + [crio.network] + plugin_dirs = ["/usr/lib/cni/", "/opt/cni/"] + path: /etc/crio/crio.conf.d/10-cni_plugins.conf +runcmd: + - [ systemctl, mask, swap-swapfile.swap ] + - [ swapoff, -a ] + - [ rm, -rfv, /var/lib/containers ] + - [ btrfs, subvolume, create, /var/lib/kubelet ] + - [ btrfs, subvolume, create, /var/lib/containers/ ] + - [ sed, -e, '$a/dev/vda2 /var/lib/kubelet btrfs rw,relatime,compress=zstd:3,space_cache,ssd,subvol=var/lib/kubelet 0 0', -i, /etc/fstab] + - [ sed, -e, '$a/dev/vda2 /var/lib/containers btrfs rw,relatime,compress=zstd:3,space_cache,ssd,subvol=var/lib/containers 0 0', -i, /etc/fstab] + - [ mount, -a ] + - [ modprobe, overlay ] + - [ modprobe, br_netfilter] + - [ systemctl, enable, --now, qemu-guest-agent, crio, kubelet ] diff --git a/k8s_cluster/templates/cloud_init_load_balancer.cfg.tpl b/k8s_cluster/templates/cloud_init_load_balancer.cfg.tpl new file mode 100644 index 0000000..4d8ebae --- /dev/null +++ b/k8s_cluster/templates/cloud_init_load_balancer.cfg.tpl @@ -0,0 +1,62 @@ +#cloud-config + +users: + - name: arch + 
diff --git a/k8s_cluster/templates/cloud_init_load_balancer.cfg.tpl b/k8s_cluster/templates/cloud_init_load_balancer.cfg.tpl
new file mode 100644
index 0000000..4d8ebae
--- /dev/null
+++ b/k8s_cluster/templates/cloud_init_load_balancer.cfg.tpl
@@ -0,0 +1,62 @@
+#cloud-config
+
+users:
+  - name: arch
+    groups:
+      - wheel
+      - users
+    lock_passwd: false
+    passwd: $6$KHqF91TQzQNJECRV$/zMRa92BqMB5juUpKfIHbOpOdUPyNlLJTpymplTOP.uqoonD/AZzNRnA/XACZTuqyxAktuohSjXhyfskbRjbO1
+    ssh_authorized_keys:
+      - ${ssh_public_key}
+    sudo: ALL=(ALL) NOPASSWD:ALL
+packages:
+  - bash-completion
+  - man
+  - man-pages
+  - nginx
+  - qemu-guest-agent
+  - ripgrep
+  - tmux
+  - tree
+  - vim
+write_files:
+  - content: |
+      10.10.66.50 k8s
+      10.10.66.100 worker0
+      10.10.66.101 worker1
+      10.10.66.200 controller0
+      10.10.66.201 controller1
+    path: /etc/hosts
+    append: true
+  - content: |
+      worker_processes auto;
+      error_log /var/log/nginx/error.log;
+      events {
+        worker_connections 1024;
+      }
+      http {
+        include mime.types;
+        resolver 127.0.0.1 [::1];
+        default_type application/octet-stream;
+        sendfile on;
+        types_hash_max_size 4096;
+        keepalive_requests 55;
+        keepalive_timeout 55;
+      }
+      include conf.d/k8s_load_balancer.conf;
+    path: /etc/nginx/nginx.conf
+  - content: |
+      stream {
+        upstream k8s_apiserver {
+          server controller0:6443;
+          server controller1:6443;
+        }
+        server {
+          listen 6443;
+          proxy_pass k8s_apiserver;
+        }
+      }
+    path: /etc/nginx/conf.d/k8s_load_balancer.conf
+runcmd:
+  - [ systemctl, enable, --now, qemu-guest-agent, nginx ]
diff --git a/k8s_cluster/terraform.tfvars b/k8s_cluster/terraform.tfvars
new file mode 100644
index 0000000..a3b969f
--- /dev/null
+++ b/k8s_cluster/terraform.tfvars
@@ -0,0 +1,15 @@
+
+# SSH private key to use for access to nodes
+vm_ssh_private_key = "~/.ssh/id_ed25519"
+
+# how many control-plane nodes to create
+control_plane_count = 2
+
+# how many worker nodes to create
+node_count = 2
+
+# how many load balancers to create
+load_balancer_count = 1
+
+# which image to use as the base image
+vm_image_source = "https://pkgbuild.com/~dvzrv/images/Arch-Linux-x86_64-cloudimg-20201211.10966.qcow2"
diff --git a/k8s_cluster/variables.tf b/k8s_cluster/variables.tf
new file mode 100644
index 0000000..dca2f82
--- /dev/null
+++ b/k8s_cluster/variables.tf
@@ -0,0 +1,34 @@
+
+variable "libvirt_provider_uri" {
+  type        = string
+  description = "Libvirt provider's URI"
+  default     = "qemu:///system"
+}
+
+variable "vm_ssh_private_key" {
+  type        = string
+  description = "Location of SSH private key for VMs"
+}
+
+variable "vm_image_source" {
+  type        = string
+  description = "Image source, which can be a path on the host's filesystem or a URL."
+}
+
+variable "control_plane_count" {
+  type        = number
+  description = "The amount of initial control-plane nodes"
+  default     = 2
+}
+
+variable "node_count" {
+  type        = number
+  description = "The amount of initial worker nodes"
+  default     = 2
+}
+
+variable "load_balancer_count" {
+  type        = number
+  description = "The amount of initial load balancers"
+  default     = 1
+}
--
cgit v1.2.3-70-g09d2