In my setup, I have 1 controller node with a public IP address. Internally, it creates a private network and performs masquerading (NAT).
The cluster consists of 6 worker nodes connected to the private network set up on the controller node.
The worker nodes are able to reach the internet through masquerading on the controller node.
Below are the inventories and the playbook to set up this entire Kubernetes cluster.
- inventory_public.yml
all:
hosts:
controller:
ansible_host: 192.168.50.10
worker1:
ansible_host: 192.168.50.11
worker2:
ansible_host: 192.168.50.12
worker3:
ansible_host: 192.168.50.13
worker4:
ansible_host: 192.168.50.14
worker5:
ansible_host: 192.168.50.15
worker6:
ansible_host: 192.168.50.16
vars:
ansible_user: jan
ansible_python_interpreter: /usr/bin/python3- inventory_private.yml
all:
hosts:
controller:
ansible_host: 192.168.25.1
worker1:
ansible_host: 192.168.25.11
worker2:
ansible_host: 192.168.25.12
worker3:
ansible_host: 192.168.25.13
worker4:
ansible_host: 192.168.25.14
worker5:
ansible_host: 192.168.25.15
worker6:
ansible_host: 192.168.25.16
vars:
ansible_user: jan
ansible_python_interpreter: /usr/bin/python3
- setup_k3s_cluster.yml
- name: Set up private network on controller
hosts: controller
become: true
tags: network
tasks:
- name: Assign static private IP to eth1
lineinfile:
path: /etc/dhcpcd.conf
line: |
interface eth1
static ip_address=192.168.25.1/24
notify: Restart networking
- name: Enable IP forwarding
sysctl:
name: net.ipv4.ip_forward
value: '1'
state: present
reload: yes
- name: Set up NAT for private network
iptables:
table: nat
chain: POSTROUTING
out_interface: eth0
source: "192.168.25.0/24"
jump: MASQUERADE
- name: Persist iptables rules
shell: iptables-save > /etc/iptables/rules.v4
handlers:
- name: Restart networking
shell: systemctl restart dhcpcd
- name: Configure static private IPs on workers
hosts: worker1,worker2,worker3,worker4,worker5,worker6
become: true
tags: network
vars:
worker_ip_map:
worker1: 192.168.25.11
worker2: 192.168.25.12
worker3: 192.168.25.13
worker4: 192.168.25.14
worker5: 192.168.25.15
worker6: 192.168.25.16
tasks:
- name: Set static IP config
lineinfile:
path: /etc/dhcpcd.conf
line: |
interface eth1
static ip_address={{ worker_ip_map[inventory_hostname] }}/24
static routers=192.168.25.1
static domain_name_servers=8.8.8.8
notify: Restart networking
handlers:
- name: Restart networking
shell: systemctl restart dhcpcd
- name: Pause for IP changes to take effect
hosts: localhost
tags: network
tasks:
- name: Wait for 20 seconds
pause:
seconds: 20
- name: Prompt user to switch inventory
debug:
msg: |
Network reconfiguration complete.
Now switch to 'inventory_private.yml' and re-run:
ansible-playbook -i inventory_private.yml setup_k3s_cluster.yml --tags k3s
- name: Install K3s on controller
hosts: controller
become: true
tags: k3s
tasks:
- name Install K3s server
shell: |
curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--write-kubeconfig-mode644" sh -
args:
warn: false
- name: Get K3s token
shell: cat /var/lib/rancher/k3s/server/node-token
register: k3s_token
- name: Write token file
copy:
content "{{ ansible_host }} {{ k3s_token.stdout }}"
dest: /tmp/k3s_server_info
- name: Join workers to cluster
hosts: worker1,worker2,worker3,worker4,worker5,worker6
become: true
tags: k3s
tasks:
- name: Get server IP and token
delegate_to: controller
fetch:
src: /tmp/k3s_server_info
dest: /tmp/k3s_server_info
flat: yes
- name: Install K3s agent
shell: |
server_ip=$(awk '{print $1}' /tmp/k3s_server_info)
token=$(awk '{print $2}' /tmp/k3s_server_info)
curl -sfL https://get.k3s.io | K3S_URL=https://$server_i:6443 K3S_TOKEN=$token sh -
args:
warn: falseRunning network setup.
ansible-playbook -i inventory_public.yml setup_k3s_cluster.yml --tags network

Wait until it prompts you, then switch to inventory_private.yml and run the k3s installation:
ansible-playbook -i inventory_private.yml setup_k3s_cluster.yml --tags k3s

If all goes well, you should now have a k3s cluster up and running.
Leave a Reply