diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..0d64df6
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+vault-pass
diff --git a/.mailmap b/.mailmap
index 60305c0..d71ca82 100644
--- a/.mailmap
+++ b/.mailmap
@@ -2,4 +2,8 @@ Alexander Dahl <alex@netz39.de> <post@lespocky.de>
 David Kilias <dkdent@netz39.de>
 David Kilias <dkdent@netz39.de> <david.kilias@gmail.com>
 Maximilian Grau <mg-95@t-online.de>
+Maximilian Grau <mg-95@t-online.de> <mg-95@gitea.n39.eu>
+Jens Winter-Hübenthal <jens.winter@gmail.com>
+Jens Winter-Hübenthal <jens.winter@gmail.com> <jens.winter-huebenthal@bridgefield.de>
 Stefan Haun <tux@netz39.de>
+<timo@netz39.de> <n39@therr.de>
diff --git a/.yamllint b/.yamllint
index 2522155..6e223ae 100644
--- a/.yamllint
+++ b/.yamllint
@@ -2,6 +2,7 @@
 extends: default
 
 rules:
+  comments-indentation: disable
   line-length: disable
   truthy:
     allowed-values:
diff --git a/README.md b/README.md
index 3d4880f..92bff30 100644
--- a/README.md
+++ b/README.md
@@ -20,6 +20,9 @@ SSH_KEY=<absolute/path/to/ssh/private/key>
 ansible-playbook setup-ssh.yml --ask-vault-pass -e "setup_ssh_logname=$LOGUSER" -e "setup_ssh_key=$SSH_KEY"
 ```
 
+This playbook also covers `rhodium.n39.eu` (the OpenWrt router), but Ansible cannot set up SSH keys there (yet).
+Please [add your key to OpenWrt manually](https://openwrt.org/docs/guide-user/security/dropbear.public-key.auth#from_the_luci_web_interface).
+
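+One way to do this from a shell (a sketch; the key path is an assumption) is to
+append your public key to Dropbear's authorized keys file:
+
+```bash
+ssh root@rhodium.n39.eu "tee -a /etc/dropbear/authorized_keys" < ~/.ssh/id_ed25519.pub
+```
+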
 ## Edit vault encrypted vars files
 
 ```bash
@@ -49,7 +52,7 @@ To set up a new HTTPS vhost, the following steps need to be taken:
 
 1. Select a domain (for internal services we use sub-domains of `.n39.eu`).
 2. Create an external CNAME from this domain to `dyndns.n39.eu`.
-3. Create an internal DNS entry in the [Descartes DNS config](https://gitea.n39.eu/Netz39_Admin/config.descartes/src/branch/prepare/dns_dhcp.txt). This is usually an alias on an existing server.
+3. Create an internal DNS entry in the [Descartes DNS config](https://git.n39.eu/Netz39_Admin/config.descartes/src/branch/prepare/dns_dhcp.txt). This is usually an alias on an existing server.
 4. Add the entry to the [holmium playbook](holmium.yml).
 5. Set up Dehydrated and vhost on the target host, e.g. using `setup_http_site_proxy`.
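+
+For step 5, a minimal sketch of the proxy setup (names and port are
+placeholders), mirroring the host playbooks in this repository:
+
+```yaml
+- name: Setup proxy site example.n39.eu
+  ansible.builtin.include_role:
+    name: setup_http_site_proxy
+  vars:
+    site_name: example.n39.eu
+    proxy_port: 9999
+```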
 
diff --git a/files/hobbes/grafana-kiosk.service b/files/hobbes/grafana-kiosk.service
new file mode 100644
index 0000000..09a32ce
--- /dev/null
+++ b/files/hobbes/grafana-kiosk.service
@@ -0,0 +1,17 @@
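+# Runs the kiosk script that host-hobbes.yml installs to /usr/local/bin/kiosk.sh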
+[Unit]
+Description=Grafana Kiosk
+After=network.target
+Wants=network.target
+
+[Service]
+User=root
+
+ExecStart=/usr/local/bin/kiosk.sh
+
+Restart=always
+
+PIDFile=/run/kiosk.pid
+ExecStop=/bin/kill -s SIGTERM $MAINPID
+
+[Install]
+WantedBy=multi-user.target
diff --git a/files/platon/11_asterisk_i2c b/files/platon/11_asterisk_i2c
new file mode 100644
index 0000000..589db21
--- /dev/null
+++ b/files/platon/11_asterisk_i2c
@@ -0,0 +1 @@
+asterisk ALL=(root) NOPASSWD: /usr/sbin/i2cget, /usr/sbin/i2cset
diff --git a/files/platon/reboot.sh b/files/platon/reboot.sh
index f05d0c6..269d3af 100755
--- a/files/platon/reboot.sh
+++ b/files/platon/reboot.sh
@@ -1,11 +1,13 @@
 #!/bin/sh
 PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/games:/usr/games'
+echo 'set PCM volume'
+sudo amixer set "PCM" "70%"
 echo 'start i2c-foo'
 sudo modprobe i2c_dev
 sudo modprobe i2c_bcm2708
 echo 'starting log'
 tmux new-session -s status -d 'sudo less /var/log/shuttercontrol.log'
-cd /home/pi/netz39_rollladensteuerung/raspberry/shuttercontrol 
+cd /home/pi/netz39_rollladensteuerung/raspberry/shuttercontrol
 echo 'switch-on.sh'
 ../switch-on.sh
 cd /home/pi
diff --git a/files/wittgenstein/reboot.sh b/files/wittgenstein/reboot.sh
new file mode 100755
index 0000000..de29b05
--- /dev/null
+++ b/files/wittgenstein/reboot.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/games:/usr/games'
+
+echo 'switch-on.sh'
+/home/pi/switch-on.sh
+
+echo 'start i2c-foo'
+sudo modprobe i2c_dev
+sudo modprobe i2c_bcm2708
+
+# wait for network devices
+sleep 30
+
+cd /home/pi
+echo 'start ampel controller'
+tmux new-session -s ampel -d 'cd /home/pi/netz39_space_notification/raspberry/ledcontrol && ./ledcontrol'
+
+echo 'start lever controller'
+tmux new-window -t ampel:1 'cd /home/pi/netz39_space_notification/raspberry/statusswitch && ./statusswitch'
diff --git a/files/wittgenstein/switch-off.sh b/files/wittgenstein/switch-off.sh
new file mode 100755
index 0000000..40a081e
--- /dev/null
+++ b/files/wittgenstein/switch-off.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
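+# Cut power: drive the power pins (wiringPi numbers 2 and 3, cf. switch-on.sh)
+# low, then release them to tri-state (high impedance)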
+gpio write 2 0
+gpio write 3 0
+
+gpio mode 2 tri
+gpio mode 3 tri
diff --git a/files/wittgenstein/switch-on.sh b/files/wittgenstein/switch-on.sh
new file mode 100755
index 0000000..aae9e2e
--- /dev/null
+++ b/files/wittgenstein/switch-on.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+# INT pin: leave floating (tri-state)
+gpio mode 0 tri
+
+# Power pins: configure as outputs and drive high
+gpio mode 2 out
+gpio mode 3 out
+
+gpio write 2 1
+gpio write 3 1
diff --git a/files/wittgenstein/unstuck.sh b/files/wittgenstein/unstuck.sh
new file mode 100755
index 0000000..29da941
--- /dev/null
+++ b/files/wittgenstein/unstuck.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
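+# Log the event, kill the controller tmux sessions, and restart them via reboot.sh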
+logger -t unstuck "unstuck $(date)"
+
+killall tmux
+
+sleep 1
+/home/pi/reboot.sh
diff --git a/group-all.yml b/group-all.yml
index cb380e2..8d06a78 100644
--- a/group-all.yml
+++ b/group-all.yml
@@ -1,34 +1,33 @@
 ---
-# tasks for all hosts
-
-- hosts: all
+- name: Tasks for all hosts
+  hosts: all
   become: true
 
   vars:
     ansible_python_interpreter: /usr/bin/python3
 
   roles:
-    - role: ansible.timezone
+    - role: adriagalin.timezone
       vars:
         ag_timezone: "{{ timezone }}"
     - role: users
 
   tasks:
     - name: Update and clean package cache
-      apt:
+      ansible.builtin.apt:
         update_cache: true
         cache_valid_time: 3600
         autoclean: true
       changed_when: false
 
     - name: Ensure unattended-upgrades is installed and up to date
-      apt:
+      ansible.builtin.apt:
         name: unattended-upgrades
         state: present
 
     - name: Setup unattended-upgrades
-      include_role:
-        name: hifis.unattended_upgrades
+      ansible.builtin.include_role:
+        name: hifis.toolkit.unattended_upgrades
       vars:
         unattended_origins_patterns:
           - "origin=*"
diff --git a/group-docker_host.yml b/group-docker_host.yml
index 15c2ed9..10a4e36 100644
--- a/group-docker_host.yml
+++ b/group-docker_host.yml
@@ -1,15 +1,18 @@
 ---
-- hosts: docker_host
+- name: Tasks for docker hosts
+  hosts: docker_host
   become: true
 
   roles:
-    - role: docker_setup
+    - role: netz39.host_docker
 
-- hosts: docker_host:&location_space
+- name: Tasks for docker hosts at location space
+  hosts: docker_host:&location_space
   become: true
 
   roles:
     - role: lespocky.telegraf_docker_in_docker
+      when: (ansible_architecture == "x86_64")
       vars:
         tdid_conf_dir: "/etc/telegraf"
         tdid_influxdb_org: "{{ influxdb_org }}"
diff --git a/group-k3s.yml b/group-k3s.yml
new file mode 100644
index 0000000..96917bf
--- /dev/null
+++ b/group-k3s.yml
@@ -0,0 +1,10 @@
+---
+- name: Tasks for kubernetes hosts
+  hosts: k3s
+  become: true
+
+  tasks:
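+    # nfs-common provides the userspace mount helpers that k3s needs for
+    # NFS-backed volumes (e.g. the exports defined in host-plumbum.yml)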
+    - name: Ensure nfs-common is installed on k3s VMs
+      ansible.builtin.apt:
+        pkg: nfs-common
+        state: present
diff --git a/group-proxmox.yml b/group-proxmox.yml
index 6ec32bb..d95a857 100644
--- a/group-proxmox.yml
+++ b/group-proxmox.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: proxmox
+- name: Tasks for virtual machines on proxmox host
+  hosts: proxmox
   become: true
 
   tasks:
diff --git a/group_vars/all/vars.yml b/group_vars/all/vars.yml
index ec293be..fa863ca 100644
--- a/group_vars/all/vars.yml
+++ b/group_vars/all/vars.yml
@@ -10,6 +10,8 @@ users:
     ssh_pub:
       - !unsafe >
         ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVZPAE3XE8Ek1Ji4sCIHxLVx+bi2qpsTSsYhBqtYysnFn9AHJj14BR59D0Si05sfVkmL4OQoo7Q98oIxy33PgtqoUfgXk9dc7dlsye3t/gsAb25ABnqG/ZYe65nZLN7BzRM1/QZIbd6sSu6eXrNFCh0ikB5se4zgVkDO8t6h2dnz4FvTuIM2Bi/PnIJTqb8+uLQE1vS3A7tTx100ZKXxr81dlo2Y1JBP6WrS1W1IyFiG6wofl2XTY02ssyoENQyR89lLMJYKvm5xlhL/L69gtMsqIX9UBQFk8Rpq04ZIwN6b0K4R142GZvxdJNdQULgtI3gPkKgH7FDoFsRHNA6b/9 adahl@ada
+      - !unsafe >
+        ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDb5j4SlVDkK/CE/viZA5M/VquKm0DaMB6h5yR9ZWf7oW2h/q5tPQr5Kzatt+uCF++1eCOfoq6BR/NV01KkVdyTMemA8LMZwuf4uUzTlfnoXO4eGP0+d4aGzSuE08gak8c0iYF5zzzJGSKVIZ7qQXAmAH5guJxdRltpJlFbnYY6Plo1nxmluSAAh8qPSBQhZy+ja05ZpXct6+IeXHDLJ9ia5x71hAbEzKJXafVukL/Qt6Gr80snW1OuVzBpDs5/O2taKNV4a3dAzM4cNb0xGbhNogiuZD5IPHjkbsiOifBT+i48CBOasSWO9tnNZ6X/kDXxizoo4gB1rWOVvPE8SXXbKSxus48AG0MEKh0XGB7z7klCxDWITn1JpN3x8/vbG9Y02/QlVdqdTuIq7fUfrQz3hipR2DMXuGnMkwkR80XXkQziuBP6UG3Meh2wZ0SxIex3JgVsZh4gxvIvNxuU9iEcpgEFhGFvQwxbZ+nWYYe0j//OzfKQpod/D03tx7W6SXM= adahl@ada-pc
       - !unsafe >
         ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDvczlb1+9d1BjuLk5ZcQt2Z0Dh61Vg91i47tM48CN2koJ4I/9vgN37l6mnr383zD8kQkXDGmCYpXOa48WocyyUuP3h75DCjANYcWOsohQfFu2F1ZOiiVCGduDntzS2nbZEF2W3nZNLQ6/dKKEeaSxu5RjKflkWakghkMt3H4KN20bxzYzHQMLhRYFEGHpskOqeaXKPkqqEP+u5kToINtmXwegCvQFnlx4fNrysFII79buBNlcLsO1X4ABucVMYT/OJnBpJEfEcNFUKrJZRGgM8aDbUpkV9LRY2lywvoKJhiRMc7x7kK0LWOTdPJri+SJhW6fEW4JKCRTSHVN8OS8S/ alex@buffy
       - !unsafe >
@@ -22,11 +24,6 @@ users:
         ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGmU7MfOFuc6z5Vbwh4CbBFSg19f8B9rUO2ITjgmEvkY alex@lemmy
     sudo: yes
     docker: yes
-  - logname: "kwasir"
-    viewname: "Peter Seidel"
-    email: "kwasir@netz39.de"
-    sudo: yes
-    docker: yes
   - logname: "tux"
     viewname: "Stefan Haun"
     email: "tux@netz39.de"
@@ -53,10 +50,29 @@ users:
         ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHasp1Q/HJURndKnNRP5TJqJVHPuN9G/9uHdaNGhI8yi mg@mg-Swift-SF314-52G
     sudo: yes
     docker: yes
+  - logname: "timo"
+    viewname: "Timo Herrmann"
+    email: "timo@netz39.de"
+    ssh_pub:
+      - !unsafe >
+        ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILKhKHCPibswu2p6UQHKsBSqGaXzMFM+oMX0XEWsxCIc timo@Space-Lap
+      - !unsafe >
+        ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMJoS7nsLLzSAsZA4us2/7JaQhgRjj/BY+LOpDQnfy8u timo@mac
+    sudo: yes
+    docker: yes
+  - logname: "jens"
+    viewname: "Jens Winter-Hübenthal"
+    email: "jens.winter@gmail.com"
+    ssh_pub:
+      - !unsafe >
+        ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIII4FS2sROKs2nIW8uzDuWmj8q127UoljtkVMthY8g// jens@work-lenovo
+    sudo: yes
+    docker: yes
 
-# Data for DD24 dyndns updates
+# Data for dyndns updates
 dyndns_domain: "dyndns.n39.eu"
 dyndns_password: "{{ vault_dyndns_password }}"
+dyndns_token: "{{ vault_dyndns_token }}"
 
 # Shared influxdb items
 influxdb_org: "netz39"
diff --git a/group_vars/all/vault b/group_vars/all/vault
index 1e42948..255971a 100644
--- a/group_vars/all/vault
+++ b/group_vars/all/vault
@@ -1,7 +1,10 @@
 $ANSIBLE_VAULT;1.1;AES256
-34303066383937623831333466333965323161376134353838346235323662373164303163363734
-3134626237346361656533636161363331666537633538380a613761643431356530343663626666
-62646361316364333533316638646261373661633863363733366337373338336565366536386237
-3138646266613837310a396139363830613463393861336161363533343362383462623265356563
-31333862613937306463353130316365636634353862363039663762326263313366363530636631
-3630653638333831303432316266633833643739643533353536
+37306233306262383862373661626635346436316265663162343433303432653536376632316439
+6336396564613232363337303266643965346333396331620a316536636666393461353633366466
+39333362306166376462353739626139623835326461373834303330346538366637626363306438
+3033376133373330330a356236396366643938323666663836643738386337373362323933623838
+30316663646134623232336563343562393037363463303739626464633461323539306261316638
+61343330626263393065636230303632663965653939373437386561656539646533653661613236
+35326334313232633738633933653939383830636361373938373864643133363539623734646435
+32336630613231353337336466646164373734386539653936313865316336616264373061633139
+3839
diff --git a/group_vars/docker_host/vars.yml b/group_vars/docker_host/vars.yml
index 2978231..0a27306 100644
--- a/group_vars/docker_host/vars.yml
+++ b/group_vars/docker_host/vars.yml
@@ -1,2 +1,3 @@
 ---
 docker_data_root: "/srv/docker"
+docker_image_prune: true
diff --git a/host-beaker.yml b/host-beaker.yml
index ccd5285..4eb5817 100644
--- a/host-beaker.yml
+++ b/host-beaker.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: beaker.n39.eu
+- name: Setup things on host 'beaker' (proxmox server in the space)
+  hosts: beaker.n39.eu
   become: true
 
   vars:
@@ -9,7 +10,7 @@
 
   tasks:
 
-    - name: enable proxmox gui login for admin users
+    - name: Enable proxmox gui login for admin users
       ansible.builtin.lineinfile:
         path: /etc/pve/user.cfg
         regexp: "^user:{{ item.logname }}@pam"
@@ -18,11 +19,10 @@
         state: present
       loop: "{{ users }}"
 
-    - name: configure proxmox admin group
+    - name: Configure proxmox admin group
       ansible.builtin.lineinfile:
         path: /etc/pve/user.cfg
         regexp: "^group:Admins:"
-        # group:Admins:kwasir@pam,lespocky@pam,tux@pam::
-        line: "group:Admins:{{ users | map(attribute = 'logname') | join(\"@pam,\") }}@pam::"
+        line: "group:Admins:{{ users | map(attribute='logname') | join(\"@pam,\") }}@pam::"
 
   handlers:
diff --git a/host-hobbes.yml b/host-hobbes.yml
index 7db081c..618d07f 100644
--- a/host-hobbes.yml
+++ b/host-hobbes.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: hobbes.n39.eu
+- name: Setup things on host 'hobbes' (raspberry pi for kiosk screen)
+  hosts: hobbes.n39.eu
   become: true
 
   vars:
@@ -8,7 +9,6 @@
 
   roles:
 
-
   tasks:
     - name: Install packages needed for the system
       # This is a list of all packages,
@@ -16,15 +16,9 @@
       ansible.builtin.apt:
         state: present
         name:
-          # This is needed for the user-executed tasks
-          - acl
-          # Regular packages
-          - lightdm
-          - accountsservice
-          - unclutter
-          - lxde
-          - chromium-browser
-          - rng-tools
+          - mosquitto-clients
+          - fbi
+
 
     - name: Remove the screensavers
       ansible.builtin.apt:
@@ -53,50 +47,23 @@
 
 
     ### Kiosk setup
-    #
-    # https://github.com/grafana/grafana-kiosk
-
     - name: Ensure kiosk user is there
       ansible.builtin.user:
         name: "{{ kiosk_user }}"
         groups: audio,plugdev,input,netdev
         append: yes
 
-
-    - name: Create bin directory
-      file:
-        path: "/home/{{ kiosk_user }}/bin"
-        owner: "{{ kiosk_user }}"
+    - name: Install Kiosk script
+      ansible.builtin.template:
+        src: templates/hobbes/kiosk.sh.j2
+        dest: /usr/local/bin/kiosk.sh
+        owner: root
+        group: root
         mode: '0755'
-        state: directory
-
-    - name: Download grafana-kiosk
-      ansible.builtin.get_url:
-        url: "https://github.com/grafana/grafana-kiosk/releases/download/{{ kiosk_software_version }}/grafana-kiosk.linux.{{ kiosk_software_arch }}"
-        dest: "/home/{{ kiosk_user }}/bin/grafana-kiosk"
-        mode: '0755'
-        force: no
-
-
-    - name: Setup autologin in lightdm
-      ansible.builtin.blockinfile:
-        path: /etc/lightdm/lightdm.conf
-        block: |
-          [Seat:seat0]
-          autologin-user = pi
-          autologin-user-timeout = 0
-          autologin-in-background = False
-
-    - name: Remove autostart
-      # None of the things in autostart are needed or wanted
-      ansible.builtin.file:
-        path: /etc/xdg/lxsession/LXDE/autostart
-        state: absent
-
 
     - name: Add systemd service
-      ansible.builtin.template:
-        src: templates/hobbes/grafana-kiosk.service.j2
+      ansible.builtin.copy:
+        src: files/hobbes/grafana-kiosk.service
         dest: /etc/systemd/system/grafana-kiosk.service
         owner: root
         group: root
@@ -108,12 +75,4 @@
         enabled: true
         state: started
 
-    - name: Set default systemd target to graphical
-      ansible.builtin.file:
-        src: /lib/systemd/system/graphical.target
-        dest: /etc/systemd/system/default.target
-        state: link
-        force: yes
-
-
   handlers:
diff --git a/host-holmium.yml b/host-holmium.yml
index 6cd608e..948f705 100644
--- a/host-holmium.yml
+++ b/host-holmium.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: holmium.n39.eu
+- name: Setup things on host 'holmium' (http ingress vm)
+  hosts: holmium.n39.eu
   become: true
 
   vars:
@@ -15,7 +16,7 @@
               - name: entities-validation.svc.n39.eu
           - server: pottwal
             hosts:
-              - name: gitea.n39.eu
+              - name: git.n39.eu
               - name: redmine.n39.eu
               - name: uritools.n39.eu
               - name: uritools-api.n39.eu
@@ -37,5 +38,5 @@
                 local: true
               - name: pwr-meter-pulse-gw-19i.svc.n39.eu
                 local: true
-              - name: brotherql-web.n39.eu
+              - name: labelprinter.n39.eu
                 local: true
diff --git a/host-krypton.yml b/host-krypton.yml
index 53204f8..2efdf68 100644
--- a/host-krypton.yml
+++ b/host-krypton.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: krypton.n39.eu
+- name: Setup things on host 'krypton' (ldap vm)
+  hosts: krypton.n39.eu
   become: true
 
   vars:
@@ -9,7 +10,6 @@
 
     docker_ip_ranges: ["172.16.0.0/12", "192.168.0.0/16"]
 
-    openldap_image_version: 1.5.0
     openldap_data: "{{ data_dir }}/openldap"
     openldap_domain: "ldap.n39.eu"
     ldap_domain: "netz39.de"
@@ -20,10 +20,10 @@
 
 
   roles:
-    # role 'docker_setup' applied through group 'docker_host'
+    # role 'netz39.host_docker' applied through group 'docker_host'
     - role: apache
     - role: apache_letsencrypt  # Uses configuration from dehydrated setup
-    - role: ansible-role-dehydrated
+    - role: 24367dfa.dehydrated
       vars:
         dehydrated_contact_email: "{{ server_admin }}"
         dehydrated_domains:
@@ -33,13 +33,13 @@
   tasks:
 
     # - name: Setup dehydrated challenge endpoint for {{ openldap_domain }}
-    #   include_role:
+    #   ansible.builtin.include_role:
     #     name: setup-http-dehydrated
     #   vars:
     #     site_name: "{{ openldap_domain }}"
 
     - name: Ensure openLDAP directories are present.
-      file:
+      ansible.builtin.file:
         path: "{{ item.path }}"
         mode: "0755"
         state: directory
@@ -50,9 +50,9 @@
         - path: "{{ dehydrated_certs_dir }}/{{ openldap_domain }}"
 
     - name: Ensure container for openLDAP is running.
-      docker_container:
+      community.docker.docker_container:
         name: openLDAP
-        image: "osixia/openldap:{{ openldap_image_version }}"
+        image: osixia/openldap:1.5.0
         detach: yes
         state: started
         restart_policy: unless-stopped
@@ -99,9 +99,9 @@
         rule: allow
         port: '389'
         proto: tcp
-        from: "{{  item  }}"
+        from: "{{ item }}"
         comment: LDAP Docker Access
-      loop: "{{  docker_ip_ranges  }}"
+      loop: "{{ docker_ip_ranges }}"
 
     - name: Allow access to openLDAP from local docker container [2/2]
       become: true
@@ -109,15 +109,15 @@
         rule: allow
         port: '636'
         proto: tcp
-        from: "{{  item  }}"
+        from: "{{ item }}"
         comment: LDAP Docker Access
-      loop: "{{  docker_ip_ranges  }}"
+      loop: "{{ docker_ip_ranges }}"
 
 
     - name: Ensure container for entities validation service is running
-      docker_container:
+      community.docker.docker_container:
         name: entities_validation_svc
-        image: netz39/entities_validation_svc:v1.0.0
+        image: netz39/entities_validation_svc:v1.0.4
         pull: true
         state: started
         detach: yes
@@ -128,7 +128,7 @@
           TZ: "{{ timezone }}"
 
     - name: Setup proxy site entities-validation.svc.n39.eu
-      include_role:
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
         site_name: entities-validation.svc.n39.eu
diff --git a/host-oganesson.yml b/host-oganesson.yml
index ed2e5a0..a4cc596 100644
--- a/host-oganesson.yml
+++ b/host-oganesson.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: oganesson.n39.eu
+- name: Setup things on host 'oganesson' (ssh jump host vm)
+  hosts: oganesson.n39.eu
   become: true
 
   vars:
diff --git a/host-platon.yml b/host-platon.yml
index e219a27..21027c4 100644
--- a/host-platon.yml
+++ b/host-platon.yml
@@ -1,12 +1,13 @@
 ---
-- hosts: platon.n39.eu
+- name: Setup things on host 'platon' (raspberry pi for entrance door)
+  hosts: platon.n39.eu
   become: true
   vars:
     ansible_python_interpreter: /usr/bin/python3
     door_open_command: '/home/pi/sesame-open.sh'
     ble_keykeeper_dir: '/home/pi/netz39_ble_keykeeper'
   roles:
-    - role: ble-keykeeper-role
+    - role: maz3max.ble_keykeeper
       vars:
         ble_keykeeper_user: "{{ gatekeeper_user }}"
         ble_keykeeper_group: "{{ gatekeeper_user }}"
@@ -63,7 +64,7 @@
         owner: root
         group: root
         mode: '0644'
-      notify: restart mosquitto
+      notify: Restart mosquitto service
 
 
     ### Sesam for SSH access
@@ -141,7 +142,7 @@
         mode: "0644"
       register: wiringPi_copy
 
-    - name: Install wiringPi library  # noqa 503
+    - name: Install wiringPi library  # noqa: no-handler
       ansible.builtin.apt:
         state: present
         deb: "/home/{{ gatekeeper_user }}/wiringpi-latest.deb"
@@ -204,7 +205,6 @@
       become: yes
       become_user: "{{ gatekeeper_user }}"
       ansible.builtin.shell:
-        warn: false
         chdir: "/home/{{ gatekeeper_user }}/mqtt-tools"
         cmd: |
           mkdir build
@@ -246,7 +246,7 @@
         owner: root
         group: root
         mode: "0644"
-      notify: restart rsyslog
+      notify: Restart rsyslog
 
 
     ### Asterisk
@@ -259,7 +259,7 @@
         owner: root
         group: root
         mode: "0644"
-      notify: restart asterisk
+      notify: Restart asterisk
 
     - name: Set up extensions for asterisk
       # This uses the variables gatekeeper_user and door_open_command
@@ -269,14 +269,25 @@
         owner: root
         group: root
         mode: "0644"
-      notify: restart asterisk
+      notify: Restart asterisk
 
     - name: Ensure asterisk is in the right groups
       ansible.builtin.user:
         name: asterisk
         groups: audio,i2c,gpio
         append: yes
-      notify: restart asterisk
+      notify: Restart asterisk
+
+    # Asterisk now executes shell scripts with reduced privileges, so we need to
+    # use sudo for I2C access.
+    - name: Set up sudo configuration for Asterisk I2C access
+      ansible.builtin.copy:
+        src: "files/platon/11_asterisk_i2c"
+        dest: "/etc/sudoers.d/"
+        owner: root
+        group: root
+        mode: "0644"
+      # Asterisk restart is not necessary
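+      # Example of what asterisk's shell scripts can now run without a password
+      # (bus and chip address are illustrative only):
+      #   sudo /usr/sbin/i2cget -y 1 0x20 0x00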
 
     - name: Copy sounds
       ansible.builtin.copy:
@@ -294,20 +305,20 @@
 
 
   handlers:
-    - name: restart mosquitto
-      service:
+    - name: Restart mosquitto service
+      ansible.builtin.service:
         name: mosquitto
         state: restarted
         enabled: yes
 
-    - name: restart rsyslog
-      service:
+    - name: Restart rsyslog
+      ansible.builtin.service:
         name: rsyslog
         state: restarted
         enabled: yes
 
-    - name: restart asterisk
-      service:
+    - name: Restart asterisk
+      ansible.builtin.service:
         name: asterisk
         state: restarted
         enabled: yes
diff --git a/host-plumbum.yml b/host-plumbum.yml
new file mode 100644
index 0000000..19875f6
--- /dev/null
+++ b/host-plumbum.yml
@@ -0,0 +1,15 @@
+---
+- name: Setup things on host 'plumbum' (nfs server)
+  hosts: plumbum.n39.eu
+  become: true
+
+  roles:
+    - role: nfs_host
+      vars:
+        nfs_host_exports:
+          - directory: "/srv/nfs/backup"
+            hosts: "*.n39.eu"
+            options: rw,sync,no_subtree_check,no_root_squash
+          - directory: "/srv/nfs/ephemeral"
+            hosts: "*.n39.eu"
+            options: rw,sync,no_subtree_check,no_root_squash
diff --git a/host-pottwal.yml b/host-pottwal.yml
index e1f265f..c6b97ad 100644
--- a/host-pottwal.yml
+++ b/host-pottwal.yml
@@ -1,33 +1,36 @@
 ---
-- hosts: pottwal.n39.eu
+- name: Setup things on host 'pottwal' (the big docker container host)
+  hosts: pottwal.n39.eu
   become: true
 
   roles:
-    # role 'docker_setup' applied through group 'docker_host'
+    # role 'netz39.host_docker' applied through group 'docker_host'
     - role: apache
     - role: apache_letsencrypt  # Uses configuration from dehydrated setup
-    - role: ansible-role-dehydrated
+    - role: 24367dfa.dehydrated
       vars:
         dehydrated_contact_email: "{{ server_admin }}"
         dehydrated_domains:
-          - name: gitea.n39.eu
+          - name: "{{ forgejo_domain_name }}"
           - name: uritools.n39.eu
           - name: uritools-api.n39.eu
           - name: "{{ shlink_domain_name }}"
-          - name: pad.n39.eu
+          - name: "{{ hedgedoc_domain_name }}"
           - name: "{{ prosody_domain_name }}"
             alternate_names:
               - conference.jabber.n39.eu
             deploy_cert_hook: "docker exec prosody prosodyctl --root cert import ${DOMAIN} /var/lib/dehydrated/certs"
-          - name: redmine.n39.eu
+          - name: "{{ redmine_domain_name }}"
           - name: "{{ influxdb_domain_name }}"
-          - name: uptime.n39.eu
+          - name: "{{ uptimekuma_domain_name }}"
           - name: "{{ grafana_domain_name }}"
           - name: "{{ homebox_domain_name }}"
           - name: spaceapi.n39.eu
     - role: penguineer.dehydrated_cron
     - role: dd24_dyndns_cron
       # variables are set in the inventory
+    - role: desec_dyndns_cron
+      # variables are set in the inventory
     - role: cleanuri
       vars:
         cleanuri_ui_domain: uritools.n39.eu
@@ -42,49 +45,53 @@
       ansible.builtin.stat:
         path: "{{ data_dir }}/forgejo"
       register: forgejo_dir
+      tags: ["forgejo"]
     - name: Fail if forgejo data dir does not exist
       ansible.builtin.fail:
         msg: "Forgejo data dir is missing, please restore from the backup!"
       when: not forgejo_dir.stat.exists
+      tags: ["forgejo"]
 
     # If port 2222 is changed here, it must also be adapted
     # in the forgejo config file (see application volume)!!
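+    # (for Forgejo this usually means SSH_PORT in the [server] section of
+    # app.ini inside the data volume)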
-    - name: Setup the docker container for gitea
-      docker_container:
+    - name: Setup the docker container for forgejo
+      community.docker.docker_container:
         name: forgejo
-        image: "codeberg.org/forgejo/forgejo:1.19"
+        image: "{{ forgejo_image }}"
         pull: true
         state: started
         restart_policy: unless-stopped
         detach: yes
         ports:
           - 127.0.0.1:{{ forgejo_host_port }}:3000
-          - 2222:2222
+          - "{{ forgejo_ssh_port }}:2222"
         env:
           TZ: "{{ timezone }}"
           APP_NAME: "Netz39 Git"
           RUN_MODE: "prod"
-          SSH_DOMAIN: "gitea.n39.eu"
+          SSH_DOMAIN: "{{ forgejo_domain_name }}"
           SSH_PORT: "2222"
           SSH_START_SERVER: "false"
-          ROOT_URL: "https://gitea.n39.eu"
+          ROOT_URL: "https://{{ forgejo_domain_name }}"
           DISABLE_REGISTRATION: "true"
           USER_UID: "1000"
           USER_GID: "1000"
         volumes:
           - "{{ data_dir }}/forgejo:/data:rw"
+      tags: ["forgejo"]
 
-    - name: Setup proxy site gitea.n39.eu
-      include_role:
+    - name: Setup proxy site "{{ forgejo_domain_name }}"
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
-        site_name: "gitea.n39.eu"
+        site_name: "{{ forgejo_domain_name }}"
         proxy_port: "{{ forgejo_host_port }}"
+      tags: ["forgejo"]
 
     - name: Ensure apt-cacher container is running
-      docker_container:
+      community.docker.docker_container:
         name: apt_cacher_ng
-        image: "mrtux/apt-cacher-ng"
+        image: mrtux/apt-cacher-ng:latest
         pull: true
         state: started
         restart_policy: unless-stopped
@@ -94,11 +101,46 @@
         env:
           TZ: "{{ timezone }}"
 
+    - name: Setup docker network
+      community.docker.docker_network:
+        name: shlinknet
+        state: present
+        internal: true
+      tags:
+        - shlink
+
+    - name: Ensure shlink data dir exists
+      ansible.builtin.file:
+        path: "{{ data_dir }}/shlink/data/database"
+        state: directory
+        mode: 0755
+      tags:
+        - shlink
+
+    - name: Ensure shlink database container is running
+      community.docker.docker_container:
+        name: shlinkdb
+        image: postgres:16.8-alpine
+        pull: true
+        state: started
+        restart_policy: unless-stopped
+        detach: yes
+        env:
+          TZ: "{{ timezone }}"
+          POSTGRES_USER: "shlink"
+          POSTGRES_PASSWORD: "{{ shlink_postgres_password }}"
+          POSTGRES_DB: "shlink"
+        volumes:
+          - "{{ data_dir }}/shlink/data/database:/var/lib/postgresql/data"
+        networks:
+          - name: shlinknet
+      tags:
+        - shlink
 
     - name: Ensure container for shlink is running
-      docker_container:
+      community.docker.docker_container:
         name: shlink
-        image: shlinkio/shlink:2.6.2
+        image: "{{ shlink_image }}"
         pull: true
         state: started
         detach: yes
@@ -107,16 +149,31 @@
         restart_policy: unless-stopped
         env:
           TZ: "{{ timezone }}"
-          SHORT_DOMAIN_HOST: "{{ shlink_domain_name }}"
-          SHORT_DOMAIN_SCHEMA: https
-          GEOLITE_LICENSE_KEY: "{{ shlink_geolite_license_key }}"
+          DEFAULT_DOMAIN: "{{ shlink_domain_name }}"
+          INITIAL_API_KEY: "{{ shlink_initial_api_key }}"
+          DB_DRIVER: "postgres"
+          DB_HOST: shlinkdb
+          DB_NAME: "shlink"
+          DB_USER: "shlink"
+          DB_PASSWORD: "{{ shlink_postgres_password }}"
+        volumes:
+          - "{{ data_dir }}/shlink/database.sqlite:/etc/shlink/datadatabase.sqlite:rw"
+        networks_cli_compatible: false
+        comparisons:
+          networks: allow_more_present
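+        # allow_more_present: extra networks attached to the container do not
+        # trigger a recreate; we only manage shlinknet here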
+        networks:
+          - name: shlinknet
+      tags:
+        - shlink
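+
+    # A hypothetical smoke test once shlink is up (the API key comes from
+    # INITIAL_API_KEY; the REST path may differ between Shlink versions):
+    #   curl -X POST "https://{{ shlink_domain_name }}/rest/v3/short-urls" \
+    #        -H "X-Api-Key: $API_KEY" -H "Content-Type: application/json" \
+    #        -d '{"longUrl": "https://www.netz39.de/"}'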
 
     - name: Setup proxy site {{ shlink_domain_name }}
-      include_role:
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ shlink_domain_name }}"
         proxy_port: "{{ shlink_host_port }}"
+      tags:
+        - shlink
 
     - name: Check if prosody data dir exists
       ansible.builtin.stat:
@@ -127,7 +184,7 @@
         msg: "prosody data dir is missing, please restore from the backup!"
       when: not prosody_dir.stat.exists
 
-    - name: Ensure prosody config dir exists
+    - name: Ensure prosody main config dir exists
       ansible.builtin.file:
         path: "{{ prosody_config_dir }}"
         state: directory
@@ -147,7 +204,7 @@
         state: directory
         mode: 0755
 
-    - name: Ensure prosody certs dir exists
+    - name: Ensure prosody conf.d dir exists
       ansible.builtin.file:
         path: "{{ prosody_config_dir }}/conf.d"
         state: directory
@@ -178,9 +235,9 @@
         - Restart prosody
 
     - name: Ensure container for prosody XMPP server is running
-      docker_container:
+      community.docker.docker_container:
         name: prosody
-        image: netz39/prosody:0.11
+        image: "{{ prosody_image }}"
         pull: true
         state: started
         detach: true
@@ -200,9 +257,9 @@
 
 
     - name: Ensure container for static XMPP website is running
-      docker_container:
+      community.docker.docker_container:
         name: jabber-static-website
-        image: joseluisq/static-web-server:2.14
+        image: "{{ prosody_web_image }}"
         pull: true
         state: started
         detach: true
@@ -215,26 +272,34 @@
           - "127.0.0.1:{{ jabber_host_port }}:80"
         volumes:
           - "{{ prosody_data_dir }}/var/www:/public:ro"
+      tags:
+        - prosody-web
 
     - name: Setup proxy site {{ prosody_domain_name }}
       # point to static website for now
-      include_role:
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ prosody_domain_name }}"
         proxy_port: "{{ jabber_host_port }}"
+      tags:
+        - prosody-web
 
     - name: Check if hedgedoc data dir exists
       ansible.builtin.stat:
         path: "{{ data_dir }}/hedgedoc"
       register: hedgedoc_dir
+      tags:
+        - hedgedoc
     - name: Fail if hedgedoc data dir does not exist
       ansible.builtin.fail:
         msg: "hedgedoc data dir is missing, please restore from the backup!"
       when: not hedgedoc_dir.stat.exists
+      tags:
+        - hedgedoc
 
     - name: Ensure the hedgedoc directories exist
-      file:
+      ansible.builtin.file:
         path: "{{ item.path }}"
         mode: "{{ item.mode }}"
         state: directory
@@ -243,17 +308,21 @@
           mode: "0700"
         - path: "{{ data_dir }}/hedgedoc/data/uploads"
           mode: "0755"
+      tags:
+        - hedgedoc
 
     - name: Setup docker network
-      docker_network:
+      community.docker.docker_network:
         name: hedgedocnet
         state: present
         internal: true
+      tags:
+        - hedgedoc
 
     - name: Install HedgeDoc database container
-      docker_container:
+      community.docker.docker_container:
         name: hedgedocdb
-        image: "postgres:11.6-alpine"
+        image: "{{ hedgedoc_db_image }}"
         pull: true
         state: started
         restart_policy: unless-stopped
@@ -267,9 +336,11 @@
           - "{{ data_dir }}/hedgedoc/data/database:/var/lib/postgresql/data"
         networks:
           - name: hedgedocnet
+      tags:
+        - hedgedoc
 
     - name: Ensure container for hedgedoc is running
-      docker_container:
+      community.docker.docker_container:
         name: hedgedoc
         image: "{{ hedgedoc_image }}"
         pull: true
@@ -282,7 +353,7 @@
           TZ: "{{ timezone }}"
           NODE_ENV: "production"
           CMD_PROTOCOL_USESSL: "true"
-          CMD_DOMAIN: "pad.n39.eu"
+          CMD_DOMAIN: "{{ hedgedoc_domain_name }}"
           CMD_URL_ADDPORT: "false"
           CMD_DB_HOST: "hedgedocdb"
           CMD_DB_PORT: "5432"
@@ -292,18 +363,25 @@
           CMD_DB_PASSWORD: "{{ hedgedoc_postgres_password }}"
         volumes:
           - "{{ data_dir }}/hedgedoc/data/uploads:/hedgedoc/public/uploads"
+        networks_cli_compatible: false
+        comparisons:
+          networks: allow_more_present
         networks:
           - name: hedgedocnet
+      tags:
+        - hedgedoc
 
-    - name: Setup proxy site pad.n39.eu
-      include_role:
+    - name: Setup proxy site "{{ hedgedoc_domain_name }}"
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
-        site_name: pad.n39.eu
+        site_name: "{{ hedgedoc_domain_name }}"
         proxy_port: "{{ hedgedoc_host_port }}"
+      tags:
+        - hedgedoc
 
     - name: Ensure the influxdb directories exist
-      file:
+      ansible.builtin.file:
         path: "{{ item }}"
         mode: 0700
         state: directory
@@ -313,7 +391,7 @@
         - "{{ data_dir }}/influxdb/cfg"
 
     - name: Ensure container for influxdb is running
-      docker_container:
+      community.docker.docker_container:
         name: influxdb
         image: "{{ influxdb_image }}"
         pull: true
@@ -333,7 +411,7 @@
           - "{{ data_dir }}/influxdb/cfg:/etc/influxdb2"
 
     - name: Setup proxy site {{ influxdb_domain_name }}
-      include_role:
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ influxdb_domain_name }}"
@@ -348,19 +426,25 @@
       ansible.builtin.stat:
         path: "{{ data_dir }}/redmine"
       register: redmine_dir
+      tags:
+        - redmine
     - name: Fail if redmine data dir does not exist
       ansible.builtin.fail:
         msg: "Redmine data dir is missing, please restore from the backup!"
       when: not redmine_dir.stat.exists
+      tags:
+        - redmine
 
     - name: Setup Redmine docker network
-      docker_network:
+      community.docker.docker_network:
         name: redminenet
         state: present
         internal: true
+      tags:
+        - redmine
 
     - name: Setup Redmine MySQL container
-      docker_container:
+      community.docker.docker_container:
         name: redminedb
         image: "{{ redmine_mysql_image }}"
         pull: true
@@ -373,11 +457,14 @@
           MYSQL_DATABASE: "{{ redmine_database }}"
         volumes:
           - "{{ data_dir }}/redmine/mysql:/var/lib/mysql"
+          - "{{ data_dir }}/redmine/mysql-config:/etc/mysql/conf.d"
         networks:
           - name: redminenet
+      tags:
+        - redmine
 
     - name: Setup Redmine container
-      docker_container:
+      community.docker.docker_container:
         name: redmine
         image: "{{ redmine_image }}"
         pull: true
@@ -394,28 +481,37 @@
           - "{{ data_dir }}/redmine/configuration.yml:/usr/src/redmine/config/configuration.yml"
           - "{{ data_dir }}/redmine/files:/usr/src/redmine/files"
           - "{{ data_dir }}/redmine/themes:/usr/src/redmine/public/themes"
+        networks_cli_compatible: false
+        comparisons:
+          networks: allow_more_present
         networks:
           - name: redminenet
+      tags:
+        - redmine
 
-    - name: Setup proxy site redmine.n39.eu
-      include_role:
+    - name: Setup proxy site "{{ redmine_domain_name }}"
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
-        site_name: redmine.n39.eu
+        site_name: "{{ redmine_domain_name }}"
         proxy_port: "{{ redmine_host_port }}"
+      tags:
+        - redmine
 
     - name: Ensure the uptime-kuma directories exist
-      file:
+      ansible.builtin.file:
         path: "{{ item }}"
         mode: "0755"
         state: directory
       with_items:
         - "{{ data_dir }}/uptime-kuma"
+      tags:
+        - uptimekuma
 
     - name: Ensure container for uptime-kuma is running
-      docker_container:
+      community.docker.docker_container:
         name: uptime-kuma
-        image: "louislam/uptime-kuma:1"
+        image: "{{ uptimekuma_image }}"
         pull: true
         state: started
         detach: yes
@@ -426,16 +522,20 @@
           TZ: "{{ timezone }}"
         volumes:
           - "{{ data_dir }}/uptime-kuma:/app/data"
+      tags:
+        - uptimekuma
 
-    - name: Setup proxy site uptime.n39.eu
-      include_role:
+    - name: Setup proxy site "{{ uptimekuma_domain_name }}"
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
-        site_name: uptime.n39.eu
+        site_name: "{{ uptimekuma_domain_name }}"
         proxy_port: "{{ uptimekuma_host_port }}"
+      tags:
+        - uptimekuma
 
     - name: Ensure the grafana directories exist
-      file:
+      ansible.builtin.file:
         path: "{{ item.path }}"
         owner: "{{ item.owner | default('root') }}"
         mode: "{{ item.mode }}"
@@ -448,11 +548,13 @@
           mode: "0755"
         - path: "{{ data_dir }}/grafana/etc"
           mode: "0755"
+      tags:
+        - grafana
 
     - name: Ensure container for grafana is running
-      docker_container:
+      community.docker.docker_container:
         name: grafana
-        image: "grafana/grafana:9.4.7"
+        image: "{{ grafana_image }}"
         pull: true
         state: started
         detach: yes
@@ -467,17 +569,21 @@
           GF_SECURITY_ADMIN_PASSWORD: "{{ grafana_admin_password }}"
           GF_USERS_ALLOW_SIGN_UP: "false"
           GF_INSTALL_PLUGINS: "flant-statusmap-panel,ae3e-plotly-panel"
+      tags:
+        - grafana
 
-    - name: Setup proxy site grafana.n39.eu
-      include_role:
+    - name: Setup proxy site "{{ grafana_domain_name }}"
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ grafana_domain_name }}"
         proxy_port: "{{ grafana_host_port }}"
         proxy_preserve_host: "On"
+      tags:
+        - grafana
 
     - name: Ensure the homebox directories exist
-      file:
+      ansible.builtin.file:
         path: "{{ item.path }}"
         owner: "{{ item.owner | default('root') }}"
         mode: "{{ item.mode }}"
@@ -487,11 +593,13 @@
           mode: "0755"
         - path: "{{ data_dir }}/homebox/data"
           mode: "0755"
+      tags:
+        - homebox
 
     - name: Ensure container for homebox is running
-      docker_container:
+      community.docker.docker_container:
         name: homebox
-        image: "ghcr.io/hay-kot/homebox"
+        image: "{{ homebox_image }}"
         pull: true
         state: started
         detach: yes
@@ -505,17 +613,21 @@
           HBOX_LOG_LEVEL: "info"
           HBOX_LOG_FORMAT: "text"
           HBOX_WEB_MAX_UPLOAD_SIZE: "10"
+      tags:
+        - homebox
 
     - name: Setup proxy site {{ homebox_domain_name }}
-      include_role:
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ homebox_domain_name }}"
         proxy_port: "{{ homebox_host_port }}"
         proxy_preserve_host: "On"
+      tags:
+        - homebox
 
     - name: Setup proxy site spaceapi.n39.eu
-      template:
+      ansible.builtin.template:
         src: templates/pottwal/spaceapi-apache-site.j2
         dest: /etc/apache2/sites-available/spaceapi.n39.eu.conf
         mode: "0644"
@@ -524,7 +636,17 @@
         proxy_preserve_host: "On"
       notify: Restart apache2
       tags:
-        - dev
+        - spaceapi
+
+    # Renovate configuration is sourced from `renovate.json` in each repository
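+    # (a minimal renovate.json, for illustration: {"extends": ["config:recommended"]})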
+    - name: Ensure renovate bot cronjob is present
+      ansible.builtin.template:
+        src: templates/pottwal/renovate-cron.j2
+        dest: /etc/cron.hourly/renovate-bot
+        mode: "0700"
+      notify: Reload cron
+      tags:
+        - renovate
 
   handlers:
     - name: Restart prosody
@@ -534,7 +656,14 @@
         restart: yes
 
     - name: Restart apache2
-      service:
+      ansible.builtin.service:
         name: apache2
         state: restarted
 
+    - name: Reload cron
+      ansible.builtin.shell:
+        cmd: service cron reload
+    # Use the shell call because the task sometimes has problems finding the service state
+    #  ansible.builtin.service:
+    #    name: cron
+    #    state: restarted
diff --git a/host-radon.yml b/host-radon.yml
index 6f6f68f..6131bd4 100644
--- a/host-radon.yml
+++ b/host-radon.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: radon.n39.eu
+- name: Setup things on host 'radon' (services for space automation)
+  hosts: radon.n39.eu
   become: true
 
   vars:
@@ -7,25 +8,24 @@
 
     data_dir: "/srv/data"
 
-    mosquitto_image: eclipse-mosquitto:2.0.14
+    mosquitto_image: eclipse-mosquitto:2.0.21
     mosquitto_data: "{{ data_dir }}/mosquitto"
 
     nodered_image: nodered/node-red:3.0.1-1-18
     nodered_data: "{{ data_dir }}/nodered"
 
-    rabbitmq_image: "bitnami/rabbitmq:3.10.7"
+    rabbitmq_image: bitnami/rabbitmq:4.0.7
     rabbitmq_data: "{{ data_dir }}/rabbitmq"
 
-    pwr_meter_pulse_gw_image: "netz39/power-meter-pulse-gateway:0.3.0"
+    pwr_meter_pulse_gw_image: netz39/power-meter-pulse-gateway:0.3.0
 
     brotherql_host_port: 9004
-    brotherql_web_image: "pklaus/brother_ql_web:alpine_9e20b6d"
 
   roles:
-    # role 'docker_setup' applied through group 'docker_host'
+    # role 'netz39.host_docker' applied through group 'docker_host'
     - role: apache
     - role: apache_letsencrypt  # Uses configuration from dehydrated setup
-    - role: ansible-role-dehydrated
+    - role: 24367dfa.dehydrated
       vars:
         dehydrated_contact_email: "{{ server_admin }}"
         dehydrated_domains:
@@ -38,7 +38,7 @@
 
   tasks:
     - name: Ensure the mosquitto directories exist
-      file:
+      ansible.builtin.file:
         path: "{{ item }}"
         mode: 0755
         state: directory
@@ -46,16 +46,20 @@
         - "{{ mosquitto_data }}/config"
         - "{{ mosquitto_data }}/data"
         - "{{ mosquitto_data }}/log"
+      tags:
+        - mosquitto
 
     - name: Make sure mosquitto config is there
-      template:
+      ansible.builtin.template:
         src: "templates/mosquitto.conf.j2"
         dest: "{{ mosquitto_data }}/config/mosquitto.conf"
         mode: 0644
-      notify: restart mosquitto
+      notify: Restart mosquitto container
+      tags:
+        - mosquitto
 
     - name: Ensure mosquitto is running
-      docker_container:
+      community.docker.docker_container:
         name: mosquitto
         image: "{{ mosquitto_image }}"
         pull: true
@@ -72,6 +76,8 @@
         restart_policy: unless-stopped
         env:
           TZ: "{{ timezone }}"
+      tags:
+        - mosquitto
 
     - name: Check if nodered data dir exists
       ansible.builtin.stat:
@@ -83,7 +89,7 @@
       when: not nodered_dir.stat.exists
 
     - name: Ensure nodered is running
-      docker_container:
+      community.docker.docker_container:
         name: nodered
         image: "{{ nodered_image }}"
         pull: true
@@ -103,7 +109,7 @@
         restart_policy: unless-stopped
 
     - name: Setup proxy site nodered.n39.eu
-      include_role:
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "nodered.n39.eu"
@@ -114,13 +120,17 @@
       ansible.builtin.stat:
         path: "{{ rabbitmq_data }}"
       register: rabbitmq_dir
+      tags:
+        - rabbitmq
     - name: Fail if rabbitmq data dir does not exist
       ansible.builtin.fail:
         msg: "RabbitMQ data dir is missing, please restore from the backup!"
       when: not rabbitmq_dir.stat.exists
+      tags:
+        - rabbitmq
 
     - name: Ensure rabbitmq docker container is running
-      docker_container:
+      community.docker.docker_container:
         name: rabbitmq
         image: "{{ rabbitmq_image }}"
         ports:
@@ -137,17 +147,20 @@
           - "{{ rabbitmq_data }}/bitnami:/bitnami:rw"
           - "{{ rabbitmq_data }}/etc_rabbitmq:/etc/rabbitmq:rw"
         restart_policy: unless-stopped
+      tags:
+        - rabbitmq
 
     - name: Setup proxy site rabbitmq.n39.eu
-      include_role:
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "rabbitmq.n39.eu"
         proxy_port: 15672
-
+      tags:
+        - rabbitmq
 
     - name: Ensure Power Meter Pulse Gateway for 19i room is running
-      docker_container:
+      community.docker.docker_container:
         name: pwr-meter-pulse-gw-19i
         image: "{{ pwr_meter_pulse_gw_image }}"
         ports:
@@ -165,7 +178,7 @@
         restart_policy: unless-stopped
 
     - name: Setup proxy site pwr-meter-pulse-gw-19i.svc.n39.eu
-      include_role:
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "pwr-meter-pulse-gw-19i.svc.n39.eu"
@@ -173,30 +186,49 @@
 
 
     - name: Setup docker container for BrotherQL Web UI printer
-      docker_container:
+      community.docker.docker_container:
         name: brotherql-web
-        image: "{{ brotherql_web_image }}"
+        image: dersimn/brother_ql_web:2.1.9-alpine
         pull: true
         restart_policy: unless-stopped
         state: started
         ports:
           - "127.0.0.1:{{ brotherql_host_port }}:8013"
-        command: " ./brother_ql_web.py --model QL-720NW tcp://{{ brotherql_printer_ip }}"
+        command: "--default-label-size 62 --model QL-720NW tcp://{{ brotherql_printer_host }}"
         detach: yes
         env:
           TZ: "{{ timezone }}"
+      tags:
+        - labelprinter
 
     - name: Setup proxy site labelprinter.n39.eu
-      include_role:
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
         site_name: labelprinter.n39.eu
         proxy_port: "{{ brotherql_host_port }}"
+      tags:
+        - labelprinter
 
+    - name: Setup docker container for Grafana Screenshots
+      community.docker.docker_container:
+        name: grafana-screenshot
+        image: mrtux/grafana-screenshot:0.1.3
+        pull: true
+        restart_policy: unless-stopped
+        detach: yes
+        env:
+          MQTT_BROKER_URL: "{{ kiosk_mqtt_host }}"
+          MQTT_TOPIC: "{{ kiosk_mqtt_topic }}"
+          GRAFANA_DASHBOARD_URL: "{{ kiosk_grafana_url }}"
+          GRAFANA_USERNAME: "{{ kiosk_grafana_user }}"
+          GRAFANA_PASSWORD: "{{ kiosk_grafana_pass }}"
+      tags:
+        - grafana-screenshot
 
   handlers:
-    - name: restart mosquitto
-      docker_container:
+    - name: Restart mosquitto container
+      community.docker.docker_container:
         name: mosquitto
         state: started
         restart: yes
diff --git a/host-tau.yml b/host-tau.yml
index 6defba2..e204b4d 100644
--- a/host-tau.yml
+++ b/host-tau.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: tau.netz39.de
+- name: Setup things on host 'tau' (vserver for wiki etc.)
+  hosts: tau.netz39.de
   become: true
 
   vars:
@@ -14,18 +15,18 @@
     dokuwiki_port: 9005
     # This container is pinned, because there are issues
     # with backwards compatibility within the same tag!
-    dokuwiki_image: "bitnami/dokuwiki:20220731@sha256:989ab52cf2d2e0f84166e114ca4ce88f59546b8f6d34958905f8d81c18cbd759"
+    dokuwiki_image: bitnami/dokuwiki:20240206.1.0
 
     discord_invite_domain: discord.netz39.de
 
   roles:
-    # role 'docker_setup' applied through group 'docker_host'
+    # role 'netz39.host_docker' applied through group 'docker_host'
     - role: apache
     - role: penguineer.dehydrated_cron
 
   tasks:
     - name: Setup docker network
-      docker_network:
+      community.docker.docker_network:
         name: dockernet
         driver: bridge
         ipam_config:
@@ -34,8 +35,8 @@
         state: present
 
     - name: Setup Dehydrated
-      include_role:
-        name: ansible-role-dehydrated
+      ansible.builtin.include_role:
+        name: 24367dfa.dehydrated
       vars:
         dehydrated_contact_email: "{{ server_admin }}"
         dehydrated_domains:
@@ -51,17 +52,17 @@
             deploy_challenge_hook: "/bin/systemctl restart apache2"
 
     - name: Setup proxy site testredmine.netz39.de
-      include_role:
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
         site_name: testredmine.netz39.de
         proxy_port: 9004
 
     - name: Setup phpmyadmin
-      docker_container:
+      community.docker.docker_container:
         name: phpmyadmin
         state: started
-        image: phpmyadmin:5.0
+        image: phpmyadmin:5.2
         networks_cli_compatible: true
         networks:
           - name: dockernet
@@ -75,7 +76,7 @@
           - 9001:80
 
     - name: Setup proxy site mysql.adm.netz39.de
-      include_role:
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
         site_name: mysql.adm.netz39.de
@@ -85,22 +86,22 @@
     - name: Check if Docker Registry auth dir exists
       ansible.builtin.stat:
         path: "{{ data_dir }}/registry/auth"
-      register: docker_dir
-    - name: Fail if docker registry data dir does not exist
+      register: docker_registry_auth_dir
+    - name: Fail if Docker Registry auth dir does not exist
       ansible.builtin.fail:
         msg: "Docker Registry auth dir is missing, please restore from the backup!"
-      when: not docker_dir.stat.exists
+      when: not docker_registry_auth_dir.stat.exists
     - name: Ensure the Docker Registry data directory exists
       # This may not be part of the backup
-      file:
+      ansible.builtin.file:
         path: "{{ data_dir }}/registry/data"
         state: directory
         mode: "0755"
 
     - name: Setup Docker Registry Container
-      docker_container:
+      community.docker.docker_container:
         name: registry
-        image: "registry:2"
+        image: registry:2
         pull: true
         state: started
         restart_policy: unless-stopped
@@ -117,7 +118,7 @@
           - "{{ data_dir }}/registry/auth:/auth:rw"
 
     - name: Setup proxy site for the Docker Registry
-      include_role:
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ docker_registry_domain }}"
@@ -128,19 +129,25 @@
       ansible.builtin.stat:
         path: "{{ data_dir }}/dokuwiki"
       register: dokuwiki_dir
+      tags:
+        - dokuwiki
     - name: Fail if Dokuwiki data dir does not exist
       ansible.builtin.fail:
         msg: "Dokuwiki data dir is missing, please restore from the backup!"
       when: not dokuwiki_dir.stat.exists
+      tags:
+        - dokuwiki
 
     - name: Set correct user for Dokuwiki data
       ansible.builtin.file:
         path: "{{ data_dir }}/dokuwiki"
         owner: "1001"  # According to container config
         recurse: yes
+      tags:
+        - dokuwiki
 
     - name: Setup Dokuwiki Container
-      docker_container:
+      community.docker.docker_container:
         name: dokuwiki
         image: "{{ dokuwiki_image }}"
         pull: true
@@ -155,19 +162,23 @@
           - "{{ data_dir }}/dokuwiki:/bitnami/dokuwiki:rw"
         env:
           TZ: "{{ timezone }}"
+      tags:
+        - dokuwiki
 
     - name: Setup proxy site for Dokuwiki
-      include_role:
+      ansible.builtin.include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ dokuwiki_domain }}"
         proxy_port: "{{ dokuwiki_port }}"
+      tags:
+        - dokuwiki
 
 
     - name: Setup container for secondary FFMD DNS
-      docker_container:
+      community.docker.docker_container:
         name: bind9-md-freifunk-net
-        image: "ffmd/bind9-md-freifunk-net:2022111601"
+        image: ffmd/bind9-md-freifunk-net:v2022122301
         pull: true
         state: started
         restart_policy: unless-stopped
@@ -176,12 +187,13 @@
           - "53:53/udp"
         env:
           TZ: "{{ timezone }}"
+      tags:
+        - ffmd-dns
 
     - name: Setup forwarding for Discord invite
-      include_role:
-        name: setup-http-site-forward
+      ansible.builtin.include_role:
+        name: setup_http_site_forward
       vars:
         site_name: "{{ discord_invite_domain }}"
         # forward_to: "https://discord.com/invite/8FcDvAf"
         forward_to: "https://sl.n39.eu/discord"
-
diff --git a/host-unicorn.yml b/host-unicorn.yml
index 2780230..374cd32 100644
--- a/host-unicorn.yml
+++ b/host-unicorn.yml
@@ -1,19 +1,20 @@
 ---
 # this is for a dedicated vm just hosting the unifi controller.
-- hosts: unicorn.n39.eu
+- name: Setup things on host 'unicorn' (vm for ubiquiti unifi controller)
+  hosts: unicorn.n39.eu
   become: true
   vars:
     ansible_python_interpreter: /usr/bin/python3
     data_dir: "/srv/data"
 
   roles:
-    # role 'docker_setup' applied through group 'docker_host'
+    # role 'netz39.host_docker' applied through group 'docker_host'
 
   tasks:
     - name: Setup the docker container for unifi-controller
-      docker_container:
+      community.docker.docker_container:
         name: unifi-controller
-        image: "jacobalberty/unifi:v7.1.65"
+        image: jacobalberty/unifi:v9.0.114
         state: started
         restart_policy: unless-stopped
         container_default_behavior: no_defaults
@@ -22,12 +23,13 @@
         # These fixed ports are needed.
         # https://help.ui.com/hc/en-us/articles/218506997-UniFi-Ports-Used
         ports:
-          - "8080:8080/tcp"   # Device command/control
-          - "8443:8443/tcp"   # Web interface + API
-          - "8843:8843/tcp"   # HTTPS portal
-          - "8880:8880/tcp"   # HTTP portal
-          - "3478:3478/udp"   # STUN service
-          - "6789:6789/tcp"   # Speed Test (unifi5 only)
+          - "8080:8080/tcp"     # Device command/control
+          - "8443:8443/tcp"     # Web interface + API
+          - "8843:8843/tcp"     # HTTPS portal
+          - "8880:8880/tcp"     # HTTP portal
+          - "3478:3478/udp"     # STUN service
+          - "6789:6789/tcp"     # Speed Test (unifi5 only)
+          - "10001:10001/udp"   # Used for device discovery.
         volumes:
           - "{{ data_dir }}/unifi-controller/data:/unifi/data"
           - "{{ data_dir }}/unifi-controller/log:/unifi/log"
diff --git a/host-wittgenstein.yml b/host-wittgenstein.yml
new file mode 100644
index 0000000..dd9aae6
--- /dev/null
+++ b/host-wittgenstein.yml
@@ -0,0 +1,170 @@
+---
+- name: Setup things on host 'wittgenstein' (Raspberry Pi for Ampel and SpaceAPI)
+  hosts: wittgenstein.n39.eu
+  become: true
+
+  roles:
+    - role: apache
+    - role: apache_letsencrypt  # Uses configuration from dehydrated setup
+    - role: 24367dfa.dehydrated
+      vars:
+        dehydrated_contact_email: "{{ server_admin }}"
+    - role: penguineer.dehydrated_cron
+
+  tasks:
+    - name: Install packages needed for the system
+      # This lists all packages needed on this host,
+      # except those installed by a specific role
+      ansible.builtin.apt:
+        state: present
+        name:
+          # This is needed for the user-executed tasks
+          - acl
+          # Regular packages
+          - tmux
+          - git-core
+          - cmake
+          - build-essential
+          - libmosquitto-dev
+          - libconfig-dev
+          - mosquitto-clients
+          - python3-paho-mqtt
+          - i2c-tools
+
+
+    - name: Set MAC address for proper DHCP recognition
+      # Uses mac variable from inventory
+      ansible.builtin.template:
+        src: templates/network-interfaces-dhcp-mac.j2
+        dest: /etc/network/interfaces.d/wittgenstein-mac
+        owner: root
+        group: root
+        mode: '0644'
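+        # A sketch of the rendered file, assuming the template pins the MAC
+        # on eth0 (the template itself is not part of this change):
+        #   allow-hotplug eth0
+        #   iface eth0 inet dhcp
+        #       hwaddress ether {{ mac }}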
+
+    - name: Disable IPv6
+      # Because IPv6 is not working on this host
+      ansible.builtin.copy:
+        src: files/sysctl-no-ipv6.conf
+        dest: /etc/sysctl.d/99-sysctl-no-ipv6.conf
+        owner: root
+        group: root
+        mode: '0644'
+
+
+    ### Gatekeeper user (pi for now)
+    #
+    # All the gatekeeping / door control stuff is here!
+
+    - name: Ensure gatekeeper user is there
+      ansible.builtin.user:
+        name: "{{ gatekeeper_user }}"
+        groups: dialout,audio,plugdev,input,netdev,i2c,gpio
+        append: yes
+
+    - name: Copy management scripts
+      ansible.builtin.copy:
+        src: "files/wittgenstein/{{ item }}"
+        dest: "/home/{{ gatekeeper_user }}/{{ item }}"
+        owner: "{{ gatekeeper_user }}"
+        group: "{{ gatekeeper_user }}"
+        mode: "0750"
+      loop:
+        - reboot.sh
+        - unstuck.sh
+        - switch-on.sh
+        - switch-off.sh
+
+    - name: Install start-up cron
+      ansible.builtin.cron:
+        name: Start the gatekeeper services
+        job: "/home/{{ gatekeeper_user }}/reboot.sh"
+        user: "{{ gatekeeper_user }}"
+        special_time: reboot
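+        # Equivalent to the crontab entry:
+        #   @reboot /home/<gatekeeper_user>/reboot.sh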
+
+
+    - name: Download wiringPi library
+      # WiringPi needs to be installed, but that library seems to be
+      # obsolete. We download something and hope it works...
+      ansible.builtin.get_url:
+        url: https://project-downloads.drogon.net/wiringpi-latest.deb
+        dest: "/home/{{ gatekeeper_user }}/wiringpi-latest.deb"
+        mode: "0644"
+        force: no
+      register: wiringPi_download
+
+    - name: Install wiringPi library  # noqa: no-handler
+      ansible.builtin.apt:
+        state: present
+        deb: "/home/{{ gatekeeper_user }}/wiringpi-latest.deb"
+      when: wiringPi_download.changed
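+      # The 'when' guard above re-installs only if get_url fetched a new
+      # file; this replaces a handler, hence the noqa on the task name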
+
+
+    ### Ampelsteuerung
+    - name: Clone netz39_space_notification initial checkout
+      # Do this as the gatekeeper user!
+      become: yes
+      become_user: "{{ gatekeeper_user }}"
+      ansible.builtin.git:
+        repo: https://github.com/netz39/space_notification.git
+        dest: "/home/{{ gatekeeper_user }}/netz39_space_notification"
+        clone: yes
+        update: no
+
+    - name: Compile ledcontrol agent
+      # Do this as the gatekeeper user!
+      become: yes
+      become_user: "{{ gatekeeper_user }}"
+      ansible.builtin.shell:
+        chdir: "/home/{{ gatekeeper_user }}/netz39_space_notification/raspberry/ledcontrol"
+        cmd: make
+        creates: "/home/{{ gatekeeper_user }}/netz39_space_notification/raspberry/ledcontrol/ledcontrol"
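+      # 'creates' keeps this idempotent: make is skipped once the binary exists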
+
+    - name: Compile statusswitch agent
+      # Do this as the gatekeeper user!
+      become: yes
+      become_user: "{{ gatekeeper_user }}"
+      ansible.builtin.shell:
+        chdir: "/home/{{ gatekeeper_user }}/netz39_space_notification/raspberry/statusswitch"
+        cmd: make
+        creates: "/home/{{ gatekeeper_user }}/netz39_space_notification/raspberry/statusswitch/statusswitch"
+
+    ### Space API
+    - name: Setup the SpaceAPI Docker container
+      community.docker.docker_container:
+        name: spaceapi
+        image: "{{ spaceapi_image }}"
+        pull: true
+        state: started
+        detach: yes
+        restart_policy: unless-stopped
+        ports:
+          - "0.0.0.0:{{ spaceapi_host_port }}:8080"   # Must be reached by pottwal
+#          - "127.0.0.1:{{ spaceapi_host_port }}:8080"
+        env:
+          TZ: "{{ timezone }}"
+          MQTT_BROKER: "platon.n39.eu"
+          MQTT_TOPIC_STATUS: "{{ spaceapi_topic_status }}"
+          MQTT_TOPIC_LASTCHANGE: "{{ spaceapi_topic_lastchange }}"
+      tags:
+        - spaceapi
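+    # Hypothetical smoke test once the container is up
+    # (8001 is spaceapi_host_port from host_vars):
+    #   curl http://wittgenstein.n39.eu:8001/json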
+
+    - name: Setup the Ampel Controller Docker container
+      community.docker.docker_container:
+        name: ampelcontroller
+        image: "{{ ampelcontroller_image }}"
+        pull: true
+        state: started
+        detach: yes
+        restart_policy: unless-stopped
+        env:
+          TZ: "{{ timezone }}"
+          MQTT_BROKER: "platon.n39.eu"
+          MQTT_LEVER_STATE_TOPIC: "{{ topic_lever_state }}"
+          MQTT_DOOR_EVENTS_TOPIC: "{{ topic_door_events }}"
+          MQTT_SPACESTATUS_ISOPEN_TOPIC: "{{ spaceapi_topic_status }}"
+          MQTT_SPACESTATUS_LASTCHANGE_TOPIC: "{{ spaceapi_topic_lastchange }}"
+          MQTT_TRAFFIC_LIGHT_TOPIC: "{{ topic_traffic_light }}"
+      tags:
+        - spaceapi
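+    # The controller translates lever state and door events into the
+    # traffic light topic and the SpaceAPI status/lastchange topics above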
+
diff --git a/host_vars/hobbes.n39.eu/vars.yml b/host_vars/hobbes.n39.eu/vars.yml
index 2b64fb3..b0b622b 100644
--- a/host_vars/hobbes.n39.eu/vars.yml
+++ b/host_vars/hobbes.n39.eu/vars.yml
@@ -2,8 +2,6 @@
 server_admin: "admin+hobbes@netz39.de"
 mac: "b8:27:eb:f9:43:43"
 kiosk_user: pi
-kiosk_software_version: v1.0.6
-kiosk_software_arch: "armv7"
-kiosk_url: "https://grafana.n39.eu/d/xpLj6UD4z/hobbes-space-monitor?orgId=1"
-kiosk_grafana_user: "{{ vault_kiosk_grafana_user }}"
-kiosk_grafana_pass: "{{ vault_kiosk_grafana_pass }}"
+kiosk_mqtt_host: "mqtt.n39.eu"
+kiosk_mqtt_topic: "Netz39/Things/HackingDashboard/Screenshot"
+docker_data_root: "/srv/docker"
diff --git a/host_vars/hobbes.n39.eu/vault b/host_vars/hobbes.n39.eu/vault
index 5f4b386..6c9a336 100644
--- a/host_vars/hobbes.n39.eu/vault
+++ b/host_vars/hobbes.n39.eu/vault
@@ -1,9 +1,6 @@
 $ANSIBLE_VAULT;1.1;AES256
-32313738636231313036633334333934643839636563646334336533316436653263623461643438
-6362343635626266313466643465343962663931623662320a316635613231313930343937363064
-33326164333137633039376363643539346463303934333430626431336637326638363233333234
-3132333533376134380a383837616331303536623665383735663531343538366332313236386137
-62306436663934383363616332316262313762633261396535663533636665633532316366386430
-65343830376634633365343337313433643465323662313563366463393664653766623338623635
-30653263303761316238396634346337636461643231303561353133643162633934323161663539
-66646364373034633334
+32343732363234396136616164383833316634373061376235656566303761646461626138363432
+3264633461383739393138396233303839363132343866370a306433306364666438623434383036
+63366634313937623736393636393030333961323335323762663538373631353331353162613362
+3661653539306365350a333263383537643738373939376563356566313732613766303931633630
+6462
diff --git a/host_vars/plumbum.n39.eu/vars.yml b/host_vars/plumbum.n39.eu/vars.yml
new file mode 100644
index 0000000..af6228c
--- /dev/null
+++ b/host_vars/plumbum.n39.eu/vars.yml
@@ -0,0 +1,3 @@
+---
+server_admin: "admin+plumbum@netz39.de"
+mac: "32:A3:94:A0:23:77"
diff --git a/host_vars/pottwal.n39.eu/vars.yml b/host_vars/pottwal.n39.eu/vars.yml
index 20cc119..854fb67 100644
--- a/host_vars/pottwal.n39.eu/vars.yml
+++ b/host_vars/pottwal.n39.eu/vars.yml
@@ -9,38 +9,57 @@ cleanuri_amqp_user: "cleanuri"
 cleanuri_amqp_pass: "{{ vault_cleanuri_amqp_pass }}"
 cleanuri_amqp_vhost: "/cleanuri"
 
+forgejo_host_port: 9091
+forgejo_ssh_port: 2222
+forgejo_domain_name: git.n39.eu
+forgejo_image: codeberg.org/forgejo/forgejo:10.0.3
+
 shlink_host_port: 8083
 shlink_domain_name: sl.n39.eu
-shlink_geolite_license_key: "{{ vault_shlink_geolite_license_key }}"
+shlink_image: shlinkio/shlink:4.4.6
+shlink_initial_api_key: "{{ vault_shlink_initial_api_key }}"
+shlink_postgres_password: "{{ vault_shlink_postgres_password }}"
 
 hedgedoc_host_port: 8084
-hedgedoc_image: quay.io/hedgedoc/hedgedoc:1.9.3
+hedgedoc_domain_name: pad.n39.eu
+hedgedoc_image: quay.io/hedgedoc/hedgedoc:1.10.2
+hedgedoc_db_image: postgres:16.8-alpine
 hedgedoc_postgres_password: "{{ vault_hedgedoc_postgres_password }}"
 
 redmine_host_port: 8087
-redmine_image: redmine:4.2.7
-redmine_mysql_image: mysql:5.7
+redmine_domain_name: redmine.n39.eu
+redmine_image: redmine:6.0.4
+redmine_mysql_image: mysql:9.2
 redmine_database: redmine
 redmine_database_password: "{{ vault_redmine_database_password }}"
 
 influxdb_host_port: 8088
 influxdb_domain_name: influx.n39.eu
-influxdb_image: influxdb:2.4-alpine
+influxdb_image: influxdb:2.7-alpine
 influxdb_init_username: admin
 influxdb_init_password: "{{ vault_influxdb_init_password }}"
 
-forgejo_host_port: 9091
-
+jabber_host_port: 8086
+prosody_domain_name: jabber.n39.eu
+prosody_image: netz39/prosody:0.11
+prosody_web_image: joseluisq/static-web-server:2.36
 prosody_config_dir: "/etc/prosody"
 prosody_data_dir: "{{ data_dir }}/prosody"
-prosody_domain_name: jabber.n39.eu
-jabber_host_port: 8086
 
 uptimekuma_host_port: 8085
+uptimekuma_domain_name: uptime.n39.eu
+uptimekuma_image: louislam/uptime-kuma:1.23.16
 
 grafana_host_port: 8089
 grafana_domain_name: grafana.n39.eu
+grafana_image: grafana/grafana:11.6.0
 grafana_admin_password: "{{ vault_grafana_admin_password }}"
 
 homebox_host_port: 8092
 homebox_domain_name: inventory.n39.eu
+homebox_image: ghcr.io/hay-kot/homebox:v0.10.3
+
+renovate_image: renovate/renovate:39.220.1
+renovate_forgejo_pat: "{{ vault_renovate_forgejo_pat }}"
+renovate_github_pat: "{{ vault_renovate_github_pat }}"
+renovate_git_user: "Renovate Bot <accounts+renovatebot@netz39.de>"
diff --git a/host_vars/pottwal.n39.eu/vault b/host_vars/pottwal.n39.eu/vault
index 422b1e7..f112beb 100644
--- a/host_vars/pottwal.n39.eu/vault
+++ b/host_vars/pottwal.n39.eu/vault
@@ -1,20 +1,33 @@
 $ANSIBLE_VAULT;1.1;AES256
-35323634353263613464653863633861303539636238333464653633653164353632306233626231
-3766386232326433383932636136313831346131336335360a383639643334613033336134373566
-36343465336365363732363931383031356532336235313537396338316465366537313032616261
-6634393966623662390a383335316661613332633165333936396335666637306163363133323363
-33613639306537396136643438623937363336376332353634333130313434623433303264393461
-38663337396465343937396438333261393463303866306234323138396563653837373334356239
-64653231633066656662306530656139316530316263356135363538303061646432353338323237
-66663161333133313762366261343434666238376537636433313461343065646565633130333061
-33376537316338666662643639623637396366336263656537326363663936616234343235373636
-33373039373033333533363366356435633863633434643963633664613238363961363733366437
-61353936613065303230616239646334313130636133653461663561303037383663643761376235
-33303661663063613635306438613738613064386466656430343130356131663262353239326334
-63323630333466356263646162336437646133616565353430313737313332363330663236383830
-33366138303665386561393136616238346335633163313330386434323239623736333562363862
-66636165373264353132626232633537613536303362366535653438303261323735666331363439
-61613939373333616364303134353437333965386239623933393932373434666234373736316166
-63373935356162326230653437643030313262373965353831396361646136663938336334646633
-65313166613131396665393363633166663137363564393063363330366364373936643831373030
-333465303435636163616236306264646666
+61323135656430613464613334653239613865623361363734306139383261653563373365306364
+3232353634356664323235393135653762383538353635660a363461393133376566613064366233
+32323065633164646535386461373261373461343961383333333063663831353961656265313836
+6231356666356266390a333230376264313537376461326331313134313737616137636465336430
+38616261333534393464343630616464326331653163616435613863616165633730353263656565
+66346536393737353962666438333661663636636339613633653363323438326635643738656430
+38313635323066376532396666653633393736633939306566333337336635386430373662666534
+64653662333832313964323039353838353638313337306631613564383933663166633164373132
+33326537366135613733386436663366613238636133343065376534323561656265613433336637
+64613330306530323238663738356133663166303730633735656562636139626232396130656337
+34323238326437303730643736646430646239383239613061333033343733663832656262383732
+66343236326537633539353230376365666462393264303532346431383838303963613731343263
+63656630623934643763636237366630386333646263336261386162656439323232653066393266
+36633239323638396639623734666466343164663539316165386632306235363435303139356433
+37633731366565393339326235616264616535363461653531613331356239666534653232376235
+36623431343136633964656330313833643161353738303564663662363062653631363661633333
+31663339643034333336313630356266393062323637333664646335363961386433303662343734
+32313338613064373966393163623863633037353564316361656162323234313435646532343231
+30356336626435306332316566323932313564626164316165646530656365363330643033376134
+32363530306536633531326535373136326364356237376264646130663430343838323834386264
+35306561353866346430393837346333396236356465666334656139373764653365396534613034
+36393239623930656266336130303236393336373063623738653939393563336130316461393535
+32313932396263306439356663373361393539633639343238393631343830306532336162616565
+32336264646333613238363065613130633966656164666333303332313536616334623639613630
+34323665366131663736623638636263616131393133346464653037366465633332363131316332
+65356563373036353432376234626262313266316435656562646365363539386361653966366465
+39383536313764663732613462383466616238363765633062333830373038656334363764643663
+61346664353064333238313038303333386436653738316630383237366532353765346633383862
+65666235666663666638656337303762626563663135613431616439633731383638653466623434
+62663164633032666638656464666130623566356636343330386236336266386263323936396330
+31613339623034663466613930613062343666633530306136623734393862333365646538326261
+63646439343565366463
diff --git a/host_vars/radon.n39.eu/vars.yml b/host_vars/radon.n39.eu/vars.yml
index fc9bff6..410f310 100644
--- a/host_vars/radon.n39.eu/vars.yml
+++ b/host_vars/radon.n39.eu/vars.yml
@@ -3,5 +3,11 @@ server_admin: "admin+radon@netz39.de"
 pwr_meter_amqp_user: "pwr-meter"
 pwr_meter_amqp_pass: "{{ vault_pwr_meter_amqp_pass }}"
 pwr_meter_api_token: "{{ vault_pwr_meter_api_token }}"
-# See https://gitea.n39.eu/Netz39_Admin/config.descartes/src/branch/live/dns_dhcp.txt
-brotherql_printer_ip: "172.23.48.53"
+brotherql_printer_host: "brotherql-720nw.n39.eu"
+
+# URL for the grafana kiosk in our Bastelbereich (tinkering area)
+kiosk_grafana_url: "https://grafana.n39.eu/d/xpLj6UD4z/hobbes-space-monitor?orgId=1&kiosk"
+kiosk_grafana_user: "{{ vault_kiosk_grafana_user }}"
+kiosk_grafana_pass: "{{ vault_kiosk_grafana_pass }}"
+kiosk_mqtt_host: "mqtt.n39.eu"
+kiosk_mqtt_topic: "Netz39/Things/HackingDashboard/Screenshot"
diff --git a/host_vars/radon.n39.eu/vault b/host_vars/radon.n39.eu/vault
index 51ed17c..14c9a89 100644
--- a/host_vars/radon.n39.eu/vault
+++ b/host_vars/radon.n39.eu/vault
@@ -1,10 +1,14 @@
 $ANSIBLE_VAULT;1.1;AES256
-61393134306361663861356132333135633566626136383536363763646134386338363362343830
-6339626232333037613437386634396138323438643037390a366338353862653439323961626532
-37393438326261363563323233333364323536373735383834383134653935383436356137396166
-3531326465363438310a663232306138333866373637336234326166666261333332386632316163
-61616339656436666233343339383835643934366661366333386363386639306631643366623333
-30666430623435633961613932323239343239623532316662323937346634656136396539303036
-63363365363861646333386364373263303037663266323832663761633633663136616338323362
-36326561623063646666373034333335373135343736633066393937653234313932363138643065
-38646231333564303861633231353535623436326135303463613738346231633962
+64396666646432653766656333333139613631333035393137363036633330336134383932663631
+6533326532333366633136346232306637306266343264380a616164643037393036383834313238
+32343437343466343262336137633436343935663465616364303961656565643134346563373461
+3337303534646563660a366562323065666630626331346266366139653533386238663361373930
+30623733336361353838373730316537623066326166366634643466386332396333613531613564
+38373363303466346639343563356339303037323261383034316439326237636565633462336462
+35313561356465393337616162323866393365613537333461656234313464653165333963343331
+32343634383335663764353831303864373637393833366333376635343665396366616363663033
+37323031316535636131333738633237383665323638613233666432316261376239636234626638
+33313230643563316662323937656338613362646466323335376363626163383233623831643565
+31393438363334653863363536373632333930616636323237336236353863616638616165303931
+63333639393665633537646665613933323632376162363139656632323166393264313333653163
+64333935356138336562366634636364346461356539363162616438613232306533
diff --git a/host_vars/wittgenstein.n39.eu/vars.yml b/host_vars/wittgenstein.n39.eu/vars.yml
new file mode 100644
index 0000000..2cebfa5
--- /dev/null
+++ b/host_vars/wittgenstein.n39.eu/vars.yml
@@ -0,0 +1,17 @@
+---
+server_admin: "admin+wittgenstein@netz39.de"
+mac: "b8:27:eb:48:f1:59"
+ansible_python_interpreter: /usr/bin/python3
+gatekeeper_user: pi
+data_dir: "/srv/data"
+
+spaceapi_host_port: 8001
+spaceapi_domain_name: spaceapi.n39.eu
+spaceapi_image: netz39/spaceapi-service:0.1.1
+spaceapi_topic_status: "Netz39/SpaceAPI/isOpen"
+spaceapi_topic_lastchange: "Netz39/SpaceAPI/lastchange"
+
+ampelcontroller_image: netz39/ampel-controller:0.2.0
+topic_lever_state: "Netz39/Things/StatusSwitch/Lever/State"
+topic_door_events: "Netz39/Things/Door/Events"
+topic_traffic_light: "Netz39/Things/Ampel/Light"
diff --git a/inventory.yml b/inventory.yml
index ef482de..9220b95 100644
--- a/inventory.yml
+++ b/inventory.yml
@@ -8,15 +8,19 @@ all:
     krypton.n39.eu:
     oganesson.n39.eu:
     platon.n39.eu:
+    plumbum.n39.eu:
     pottwal.n39.eu:
     radon.n39.eu:
     unicorn.n39.eu:
+    wittgenstein.n39.eu:
     k3s-c1.n39.eu:
     k3s-c2.n39.eu:
     k3s-c3.n39.eu:
     k3s-w1.n39.eu:
     k3s-w2.n39.eu:
     k3s-w3.n39.eu:
+#   Host rhodium.n39.eu is the OpenWRT router, but cannot be added here
+#   as it would be treated like a Debian host
 
   children:
     docker_host:
@@ -26,11 +30,13 @@ all:
         radon.n39.eu:
         tau.netz39.de:
         unicorn.n39.eu:
+        wittgenstein.n39.eu:
     proxmox:
       hosts:
         holmium.n39.eu:
         krypton.n39.eu:
         oganesson.n39.eu:
+        plumbum.n39.eu:
         pottwal.n39.eu:
         radon.n39.eu:
         unicorn.n39.eu:
@@ -48,6 +54,7 @@ all:
         krypton.n39.eu:
         oganesson.n39.eu:
         platon.n39.eu:
+        plumbum.n39.eu:
         pottwal.n39.eu:
         radon.n39.eu:
         wittgenstein.n39.eu:
diff --git a/main.yml b/main.yml
index 785175b..177bccc 100644
--- a/main.yml
+++ b/main.yml
@@ -39,3 +39,9 @@
 
 - name: Hobbes specific setup
   import_playbook: host-hobbes.yml
+
+- name: Plumbum specific setup
+  import_playbook: host-plumbum.yml
+
+- name: Wittgenstein specific setup
+  import_playbook: host-wittgenstein.yml
diff --git a/renovate.json b/renovate.json
new file mode 100644
index 0000000..ec350d4
--- /dev/null
+++ b/renovate.json
@@ -0,0 +1,24 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "regexManagers": [
+    {
+      "fileMatch": [
+        "^host-.*\\.yml$",
+        "^host_vars/.*/vars\\.yml$"
+      ],
+      "datasourceTemplate": "docker",
+      "versioningTemplate": "docker",
+      "matchStrings": [
+        "image: (?<depName>.*?):(?<currentValue>.*?)(@(?<currentDigest>sha256:.*?))?\\s"
+      ]
+    }
+  ],
+  "packageRules": [
+    {
+      "matchDatasources": ["docker"],
+      "matchPackageNames": ["renovate/renovate"],
+      "schedule": [ "before 1am on friday" ],
+      "automerge": true
+    }
+  ]
+}
diff --git a/requirements.yml b/requirements.yml
index 7281888..769e481 100644
--- a/requirements.yml
+++ b/requirements.yml
@@ -1,18 +1,26 @@
 ---
 roles:
-  - src: hifis.unattended_upgrades
-    version: v1.12.2
-  - src: git+https://github.com/adriagalin/ansible.timezone.git
-    version: 3.0.0
-  - src: git+https://github.com/24367dfa/ansible-role-dehydrated.git
-    version: 1.0.3
-  - src: penguineer.dehydrated_cron
-    version: v1.0.0
-  - src: git+https://github.com/maz3max/ble-keykeeper-role.git
+  - name: adriagalin.timezone
+    src: git+https://github.com/adriagalin/ansible.timezone.git
+    version: 4.0.0
+  - name: 24367dfa.dehydrated
+    src: git+https://github.com/24367dfa/ansible-role-dehydrated.git
+    version: 2.1.0
+  - name: penguineer.dehydrated_cron
+    src: https://github.com/penguineer/ansible-role-dehydrated_cron.git
+    version: v1.1.0
+  - name: maz3max.ble_keykeeper
+    src: git+https://github.com/maz3max/ble-keykeeper-role.git
     version: v1.1.0
   - src: lespocky.telegraf_docker_in_docker
-    version: v0.2.1
+    version: v0.2.2
+  - name: netz39.host_docker
+    src: git+https://github.com/netz39/ansible-role-host-docker.git
+    version: v0.5.0
 
 collections:
   - name: community.grafana
-    version: 1.5.3
+    version: 2.1.0
+  # for role 'hifis.toolkit.unattended_upgrades'
+  - name: hifis.toolkit
+    version: 5.3.0
diff --git a/roles/apache/handlers/main.yml b/roles/apache/handlers/main.yml
index e307af9..126d33e 100644
--- a/roles/apache/handlers/main.yml
+++ b/roles/apache/handlers/main.yml
@@ -1,6 +1,6 @@
 # Handlers for role apache
 ---
-- name: restart apache2
-  service:
+- name: Restart apache2
+  ansible.builtin.service:
     name: apache2
     state: restarted
diff --git a/roles/apache/tasks/main.yml b/roles/apache/tasks/main.yml
index 6bfa4da..67928af 100644
--- a/roles/apache/tasks/main.yml
+++ b/roles/apache/tasks/main.yml
@@ -1,12 +1,12 @@
 ---
 - name: Ensure Apache2 and modules are installed and up to date
-  apt:
+  ansible.builtin.apt:
     name:
       - apache2
     state: present
 
 - name: Ensure necessary modules are enabled
-  apache2_module:
+  community.general.apache2_module:
     name: "{{ item }}"
     state: present
   with_items:
@@ -23,7 +23,7 @@
     mode: "0644"
     owner: root
     group: root
-  notify: restart apache2
+  notify: Restart apache2
 
 - name: Add symlink to enable configuration
   ansible.builtin.file:
@@ -32,4 +32,4 @@
     state: link
     owner: root
     group: root
-  notify: restart apache2
+  notify: Restart apache2
diff --git a/roles/apache_letsencrypt/handlers/main.yml b/roles/apache_letsencrypt/handlers/main.yml
index abf4ea1..b2cf41c 100644
--- a/roles/apache_letsencrypt/handlers/main.yml
+++ b/roles/apache_letsencrypt/handlers/main.yml
@@ -1,6 +1,6 @@
 # Handlers for role apache_letsencrypt
 ---
-- name: restart apache2
-  service:
+- name: Restart apache2
+  ansible.builtin.service:
     name: apache2
     state: restarted
diff --git a/roles/apache_letsencrypt/tasks/main.yml b/roles/apache_letsencrypt/tasks/main.yml
index 6acb952..1294f2d 100644
--- a/roles/apache_letsencrypt/tasks/main.yml
+++ b/roles/apache_letsencrypt/tasks/main.yml
@@ -7,7 +7,7 @@
     mode: "0644"
     owner: root
     group: root
-  notify: restart apache2
+  notify: Restart apache2
 
 - name: Add symlink to enable configuration
   ansible.builtin.file:
@@ -17,4 +17,4 @@
     mode: "0644"
     owner: root
     group: root
-  notify: restart apache2
+  notify: Restart apache2
diff --git a/roles/cleanuri/defaults/main.yml b/roles/cleanuri/defaults/main.yml
index 632b59e..73422b8 100644
--- a/roles/cleanuri/defaults/main.yml
+++ b/roles/cleanuri/defaults/main.yml
@@ -19,7 +19,7 @@ cleanuri_amqp_canonizer: "canonizer"
 cleanuri_amqp_retrieval: "extractor"
 
 # Docker images
-cleanuri_image_webui: mrtux/cleanuri-webui:0.2.0
-cleanuri_image_apigateway: mrtux/cleanuri-apigateway:0.3.0
-cleanuri_image_canonizer: mrtux/cleanuri-canonizer:0.3.0
-cleanuri_image_extractor: mrtux/cleanuri-extractor:0.3.0
+cleanuri_image_webui: mrtux/cleanuri-webui:0.2.2
+cleanuri_image_apigateway: mrtux/cleanuri-apigateway:0.3.2
+cleanuri_image_canonizer: mrtux/cleanuri-canonizer:0.5.3
+cleanuri_image_extractor: mrtux/cleanuri-extractor:0.5.3
diff --git a/roles/cleanuri/tasks/main.yml b/roles/cleanuri/tasks/main.yml
index b968729..33aeb9b 100644
--- a/roles/cleanuri/tasks/main.yml
+++ b/roles/cleanuri/tasks/main.yml
@@ -1,7 +1,7 @@
 # Tasks for the cleanuri role
 ---
 - name: Ensure CleanURI WebUI is running
-  docker_container:
+  community.docker.docker_container:
     name: cleanuri-webui
     image: "{{ cleanuri_image_webui }}"
     pull: true
@@ -15,7 +15,7 @@
       REACT_APP_API_GATEWAY: "https://{{ cleanuri_api_domain }}"
 
 - name: Setup proxy site for the CleanURI WebUI
-  include_role:
+  ansible.builtin.include_role:
     name: setup_http_site_proxy
   vars:
     site_name: "{{ cleanuri_ui_domain }}"
@@ -23,7 +23,7 @@
 
 
 - name: Ensure CleanURI API Gateway is running
-  docker_container:
+  community.docker.docker_container:
     name: cleanuri-apigateway
     image: "{{ cleanuri_image_apigateway }}"
     pull: true
@@ -35,14 +35,14 @@
     env:
       TZ: "{{ timezone }}"
       AMQP_HOST: "{{ cleanuri_amqp_host }}"
-      AMQP_USER: "{{ cleanuri_amqp_user  }}"
+      AMQP_USER: "{{ cleanuri_amqp_user }}"
       AMQP_PASS: "{{ cleanuri_amqp_pass }}"
       AMQP_VHOST: "{{ cleanuri_amqp_vhost }}"
       GATEWAY_RESULT_QUEUE: "{{ cleanuri_amqp_results }}"
       GATEWAY_TASK_RK: "{{ cleanuri_amqp_canonizer }}"
 
 - name: Ensure CleanURI Canonizer is running
-  docker_container:
+  community.docker.docker_container:
     name: cleanuri-canonizer
     image: "{{ cleanuri_image_canonizer }}"
     pull: true
@@ -52,14 +52,14 @@
     env:
       TZ: "{{ timezone }}"
       AMQP_HOST: "{{ cleanuri_amqp_host }}"
-      AMQP_USER: "{{ cleanuri_amqp_user  }}"
+      AMQP_USER: "{{ cleanuri_amqp_user }}"
       AMQP_PASS: "{{ cleanuri_amqp_pass }}"
       AMQP_VHOST: "{{ cleanuri_amqp_vhost }}"
       CANONIZER_TASK_QUEUE: "{{ cleanuri_amqp_canonizer }}"
       EXTRACTOR_TASK_RK: "{{ cleanuri_amqp_retrieval }}"
 
 - name: Ensure CleanURI Extractor is running
-  docker_container:
+  community.docker.docker_container:
     name: cleanuri-extractor
     image: "{{ cleanuri_image_extractor }}"
     pull: true
@@ -69,14 +69,14 @@
     env:
       TZ: "{{ timezone }}"
       AMQP_HOST: "{{ cleanuri_amqp_host }}"
-      AMQP_USER: "{{ cleanuri_amqp_user  }}"
+      AMQP_USER: "{{ cleanuri_amqp_user }}"
       AMQP_PASS: "{{ cleanuri_amqp_pass }}"
       AMQP_VHOST: "{{ cleanuri_amqp_vhost }}"
       EXTRACTION_TASK_QUEUE: "{{ cleanuri_amqp_retrieval }}"
 
 
 - name: Setup proxy site for the CleanURI API Gateway
-  include_role:
+  ansible.builtin.include_role:
     name: setup_http_site_proxy
   vars:
     site_name: "{{ cleanuri_api_domain }}"
diff --git a/roles/dd24_dyndns_cron/handlers/main.yml b/roles/dd24_dyndns_cron/handlers/main.yml
index 49c802c..10ded8f 100644
--- a/roles/dd24_dyndns_cron/handlers/main.yml
+++ b/roles/dd24_dyndns_cron/handlers/main.yml
@@ -1,6 +1,6 @@
 # handlers file for dd24_dyndns_cron
 ---
-- name: reload cron
+- name: Reload cron
   ansible.builtin.shell:
     cmd: service cron reload
     warn: no
diff --git a/roles/dd24_dyndns_cron/tasks/main.yml b/roles/dd24_dyndns_cron/tasks/main.yml
index 5060260..da32fbd 100644
--- a/roles/dd24_dyndns_cron/tasks/main.yml
+++ b/roles/dd24_dyndns_cron/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: Make sure cron and curl are installed
-  apt:
+  ansible.builtin.apt:
     name:
       - cron
       - curl
@@ -13,6 +13,6 @@
     owner: root
     group: root
     mode: "0644"
-  notify: reload cron
+  notify: Reload cron
   # There is ansible.builtin.cron, but this makes configuration much
   # more complicated, so we stick to the template.
diff --git a/roles/dd24_dyndns_cron/templates/dd24-dyndns.cron.j2 b/roles/dd24_dyndns_cron/templates/dd24-dyndns.cron.j2
index 8e01809..8e509b7 100644
--- a/roles/dd24_dyndns_cron/templates/dd24-dyndns.cron.j2
+++ b/roles/dd24_dyndns_cron/templates/dd24-dyndns.cron.j2
@@ -1,6 +1,6 @@
 # /etc/cron.d/dd24-dyndns: Cron call to renew DynDNS entry
 
-SHELL=/bin/sh
+SHELL=/bin/bash
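+# bash (not sh) is required for the process substitutions below: stdout is
+# reduced to the code/description fields and logged at debug level, stderr
+# goes to the error log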
 PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
 
-*/5 * * * * root curl --silent --show-error "https://dynamicdns.key-systems.net/update.php?hostname={{dyndns_domain}}&password={{dyndns_password}}&ip={{dyndns_ip}}" > /dev/null 2> >(/usr/bin/logger -p user.error -t dd24)
+*/5 * * * * root curl --silent --show-error "https://dynamicdns.key-systems.net/update.php?hostname={{dyndns_domain}}&password={{dyndns_password}}&ip={{dyndns_ip}}" > >(grep 'code\|description' | paste -d',' - - | logger -p user.debug -t dd24) 2> >(/usr/bin/logger -p user.error -t dd24)
diff --git a/roles/desec_dyndns_cron/defaults/main.yml b/roles/desec_dyndns_cron/defaults/main.yml
new file mode 100644
index 0000000..59fc031
--- /dev/null
+++ b/roles/desec_dyndns_cron/defaults/main.yml
@@ -0,0 +1,4 @@
+# desec.io Cron configuration
+---
+dyndns_domain: www.example.com
+dyndns_token: yourtoken
diff --git a/roles/desec_dyndns_cron/handlers/main.yml b/roles/desec_dyndns_cron/handlers/main.yml
new file mode 100644
index 0000000..8017c9d
--- /dev/null
+++ b/roles/desec_dyndns_cron/handlers/main.yml
@@ -0,0 +1,10 @@
+# handlers file for desec_dyndns_cron
+---
+- name: Reload cron
+  ansible.builtin.shell:
+    cmd: service cron reload
+    warn: no
+# Use the shell call because the task sometimes has problems finding the service state
+#  service:
+#    name: cron
+#    state: restarted
diff --git a/roles/desec_dyndns_cron/tasks/main.yml b/roles/desec_dyndns_cron/tasks/main.yml
new file mode 100644
index 0000000..eb164d0
--- /dev/null
+++ b/roles/desec_dyndns_cron/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+- name: Make sure cron and curl are installed
+  ansible.builtin.apt:
+    name:
+      - cron
+      - curl
+    state: present
+
+- name: Setup cron file for desec.io updates
+  ansible.builtin.template:
+    src: "templates/desec-dyndns.cron.j2"
+    dest: "/etc/cron.d/desec-dyndns"
+    owner: root
+    group: root
+    mode: "0644"
+  notify: Reload cron
+  # There is ansible.builtin.cron, but this makes configuration much
+  # more complicated, so we stick to the template.
diff --git a/roles/desec_dyndns_cron/templates/desec-dyndns.cron.j2 b/roles/desec_dyndns_cron/templates/desec-dyndns.cron.j2
new file mode 100644
index 0000000..8250477
--- /dev/null
+++ b/roles/desec_dyndns_cron/templates/desec-dyndns.cron.j2
@@ -0,0 +1,6 @@
+# /etc/cron.d/desec-dyndns: Cron call to renew DynDNS entry
+
+SHELL=/bin/bash
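+# bash is required for the process substitutions in the job below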
+PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
+
+*/5 * * * * root curl --silent --show-error --user {{ dyndns_domain }}:{{ dyndns_token }} "https://update.dedyn.io/" > >(logger -p user.debug -t desec) 2> >(/usr/bin/logger -p user.error -t desec)
diff --git a/roles/docker_setup/defaults/main.yml b/roles/docker_setup/defaults/main.yml
deleted file mode 100644
index f1d15e3..0000000
--- a/roles/docker_setup/defaults/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-docker_compose_version: "1.25.4"
-docker_compose_path: /usr/local/bin/docker-compose
-docker_data_root: "/var/lib/docker"
-docker_storage_driver: "overlay2"
diff --git a/roles/docker_setup/handlers/main.yml b/roles/docker_setup/handlers/main.yml
deleted file mode 100644
index 4e8c5a0..0000000
--- a/roles/docker_setup/handlers/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: restart docker
-  service:
-    name: docker
-    state: restarted
-    enabled: yes
diff --git a/roles/docker_setup/tasks/main.yml b/roles/docker_setup/tasks/main.yml
deleted file mode 100644
index 8f946fb..0000000
--- a/roles/docker_setup/tasks/main.yml
+++ /dev/null
@@ -1,84 +0,0 @@
-# This file is a mash-up of:
-#   https://github.com/geerlingguy/ansible-role-docker/blob/master/tasks/docker-compose.yml
-#   https://www.digitalocean.com/community/tutorials/how-to-install-docker-compose-on-debian-9
-#   and our own stuff …
----
-- name: Gather package facts
-  package_facts:
-    manager: "auto"
-
-- name: Exit if docker.io is installed
-  fail:
-    msg: "Please remove docker.io (Debian vanilla docker package) first!"
-  when: "'docker.io' in ansible_facts.packages"
-
-- name: Install Docker APT deps
-  package:
-    name: "{{ packages }}"
-    state: present
-  vars:
-    packages:
-      - apt-transport-https
-      - ca-certificates
-      - gnupg2
-      - software-properties-common
-
-- name: add Docker apt-key
-  apt_key:
-    url: https://download.docker.com/linux/debian/gpg
-    state: present
-
-- name: add Docker's APT repository
-  ansible.builtin.template:
-    src: templates/docker.list.j2
-    dest: /etc/apt/sources.list.d/docker.list
-  register: apt_repo
-
-- name: Update package cache  # noqa 503
-  ansible.builtin.apt:
-    update_cache: true
-  when: apt_repo.changed
-
-- name: install Docker
-  package:
-    name: "{{ packages }}"
-    state: present
-  vars:
-    packages:
-      - docker-ce
-      - python3-docker
-
-- name: Set docker configuration
-  template:
-    src: templates/daemon.json.j2
-    dest: /etc/docker/daemon.json
-    mode: "0644"
-  notify: restart docker
-
-- name: Check current docker-compose version.
-  command: docker-compose --version
-  register: docker_compose_current_version
-  changed_when: false
-  failed_when: false
-
-- name: Delete existing docker-compose version if it's different.
-  file:
-    path: "{{ docker_compose_path }}"
-    state: absent
-  when: >
-    docker_compose_current_version.stdout is defined
-    and docker_compose_version not in docker_compose_current_version.stdout
-
-- name: Install Docker Compose (if configured).
-  get_url:
-    url: https://github.com/docker/compose/releases/download/{{ docker_compose_version }}/docker-compose-Linux-x86_64
-    dest: "{{ docker_compose_path }}"
-    mode: "0755"
-
-- name: Place admin users in docker group
-  user:
-    name: "{{ item.logname }}"
-    groups: [docker]
-    append: yes
-  when: item.docker
-  with_items: "{{ users }}"
diff --git a/roles/docker_setup/templates/daemon.json.j2 b/roles/docker_setup/templates/daemon.json.j2
deleted file mode 100644
index ee43392..0000000
--- a/roles/docker_setup/templates/daemon.json.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-    "exec-opts": ["native.cgroupdriver=systemd"],
-    "log-driver": "json-file",
-        "log-opts": {
-            "max-size": "100m"
-        },
-    "data-root": "{{ docker_data_root }}",
-    "storage-driver": "{{ docker_storage_driver }}"
-}
diff --git a/roles/docker_setup/templates/docker.list.j2 b/roles/docker_setup/templates/docker.list.j2
deleted file mode 100644
index 7795847..0000000
--- a/roles/docker_setup/templates/docker.list.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-deb https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable
-
diff --git a/roles/nfs_host/defaults/main.yml b/roles/nfs_host/defaults/main.yml
new file mode 100644
index 0000000..ee0d286
--- /dev/null
+++ b/roles/nfs_host/defaults/main.yml
@@ -0,0 +1,8 @@
+# Defaults for nfs_host
+---
+nfs_host_exports: []
+  # - directory: "/srv/nfs"
+  #   hosts: "k3s-w[0-9]+.n39.eu"
+  #   options: rw,sync,no_subtree_check
+
+nfs_host_storage_device: "/dev/sdb"
diff --git a/roles/nfs_host/handlers/main.yml b/roles/nfs_host/handlers/main.yml
new file mode 100644
index 0000000..860a11c
--- /dev/null
+++ b/roles/nfs_host/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: Reload nfs
+  ansible.builtin.command: 'exportfs -ra'
diff --git a/roles/nfs_host/tasks/main.yml b/roles/nfs_host/tasks/main.yml
new file mode 100644
index 0000000..4ad14c0
--- /dev/null
+++ b/roles/nfs_host/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+- name: Install required packages
+  ansible.builtin.apt:
+    state: present
+    name:
+      - nfs-kernel-server
+      - nfs-common
+      - parted
+
+- name: Create a new ext4 primary partition
+  community.general.parted:
+    device: "{{ nfs_host_storage_device }}"
+    number: 1
+    state: present
+    fs_type: ext4
+
+- name: Ensure nfs mountpoints exist
+  ansible.builtin.file:
+    path: "{{ item.directory }}"
+    state: directory
+    owner: nobody
+    group: nogroup
+    mode: '0777'
+  with_items: "{{ nfs_host_exports }}"
+
+- name: Mount up the data partition
+  ansible.posix.mount:
+    path: "{{ nfs_host_exports[0].directory }}"
+    src: "{{ nfs_host_storage_device }}1"
+    fstype: ext4
+    # 'present' only adds the fstab entry, it does not mount right away
+    state: present
+
+- name: Put /etc/exports in place from template
+  ansible.builtin.template:
+    src: templates/exports.j2
+    dest: "/etc/exports"
+  notify: Reload nfs
+
+- name: Ensure nfs is running
+  ansible.builtin.service:
+    name: nfs-kernel-server
+    state: started
+    enabled: yes
+  when: nfs_host_exports | length > 0
diff --git a/roles/nfs_host/templates/exports.j2 b/roles/nfs_host/templates/exports.j2
new file mode 100644
index 0000000..87d2c20
--- /dev/null
+++ b/roles/nfs_host/templates/exports.j2
@@ -0,0 +1,3 @@
+{% for export in nfs_host_exports %}
+{{ export.directory }} {{ export.hosts }}({{ export.options }})
+{% endfor %}
\ No newline at end of file
diff --git a/roles/nginx_https_ingress/handlers/main.yml b/roles/nginx_https_ingress/handlers/main.yml
index 53aebbb..2a16ea6 100644
--- a/roles/nginx_https_ingress/handlers/main.yml
+++ b/roles/nginx_https_ingress/handlers/main.yml
@@ -1,7 +1,7 @@
 # Handlers for nginx-https-proxy
 ---
-- name: restart nginx
-  service:
+- name: Restart nginx
+  ansible.builtin.service:
     name: nginx
     state: restarted
     enabled: yes
diff --git a/roles/nginx_https_ingress/tasks/main.yml b/roles/nginx_https_ingress/tasks/main.yml
index 64884a5..e824733 100644
--- a/roles/nginx_https_ingress/tasks/main.yml
+++ b/roles/nginx_https_ingress/tasks/main.yml
@@ -8,9 +8,9 @@
   ansible.builtin.apt:
     state: present
     name:
-    - apt-transport-https
-    - ca-certificates
-    - gnupg2
+      - apt-transport-https
+      - ca-certificates
+      - gnupg2
 
 ### Setup APT cache for the nginx repository
 #
@@ -18,7 +18,7 @@
 # for SSL passthrough.
 
 - name: Add nginx apt-key
-  apt_key:
+  ansible.builtin.apt_key:
     url: https://nginx.org/keys/nginx_signing.key
     state: present
 
@@ -33,7 +33,7 @@
     src: files/apt-preference-99nginx
     dest: /etc/apt/preferences.d/99nginx
 
-- name: Update package cache # noqa 503
+- name: Update package cache  # noqa: no-handler
   ansible.builtin.apt:
     update_cache: true
   when: apt_repo.changed
@@ -45,7 +45,7 @@
     state: present
     name:
     # This version of nginx comes with the ngx_stream_core_module module
-    - nginx
+      - nginx
 
 
 ### Configuration
@@ -56,7 +56,7 @@
     owner: root
     group: root
     mode: '0644'
-  notify: restart nginx
+  notify: Restart nginx
 
 - name: Create directory for dehydrated forwardings
   ansible.builtin.file:
@@ -74,7 +74,7 @@
     group: root
     mode: '0644'
   loop: "{{ ingress }}"
-  notify: restart nginx
+  notify: Restart nginx
 
 - name: Setup nginx configuration
   # Note the order here: The nginx configuration _needs_ the dehydrated-hosts
@@ -86,4 +86,4 @@
     owner: root
     group: root
     mode: '0644'
-  notify: restart nginx
+  notify: Restart nginx
diff --git a/roles/setup-http-site-forward/handlers/main.yml b/roles/setup-http-site-forward/handlers/main.yml
deleted file mode 100644
index 670471f..0000000
--- a/roles/setup-http-site-forward/handlers/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- name: restart apache2
-  service:
-    name: apache2
-    state: restarted
diff --git a/roles/setup_http_site_forward/handlers/main.yml b/roles/setup_http_site_forward/handlers/main.yml
new file mode 100644
index 0000000..47e5060
--- /dev/null
+++ b/roles/setup_http_site_forward/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: Restart apache2
+  ansible.builtin.service:
+    name: apache2
+    state: restarted
diff --git a/roles/setup-http-site-forward/tasks/main.yml b/roles/setup_http_site_forward/tasks/main.yml
similarity index 65%
rename from roles/setup-http-site-forward/tasks/main.yml
rename to roles/setup_http_site_forward/tasks/main.yml
index cac1779..7d0dff9 100644
--- a/roles/setup-http-site-forward/tasks/main.yml
+++ b/roles/setup_http_site_forward/tasks/main.yml
@@ -1,12 +1,12 @@
 ---
 - name: Add or update Apache2 site
-  template:
+  ansible.builtin.template:
     src: templates/apache-docker-forward-site.j2
     dest: /etc/apache2/sites-available/{{ site_name }}.conf
-  notify: restart apache2
+  notify: Restart apache2
 
 - name: Activate Apache2 site
-  command: a2ensite {{ site_name }}
+  ansible.builtin.command: a2ensite {{ site_name }}
   args:
     creates: /etc/apache2/sites-enabled/{{ site_name }}.conf
-  notify: restart apache2
+  notify: Restart apache2
diff --git a/roles/setup-http-site-forward/templates/apache-docker-forward-site.j2 b/roles/setup_http_site_forward/templates/apache-docker-forward-site.j2
similarity index 100%
rename from roles/setup-http-site-forward/templates/apache-docker-forward-site.j2
rename to roles/setup_http_site_forward/templates/apache-docker-forward-site.j2
diff --git a/roles/setup_http_site_proxy/handlers/main.yml b/roles/setup_http_site_proxy/handlers/main.yml
index 670471f..47e5060 100644
--- a/roles/setup_http_site_proxy/handlers/main.yml
+++ b/roles/setup_http_site_proxy/handlers/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: restart apache2
-  service:
+- name: Restart apache2
+  ansible.builtin.service:
     name: apache2
     state: restarted
diff --git a/roles/setup_http_site_proxy/tasks/main.yml b/roles/setup_http_site_proxy/tasks/main.yml
index c1f52d2..c50d016 100644
--- a/roles/setup_http_site_proxy/tasks/main.yml
+++ b/roles/setup_http_site_proxy/tasks/main.yml
@@ -1,13 +1,13 @@
 ---
 - name: Add or update Apache2 site
-  template:
+  ansible.builtin.template:
     src: templates/apache-docker-proxy-site.j2
     dest: /etc/apache2/sites-available/{{ site_name }}.conf
     mode: "0644"
-  notify: restart apache2
+  notify: Restart apache2
 
 - name: Activate Apache2 site
-  command: a2ensite {{ site_name }}
+  ansible.builtin.command: a2ensite {{ site_name }}
   args:
     creates: /etc/apache2/sites-enabled/{{ site_name }}.conf
-  notify: restart apache2
+  notify: Restart apache2
diff --git a/roles/users/handlers/main.yml b/roles/users/handlers/main.yml
index 372d62a..04184af 100644
--- a/roles/users/handlers/main.yml
+++ b/roles/users/handlers/main.yml
@@ -1,3 +1,3 @@
 ---
 - name: Update aliases
-  shell: which newaliases && newaliases || true
+  ansible.builtin.shell: which newaliases && newaliases || true
diff --git a/roles/users/tasks/main.yml b/roles/users/tasks/main.yml
index 15fa0fc..3291b35 100644
--- a/roles/users/tasks/main.yml
+++ b/roles/users/tasks/main.yml
@@ -1,12 +1,12 @@
 ---
 - name: Ensure sudo is installed
-  package:
+  ansible.builtin.package:
     name:
       - sudo
     state: present
 
 - name: Configure group sudo for sudoers without password
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/sudoers
     state: present
     regexp: '^%sudo\s'
@@ -14,7 +14,7 @@
     validate: /usr/sbin/visudo -cf %s
 
 - name: Add users | create users' shell and home dir
-  user:
+  ansible.builtin.user:
     name: "{{ item.logname }}"
     shell: /bin/bash
     createhome: yes
@@ -22,7 +22,7 @@
   with_items: "{{ users }}"
 
 - name: Add authorized keys for user
-  authorized_key:
+  ansible.posix.authorized_key:
     user: "{{ item.0.logname }}"
     key: "{{ item.1 }}"
     state: present
@@ -32,7 +32,7 @@
     - skip_missing: true
 
 - name: Place user in sudo group
-  user:
+  ansible.builtin.user:
     name: "{{ item.logname }}"
     groups: [sudo]
     append: yes
@@ -40,12 +40,12 @@
   with_items: "{{ users }}"
 
 - name: Check if /etc/aliases exists
-  stat:
+  ansible.builtin.stat:
     path: /etc/aliases
   register: aliases
 
 - name: Set system email alias
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/aliases
     state: present
     regexp: "^{{ item.logname }}:"
diff --git a/setup-ssh.yml b/setup-ssh.yml
index e5297a3..58fcbcd 100644
--- a/setup-ssh.yml
+++ b/setup-ssh.yml
@@ -1,21 +1,21 @@
 ---
-- name: configure local ssh to access n39 hosts
+- name: Configure local ssh to access n39 hosts
   hosts: localhost
 
   tasks:
-    - name: ensure {{ lookup('env', 'HOME') }}/.ssh/config.d/ dir is present
+    - name: Ensure $HOME/.ssh/config.d/ dir is present
       ansible.builtin.file:
         path: "{{ lookup('env', 'HOME') }}/.ssh/config.d/"
         state: directory
       delegate_to: localhost
 
-    - name: template ssh config for access to internal systems
+    - name: Put ssh config for access to n39 internal systems in place
       ansible.builtin.template:
         src: templates/ssh_config.j2
         dest: "{{ lookup('env', 'HOME') }}/.ssh/config.d/n39_config"
       delegate_to: localhost
 
-    - name: ensure that n39 access config is included
+    - name: Ensure that n39 access config is included
       ansible.builtin.lineinfile:
         path: ~/.ssh/config
         insertbefore: BOF
diff --git a/templates/hobbes/grafana-kiosk.service.j2 b/templates/hobbes/grafana-kiosk.service.j2
deleted file mode 100644
index 0712910..0000000
--- a/templates/hobbes/grafana-kiosk.service.j2
+++ /dev/null
@@ -1,36 +0,0 @@
-[Unit]
-Description=Grafana Kiosk
-Documentation=https://github.com/grafana/grafana-kiosk
-Documentation=https://grafana.com/blog/2019/05/02/grafana-tutorial-how-to-create-kiosks-to-display-dashboards-on-a-tv
-After=network.target
-Wants=graphical.target
-After=graphical.target
-
-[Service]
-User={{ kiosk_user }}
-Environment="DISPLAY=:0"
-Environment="XAUTHORITY=/home/{{ kiosk_user }}/.Xauthority"
-
-# These should work according to the docs, but are nowhere in the code?
-#Environment="KIOSK_MODE=full"
-#Environment="KIOSK_AUTOFIT=false"
-#Environment="KIOSK_LXDE_ENABLED=true"
-#Environment="KIOSK_LXDE_HOME=/home/{{ kiosk_user }}"
-#Environment="KIOSK_URL={{ kiosk_url }}"
-#Environment="KIOSK_LOGIN_METHOD=local"
-#Environment="KIOSK_LOGIN_USER={{ kiosk_grafana_user }}"
-#Environment="KIOSK_LOGIN_PASSWORD={{ kiosk_grafana_pass }}"
-
-# Disable screensaver etc.
-ExecStartPre=xset s off
-ExecStartPre=xset -dpms
-ExecStartPre=xset s noblank
-
-ExecStart=/home/{{ kiosk_user }}/bin/grafana-kiosk -kiosk-mode=full -autofit=false  -lxde-home=/home/{{ kiosk_user }} -URL="{{ kiosk_url }}" -login-method=local -username={{ kiosk_grafana_user }} --password={{ kiosk_grafana_pass }}
-
-Restart=on-failure
-RestartSec=30s
-
-[Install]
-WantedBy=graphical.target
-
diff --git a/templates/hobbes/kiosk.sh.j2 b/templates/hobbes/kiosk.sh.j2
new file mode 100644
index 0000000..1a5d583
--- /dev/null
+++ b/templates/hobbes/kiosk.sh.j2
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# Abort unless the script is run by root
+if [ "$EUID" -ne 0 ]; then
+  echo "Please run as root"
+  exit 1
+fi
+
+# MQTT broker details
+BROKER="{{ kiosk_mqtt_host }}"
+TOPIC="{{ kiosk_mqtt_topic }}"
+
+# Variable to store the PID of the fbi process
+fbi_pid=0
+
+# Function to be executed on SIGTERM
+on_sigterm() {
+    echo "SIGTERM received, exiting..."
+
+    # Kill the fbi process
+    # As the process forks itself, we do not get a reliable PID and killall is needed
+    killall fbi
+
+    # Remove the temporary file
+    rm -f /tmp/grafana.png
+    exit 0
+}
+
+# Trap SIGTERM and call on_sigterm() when it is received
+trap 'on_sigterm' SIGTERM
+
+while true
+do
+    # Subscribe to the topic and save received data to a file
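+    # (-C 1 tells mosquitto_sub to exit after exactly one message)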
+    mosquitto_sub -h "$BROKER" -t "$TOPIC" -C 1 > /tmp/grafana.png
+
+    # Kill the previous fbi process
+    # As the process forks itself, we do not get a reliable PID and killall is needed
+    killall fbi
+
+    # Display the image
+    fbi -T 1 -noverbose -a /tmp/grafana.png &
+
+    # Wait to avoid a race condition between
+    # fbi starting and mosquitto truncating the file
+    sleep 1
+done
diff --git a/templates/pottwal/renovate-cron.j2 b/templates/pottwal/renovate-cron.j2
new file mode 100644
index 0000000..4e7dd01
--- /dev/null
+++ b/templates/pottwal/renovate-cron.j2
@@ -0,0 +1,8 @@
+#!/bin/bash
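+# Runs Renovate against our Forgejo instance through its Gitea-compatible
+# API; GITHUB_COM_TOKEN merely avoids rate limits when release notes are
+# fetched from github.com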
+docker run --rm \
+	-e RENOVATE_TOKEN={{ renovate_forgejo_pat }} \
+	-e RENOVATE_ENDPOINT=https://{{ forgejo_domain_name }}/api/v1 \
+	-e RENOVATE_PLATFORM=gitea \
+	-e RENOVATE_GIT_AUTHOR={{ renovate_git_user | quote }} \
+	-e GITHUB_COM_TOKEN={{ renovate_github_pat }} \
+	{{ renovate_image }} --autodiscover
diff --git a/templates/pottwal/spaceapi-apache-site.j2 b/templates/pottwal/spaceapi-apache-site.j2
index 5d6961d..9959e90 100644
--- a/templates/pottwal/spaceapi-apache-site.j2
+++ b/templates/pottwal/spaceapi-apache-site.j2
@@ -44,9 +44,9 @@
     RequestHeader set "X-Forwarded-SSL" expr=%{HTTPS}
     ProxyPreserveHost {{ proxy_preserve_host | default("Off") }}
 
-    ProxyPass /json http://172.23.48.7/spaceapi
-    ProxyPass /text http://172.23.48.7/state.txt
-    ProxyPass /state.png  http://172.23.48.7/state.png
+    ProxyPass /json http://172.23.48.7:8001/json
+    ProxyPass /text http://172.23.48.7:8001/text
+    ProxyPass /state.png  http://172.23.48.7:8001/state.png
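+    # Port 8001 matches spaceapi_host_port on the SpaceAPI host
+    # (presumably wittgenstein, see host-wittgenstein.yml)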
 </VirtualHost>
 </IfFile>
 </IfFile>
diff --git a/templates/ssh_config.j2 b/templates/ssh_config.j2
index 8dcadf2..411b381 100644
--- a/templates/ssh_config.j2
+++ b/templates/ssh_config.j2
@@ -25,3 +25,11 @@ Host {{ host }}
   Port 22
 
 {% endfor %}
+
+{# This is our router #}
+Host rhodium.n39.eu
+  Hostname rhodium.n39.eu
+  IdentityFile {{ setup_ssh_key }}
+  User root
+  ProxyJump ssh.n39.eu
+  Port 22