Compare commits


1 commit

SHA1        Message                             Date
4da334a236  Update postgres Docker tag to v17   2025-03-04 10:20:51 +00:00
43 changed files with 194 additions and 211 deletions
group-all.yml
group-docker_host.yml
group-k3s.yml
group-proxmox.yml
group_vars/docker_host
host-beaker.yml
host-hobbes.yml
host-holmium.yml
host-krypton.yml
host-oganesson.yml
host-platon.yml
host-plumbum.yml
host-pottwal.yml
host-radon.yml
host-tau.yml
host-unicorn.yml
host-wittgenstein.yml
host_vars/pottwal.n39.eu
host_vars/wittgenstein.n39.eu
inventory.yml
renovate.json
requirements.yml
roles/apache/handlers
roles/apache/tasks
roles/apache_letsencrypt/handlers
roles/apache_letsencrypt/tasks
roles/cleanuri/defaults
roles/cleanuri/tasks
roles/dd24_dyndns_cron/handlers
roles/dd24_dyndns_cron/tasks
roles/desec_dyndns_cron/handlers
roles/desec_dyndns_cron/tasks
roles/nfs_host/handlers
roles/nfs_host/tasks
roles/nginx_https_ingress/handlers
roles/nginx_https_ingress/tasks
roles/setup_http_site_forward/handlers
roles/setup_http_site_forward/tasks
roles/setup_http_site_proxy/handlers
roles/setup_http_site_proxy/tasks
roles/users/handlers
roles/users/tasks
setup-ssh.yml

group-all.yml

@@ -1,6 +1,7 @@
---
- name: Tasks for all hosts
hosts: all
# tasks for all hosts
- hosts: all
become: true
vars:
@@ -14,20 +15,20 @@
tasks:
- name: Update and clean package cache
ansible.builtin.apt:
apt:
update_cache: true
cache_valid_time: 3600
autoclean: true
changed_when: false
- name: Ensure unattended-upgrades is installed and up to date
ansible.builtin.apt:
apt:
name: unattended-upgrades
state: present
- name: Setup unattended-upgrades
ansible.builtin.include_role:
name: hifis.toolkit.unattended_upgrades
include_role:
name: hifis.unattended_upgrades
vars:
unattended_origins_patterns:
- "origin=*"
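A note on a pattern repeated throughout this compare: the base side addresses modules by fully qualified collection name (FQCN, e.g. ansible.builtin.apt), the head side by short name (apt). Both resolve to the same module on a stock Ansible 2.10+ install; a minimal sketch of the equivalence:

    - name: Update and clean package cache
      ansible.builtin.apt:   # FQCN, unambiguous across installed collections
        update_cache: true

    - name: Update and clean package cache
      apt:                   # short name, resolved via the module search path
        update_cache: true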

group-docker_host.yml

@@ -1,18 +1,15 @@
---
- name: Tasks for docker hosts
hosts: docker_host
- hosts: docker_host
become: true
roles:
- role: netz39.host_docker
- name: Tasks for docker hosts at location space
hosts: docker_host:&location_space
- hosts: docker_host:&location_space
become: true
roles:
- role: lespocky.telegraf_docker_in_docker
when: (ansible_architecture == "x86_64")
vars:
tdid_conf_dir: "/etc/telegraf"
tdid_influxdb_org: "{{ influxdb_org }}"
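A role-level when:, as on lespocky.telegraf_docker_in_docker above, is not evaluated once for the play; Ansible attaches it to every task the role runs. A short sketch of the pattern:

    roles:
      - role: lespocky.telegraf_docker_in_docker
        when: ansible_architecture == "x86_64"   # re-checked for each task in the role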

group-k3s.yml

@@ -1,6 +1,5 @@
---
- name: Tasks for kubernetes hosts
hosts: k3s
- hosts: k3s
become: true
tasks:

group-proxmox.yml

@@ -1,6 +1,5 @@
---
- name: Tasks for virtual machines on proxmox host
hosts: proxmox
- hosts: proxmox
become: true
tasks:

group_vars/docker_host

@@ -1,3 +1,3 @@
---
docker_data_root: "/srv/docker"
docker_image_prune: true
docker_cron_image_prune: true
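The docker_image_prune / docker_cron_image_prune rename presumably tracks the netz39.host_docker pin, which moves between v0.5.0 and v0.4.0 in requirements.yml below; the variable name has to match whichever role version is installed. A hedged sketch of the consuming side (semantics inferred from the names):

    - hosts: docker_host
      become: true
      roles:
        - role: netz39.host_docker
      # group_vars/docker_host supplies:
      #   docker_data_root: "/srv/docker"
      #   docker_cron_image_prune: true   # assumed to be the name the v0.4.x role expects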

host-beaker.yml

@@ -1,6 +1,5 @@
---
- name: Setup things on host 'beaker' (proxmox server im space)
hosts: beaker.n39.eu
- hosts: beaker.n39.eu
become: true
vars:
@@ -10,7 +9,7 @@
tasks:
- name: Enable proxmox gui login for admin users
- name: enable proxmox gui login for admin users
ansible.builtin.lineinfile:
path: /etc/pve/user.cfg
regexp: "^user:{{ item.logname }}@pam"
@@ -19,7 +18,7 @@
state: present
loop: "{{ users }}"
- name: Configure proxmox admin group
- name: configure proxmox admin group
ansible.builtin.lineinfile:
path: /etc/pve/user.cfg
regexp: "^group:Admins:"

host-hobbes.yml

@@ -1,6 +1,5 @@
---
- name: Setup things on host 'hobbes' (raspberry pi for kiosk screen)
hosts: hobbes.n39.eu
- hosts: hobbes.n39.eu
become: true
vars:

host-holmium.yml

@@ -1,6 +1,5 @@
---
- name: Setup things on host 'holmium' (http ingress vm)
hosts: holmium.n39.eu
- hosts: holmium.n39.eu
become: true
vars:

host-krypton.yml

@@ -1,6 +1,5 @@
---
- name: Setup things on host 'krypton' (ldap vm)
hosts: krypton.n39.eu
- hosts: krypton.n39.eu
become: true
vars:
@@ -33,13 +32,13 @@
tasks:
# - name: Setup dehydrated challenge endpoint for {{ openldap_domain }}
# ansible.builtin.include_role:
# include_role:
# name: setup-http-dehydrated
# vars:
# site_name: "{{ openldap_domain }}"
- name: Ensure openLDAP directories are present.
ansible.builtin.file:
file:
path: "{{ item.path }}"
mode: "0755"
state: directory
@@ -50,7 +49,7 @@
- path: "{{ dehydrated_certs_dir }}/{{ openldap_domain }}"
- name: Ensure container for openLDAP is running.
community.docker.docker_container:
docker_container:
name: openLDAP
image: osixia/openldap:1.5.0
detach: yes
@@ -115,7 +114,7 @@
- name: Ensure container for entities validation service is running
community.docker.docker_container:
docker_container:
name: entities_validation_svc
image: netz39/entities_validation_svc:v1.0.4
pull: true
@@ -128,7 +127,7 @@
TZ: "{{ timezone }}"
- name: Setup proxy site entities-validation.svc.n39.eu
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: entities-validation.svc.n39.eu

host-oganesson.yml

@@ -1,6 +1,5 @@
---
- name: Setup things on host 'oganesson' (ssh jump host vm)
hosts: oganesson.n39.eu
- hosts: oganesson.n39.eu
become: true
vars:

host-platon.yml

@@ -1,6 +1,5 @@
---
- name: Setup things on host 'platon' (raspberry pi for entrance door)
hosts: platon.n39.eu
- hosts: platon.n39.eu
become: true
vars:
ansible_python_interpreter: /usr/bin/python3
@@ -64,7 +63,7 @@
owner: root
group: root
mode: '0644'
notify: Restart mosquitto service
notify: restart mosquitto
### Sesam for SSH access
@@ -246,7 +245,7 @@
owner: root
group: root
mode: "0644"
notify: Restart rsyslog
notify: restart rsyslog
### Asterisk
@@ -259,7 +258,7 @@
owner: root
group: root
mode: "0644"
notify: Restart asterisk
notify: restart asterisk
- name: Set up extensions for asterisk
# This uses the variables gatekeeper_user and door_open_command
@@ -269,14 +268,14 @@
owner: root
group: root
mode: "0644"
notify: Restart asterisk
notify: restart asterisk
- name: Ensure asterisk is in the right groups
ansible.builtin.user:
name: asterisk
groups: audio,i2c,gpio
append: yes
notify: Restart asterisk
notify: restart asterisk
# Asterisk now executes shell scripts with reduced privileges, so we need to
# use sudo for I2C access.
@@ -305,20 +304,20 @@
handlers:
- name: Restart mosquitto service
ansible.builtin.service:
- name: restart mosquitto
service:
name: mosquitto
state: restarted
enabled: yes
- name: Restart rsyslog
ansible.builtin.service:
- name: restart rsyslog
service:
name: rsyslog
state: restarted
enabled: yes
- name: Restart asterisk
ansible.builtin.service:
- name: restart asterisk
service:
name: asterisk
state: restarted
enabled: yes
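Handlers are looked up by the literal notify string, which is why every notify line in this file changes in lockstep with its handler. An illustrative pairing (template path hypothetical):

    tasks:
      - name: Deploy mosquitto config
        ansible.builtin.template:
          src: mosquitto.conf.j2
          dest: /etc/mosquitto/mosquitto.conf
        notify: restart mosquitto   # must match the handler name verbatim

    handlers:
      - name: restart mosquitto
        service:
          name: mosquitto
          state: restarted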

host-plumbum.yml

@@ -1,6 +1,5 @@
---
- name: Setup things on host 'plumbum' (nfs server)
hosts: plumbum.n39.eu
- hosts: plumbum.n39.eu
become: true
roles:

host-pottwal.yml

@@ -1,6 +1,5 @@
---
- name: Setup things on host 'pottwal' (the big docker container host)
hosts: pottwal.n39.eu
- hosts: pottwal.n39.eu
become: true
roles:
@@ -55,7 +54,7 @@
# If port 2222 is changed here, it must also be adapted
# in the forgejo config file (see application volume)!!
- name: Setup the docker container for forgejo
community.docker.docker_container:
docker_container:
name: forgejo
image: "{{ forgejo_image }}"
pull: true
@@ -81,7 +80,7 @@
tags: ["forgejo"]
- name: Setup proxy site "{{ forgejo_domain_name }}"
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: "{{ forgejo_domain_name }}"
@@ -89,7 +88,7 @@
tags: ["forgejo"]
- name: Ensure apt-cacher container is running
community.docker.docker_container:
docker_container:
name: apt_cacher_ng
image: mrtux/apt-cacher-ng:latest
pull: true
@@ -102,7 +101,7 @@
TZ: "{{ timezone }}"
- name: Setup docker network
community.docker.docker_network:
docker_network:
name: shlinknet
state: present
internal: true
@@ -118,9 +117,9 @@
- shlink
- name: Ensure shlink database container is running
community.docker.docker_container:
docker_container:
name: shlinkdb
image: postgres:16.8-alpine
image: postgres:17.4-alpine
pull: true
state: started
restart_policy: unless-stopped
@@ -138,7 +137,7 @@
- shlink
- name: Ensure container for shlink is running
community.docker.docker_container:
docker_container:
name: shlink
image: "{{ shlink_image }}"
pull: true
@@ -167,7 +166,7 @@
- shlink
- name: Setup proxy site {{ shlink_domain_name }}
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: "{{ shlink_domain_name }}"
@@ -184,7 +183,7 @@
msg: "prosody data dir is missing, please restore from the backup!"
when: not prosody_dir.stat.exists
- name: Ensure prosody main config dir exists
- name: Ensure prosody config dir exists
ansible.builtin.file:
path: "{{ prosody_config_dir }}"
state: directory
@@ -204,7 +203,7 @@
state: directory
mode: 0755
- name: Ensure prosody conf.d dir exists
- name: Ensure prosody certs dir exists
ansible.builtin.file:
path: "{{ prosody_config_dir }}/conf.d"
state: directory
@@ -235,7 +234,7 @@
- Restart prosody
- name: Ensure container for prosody XMPP server is running
community.docker.docker_container:
docker_container:
name: prosody
image: "{{ prosody_image }}"
pull: true
@@ -257,7 +256,7 @@
- name: Ensure container for static XMPP website is running
community.docker.docker_container:
docker_container:
name: jabber-static-website
image: "{{ prosody_web_image }}"
pull: true
@@ -277,7 +276,7 @@
- name: Setup proxy site {{ prosody_domain_name }}
# point to static website for now
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: "{{ prosody_domain_name }}"
@@ -299,7 +298,7 @@
- hedgedoc
- name: Ensure the hedgedoc directories exist
ansible.builtin.file:
file:
path: "{{ item.path }}"
mode: "{{ item.mode }}"
state: directory
@@ -312,7 +311,7 @@
- hedgedoc
- name: Setup docker network
community.docker.docker_network:
docker_network:
name: hedgedocnet
state: present
internal: true
@@ -320,7 +319,7 @@
- hedgedoc
- name: Install HedgeDoc database container
community.docker.docker_container:
docker_container:
name: hedgedocdb
image: "{{ hedgedoc_db_image }}"
pull: true
@@ -340,7 +339,7 @@
- hedgedoc
- name: Ensure container for hedgedoc is running
community.docker.docker_container:
docker_container:
name: hedgedoc
image: "{{ hedgedoc_image }}"
pull: true
@@ -372,7 +371,7 @@
- hedgedoc
- name: Setup proxy site "{{ hedgedoc_domain_name }}"
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: "{{ hedgedoc_domain_name }}"
@@ -381,7 +380,7 @@
- hedgedoc
- name: Ensure the influxdb directories exist
ansible.builtin.file:
file:
path: "{{ item }}"
mode: 0700
state: directory
@@ -391,7 +390,7 @@
- "{{ data_dir }}/influxdb/cfg"
- name: Ensure container for influxdb is running
community.docker.docker_container:
docker_container:
name: influxdb
image: "{{ influxdb_image }}"
pull: true
@@ -411,7 +410,7 @@
- "{{ data_dir }}/influxdb/cfg:/etc/influxdb2"
- name: Setup proxy site {{ influxdb_domain_name }}
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: "{{ influxdb_domain_name }}"
@@ -436,7 +435,7 @@
- redmine
- name: Setup Redmine docker network
community.docker.docker_network:
docker_network:
name: redminenet
state: present
internal: true
@@ -444,7 +443,7 @@
- redmine
- name: Setup Redmine MySQL container
community.docker.docker_container:
docker_container:
name: redminedb
image: "{{ redmine_mysql_image }}"
pull: true
@@ -464,7 +463,7 @@
- redmine
- name: Setup Redmine container
community.docker.docker_container:
docker_container:
name: redmine
image: "{{ redmine_image }}"
pull: true
@@ -490,7 +489,7 @@
- redmine
- name: Setup proxy site "{{ redmine_domain_name }}"
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: "{{ redmine_domain_name }}"
@@ -499,7 +498,7 @@
- redmine
- name: Ensure the uptime-kuma directories exist
ansible.builtin.file:
file:
path: "{{ item }}"
mode: "0755"
state: directory
@@ -509,7 +508,7 @@
- uptimekuma
- name: Ensure container for uptime-kuma is running
community.docker.docker_container:
docker_container:
name: uptime-kuma
image: "{{ uptimekuma_image }}"
pull: true
@@ -526,7 +525,7 @@
- uptimekuma
- name: Setup proxy site "{{ uptimekuma_domain_name }}"
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: "{{ uptimekuma_domain_name }}"
@@ -535,7 +534,7 @@
- uptimekuma
- name: Ensure the grafana directories exist
ansible.builtin.file:
file:
path: "{{ item.path }}"
owner: "{{ item.owner | default('root') }}"
mode: "{{ item.mode }}"
@@ -552,7 +551,7 @@
- grafana
- name: Ensure container for grafana is running
community.docker.docker_container:
docker_container:
name: grafana
image: "{{ grafana_image }}"
pull: true
@@ -573,7 +572,7 @@
- grafana
- name: Setup proxy site "{{ grafana_domain_name }}"
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: "{{ grafana_domain_name }}"
@@ -583,7 +582,7 @@
- grafana
- name: Ensure the homebox directories exist
ansible.builtin.file:
file:
path: "{{ item.path }}"
owner: "{{ item.owner | default('root') }}"
mode: "{{ item.mode }}"
@@ -597,7 +596,7 @@
- homebox
- name: Ensure container for homebox is running
community.docker.docker_container:
docker_container:
name: homebox
image: "{{ homebox_image }}"
pull: true
@@ -617,7 +616,7 @@
- homebox
- name: Setup proxy site {{ homebox_domain_name }}
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: "{{ homebox_domain_name }}"
@@ -627,7 +626,7 @@
- homebox
- name: Setup proxy site spaceapi.n39.eu
ansible.builtin.template:
template:
src: templates/pottwal/spaceapi-apache-site.j2
dest: /etc/apache2/sites-available/spaceapi.n39.eu.conf
mode: "0644"
@@ -638,13 +637,12 @@
tags:
- spaceapi
# Renovate configuration is sourced from `renovate.json` in each repository
- name: Ensure renovate bot cronjob is present
ansible.builtin.template:
src: templates/pottwal/renovate-cron.j2
dest: /etc/cron.hourly/renovate-bot
mode: "0700"
notify: Reload cron
notify: reload cron
tags:
- renovate
@@ -656,14 +654,14 @@
restart: yes
- name: Restart apache2
ansible.builtin.service:
service:
name: apache2
state: restarted
- name: Reload cron
- name: reload cron
ansible.builtin.shell:
cmd: service cron reload
# Use the shell call because the task sometimes has problems finding the service state
# ansible.builtin.service:
# service:
# name: cron
# state: restarted
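The postgres bump named in the commit message lands in this file (and again for hedgedoc_db_image in host_vars/pottwal.n39.eu below): the shlink database container moves between postgres:16.8-alpine and postgres:17.4-alpine. A major-version jump will not start on an existing data directory without a migration; a pre-upgrade dump sketch, assuming the shlinkdb container above and a database user called shlink (neither the user nor a dump path is shown in this diff):

    - name: Dump shlink database before a postgres major upgrade
      community.docker.docker_container_exec:
        container: shlinkdb
        command: pg_dumpall -U shlink -f /var/lib/postgresql/data/upgrade-dump.sql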

host-radon.yml

@@ -1,6 +1,5 @@
---
- name: Setup things on host 'radon' (services for space automation)
hosts: radon.n39.eu
- hosts: radon.n39.eu
become: true
vars:
@@ -8,7 +7,7 @@
data_dir: "/srv/data"
mosquitto_image: eclipse-mosquitto:2.0.21
mosquitto_image: eclipse-mosquitto:2.0.20
mosquitto_data: "{{ data_dir }}/mosquitto"
nodered_image: nodered/node-red:3.0.1-1-18
@@ -38,7 +37,7 @@
tasks:
- name: Ensure the mosquitto directories exist
ansible.builtin.file:
file:
path: "{{ item }}"
mode: 0755
state: directory
@@ -50,16 +49,16 @@
- mosquitto
- name: Make sure mosquitto config is there
ansible.builtin.template:
template:
src: "templates/mosquitto.conf.j2"
dest: "{{ mosquitto_data }}/config/mosquitto.conf"
mode: 0644
notify: Restart mosquitto container
notify: restart mosquitto
tags:
- mosquitto
- name: Ensure mosquitto is running
community.docker.docker_container:
docker_container:
name: mosquitto
image: "{{ mosquitto_image }}"
pull: true
@@ -89,7 +88,7 @@
when: not nodered_dir.stat.exists
- name: Ensure nodered is running
community.docker.docker_container:
docker_container:
name: nodered
image: "{{ nodered_image }}"
pull: true
@@ -109,7 +108,7 @@
restart_policy: unless-stopped
- name: Setup proxy site nodered.n39.eu
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: "nodered.n39.eu"
@@ -130,7 +129,7 @@
- rabbitmq
- name: Ensure rabbitmq docker container is running
community.docker.docker_container:
docker_container:
name: rabbitmq
image: "{{ rabbitmq_image }}"
ports:
@@ -151,7 +150,7 @@
- rabbitmq
- name: Setup proxy site rabbitmq.n39.eu
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: "rabbitmq.n39.eu"
@@ -160,7 +159,7 @@
- rabbitmq
- name: Ensure Power Meter Pulse Gateway for 19i room is running
community.docker.docker_container:
docker_container:
name: pwr-meter-pulse-gw-19i
image: "{{ pwr_meter_pulse_gw_image }}"
ports:
@@ -178,7 +177,7 @@
restart_policy: unless-stopped
- name: Setup proxy site pwr-meter-pulse-gw-19i.svc.n39.eu
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: "pwr-meter-pulse-gw-19i.svc.n39.eu"
@@ -186,7 +185,7 @@
- name: Setup docker container for BrotherQL Web UI printer
community.docker.docker_container:
docker_container:
name: brotherql-web
image: dersimn/brother_ql_web:2.1.9-alpine
pull: true
@@ -202,7 +201,7 @@
- labelprinter
- name: Setup proxy site labelprinter.n39.eu
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: labelprinter.n39.eu
@@ -211,9 +210,9 @@
- labelprinter
- name: Setup docker container for Grafana Screenshots
community.docker.docker_container:
docker_container:
name: grafana-screenshot
image: mrtux/grafana-screenshot:0.1.3
image: mrtux/grafana-screenshot:0.1.1
pull: true
restart_policy: unless-stopped
detach: yes
@@ -227,8 +226,8 @@
- grafana-screenshot
handlers:
- name: Restart mosquitto container
community.docker.docker_container:
- name: restart mosquitto
docker_container:
name: mosquitto
state: started
restart: yes

host-tau.yml

@@ -1,6 +1,5 @@
---
- name: Setup things on host 'tau' (vserver for wiki etc.)
hosts: tau.netz39.de
- hosts: tau.netz39.de
become: true
vars:
@@ -26,7 +25,7 @@
tasks:
- name: Setup docker network
community.docker.docker_network:
docker_network:
name: dockernet
driver: bridge
ipam_config:
@@ -35,7 +34,7 @@
state: present
- name: Setup Dehydrated
ansible.builtin.include_role:
include_role:
name: 24367dfa.dehydrated
vars:
dehydrated_contact_email: "{{ server_admin }}"
@@ -52,14 +51,14 @@
deploy_challenge_hook: "/bin/systemctl restart apache2"
- name: Setup proxy site testredmine.netz39.de
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: testredmine.netz39.de
proxy_port: 9004
- name: Setup phpmyadmin
community.docker.docker_container:
docker_container:
name: phpmyadmin
state: started
image: phpmyadmin:5.2
@@ -76,7 +75,7 @@
- 9001:80
- name: Setup proxy site mysql.adm.netz39.de
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: mysql.adm.netz39.de
@@ -86,20 +85,20 @@
- name: Check if Docker Registry auth dir exists
ansible.builtin.stat:
path: "{{ data_dir }}/registry/auth"
register: docker_registry_auth_dir
- name: Fail if Docker Registry auth dir does not exist
register: docker_dir
- name: Fail if docker registry data dir does not exist
ansible.builtin.fail:
msg: "Docker Registry auth dir is missing, please restore from the backup!"
when: not docker_registry_auth_dir.stat.exists
when: not docker_dir.stat.exists
- name: Ensure the Docker Registry data directory exists
# This may not be part of the backup
ansible.builtin.file:
file:
path: "{{ data_dir }}/registry/data"
state: directory
mode: "0755"
- name: Setup Docker Registry Container
community.docker.docker_container:
docker_container:
name: registry
image: registry:2
pull: true
@@ -118,7 +117,7 @@
- "{{ data_dir }}/registry/auth:/auth:rw"
- name: Setup proxy site for the Docker Registry
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: "{{ docker_registry_domain }}"
@@ -147,7 +146,7 @@
- dokuwiki
- name: Setup Dokuwiki Container
community.docker.docker_container:
docker_container:
name: dokuwiki
image: "{{ dokuwiki_image }}"
pull: true
@@ -166,7 +165,7 @@
- dokuwiki
- name: Setup proxy site for Dokuwiki
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: "{{ dokuwiki_domain }}"
@@ -176,7 +175,7 @@
- name: Setup container for secondary FFMD DNS
community.docker.docker_container:
docker_container:
name: bind9-md-freifunk-net
image: ffmd/bind9-md-freifunk-net:v2022122301
pull: true
@@ -191,7 +190,7 @@
- ffmd-dns
- name: Setup forwarding for Discord invite
ansible.builtin.include_role:
include_role:
name: setup_http_site_forward
vars:
site_name: "{{ discord_invite_domain }}"

host-unicorn.yml

@@ -1,7 +1,6 @@
---
# this is for a dedicated vm just hosting the unifi controller.
- name: Setup things on host 'unicorn' (vm for ubiquiti unifi controller)
hosts: unicorn.n39.eu
- hosts: unicorn.n39.eu
become: true
vars:
ansible_python_interpreter: /usr/bin/python3
@@ -12,7 +11,7 @@
tasks:
- name: Setup the docker container for unifi-controller
community.docker.docker_container:
docker_container:
name: unifi-controller
image: jacobalberty/unifi:v9.0.114
state: started

host-wittgenstein.yml

@@ -1,9 +1,12 @@
---
- name: Setup things on host 'wittgenstein' (raspberry pi for ampel and spaceapi)
hosts: wittgenstein.n39.eu
- hosts: wittgenstein.n39.eu
become: true
roles:
- role: netz39.host_docker
vars:
docker_data_root: "/srv/docker"
docker_cron_image_prune: true
- role: apache
- role: apache_letsencrypt # Uses configuration from dehydrated setup
- role: 24367dfa.dehydrated
@@ -130,7 +133,7 @@
### Space API
- name: Setup the SpaceAPI Docker container
community.docker.docker_container:
docker_container:
name: spaceapi
image: "{{ spaceapi_image }}"
pull: true
@@ -149,7 +152,7 @@
- spaceapi
- name: Setup the Ampel Controller Docker container
community.docker.docker_container:
docker_container:
name: ampelcontroller
image: "{{ ampelcontroller_image }}"
pull: true

host_vars/pottwal.n39.eu

@@ -12,23 +12,23 @@ cleanuri_amqp_vhost: "/cleanuri"
forgejo_host_port: 9091
forgejo_ssh_port: 2222
forgejo_domain_name: git.n39.eu
forgejo_image: codeberg.org/forgejo/forgejo:10.0.3
forgejo_image: codeberg.org/forgejo/forgejo:10.0.1
shlink_host_port: 8083
shlink_domain_name: sl.n39.eu
shlink_image: shlinkio/shlink:4.4.6
shlink_image: shlinkio/shlink:4.4.5
shlink_initial_api_key: "{{ vault_shlink_initial_api_key }}"
shlink_postgres_password: "{{ vault_shlink_postgres_password }}"
hedgedoc_host_port: 8084
hedgedoc_domain_name: pad.n39.eu
hedgedoc_image: quay.io/hedgedoc/hedgedoc:1.10.2
hedgedoc_db_image: postgres:16.8-alpine
hedgedoc_db_image: postgres:17.4-alpine
hedgedoc_postgres_password: "{{ vault_hedgedoc_postgres_password }}"
redmine_host_port: 8087
redmine_domain_name: redmine.n39.eu
redmine_image: redmine:6.0.4
redmine_image: redmine:6.0.3
redmine_mysql_image: mysql:9.2
redmine_database: redmine
redmine_database_password: "{{ vault_redmine_database_password }}"
@@ -52,14 +52,14 @@ uptimekuma_image: louislam/uptime-kuma:1.23.16
grafana_host_port: 8089
grafana_domain_name: grafana.n39.eu
grafana_image: grafana/grafana:11.6.0
grafana_image: grafana/grafana:11.5.2
grafana_admin_password: "{{ vault_grafana_admin_password }}"
homebox_host_port: 8092
homebox_domain_name: inventory.n39.eu
homebox_image: ghcr.io/hay-kot/homebox:v0.10.3
renovate_image: renovate/renovate:39.220.1
renovate_image: renovate/renovate:39.180.2
renovate_forgejo_pat: "{{ vault_renovate_forgejo_pat }}"
renovate_github_pat: "{{ vault_renovate_github_pat }}"
renovate_git_user: "Renovate Bot <accounts+renovatebot@netz39.de>"

host_vars/wittgenstein.n39.eu

@@ -11,7 +11,7 @@ spaceapi_image: netz39/spaceapi-service:0.1.1
spaceapi_topic_status: "Netz39/SpaceAPI/isOpen"
spaceapi_topic_lastchange: "Netz39/SpaceAPI/lastchange"
ampelcontroller_image: netz39/ampel-controller:0.2.0
ampelcontroller_image: netz39/ampel-controller:0.1.0
topic_lever_state: "Netz39/Things/StatusSwitch/Lever/State"
topic_door_events: "Netz39/Things/Door/Events"
topic_traffic_light: "Netz39/Things/Ampel/Light"

inventory.yml

@@ -30,7 +30,6 @@ all:
radon.n39.eu:
tau.netz39.de:
unicorn.n39.eu:
wittgenstein.n39.eu:
proxmox:
hosts:
holmium.n39.eu:
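wittgenstein.n39.eu leaves the inventory group that also lists radon, tau, and unicorn (the group name sits above this hunk's context; docker_host is the likely candidate). This matches the host-wittgenstein.yml hunk above, where the head side applies the Docker role directly in the play instead:

    roles:
      - role: netz39.host_docker
        vars:
          docker_data_root: "/srv/docker"
          docker_cron_image_prune: true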

renovate.json

@@ -15,10 +15,8 @@
],
"packageRules": [
{
"matchDatasources": ["docker"],
"matchPackageNames": ["renovate/renovate"],
"schedule": [ "before 1am on friday" ],
"automerge": true
"schedule": [ "on friday" ]
}
]
}
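On the base side this packageRules entry appears to target the renovate/renovate Docker package and automerge it in a weekly window; the head side keeps only a weekly schedule. In Renovate, a rule without any match* condition applies to every dependency, so the head-side rule effectively schedules all updates (sketch of the resulting rule, surrounding keys as in the hunk):

    "packageRules": [
      {
        "schedule": [ "on friday" ]
      }
    ]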

requirements.yml

@@ -1,11 +1,13 @@
---
roles:
- src: hifis.unattended_upgrades
version: v3.2.1
- name: adriagalin.timezone
src: git+https://github.com/adriagalin/ansible.timezone.git
version: 4.0.0
- name: 24367dfa.dehydrated
src: git+https://github.com/24367dfa/ansible-role-dehydrated.git
version: 2.1.0
version: 2.0.0
- name: penguineer.dehydrated_cron
src: https://github.com/penguineer/ansible-role-dehydrated_cron.git
version: v1.1.0
@@ -13,14 +15,11 @@ roles:
src: git+https://github.com/maz3max/ble-keykeeper-role.git
version: v1.1.0
- src: lespocky.telegraf_docker_in_docker
version: v0.2.2
version: v0.2.1
- name: netz39.host_docker
src: git+https://github.com/netz39/ansible-role-host-docker.git
version: v0.5.0
version: v0.4.0
collections:
- name: community.grafana
version: 2.1.0
# for role 'hifis.toolkit.unattended_upgrades'
- name: hifis.toolkit
version: 5.3.0
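Both lists in this file are consumed by ansible-galaxy; on Ansible 2.10 or newer a single run installs the roles: and collections: entries together (standard tooling behaviour, not specific to this repository):

    ansible-galaxy install -r requirements.yml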

roles/apache/handlers

@@ -1,6 +1,6 @@
# Handlers for role apache
---
- name: Restart apache2
ansible.builtin.service:
- name: restart apache2
service:
name: apache2
state: restarted

roles/apache/tasks

@@ -1,12 +1,12 @@
---
- name: Ensure Apache2 and modules are installed and up to date
ansible.builtin.apt:
apt:
name:
- apache2
state: present
- name: Ensure necessary modules are enabled
community.general.apache2_module:
apache2_module:
name: "{{ item }}"
state: present
with_items:
@@ -23,7 +23,7 @@
mode: "0644"
owner: root
group: root
notify: Restart apache2
notify: restart apache2
- name: Add symlink to enable configuration
ansible.builtin.file:
@@ -32,4 +32,4 @@
state: link
owner: root
group: root
notify: Restart apache2
notify: restart apache2

roles/apache_letsencrypt/handlers

@@ -1,6 +1,6 @@
# Handlers for role apache_letsencrypt
---
- name: Restart apache2
ansible.builtin.service:
- name: restart apache2
service:
name: apache2
state: restarted

roles/apache_letsencrypt/tasks

@@ -7,7 +7,7 @@
mode: "0644"
owner: root
group: root
notify: Restart apache2
notify: restart apache2
- name: Add symlink to enable configuration
ansible.builtin.file:
@@ -17,4 +17,4 @@
mode: "0644"
owner: root
group: root
notify: Restart apache2
notify: restart apache2

roles/cleanuri/defaults

@@ -19,7 +19,7 @@ cleanuri_amqp_canonizer: "canonizer"
cleanuri_amqp_retrieval: "extractor"
# Docker images
cleanuri_image_webui: mrtux/cleanuri-webui:0.2.2
cleanuri_image_apigateway: mrtux/cleanuri-apigateway:0.3.2
cleanuri_image_canonizer: mrtux/cleanuri-canonizer:0.5.3
cleanuri_image_extractor: mrtux/cleanuri-extractor:0.5.3
cleanuri_image_webui: mrtux/cleanuri-webui:0.2.1
cleanuri_image_apigateway: mrtux/cleanuri-apigateway:0.3.1
cleanuri_image_canonizer: mrtux/cleanuri-canonizer:0.5.0
cleanuri_image_extractor: mrtux/cleanuri-extractor:0.5.0

roles/cleanuri/tasks

@@ -1,7 +1,7 @@
# Tasks for the cleanuri role
---
- name: Ensure CleanURI WebUI is running
community.docker.docker_container:
docker_container:
name: cleanuri-webui
image: "{{ cleanuri_image_webui }}"
pull: true
@@ -15,7 +15,7 @@
REACT_APP_API_GATEWAY: "https://{{ cleanuri_api_domain }}"
- name: Setup proxy site for the CleanURI WebUI
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: "{{ cleanuri_ui_domain }}"
@@ -23,7 +23,7 @@
- name: Ensure CleanURI API Gateway is running
community.docker.docker_container:
docker_container:
name: cleanuri-apigateway
image: "{{ cleanuri_image_apigateway }}"
pull: true
@@ -42,7 +42,7 @@
GATEWAY_TASK_RK: "{{ cleanuri_amqp_canonizer }}"
- name: Ensure CleanURI Canonizer is running
community.docker.docker_container:
docker_container:
name: cleanuri-canonizer
image: "{{ cleanuri_image_canonizer }}"
pull: true
@@ -59,7 +59,7 @@
EXTRACTOR_TASK_RK: "{{ cleanuri_amqp_retrieval }}"
- name: Ensure CleanURI Extractor is running
community.docker.docker_container:
docker_container:
name: cleanuri-extractor
image: "{{ cleanuri_image_extractor }}"
pull: true
@@ -76,7 +76,7 @@
- name: Setup proxy site the CleanURI API Gateway
ansible.builtin.include_role:
include_role:
name: setup_http_site_proxy
vars:
site_name: "{{ cleanuri_api_domain }}"

roles/dd24_dyndns_cron/handlers

@@ -1,6 +1,6 @@
# handlers file for cron-dd24-dyndns
---
- name: Reload cron
- name: reload cron
ansible.builtin.shell:
cmd: service cron reload
warn: no

roles/dd24_dyndns_cron/tasks

@@ -1,6 +1,6 @@
---
- name: Make sure cron and curl are installed
ansible.builtin.apt:
apt:
name:
- cron
- curl
@@ -13,6 +13,6 @@
owner: root
group: root
mode: "0644"
notify: Reload cron
notify: reload cron
# There is ansible.builtin.cron, but this makes configuration much
# more complicated, so we stick to the template.

roles/desec_dyndns_cron/handlers

@@ -1,6 +1,6 @@
# handlers file for desec_dyndns_cron
---
- name: Reload cron
- name: reload cron
ansible.builtin.shell:
cmd: service cron reload
warn: no

roles/desec_dyndns_cron/tasks

@@ -1,6 +1,6 @@
---
- name: Make sure cron and curl are installed
ansible.builtin.apt:
apt:
name:
- cron
- curl
@@ -13,6 +13,6 @@
owner: root
group: root
mode: "0644"
notify: Reload cron
notify: reload cron
# There is ansible.builtin.cron, but this makes configuration much
# more complicated, so we stick to the template.

roles/nfs_host/handlers

@@ -1,3 +1,3 @@
---
- name: Reload nfs
ansible.builtin.command: 'exportfs -ra'
- name: reload nfs
command: 'exportfs -ra'

roles/nfs_host/tasks

@@ -14,7 +14,7 @@
state: present
fs_type: ext4
- name: Ensure nfs mountpoints exist
- name: ensure nfs mountpoints exist
ansible.builtin.file:
path: "{{ item.directory }}"
state: directory
@@ -30,11 +30,11 @@
fstype: ext4
state: present
- name: Put /etc/exports in place from template
- name: template /etc/exports
ansible.builtin.template:
src: templates/exports.j2
dest: "/etc/exports"
notify: Reload nfs
notify: reload nfs
- name: Ensure nfs is running.
ansible.builtin.service: "name=nfs-kernel-server state=started enabled=yes"
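The final task uses the inline key=value shorthand; it is equivalent to the dict form used elsewhere in the repository, e.g.:

    - name: Ensure nfs is running.
      ansible.builtin.service:
        name: nfs-kernel-server
        state: started
        enabled: yes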

roles/nginx_https_ingress/handlers

@@ -1,7 +1,7 @@
# Handlers für nginx-https-proxy
---
- name: Restart nginx
ansible.builtin.service:
- name: restart nginx
service:
name: nginx
state: restarted
enabled: yes

roles/nginx_https_ingress/tasks

@@ -18,7 +18,7 @@
# for SSL passthrough.
- name: Add nginx apt-key
ansible.builtin.apt_key:
apt_key:
url: https://nginx.org/keys/nginx_signing.key
state: present
@@ -56,7 +56,7 @@
owner: root
group: root
mode: '0644'
notify: Restart nginx
notify: restart nginx
- name: Create directory for dehydrated forwardings
ansible.builtin.file:
@@ -74,7 +74,7 @@
group: root
mode: '0644'
loop: "{{ ingress }}"
notify: Restart nginx
notify: restart nginx
- name: Setup nginx configuration
# Note the order here: The nginx configuration _needs_ he dehydrated-hosts
@@ -86,4 +86,4 @@
owner: root
group: root
mode: '0644'
notify: Restart nginx
notify: restart nginx

roles/setup_http_site_forward/handlers

@@ -1,5 +1,5 @@
---
- name: Restart apache2
ansible.builtin.service:
- name: restart apache2
service:
name: apache2
state: restarted

roles/setup_http_site_forward/tasks

@@ -1,12 +1,12 @@
---
- name: Add or update Apache2 site
ansible.builtin.template:
template:
src: templates/apache-docker-forward-site.j2
dest: /etc/apache2/sites-available/{{ site_name }}.conf
notify: Restart apache2
notify: restart apache2
- name: Activate Apache2 site
ansible.builtin.command: a2ensite {{ site_name }}
command: a2ensite {{ site_name }}
args:
creates: /etc/apache2/sites-enabled/{{ site_name }}.conf
notify: Restart apache2
notify: restart apache2

roles/setup_http_site_proxy/handlers

@@ -1,5 +1,5 @@
---
- name: Restart apache2
ansible.builtin.service:
- name: restart apache2
service:
name: apache2
state: restarted

roles/setup_http_site_proxy/tasks

@@ -1,13 +1,13 @@
---
- name: Add or update Apache2 site
ansible.builtin.template:
template:
src: templates/apache-docker-proxy-site.j2
dest: /etc/apache2/sites-available/{{ site_name }}.conf
mode: "0644"
notify: Restart apache2
notify: restart apache2
- name: Activate Apache2 site
ansible.builtin.command: a2ensite {{ site_name }}
command: a2ensite {{ site_name }}
args:
creates: /etc/apache2/sites-enabled/{{ site_name }}.conf
notify: Restart apache2
notify: restart apache2

roles/users/handlers

@@ -1,3 +1,3 @@
---
- name: Update aliases
ansible.builtin.shell: which newaliases && newaliases || true
shell: which newaliases && newaliases || true

roles/users/tasks

@@ -1,12 +1,12 @@
---
- name: Ensure sudo is installed
ansible.builtin.package:
package:
name:
- sudo
state: present
- name: Configure group sudo for sudoers without password
ansible.builtin.lineinfile:
lineinfile:
path: /etc/sudoers
state: present
regexp: '^%sudo\s'
@@ -14,7 +14,7 @@
validate: /usr/sbin/visudo -cf %s
- name: Add users | create users' shell and home dir
ansible.builtin.user:
user:
name: "{{ item.logname }}"
shell: /bin/bash
createhome: yes
@@ -22,7 +22,7 @@
with_items: "{{ users }}"
- name: Add authorized keys for user
ansible.posix.authorized_key:
authorized_key:
user: "{{ item.0.logname }}"
key: "{{ item.1 }}"
state: present
@@ -32,7 +32,7 @@
- skip_missing: true
- name: Place user in sudo group
ansible.builtin.user:
user:
name: "{{ item.logname }}"
groups: [sudo]
append: yes
@@ -40,12 +40,12 @@
with_items: "{{ users }}"
- name: Check if /etc/aliases exists
ansible.builtin.stat:
stat:
path: /etc/aliases
register: aliases
- name: Set system email alias
ansible.builtin.lineinfile:
lineinfile:
path: /etc/aliases
state: present
regexp: "^{{ item.logname }}:"
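The authorized-keys task above iterates with subelements, so item.0 is one user entry and item.1 one of that user's keys. A sketch of the expected users variable shape (the name of the key list field is outside this hunk and therefore assumed):

    users:
      - logname: jdoe                    # illustrative entry
        ssh_pub:                         # assumed subelements field
          - "ssh-ed25519 AAAA... jdoe@example.org"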

setup-ssh.yml

@@ -1,21 +1,21 @@
---
- name: Configure local ssh to access n39 hosts
- name: configure local ssh to access n39 hosts
hosts: localhost
tasks:
- name: Ensure $HOME/.ssh/config.d/ dir is present
- name: ensure {{ lookup('env', 'HOME') }}/.ssh/config.d/ dir is present
ansible.builtin.file:
path: "{{ lookup('env', 'HOME') }}/.ssh/config.d/"
state: directory
delegate_to: localhost
- name: Put ssh config for access to n39 internal systems in place
- name: template ssh config for access to internal systems
ansible.builtin.template:
src: templates/ssh_config.j2
dest: "{{ lookup('env', 'HOME') }}/.ssh/config.d/n39_config"
delegate_to: localhost
- name: Ensure that n39 access config is included
- name: ensure that n39 access config is included
ansible.builtin.lineinfile:
path: ~/.ssh/config
insertbefore: BOF
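The diff breaks off before the inserted line itself; judging from the task names, the lineinfile presumably prepends an Include for the generated file, along these lines (OpenSSH Include semantics, exact line assumed):

    # first line of ~/.ssh/config
    Include config.d/n39_config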