# netz39-infra-ansible/host-pottwal.yml
---
- hosts: pottwal.n39.eu
  become: true

  roles:
    # role 'docker_setup' applied through group 'docker_host'
    - role: apache
    - role: apache_letsencrypt  # Uses configuration from dehydrated setup
    - role: 24367dfa.dehydrated
      vars:
        dehydrated_contact_email: "{{ server_admin }}"
        dehydrated_domains:
          - name: "{{ forgejo_domain_name }}"
          - name: uritools.n39.eu
          - name: uritools-api.n39.eu
          - name: "{{ shlink_domain_name }}"
          - name: "{{ hedgedoc_domain_name }}"
          - name: "{{ prosody_domain_name }}"
            alternate_names:
              - conference.jabber.n39.eu
            deploy_cert_hook: "docker exec prosody prosodyctl --root cert import ${DOMAIN} /var/lib/dehydrated/certs"
          - name: "{{ redmine_domain_name }}"
          - name: "{{ influxdb_domain_name }}"
          - name: "{{ uptimekuma_domain_name }}"
          - name: "{{ grafana_domain_name }}"
          - name: "{{ homebox_domain_name }}"
          - name: spaceapi.n39.eu
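    # Each entry above becomes one certificate; alternate_names end up as
    # SANs, and deploy_cert_hook presumably runs after (re)issuance, here
    # importing the fresh certificate into the prosody container.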
    - role: penguineer.dehydrated_cron
    - role: dd24_dyndns_cron
      # variables are set in the inventory
    - role: desec_dyndns_cron
      # variables are set in the inventory
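    # Illustrative only, the real variable names live in the inventory and
    # may differ; dyndns cron roles like these typically expect something
    # along the lines of:
    #   dyndns_domain: dyn.n39.eu
    #   dyndns_token: "{{ vault_dyndns_token }}"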
    - role: cleanuri
      vars:
        cleanuri_ui_domain: uritools.n39.eu
        cleanuri_ui_host_port: 8090
        cleanuri_api_domain: uritools-api.n39.eu
        cleanuri_api_host_port: 8091
        # RabbitMQ setup can be found in the inventory
  tasks:
    - name: Check if forgejo data dir exists
      ansible.builtin.stat:
        path: "{{ data_dir }}/forgejo"
      register: forgejo_dir
      tags: ["forgejo"]

    - name: Fail if forgejo data dir does not exist
      ansible.builtin.fail:
        msg: "Forgejo data dir is missing, please restore from the backup!"
      when: not forgejo_dir.stat.exists
      tags: ["forgejo"]

    # If port 2222 is changed here, it must also be adapted
    # in the forgejo config file (see application volume)!!
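    # For reference, the matching stanza in the mounted app.ini would look
    # roughly like this (sketch; the actual config file is not managed here):
    #   [server]
    #   SSH_DOMAIN = <forgejo_domain_name>
    #   SSH_PORT = 2222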
    - name: Setup the docker container for forgejo
      docker_container:
        name: forgejo
        image: "{{ forgejo_image }}"
        pull: true
        state: started
        restart_policy: unless-stopped
        detach: yes
        ports:
          - 127.0.0.1:{{ forgejo_host_port }}:3000
          - "{{ forgejo_ssh_port }}:2222"
        env:
          TZ: "{{ timezone }}"
          APP_NAME: "Netz39 Git"
          RUN_MODE: "prod"
          SSH_DOMAIN: "{{ forgejo_domain_name }}"
          SSH_PORT: "2222"
          SSH_START_SERVER: "false"
          ROOT_URL: "https://{{ forgejo_domain_name }}"
          DISABLE_REGISTRATION: "true"
          USER_UID: "1000"
          USER_GID: "1000"
        volumes:
          - "{{ data_dir }}/forgejo:/data:rw"
      tags: ["forgejo"]

    - name: Setup proxy site "{{ forgejo_domain_name }}"
      include_role:
        name: setup_http_site_proxy
      vars:
        site_name: "{{ forgejo_domain_name }}"
        proxy_port: "{{ forgejo_host_port }}"
      tags: ["forgejo"]
    - name: Ensure apt-cacher container is running
      docker_container:
        name: apt_cacher_ng
        image: mrtux/apt-cacher-ng:latest
        pull: true
        state: started
        restart_policy: unless-stopped
        detach: yes
        ports:
          - 3142:3142
        env:
          TZ: "{{ timezone }}"

    - name: Setup shlink docker network
      docker_network:
        name: shlinknet
        state: present
        internal: true
      tags:
        - shlink
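    # internal: true means containers on shlinknet get no outbound
    # connectivity; the database is only reachable from other containers
    # attached to this network.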
    - name: Ensure shlink data dir exists
      ansible.builtin.file:
        path: "{{ data_dir }}/shlink/data/database"
        state: directory
        mode: "0755"
      tags:
        - shlink

    - name: Ensure shlink database container is running
      docker_container:
        name: shlinkdb
        image: postgres:16.4-alpine
        pull: true
        state: started
        restart_policy: unless-stopped
        detach: yes
        env:
          TZ: "{{ timezone }}"
          POSTGRES_USER: "shlink"
          POSTGRES_PASSWORD: "{{ shlink_postgres_password }}"
          POSTGRES_DB: "shlink"
        volumes:
          - "{{ data_dir }}/shlink/data/database:/var/lib/postgresql/data"
        networks:
          - name: shlinknet
      tags:
        - shlink

    - name: Ensure container for shlink is running
      docker_container:
        name: shlink
        image: "{{ shlink_image }}"
        pull: true
        state: started
        detach: yes
        ports:
          - "127.0.0.1:{{ shlink_host_port }}:8080"
        restart_policy: unless-stopped
        env:
          TZ: "{{ timezone }}"
          DEFAULT_DOMAIN: "{{ shlink_domain_name }}"
          INITIAL_API_KEY: "{{ shlink_initial_api_key }}"
          DB_DRIVER: "postgres"
          DB_HOST: shlinkdb
          DB_NAME: "shlink"
          DB_USER: "shlink"
          DB_PASSWORD: "{{ shlink_postgres_password }}"
        volumes:
- "{{ data_dir }}/shlink/database.sqlite:/etc/shlink/datadatabase.sqlite:rw"
        networks_cli_compatible: false
        comparisons:
          networks: allow_more_present
        networks:
          - name: shlinknet
      tags:
        - shlink

    - name: Setup proxy site {{ shlink_domain_name }}
      include_role:
        name: setup_http_site_proxy
      vars:
        site_name: "{{ shlink_domain_name }}"
        proxy_port: "{{ shlink_host_port }}"
      tags:
        - shlink
    - name: Check if prosody data dir exists
      ansible.builtin.stat:
        path: "{{ prosody_data_dir }}"
      register: prosody_dir

    - name: Fail if prosody data dir does not exist
      ansible.builtin.fail:
        msg: "Prosody data dir is missing, please restore from the backup!"
      when: not prosody_dir.stat.exists

    - name: Ensure prosody config dir exists
      ansible.builtin.file:
        path: "{{ prosody_config_dir }}"
        state: directory
        mode: "0755"

    - name: Ensure prosody certs dir exists
      ansible.builtin.file:
        path: "{{ prosody_config_dir }}/certs"
        state: directory
        # group 'ssl-cert' inside the container
        group: "101"
        mode: "0750"

    - name: Ensure prosody conf.avail dir exists
      ansible.builtin.file:
        path: "{{ prosody_config_dir }}/conf.avail"
        state: directory
        mode: "0755"

    - name: Ensure prosody conf.d dir exists
      ansible.builtin.file:
        path: "{{ prosody_config_dir }}/conf.d"
        state: directory
        mode: "0755"

    - name: Ensure prosody main config file is in place
      ansible.builtin.template:
        src: "templates/prosody/prosody.cfg.lua.j2"
        dest: "{{ prosody_config_dir }}/prosody.cfg.lua"
        mode: "0644"
      notify:
        - Restart prosody

    - name: "Ensure prosody config file is in place: {{ prosody_domain_name }}"
      ansible.builtin.copy:
        src: "files/prosody/{{ prosody_domain_name }}.cfg.lua"
        dest: "{{ prosody_config_dir }}/conf.avail/{{ prosody_domain_name }}.cfg.lua"
        mode: "0644"
      notify:
        - Restart prosody

    - name: "Ensure prosody config symlink exists: {{ prosody_domain_name }}"
      ansible.builtin.file:
        src: "../conf.avail/{{ prosody_domain_name }}.cfg.lua"
        dest: "{{ prosody_config_dir }}/conf.d/{{ prosody_domain_name }}.cfg.lua"
        state: link
      notify:
        - Restart prosody

    - name: Ensure container for prosody XMPP server is running
      docker_container:
        name: prosody
        image: "{{ prosody_image }}"
        pull: true
        state: started
        detach: true
        restart_policy: unless-stopped
        ports:
          # the container offers more ports, depending on the actual prosody configuration
          - 5222:5222  # xmpp-client
          - 5269:5269  # xmpp-server
        volumes:
          - "{{ prosody_config_dir }}:/etc/prosody:ro"
          - "{{ prosody_config_dir }}/certs:/etc/prosody/certs:rw"
          - "{{ prosody_data_dir }}/var/lib/prosody:/var/lib/prosody:rw"
          - "{{ prosody_data_dir }}/var/log/prosody:/var/log/prosody:rw"
          - "{{ dehydrated_certs_dir }}/{{ prosody_domain_name }}:/var/lib/dehydrated/certs/{{ prosody_domain_name }}:ro"
        env:
          TZ: "{{ timezone }}"
    - name: Ensure container for static XMPP website is running
      docker_container:
        name: jabber-static-website
        image: "{{ prosody_web_image }}"
        pull: true
        state: started
        detach: true
        restart_policy: unless-stopped
        env:
          TZ: "{{ timezone }}"
          SERVER_PORT: "80"
          SERVER_ROOT: "/public"
        ports:
          - "127.0.0.1:{{ jabber_host_port }}:80"
        volumes:
          - "{{ prosody_data_dir }}/var/www:/public:ro"
      tags:
        - prosody-web

    - name: Setup proxy site {{ prosody_domain_name }}
      # point to the static website for now
      include_role:
        name: setup_http_site_proxy
      vars:
        site_name: "{{ prosody_domain_name }}"
        proxy_port: "{{ jabber_host_port }}"
      tags:
        - prosody-web
    - name: Check if hedgedoc data dir exists
      ansible.builtin.stat:
        path: "{{ data_dir }}/hedgedoc"
      register: hedgedoc_dir
      tags:
        - hedgedoc

    - name: Fail if hedgedoc data dir does not exist
      ansible.builtin.fail:
        msg: "HedgeDoc data dir is missing, please restore from the backup!"
      when: not hedgedoc_dir.stat.exists
      tags:
        - hedgedoc

    - name: Ensure the hedgedoc directories exist
      file:
        path: "{{ item.path }}"
        mode: "{{ item.mode }}"
        state: directory
      with_items:
        - path: "{{ data_dir }}/hedgedoc/data/database"
          mode: "0700"
        - path: "{{ data_dir }}/hedgedoc/data/uploads"
          mode: "0755"
      tags:
        - hedgedoc

    - name: Setup hedgedoc docker network
      docker_network:
        name: hedgedocnet
        state: present
        internal: true
      tags:
        - hedgedoc

    - name: Install HedgeDoc database container
      docker_container:
        name: hedgedocdb
        image: "{{ hedgedoc_db_image }}"
        pull: true
        state: started
        restart_policy: unless-stopped
        detach: yes
        env:
          TZ: "{{ timezone }}"
          POSTGRES_USER: "hedgedoc"
          POSTGRES_PASSWORD: "{{ hedgedoc_postgres_password }}"
          POSTGRES_DB: "hedgedoc"
        volumes:
          - "{{ data_dir }}/hedgedoc/data/database:/var/lib/postgresql/data"
        networks:
          - name: hedgedocnet
      tags:
        - hedgedoc

    - name: Ensure container for hedgedoc is running
      docker_container:
        name: hedgedoc
        image: "{{ hedgedoc_image }}"
        pull: true
        state: started
        detach: yes
        ports:
          - "127.0.0.1:{{ hedgedoc_host_port }}:3000"
        restart_policy: unless-stopped
        env:
          TZ: "{{ timezone }}"
          NODE_ENV: "production"
          CMD_PROTOCOL_USESSL: "true"
          CMD_DOMAIN: "{{ hedgedoc_domain_name }}"
          CMD_URL_ADDPORT: "false"
          CMD_DB_HOST: "hedgedocdb"
          CMD_DB_PORT: "5432"
          CMD_DB_DIALECT: "postgres"
          CMD_DB_DATABASE: "hedgedoc"
          CMD_DB_USERNAME: "hedgedoc"
          CMD_DB_PASSWORD: "{{ hedgedoc_postgres_password }}"
        volumes:
          - "{{ data_dir }}/hedgedoc/data/uploads:/hedgedoc/public/uploads"
        networks_cli_compatible: false
        comparisons:
          networks: allow_more_present
        networks:
          - name: hedgedocnet
      tags:
        - hedgedoc

    - name: Setup proxy site "{{ hedgedoc_domain_name }}"
      include_role:
        name: setup_http_site_proxy
      vars:
        site_name: "{{ hedgedoc_domain_name }}"
        proxy_port: "{{ hedgedoc_host_port }}"
      tags:
        - hedgedoc
    - name: Ensure the influxdb directories exist
      file:
        path: "{{ item }}"
        mode: "0700"
        state: directory
      with_items:
        - "{{ data_dir }}/influxdb"
        - "{{ data_dir }}/influxdb/data"
        - "{{ data_dir }}/influxdb/cfg"

    - name: Ensure container for influxdb is running
      docker_container:
        name: influxdb
        image: "{{ influxdb_image }}"
        pull: true
        state: started
        detach: yes
        ports:
          - "127.0.0.1:{{ influxdb_host_port }}:8086"
        restart_policy: unless-stopped
        env:
          TZ: "{{ timezone }}"
          DOCKER_INFLUXDB_INIT_USERNAME: "{{ influxdb_init_username }}"
          DOCKER_INFLUXDB_INIT_PASSWORD: "{{ influxdb_init_password }}"
          DOCKER_INFLUXDB_INIT_ORG: "{{ influxdb_org }}"
          DOCKER_INFLUXDB_INIT_BUCKET: default
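          # the DOCKER_INFLUXDB_INIT_* variables only take effect on the
          # first start against an empty data directory (automated setup)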
        volumes:
          - "{{ data_dir }}/influxdb/data:/var/lib/influxdb2"
          - "{{ data_dir }}/influxdb/cfg:/etc/influxdb2"

    - name: Setup proxy site {{ influxdb_domain_name }}
      include_role:
        name: setup_http_site_proxy
      vars:
        site_name: "{{ influxdb_domain_name }}"
        proxy_port: "{{ influxdb_host_port }}"
    # Expected setup for the data directory:
    #   file:      configuration.yml
    #   directory: mysql
    #   directory: files
    #   directory: themes
    - name: Check if redmine data dir exists
      ansible.builtin.stat:
        path: "{{ data_dir }}/redmine"
      register: redmine_dir
      tags:
        - redmine

    - name: Fail if redmine data dir does not exist
      ansible.builtin.fail:
        msg: "Redmine data dir is missing, please restore from the backup!"
      when: not redmine_dir.stat.exists
      tags:
        - redmine

    - name: Setup Redmine docker network
      docker_network:
        name: redminenet
        state: present
        internal: true
      tags:
        - redmine

    - name: Setup Redmine MySQL container
      docker_container:
        name: redminedb
        image: "{{ redmine_mysql_image }}"
        pull: true
        state: started
        restart_policy: unless-stopped
        detach: yes
        env:
          TZ: "{{ timezone }}"
          MYSQL_ROOT_PASSWORD: "{{ redmine_database_password }}"
          MYSQL_DATABASE: "{{ redmine_database }}"
        volumes:
          - "{{ data_dir }}/redmine/mysql:/var/lib/mysql"
          - "{{ data_dir }}/redmine/mysql-config:/etc/mysql/conf.d"
        networks:
          - name: redminenet
      tags:
        - redmine

    - name: Setup Redmine container
      docker_container:
        name: redmine
        image: "{{ redmine_image }}"
        pull: true
        state: started
        restart_policy: unless-stopped
        detach: yes
        ports:
          - "127.0.0.1:{{ redmine_host_port }}:3000"
        env:
          TZ: "{{ timezone }}"
          REDMINE_DB_MYSQL: redminedb
          REDMINE_DB_PASSWORD: "{{ redmine_database_password }}"
        volumes:
          - "{{ data_dir }}/redmine/configuration.yml:/usr/src/redmine/config/configuration.yml"
          - "{{ data_dir }}/redmine/files:/usr/src/redmine/files"
          - "{{ data_dir }}/redmine/themes:/usr/src/redmine/public/themes"
        networks_cli_compatible: false
        comparisons:
          networks: allow_more_present
        networks:
          - name: redminenet
      tags:
        - redmine

    - name: Setup proxy site "{{ redmine_domain_name }}"
      include_role:
        name: setup_http_site_proxy
      vars:
        site_name: "{{ redmine_domain_name }}"
        proxy_port: "{{ redmine_host_port }}"
      tags:
        - redmine
    - name: Ensure the uptime-kuma directories exist
      file:
        path: "{{ item }}"
        mode: "0755"
        state: directory
      with_items:
        - "{{ data_dir }}/uptime-kuma"
      tags:
        - uptimekuma

    - name: Ensure container for uptime-kuma is running
      docker_container:
        name: uptime-kuma
        image: "{{ uptimekuma_image }}"
        pull: true
        state: started
        detach: yes
        ports:
          - "127.0.0.1:{{ uptimekuma_host_port }}:3001"
        restart_policy: unless-stopped
        env:
          TZ: "{{ timezone }}"
        volumes:
          - "{{ data_dir }}/uptime-kuma:/app/data"
      tags:
        - uptimekuma

    - name: Setup proxy site "{{ uptimekuma_domain_name }}"
      include_role:
        name: setup_http_site_proxy
      vars:
        site_name: "{{ uptimekuma_domain_name }}"
        proxy_port: "{{ uptimekuma_host_port }}"
      tags:
        - uptimekuma
    - name: Ensure the grafana directories exist
      file:
        path: "{{ item.path }}"
        owner: "{{ item.owner | default('root') }}"
        mode: "{{ item.mode }}"
        state: directory
      with_items:
        - path: "{{ data_dir }}/grafana"
          mode: "0755"
        - path: "{{ data_dir }}/grafana/data"
          owner: 472
          mode: "0755"
        - path: "{{ data_dir }}/grafana/etc"
          mode: "0755"
      tags:
        - grafana

    - name: Ensure container for grafana is running
      docker_container:
        name: grafana
        image: "{{ grafana_image }}"
        pull: true
        state: started
        detach: yes
        restart_policy: unless-stopped
        ports:
          - "127.0.0.1:{{ grafana_host_port }}:3000"
        volumes:
          - "{{ data_dir }}/grafana/data:/var/lib/grafana"
          - "{{ data_dir }}/grafana/etc:/etc/grafana"
        env:
          TZ: "{{ timezone }}"
          GF_SECURITY_ADMIN_PASSWORD: "{{ grafana_admin_password }}"
          GF_USERS_ALLOW_SIGN_UP: "false"
          GF_INSTALL_PLUGINS: "flant-statusmap-panel,ae3e-plotly-panel"
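          # Plugins in GF_INSTALL_PLUGINS are downloaded when the container
          # starts, so the first start needs outbound network access. The
          # data dir above is owned by UID 472, the grafana user inside the
          # container.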
      tags:
        - grafana

    - name: Setup proxy site "{{ grafana_domain_name }}"
      include_role:
        name: setup_http_site_proxy
      vars:
        site_name: "{{ grafana_domain_name }}"
        proxy_port: "{{ grafana_host_port }}"
        proxy_preserve_host: "On"
      tags:
        - grafana
    - name: Ensure the homebox directories exist
      file:
        path: "{{ item.path }}"
        owner: "{{ item.owner | default('root') }}"
        mode: "{{ item.mode }}"
        state: directory
      with_items:
        - path: "{{ data_dir }}/homebox"
          mode: "0755"
        - path: "{{ data_dir }}/homebox/data"
          mode: "0755"
      tags:
        - homebox

    - name: Ensure container for homebox is running
      docker_container:
        name: homebox
        image: "{{ homebox_image }}"
        pull: true
        state: started
        detach: yes
        restart_policy: unless-stopped
        ports:
          - "127.0.0.1:{{ homebox_host_port }}:7745"
        volumes:
          - "{{ data_dir }}/homebox/data:/data"
        env:
          TZ: "{{ timezone }}"
          HBOX_LOG_LEVEL: "info"
          HBOX_LOG_FORMAT: "text"
          HBOX_WEB_MAX_UPLOAD_SIZE: "10"
      tags:
        - homebox

    - name: Setup proxy site {{ homebox_domain_name }}
      include_role:
        name: setup_http_site_proxy
      vars:
        site_name: "{{ homebox_domain_name }}"
        proxy_port: "{{ homebox_host_port }}"
        proxy_preserve_host: "On"
      tags:
        - homebox

    - name: Setup proxy site spaceapi.n39.eu
      template:
        src: templates/pottwal/spaceapi-apache-site.j2
        dest: /etc/apache2/sites-available/spaceapi.n39.eu.conf
        mode: "0644"
      vars:
        site_name: "spaceapi.n39.eu"
        proxy_preserve_host: "On"
      notify: Restart apache2
      tags:
        - spaceapi
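    # spaceapi gets a hand-written vhost template instead of the generic
    # setup_http_site_proxy role, presumably because it needs custom
    # directives; the task-level vars above are available inside the
    # Jinja2 template.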
    - name: Ensure renovate bot cronjob is present
      ansible.builtin.template:
        src: templates/pottwal/renovate-cron.j2
        dest: /etc/cron.hourly/renovate-bot
        mode: "0700"
      notify: reload cron
      tags:
        - renovate

  handlers:
    - name: Restart prosody
      community.docker.docker_container:
        name: prosody
        state: started
        restart: yes

    - name: Restart apache2
      service:
        name: apache2
        state: restarted

    - name: reload cron
      ansible.builtin.shell:
        cmd: service cron reload
      # Use the shell call because the task sometimes has problems finding the service state
      # service:
      #   name: cron
      #   state: restarted