# netz39-infra-ansible/pottwal.yml
---
2020-12-10 22:15:09 +01:00
- hosts: pottwal.n39.eu
become: true
vars:
ansible_python_interpreter: /usr/bin/python3
data_dir: "/srv/data"
openhab_image: openhab/openhab:2.5.11
openhab_data: "{{ data_dir }}/openhab"
openhab_host_port: 8081
openhab_configuration_source: https://github.com/netz39/n39-openhab.git
openhab_configuration_version: master
2022-01-06 12:03:36 +01:00
gitea_host_port: 9091
uritools_host_port: 8080
2021-04-21 17:52:19 +02:00
entities_validation_svc_host_port: 8082
2021-04-21 22:29:31 +02:00
shlink_host_port: 8083
shlink_domain_name: sl.n39.eu
2022-06-26 22:54:00 +02:00
hedgedoc_host_port: 8084
hedgedoc_image: quay.io/hedgedoc/hedgedoc:1.9.3
2022-07-04 18:38:22 +02:00
openldap_image_version: 1.5.0
openldap_data: "{{ data_dir }}/openldap"
openldap_domain: "ldap.n39.eu"
2020-12-10 22:15:09 +01:00
roles:
2020-12-28 22:53:53 +01:00
- role: docker_setup
vars:
docker_data_root: "/srv/docker"
- role: apache
2020-12-10 22:15:09 +01:00
tasks:
2022-01-06 12:03:36 +01:00
- name: Check if gitea data dir exists
ansible.builtin.stat:
path: "{{ data_dir }}/gitea"
register: gitea_dir
- name: Fail if gitea data dir does not exist
ansible.builtin.fail:
msg: "Gitea data dir is missing, please restore from the backup!"
when: not gitea_dir.stat.exists
2022-01-06 12:03:36 +01:00
# If port 2222 is changed here, it must also be adapted
# in the gitea config file (see application volume)!!
- name: Setup the docker container for gitea
docker_container:
name: gitea
2022-06-20 01:54:01 +02:00
image: "gitea/gitea:1.16.8"
pull: true
state: started
restart_policy: unless-stopped
detach: yes
ports:
# - 127.0.0.1:{{ gitea_host_port }}:3000
- "{{ gitea_host_port }}:3000"
- 2222:2222
env:
APP_NAME="Netz39 Gitea"
RUN_MODE="prod"
SSH_DOMAIN="gitea.n39.eu"
SSH_PORT="2222"
SSH_START_SERVER="false"
ROOT_URL="https://gitea.n39.eu"
DISABLE_REGISTRATION="true"
USER_UID=1000
USER_GID=1000
volumes:
- "{{ data_dir }}/gitea:/data:rw"
2022-01-06 12:03:36 +01:00
- name: Setup proxy site gitea.n39.eu
include_role:
name: setup-http-site-proxy
vars:
site_name: "gitea.n39.eu"
proxy_port: "{{ gitea_host_port }}"
2022-01-06 12:03:36 +01:00
- name: Ensure apt-cacher container is running
docker_container:
name: apt_cacher_ng
image: "mrtux/apt-cacher-ng"
pull: true
state: started
restart_policy: unless-stopped
detach: yes
ports:
- 3142:3142
- name: Ensure the openhab directories exist
file:
path: "{{ item }}"
state: directory
with_items:
- "{{ openhab_data }}/addons"
- "{{ openhab_data }}/conf"
- "{{ openhab_data }}/userdata"
- name: Clone or update configuration
git:
repo: "{{ openhab_configuration_source }}"
version: "{{ openhab_configuration_version }}"
dest: "{{ openhab_data }}/conf"
clone: yes
update: yes
- name: ensure openhab is up and running
docker_container:
name: openhab
image: "{{ openhab_image }}"
pull: true
state: started
detach: yes
interactive: yes
tty: yes
ports:
- "{{ openhab_host_port }}:8080"
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- "{{ openhab_data }}/addons:/openhab/addons:rw"
- "{{ openhab_data }}/conf:/openhab/conf:rw"
- "{{ openhab_data }}/userdata:/openhab/userdata:rw"
keep_volumes: yes
restart_policy: unless-stopped
env: EXTRA_JAVA_OPTS="-Duser.timezone=Europe/Berlin"
- name: Setup proxy site openhab.n39.eu
include_role:
name: setup-http-site-proxy
vars:
site_name: openhab.n39.eu
proxy_port: "{{ openhab_host_port }}"
- name: Ensure container for URI tools is running
docker_container:
name: uritools
image: mrtux/clean_uri
pull: true
state: started
detach: yes
ports:
- "{{ uritools_host_port }}:8080"
restart_policy: unless-stopped
- name: Setup proxy site uritools.n39.eu
include_role:
name: setup-http-site-proxy
vars:
site_name: uritools.n39.eu
proxy_port: "{{ uritools_host_port }}"
- name: Ensure container for entities validation service is running
docker_container:
name: entities_validation_svc
image: netz39/entities_validation_svc:v1.0.0
pull: true
state: started
detach: yes
ports:
- "{{ entities_validation_svc_host_port }}:8080"
restart_policy: unless-stopped
- name: Setup proxy site entities-validation.svc.n39.eu
include_role:
name: setup-http-site-proxy
vars:
site_name: entities-validation.svc.n39.eu
proxy_port: "{{ entities_validation_svc_host_port }}"
- name: Ensure container for shlink is running
docker_container:
name: shlink
image: shlinkio/shlink:2.6.2
pull: true
state: started
detach: yes
ports:
- "{{ shlink_host_port }}:8080"
restart_policy: unless-stopped
env:
SHORT_DOMAIN_HOST: "{{ shlink_domain_name }}"
SHORT_DOMAIN_SCHEMA: https
GEOLITE_LICENSE_KEY: "{{ shlink_geolite_license_key }}"
- name: Setup proxy site {{ shlink_domain_name }}
include_role:
name: setup-http-site-proxy
vars:
site_name: "{{ shlink_domain_name }}"
2022-01-30 11:48:51 +01:00
proxy_port: "{{ shlink_host_port }}"
2022-06-26 22:54:00 +02:00
- name: Check if hedgedoc data dir exists
ansible.builtin.stat:
path: "{{ data_dir }}/hedgedoc"
2022-06-26 22:54:00 +02:00
register: hedgedoc_dir
- name: Fail if hedgedoc data dir does not exist
ansible.builtin.fail:
msg: "hedgedoc data dir is missing, please restore from the backup!"
when: not hedgedoc_dir.stat.exists
- name: Ensure the hedgedoc directories exist
file:
path: "{{ item }}"
state: directory
with_items:
- "{{ data_dir }}/hedgedoc/data/database"
- "{{ data_dir }}/hedgedoc/data/uploads"
2022-06-26 22:54:00 +02:00
- name: Setup docker network
docker_network:
name: hedgedocnet
state: present
internal: true
- name: Install HedgeDoc database container
docker_container:
name: hedgedocdb
image: "postgres:11.6-alpine"
pull: true
state: started
restart_policy: unless-stopped
detach: yes
env:
POSTGRES_USER: "hedgedoc"
2022-06-26 22:54:00 +02:00
POSTGRES_PASSWORD: "{{ hedgedoc_postgres_password }}"
POSTGRES_DB: "hedgedoc"
2022-06-26 22:54:00 +02:00
volumes:
- "{{ data_dir }}/hedgedoc/data/database:/var/lib/postgresql/data"
2022-06-26 22:54:00 +02:00
networks:
- name: hedgedocnet
- name: Ensure container for hedgedoc is running
docker_container:
name: hedgedoc
image: "{{ hedgedoc_image }}"
pull: true
state: started
detach: yes
ports:
- "{{ hedgedoc_host_port }}:3000"
restart_policy: unless-stopped
env:
NODE_ENV: "production"
2022-06-28 21:43:26 +02:00
CMD_PROTOCOL_USESSL: "true"
CMD_DOMAIN: "pad.n39.eu"
2022-06-28 21:43:26 +02:00
CMD_URL_ADDPORT: "false"
CMD_DB_HOST: "hedgedocdb"
CMD_DB_PORT: "5432"
CMD_DB_DIALECT: "postgres"
CMD_DB_DATABASE: "hedgedoc"
CMD_DB_USERNAME: "hedgedoc"
2022-06-26 22:54:00 +02:00
CMD_DB_PASSWORD: "{{ hedgedoc_postgres_password }}"
volumes:
- "{{ data_dir }}/hedgedoc/data/uploads:/hedgedoc/public/uploads"
2022-06-26 22:54:00 +02:00
networks:
- name: hedgedocnet
- name: Setup proxy site pad.n39.eu
include_role:
name: setup-http-site-proxy
vars:
site_name: pad.n39.eu
proxy_port: "{{ hedgedoc_host_port }}"
2022-07-04 18:38:22 +02:00
- name: Setup dehydrated challenge endpoint for {{ openldap_domain }}
include_role:
name: setup-http-dehydrated
vars:
site_name: "{{ openldap_domain }}"
- name: Ensure openLDAP directories are present.
file:
path: "{{ item }}"
state: directory
with_items:
- "{{ openldap_data }}/ldap"
- "{{ openldap_data }}/slapd"
- "{{ openldap_data }}/ldif"
2022-07-04 18:38:22 +02:00
- "{{ dehydrated_certs_dir }}/certs/{{ openldap_domain }}"
- name: Ensure container for openLDAP is running.
docker_container:
name: openLDAP
image: "osixia/openldap:{{ openldap_image_version }}"
detach: yes
state: started
restart_policy: unless-stopped
container_default_behavior: no_defaults
pull: true
env:
LDAP_LOG_LEVEL: "256"
LDAP_ORGANISATION: "{{ldap_org}}"
LDAP_DOMAIN: "{{ldap_domain}}"
LDAP_BASE_DN: "{{ldap_base_dn}}"
LDAP_READONLY_USER: "false"
LDAP_ADMIN_PASSWORD: "{{ldap_admin_password}}"
LDAP_CONFIG_PASSWORD: "{{ldap_config_password}}"
LDAP_RFC2307BIS_SCHEMA: "true"
LDAP_TLS_CIPHER_SUITE: "SECURE256:-VERS-SSL3.0"
LDAP_REPLICATION: "{{ldap_replication_enable}}"
LDAP_REPLICATION_CONFIG_SYNCPROV: "{{ldap_replication_config_syncprov}}"
LDAP_REPLICATION_DB_SYNCPROV: "{{ldap_replication_db_syncprov}}"
LDAP_REPLICATION_HOSTS: "{{ldap_replication_hosts}}"
KEEP_EXISTING_CONFIG: "false"
LDAP_REMOVE_CONFIG_AFTER_SETUP: "true"
published_ports:
- "{{ldap_ip}}:389:389" # unencrypted/STARTTLS
- "{{ldap_ip}}:636:636" # SSL
2022-07-04 18:38:22 +02:00
volumes:
- "{{ openldap_data }}/ldap:/var/lib/ldap"
- "{{ openldap_data }}/slapd:/etc/ldap/slapd.d"
2022-07-04 18:38:22 +02:00
- "{{ dehydrated_certs_dir }}/certs/{{ openldap_domain }}:/container/service/slapd/assets/certs"
- "{{ openldap_data }}/ldif/custom-element.ldif:/container/service/slapd/assets/config/bootstrap/ldif/custom/01_netz39.ldif"
timeout: 500
# For replication to work correctly, domainname and hostname must be
# set correctly so that "hostname"."domainname" equates to the
# fully-qualified domain name for the host.
domainname: "{{ldap_domainname}}"
hostname: "{{ldap_hostname}}"
command: "--copy-service --loglevel debug"
- name: Allow access to openLDAP from local docker container [1/2]
become: true
community.general.ufw:
rule: allow
port: '389'
proto: tcp
from: "{{ item }}"
comment: LDAP Docker Access
loop: "{{ docker_ip_ranges }}"
- name: Allow access to openLDAP from local docker container [2/2]
become: true
community.general.ufw:
rule: allow
port: '636'
proto: tcp
from: "{{ item }}"
comment: LDAP Docker Access
loop: "{{ docker_ip_ranges }}"
2022-07-04 18:38:22 +02:00
handlers: