---
- hosts: pottwal.n39.eu
  become: true

  vars:
    ansible_python_interpreter: /usr/bin/python3

    data_dir: "/srv/data"

    openhab_image: openhab/openhab:2.5.11
    openhab_data: "{{ data_dir }}/openhab"
    openhab_host_port: 8081
    openhab_configuration_source: https://github.com/netz39/n39-openhab.git
    openhab_configuration_version: master

    gitea_host_port: 9091

    uritools_host_port: 8080

    entities_validation_svc_host_port: 8082

    shlink_host_port: 8083
    shlink_domain_name: sl.n39.eu

    hedgedoc_host_port: 8084
    hedgedoc_image: quay.io/hedgedoc/hedgedoc:1.9.3

    openldap_image_version: 1.5.0
    openldap_data: "{{ data_dir }}/openldap"
    openldap_domain: "ldap.n39.eu"

  roles:
    - role: docker_setup
      vars:
        docker_data_root: "/srv/docker"
    - role: apache

  tasks:
    - name: Check if gitea data dir exists
      ansible.builtin.stat:
        path: "{{ data_dir }}/gitea"
      register: gitea_dir
    - name: Fail if gitea data dir does not exist
      ansible.builtin.fail:
        msg: "Gitea data dir is missing, please restore from the backup!"
      when: not gitea_dir.stat.exists

    # If port 2222 is changed here, it must also be adapted
    # in the gitea config file (see application volume)!!
    - name: Setup the docker container for gitea
      docker_container:
        name: gitea
        image: "gitea/gitea:1.16.8"
        pull: true
        state: started
        restart_policy: unless-stopped
        detach: yes
        ports:
          # - 127.0.0.1:{{ gitea_host_port }}:3000
          - "{{ gitea_host_port }}:3000"
          - 2222:2222
        env:
          APP_NAME: "Netz39 Gitea"
          RUN_MODE: "prod"
          SSH_DOMAIN: "gitea.n39.eu"
          SSH_PORT: "2222"
          SSH_START_SERVER: "false"
          ROOT_URL: "https://gitea.n39.eu"
          DISABLE_REGISTRATION: "true"
          USER_UID: "1000"
          USER_GID: "1000"
        volumes:
          - "{{ data_dir }}/gitea:/data:rw"
    - name: Setup proxy site gitea.n39.eu
      include_role:
        name: setup-http-site-proxy
      vars:
        site_name: "gitea.n39.eu"
        proxy_port: "{{ gitea_host_port }}"

    - name: Ensure apt-cacher container is running
      docker_container:
        name: apt_cacher_ng
        image: "mrtux/apt-cacher-ng"
        pull: true
        state: started
        restart_policy: unless-stopped
        detach: yes
        ports:
          - 3142:3142

    - name: Ensure the openhab directories exist
      file:
        path: "{{ item }}"
        state: directory
      with_items:
        - "{{ openhab_data }}/addons"
        - "{{ openhab_data }}/conf"
        - "{{ openhab_data }}/userdata"
    - name: Clone or update configuration
      git:
        repo: "{{ openhab_configuration_source }}"
        version: "{{ openhab_configuration_version }}"
        dest: "{{ openhab_data }}/conf"
        clone: yes
        update: yes
    - name: Ensure openhab is up and running
      docker_container:
        name: openhab
        image: "{{ openhab_image }}"
        pull: true
        state: started
        detach: yes
        interactive: yes
        tty: yes
        ports:
          - "{{ openhab_host_port }}:8080"
        volumes:
          - /etc/localtime:/etc/localtime:ro
          - /etc/timezone:/etc/timezone:ro
          - "{{ openhab_data }}/addons:/openhab/addons:rw"
          - "{{ openhab_data }}/conf:/openhab/conf:rw"
          - "{{ openhab_data }}/userdata:/openhab/userdata:rw"
        keep_volumes: yes
        restart_policy: unless-stopped
        env:
          EXTRA_JAVA_OPTS: "-Duser.timezone=Europe/Berlin"
    - name: Setup proxy site openhab.n39.eu
      include_role:
        name: setup-http-site-proxy
      vars:
        site_name: openhab.n39.eu
        proxy_port: "{{ openhab_host_port }}"

    - name: Ensure container for URI tools is running
      docker_container:
        name: uritools
        image: mrtux/clean_uri
        pull: true
        state: started
        detach: yes
        ports:
          - "{{ uritools_host_port }}:8080"
        restart_policy: unless-stopped
    - name: Setup proxy site uritools.n39.eu
      include_role:
        name: setup-http-site-proxy
      vars:
        site_name: uritools.n39.eu
        proxy_port: "{{ uritools_host_port }}"
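    # Entities validation service, exposed through the reverse proxy as
    # entities-validation.svc.n39.eu (see the proxy task below).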
    - name: Ensure container for entities validation service is running
      docker_container:
        name: entities_validation_svc
        image: netz39/entities_validation_svc:v1.0.0
        pull: true
        state: started
        detach: yes
        ports:
          - "{{ entities_validation_svc_host_port }}:8080"
        restart_policy: unless-stopped
    - name: Setup proxy site entities-validation.svc.n39.eu
      include_role:
        name: setup-http-site-proxy
      vars:
        site_name: entities-validation.svc.n39.eu
        proxy_port: "{{ entities_validation_svc_host_port }}"

    - name: Ensure container for shlink is running
      docker_container:
        name: shlink
        image: shlinkio/shlink:2.6.2
        pull: true
        state: started
        detach: yes
        ports:
          - "{{ shlink_host_port }}:8080"
        restart_policy: unless-stopped
        env:
          SHORT_DOMAIN_HOST: "{{ shlink_domain_name }}"
          SHORT_DOMAIN_SCHEMA: https
          GEOLITE_LICENSE_KEY: "{{ shlink_geolite_license_key }}"
    - name: Setup proxy site {{ shlink_domain_name }}
      include_role:
        name: setup-http-site-proxy
      vars:
        site_name: "{{ shlink_domain_name }}"
        proxy_port: "{{ shlink_host_port }}"

    - name: Check if hedgedoc data dir exists
      ansible.builtin.stat:
        path: "{{ data_dir }}/hedgedoc"
      register: hedgedoc_dir
    - name: Fail if hedgedoc data dir does not exist
      ansible.builtin.fail:
        msg: "hedgedoc data dir is missing, please restore from the backup!"
      when: not hedgedoc_dir.stat.exists

    - name: Ensure the hedgedoc directories exist
      file:
        path: "{{ item }}"
        state: directory
      with_items:
        - "{{ data_dir }}/hedgedoc/data/database"
        - "{{ data_dir }}/hedgedoc/data/uploads"
    - name: Setup docker network
      docker_network:
        name: hedgedocnet
        state: present
        internal: true
    - name: Install HedgeDoc database container
      docker_container:
        name: hedgedocdb
        image: "postgres:11.6-alpine"
        pull: true
        state: started
        restart_policy: unless-stopped
        detach: yes
        env:
          POSTGRES_USER: "hedgedoc"
          POSTGRES_PASSWORD: "{{ hedgedoc_postgres_password }}"
          POSTGRES_DB: "hedgedoc"
        volumes:
          - "{{ data_dir }}/hedgedoc/data/database:/var/lib/postgresql/data"
        networks:
          - name: hedgedocnet
    - name: Ensure container for hedgedoc is running
      docker_container:
        name: hedgedoc
        image: "{{ hedgedoc_image }}"
        pull: true
        state: started
        detach: yes
        ports:
          - "{{ hedgedoc_host_port }}:3000"
        restart_policy: unless-stopped
        env:
          NODE_ENV: "production"
          CMD_PROTOCOL_USESSL: "true"
          CMD_DOMAIN: "pad.n39.eu"
          CMD_URL_ADDPORT: "false"
          CMD_DB_HOST: "hedgedocdb"
          CMD_DB_PORT: "5432"
          CMD_DB_DIALECT: "postgres"
          CMD_DB_DATABASE: "hedgedoc"
          CMD_DB_USERNAME: "hedgedoc"
          CMD_DB_PASSWORD: "{{ hedgedoc_postgres_password }}"
        volumes:
          - "{{ data_dir }}/hedgedoc/data/uploads:/hedgedoc/public/uploads"
        networks:
          - name: hedgedocnet
    - name: Setup proxy site pad.n39.eu
      include_role:
        name: setup-http-site-proxy
      vars:
        site_name: pad.n39.eu
        proxy_port: "{{ hedgedoc_host_port }}"

    - name: Setup dehydrated challenge endpoint for {{ openldap_domain }}
      include_role:
        name: setup-http-dehydrated
      vars:
        site_name: "{{ openldap_domain }}"

    - name: Ensure openLDAP directories are present.
      file:
        path: "{{ item }}"
        state: directory
      with_items:
        - "{{ openldap_data }}/ldap"
        - "{{ openldap_data }}/slapd"
        - "{{ openldap_data }}/ldif"
        - "{{ dehydrated_certs_dir }}/certs/{{ openldap_domain }}"
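    # The openLDAP container below publishes LDAP (389, unencrypted/STARTTLS)
    # and LDAPS (636) on the configured LDAP host IP and bootstraps the custom
    # schema from the mounted LDIF file.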
    - name: Ensure container for openLDAP is running.
      docker_container:
        name: openLDAP
        image: "osixia/openldap:{{ openldap_image_version }}"
        detach: yes
        state: started
        restart_policy: unless-stopped
        container_default_behavior: no_defaults
        pull: true
        env:
          LDAP_LOG_LEVEL: "256"
          LDAP_ORGANISATION: "{{ ldap_org }}"
          LDAP_DOMAIN: "{{ ldap_domain }}"
          LDAP_BASE_DN: "{{ ldap_base_dn }}"
          LDAP_READONLY_USER: "false"
          LDAP_ADMIN_PASSWORD: "{{ ldap_admin_password }}"
          LDAP_CONFIG_PASSWORD: "{{ ldap_config_password }}"
          LDAP_RFC2307BIS_SCHEMA: "true"
          LDAP_TLS_CIPHER_SUITE: "SECURE256:-VERS-SSL3.0"
          LDAP_REPLICATION: "{{ ldap_replication_enable }}"
          LDAP_REPLICATION_CONFIG_SYNCPROV: "{{ ldap_replication_config_syncprov }}"
          LDAP_REPLICATION_DB_SYNCPROV: "{{ ldap_replication_db_syncprov }}"
          LDAP_REPLICATION_HOSTS: "{{ ldap_replication_hosts }}"
          KEEP_EXISTING_CONFIG: "false"
          LDAP_REMOVE_CONFIG_AFTER_SETUP: "true"
        published_ports:
          - "{{ ldap_ip }}:389:389"  # unencrypted/STARTTLS
          - "{{ ldap_ip }}:636:636"  # SSL
        volumes:
          - "{{ openldap_data }}/ldap:/var/lib/ldap"
          - "{{ openldap_data }}/slapd:/etc/ldap/slapd.d"
          - "{{ dehydrated_certs_dir }}/certs/{{ openldap_domain }}:/container/service/slapd/assets/certs"
          - "{{ openldap_data }}/ldif/custom-element.ldif:/container/service/slapd/assets/config/bootstrap/ldif/custom/01_netz39.ldif"
        timeout: 500
        # For replication to work correctly, domainname and hostname must be
        # set correctly so that "hostname"."domainname" equates to the
        # fully-qualified domain name for the host.
        domainname: "{{ ldap_domainname }}"
        hostname: "{{ ldap_hostname }}"
        command: "--copy-service --loglevel debug"

    - name: Allow access to openLDAP from local docker container [1/2]
      become: true
      community.general.ufw:
        rule: allow
        port: '389'
        proto: tcp
        from: "{{ item }}"
        comment: LDAP Docker Access
      loop: "{{ docker_ip_ranges }}"
    - name: Allow access to openLDAP from local docker container [2/2]
      become: true
      community.general.ufw:
        rule: allow
        port: '636'
        proto: tcp
        from: "{{ item }}"
        comment: LDAP Docker Access
      loop: "{{ docker_ip_ranges }}"

  handlers:
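
# The play above references variables that are not defined in this file.
# A minimal sketch of the expected inventory/vault entries follows; the
# variable names are taken from the references above, all values are
# placeholders (assumptions only), and the real values belong in an
# encrypted vault or group_vars file.
#
# shlink_geolite_license_key: "changeme"
# hedgedoc_postgres_password: "changeme"
# dehydrated_certs_dir: "/var/lib/dehydrated"
# docker_ip_ranges:
#   - "172.17.0.0/16"
# ldap_org: "Example Org"
# ldap_domain: "example.org"
# ldap_base_dn: "dc=example,dc=org"
# ldap_admin_password: "changeme"
# ldap_config_password: "changeme"
# ldap_replication_enable: "false"
# ldap_replication_config_syncprov: ""
# ldap_replication_db_syncprov: ""
# ldap_replication_hosts: ""
# ldap_ip: "192.0.2.10"
# ldap_domainname: "example.org"
# ldap_hostname: "ldap"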