Compare commits

No commits in common. "76d25384c61f3430f52e346af3c491d6ff067401" and "cac6cca1057f6a6be654d1673388ef8e82a4fc65" have entirely different histories.

7 changed files with 181 additions and 264 deletions

@@ -78,6 +78,3 @@ all:
34613761363237633865306332653631323366343232353666343165666664343838
unicorn.n39.eu:
server_admin: "admin+unicorn@netz39.de"
platon.n39.eu:
server_admin: "admin+platon@netz39.de"
ansible_ssh_user: pi

@@ -42,6 +42,3 @@
- name: Specific setup for host unicorn
import_playbook: unicorn.yml
- name: Platon specific setup
import_playbook: platon.yml

@@ -1,9 +0,0 @@
---
- hosts: platon.n39.eu
become: true
vars:
ansible_python_interpreter: /usr/bin/python3
door_open_command: '/home/pi/netz39_rollladensteuerung/raspberry/doorcontrol/door-open.sh'
ble_keykeeper_dir: '/home/pi/netz39_ble_keykeeper'
roles:
- role: ble-keykeeper-role

@@ -8,7 +8,7 @@
mosquitto_image: eclipse-mosquitto:1.6
mosquitto_data: /srv/data/mosquitto
openhab_image: openhab/openhab:2.5.11
openhab_image: openhab/openhab:3.1.0
openhab_data: /srv/data/openhab
openhab_host_port: 8081
openhab_configuration_source: https://github.com/netz39/n39-openhab.git
@@ -28,199 +28,199 @@
tasks:
- name: Check if gitea data dir exists
ansible.builtin.stat:
path: "/srv/data/gitea"
register: gitea_dir
- name: Fail if gitea data dir does not exist
ansible.builtin.fail:
msg: "Gitea data dir is missing, please restore from the backup!"
when: not gitea_dir.stat.exists
# If port 2222 is changed here, it must also be adapted
# in the gitea config file (see application volume)!!
- name: Setup the docker container for gitea
docker_container:
name: gitea
image: "gitea/gitea:1.15.10"
pull: true
state: started
restart_policy: unless-stopped
detach: yes
ports:
# - 127.0.0.1:{{ gitea_host_port }}:3000
- "{{ gitea_host_port }}:3000"
- 2222:2222
env:
APP_NAME: "Netz39 Gitea"
RUN_MODE: "prod"
SSH_DOMAIN: "gitea.n39.eu"
SSH_PORT: "2222"
SSH_START_SERVER: "false"
ROOT_URL: "https://gitea.n39.eu"
DISABLE_REGISTRATION: "true"
USER_UID: "1000"
USER_GID: "1000"
volumes:
- "/srv/data/gitea:/data:rw"
- name: Setup proxy site gitea.n39.eu
include_role:
name: setup-http-site-proxy
vars:
site_name: "gitea.n39.eu"
proxy_port: "{{ gitea_host_port }}"
- name: Ensure apt-cacher container is running
docker_container:
name: apt_cacher_ng
image: "mrtux/apt-cacher-ng"
pull: true
state: started
restart_policy: unless-stopped
detach: yes
ports:
- 3142:3142
- name: Ensure the mosquitto directories exist
file:
path: "{{ item }}"
state: directory
with_items:
- "{{ mosquitto_data }}/config"
- "{{ mosquitto_data }}/data"
- "{{ mosquitto_data }}/log"
- name: Make sure mosquitto config is there
template:
src: "templates/mosquitto.conf.j2"
dest: "{{ mosquitto_data }}/config/mosquitto.conf"
notify: restart mosquitto
- name: Ensure mosquitto is running
docker_container:
name: mosquitto
image: "{{ mosquitto_image }}"
pull: true
state: started
ports:
- 1883:1883
- 9001:9001
volumes:
- "{{ mosquitto_data }}/config:/mosquitto/config"
- "{{ mosquitto_data }}/data:/mosquitto/data"
- "{{ mosquitto_data }}/log:/mosquitto/log"
detach: yes
keep_volumes: yes
restart_policy: unless-stopped
- name: Ensure the openhab directories exist
file:
path: "{{ item }}"
state: directory
with_items:
- "{{ openhab_data }}/addons"
- "{{ openhab_data }}/conf"
- "{{ openhab_data }}/userdata"
- name: Clone or update configuration
git:
repo: "{{ openhab_configuration_source }}"
version: "{{ openhab_configuration_version }}"
dest: "{{ openhab_data }}/conf"
clone: yes
update: yes
- name: ensure openhab is up and running
docker_container:
name: openhab
image: "{{ openhab_image }}"
pull: true
state: started
detach: yes
interactive: yes
tty: yes
ports:
- "{{ openhab_host_port }}:8080"
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- "{{ openhab_data }}/addons:/openhab/addons:rw"
- "{{ openhab_data }}/conf:/openhab/conf:rw"
- "{{ openhab_data }}/userdata:/openhab/userdata:rw"
keep_volumes: yes
restart_policy: unless-stopped
env:
EXTRA_JAVA_OPTS: "-Duser.timezone=Europe/Berlin"
- name: Setup proxy site openhab.n39.eu
include_role:
name: setup-http-site-proxy
vars:
site_name: openhab.n39.eu
proxy_port: "{{ openhab_host_port }}"
- name: Ensure container for URI tools is running
docker_container:
name: uritools
image: mrtux/clean_uri
pull: true
state: started
detach: yes
ports:
- "{{ uritools_host_port }}:8080"
restart_policy: unless-stopped
- name: Setup proxy site uritools.n39.eu
include_role:
name: setup-http-site-proxy
vars:
site_name: uritools.n39.eu
proxy_port: "{{ uritools_host_port }}"
- name: Ensure container for entities validation service is running
docker_container:
name: entities_validation_svc
image: netz39/entities_validation_svc:v1.0.0
pull: true
state: started
detach: yes
ports:
- "{{ entities_validation_svc_host_port }}:8080"
restart_policy: unless-stopped
- name: Setup proxy site entities-validation.svc.n39.eu
include_role:
name: setup-http-site-proxy
vars:
site_name: entities-validation.svc.n39.eu
proxy_port: "{{ entities_validation_svc_host_port }}"
- name: Ensure container for shlink is running
docker_container:
name: shlink
image: shlinkio/shlink:2.6.2
pull: true
state: started
detach: yes
ports:
- "{{ shlink_host_port }}:8080"
restart_policy: unless-stopped
env:
SHORT_DOMAIN_HOST: "{{ shlink_domain_name }}"
SHORT_DOMAIN_SCHEMA: https
GEOLITE_LICENSE_KEY: "{{ shlink_geolite_license_key }}"
- name: Setup proxy site {{ shlink_domain_name }}
include_role:
name: setup-http-site-proxy
vars:
site_name: "{{ shlink_domain_name }}"
proxy_port: "{{ shlink_host_port }}"
handlers:
- name: restart mosquitto
docker_container:
name: mosquitto
state: started
restart: yes

@@ -5,5 +5,3 @@
version: 3.0.0
- src: git+https://github.com/24367dfa/ansible-role-dehydrated.git
version: 1.0.2
- src: git+https://github.com/maz3max/ble-keykeeper-role.git
version: v1.0.1

tau.yml

@@ -5,9 +5,6 @@
vars:
ansible_python_interpreter: /usr/bin/python3
docker_registry_port: 5000 # this is the reg standard port
docker_registry_domain: "docker.n39.eu"
roles:
- role: docker_setup
- role: apache
@@ -34,8 +31,6 @@
deploy_challenge_hook: "/bin/systemctl restart apache2"
- name: "mysql.adm.netz39.de"
deploy_challenge_hook: "/bin/systemctl restart apache2"
- name: "{{ docker_registry_domain }}"
deploy_challenge_hook: "/bin/systemctl restart apache2"
- name: Setup forward site reservierung.netz39.de
include_role:
@@ -73,43 +68,3 @@
vars:
site_name: mysql.adm.netz39.de
proxy_port: 9001
- name: Check if Docker Registry auth dir exists
ansible.builtin.stat:
path: "/srv/docker/registry/auth"
register: docker_dir
- name: Fail if docker registry data dir does not exist
ansible.builtin.fail:
msg: "Docker Registry auth dir is missing, please restore from the backup!"
when: not docker_dir.stat.exists
- name: Ensure the Docker Registry data directory exists
# This may not be part of the backup
file:
path: "/srv/docker/registry/data"
state: directory
- name: Setup Docker Registry Container
docker_container:
name: registry
image: "registry:2"
pull: true
state: started
restart_policy: unless-stopped
detach: yes
ports:
- 127.0.0.1:{{ docker_registry_port }}:{{ docker_registry_port }}
env:
REGISTRY_HTTP_HOST: "https://{{ docker_registry_domain }}"
REGISTRY_AUTH_HTPASSWD_REALM: "Netz39 Docker Registry"
REGISTRY_AUTH_HTPASSWD_PATH: "/auth/htpasswd"
volumes:
- "/srv/docker/registry/data:/var/lib/registry:rw"
- "/srv/docker/registry/auth:/auth:rw"
- name: Setup proxy site for the Docker Registry
include_role:
name: setup-http-site-proxy
vars:
site_name: "{{ docker_registry_domain }}"
proxy_port: "{{ docker_registry_port }}"

@@ -1,32 +1,11 @@
---
# this is for a dedicated vm just hosting the unifi controller.
- hosts: unicorn.n39.eu
become: true
vars:
ansible_python_interpreter: /usr/bin/python3
roles:
- role: docker_setup
vars:
docker_data_root: "/srv/docker"
tasks:
- name: Setup the docker container for unifi-controller
docker_container:
name: unifi-controller
image: "jacobalberty/unifi:v6.5.55"
state: started
restart_policy: unless-stopped
container_default_behavior: no_defaults
env:
TZ: "Europe/Berlin"
# These fixed ports are needed.
# https://help.ui.com/hc/en-us/articles/218506997-UniFi-Ports-Used
ports:
- "8080:8080/tcp" # Device command/control
- "8443:8443/tcp" # Web interface + API
- "8843:8843/tcp" # HTTPS portal
- "8880:8880/tcp" # HTTP portal
- "3478:3478/udp" # STUN service
- "6789:6789/tcp" # Speed Test (unifi5 only)
volumes:
- "/srv/data/unifi-controller/data:/unifi/data"
- "/srv/data/unifi-controller/log:/unifi/log"