Compare commits


15 commits

Author SHA1 Message Date
Maximilian Deubel
76d25384c6 bump ble version 2022-03-02 21:04:37 +01:00
tux
8cebc59443 Merge pull request 'Add BLE-Keykeeper to platon' (!26) from max/netz39-infra-ansible:platon into master
Reviewed-on: https://gitea.n39.eu/Netz39_Admin/netz39-infra-ansible/pulls/26
2022-02-19 15:23:06 +00:00
Maximilian Deubel
53b7815d27 Add BLE-Keykeeper to platon 2022-02-13 00:10:01 +01:00
tux
62b44867d7 Merge pull request 'Fix yaml issues' (!24) from alex/netz39-infra-ansible:yaml into master
Reviewed-on: https://gitea.n39.eu/Netz39_Admin/netz39-infra-ansible/pulls/24
2022-01-30 18:10:27 +00:00
65bfb358dd pottwal: Strip trailing whitespace 2022-01-30 18:25:35 +01:00
be09ef57f6 pottwal: Fix indentation
Please yamllint.

Fixes: 3aced1fe46 ("Install gitea via ansible")
2022-01-30 18:25:35 +01:00
b1b3382728 Revert "Fix YAML format issues"
This reverts commit babeef8226.

Instead of fixing the indentation of the recently introduced gitea
stuff, the indentation of the old stuff was changed.
2022-01-30 18:25:35 +01:00
9b31fe0619 Merge pull request 'Downgrade Openhab to 2.5.11' (!23) from openhab-downgrade into master
Reviewed-on: https://gitea.n39.eu/Netz39_Admin/netz39-infra-ansible/pulls/23
2022-01-30 17:24:51 +00:00
b6a05b8c8c Downgrade Openhab to 2.5.11
Openhab 3.1 does not work for us. The chosen version has been
determined by analysing the (scarce) log entries. It seems that
migration beyond this point breaks the configuration and leads
to an incomplete setup.
2022-01-30 18:12:49 +01:00
71e031cdc6 Fix creation of directory for docker registry 2022-01-29 12:41:34 +01:00
tux
10da78d11b Merge pull request 'Add a docker registry' (!20) from docker-registry into master
Reviewed-on: https://gitea.n39.eu/Netz39_Admin/netz39-infra-ansible/pulls/20
2022-01-29 11:34:27 +00:00
dee4f2557e Merge remote-tracking branch 'max/unifi-controller'
Link: https://gitea.n39.eu/Netz39_Admin/netz39-infra-ansible/pulls/16
2022-01-29 08:47:06 +01:00
Maximilian Deubel
48a4e9e62e add unifi controller role and playbook 2022-01-28 22:22:46 +01:00
00a647036e Switch domain to docker.n39.eu 2022-01-20 12:11:54 +01:00
f539a42024 Add a docker registry 2022-01-20 12:10:58 +01:00
7 changed files with 264 additions and 181 deletions

@@ -78,3 +78,6 @@ all:
           34613761363237633865306332653631323366343232353666343165666664343838
     unicorn.n39.eu:
       server_admin: "admin+unicorn@netz39.de"
+    platon.n39.eu:
+      server_admin: "admin+platon@netz39.de"
+      ansible_ssh_user: pi
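
The long hexadecimal line above is the tail of an Ansible Vault-encrypted value stored inline in the inventory. As a sketch of how such a value is produced (the variable name is not visible in this hunk, so example_secret and the vault password file are placeholders):

    # Variable name and vault password file are placeholders.
    ansible-vault encrypt_string --vault-password-file ~/.vault_pass 'secret value' --name 'example_secret'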

@@ -42,3 +42,6 @@
 - name: Specific setup for host unicorn
   import_playbook: unicorn.yml
+
+- name: Platon specific setup
+  import_playbook: platon.yml

platon.yml (new file)

@@ -0,0 +1,9 @@
+---
+- hosts: platon.n39.eu
+  become: true
+  vars:
+    ansible_python_interpreter: /usr/bin/python3
+    door_open_command: '/home/pi/netz39_rollladensteuerung/raspberry/doorcontrol/door-open.sh'
+    ble_keykeeper_dir: '/home/pi/netz39_ble_keykeeper'
+  roles:
+    - role: ble-keykeeper-role
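
With the import added to the main playbook above, platon is provisioned as part of a full run; the play can also be applied on its own, e.g. (the inventory path is an assumption, it is not shown in this extract):

    ansible-playbook -i inventory platon.yml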

@@ -8,7 +8,7 @@
     mosquitto_image: eclipse-mosquitto:1.6
     mosquitto_data: /srv/data/mosquitto
-    openhab_image: openhab/openhab:3.1.0
+    openhab_image: openhab/openhab:2.5.11
     openhab_data: /srv/data/openhab
     openhab_host_port: 8081
     openhab_configuration_source: https://github.com/netz39/n39-openhab.git
@@ -28,199 +28,199 @@
  tasks:
    - name: Check if gitea data dir exists
      ansible.builtin.stat:
        path: "/srv/data/gitea"
      register: gitea_dir

    - name: Fail if gitea data dir does not exist
      ansible.builtin.fail:
        msg: "Gitea data dir is missing, please restore from the backup!"
      when: not gitea_dir.stat.exists

    # If port 2222 is changed here, it must also be adapted
    # in the gitea config file (see application volume)!!
    - name: Setup the docker container for gitea
      docker_container:
        name: gitea
        image: "gitea/gitea:1.15.10"
        pull: true
        state: started
        restart_policy: unless-stopped
        detach: yes
        ports:
          # - 127.0.0.1:{{ gitea_host_port }}:3000
          - "{{ gitea_host_port }}:3000"
          - 2222:2222
        env:
          APP_NAME="Netz39 Gitea"
          RUN_MODE="prod"
          SSH_DOMAIN="gitea.n39.eu"
          SSH_PORT="2222"
          SSH_START_SERVER="false"
          ROOT_URL="https://gitea.n39.eu"
          DISABLE_REGISTRATION="true"
          USER_UID=1000
          USER_GID=1000
        volumes:
          - "/srv/data/gitea:/data:rw"

    - name: Setup proxy site gitea.n39.eu
      include_role:
        name: setup-http-site-proxy
      vars:
        site_name: "gitea.n39.eu"
        proxy_port: "{{ gitea_host_port }}"
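
The setup-http-site-proxy role recurs below for every web service, always parameterized with just site_name and proxy_port. Its template is not part of this diff; conceptually it renders an Apache reverse-proxy vhost along these lines (a sketch, not the role's actual output):

    # Hypothetical vhost generated from site_name/proxy_port.
    <VirtualHost *:80>
        ServerName {{ site_name }}
        ProxyPreserveHost On
        ProxyPass        / http://127.0.0.1:{{ proxy_port }}/
        ProxyPassReverse / http://127.0.0.1:{{ proxy_port }}/
    </VirtualHost>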

    - name: Ensure apt-cacher container is running
      docker_container:
        name: apt_cacher_ng
        image: "mrtux/apt-cacher-ng"
        pull: true
        state: started
        restart_policy: unless-stopped
        detach: yes
        ports:
          - 3142:3142

    - name: Ensure the mosquitto directories exist
      file:
        path: "{{ item }}"
        state: directory
      with_items:
        - "{{ mosquitto_data }}/config"
        - "{{ mosquitto_data }}/data"
        - "{{ mosquitto_data }}/log"

    - name: Make sure mosquitto config is there
      template:
        src: "templates/mosquitto.conf.j2"
        dest: "{{ mosquitto_data }}/config/mosquitto.conf"
      notify: restart_mosquitto
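
The template itself is not part of this diff. A minimal sketch of what mosquitto.conf.j2 might render, assuming the two published ports are plain MQTT and MQTT-over-websockets (eclipse-mosquitto 1.6 syntax):

    # Hypothetical rendering; the real template is not shown here.
    persistence true
    persistence_location /mosquitto/data/
    log_dest file /mosquitto/log/mosquitto.log

    port 1883
    listener 9001
    protocol websockets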

    - name: Ensure mosquitto is running
      docker_container:
        name: mosquitto
        image: "{{ mosquitto_image }}"
        pull: true
        state: started
        ports:
          - 1883:1883
          - 9001:9001
        volumes:
          - "{{ mosquitto_data }}/config:/mosquitto/config"
          - "{{ mosquitto_data }}/data:/mosquitto/data"
          - "{{ mosquitto_data }}/log:/mosquitto/log"
        detach: yes
        keep_volumes: yes
        restart_policy: unless-stopped

    - name: Ensure the openhab directories exist
      file:
        path: "{{ item }}"
        state: directory
      with_items:
        - "{{ openhab_data }}/addons"
        - "{{ openhab_data }}/conf"
        - "{{ openhab_data }}/userdata"

    - name: Clone or update configuration
      git:
        repo: "{{ openhab_configuration_source }}"
        version: "{{ openhab_configuration_version }}"
        dest: "{{ openhab_data }}/conf"
        clone: yes
        update: yes

    - name: ensure openhab is up and running
      docker_container:
        name: openhab
        image: "{{ openhab_image }}"
        pull: true
        state: started
        detach: yes
        interactive: yes
        tty: yes
        ports:
          - "{{ openhab_host_port }}:8080"
        volumes:
          - /etc/localtime:/etc/localtime:ro
          - /etc/timezone:/etc/timezone:ro
          - "{{ openhab_data }}/addons:/openhab/addons:rw"
          - "{{ openhab_data }}/conf:/openhab/conf:rw"
          - "{{ openhab_data }}/userdata:/openhab/userdata:rw"
        keep_volumes: yes
        restart_policy: unless-stopped
        env: EXTRA_JAVA_OPTS="-Duser.timezone=Europe/Berlin"

    - name: Setup proxy site openhab.n39.eu
      include_role:
        name: setup-http-site-proxy
      vars:
        site_name: openhab.n39.eu
        proxy_port: "{{ openhab_host_port }}"

    - name: Ensure container for URI tools is running
      docker_container:
        name: uritools
        image: mrtux/clean_uri
        pull: true
        state: started
        detach: yes
        ports:
          - "{{ uritools_host_port }}:8080"
        restart_policy: unless-stopped

    - name: Setup proxy site uritools.n39.eu
      include_role:
        name: setup-http-site-proxy
      vars:
        site_name: uritools.n39.eu
        proxy_port: "{{ uritools_host_port }}"

    - name: Ensure container for entities validation service is running
      docker_container:
        name: entities_validation_svc
        image: netz39/entities_validation_svc:v1.0.0
        pull: true
        state: started
        detach: yes
        ports:
          - "{{ entities_validation_svc_host_port }}:8080"
        restart_policy: unless-stopped

    - name: Setup proxy site entities-validation.svc.n39.eu
      include_role:
        name: setup-http-site-proxy
      vars:
        site_name: entities-validation.svc.n39.eu
        proxy_port: "{{ entities_validation_svc_host_port }}"

    - name: Ensure container for shlink is running
      docker_container:
        name: shlink
        image: shlinkio/shlink:2.6.2
        pull: true
        state: started
        detach: yes
        ports:
          - "{{ shlink_host_port }}:8080"
        restart_policy: unless-stopped
        env:
          SHORT_DOMAIN_HOST: "{{ shlink_domain_name }}"
          SHORT_DOMAIN_SCHEMA: https
          GEOLITE_LICENSE_KEY: "{{ shlink_geolite_license_key }}"

    - name: Setup proxy site {{ shlink_domain_name }}
      include_role:
        name: setup-http-site-proxy
      vars:
        site_name: "{{ shlink_domain_name }}"
        proxy_port: "{{ shlink_host_port }}"

  handlers:
    - name: restart mosquitto
      docker_container:
        name: mosquitto
        state: started
        restart: yes

@@ -5,3 +5,5 @@
     version: 3.0.0
 - src: git+https://github.com/24367dfa/ansible-role-dehydrated.git
   version: 1.0.2
+- src: git+https://github.com/maz3max/ble-keykeeper-role.git
+  version: v1.0.1
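
These entries use the Ansible Galaxy requirements format; assuming the file is the repository's requirements.yml, the new ble-keykeeper-role is fetched together with the others via:

    # File name assumed; it is not visible in this extract.
    ansible-galaxy install -r requirements.yml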

tau.yml

@@ -5,6 +5,9 @@
   vars:
     ansible_python_interpreter: /usr/bin/python3
+    docker_registry_port: 5000  # this is the reg standard port
+    docker_registry_domain: "docker.n39.eu"
   roles:
     - role: docker_setup
     - role: apache
@@ -31,6 +34,8 @@
             deploy_challenge_hook: "/bin/systemctl restart apache2"
           - name: "mysql.adm.netz39.de"
             deploy_challenge_hook: "/bin/systemctl restart apache2"
+          - name: "{{ docker_registry_domain }}"
+            deploy_challenge_hook: "/bin/systemctl restart apache2"
     - name: Setup forward site reservierung.netz39.de
       include_role:
@@ -68,3 +73,43 @@
       vars:
         site_name: mysql.adm.netz39.de
         proxy_port: 9001
+
+    - name: Check if Docker Registry auth dir exists
+      ansible.builtin.stat:
+        path: "/srv/docker/registry/auth"
+      register: docker_dir
+
+    - name: Fail if docker registry data dir does not exist
+      ansible.builtin.fail:
+        msg: "Docker Registry auth dir is missing, please restore from the backup!"
+      when: not docker_dir.stat.exists
+
+    - name: Ensure the Docker Registry data directory exists
+      # This may not be part of the backup
+      file:
+        path: "/srv/docker/registry/data"
+        state: directory
+
+    - name: Setup Docker Registry Container
+      docker_container:
+        name: registry
+        image: "registry:2"
+        pull: true
+        state: started
+        restart_policy: unless-stopped
+        detach: yes
+        ports:
+          - 127.0.0.1:{{ docker_registry_port }}:{{ docker_registry_port }}
+        env:
+          REGISTRY_HTTP_HOST: "https://{{ docker_registry_domain }}"
+          REGISTRY_AUTH_HTPASSWD_REALM: "Netz39 Docker Registry"
+          REGISTRY_AUTH_HTPASSWD_PATH: "/auth/htpasswd"
+        volumes:
+          - "/srv/docker/registry/data:/var/lib/registry:rw"
+          - "/srv/docker/registry/auth:/auth:rw"
+
+    - name: Setup proxy site for the Docker Registry
+      include_role:
+        name: setup-http-site-proxy
+      vars:
+        site_name: "{{ docker_registry_domain }}"
+        proxy_port: "{{ docker_registry_port }}"
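
The registry expects a bcrypt htpasswd file at REGISTRY_AUTH_HTPASSWD_PATH, i.e. /srv/docker/registry/auth/htpasswd on the host. A sketch for creating it and exercising the registry (user name, password, and image are placeholders):

    # htpasswd comes from apache2-utils; -B selects bcrypt, which registry:2 requires.
    htpasswd -Bbn exampleuser examplepass > /srv/docker/registry/auth/htpasswd

    docker login docker.n39.eu
    docker tag alpine:latest docker.n39.eu/alpine:latest
    docker push docker.n39.eu/alpine:latest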

@@ -1,11 +1,32 @@
 ---
 # this is for a dedicated vm just hosting the unifi controller.
 - hosts: unicorn.n39.eu
   become: true
   vars:
     ansible_python_interpreter: /usr/bin/python3
   roles:
     - role: docker_setup
       vars:
         docker_data_root: "/srv/docker"
+  tasks:
+    - name: Setup the docker container for unifi-controller
+      docker_container:
+        name: unifi-controller
+        image: "jacobalberty/unifi:v6.5.55"
+        state: started
+        restart_policy: unless-stopped
+        container_default_behavior: no_defaults
+        env:
+          TZ: "Europe/Berlin"
+        # These fixed ports are needed.
+        # https://help.ui.com/hc/en-us/articles/218506997-UniFi-Ports-Used
+        ports:
+          - "8080:8080/tcp"  # Device command/control
+          - "8443:8443/tcp"  # Web interface + API
+          - "8843:8843/tcp"  # HTTPS portal
+          - "8880:8880/tcp"  # HTTP portal
+          - "3478:3478/udp"  # STUN service
+          - "6789:6789/tcp"  # Speed Test (unifi5 only)
+        volumes:
+          - "/srv/data/unifi-controller/data:/unifi/data"
+          - "/srv/data/unifi-controller/log:/unifi/log"