Compare commits: master...feat/cisco (7 commits)

Commits: eebdf0b360, 7846e91e6b, a454932328, 3fff171180, 77c2d6aa04, d211caae89, a9567bd31d

85 changed files with 968 additions and 1072 deletions
Changed files:

.gitignore, .mailmap, .yamllint, README.md, device-cisco-2960-1.yml,
group-all.yml, group-docker_host.yml, group-k3s.yml, group-proxmox.yml,
inventory.yml, main.yml, renovate.json, requirements.yml, setup-ssh.yml

files/: hobbes, platon, wittgenstein

group_vars/

host-beaker.yml, host-hobbes.yml, host-holmium.yml, host-krypton.yml,
host-oganesson.yml, host-platon.yml, host-plumbum.yml, host-pottwal.yml,
host-radon.yml, host-tau.yml, host-unicorn.yml, host-wittgenstein.yml

host_vars/: cisco-2960-1.n39.eu, hobbes.n39.eu, plumbum.n39.eu,
pottwal.n39.eu, radon.n39.eu, wittgenstein.n39.eu

roles/: apache, apache_letsencrypt, cleanuri, dd24_dyndns_cron,
desec_dyndns_cron, docker_setup, nfs_host, nginx_https_ingress,
setup-http-site-forward, setup_http_site_forward/handlers,
setup_http_site_proxy, users

templates/
.gitignore (vendored, 2 changes)

@@ -1 +1 @@
-vault-pass
+vault-pass
.mailmap (4 changes)

@@ -2,8 +2,4 @@ Alexander Dahl <alex@netz39.de> <post@lespocky.de>
 David Kilias <dkdent@netz39.de>
 David Kilias <dkdent@netz39.de> <david.kilias@gmail.com>
 Maximilian Grau <mg-95@t-online.de>
-Maximilian Grau <mg-95@t-online.de> <mg-95@gitea.n39.eu>
-Jens Winter-Hübenthal <jens.winter@gmail.com>
-Jens Winter-Hübenthal <jens.winter@gmail.com> <jens.winter-huebenthal@bridgefield.de>
 Stefan Haun <tux@netz39.de>
-<timo@netz39.de> <n39@therr.de>
.yamllint

@@ -2,7 +2,6 @@
 extends: default
 
 rules:
-  comments-indentation: disable
   line-length: disable
   truthy:
     allowed-values:
README.md

@@ -20,9 +20,6 @@ SSH_KEY=<absolute/path/to/ssh/private/key>
 ansible-playbook setup-ssh.yml --ask-vault-pass -e "setup_ssh_logname=$LOGUSER" -e "setup_ssh_key=$SSH_KEY"
 ```
 
-This playbook also adds `rhodium.n39.eu` (OpenWRT router), but our Ansible cannot set up SSH keys (yet).
-Please [add your key to OpenWRT manually](https://openwrt.org/docs/guide-user/security/dropbear.public-key.auth#from_the_luci_web_interface).
-
 ## Edit vault encrypted vars files
 
 ```bash

@@ -52,7 +49,7 @@ To set up a new HTTPS vhost, the following steps need to be taken:
 
 1. Select a domain (for internal services we use sub-domains of `.n39.eu`).
 2. Create an external CNAME from this domain to `dyndns.n39.eu`.
-3. Create an internal DNS entry in the [Descartes DNS config](https://git.n39.eu/Netz39_Admin/config.descartes/src/branch/prepare/dns_dhcp.txt). This is usually an alias on an existing server.
+3. Create an internal DNS entry in the [Descartes DNS config](https://gitea.n39.eu/Netz39_Admin/config.descartes/src/branch/prepare/dns_dhcp.txt). This is usually an alias on an existing server.
 4. Add the entry to the [holmium playbook](holmium.yml).
 5. Set up Dehydrated and vhost on the target host, e.g. using `setup_http_site_proxy`.
 
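The fenced bash block under the "Edit vault encrypted vars files" heading is cut off by the hunk boundary above. For orientation, such a file is normally edited with `ansible-vault`; the path below is a hypothetical placeholder, not taken from this repository:

```bash
# Sketch only; the vars file path is a placeholder, not from this diff.
# Decrypts the file, opens it in $EDITOR, and re-encrypts it on save.
ansible-vault edit --ask-vault-pass group_vars/all/vault.yml
```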
device-cisco-2960-1.yml (new file, 97 lines)

@@ -0,0 +1,97 @@
+---
+- hosts: cisco-2960-1.n39.eu
+  become: true
+
+  tasks:
+    - name: configure login banner
+      cisco.ios.ios_banner:
+        banner: login
+        text: "Documentation here: https://wiki.netz39.de/internal:inventory:network:2960s-24td-l"
+        state: present
+
+    - name: configure vlans
+      cisco.ios.ios_vlans:
+        config:
+          - name: lan
+            vlan_id: 4
+            state: active
+            shutdown: disabled
+          - name: wan
+            vlan_id: 5
+            state: active
+            shutdown: disabled
+          - name: service
+            vlan_id: 7
+            state: active
+            shutdown: disabled
+          - name: legacy
+            vlan_id: 8
+            state: active
+            shutdown: disabled
+          - name: dmz
+            vlan_id: 9
+            state: active
+            shutdown: disabled
+          - name: ffmd-client
+            vlan_id: 11
+            state: active
+            shutdown: disabled
+        state: merged
+
+    - name: configure port assignment
+      cisco.ios.ios_l2_interfaces:
+        config:
+          # USV
+          - name: Gi1/0/6
+            mode: access
+            access:
+              vlan: 1
+          # beaker ipmi
+          - name: Gi1/0/9
+            mode: access
+            access:
+              vlan: 1
+          # Patchfeld 1, Switch ausleihliste
+          - name: Gi1/0/13
+            mode: trunk
+            trunk:
+              allowed_vlans: 1,4,5,7,8,11
+              native_vlan: 4
+          # patchfeld 2 - Raspberry Pi Platon
+          - name: Gi1/0/15
+            mode: access
+            access:
+              vlan: 4
+          # patchfeld 6 - Access Point Hempels Zimmer
+          - name: Gi1/0/17
+            mode: access
+            access:
+              vlan: 4
+          # FräsPC
+          - name: Gi1/0/19
+            mode: access
+            access:
+              vlan: 4
+          # patchfeld 4 - Switch am Basteltisch
+          - name: Gi1/0/20
+            mode: trunk
+            trunk:
+              allowed_vlans: 1,4,5,7,8,11
+              native_vlan: 4
+          # uplink descartes
+          - name: Gi1/0/25
+            mode: trunk
+            trunk:
+              allowed_vlans: 1-11
+              native_vlan: 1
+          # server marx
+          - name: Gi1/0/26
+            mode: trunk
+            trunk:
+              allowed_vlans: 1-11
+              native_vlan: 1
+        state: merged
+
+    - name: Save running to startup when modified
+      cisco.ios.ios_config:
+        save_when: modified
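The cisco.ios modules above do not use the usual SSH-plus-Python transport; they need a `network_cli` connection, and those connection settings are not part of this playbook. The changed-files list includes host_vars/cisco-2960-1.n39.eu, which presumably carries them. A minimal sketch of such host vars; the connection and OS settings are standard Ansible networking options, while the user and vault variable names are assumptions:

```yaml
# Sketch, not taken from this diff: connection settings a cisco.ios
# target typically needs.
ansible_connection: ansible.netcommon.network_cli
ansible_network_os: cisco.ios.ios
ansible_user: admin                             # assumption
ansible_password: "{{ vault_cisco_password }}"  # assumption, vault-encrypted
ansible_become: true
ansible_become_method: enable                   # enter privileged EXEC mode
```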
files/hobbes/grafana-kiosk.service (deleted)

@@ -1,17 +0,0 @@
-[Unit]
-Description=Grafana Kiosk
-After=network.target
-Wants=network.target
-
-[Service]
-User=root
-
-ExecStart=/usr/local/bin/kiosk.sh
-
-Restart=always
-
-PIDFile=/run/kiosk.pid
-ExecStop=/bin/kill -s SIGTERM $MAINPID
-
-[Install]
-WantedBy=multi-user.target
files/platon/11_asterisk_i2c (deleted)

@@ -1 +0,0 @@
-asterisk ALL=(root) NOPASSWD: /usr/sbin/i2cget, /usr/sbin/i2cset
files/… (shutter-control startup script)

@@ -1,13 +1,11 @@
 #!/bin/sh
 PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/games:/usr/games'
-echo 'set PCM volume'
-sudo amixer set "PCM" "70%"
 echo 'start i2c-foo'
 sudo modprobe i2c_dev
 sudo modprobe i2c_bcm2708
 echo 'starting log'
 tmux new-session -s status -d 'sudo less /var/log/shuttercontrol.log'
 cd /home/pi/netz39_rollladensteuerung/raspberry/shuttercontrol
 echo 'switch-on.sh'
 ../switch-on.sh
 cd /home/pi
files/… (deleted: space-status startup script)

@@ -1,19 +0,0 @@
-#!/bin/sh
-PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/games:/usr/games'
-
-echo 'switch-on.sh'
-/home/pi/switch-on.sh
-
-echo 'start i2c-foo'
-sudo modprobe i2c_dev
-sudo modprobe i2c_bcm2708
-
-# wait for network devices
-sleep 30
-
-cd /home/pi
-echo 'start ampel controller'
-tmux new-session -s ampel -d 'cd /home/pi/netz39_space_notification/raspberry/ledcontrol && ./ledcontrol'
-
-echo 'start lever controller'
-tmux new-window -t ampel:1 'cd /home/pi/netz39_space_notification/raspberry/statusswitch && ./statusswitch'
files/… (deleted: GPIO power-off script)

@@ -1,7 +0,0 @@
-#!/bin/bash
-
-gpio write 2 0
-gpio write 3 0
-
-gpio mode 2 tri
-gpio mode 3 tri
files/… (deleted: GPIO power-on script)

@@ -1,11 +0,0 @@
-#!/bin/bash
-
-# INT
-gpio mode 0 tri
-
-# Power
-gpio mode 2 out
-gpio mode 3 out
-
-gpio write 2 1
-gpio write 3 1
files/… (deleted: unstuck/reboot helper)

@@ -1,7 +0,0 @@
-#!/bin/bash
-logger -t unstuck "unstuck $(date)"
-
-killall tmux
-
-sleep 1
-/home/pi/reboot.sh
group-all.yml

@@ -1,33 +1,34 @@
 ---
-- name: Tasks for all hosts
-  hosts: all
+# tasks for all hosts
+
+- hosts: all
   become: true
 
   vars:
     ansible_python_interpreter: /usr/bin/python3
 
   roles:
-    - role: adriagalin.timezone
+    - role: ansible.timezone
       vars:
         ag_timezone: "{{ timezone }}"
     - role: users
 
   tasks:
     - name: Update and clean package cache
-      ansible.builtin.apt:
+      apt:
         update_cache: true
         cache_valid_time: 3600
         autoclean: true
       changed_when: false
 
     - name: Ensure unattended-upgrades is installed and up to date
-      ansible.builtin.apt:
+      apt:
         name: unattended-upgrades
         state: present
 
     - name: Setup unattended-upgrades
-      ansible.builtin.include_role:
-        name: hifis.toolkit.unattended_upgrades
+      include_role:
+        name: hifis.unattended_upgrades
       vars:
         unattended_origins_patterns:
           - "origin=*"
group-docker_host.yml

@@ -1,18 +1,15 @@
 ---
-- name: Tasks for docker hosts
-  hosts: docker_host
+- hosts: docker_host
   become: true
 
   roles:
-    - role: netz39.host_docker
+    - role: docker_setup
 
-- name: Tasks for docker hosts at location space
-  hosts: docker_host:&location_space
+- hosts: docker_host:&location_space
   become: true
 
   roles:
     - role: lespocky.telegraf_docker_in_docker
-      when: (ansible_architecture == "x86_64")
       vars:
         tdid_conf_dir: "/etc/telegraf"
         tdid_influxdb_org: "{{ influxdb_org }}"
group-k3s.yml (deleted)

@@ -1,10 +0,0 @@
----
-- name: Tasks for kubernetes hosts
-  hosts: k3s
-  become: true
-
-  tasks:
-    - name: Ensure nfs-common is installed on k3s VMs
-      ansible.builtin.apt:
-        pkg: nfs-common
-        state: present
group-proxmox.yml

@@ -1,6 +1,5 @@
 ---
-- name: Tasks for virtual machines on proxmox host
-  hosts: proxmox
+- hosts: proxmox
   become: true
 
   tasks:
group_vars/all

@@ -10,8 +10,6 @@ users:
     ssh_pub:
       - !unsafe >
         ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVZPAE3XE8Ek1Ji4sCIHxLVx+bi2qpsTSsYhBqtYysnFn9AHJj14BR59D0Si05sfVkmL4OQoo7Q98oIxy33PgtqoUfgXk9dc7dlsye3t/gsAb25ABnqG/ZYe65nZLN7BzRM1/QZIbd6sSu6eXrNFCh0ikB5se4zgVkDO8t6h2dnz4FvTuIM2Bi/PnIJTqb8+uLQE1vS3A7tTx100ZKXxr81dlo2Y1JBP6WrS1W1IyFiG6wofl2XTY02ssyoENQyR89lLMJYKvm5xlhL/L69gtMsqIX9UBQFk8Rpq04ZIwN6b0K4R142GZvxdJNdQULgtI3gPkKgH7FDoFsRHNA6b/9 adahl@ada
-      - !unsafe >
-        ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDb5j4SlVDkK/CE/viZA5M/VquKm0DaMB6h5yR9ZWf7oW2h/q5tPQr5Kzatt+uCF++1eCOfoq6BR/NV01KkVdyTMemA8LMZwuf4uUzTlfnoXO4eGP0+d4aGzSuE08gak8c0iYF5zzzJGSKVIZ7qQXAmAH5guJxdRltpJlFbnYY6Plo1nxmluSAAh8qPSBQhZy+ja05ZpXct6+IeXHDLJ9ia5x71hAbEzKJXafVukL/Qt6Gr80snW1OuVzBpDs5/O2taKNV4a3dAzM4cNb0xGbhNogiuZD5IPHjkbsiOifBT+i48CBOasSWO9tnNZ6X/kDXxizoo4gB1rWOVvPE8SXXbKSxus48AG0MEKh0XGB7z7klCxDWITn1JpN3x8/vbG9Y02/QlVdqdTuIq7fUfrQz3hipR2DMXuGnMkwkR80XXkQziuBP6UG3Meh2wZ0SxIex3JgVsZh4gxvIvNxuU9iEcpgEFhGFvQwxbZ+nWYYe0j//OzfKQpod/D03tx7W6SXM= adahl@ada-pc
       - !unsafe >
         ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDvczlb1+9d1BjuLk5ZcQt2Z0Dh61Vg91i47tM48CN2koJ4I/9vgN37l6mnr383zD8kQkXDGmCYpXOa48WocyyUuP3h75DCjANYcWOsohQfFu2F1ZOiiVCGduDntzS2nbZEF2W3nZNLQ6/dKKEeaSxu5RjKflkWakghkMt3H4KN20bxzYzHQMLhRYFEGHpskOqeaXKPkqqEP+u5kToINtmXwegCvQFnlx4fNrysFII79buBNlcLsO1X4ABucVMYT/OJnBpJEfEcNFUKrJZRGgM8aDbUpkV9LRY2lywvoKJhiRMc7x7kK0LWOTdPJri+SJhW6fEW4JKCRTSHVN8OS8S/ alex@buffy
       - !unsafe >

@@ -24,6 +22,11 @@ users:
         ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGmU7MfOFuc6z5Vbwh4CbBFSg19f8B9rUO2ITjgmEvkY alex@lemmy
     sudo: yes
     docker: yes
+  - logname: "kwasir"
+    viewname: "Peter Seidel"
+    email: "kwasir@netz39.de"
+    sudo: yes
+    docker: yes
   - logname: "tux"
     viewname: "Stefan Haun"
     email: "tux@netz39.de"

@@ -50,29 +53,10 @@ users:
         ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHasp1Q/HJURndKnNRP5TJqJVHPuN9G/9uHdaNGhI8yi mg@mg-Swift-SF314-52G
     sudo: yes
     docker: yes
-  - logname: "timo"
-    viewname: "Timo Herrmann"
-    email: "timo@netz39.de"
-    ssh_pub:
-      - !unsafe >
-        ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILKhKHCPibswu2p6UQHKsBSqGaXzMFM+oMX0XEWsxCIc timo@Space-Lap
-      - !unsafe >
-        ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMJoS7nsLLzSAsZA4us2/7JaQhgRjj/BY+LOpDQnfy8u timo@mac
-    sudo: yes
-    docker: yes
-  - logname: "jens"
-    viewname: "Jens Winter-Hübenthal"
-    email: "jens.winter@gmail.com"
-    ssh_pub:
-      - !unsafe >
-        ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIII4FS2sROKs2nIW8uzDuWmj8q127UoljtkVMthY8g// jens@work-lenovo
-    sudo: yes
-    docker: yes
 
-# Data for dyndns updates
+# Data for DD24 dyndns updates
 dyndns_domain: "dyndns.n39.eu"
 dyndns_password: "{{ vault_dyndns_password }}"
-dyndns_token: "{{ vault_dyndns_token }}"
 
 # Shared influxdb items
 influxdb_org: "netz39"
group_vars (vault-encrypted vars file)

@@ -1,10 +1,7 @@
 $ANSIBLE_VAULT;1.1;AES256
-37306233306262383862373661626635346436316265663162343433303432653536376632316439
-6336396564613232363337303266643965346333396331620a316536636666393461353633366466
-39333362306166376462353739626139623835326461373834303330346538366637626363306438
-3033376133373330330a356236396366643938323666663836643738386337373362323933623838
-30316663646134623232336563343562393037363463303739626464633461323539306261316638
-61343330626263393065636230303632663965653939373437386561656539646533653661613236
-35326334313232633738633933653939383830636361373938373864643133363539623734646435
-32336630613231353337336466646164373734386539653936313865316336616264373061633139
-3839
+34303066383937623831333466333965323161376134353838346235323662373164303163363734
+3134626237346361656533636161363331666537633538380a613761643431356530343663626666
+62646361316364333533316638646261373661633863363733366337373338336565366536386237
+3138646266613837310a396139363830613463393861336161363533343362383462623265356563
+31333862613937306463353130316365636634353862363039663762326263313366363530636631
+3630653638333831303432316266633833643739643533353536
group_vars/docker_host

@@ -1,3 +1,2 @@
 ---
 docker_data_root: "/srv/docker"
-docker_image_prune: true
host-beaker.yml

@@ -1,6 +1,5 @@
 ---
-- name: Setup things on host 'beaker' (proxmox server im space)
-  hosts: beaker.n39.eu
+- hosts: beaker.n39.eu
   become: true
 
   vars:

@@ -10,7 +9,7 @@
 
   tasks:
 
-    - name: Enable proxmox gui login for admin users
+    - name: enable proxmox gui login for admin users
       ansible.builtin.lineinfile:
         path: /etc/pve/user.cfg
         regexp: "^user:{{ item.logname }}@pam"

@@ -19,10 +18,11 @@
         state: present
       loop: "{{ users }}"
 
-    - name: Configure proxmox admin group
+    - name: configure proxmox admin group
       ansible.builtin.lineinfile:
         path: /etc/pve/user.cfg
         regexp: "^group:Admins:"
-        line: "group:Admins:{{ users | map(attribute='logname') | join(\"@pam,\") }}@pam::"
+        # group:Admins:kwasir@pam,lespocky@pam,tux@pam::
+        line: "group:Admins:{{ users | map(attribute = 'logname') | join(\"@pam,\") }}@pam::"
 
   handlers:
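The commented expansion that feat/cisco keeps above the `line:` entry illustrates what the Jinja pipeline produces. As a worked example with a hypothetical users list:

```yaml
# Hypothetical input:
users:
  - logname: kwasir
  - logname: lespocky
  - logname: tux

# {{ users | map(attribute='logname') | join("@pam,") }} yields
#   kwasir@pam,lespocky@pam,tux
# and the literal "@pam::" suffix completes the last entry, so the
# managed line becomes:
#   group:Admins:kwasir@pam,lespocky@pam,tux@pam::
```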
host-hobbes.yml

@@ -1,6 +1,5 @@
 ---
-- name: Setup things on host 'hobbes' (raspberry pi for kiosk screen)
-  hosts: hobbes.n39.eu
+- hosts: hobbes.n39.eu
   become: true
 
   vars:

@@ -9,6 +8,7 @@
 
   roles:
 
+
   tasks:
     - name: Install packages needed for the system
       # This is a list of all packages,

@@ -16,9 +16,15 @@
       ansible.builtin.apt:
         state: present
         name:
-          - mosquitto-clients
-          - fbi
+          # This is needed for the user-executed tasks
+          - acl
+          # Regular packages
+          - lightdm
+          - accountsservice
+          - unclutter
+          - lxde
+          - chromium-browser
+          - rng-tools
 
     - name: Remove the screensavers
       ansible.builtin.apt:

@@ -47,23 +53,50 @@
 
 
     ### Kiosk setup
+    #
+    # https://github.com/grafana/grafana-kiosk
 
     - name: Ensure kiosk user is there
       ansible.builtin.user:
         name: "{{ kiosk_user }}"
         groups: audio,plugdev,input,netdev
         append: yes
 
-    - name: Install Kiosk script
-      ansible.builtin.template:
-        src: templates/hobbes/kiosk.sh.j2
-        dest: /usr/local/bin/kiosk.sh
-        owner: root
-        group: root
-        mode: '0755'
+    - name: Create bin directory
+      file:
+        path: "/home/{{ kiosk_user }}/bin"
+        owner: "{{ kiosk_user }}"
+        mode: '0755'
+        state: directory
+
+    - name: Download grafana-kiosk
+      ansible.builtin.get_url:
+        url: "https://github.com/grafana/grafana-kiosk/releases/download/{{ kiosk_software_version }}/grafana-kiosk.linux.{{ kiosk_software_arch }}"
+        dest: "/home/{{ kiosk_user }}/bin/grafana-kiosk"
+        mode: '0755'
+        force: no
+
+    - name: Setup autologin in lightdm
+      ansible.builtin.blockinfile:
+        path: /etc/lightdm/lightdm.conf
+        block: |
+          [Seat:seat0]
+          autologin-user = pi
+          autologin-user-timeout = 0
+          autologin-in-background = False
+
+    - name: Remove autostart
+      # None of the things in autostart are needed or wanted
+      ansible.builtin.file:
+        path: /etc/xdg/lxsession/LXDE/autostart
+        state: absent
+
 
     - name: Add systemd service
-      ansible.builtin.copy:
-        src: files/hobbes/grafana-kiosk.service
+      ansible.builtin.template:
+        src: templates/hobbes/grafana-kiosk.service.j2
         dest: /etc/systemd/system/grafana-kiosk.service
         owner: root
         group: root

@@ -75,4 +108,12 @@
         enabled: true
         state: started
 
+    - name: Set default systemd target to graphical
+      ansible.builtin.file:
+        src: /lib/systemd/system/graphical.target
+        dest: /etc/systemd/system/default.target
+        state: link
+        force: yes
+
+
   handlers:
host-holmium.yml

@@ -1,6 +1,5 @@
 ---
-- name: Setup things on host 'holmium' (http ingress vm)
-  hosts: holmium.n39.eu
+- hosts: holmium.n39.eu
   become: true
 
   vars:

@@ -16,7 +15,7 @@
       - name: entities-validation.svc.n39.eu
   - server: pottwal
     hosts:
-      - name: git.n39.eu
+      - name: gitea.n39.eu
       - name: redmine.n39.eu
       - name: uritools.n39.eu
       - name: uritools-api.n39.eu

@@ -38,5 +37,5 @@
         local: true
       - name: pwr-meter-pulse-gw-19i.svc.n39.eu
         local: true
-      - name: labelprinter.n39.eu
+      - name: brotherql-web.n39.eu
         local: true
host-krypton.yml

@@ -1,6 +1,5 @@
 ---
-- name: Setup things on host 'krypton' (ldap vm)
-  hosts: krypton.n39.eu
+- hosts: krypton.n39.eu
   become: true
 
   vars:

@@ -10,6 +9,7 @@
 
     docker_ip_ranges: ["172.16.0.0/12", "192.168.0.0/16"]
 
+    openldap_image_version: 1.5.0
     openldap_data: "{{ data_dir }}/openldap"
     openldap_domain: "ldap.n39.eu"
     ldap_domain: "netz39.de"

@@ -20,10 +20,10 @@
 
 
   roles:
-    # role 'netz39.host_docker' applied through group 'docker_host'
+    # role 'docker_setup' applied through group 'docker_host'
    - role: apache
    - role: apache_letsencrypt  # Uses configuration from dehydrated setup
-    - role: 24367dfa.dehydrated
+    - role: ansible-role-dehydrated
      vars:
        dehydrated_contact_email: "{{ server_admin }}"
        dehydrated_domains:

@@ -33,13 +33,13 @@
   tasks:
 
     # - name: Setup dehydrated challenge endpoint for {{ openldap_domain }}
-    #   ansible.builtin.include_role:
+    #   include_role:
     #     name: setup-http-dehydrated
     #   vars:
     #     site_name: "{{ openldap_domain }}"
 
     - name: Ensure openLDAP directories are present.
-      ansible.builtin.file:
+      file:
         path: "{{ item.path }}"
         mode: "0755"
         state: directory

@@ -50,9 +50,9 @@
         - path: "{{ dehydrated_certs_dir }}/{{ openldap_domain }}"
 
     - name: Ensure container for openLDAP is running.
-      community.docker.docker_container:
+      docker_container:
         name: openLDAP
-        image: osixia/openldap:1.5.0
+        image: "osixia/openldap:{{ openldap_image_version }}"
         detach: yes
         state: started
         restart_policy: unless-stopped

@@ -99,9 +99,9 @@
         rule: allow
         port: '389'
         proto: tcp
         from: "{{ item }}"
         comment: LDAP Docker Access
       loop: "{{ docker_ip_ranges }}"
 
     - name: Allow access to openLDAP from local docker container [2/2]
       become: true

@@ -109,15 +109,15 @@
         rule: allow
         port: '636'
         proto: tcp
         from: "{{ item }}"
         comment: LDAP Docker Access
       loop: "{{ docker_ip_ranges }}"
 
 
     - name: Ensure container for entities validation service is running
-      community.docker.docker_container:
+      docker_container:
         name: entities_validation_svc
-        image: netz39/entities_validation_svc:v1.0.4
+        image: netz39/entities_validation_svc:v1.0.0
         pull: true
         state: started
         detach: yes

@@ -128,7 +128,7 @@
           TZ: "{{ timezone }}"
 
     - name: Setup proxy site entities-validation.svc.n39.eu
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: entities-validation.svc.n39.eu
host-oganesson.yml

@@ -1,6 +1,5 @@
 ---
-- name: Setup things on host 'oganesson' (ssh jump host vm)
-  hosts: oganesson.n39.eu
+- hosts: oganesson.n39.eu
   become: true
 
   vars:
host-platon.yml

@@ -1,13 +1,12 @@
 ---
-- name: Setup things on host 'platon' (raspberry pi for entrance door)
-  hosts: platon.n39.eu
+- hosts: platon.n39.eu
   become: true
   vars:
     ansible_python_interpreter: /usr/bin/python3
     door_open_command: '/home/pi/sesame-open.sh'
     ble_keykeeper_dir: '/home/pi/netz39_ble_keykeeper'
   roles:
-    - role: maz3max.ble_keykeeper
+    - role: ble-keykeeper-role
       vars:
         ble_keykeeper_user: "{{ gatekeeper_user }}"
         ble_keykeeper_group: "{{ gatekeeper_user }}"

@@ -64,7 +63,7 @@
         owner: root
         group: root
         mode: '0644'
-      notify: Restart mosquitto service
+      notify: restart mosquitto
 
 
     ### Sesam for SSH access

@@ -142,7 +141,7 @@
         mode: "0644"
       register: wiringPi_copy
 
-    - name: Install wiringPi library  # noqa: no-handler
+    - name: Install wiringPi library  # noqa 503
       ansible.builtin.apt:
         state: present
         deb: "/home/{{ gatekeeper_user }}/wiringpi-latest.deb"

@@ -205,6 +204,7 @@
       become: yes
       become_user: "{{ gatekeeper_user }}"
       ansible.builtin.shell:
+        warn: false
         chdir: "/home/{{ gatekeeper_user }}/mqtt-tools"
         cmd: |
           mkdir build

@@ -246,7 +246,7 @@
         owner: root
         group: root
         mode: "0644"
-      notify: Restart rsyslog
+      notify: restart rsyslog
 
 
     ### Asterisk

@@ -259,7 +259,7 @@
         owner: root
         group: root
         mode: "0644"
-      notify: Restart asterisk
+      notify: restart asterisk
 
     - name: Set up extensions for asterisk
       # This uses the variables gatekeeper_user and door_open_command

@@ -269,25 +269,14 @@
         owner: root
         group: root
         mode: "0644"
-      notify: Restart asterisk
+      notify: restart asterisk
 
     - name: Ensure asterisk is in the right groups
       ansible.builtin.user:
         name: asterisk
         groups: audio,i2c,gpio
         append: yes
-      notify: Restart asterisk
+      notify: restart asterisk
 
-    # Asterisk now executes shell scripts with reduced privileges, so we need to
-    # use sudo for I2C access.
-    - name: Set up sudo configuration for Asterisk I2C access
-      ansible.builtin.copy:
-        src: "files/platon/11_asterisk_i2c"
-        dest: "/etc/sudoers.d/"
-        owner: root
-        group: root
-        mode: "0644"
-      # Asterisk restart is not necessary
-
     - name: Copy sounds
       ansible.builtin.copy:

@@ -305,20 +294,20 @@
 
 
   handlers:
-    - name: Restart mosquitto service
-      ansible.builtin.service:
+    - name: restart mosquitto
+      service:
         name: mosquitto
         state: restarted
         enabled: yes
 
-    - name: Restart rsyslog
-      ansible.builtin.service:
+    - name: restart rsyslog
+      service:
         name: rsyslog
         state: restarted
         enabled: yes
 
-    - name: Restart asterisk
-      ansible.builtin.service:
+    - name: restart asterisk
+      service:
         name: asterisk
         state: restarted
         enabled: yes
host-plumbum.yml (deleted)

@@ -1,15 +0,0 @@
----
-- name: Setup things on host 'plumbum' (nfs server)
-  hosts: plumbum.n39.eu
-  become: true
-
-  roles:
-    - role: nfs_host
-      vars:
-        nfs_host_exports:
-          - directory: "/srv/nfs/backup"
-            hosts: "*.n39.eu"
-            options: rw,sync,no_subtree_check,no_root_squash
-          - directory: "/srv/nfs/ephemeral"
-            hosts: "*.n39.eu"
-            options: rw,sync,no_subtree_check,no_root_squash
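For reference, an `nfs_host_exports` list of this shape conventionally maps to /etc/exports entries like the following, assuming the nfs_host role renders standard exportfs syntax (the role's template is not part of this capture):

```
/srv/nfs/backup     *.n39.eu(rw,sync,no_subtree_check,no_root_squash)
/srv/nfs/ephemeral  *.n39.eu(rw,sync,no_subtree_check,no_root_squash)
```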
host-pottwal.yml (263 lines)

@@ -1,36 +1,33 @@
 ---
-- name: Setup things on host 'pottwal' (the big docker container host)
-  hosts: pottwal.n39.eu
+- hosts: pottwal.n39.eu
   become: true
 
   roles:
-    # role 'netz39.host_docker' applied through group 'docker_host'
+    # role 'docker_setup' applied through group 'docker_host'
     - role: apache
     - role: apache_letsencrypt  # Uses configuration from dehydrated setup
-    - role: 24367dfa.dehydrated
+    - role: ansible-role-dehydrated
       vars:
         dehydrated_contact_email: "{{ server_admin }}"
         dehydrated_domains:
-          - name: "{{ forgejo_domain_name }}"
+          - name: gitea.n39.eu
           - name: uritools.n39.eu
           - name: uritools-api.n39.eu
           - name: "{{ shlink_domain_name }}"
-          - name: "{{ hedgedoc_domain_name }}"
+          - name: pad.n39.eu
           - name: "{{ prosody_domain_name }}"
            alternate_names:
              - conference.jabber.n39.eu
            deploy_cert_hook: "docker exec prosody prosodyctl --root cert import ${DOMAIN} /var/lib/dehydrated/certs"
-          - name: "{{ redmine_domain_name }}"
+          - name: redmine.n39.eu
           - name: "{{ influxdb_domain_name }}"
-          - name: "{{ uptimekuma_domain_name }}"
+          - name: uptime.n39.eu
           - name: "{{ grafana_domain_name }}"
           - name: "{{ homebox_domain_name }}"
           - name: spaceapi.n39.eu
     - role: penguineer.dehydrated_cron
     - role: dd24_dyndns_cron
       # variables are set in the inventory
-    - role: desec_dyndns_cron
-      # variables are set in the inventory
     - role: cleanuri
       vars:
         cleanuri_ui_domain: uritools.n39.eu

@@ -45,53 +42,49 @@
       ansible.builtin.stat:
         path: "{{ data_dir }}/forgejo"
       register: forgejo_dir
-      tags: ["forgejo"]
     - name: Fail if forgejo data dir does not exist
       ansible.builtin.fail:
         msg: "Forgejo data dir is missing, please restore from the backup!"
       when: not forgejo_dir.stat.exists
-      tags: ["forgejo"]
 
     # If port 2222 is changed here, it must also be adapted
     # in the forgejo config file (see application volume)!!
-    - name: Setup the docker container for forgejo
-      community.docker.docker_container:
+    - name: Setup the docker container for gitea
+      docker_container:
         name: forgejo
-        image: "{{ forgejo_image }}"
+        image: "codeberg.org/forgejo/forgejo:1.19"
         pull: true
         state: started
         restart_policy: unless-stopped
         detach: yes
         ports:
           - 127.0.0.1:{{ forgejo_host_port }}:3000
-          - "{{ forgejo_ssh_port }}:2222"
+          - 2222:2222
         env:
           TZ: "{{ timezone }}"
           APP_NAME: "Netz39 Git"
           RUN_MODE: "prod"
-          SSH_DOMAIN: "{{ forgejo_domain_name }}"
+          SSH_DOMAIN: "gitea.n39.eu"
           SSH_PORT: "2222"
           SSH_START_SERVER: "false"
-          ROOT_URL: "https://{{ forgejo_domain_name }}"
+          ROOT_URL: "https://gitea.n39.eu"
           DISABLE_REGISTRATION: "true"
           USER_UID: "1000"
           USER_GID: "1000"
         volumes:
           - "{{ data_dir }}/forgejo:/data:rw"
-      tags: ["forgejo"]
 
-    - name: Setup proxy site "{{ forgejo_domain_name }}"
-      ansible.builtin.include_role:
+    - name: Setup proxy site gitea.n39.eu
+      include_role:
         name: setup_http_site_proxy
       vars:
-        site_name: "{{ forgejo_domain_name }}"
+        site_name: "gitea.n39.eu"
         proxy_port: "{{ forgejo_host_port }}"
-      tags: ["forgejo"]
 
     - name: Ensure apt-cacher container is running
-      community.docker.docker_container:
+      docker_container:
         name: apt_cacher_ng
-        image: mrtux/apt-cacher-ng:latest
+        image: "mrtux/apt-cacher-ng"
         pull: true
         state: started
         restart_policy: unless-stopped

@@ -101,46 +94,11 @@
       env:
         TZ: "{{ timezone }}"
 
-    - name: Setup docker network
-      community.docker.docker_network:
-        name: shlinknet
-        state: present
-        internal: true
-      tags:
-        - shlink
-
-    - name: Ensure shlink data dir exists
-      ansible.builtin.file:
-        path: "{{ data_dir }}/shlink/data/database"
-        state: directory
-        mode: 0755
-      tags:
-        - shlink
-
-    - name: Ensure shlink database container is running
-      community.docker.docker_container:
-        name: shlinkdb
-        image: postgres:16.8-alpine
-        pull: true
-        state: started
-        restart_policy: unless-stopped
-        detach: yes
-        env:
-          TZ: "{{ timezone }}"
-          POSTGRES_USER: "shlink"
-          POSTGRES_PASSWORD: "{{ shlink_postgres_password }}"
-          POSTGRES_DB: "shlink"
-        volumes:
-          - "{{ data_dir }}/shlink/data/database:/var/lib/postgresql/data"
-        networks:
-          - name: shlinknet
-      tags:
-        - shlink
-
     - name: Ensure container for shlink is running
-      community.docker.docker_container:
+      docker_container:
         name: shlink
-        image: "{{ shlink_image }}"
+        image: shlinkio/shlink:2.6.2
         pull: true
         state: started
         detach: yes

@@ -149,31 +107,16 @@
       restart_policy: unless-stopped
       env:
         TZ: "{{ timezone }}"
-        DEFAULT_DOMAIN: "{{ shlink_domain_name }}"
-        INITIAL_API_KEY: "{{ shlink_initial_api_key }}"
-        DB_DRIVER: "postgres"
-        DB_HOST: shlinkdb
-        DB_NAME: "shlink"
-        DB_USER: "shlink"
-        DB_PASSWORD: "{{ shlink_postgres_password }}"
-      volumes:
-        - "{{ data_dir }}/shlink/database.sqlite:/etc/shlink/datadatabase.sqlite:rw"
-      networks_cli_compatible: false
-      comparisons:
-        networks: allow_more_present
-      networks:
-        - name: shlinknet
-      tags:
-        - shlink
+        SHORT_DOMAIN_HOST: "{{ shlink_domain_name }}"
+        SHORT_DOMAIN_SCHEMA: https
+        GEOLITE_LICENSE_KEY: "{{ shlink_geolite_license_key }}"
 
     - name: Setup proxy site {{ shlink_domain_name }}
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ shlink_domain_name }}"
         proxy_port: "{{ shlink_host_port }}"
-      tags:
-        - shlink
 
     - name: Check if prosody data dir exists
       ansible.builtin.stat:

@@ -184,7 +127,7 @@
         msg: "prosody data dir is missing, please restore from the backup!"
       when: not prosody_dir.stat.exists
 
-    - name: Ensure prosody main config dir exists
+    - name: Ensure prosody config dir exists
       ansible.builtin.file:
         path: "{{ prosody_config_dir }}"
         state: directory

@@ -204,7 +147,7 @@
         state: directory
         mode: 0755
 
-    - name: Ensure prosody conf.d dir exists
+    - name: Ensure prosody certs dir exists
       ansible.builtin.file:
         path: "{{ prosody_config_dir }}/conf.d"
         state: directory

@@ -235,9 +178,9 @@
         - Restart prosody
 
     - name: Ensure container for prosody XMPP server is running
-      community.docker.docker_container:
+      docker_container:
         name: prosody
-        image: "{{ prosody_image }}"
+        image: netz39/prosody:0.11
         pull: true
         state: started
         detach: true

@@ -257,9 +200,9 @@
 
 
     - name: Ensure container for static XMPP website is running
-      community.docker.docker_container:
+      docker_container:
         name: jabber-static-website
-        image: "{{ prosody_web_image }}"
+        image: joseluisq/static-web-server:2.14
         pull: true
         state: started
         detach: true

@@ -272,34 +215,26 @@
         - "127.0.0.1:{{ jabber_host_port }}:80"
       volumes:
         - "{{ prosody_data_dir }}/var/www:/public:ro"
-      tags:
-        - prosody-web
 
     - name: Setup proxy site {{ prosody_domain_name }}
       # point to static website for now
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ prosody_domain_name }}"
         proxy_port: "{{ jabber_host_port }}"
-      tags:
-        - prosody-web
 
     - name: Check if hedgedoc data dir exists
       ansible.builtin.stat:
         path: "{{ data_dir }}/hedgedoc"
       register: hedgedoc_dir
-      tags:
-        - hedgedoc
     - name: Fail if hedgedoc data dir does not exist
       ansible.builtin.fail:
         msg: "hedgedoc data dir is missing, please restore from the backup!"
       when: not hedgedoc_dir.stat.exists
-      tags:
-        - hedgedoc
 
     - name: Ensure the hedgedoc directories exist
-      ansible.builtin.file:
+      file:
         path: "{{ item.path }}"
         mode: "{{ item.mode }}"
         state: directory

@@ -308,21 +243,17 @@
         mode: "0700"
       - path: "{{ data_dir }}/hedgedoc/data/uploads"
         mode: "0755"
-      tags:
-        - hedgedoc
 
     - name: Setup docker network
-      community.docker.docker_network:
+      docker_network:
         name: hedgedocnet
         state: present
         internal: true
-      tags:
-        - hedgedoc
 
     - name: Install HedgeDoc database container
-      community.docker.docker_container:
+      docker_container:
         name: hedgedocdb
-        image: "{{ hedgedoc_db_image }}"
+        image: "postgres:11.6-alpine"
         pull: true
         state: started
         restart_policy: unless-stopped

@@ -336,11 +267,9 @@
         - "{{ data_dir }}/hedgedoc/data/database:/var/lib/postgresql/data"
       networks:
         - name: hedgedocnet
-      tags:
-        - hedgedoc
 
     - name: Ensure container for hedgedoc is running
-      community.docker.docker_container:
+      docker_container:
         name: hedgedoc
         image: "{{ hedgedoc_image }}"
         pull: true

@@ -353,7 +282,7 @@
           TZ: "{{ timezone }}"
           NODE_ENV: "production"
           CMD_PROTOCOL_USESSL: "true"
-          CMD_DOMAIN: "{{ hedgedoc_domain_name }}"
+          CMD_DOMAIN: "pad.n39.eu"
           CMD_URL_ADDPORT: "false"
           CMD_DB_HOST: "hedgedocdb"
           CMD_DB_PORT: "5432"

@@ -363,25 +292,18 @@
           CMD_DB_PASSWORD: "{{ hedgedoc_postgres_password }}"
         volumes:
           - "{{ data_dir }}/hedgedoc/data/uploads:/hedgedoc/public/uploads"
-      networks_cli_compatible: false
-      comparisons:
-        networks: allow_more_present
       networks:
         - name: hedgedocnet
-      tags:
-        - hedgedoc
 
-    - name: Setup proxy site "{{ hedgedoc_domain_name }}"
-      ansible.builtin.include_role:
+    - name: Setup proxy site pad.n39.eu
+      include_role:
         name: setup_http_site_proxy
       vars:
-        site_name: "{{ hedgedoc_domain_name }}"
+        site_name: pad.n39.eu
         proxy_port: "{{ hedgedoc_host_port }}"
-      tags:
-        - hedgedoc
 
     - name: Ensure the influxdb directories exist
-      ansible.builtin.file:
+      file:
         path: "{{ item }}"
         mode: 0700
         state: directory

@@ -391,7 +313,7 @@
       - "{{ data_dir }}/influxdb/cfg"
 
     - name: Ensure container for influxdb is running
-      community.docker.docker_container:
+      docker_container:
         name: influxdb
         image: "{{ influxdb_image }}"
         pull: true

@@ -411,7 +333,7 @@
         - "{{ data_dir }}/influxdb/cfg:/etc/influxdb2"
 
     - name: Setup proxy site {{ influxdb_domain_name }}
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ influxdb_domain_name }}"

@@ -426,25 +348,19 @@
       ansible.builtin.stat:
         path: "{{ data_dir }}/redmine"
       register: redmine_dir
-      tags:
-        - redmine
     - name: Fail if redmine data dir does not exist
       ansible.builtin.fail:
         msg: "Redmine data dir is missing, please restore from the backup!"
       when: not redmine_dir.stat.exists
-      tags:
-        - redmine
 
     - name: Setup Redmine docker network
-      community.docker.docker_network:
+      docker_network:
         name: redminenet
         state: present
         internal: true
-      tags:
-        - redmine
 
     - name: Setup Redmine MySQL container
-      community.docker.docker_container:
+      docker_container:
         name: redminedb
         image: "{{ redmine_mysql_image }}"
         pull: true

@@ -457,14 +373,11 @@
         MYSQL_DATABASE: "{{ redmine_database }}"
       volumes:
         - "{{ data_dir }}/redmine/mysql:/var/lib/mysql"
-        - "{{ data_dir }}/redmine/mysql-config:/etc/mysql/conf.d"
       networks:
         - name: redminenet
-      tags:
-        - redmine
 
     - name: Setup Redmine container
-      community.docker.docker_container:
+      docker_container:
         name: redmine
         image: "{{ redmine_image }}"
         pull: true

@@ -481,37 +394,28 @@
         - "{{ data_dir }}/redmine/configuration.yml:/usr/src/redmine/config/configuration.yml"
         - "{{ data_dir }}/redmine/files:/usr/src/redmine/files"
         - "{{ data_dir }}/redmine/themes:/usr/src/redmine/public/themes"
-      networks_cli_compatible: false
-      comparisons:
-        networks: allow_more_present
       networks:
         - name: redminenet
-      tags:
-        - redmine
 
-    - name: Setup proxy site "{{ redmine_domain_name }}"
-      ansible.builtin.include_role:
+    - name: Setup proxy site redmine.n39.eu
+      include_role:
         name: setup_http_site_proxy
       vars:
-        site_name: "{{ redmine_domain_name }}"
+        site_name: redmine.n39.eu
         proxy_port: "{{ redmine_host_port }}"
-      tags:
-        - redmine
 
     - name: Ensure the uptime-kuma directories exist
-      ansible.builtin.file:
+      file:
         path: "{{ item }}"
         mode: "0755"
         state: directory
       with_items:
         - "{{ data_dir }}/uptime-kuma"
-      tags:
-        - uptimekuma
 
     - name: Ensure container for uptime-kuma is running
-      community.docker.docker_container:
+      docker_container:
         name: uptime-kuma
-        image: "{{ uptimekuma_image }}"
+        image: "louislam/uptime-kuma:1"
         pull: true
         state: started
         detach: yes

@@ -522,20 +426,16 @@
         TZ: "{{ timezone }}"
       volumes:
         - "{{ data_dir }}/uptime-kuma:/app/data"
-      tags:
-        - uptimekuma
 
-    - name: Setup proxy site "{{ uptimekuma_domain_name }}"
-      ansible.builtin.include_role:
+    - name: Setup proxy site uptime.n39.eu
+      include_role:
         name: setup_http_site_proxy
       vars:
-        site_name: "{{ uptimekuma_domain_name }}"
+        site_name: uptime.n39.eu
         proxy_port: "{{ uptimekuma_host_port }}"
-      tags:
-        - uptimekuma
 
     - name: Ensure the grafana directories exist
-      ansible.builtin.file:
+      file:
         path: "{{ item.path }}"
         owner: "{{ item.owner | default('root') }}"
         mode: "{{ item.mode }}"

@@ -548,13 +448,11 @@
         mode: "0755"
       - path: "{{ data_dir }}/grafana/etc"
         mode: "0755"
-      tags:
-        - grafana
 
     - name: Ensure container for grafana is running
-      community.docker.docker_container:
+      docker_container:
         name: grafana
-        image: "{{ grafana_image }}"
+        image: "grafana/grafana:9.4.7"
         pull: true
         state: started
         detach: yes

@@ -569,21 +467,17 @@
           GF_SECURITY_ADMIN_PASSWORD: "{{ grafana_admin_password }}"
           GF_USERS_ALLOW_SIGN_UP: "false"
           GF_INSTALL_PLUGINS: "flant-statusmap-panel,ae3e-plotly-panel"
-      tags:
-        - grafana
 
-    - name: Setup proxy site "{{ grafana_domain_name }}"
-      ansible.builtin.include_role:
+    - name: Setup proxy site grafana.n39.eu
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ grafana_domain_name }}"
         proxy_port: "{{ grafana_host_port }}"
         proxy_preserve_host: "On"
-      tags:
-        - grafana
 
     - name: Ensure the homebox directories exist
-      ansible.builtin.file:
+      file:
         path: "{{ item.path }}"
         owner: "{{ item.owner | default('root') }}"
         mode: "{{ item.mode }}"

@@ -593,13 +487,11 @@
         mode: "0755"
       - path: "{{ data_dir }}/homebox/data"
         mode: "0755"
-      tags:
-        - homebox
 
     - name: Ensure container for homebox is running
-      community.docker.docker_container:
+      docker_container:
         name: homebox
-        image: "{{ homebox_image }}"
+        image: "ghcr.io/hay-kot/homebox"
         pull: true
         state: started
         detach: yes

@@ -613,21 +505,17 @@
         HBOX_LOG_LEVEL: "info"
         HBOX_LOG_FORMAT: "text"
         HBOX_WEB_MAX_UPLOAD_SIZE: "10"
-      tags:
-        - homebox
 
     - name: Setup proxy site {{ homebox_domain_name }}
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ homebox_domain_name }}"
         proxy_port: "{{ homebox_host_port }}"
         proxy_preserve_host: "On"
-      tags:
-        - homebox
 
     - name: Setup proxy site spaceapi.n39.eu
-      ansible.builtin.template:
+      template:
         src: templates/pottwal/spaceapi-apache-site.j2
         dest: /etc/apache2/sites-available/spaceapi.n39.eu.conf
         mode: "0644"

@@ -636,17 +524,7 @@
       proxy_preserve_host: "On"
|
proxy_preserve_host: "On"
|
||||||
notify: Restart apache2
|
notify: Restart apache2
|
||||||
tags:
|
tags:
|
||||||
- spaceapi
|
- dev
|
||||||
|
|
||||||
# Renovate configuration is sourced from `renovate.json` in each repository
|
|
||||||
- name: Ensure renovate bot cronjob is present
|
|
||||||
ansible.builtin.template:
|
|
||||||
src: templates/pottwal/renovate-cron.j2
|
|
||||||
dest: /etc/cron.hourly/renovate-bot
|
|
||||||
mode: "0700"
|
|
||||||
notify: Reload cron
|
|
||||||
tags:
|
|
||||||
- renovate
|
|
||||||
|
|
||||||
handlers:
|
handlers:
|
||||||
- name: Restart prosody
|
- name: Restart prosody
|
||||||
|
@ -656,14 +534,7 @@
|
||||||
restart: yes
|
restart: yes
|
||||||
|
|
||||||
- name: Restart apache2
|
- name: Restart apache2
|
||||||
ansible.builtin.service:
|
service:
|
||||||
name: apache2
|
name: apache2
|
||||||
state: restarted
|
state: restarted
|
||||||
|
|
||||||
- name: Reload cron
|
|
||||||
ansible.builtin.shell:
|
|
||||||
cmd: service cron reload
|
|
||||||
# Use the shell call because the task sometimes has problems finding the service state
|
|
||||||
# ansible.builtin.service:
|
|
||||||
# name: cron
|
|
||||||
# state: restarted
|
|
||||||
|
|
|
@@ -1,6 +1,5 @@
 ---
-- name: Setup things on host 'radon' (services for space automation)
-  hosts: radon.n39.eu
+- hosts: radon.n39.eu
   become: true

   vars:
@@ -8,24 +7,25 @@

     data_dir: "/srv/data"

-    mosquitto_image: eclipse-mosquitto:2.0.21
+    mosquitto_image: eclipse-mosquitto:2.0.14
     mosquitto_data: "{{ data_dir }}/mosquitto"

     nodered_image: nodered/node-red:3.0.1-1-18
     nodered_data: "{{ data_dir }}/nodered"

-    rabbitmq_image: bitnami/rabbitmq:4.0.7
+    rabbitmq_image: "bitnami/rabbitmq:3.10.7"
     rabbitmq_data: "{{ data_dir }}/rabbitmq"

-    pwr_meter_pulse_gw_image: netz39/power-meter-pulse-gateway:0.3.0
+    pwr_meter_pulse_gw_image: "netz39/power-meter-pulse-gateway:0.3.0"

     brotherql_host_port: 9004
+    brotherql_web_image: "pklaus/brother_ql_web:alpine_9e20b6d"

   roles:
-    # role 'netz39.host_docker' applied through group 'docker_host'
+    # role 'docker_setup' applied through group 'docker_host'
     - role: apache
     - role: apache_letsencrypt  # Uses configuration from dehydrated setup
-    - role: 24367dfa.dehydrated
+    - role: ansible-role-dehydrated
       vars:
         dehydrated_contact_email: "{{ server_admin }}"
         dehydrated_domains:
@@ -38,7 +38,7 @@

   tasks:
     - name: Ensure the mosquitto directories exist
-      ansible.builtin.file:
+      file:
         path: "{{ item }}"
         mode: 0755
         state: directory
@@ -46,20 +46,16 @@
         - "{{ mosquitto_data }}/config"
         - "{{ mosquitto_data }}/data"
         - "{{ mosquitto_data }}/log"
-      tags:
-        - mosquitto

     - name: Make sure mosquitto config is there
-      ansible.builtin.template:
+      template:
         src: "templates/mosquitto.conf.j2"
         dest: "{{ mosquitto_data }}/config/mosquitto.conf"
         mode: 0644
-      notify: Restart mosquitto container
+      notify: restart mosquitto
-      tags:
-        - mosquitto

     - name: Ensure mosquitto is running
-      community.docker.docker_container:
+      docker_container:
         name: mosquitto
         image: "{{ mosquitto_image }}"
         pull: true
@@ -76,8 +72,6 @@
         restart_policy: unless-stopped
         env:
           TZ: "{{ timezone }}"
-      tags:
-        - mosquitto

     - name: Check if nodered data dir exists
       ansible.builtin.stat:
@@ -89,7 +83,7 @@
       when: not nodered_dir.stat.exists

     - name: Ensure nodered is running
-      community.docker.docker_container:
+      docker_container:
         name: nodered
         image: "{{ nodered_image }}"
         pull: true
@@ -109,7 +103,7 @@
         restart_policy: unless-stopped

     - name: Setup proxy site nodered.n39.eu
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "nodered.n39.eu"
@@ -120,17 +114,13 @@
       ansible.builtin.stat:
         path: "{{ rabbitmq_data }}"
       register: rabbitmq_dir
-      tags:
-        - rabbitmq
     - name: Fail if rabbitmq data dir does not exist
       ansible.builtin.fail:
         msg: "RabbitMQ data dir is missing, please restore from the backup!"
       when: not rabbitmq_dir.stat.exists
-      tags:
-        - rabbitmq

     - name: Ensure rabbitmq docker container is running
-      community.docker.docker_container:
+      docker_container:
         name: rabbitmq
         image: "{{ rabbitmq_image }}"
         ports:
@@ -147,20 +137,17 @@
           - "{{ rabbitmq_data }}/bitnami:/bitnami:rw"
           - "{{ rabbitmq_data }}/etc_rabbitmq:/etc/rabbitmq:rw"
         restart_policy: unless-stopped
-      tags:
-        - rabbitmq

     - name: Setup proxy site rabbitmq.n39.eu
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "rabbitmq.n39.eu"
         proxy_port: 15672
-      tags:
-        - rabbitmq

     - name: Ensure Power Meter Pulse Gateway for 19i room is running
-      community.docker.docker_container:
+      docker_container:
         name: pwr-meter-pulse-gw-19i
         image: "{{ pwr_meter_pulse_gw_image }}"
         ports:
@@ -178,7 +165,7 @@
         restart_policy: unless-stopped

     - name: Setup proxy site pwr-meter-pulse-gw-19i.svc.n39.eu
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "pwr-meter-pulse-gw-19i.svc.n39.eu"
@@ -186,49 +173,30 @@


     - name: Setup docker container for BrotherQL Web UI printer
-      community.docker.docker_container:
+      docker_container:
         name: brotherql-web
-        image: dersimn/brother_ql_web:2.1.9-alpine
+        image: "{{ brotherql_web_image }}"
         pull: true
         restart_policy: unless-stopped
         state: started
         ports:
           - "127.0.0.1:{{ brotherql_host_port }}:8013"
-        command: "--default-label-size 62 --model QL-720NW tcp://{{ brotherql_printer_host }}"
+        command: " ./brother_ql_web.py --model QL-720NW tcp://{{ brotherql_printer_ip }}"
         detach: yes
         env:
           TZ: "{{ timezone }}"
-      tags:
-        - labelprinter

     - name: Setup proxy site labelprinter.n39.eu
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: labelprinter.n39.eu
         proxy_port: "{{ brotherql_host_port }}"
-      tags:
-        - labelprinter

-    - name: Setup docker container for Grafana Screenshots
-      community.docker.docker_container:
-        name: grafana-screenshot
-        image: mrtux/grafana-screenshot:0.1.3
-        pull: true
-        restart_policy: unless-stopped
-        detach: yes
-        env:
-          MQTT_BROKER_URL: "{{ kiosk_mqtt_host }}"
-          MQTT_TOPIC: "{{ kiosk_mqtt_topic }}"
-          GRAFANA_DASHBOARD_URL: "{{ kiosk_grafana_url }}"
-          GRAFANA_USERNAME: "{{ kiosk_grafana_user }}"
-          GRAFANA_PASSWORD: "{{ kiosk_grafana_pass }}"
-      tags:
-        - grafana-screenshot
-
   handlers:
-    - name: Restart mosquitto container
-      community.docker.docker_container:
+    - name: restart mosquitto
+      docker_container:
         name: mosquitto
         state: started
         restart: yes
host-tau.yml
60
host-tau.yml
|
@ -1,6 +1,5 @@
|
||||||
---
|
---
|
||||||
- name: Setup things on host 'tau' (vserver for wiki etc.)
|
- hosts: tau.netz39.de
|
||||||
hosts: tau.netz39.de
|
|
||||||
become: true
|
become: true
|
||||||
|
|
||||||
vars:
|
vars:
|
||||||
|
@ -15,18 +14,18 @@
|
||||||
dokuwiki_port: 9005
|
dokuwiki_port: 9005
|
||||||
# This container is pinned, because there are issues
|
# This container is pinned, because there are issues
|
||||||
# with backwards compatibility within the same tag!
|
# with backwards compatibility within the same tag!
|
||||||
dokuwiki_image: bitnami/dokuwiki:20240206.1.0
|
dokuwiki_image: "bitnami/dokuwiki:20220731@sha256:989ab52cf2d2e0f84166e114ca4ce88f59546b8f6d34958905f8d81c18cbd759"
|
||||||
|
|
||||||
discord_invite_domain: discord.netz39.de
|
discord_invite_domain: discord.netz39.de
|
||||||
|
|
||||||
roles:
|
roles:
|
||||||
# role 'netz39.host_docker' applied through group 'docker_host'
|
# role 'docker_setup' applied through group 'docker_host'
|
||||||
- role: apache
|
- role: apache
|
||||||
- role: penguineer.dehydrated_cron
|
- role: penguineer.dehydrated_cron
|
||||||
|
|
||||||
tasks:
|
tasks:
|
||||||
- name: Setup docker network
|
- name: Setup docker network
|
||||||
community.docker.docker_network:
|
docker_network:
|
||||||
name: dockernet
|
name: dockernet
|
||||||
driver: bridge
|
driver: bridge
|
||||||
ipam_config:
|
ipam_config:
|
||||||
|
@ -35,8 +34,8 @@
|
||||||
state: present
|
state: present
|
||||||
|
|
||||||
- name: Setup Dehydrated
|
- name: Setup Dehydrated
|
||||||
ansible.builtin.include_role:
|
include_role:
|
||||||
name: 24367dfa.dehydrated
|
name: ansible-role-dehydrated
|
||||||
vars:
|
vars:
|
||||||
dehydrated_contact_email: "{{ server_admin }}"
|
dehydrated_contact_email: "{{ server_admin }}"
|
||||||
dehydrated_domains:
|
dehydrated_domains:
|
||||||
|
@ -52,17 +51,17 @@
|
||||||
deploy_challenge_hook: "/bin/systemctl restart apache2"
|
deploy_challenge_hook: "/bin/systemctl restart apache2"
|
||||||
|
|
||||||
- name: Setup proxy site testredmine.netz39.de
|
- name: Setup proxy site testredmine.netz39.de
|
||||||
ansible.builtin.include_role:
|
include_role:
|
||||||
name: setup_http_site_proxy
|
name: setup_http_site_proxy
|
||||||
vars:
|
vars:
|
||||||
site_name: testredmine.netz39.de
|
site_name: testredmine.netz39.de
|
||||||
proxy_port: 9004
|
proxy_port: 9004
|
||||||
|
|
||||||
- name: Setup phpmyadmin
|
- name: Setup phpmyadmin
|
||||||
community.docker.docker_container:
|
docker_container:
|
||||||
name: phpmyadmin
|
name: phpmyadmin
|
||||||
state: started
|
state: started
|
||||||
image: phpmyadmin:5.2
|
image: phpmyadmin:5.0
|
||||||
networks_cli_compatible: true
|
networks_cli_compatible: true
|
||||||
networks:
|
networks:
|
||||||
- name: dockernet
|
- name: dockernet
|
||||||
|
@ -76,7 +75,7 @@
|
||||||
- 9001:80
|
- 9001:80
|
||||||
|
|
||||||
- name: Setup proxy site mysql.adm.netz39.de
|
- name: Setup proxy site mysql.adm.netz39.de
|
||||||
ansible.builtin.include_role:
|
include_role:
|
||||||
name: setup_http_site_proxy
|
name: setup_http_site_proxy
|
||||||
vars:
|
vars:
|
||||||
site_name: mysql.adm.netz39.de
|
site_name: mysql.adm.netz39.de
|
||||||
|
@ -86,22 +85,22 @@
|
||||||
- name: Check if Docker Registry auth dir exists
|
- name: Check if Docker Registry auth dir exists
|
||||||
ansible.builtin.stat:
|
ansible.builtin.stat:
|
||||||
path: "{{ data_dir }}/registry/auth"
|
path: "{{ data_dir }}/registry/auth"
|
||||||
register: docker_registry_auth_dir
|
register: docker_dir
|
||||||
- name: Fail if Docker Registry auth dir does not exist
|
- name: Fail if docker registry data dir does not exist
|
||||||
ansible.builtin.fail:
|
ansible.builtin.fail:
|
||||||
msg: "Docker Registry auth dir is missing, please restore from the backup!"
|
msg: "Docker Registry auth dir is missing, please restore from the backup!"
|
||||||
when: not docker_registry_auth_dir.stat.exists
|
when: not docker_dir.stat.exists
|
||||||
- name: Ensure the Docker Registry data directory exists
|
- name: Ensure the Docker Registry data directory exists
|
||||||
# This may not be part of the backup
|
# This may not be part of the backup
|
||||||
ansible.builtin.file:
|
file:
|
||||||
path: "{{ data_dir }}/registry/data"
|
path: "{{ data_dir }}/registry/data"
|
||||||
state: directory
|
state: directory
|
||||||
mode: "0755"
|
mode: "0755"
|
||||||
|
|
||||||
- name: Setup Docker Registry Container
|
- name: Setup Docker Registry Container
|
||||||
community.docker.docker_container:
|
docker_container:
|
||||||
name: registry
|
name: registry
|
||||||
image: registry:2
|
image: "registry:2"
|
||||||
pull: true
|
pull: true
|
||||||
state: started
|
state: started
|
||||||
restart_policy: unless-stopped
|
restart_policy: unless-stopped
|
||||||
|
@ -118,7 +117,7 @@
|
||||||
- "{{ data_dir }}/registry/auth:/auth:rw"
|
- "{{ data_dir }}/registry/auth:/auth:rw"
|
||||||
|
|
||||||
- name: Setup proxy site for the Docker Registry
|
- name: Setup proxy site for the Docker Registry
|
||||||
ansible.builtin.include_role:
|
include_role:
|
||||||
name: setup_http_site_proxy
|
name: setup_http_site_proxy
|
||||||
vars:
|
vars:
|
||||||
site_name: "{{ docker_registry_domain }}"
|
site_name: "{{ docker_registry_domain }}"
|
||||||
|
@ -129,25 +128,19 @@
|
||||||
ansible.builtin.stat:
|
ansible.builtin.stat:
|
||||||
path: "{{ data_dir }}/dokuwiki"
|
path: "{{ data_dir }}/dokuwiki"
|
||||||
register: dokuwiki_dir
|
register: dokuwiki_dir
|
||||||
tags:
|
|
||||||
- dokuwiki
|
|
||||||
- name: Fail if Dokuwiki data dir does not exist
|
- name: Fail if Dokuwiki data dir does not exist
|
||||||
ansible.builtin.fail:
|
ansible.builtin.fail:
|
||||||
msg: "Dokuwiki data dir is missing, please restore from the backup!"
|
msg: "Dokuwiki data dir is missing, please restore from the backup!"
|
||||||
when: not dokuwiki_dir.stat.exists
|
when: not dokuwiki_dir.stat.exists
|
||||||
tags:
|
|
||||||
- dokuwiki
|
|
||||||
|
|
||||||
- name: Set correct user for Dokuwiki data
|
- name: Set correct user for Dokuwiki data
|
||||||
ansible.builtin.file:
|
ansible.builtin.file:
|
||||||
path: "{{ data_dir }}/dokuwiki"
|
path: "{{ data_dir }}/dokuwiki"
|
||||||
owner: "1001" # According to container config
|
owner: "1001" # According to container config
|
||||||
recurse: yes
|
recurse: yes
|
||||||
tags:
|
|
||||||
- dokuwiki
|
|
||||||
|
|
||||||
- name: Setup Dokuwiki Container
|
- name: Setup Dokuwiki Container
|
||||||
community.docker.docker_container:
|
docker_container:
|
||||||
name: dokuwiki
|
name: dokuwiki
|
||||||
image: "{{ dokuwiki_image }}"
|
image: "{{ dokuwiki_image }}"
|
||||||
pull: true
|
pull: true
|
||||||
|
@ -162,23 +155,19 @@
|
||||||
- "{{ data_dir }}/dokuwiki:/bitnami/dokuwiki:rw"
|
- "{{ data_dir }}/dokuwiki:/bitnami/dokuwiki:rw"
|
||||||
env:
|
env:
|
||||||
TZ: "{{ timezone }}"
|
TZ: "{{ timezone }}"
|
||||||
tags:
|
|
||||||
- dokuwiki
|
|
||||||
|
|
||||||
- name: Setup proxy site for Dokuwiki
|
- name: Setup proxy site for Dokuwiki
|
||||||
ansible.builtin.include_role:
|
include_role:
|
||||||
name: setup_http_site_proxy
|
name: setup_http_site_proxy
|
||||||
vars:
|
vars:
|
||||||
site_name: "{{ dokuwiki_domain }}"
|
site_name: "{{ dokuwiki_domain }}"
|
||||||
proxy_port: "{{ dokuwiki_port }}"
|
proxy_port: "{{ dokuwiki_port }}"
|
||||||
tags:
|
|
||||||
- dokuwiki
|
|
||||||
|
|
||||||
|
|
||||||
- name: Setup container for secondary FFMD DNS
|
- name: Setup container for secondary FFMD DNS
|
||||||
community.docker.docker_container:
|
docker_container:
|
||||||
name: bind9-md-freifunk-net
|
name: bind9-md-freifunk-net
|
||||||
image: ffmd/bind9-md-freifunk-net:v2022122301
|
image: "ffmd/bind9-md-freifunk-net:2022111601"
|
||||||
pull: true
|
pull: true
|
||||||
state: started
|
state: started
|
||||||
restart_policy: unless-stopped
|
restart_policy: unless-stopped
|
||||||
|
@ -187,13 +176,12 @@
|
||||||
- "53:53/udp"
|
- "53:53/udp"
|
||||||
env:
|
env:
|
||||||
TZ: "{{ timezone }}"
|
TZ: "{{ timezone }}"
|
||||||
tags:
|
|
||||||
- ffmd-dns
|
|
||||||
|
|
||||||
- name: Setup forwarding for Discord invite
|
- name: Setup forwarding for Discord invite
|
||||||
ansible.builtin.include_role:
|
include_role:
|
||||||
name: setup_http_site_forward
|
name: setup-http-site-forward
|
||||||
vars:
|
vars:
|
||||||
site_name: "{{ discord_invite_domain }}"
|
site_name: "{{ discord_invite_domain }}"
|
||||||
# forward_to: "https://discord.com/invite/8FcDvAf"
|
# forward_to: "https://discord.com/invite/8FcDvAf"
|
||||||
forward_to: "https://sl.n39.eu/discord"
|
forward_to: "https://sl.n39.eu/discord"
|
||||||
|
|
||||||
|
|
|
@@ -1,20 +1,19 @@
 ---
 # this is for a dedicated vm just hosting the unifi controller.
-- name: Setup things on host 'unicorn' (vm for ubiquiti unifi controller)
-  hosts: unicorn.n39.eu
+- hosts: unicorn.n39.eu
   become: true
   vars:
     ansible_python_interpreter: /usr/bin/python3
     data_dir: "/srv/data"

   roles:
-    # role 'netz39.host_docker' applied through group 'docker_host'
+    # role 'docker_setup' applied through group 'docker_host'

   tasks:
     - name: Setup the docker container for unifi-controller
-      community.docker.docker_container:
+      docker_container:
         name: unifi-controller
-        image: jacobalberty/unifi:v9.0.114
+        image: "jacobalberty/unifi:v7.1.65"
         state: started
         restart_policy: unless-stopped
         container_default_behavior: no_defaults
@@ -23,13 +22,12 @@
         # These fixed ports are needed.
         # https://help.ui.com/hc/en-us/articles/218506997-UniFi-Ports-Used
         ports:
           - "8080:8080/tcp"    # Device command/control
           - "8443:8443/tcp"    # Web interface + API
           - "8843:8843/tcp"    # HTTPS portal
           - "8880:8880/tcp"    # HTTP portal
           - "3478:3478/udp"    # STUN service
           - "6789:6789/tcp"    # Speed Test (unifi5 only)
-          - "10001:10001/udp"  # Used for device discovery.
         volumes:
           - "{{ data_dir }}/unifi-controller/data:/unifi/data"
           - "{{ data_dir }}/unifi-controller/log:/unifi/log"
@@ -1,170 +0,0 @@
----
-- name: Setup things on host 'wittgenstein' (raspberry pi for ampel and spaceapi)
-  hosts: wittgenstein.n39.eu
-  become: true
-
-  roles:
-    - role: apache
-    - role: apache_letsencrypt  # Uses configuration from dehydrated setup
-    - role: 24367dfa.dehydrated
-      vars:
-        dehydrated_contact_email: "{{ server_admin }}"
-    - role: penguineer.dehydrated_cron
-
-  tasks:
-    - name: Install packages needed for the system
-      # This is a list of all packages,
-      # unless they are installed by a specific role
-      ansible.builtin.apt:
-        state: present
-        name:
-          # This is needed for the user-executed tasks
-          - acl
-          # Regular packages
-          - tmux
-          - git-core
-          - cmake
-          - build-essential
-          - libmosquitto-dev
-          - libconfig-dev
-          - mosquitto-clients
-          - python3-paho-mqtt
-          - i2c-tools
-
-
-    - name: Set MAC address for proper DHCP recognition
-      # Uses mac variable from inventory
-      ansible.builtin.template:
-        src: templates/network-interfaces-dhcp-mac.j2
-        dest: /etc/network/interfaces.d/wittgenstein-mac
-        owner: root
-        group: root
-        mode: '0644'
-
-    - name: Disable IPv6
-      # Because it is not working....
-      ansible.builtin.copy:
-        src: files/sysctl-no-ipv6.conf
-        dest: /etc/sysctl.d/99-systcl-no-ipv6.conf
-        owner: root
-        group: root
-        mode: '0644'
-
-
-    ### Gatekeeper user (pi for now)
-    #
-    # All the gatekeeping / door control stuff is here!
-
-    - name: Ensure gatekeeper user is there
-      ansible.builtin.user:
-        name: "{{ gatekeeper_user }}"
-        groups: dialout,audio,plugdev,input,netdev,i2c,gpio
-        append: yes
-
-    - name: Copy management scripts
-      ansible.builtin.copy:
-        src: "files/wittgenstein/{{ item }}"
-        dest: "/home/{{ gatekeeper_user }}/{{ item }}"
-        owner: "{{ gatekeeper_user }}"
-        group: "{{ gatekeeper_user }}"
-        mode: "0750"
-      loop:
-        - reboot.sh
-        - unstuck.sh
-        - switch-on.sh
-        - switch-off.sh
-
-    - name: Install start-up cron
-      ansible.builtin.cron:
-        name: Start the gatekeeper services
-        job: "/home/{{ gatekeeper_user }}/reboot.sh"
-        user: "{{ gatekeeper_user }}"
-        special_time: reboot
-
-
-    - name: Download wiringPi library
-      # WiringPi needs to be installed, but that library seems to be
-      # obsolete. We download something and hope it works...
-      ansible.builtin.get_url:
-        url: https://project-downloads.drogon.net/wiringpi-latest.deb
-        dest: "/home/{{ gatekeeper_user }}/wiringpi-latest.deb"
-        mode: "0644"
-        force: no
-      register: wiringPi_download
-
-    - name: Install wiringPi library  # noqa: no-handler
-      ansible.builtin.apt:
-        state: present
-        deb: "/home/{{ gatekeeper_user }}/wiringpi-latest.deb"
-      when: wiringPi_download.changed
-
-
-    ### Ampelsteuerung
-    - name: Clone netz39_space_notification initial checkout
-      # Do this as the gatekeeper user!
-      become: yes
-      become_user: "{{ gatekeeper_user }}"
-      ansible.builtin.git:
-        repo: https://github.com/netz39/space_notification.git
-        dest: "/home/{{ gatekeeper_user }}/netz39_space_notification"
-        clone: yes
-        update: no
-
-    - name: Compile ledcontrol agent
-      # Do this as the gatekeeper user!
-      become: yes
-      become_user: "{{ gatekeeper_user }}"
-      ansible.builtin.shell:
-        chdir: "/home/{{ gatekeeper_user }}/netz39_space_notification/raspberry/ledcontrol"
-        cmd: make
-        creates: "/home/{{ gatekeeper_user }}/netz39_space_notification/raspberry/ledcontrol/ledcontrol"
-
-    - name: Compile statusswitch agent
-      # Do this as the gatekeeper user!
-      become: yes
-      become_user: "{{ gatekeeper_user }}"
-      ansible.builtin.shell:
-        chdir: "/home/{{ gatekeeper_user }}/netz39_space_notification/raspberry/statusswitch"
-        cmd: make
-        creates: "/home/{{ gatekeeper_user }}/netz39_space_notification/raspberry/statusswitch/statusswitch"
-
-    ### Space API
-    - name: Setup the SpaceAPI Docker container
-      community.docker.docker_container:
-        name: spaceapi
-        image: "{{ spaceapi_image }}"
-        pull: true
-        state: started
-        detach: yes
-        restart_policy: unless-stopped
-        ports:
-          - "0.0.0.0:{{ spaceapi_host_port }}:8080"  # Must be reached by pottwal
-          # - "127.0.0.1:{{ spaceapi_host_port }}:8080"
-        env:
-          TZ: "{{ timezone }}"
-          MQTT_BROKER: "platon.n39.eu"
-          MQTT_TOPIC_STATUS: "{{ spaceapi_topic_status }}"
-          MQTT_TOPIC_LASTCHANGE: "{{ spaceapi_topic_lastchange }}"
-      tags:
-        - spaceapi
-
-    - name: Setup the Ampel Controller Docker container
-      community.docker.docker_container:
-        name: ampelcontroller
-        image: "{{ ampelcontroller_image }}"
-        pull: true
-        state: started
-        detach: yes
-        restart_policy: unless-stopped
-        env:
-          TZ: "{{ timezone }}"
-          MQTT_BROKER: "platon.n39.eu"
-          MQTT_LEVER_STATE_TOPIC: "{{ topic_lever_state }}"
-          MQTT_DOOR_EVENTS_TOPIC: "{{ topic_door_events }}"
-          MQTT_SPACESTATUS_ISOPEN_TOPIC: "{{ spaceapi_topic_status }}"
-          MQTT_SPACESTATUS_LASTCHANGE_TOPIC: "{{ spaceapi_topic_lastchange }}"
-          MQTT_TRAFFIC_LIGHT_TOPIC: "{{ topic_traffic_light }}"
-      tags:
-        - spaceapi
-
-  handlers:
host_vars/cisco-2960-1.n39.eu/port-assignments.yml (new file)

@@ -0,0 +1,310 @@
+---
+ios_interfaces:
+  GigabitEthernet1/0/1:
+    description: MGMT
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 1
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/2:
+    description: MGMT
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 1
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/3:
+    description: lan
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 4
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/4:
+    description: lan
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 4
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/5:
+    description: lan
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 4
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/6:
+    description: USV
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 1
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/7:
+    description: beaker
+    enabled: true
+    l2:
+      mode: trunk
+      access_vlan:
+      trunk_allowed_vlans: all
+      trunk_native_vlan: 1
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/8:
+    description: beaker
+    enabled: true
+    l2:
+      mode: trunk
+      access_vlan:
+      trunk_allowed_vlans: all
+      trunk_native_vlan: 1
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/9:
+    description: beaker ipmi
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 1
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/10:
+    description: lan
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 4
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/11:
+    description: lan
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 4
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/12:
+    description: lan
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 4
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/13:
+    description: patchfeld 1 - Switch an Ausleihliste
+    enabled: true
+    l2:
+      mode: trunk
+      access_vlan:
+      trunk_allowed_vlans: 1,4,5,7,8,11
+      trunk_native_vlan: 4
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/14:
+    description: lan
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 4
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/15:
+    description: patchfeld 2 - Raspberry Pi Platon
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 4
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/16:
+    description: lan
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 4
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/17:
+    description: patchfeld 6 - Access Point Hempels Zimmer
+    enabled: true
+    l2:
+      mode: trunk
+      access_vlan:
+      trunk_allowed_vlans: 1,4,5,7,8,11
+      trunk_native_vlan: 4
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/18:
+    description: lan
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 4
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/19:
+    description: FräsPC
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 4
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/20:
+    description: patchfeld 4 - Switch am Basteltisch
+    enabled: true
+    l2:
+      mode: trunk
+      access_vlan:
+      trunk_allowed_vlans: 1,4,5,7,8,11
+      trunk_native_vlan: 4
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/21:
+    description: lan
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 4
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/22:
+    description: lan
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 4
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/23:
+    description: lan
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 4
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/24:
+    description: lan
+    enabled: true
+    l2:
+      mode: access
+      access_vlan: 4
+      trunk_allowed_vlans:
+      trunk_native_vlan:
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/25:
+    description: uplink von descartes
+    enabled: true
+    l2:
+      mode: trunk
+      access_vlan:
+      trunk_allowed_vlans: all
+      trunk_native_vlan: 1
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/26:
+    description: marx
+    enabled: true
+    l2:
+      mode: trunk
+      access_vlan:
+      trunk_allowed_vlans: all
+      trunk_native_vlan: 1
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/27:
+    description: unconfigured
+    enabled: true
+    l2:
+      mode: trunk
+      access_vlan:
+      trunk_allowed_vlans: all
+      trunk_native_vlan: 1
+      state: present
+    lines: []
+    state: present
+  GigabitEthernet1/0/28:
+    description: unconfigured
+    enabled: true
+    l2:
+      mode: trunk
+      access_vlan:
+      trunk_allowed_vlans: all
+      trunk_native_vlan: 1
+      state: present
+    lines: []
+    state: present
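The `ios_interfaces` structure above is plain host-vars data; the play that consumes it is not part of this compare view. As a hedged illustration only — the play, the Jinja reshaping, and the use of the `cisco.ios` collection are assumptions, not code from this repository — the access-port entries could be fed to `cisco.ios.ios_l2_interfaces` like this:

```yaml
# Illustrative sketch, not part of this changeset.
# Assumes the cisco.ios collection is installed and the host_vars above apply.
- hosts: cisco-2960-1.n39.eu
  gather_facts: false
  tasks:
    - name: Configure access ports from ios_interfaces host_vars
      cisco.ios.ios_l2_interfaces:
        # Reshape the mapping into the list format the module expects.
        config: >-
          [{% for ifname, port in ios_interfaces.items()
                if port.l2.mode == 'access' %}
            {"name": "{{ ifname }}",
             "mode": "access",
             "access": {"vlan": {{ port.l2.access_vlan }}}},
          {% endfor %}]
        state: merged
```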
host_vars/cisco-2960-1.n39.eu/vars.yml (new file)

@@ -0,0 +1,7 @@
+---
+ansible_host: 172.23.63.45
+ansible_user: admin
+ansible_password: "{{ vault_ansible_password }}"
+ansible_become_password: "{{ vault_ansible_password }}"
+ansible_connection: network_cli
+ansible_network_os: ios
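These connection vars switch the host to the `network_cli` plugin with the `ios` network OS, which is why the switch cannot sit in the Debian-oriented groups. A minimal connectivity check could look like the following sketch (illustrative; this play is not in the changeset):

```yaml
# Illustrative sketch: verify the network_cli settings from vars.yml work.
- hosts: cisco-2960-1.n39.eu
  gather_facts: false
  tasks:
    - name: Gather minimal IOS facts over the CLI connection
      cisco.ios.ios_facts:
        gather_subset: min
```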
host_vars/cisco-2960-1.n39.eu/vault (new file)

@@ -0,0 +1,8 @@
+$ANSIBLE_VAULT;1.1;AES256
+64656665316637386363313263653532393161656531336262613266363231373537396633386231
+3834356536353235356630333766396233626130653237380a396137336262376539373838623762
+35396361643263313239386637653330646134616333333961306537306464626461626462626665
+3763386531316433390a323533353531363335306663343632326562663334303466663664363530
+38613135333336656131646534633839386330323164643338333763373839306566656633633161
+62643964343763316264376366636562316336616665663865336633373266353931366336346666
+616135333836343436633136636163656138
host_vars/cisco-2960-1.n39.eu/vlans.yml (new file)

@@ -0,0 +1,23 @@
+---
+ios_vlans:
+  1:
+    name: MGMT
+    state: present
+  4:
+    name: lan
+    state: present
+  5:
+    name: wan
+    state: present
+  7:
+    name: service
+    state: present
+  8:
+    name: legacy
+    state: present
+  9:
+    name: dmz
+    state: present
+  11:
+    name: ffmd-client
+    state: present
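`ios_vlans` keys each VLAN by ID with a name and desired state. As a hedged sketch (again assuming the `cisco.ios` collection; this task is not part of the diff), the mapping could be reshaped for `cisco.ios.ios_vlans`, which expects a list of VLAN entries:

```yaml
# Illustrative sketch: reshape the ios_vlans mapping into the list format
# expected by cisco.ios.ios_vlans.
- name: Ensure VLANs from host_vars exist on the switch
  cisco.ios.ios_vlans:
    config: >-
      [{% for vid, vlan in ios_vlans.items() %}
        {"vlan_id": {{ vid }}, "name": "{{ vlan.name }}"},
      {% endfor %}]
    state: merged
```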
@@ -2,6 +2,8 @@
 server_admin: "admin+hobbes@netz39.de"
 mac: "b8:27:eb:f9:43:43"
 kiosk_user: pi
-kiosk_mqtt_host: "mqtt.n39.eu"
-kiosk_mqtt_topic: "Netz39/Things/HackingDashboard/Screenshot"
-docker_data_root: "/srv/docker"
+kiosk_software_version: v1.0.6
+kiosk_software_arch: "armv7"
+kiosk_url: "https://grafana.n39.eu/d/xpLj6UD4z/hobbes-space-monitor?orgId=1"
+kiosk_grafana_user: "{{ vault_kiosk_grafana_user }}"
+kiosk_grafana_pass: "{{ vault_kiosk_grafana_pass }}"
@@ -1,6 +1,9 @@
 $ANSIBLE_VAULT;1.1;AES256
-32343732363234396136616164383833316634373061376235656566303761646461626138363432
-3264633461383739393138396233303839363132343866370a306433306364666438623434383036
-63366634313937623736393636393030333961323335323762663538373631353331353162613362
-3661653539306365350a333263383537643738373939376563356566313732613766303931633630
-6462
+32313738636231313036633334333934643839636563646334336533316436653263623461643438
+6362343635626266313466643465343962663931623662320a316635613231313930343937363064
+33326164333137633039376363643539346463303934333430626431336637326638363233333234
+3132333533376134380a383837616331303536623665383735663531343538366332313236386137
+62306436663934383363616332316262313762633261396535663533636665633532316366386430
+65343830376634633365343337313433643465323662313563366463393664653766623338623635
+30653263303761316238396634346337636461643231303561353133643162633934323161663539
+66646364373034633334
@@ -1,3 +0,0 @@
----
-server_admin: "admin+plumbum@netz39.de"
-mac: "32:A3:94:A0:23:77"
@@ -9,57 +9,38 @@ cleanuri_amqp_user: "cleanuri"
 cleanuri_amqp_pass: "{{ vault_cleanuri_amqp_pass }}"
 cleanuri_amqp_vhost: "/cleanuri"

-forgejo_host_port: 9091
-forgejo_ssh_port: 2222
-forgejo_domain_name: git.n39.eu
-forgejo_image: codeberg.org/forgejo/forgejo:10.0.3
-
 shlink_host_port: 8083
 shlink_domain_name: sl.n39.eu
-shlink_image: shlinkio/shlink:4.4.6
-shlink_initial_api_key: "{{ vault_shlink_initial_api_key }}"
-shlink_postgres_password: "{{ vault_shlink_postgres_password }}"
+shlink_geolite_license_key: "{{ vault_shlink_geolite_license_key }}"

 hedgedoc_host_port: 8084
-hedgedoc_domain_name: pad.n39.eu
-hedgedoc_image: quay.io/hedgedoc/hedgedoc:1.10.2
-hedgedoc_db_image: postgres:16.8-alpine
+hedgedoc_image: quay.io/hedgedoc/hedgedoc:1.9.3
 hedgedoc_postgres_password: "{{ vault_hedgedoc_postgres_password }}"

 redmine_host_port: 8087
-redmine_domain_name: redmine.n39.eu
-redmine_image: redmine:6.0.4
-redmine_mysql_image: mysql:9.2
+redmine_image: redmine:4.2.7
+redmine_mysql_image: mysql:5.7
 redmine_database: redmine
 redmine_database_password: "{{ vault_redmine_database_password }}"

 influxdb_host_port: 8088
 influxdb_domain_name: influx.n39.eu
-influxdb_image: influxdb:2.7-alpine
+influxdb_image: influxdb:2.4-alpine
 influxdb_init_username: admin
 influxdb_init_password: "{{ vault_influxdb_init_password }}"

-jabber_host_port: 8086
-prosody_domain_name: jabber.n39.eu
-prosody_image: netz39/prosody:0.11
-prosody_web_image: joseluisq/static-web-server:2.36
+forgejo_host_port: 9091
 prosody_config_dir: "/etc/prosody"
 prosody_data_dir: "{{ data_dir }}/prosody"
+prosody_domain_name: jabber.n39.eu
+jabber_host_port: 8086

 uptimekuma_host_port: 8085
-uptimekuma_domain_name: uptime.n39.eu
-uptimekuma_image: louislam/uptime-kuma:1.23.16

 grafana_host_port: 8089
 grafana_domain_name: grafana.n39.eu
-grafana_image: grafana/grafana:11.6.0
 grafana_admin_password: "{{ vault_grafana_admin_password }}"

 homebox_host_port: 8092
 homebox_domain_name: inventory.n39.eu
-homebox_image: ghcr.io/hay-kot/homebox:v0.10.3
-
-renovate_image: renovate/renovate:39.220.1
-renovate_forgejo_pat: "{{ vault_renovate_forgejo_pat }}"
-renovate_github_pat: "{{ vault_renovate_github_pat }}"
-renovate_git_user: "Renovate Bot <accounts+renovatebot@netz39.de>"
@@ -1,33 +1,20 @@
 $ANSIBLE_VAULT;1.1;AES256
-61323135656430613464613334653239613865623361363734306139383261653563373365306364
-3232353634356664323235393135653762383538353635660a363461393133376566613064366233
-32323065633164646535386461373261373461343961383333333063663831353961656265313836
-6231356666356266390a333230376264313537376461326331313134313737616137636465336430
-38616261333534393464343630616464326331653163616435613863616165633730353263656565
-66346536393737353962666438333661663636636339613633653363323438326635643738656430
-38313635323066376532396666653633393736633939306566333337336635386430373662666534
-64653662333832313964323039353838353638313337306631613564383933663166633164373132
-33326537366135613733386436663366613238636133343065376534323561656265613433336637
-64613330306530323238663738356133663166303730633735656562636139626232396130656337
-34323238326437303730643736646430646239383239613061333033343733663832656262383732
-66343236326537633539353230376365666462393264303532346431383838303963613731343263
-63656630623934643763636237366630386333646263336261386162656439323232653066393266
-36633239323638396639623734666466343164663539316165386632306235363435303139356433
-37633731366565393339326235616264616535363461653531613331356239666534653232376235
-36623431343136633964656330313833643161353738303564663662363062653631363661633333
-31663339643034333336313630356266393062323637333664646335363961386433303662343734
-32313338613064373966393163623863633037353564316361656162323234313435646532343231
-30356336626435306332316566323932313564626164316165646530656365363330643033376134
-32363530306536633531326535373136326364356237376264646130663430343838323834386264
-35306561353866346430393837346333396236356465666334656139373764653365396534613034
-36393239623930656266336130303236393336373063623738653939393563336130316461393535
-32313932396263306439356663373361393539633639343238393631343830306532336162616565
-32336264646333613238363065613130633966656164666333303332313536616334623639613630
-34323665366131663736623638636263616131393133346464653037366465633332363131316332
-65356563373036353432376234626262313266316435656562646365363539386361653966366465
-39383536313764663732613462383466616238363765633062333830373038656334363764643663
-61346664353064333238313038303333386436653738316630383237366532353765346633383862
-65666235666663666638656337303762626563663135613431616439633731383638653466623434
-62663164633032666638656464666130623566356636343330386236336266386263323936396330
-31613339623034663466613930613062343666633530306136623734393862333365646538326261
-63646439343565366463
+35323634353263613464653863633861303539636238333464653633653164353632306233626231
+3766386232326433383932636136313831346131336335360a383639643334613033336134373566
+36343465336365363732363931383031356532336235313537396338316465366537313032616261
+6634393966623662390a383335316661613332633165333936396335666637306163363133323363
+33613639306537396136643438623937363336376332353634333130313434623433303264393461
+38663337396465343937396438333261393463303866306234323138396563653837373334356239
+64653231633066656662306530656139316530316263356135363538303061646432353338323237
+66663161333133313762366261343434666238376537636433313461343065646565633130333061
+33376537316338666662643639623637396366336263656537326363663936616234343235373636
+33373039373033333533363366356435633863633434643963633664613238363961363733366437
+61353936613065303230616239646334313130636133653461663561303037383663643761376235
+33303661663063613635306438613738613064386466656430343130356131663262353239326334
+63323630333466356263646162336437646133616565353430313737313332363330663236383830
+33366138303665386561393136616238346335633163313330386434323239623736333562363862
+66636165373264353132626232633537613536303362366535653438303261323735666331363439
+61613939373333616364303134353437333965386239623933393932373434666234373736316166
+63373935356162326230653437643030313262373965353831396361646136663938336334646633
+65313166613131396665393363633166663137363564393063363330366364373936643831373030
+333465303435636163616236306264646666
@@ -3,11 +3,5 @@ server_admin: "admin+radon@netz39.de"
 pwr_meter_amqp_user: "pwr-meter"
 pwr_meter_amqp_pass: "{{ vault_pwr_meter_amqp_pass }}"
 pwr_meter_api_token: "{{ vault_pwr_meter_api_token }}"
-brotherql_printer_host: "brotherql-720nw.n39.eu"
-
-# URL for the grafana kiosk in our Bastelbereich
-kiosk_grafana_url: "https://grafana.n39.eu/d/xpLj6UD4z/hobbes-space-monitor?orgId=1&kiosk"
-kiosk_grafana_user: "{{ vault_kiosk_grafana_user }}"
-kiosk_grafana_pass: "{{ vault_kiosk_grafana_pass }}"
-kiosk_mqtt_host: "mqtt.n39.eu"
-kiosk_mqtt_topic: "Netz39/Things/HackingDashboard/Screenshot"
+# See https://gitea.n39.eu/Netz39_Admin/config.descartes/src/branch/live/dns_dhcp.txt
+brotherql_printer_ip: "172.23.48.53"
@@ -1,14 +1,10 @@
 $ANSIBLE_VAULT;1.1;AES256
-64396666646432653766656333333139613631333035393137363036633330336134383932663631
-6533326532333366633136346232306637306266343264380a616164643037393036383834313238
-32343437343466343262336137633436343935663465616364303961656565643134346563373461
-3337303534646563660a366562323065666630626331346266366139653533386238663361373930
-30623733336361353838373730316537623066326166366634643466386332396333613531613564
-38373363303466346639343563356339303037323261383034316439326237636565633462336462
-35313561356465393337616162323866393365613537333461656234313464653165333963343331
-32343634383335663764353831303864373637393833366333376635343665396366616363663033
-37323031316535636131333738633237383665323638613233666432316261376239636234626638
-33313230643563316662323937656338613362646466323335376363626163383233623831643565
-31393438363334653863363536373632333930616636323237336236353863616638616165303931
-63333639393665633537646665613933323632376162363139656632323166393264313333653163
-64333935356138336562366634636364346461356539363162616438613232306533
+61393134306361663861356132333135633566626136383536363763646134386338363362343830
+6339626232333037613437386634396138323438643037390a366338353862653439323961626532
+37393438326261363563323233333364323536373735383834383134653935383436356137396166
+3531326465363438310a663232306138333866373637336234326166666261333332386632316163
+61616339656436666233343339383835643934366661366333386363386639306631643366623333
+30666430623435633961613932323239343239623532316662323937346634656136396539303036
+63363365363861646333386364373263303037663266323832663761633633663136616338323362
+36326561623063646666373034333335373135343736633066393937653234313932363138643065
+38646231333564303861633231353535623436326135303463613738346231633962
@@ -1,17 +0,0 @@
----
-server_admin: "admin+wittgenstein@netz39.de"
-mac: "b8:27:eb:48:f1:59"
-ansible_python_interpreter: /usr/bin/python3
-gatekeeper_user: pi
-data_dir: "/srv/data"
-
-spaceapi_host_port: 8001
-spaceapi_domain_name: spaceapi.n39.eu
-spaceapi_image: netz39/spaceapi-service:0.1.1
-spaceapi_topic_status: "Netz39/SpaceAPI/isOpen"
-spaceapi_topic_lastchange: "Netz39/SpaceAPI/lastchange"
-
-ampelcontroller_image: netz39/ampel-controller:0.2.0
-topic_lever_state: "Netz39/Things/StatusSwitch/Lever/State"
-topic_door_events: "Netz39/Things/Door/Events"
-topic_traffic_light: "Netz39/Things/Ampel/Light"
inventory.yml

@@ -8,19 +8,16 @@ all:
     krypton.n39.eu:
     oganesson.n39.eu:
     platon.n39.eu:
-    plumbum.n39.eu:
     pottwal.n39.eu:
     radon.n39.eu:
     unicorn.n39.eu:
-    wittgenstein.n39.eu:
     k3s-c1.n39.eu:
     k3s-c2.n39.eu:
     k3s-c3.n39.eu:
     k3s-w1.n39.eu:
     k3s-w2.n39.eu:
     k3s-w3.n39.eu:
-    # Host rhodium.n39.eu is the OpenWRT router, but cannot be added here
-    # as it would be treated like a Debian host
+    cisco-2960-1.n39.eu:
 
   children:
     docker_host:
@@ -30,13 +27,11 @@ all:
         radon.n39.eu:
         tau.netz39.de:
         unicorn.n39.eu:
-        wittgenstein.n39.eu:
     proxmox:
       hosts:
         holmium.n39.eu:
         krypton.n39.eu:
         oganesson.n39.eu:
-        plumbum.n39.eu:
         pottwal.n39.eu:
         radon.n39.eu:
         unicorn.n39.eu:
@@ -54,7 +49,6 @@ all:
         krypton.n39.eu:
         oganesson.n39.eu:
         platon.n39.eu:
-        plumbum.n39.eu:
         pottwal.n39.eu:
         radon.n39.eu:
         wittgenstein.n39.eu:
@@ -76,3 +70,6 @@ all:
         k3s-w1.n39.eu:
         k3s-w2.n39.eu:
         k3s-w3.n39.eu:
+    network:
+      hosts:
+        cisco-2960-1.n39.eu:
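The feat/cisco side adds the switch as `cisco-2960-1.n39.eu` in a new `network` group, which keeps it out of the Debian-oriented plays that target the other groups. Its host_vars are not part of the hunks shown here; the following is only a sketch of what connection settings for a Catalyst 2960 typically look like with the `cisco.ios` collection (all names and values below are illustrative assumptions, not taken from this repository):

```yaml
# Hypothetical host_vars/cisco-2960-1.n39.eu/vars.yml (illustrative only)
ansible_connection: ansible.netcommon.network_cli  # CLI transport over SSH instead of the default Python remoting
ansible_network_os: cisco.ios.ios                  # platform plugin for IOS devices
ansible_user: admin                                # placeholder credentials
ansible_password: "{{ vault_cisco_password }}"     # assumed vault-encrypted variable
```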
main.yml

@@ -39,9 +39,3 @@
 
 - name: Hobbes specific setup
   import_playbook: host-hobbes.yml
-
-- name: Plumbum specific setup
-  import_playbook: host-plumbum.yml
-
-- name: Wittgenstein specific setup
-  import_playbook: host-wittgenstein.yml
renovate.json

@@ -1,24 +0,0 @@
-{
-  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
-  "regexManagers": [
-    {
-      "fileMatch": [
-        "^host-.*.yml$",
-        "^host_vars/.*/vars.yml$"
-      ],
-      "datasourceTemplate": "docker",
-      "versioningTemplate": "docker",
-      "matchStrings": [
-        "image: (?<depName>.*?):(?<currentValue>.*?)(@(?<currentDigest>sha256:.*?))?\\s"
-      ]
-    }
-  ],
-  "packageRules": [
-    {
-      "matchDatasources": ["docker"],
-      "matchPackageNames": ["renovate/renovate"],
-      "schedule": [ "before 1am on friday" ],
-      "automerge": true
-    }
-  ]
-}
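The custom matcher above turns image-pin variables in host vars files into Renovate updates: any variable ending in `image:` whose value looks like `repo/name:tag` is picked up. For instance, the `spaceapi_image` pin from the deleted vars file earlier in this diff would be captured as follows (capture groups shown as a comment; the annotation is illustrative):

```yaml
spaceapi_image: netz39/spaceapi-service:0.1.1  # depName = netz39/spaceapi-service, currentValue = 0.1.1
```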
requirements.yml

@@ -1,26 +1,18 @@
 ---
 roles:
-  - name: adriagalin.timezone
-    src: git+https://github.com/adriagalin/ansible.timezone.git
-    version: 4.0.0
-  - name: 24367dfa.dehydrated
-    src: git+https://github.com/24367dfa/ansible-role-dehydrated.git
-    version: 2.1.0
-  - name: penguineer.dehydrated_cron
-    src: https://github.com/penguineer/ansible-role-dehydrated_cron.git
-    version: v1.1.0
-  - name: maz3max.ble_keykeeper
-    src: git+https://github.com/maz3max/ble-keykeeper-role.git
+  - src: hifis.unattended_upgrades
+    version: v1.12.2
+  - src: git+https://github.com/adriagalin/ansible.timezone.git
+    version: 3.0.0
+  - src: git+https://github.com/24367dfa/ansible-role-dehydrated.git
+    version: 1.0.3
+  - src: penguineer.dehydrated_cron
+    version: v1.0.0
+  - src: git+https://github.com/maz3max/ble-keykeeper-role.git
     version: v1.1.0
   - src: lespocky.telegraf_docker_in_docker
-    version: v0.2.2
-  - name: netz39.host_docker
-    src: git+https://github.com/netz39/ansible-role-host-docker.git
-    version: v0.5.0
+    version: v0.2.1
 
 collections:
   - name: community.grafana
-    version: 2.1.0
-  # for role 'hifis.toolkit.unattended_upgrades'
-  - name: hifis.toolkit
-    version: 5.3.0
+    version: 1.5.3
@@ -1,6 +1,6 @@
 # Handlers for role apache
 ---
-- name: Restart apache2
-  ansible.builtin.service:
+- name: restart apache2
+  service:
     name: apache2
     state: restarted
@@ -1,12 +1,12 @@
 ---
 - name: Ensure Apache2 and modules are installed and up to date
-  ansible.builtin.apt:
+  apt:
     name:
       - apache2
     state: present
 
 - name: Ensure necessary modules are enabled
-  community.general.apache2_module:
+  apache2_module:
     name: "{{ item }}"
     state: present
   with_items:
@@ -23,7 +23,7 @@
     mode: "0644"
     owner: root
     group: root
-  notify: Restart apache2
+  notify: restart apache2
 
 - name: Add symlink to enable configuration
   ansible.builtin.file:
@@ -32,4 +32,4 @@
     state: link
     owner: root
     group: root
-  notify: Restart apache2
+  notify: restart apache2
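The pattern in this role (and in the roles below) is the same throughout: the master side uses fully qualified collection names (`ansible.builtin.*`, `community.general.*`) and capitalized handler names, and each `notify` must match the handler name verbatim. A minimal sketch of that pairing on the master side, with placeholder file names:

```yaml
# tasks/main.yml: notify must quote the handler's exact name
- name: Deploy a vhost configuration             # illustrative task
  ansible.builtin.template:
    src: templates/example-site.conf.j2          # placeholder template
    dest: /etc/apache2/sites-available/example.conf
  notify: Restart apache2

# handlers/main.yml
- name: Restart apache2
  ansible.builtin.service:
    name: apache2
    state: restarted
```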
@@ -1,6 +1,6 @@
 # Handlers for role apache_letsencrypt
 ---
-- name: Restart apache2
-  ansible.builtin.service:
+- name: restart apache2
+  service:
     name: apache2
     state: restarted
@@ -7,7 +7,7 @@
     mode: "0644"
     owner: root
     group: root
-  notify: Restart apache2
+  notify: restart apache2
 
 - name: Add symlink to enable configuration
   ansible.builtin.file:
@@ -17,4 +17,4 @@
     mode: "0644"
     owner: root
     group: root
-  notify: Restart apache2
+  notify: restart apache2
@@ -19,7 +19,7 @@ cleanuri_amqp_canonizer: "canonizer"
 cleanuri_amqp_retrieval: "extractor"
 
 # Docker images
-cleanuri_image_webui: mrtux/cleanuri-webui:0.2.2
-cleanuri_image_apigateway: mrtux/cleanuri-apigateway:0.3.2
-cleanuri_image_canonizer: mrtux/cleanuri-canonizer:0.5.3
-cleanuri_image_extractor: mrtux/cleanuri-extractor:0.5.3
+cleanuri_image_webui: mrtux/cleanuri-webui:0.2.0
+cleanuri_image_apigateway: mrtux/cleanuri-apigateway:0.3.0
+cleanuri_image_canonizer: mrtux/cleanuri-canonizer:0.3.0
+cleanuri_image_extractor: mrtux/cleanuri-extractor:0.3.0
@@ -1,7 +1,7 @@
 # Tasks for the cleanuri role
 ---
 - name: Ensure CleanURI WebUI is running
-  community.docker.docker_container:
+  docker_container:
     name: cleanuri-webui
     image: "{{ cleanuri_image_webui }}"
     pull: true
@@ -15,7 +15,7 @@
       REACT_APP_API_GATEWAY: "https://{{ cleanuri_api_domain }}"
 
 - name: Setup proxy site for the CleanURI WebUI
-  ansible.builtin.include_role:
+  include_role:
     name: setup_http_site_proxy
   vars:
     site_name: "{{ cleanuri_ui_domain }}"
@@ -23,7 +23,7 @@
 
 
 - name: Ensure CleanURI API Gateway is running
-  community.docker.docker_container:
+  docker_container:
     name: cleanuri-apigateway
     image: "{{ cleanuri_image_apigateway }}"
     pull: true
@@ -35,14 +35,14 @@
     env:
       TZ: "{{ timezone }}"
      AMQP_HOST: "{{ cleanuri_amqp_host }}"
       AMQP_USER: "{{ cleanuri_amqp_user }}"
       AMQP_PASS: "{{ cleanuri_amqp_pass }}"
       AMQP_VHOST: "{{ cleanuri_amqp_vhost }}"
       GATEWAY_RESULT_QUEUE: "{{ cleanuri_amqp_results }}"
       GATEWAY_TASK_RK: "{{ cleanuri_amqp_canonizer }}"
 
 - name: Ensure CleanURI Canonizer is running
-  community.docker.docker_container:
+  docker_container:
     name: cleanuri-canonizer
     image: "{{ cleanuri_image_canonizer }}"
     pull: true
@@ -52,14 +52,14 @@
     env:
       TZ: "{{ timezone }}"
       AMQP_HOST: "{{ cleanuri_amqp_host }}"
       AMQP_USER: "{{ cleanuri_amqp_user }}"
       AMQP_PASS: "{{ cleanuri_amqp_pass }}"
       AMQP_VHOST: "{{ cleanuri_amqp_vhost }}"
       CANONIZER_TASK_QUEUE: "{{ cleanuri_amqp_canonizer }}"
       EXTRACTOR_TASK_RK: "{{ cleanuri_amqp_retrieval }}"
 
 - name: Ensure CleanURI Extractor is running
-  community.docker.docker_container:
+  docker_container:
     name: cleanuri-extractor
     image: "{{ cleanuri_image_extractor }}"
     pull: true
@@ -69,14 +69,14 @@
     env:
       TZ: "{{ timezone }}"
       AMQP_HOST: "{{ cleanuri_amqp_host }}"
       AMQP_USER: "{{ cleanuri_amqp_user }}"
       AMQP_PASS: "{{ cleanuri_amqp_pass }}"
       AMQP_VHOST: "{{ cleanuri_amqp_vhost }}"
       EXTRACTION_TASK_QUEUE: "{{ cleanuri_amqp_retrieval }}"
 
 
 - name: Setup proxy site for the CleanURI API Gateway
-  ansible.builtin.include_role:
+  include_role:
     name: setup_http_site_proxy
   vars:
     site_name: "{{ cleanuri_api_domain }}"
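All four containers read their settings from `cleanuri_*` variables; the AMQP routing keys come from the role defaults above, while the domains and credentials are defined wherever the role is applied, which is outside this diff. A sketch of such an invocation, with placeholder values throughout:

```yaml
# Hypothetical play applying the cleanuri role (values are placeholders)
- hosts: docker_host
  roles:
    - role: cleanuri
      vars:
        cleanuri_ui_domain: cleanuri.example.org         # placeholder WebUI domain
        cleanuri_api_domain: api.cleanuri.example.org    # placeholder API gateway domain
        cleanuri_amqp_host: broker.example.org           # placeholder AMQP settings
        cleanuri_amqp_user: cleanuri
        cleanuri_amqp_pass: "{{ vault_cleanuri_amqp_pass }}"  # assumed vault variable
        cleanuri_amqp_vhost: "/cleanuri"
```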
@@ -1,6 +1,6 @@
 # handlers file for cron-dd24-dyndns
 ---
-- name: Reload cron
+- name: reload cron
   ansible.builtin.shell:
     cmd: service cron reload
     warn: no
@@ -1,6 +1,6 @@
 ---
 - name: Make sure cron and curl are installed
-  ansible.builtin.apt:
+  apt:
     name:
       - cron
       - curl
@@ -13,6 +13,6 @@
     owner: root
     group: root
     mode: "0644"
-  notify: Reload cron
+  notify: reload cron
   # There is ansible.builtin.cron, but this makes configuration much
   # more complicated, so we stick to the template.
@@ -1,6 +1,6 @@
 # /etc/cron.d/dd24-dyndns: Cron call to renew DynDNS entry
 
-SHELL=/bin/bash
+SHELL=/bin/sh
 PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
 
-*/5 * * * * root curl --silent --show-error "https://dynamicdns.key-systems.net/update.php?hostname={{dyndns_domain}}&password={{dyndns_password}}&ip={{dyndns_ip}}" > >(grep 'code\|description' | paste -d',' - - | logger -p user.debug -t dd24) 2> >(/usr/bin/logger -p user.error -t dd24)
+*/5 * * * * root curl --silent --show-error "https://dynamicdns.key-systems.net/update.php?hostname={{dyndns_domain}}&password={{dyndns_password}}&ip={{dyndns_ip}}" > /dev/null 2> >(/usr/bin/logger -p user.error -t dd24)
@@ -1,4 +0,0 @@
-# desec.io Cron configuration
----
-dyndns_domain: www.example.com
-dyndns_token: yourtoken
@@ -1,10 +0,0 @@
-# handlers file for desec_dyndns_cron
----
-- name: Reload cron
-  ansible.builtin.shell:
-    cmd: service cron reload
-    warn: no
-  # Use the shell call because the task sometimes has problems finding the service state
-  # service:
-  #   name: cron
-  #   state: restarted
@@ -1,18 +0,0 @@
----
-- name: Make sure cron and curl are installed
-  ansible.builtin.apt:
-    name:
-      - cron
-      - curl
-    state: present
-
-- name: Setup cron file for desec.io updates
-  ansible.builtin.template:
-    src: "templates/desec-dyndns.cron.j2"
-    dest: "/etc/cron.d/desec-dyndns"
-    owner: root
-    group: root
-    mode: "0644"
-  notify: Reload cron
-  # There is ansible.builtin.cron, but this makes configuration much
-  # more complicated, so we stick to the template.
@@ -1,6 +0,0 @@
-# /etc/cron.d/desec-dyndns: Cron call to renew DynDNS entry
-
-SHELL=/bin/bash
-PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
-
-*/5 * * * * root curl --silent --show-error --user {{ dyndns_domain }}:{{ dyndns_token }} "https://update.dedyn.io/" > >(logger -p user.debug -t desec) 2> >(/usr/bin/logger -p user.error -t desec)
roles/docker_setup/defaults/main.yml (new file)

@@ -0,0 +1,5 @@
+---
+docker_compose_version: "1.25.4"
+docker_compose_path: /usr/local/bin/docker-compose
+docker_data_root: "/var/lib/docker"
+docker_storage_driver: "overlay2"
roles/docker_setup/handlers/main.yml (new file)

@@ -0,0 +1,6 @@
+---
+- name: restart docker
+  service:
+    name: docker
+    state: restarted
+    enabled: yes
roles/docker_setup/tasks/main.yml (new file)

@@ -0,0 +1,84 @@
+# This file is a mash-up of:
+# https://github.com/geerlingguy/ansible-role-docker/blob/master/tasks/docker-compose.yml
+# https://www.digitalocean.com/community/tutorials/how-to-install-docker-compose-on-debian-9
+# and our own stuff …
+---
+- name: Gather package facts
+  package_facts:
+    manager: "auto"
+
+- name: Exit if docker.io is installed
+  fail:
+    msg: "Please remove docker.io (Debian vanilla docker package) first!"
+  when: "'docker.io' in ansible_facts.packages"
+
+- name: Install Docker APT deps
+  package:
+    name: "{{ packages }}"
+    state: present
+  vars:
+    packages:
+      - apt-transport-https
+      - ca-certificates
+      - gnupg2
+      - software-properties-common
+
+- name: add Docker apt-key
+  apt_key:
+    url: https://download.docker.com/linux/debian/gpg
+    state: present
+
+- name: add Docker's APT repository
+  ansible.builtin.template:
+    src: templates/docker.list.j2
+    dest: /etc/apt/sources.list.d/docker.list
+  register: apt_repo
+
+- name: Update package cache  # noqa 503
+  ansible.builtin.apt:
+    update_cache: true
+  when: apt_repo.changed
+
+- name: install Docker
+  package:
+    name: "{{ packages }}"
+    state: present
+  vars:
+    packages:
+      - docker-ce
+      - python3-docker
+
+- name: Set docker configuration
+  template:
+    src: templates/daemon.json.j2
+    dest: /etc/docker/daemon.json
+    mode: "0644"
+  notify: restart docker
+
+- name: Check current docker-compose version.
+  command: docker-compose --version
+  register: docker_compose_current_version
+  changed_when: false
+  failed_when: false
+
+- name: Delete existing docker-compose version if it's different.
+  file:
+    path: "{{ docker_compose_path }}"
+    state: absent
+  when: >
+    docker_compose_current_version.stdout is defined
+    and docker_compose_version not in docker_compose_current_version.stdout
+
+- name: Install Docker Compose (if configured).
+  get_url:
+    url: https://github.com/docker/compose/releases/download/{{ docker_compose_version }}/docker-compose-Linux-x86_64
+    dest: "{{ docker_compose_path }}"
+    mode: "0755"
+
+- name: Place admin users in docker group
+  user:
+    name: "{{ item.logname }}"
+    groups: [docker]
+    append: yes
+  when: item.docker
+  with_items: "{{ users }}"
9
roles/docker_setup/templates/daemon.json.j2
Normal file
9
roles/docker_setup/templates/daemon.json.j2
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
{
|
||||||
|
"exec-opts": ["native.cgroupdriver=systemd"],
|
||||||
|
"log-driver": "json-file",
|
||||||
|
"log-opts": {
|
||||||
|
"max-size": "100m"
|
||||||
|
},
|
||||||
|
"data-root": "{{ docker_data_root }}",
|
||||||
|
"storage-driver": "{{ docker_storage_driver }}"
|
||||||
|
}
|
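With the role defaults shown above (`docker_data_root: /var/lib/docker`, `docker_storage_driver: overlay2`), this template renders to the following /etc/docker/daemon.json:

```json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "data-root": "/var/lib/docker",
  "storage-driver": "overlay2"
}
```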
roles/docker_setup/templates/docker.list.j2 (new file)

@@ -0,0 +1,2 @@
+deb https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable
+
@@ -1,8 +0,0 @@
-# Defaults for nfs_host
----
-nfs_host_exports: []
-#  - directory: "/srv/nfs"
-#    hosts: "k3s-w[0-9]+.n39.eu"
-#    options: rw,sync,no_subtree_check
-
-nfs_host_storage_device: "/dev/sdb"
@@ -1,3 +0,0 @@
----
-- name: Reload nfs
-  ansible.builtin.command: 'exportfs -ra'
@@ -1,41 +0,0 @@
----
-- name: Install required packages
-  ansible.builtin.apt:
-    state: present
-    name:
-      - nfs-kernel-server
-      - nfs-common
-      - parted
-
-- name: Create a new ext4 primary partition
-  community.general.parted:
-    device: "{{ nfs_host_storage_device }}"
-    number: 1
-    state: present
-    fs_type: ext4
-
-- name: Ensure nfs mountpoints exist
-  ansible.builtin.file:
-    path: "{{ item.directory }}"
-    state: directory
-    owner: nobody
-    group: nogroup
-    mode: '0777'
-  with_items: "{{ nfs_host_exports }}"
-
-- name: Mount up device by label
-  ansible.posix.mount:
-    path: "{{ nfs_host_exports[0].directory }}"
-    src: /dev/sdb1
-    fstype: ext4
-    state: present
-
-- name: Put /etc/exports in place from template
-  ansible.builtin.template:
-    src: templates/exports.j2
-    dest: "/etc/exports"
-  notify: Reload nfs
-
-- name: Ensure nfs is running.
-  ansible.builtin.service: "name=nfs-kernel-server state=started enabled=yes"
-  when: nfs_host_exports|length
@@ -1,3 +0,0 @@
-{% for export in nfs_host_exports %}
-{{ export.directory }} {{ export.hosts }}({{ export.options }})
-{% endfor %}
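For reference, the commented example from the deleted role defaults (`/srv/nfs` exported to `k3s-w[0-9]+.n39.eu` with `rw,sync,no_subtree_check`) would have rendered through this template to a single /etc/exports line:

```
/srv/nfs k3s-w[0-9]+.n39.eu(rw,sync,no_subtree_check)
```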
@@ -1,7 +1,7 @@
 # Handlers for nginx-https-proxy
 ---
-- name: Restart nginx
-  ansible.builtin.service:
+- name: restart nginx
+  service:
     name: nginx
     state: restarted
     enabled: yes
@@ -8,9 +8,9 @@
   ansible.builtin.apt:
     state: present
     name:
       - apt-transport-https
       - ca-certificates
       - gnupg2
 
 ### Setup APT cache for the nginx repository
 #
@@ -18,7 +18,7 @@
 # for SSL passthrough.
 
 - name: Add nginx apt-key
-  ansible.builtin.apt_key:
+  apt_key:
     url: https://nginx.org/keys/nginx_signing.key
     state: present
 
@@ -33,7 +33,7 @@
     src: files/apt-preference-99nginx
     dest: /etc/apt/preferences.d/99nginx
 
-- name: Update package cache  # noqa: no-handler
+- name: Update package cache  # noqa 503
   ansible.builtin.apt:
     update_cache: true
   when: apt_repo.changed
@@ -45,7 +45,7 @@
     state: present
     name:
       # This version of nginx comes with the ngx_stream_core_module module
       - nginx
 
 
 ### Configuration
@@ -56,7 +56,7 @@
     owner: root
     group: root
     mode: '0644'
-  notify: Restart nginx
+  notify: restart nginx
 
 - name: Create directory for dehydrated forwardings
   ansible.builtin.file:
@@ -74,7 +74,7 @@
     group: root
     mode: '0644'
   loop: "{{ ingress }}"
-  notify: Restart nginx
+  notify: restart nginx
 
 - name: Setup nginx configuration
   # Note the order here: The nginx configuration _needs_ the dehydrated-hosts
@@ -86,4 +86,4 @@
     owner: root
     group: root
     mode: '0644'
-  notify: Restart nginx
+  notify: restart nginx
roles/setup-http-site-forward/handlers/main.yml (new file)

@@ -0,0 +1,5 @@
+---
+- name: restart apache2
+  service:
+    name: apache2
+    state: restarted
@@ -1,12 +1,12 @@
 ---
 - name: Add or update Apache2 site
-  ansible.builtin.template:
+  template:
     src: templates/apache-docker-forward-site.j2
     dest: /etc/apache2/sites-available/{{ site_name }}.conf
-  notify: Restart apache2
+  notify: restart apache2
 
 - name: Activate Apache2 site
-  ansible.builtin.command: a2ensite {{ site_name }}
+  command: a2ensite {{ site_name }}
   args:
     creates: /etc/apache2/sites-enabled/{{ site_name }}.conf
-  notify: Restart apache2
+  notify: restart apache2
@@ -1,5 +0,0 @@
----
-- name: Restart apache2
-  ansible.builtin.service:
-    name: apache2
-    state: restarted
@@ -1,5 +1,5 @@
 ---
-- name: Restart apache2
-  ansible.builtin.service:
+- name: restart apache2
+  service:
     name: apache2
     state: restarted
@@ -1,13 +1,13 @@
 ---
 - name: Add or update Apache2 site
-  ansible.builtin.template:
+  template:
     src: templates/apache-docker-proxy-site.j2
     dest: /etc/apache2/sites-available/{{ site_name }}.conf
     mode: "0644"
-  notify: Restart apache2
+  notify: restart apache2
 
 - name: Activate Apache2 site
-  ansible.builtin.command: a2ensite {{ site_name }}
+  command: a2ensite {{ site_name }}
   args:
     creates: /etc/apache2/sites-enabled/{{ site_name }}.conf
-  notify: Restart apache2
+  notify: restart apache2
@@ -1,3 +1,3 @@
 ---
 - name: Update aliases
-  ansible.builtin.shell: which newaliases && newaliases || true
+  shell: which newaliases && newaliases || true
@@ -1,12 +1,12 @@
 ---
 - name: Ensure sudo is installed
-  ansible.builtin.package:
+  package:
     name:
       - sudo
     state: present
 
 - name: Configure group sudo for sudoers without password
-  ansible.builtin.lineinfile:
+  lineinfile:
     path: /etc/sudoers
     state: present
     regexp: '^%sudo\s'
@@ -14,7 +14,7 @@
     validate: /usr/sbin/visudo -cf %s
 
 - name: Add users | create users' shell and home dir
-  ansible.builtin.user:
+  user:
     name: "{{ item.logname }}"
     shell: /bin/bash
     createhome: yes
@@ -22,7 +22,7 @@
   with_items: "{{ users }}"
 
 - name: Add authorized keys for user
-  ansible.posix.authorized_key:
+  authorized_key:
     user: "{{ item.0.logname }}"
     key: "{{ item.1 }}"
     state: present
@@ -32,7 +32,7 @@
     - skip_missing: true
 
 - name: Place user in sudo group
-  ansible.builtin.user:
+  user:
     name: "{{ item.logname }}"
     groups: [sudo]
     append: yes
@@ -40,12 +40,12 @@
   with_items: "{{ users }}"
 
 - name: Check if /etc/aliases exists
-  ansible.builtin.stat:
+  stat:
     path: /etc/aliases
   register: aliases
 
 - name: Set system email alias
-  ansible.builtin.lineinfile:
+  lineinfile:
     path: /etc/aliases
     state: present
     regexp: "^{{ item.logname }}:"
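The role iterates over a `users` list: `logname` drives account creation and the /etc/aliases entry, per-user SSH keys are added through a subelements loop with `skip_missing: true` (so the key list is optional), and the docker_setup role above additionally checks `item.docker`. The list itself lives in group vars outside this diff; a plausible entry could look like this (the `email` and `ssh_pub` key names are assumptions, the values placeholders):

```yaml
# Hypothetical entry in the users list (key names partly assumed)
users:
  - logname: jdoe                      # system login name
    email: jdoe@example.org            # assumed key for the /etc/aliases entry
    docker: true                       # grants docker group membership in docker_setup
    ssh_pub:                           # assumed subelements key holding authorized keys
      - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAexample jdoe@laptop"
```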
setup-ssh.yml

@@ -1,21 +1,21 @@
 ---
-- name: Configure local ssh to access n39 hosts
+- name: configure local ssh to access n39 hosts
   hosts: localhost
 
   tasks:
-    - name: Ensure $HOME/.ssh/config.d/ dir is present
+    - name: ensure {{ lookup('env', 'HOME') }}/.ssh/config.d/ dir is present
       ansible.builtin.file:
         path: "{{ lookup('env', 'HOME') }}/.ssh/config.d/"
         state: directory
       delegate_to: localhost
 
-    - name: Put ssh config for access to n39 internal systems in place
+    - name: template ssh config for access to internal systems
       ansible.builtin.template:
         src: templates/ssh_config.j2
         dest: "{{ lookup('env', 'HOME') }}/.ssh/config.d/n39_config"
       delegate_to: localhost
 
-    - name: Ensure that n39 access config is included
+    - name: ensure that n39 access config is included
       ansible.builtin.lineinfile:
         path: ~/.ssh/config
         insertbefore: BOF
templates/hobbes/grafana-kiosk.service.j2 (new file)

@@ -0,0 +1,36 @@
+[Unit]
+Description=Grafana Kiosk
+Documentation=https://github.com/grafana/grafana-kiosk
+Documentation=https://grafana.com/blog/2019/05/02/grafana-tutorial-how-to-create-kiosks-to-display-dashboards-on-a-tv
+After=network.target
+Wants=graphical.target
+After=graphical.target
+
+[Service]
+User={{ kiosk_user }}
+Environment="DISPLAY=:0"
+Environment="XAUTHORITY=/home/{{ kiosk_user }}/.Xauthority"
+
+# These should work according to the docs, but are nowhere in the code?
+#Environment="KIOSK_MODE=full"
+#Environment="KIOSK_AUTOFIT=false"
+#Environment="KIOSK_LXDE_ENABLED=true"
+#Environment="KIOSK_LXDE_HOME=/home/{{ kiosk_user }}"
+#Environment="KIOSK_URL={{ kiosk_url }}"
+#Environment="KIOSK_LOGIN_METHOD=local"
+#Environment="KIOSK_LOGIN_USER={{ kiosk_grafana_user }}"
+#Environment="KIOSK_LOGIN_PASSWORD={{ kiosk_grafana_pass }}"
+
+# Disable screensaver etc.
+ExecStartPre=xset s off
+ExecStartPre=xset -dpms
+ExecStartPre=xset s noblank
+
+ExecStart=/home/{{ kiosk_user }}/bin/grafana-kiosk -kiosk-mode=full -autofit=false -lxde-home=/home/{{ kiosk_user }} -URL="{{ kiosk_url }}" -login-method=local -username={{ kiosk_grafana_user }} --password={{ kiosk_grafana_pass }}
+
+Restart=on-failure
+RestartSec=30s
+
+[Install]
+WantedBy=graphical.target
+
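The tasks that deploy this unit are outside the hunks shown here; a minimal sketch of how such a template is typically installed and enabled, assuming the rendered unit is named grafana-kiosk.service:

```yaml
- name: Install the Grafana kiosk unit      # sketch; destination path assumed
  ansible.builtin.template:
    src: templates/hobbes/grafana-kiosk.service.j2
    dest: /etc/systemd/system/grafana-kiosk.service
    mode: "0644"

- name: Enable and start the kiosk
  ansible.builtin.systemd:
    name: grafana-kiosk
    enabled: true
    state: started
    daemon_reload: true
```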
@@ -1,47 +0,0 @@
-#!/bin/bash
-
-# Check if the script is run by root
-if [ "$EUID" -ne 0 ]
-  then echo "Please run as root"
-  exit
-fi
-
-# MQTT broker details
-BROKER="{{ kiosk_mqtt_host }}"
-TOPIC="{{ kiosk_mqtt_topic }}"
-
-# Variable to store the PID of the fbi process
-fbi_pid=0
-
-# Function to be executed on SIGTERM
-on_sigterm() {
-  echo "SIGTERM received, exiting..."
-
-  # Kill the fbi process
-  # As the process forks itself, we do not get a reliable PID and killall is needed
-  killall fbi
-
-  # Remove the temporary file
-  rm -f /tmp/grafana.png
-  exit 0
-}
-
-# Trap SIGTERM and call on_sigterm() when it is received
-trap 'on_sigterm' SIGTERM
-
-while true
-do
-  # Subscribe to the topic and save received data to a file
-  mosquitto_sub -h $BROKER -t $TOPIC -C 1 > /tmp/grafana.png
-
-  # Kill the previous fbi process
-  # As the process forks itself, we do not get a reliable PID and killall is needed
-  killall fbi
-
-  # Display the image
-  fbi -T 1 -noverbose -a /tmp/grafana.png &
-
-  # Wait to avoid a race condition between
-  # fbi starting and mosquitto truncating the file
-  sleep 1
-done
@@ -1,8 +0,0 @@
-#!/bin/bash
-docker run --rm \
-    -e RENOVATE_TOKEN={{ renovate_forgejo_pat }} \
-    -e RENOVATE_ENDPOINT=https://{{ forgejo_domain_name }}/api/v1 \
-    -e RENOVATE_PLATFORM=gitea \
-    -e RENOVATE_GIT_AUTHOR={{ renovate_git_user | quote }} \
-    -e GITHUB_COM_TOKEN={{ renovate_github_pat }} \
-    {{ renovate_image }} --autodiscover
@@ -44,9 +44,9 @@
   RequestHeader set "X-Forwarded-SSL" expr=%{HTTPS}
   ProxyPreserveHost {{ proxy_preserve_host | default("Off") }}
 
-  ProxyPass /json http://172.23.48.7:8001/json
-  ProxyPass /text http://172.23.48.7:8001/text
-  ProxyPass /state.png http://172.23.48.7:8001/state.png
+  ProxyPass /json http://172.23.48.7/spaceapi
+  ProxyPass /text http://172.23.48.7/state.txt
+  ProxyPass /state.png http://172.23.48.7/state.png
 </VirtualHost>
 </IfFile>
 </IfFile>
templates/ssh_config.j2

@@ -25,11 +25,3 @@ Host {{ host }}
   Port 22
 
 {% endfor %}
-
-{# This is our router #}
-Host rhodium.n39.eu
-  Hostname rhodium.n39.eu
-  IdentityFile {{ setup_ssh_key }}
-  User root
-  ProxyJump ssh.n39.eu
-  Port 22