Compare commits

Comparing master ... sig-neuer- (4 commits)

Commits:
- a12729ba3b
- 45ffd0e23c
- 5432c8b2b5
- 77cc80f798

76 changed files with 556 additions and 647 deletions
Changed files:

- .mailmap
- .yamllint
- README.md
- inventory.yml
- main.yml
- renovate.json
- requirements.yml
- setup-ssh.yml
- files/
- group-all.yml
- group-docker_host.yml
- group-k3s.yml
- group-proxmox.yml
- group_vars/
- host-beaker.yml
- host-hobbes.yml
- host-holmium.yml
- host-krypton.yml
- host-oganesson.yml
- host-platon.yml
- host-plumbum.yml
- host-pottwal.yml
- host-radon.yml
- host-tau.yml
- host-unicorn.yml
- host-vyos.yml
- host-wittgenstein.yml
- host_vars/
  - pottwal.n39.eu/
  - radon.n39.eu/
  - vyos.n39.eu/
  - wittgenstein.n39.eu/
- roles/
  - apache
  - apache_letsencrypt
  - cleanuri
  - dd24_dyndns_cron
  - desec_dyndns_cron
  - docker_setup
  - nfs-host
  - nfs_host/handlers
  - nginx_https_ingress
  - setup-http-site-forward
  - setup_http_site_forward/handlers
  - setup_http_site_proxy
  - users
- templates/
.mailmap (4 changes)

@@ -2,8 +2,4 @@ Alexander Dahl <alex@netz39.de> <post@lespocky.de>
 David Kilias <dkdent@netz39.de>
 David Kilias <dkdent@netz39.de> <david.kilias@gmail.com>
-Maximilian Grau <mg-95@t-online.de>
-Maximilian Grau <mg-95@t-online.de> <mg-95@gitea.n39.eu>
-Jens Winter-Hübenthal <jens.winter@gmail.com>
-Jens Winter-Hübenthal <jens.winter@gmail.com> <jens.winter-huebenthal@bridgefield.de>
 Stefan Haun <tux@netz39.de>
 <timo@netz39.de> <n39@therr.de>
.yamllint

@@ -2,7 +2,6 @@
 extends: default
 
 rules:
   comments-indentation: disable
   line-length: disable
   truthy:
     allowed-values:
README.md

@@ -20,9 +20,6 @@ SSH_KEY=<absolute/path/to/ssh/private/key>
 ansible-playbook setup-ssh.yml --ask-vault-pass -e "setup_ssh_logname=$LOGUSER" -e "setup_ssh_key=$SSH_KEY"
 ```
 
-This playbook also adds `rhodium.n39.eu` (OpenWRT router), but our Ansible cannot set up SSH keys (yet).
-Please [add your key to OpenWRT manually](https://openwrt.org/docs/guide-user/security/dropbear.public-key.auth#from_the_luci_web_interface).
-
 ## Edit vault encrypted vars files
 
 ```bash
files/platon/11_asterisk_i2c (deleted)

@@ -1 +0,0 @@
-asterisk ALL=(root) NOPASSWD: /usr/sbin/i2cget, /usr/sbin/i2cset
files/… (shutter-control startup script)

@@ -1,13 +1,11 @@
 #!/bin/sh
 PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/games:/usr/games'
 echo 'set PCM volume'
 sudo amixer set "PCM" "70%"
 echo 'start i2c-foo'
 sudo modprobe i2c_dev
 sudo modprobe i2c_bcm2708
 echo 'starting log'
 tmux new-session -s status -d 'sudo less /var/log/shuttercontrol.log'
 cd /home/pi/netz39_rollladensteuerung/raspberry/shuttercontrol
 echo 'switch-on.sh'
 ../switch-on.sh
 cd /home/pi
files/wittgenstein/reboot.sh (deleted)

@@ -1,19 +0,0 @@
-#!/bin/sh
-PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/games:/usr/games'
-
-echo 'switch-on.sh'
-/home/pi/switch-on.sh
-
-echo 'start i2c-foo'
-sudo modprobe i2c_dev
-sudo modprobe i2c_bcm2708
-
-# wait for network devices
-sleep 30
-
-cd /home/pi
-echo 'start ampel controller'
-tmux new-session -s ampel -d 'cd /home/pi/netz39_space_notification/raspberry/ledcontrol && ./ledcontrol'
-
-echo 'start lever controller'
-tmux new-window -t ampel:1 'cd /home/pi/netz39_space_notification/raspberry/statusswitch && ./statusswitch'
files/wittgenstein/switch-off.sh (deleted)

@@ -1,7 +0,0 @@
-#!/bin/bash
-
-gpio write 2 0
-gpio write 3 0
-
-gpio mode 2 tri
-gpio mode 3 tri
files/wittgenstein/switch-on.sh (deleted)

@@ -1,11 +0,0 @@
-#!/bin/bash
-
-# INT
-gpio mode 0 tri
-
-# Power
-gpio mode 2 out
-gpio mode 3 out
-
-gpio write 2 1
-gpio write 3 1
files/wittgenstein/unstuck.sh (deleted)

@@ -1,7 +0,0 @@
-#!/bin/bash
-logger -t unstuck "unstuck $(date)"
-
-killall tmux
-
-sleep 1
-/home/pi/reboot.sh
group-all.yml

@@ -1,33 +1,34 @@
 ---
-- name: Tasks for all hosts
-  hosts: all
+# tasks for all hosts
+- hosts: all
   become: true
 
   vars:
     ansible_python_interpreter: /usr/bin/python3
 
   roles:
-    - role: adriagalin.timezone
+    - role: ansible.timezone
       vars:
         ag_timezone: "{{ timezone }}"
     - role: users
 
   tasks:
     - name: Update and clean package cache
-      ansible.builtin.apt:
+      apt:
         update_cache: true
         cache_valid_time: 3600
         autoclean: true
      changed_when: false
 
     - name: Ensure unattended-upgrades is installed and up to date
-      ansible.builtin.apt:
+      apt:
         name: unattended-upgrades
         state: present
 
     - name: Setup unattended-upgrades
-      ansible.builtin.include_role:
-        name: hifis.toolkit.unattended_upgrades
+      include_role:
+        name: hifis.unattended_upgrades
       vars:
         unattended_origins_patterns:
           - "origin=*"
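A pattern that repeats through most hunks in this compare: master names modules and roles by their fully qualified collection name (`ansible.builtin.apt`, `community.docker.docker_container`, `hifis.toolkit.unattended_upgrades`), while the sig-neuer- branch still uses the short forms. Both resolve to the same module on a current Ansible; a minimal sketch of the equivalence (hypothetical task, not from this repository):

```yaml
# FQCN and short name call the same module; the FQCN avoids ambiguity
# when several installed collections ship a module with the same short name.
- name: Ensure tmux is installed
  ansible.builtin.apt:   # the short form would be simply 'apt:'
    name: tmux
    state: present
```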
group-docker_host.yml

@@ -1,18 +1,15 @@
 ---
-- name: Tasks for docker hosts
-  hosts: docker_host
+- hosts: docker_host
   become: true
 
   roles:
-    - role: netz39.host_docker
+    - role: docker_setup
 
-- name: Tasks for docker hosts at location space
-  hosts: docker_host:&location_space
+- hosts: docker_host:&location_space
   become: true
 
   roles:
     - role: lespocky.telegraf_docker_in_docker
       when: (ansible_architecture == "x86_64")
       vars:
         tdid_conf_dir: "/etc/telegraf"
         tdid_influxdb_org: "{{ influxdb_org }}"
group-k3s.yml

@@ -1,10 +1,9 @@
 ---
-- name: Tasks for kubernetes hosts
-  hosts: k3s
+- hosts: k3s
   become: true
 
   tasks:
     - name: Ensure nfs-common is installed on k3s VMs
       ansible.builtin.apt:
         pkg: nfs-common
-        state: present
+        state: present
group-proxmox.yml

@@ -1,6 +1,5 @@
 ---
-- name: Tasks for virtual machines on proxmox host
-  hosts: proxmox
+- hosts: proxmox
   become: true
 
   tasks:
group_vars/all (vars)

@@ -24,6 +24,11 @@ users:
         ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGmU7MfOFuc6z5Vbwh4CbBFSg19f8B9rUO2ITjgmEvkY alex@lemmy
+    sudo: yes
+    docker: yes
   - logname: "kwasir"
     viewname: "Peter Seidel"
     email: "kwasir@netz39.de"
+    sudo: yes
+    docker: yes
   - logname: "tux"
     viewname: "Stefan Haun"
     email: "tux@netz39.de"
@@ -56,23 +61,12 @@ users:
     ssh_pub:
       - !unsafe >
        ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILKhKHCPibswu2p6UQHKsBSqGaXzMFM+oMX0XEWsxCIc timo@Space-Lap
-      - !unsafe >
-       ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMJoS7nsLLzSAsZA4us2/7JaQhgRjj/BY+LOpDQnfy8u timo@mac
     sudo: yes
     docker: yes
-  - logname: "jens"
-    viewname: "Jens Winter-Hübenthal"
-    email: "jens.winter@gmail.com"
-    ssh_pub:
-      - !unsafe >
-       ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIII4FS2sROKs2nIW8uzDuWmj8q127UoljtkVMthY8g// jens@work-lenovo
-    sudo: yes
-    docker: yes
 
-# Data for dyndns updates
+# Data for DD24 dyndns updates
 dyndns_domain: "dyndns.n39.eu"
 dyndns_password: "{{ vault_dyndns_password }}"
-dyndns_token: "{{ vault_dyndns_token }}"
 
 # Shared influxdb items
 influxdb_org: "netz39"
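The `!unsafe` tag on the `ssh_pub` entries above marks the key strings as non-templatable, so Ansible will not try to run them through Jinja2. A minimal sketch of what the tag protects against (hypothetical variable, not from this repository):

```yaml
# Without !unsafe, the braces would be evaluated as a Jinja2 expression
# at runtime instead of being kept as a literal string.
safe_literal: !unsafe "a string containing {{ braces }} kept verbatim"
```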
group_vars/all (vault)

@@ -1,10 +1,7 @@
 $ANSIBLE_VAULT;1.1;AES256
-37306233306262383862373661626635346436316265663162343433303432653536376632316439
-6336396564613232363337303266643965346333396331620a316536636666393461353633366466
-39333362306166376462353739626139623835326461373834303330346538366637626363306438
-3033376133373330330a356236396366643938323666663836643738386337373362323933623838
-30316663646134623232336563343562393037363463303739626464633461323539306261316638
-61343330626263393065636230303632663965653939373437386561656539646533653661613236
-35326334313232633738633933653939383830636361373938373864643133363539623734646435
-32336630613231353337336466646164373734386539653936313865316336616264373061633139
-3839
+34303066383937623831333466333965323161376134353838346235323662373164303163363734
+3134626237346361656533636161363331666537633538380a613761643431356530343663626666
+62646361316364333533316638646261373661633863363733366337373338336565366536386237
+3138646266613837310a396139363830613463393861336161363533343362383462623265356563
+31333862613937306463353130316365636634353862363039663762326263313366363530636631
+3630653638333831303432316266633833643739643533353536
group_vars/docker_host

@@ -1,3 +1,2 @@
 ---
 docker_data_root: "/srv/docker"
-docker_image_prune: true
host-beaker.yml

@@ -1,6 +1,5 @@
 ---
-- name: Setup things on host 'beaker' (proxmox server im space)
-  hosts: beaker.n39.eu
+- hosts: beaker.n39.eu
   become: true
 
   vars:
@@ -10,7 +9,7 @@
 
   tasks:
 
-    - name: Enable proxmox gui login for admin users
+    - name: enable proxmox gui login for admin users
       ansible.builtin.lineinfile:
         path: /etc/pve/user.cfg
         regexp: "^user:{{ item.logname }}@pam"
@@ -19,10 +18,11 @@
         state: present
       loop: "{{ users }}"
 
-    - name: Configure proxmox admin group
+    - name: configure proxmox admin group
       ansible.builtin.lineinfile:
         path: /etc/pve/user.cfg
         regexp: "^group:Admins:"
-        line: "group:Admins:{{ users | map(attribute='logname') | join(\"@pam,\") }}@pam::"
+        # group:Admins:kwasir@pam,lespocky@pam,tux@pam::
+        line: "group:Admins:{{ users | map(attribute = 'logname') | join(\"@pam,\") }}@pam::"
 
   handlers:
host-hobbes.yml

@@ -1,6 +1,5 @@
 ---
-- name: Setup things on host 'hobbes' (raspberry pi for kiosk screen)
-  hosts: hobbes.n39.eu
+- hosts: hobbes.n39.eu
   become: true
 
   vars:
host-holmium.yml

@@ -1,6 +1,5 @@
 ---
-- name: Setup things on host 'holmium' (http ingress vm)
-  hosts: holmium.n39.eu
+- hosts: holmium.n39.eu
   become: true
 
   vars:
host-krypton.yml

@@ -1,6 +1,5 @@
 ---
-- name: Setup things on host 'krypton' (ldap vm)
-  hosts: krypton.n39.eu
+- hosts: krypton.n39.eu
   become: true
 
   vars:
@@ -20,26 +19,26 @@
 
 
   roles:
-    # role 'netz39.host_docker' applied through group 'docker_host'
+    # role 'docker_setup' applied through group 'docker_host'
     - role: apache
     - role: apache_letsencrypt  # Uses configuration from dehydrated setup
-    - role: 24367dfa.dehydrated
+    - role: ansible-role-dehydrated
       vars:
         dehydrated_contact_email: "{{ server_admin }}"
         dehydrated_domains:
           - name: entities-validation.svc.n39.eu
-    - role: penguineer.dehydrated_cron
+    - role: ansible-role-dehydrated_cron
 
   tasks:
 
     # - name: Setup dehydrated challenge endpoint for {{ openldap_domain }}
-    #   ansible.builtin.include_role:
+    #   include_role:
     #     name: setup-http-dehydrated
     #   vars:
     #     site_name: "{{ openldap_domain }}"
 
     - name: Ensure openLDAP directories are present.
-      ansible.builtin.file:
+      file:
         path: "{{ item.path }}"
         mode: "0755"
         state: directory
@@ -50,7 +49,7 @@
         - path: "{{ dehydrated_certs_dir }}/{{ openldap_domain }}"
 
     - name: Ensure container for openLDAP is running.
-      community.docker.docker_container:
+      docker_container:
         name: openLDAP
         image: osixia/openldap:1.5.0
         detach: yes
@@ -99,9 +98,9 @@
         rule: allow
         port: '389'
         proto: tcp
-        from: "{{ item }}"
+        from: "{{ item }}"
         comment: LDAP Docker Access
-      loop: "{{ docker_ip_ranges }}"
+      loop: "{{ docker_ip_ranges }}"
 
     - name: Allow access to openLDAP from local docker container [2/2]
       become: true
@@ -109,15 +108,15 @@
         rule: allow
         port: '636'
         proto: tcp
-        from: "{{ item }}"
+        from: "{{ item }}"
         comment: LDAP Docker Access
-      loop: "{{ docker_ip_ranges }}"
+      loop: "{{ docker_ip_ranges }}"
 
 
     - name: Ensure container for entities validation service is running
-      community.docker.docker_container:
+      docker_container:
         name: entities_validation_svc
-        image: netz39/entities_validation_svc:v1.0.4
+        image: netz39/entities_validation_svc:v1.0.0
         pull: true
         state: started
         detach: yes
@@ -128,7 +127,7 @@
           TZ: "{{ timezone }}"
 
     - name: Setup proxy site entities-validation.svc.n39.eu
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: entities-validation.svc.n39.eu
host-oganesson.yml

@@ -1,6 +1,5 @@
 ---
-- name: Setup things on host 'oganesson' (ssh jump host vm)
-  hosts: oganesson.n39.eu
+- hosts: oganesson.n39.eu
   become: true
 
   vars:
host-platon.yml

@@ -1,13 +1,12 @@
 ---
-- name: Setup things on host 'platon' (raspberry pi for entrance door)
-  hosts: platon.n39.eu
+- hosts: platon.n39.eu
   become: true
   vars:
     ansible_python_interpreter: /usr/bin/python3
     door_open_command: '/home/pi/sesame-open.sh'
     ble_keykeeper_dir: '/home/pi/netz39_ble_keykeeper'
   roles:
-    - role: maz3max.ble_keykeeper
+    - role: ble-keykeeper-role
       vars:
         ble_keykeeper_user: "{{ gatekeeper_user }}"
         ble_keykeeper_group: "{{ gatekeeper_user }}"
@@ -64,7 +63,7 @@
         owner: root
         group: root
         mode: '0644'
-      notify: Restart mosquitto service
+      notify: restart mosquitto
 
 
     ### Sesam for SSH access
@@ -142,7 +141,7 @@
         mode: "0644"
       register: wiringPi_copy
 
-    - name: Install wiringPi library  # noqa: no-handler
+    - name: Install wiringPi library  # noqa 503
       ansible.builtin.apt:
         state: present
         deb: "/home/{{ gatekeeper_user }}/wiringpi-latest.deb"
@@ -246,7 +245,7 @@
         owner: root
         group: root
         mode: "0644"
-      notify: Restart rsyslog
+      notify: restart rsyslog
 
 
     ### Asterisk
@@ -259,7 +258,7 @@
         owner: root
         group: root
         mode: "0644"
-      notify: Restart asterisk
+      notify: restart asterisk
 
     - name: Set up extensions for asterisk
       # This uses the variables gatekeeper_user and door_open_command
@@ -269,25 +268,14 @@
         owner: root
         group: root
         mode: "0644"
-      notify: Restart asterisk
+      notify: restart asterisk
 
     - name: Ensure asterisk is in the right groups
       ansible.builtin.user:
         name: asterisk
         groups: audio,i2c,gpio
         append: yes
-      notify: Restart asterisk
-
-    # Asterisk now executes shell scripts with reduced privileges, so we need to
-    # use sudo for I2C access.
-    - name: Set up sudo configuration for Asterisk I2C access
-      ansible.builtin.copy:
-        src: "files/platon/11_asterisk_i2c"
-        dest: "/etc/sudoers.d/"
-        owner: root
-        group: root
-        mode: "0644"
-      # Asterisk restart is not necessary
+      notify: restart asterisk
 
     - name: Copy sounds
       ansible.builtin.copy:
@@ -305,20 +293,20 @@
 
 
   handlers:
-    - name: Restart mosquitto service
-      ansible.builtin.service:
+    - name: restart mosquitto
+      service:
         name: mosquitto
         state: restarted
         enabled: yes
 
-    - name: Restart rsyslog
-      ansible.builtin.service:
+    - name: restart rsyslog
+      service:
         name: rsyslog
         state: restarted
         enabled: yes
 
-    - name: Restart asterisk
-      ansible.builtin.service:
+    - name: restart asterisk
+      service:
         name: asterisk
         state: restarted
         enabled: yes
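The handler renames in this playbook move in lock-step with the `notify:` strings above, because handlers are looked up by exact, case-sensitive name. A minimal sketch of the pairing (hypothetical service, not from this repository):

```yaml
- hosts: example
  become: true
  tasks:
    - name: Deploy example config
      ansible.builtin.template:
        src: example.conf.j2
        dest: /etc/example.conf
        mode: "0644"
      notify: restart example   # must match the handler name below exactly

  handlers:
    - name: restart example
      ansible.builtin.service:
        name: example
        state: restarted
```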
host-plumbum.yml

@@ -1,10 +1,9 @@
 ---
-- name: Setup things on host 'plumbum' (nfs server)
-  hosts: plumbum.n39.eu
+- hosts: plumbum.n39.eu
   become: true
 
   roles:
-    - role: nfs_host
+    - role: nfs-host
      vars:
        nfs_host_exports:
          - directory: "/srv/nfs/backup"
host-pottwal.yml

@@ -1,13 +1,12 @@
 ---
-- name: Setup things on host 'pottwal' (the big docker container host)
-  hosts: pottwal.n39.eu
+- hosts: pottwal.n39.eu
   become: true
 
   roles:
-    # role 'netz39.host_docker' applied through group 'docker_host'
+    # role 'docker_setup' applied through group 'docker_host'
     - role: apache
     - role: apache_letsencrypt  # Uses configuration from dehydrated setup
-    - role: 24367dfa.dehydrated
+    - role: ansible-role-dehydrated
       vars:
         dehydrated_contact_email: "{{ server_admin }}"
         dehydrated_domains:
@@ -26,11 +25,9 @@
           - name: "{{ grafana_domain_name }}"
           - name: "{{ homebox_domain_name }}"
           - name: spaceapi.n39.eu
-    - role: penguineer.dehydrated_cron
+    - role: ansible-role-dehydrated_cron
-    - role: dd24_dyndns_cron
-      # variables are set in the inventory
+    - role: desec_dyndns_cron
+      # variables are set in the inventory
     - role: cleanuri
       vars:
         cleanuri_ui_domain: uritools.n39.eu
@@ -55,7 +52,7 @@
       # If port 2222 is changed here, it must also be adapted
       # in the forgejo config file (see application volume)!!
     - name: Setup the docker container for forgejo
-      community.docker.docker_container:
+      docker_container:
         name: forgejo
         image: "{{ forgejo_image }}"
         pull: true
@@ -81,7 +78,7 @@
       tags: ["forgejo"]
 
     - name: Setup proxy site "{{ forgejo_domain_name }}"
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ forgejo_domain_name }}"
@@ -89,7 +86,7 @@
       tags: ["forgejo"]
 
     - name: Ensure apt-cacher container is running
-      community.docker.docker_container:
+      docker_container:
         name: apt_cacher_ng
         image: mrtux/apt-cacher-ng:latest
         pull: true
@@ -102,7 +99,7 @@
           TZ: "{{ timezone }}"
 
     - name: Setup docker network
-      community.docker.docker_network:
+      docker_network:
         name: shlinknet
         state: present
         internal: true
@@ -118,9 +115,9 @@
         - shlink
 
     - name: Ensure shlink database container is running
-      community.docker.docker_container:
+      docker_container:
         name: shlinkdb
-        image: postgres:16.8-alpine
+        image: postgres:16.1-alpine
         pull: true
         state: started
         restart_policy: unless-stopped
@@ -138,7 +135,7 @@
         - shlink
 
     - name: Ensure container for shlink is running
-      community.docker.docker_container:
+      docker_container:
         name: shlink
         image: "{{ shlink_image }}"
         pull: true
@@ -150,6 +147,7 @@
         env:
           TZ: "{{ timezone }}"
           DEFAULT_DOMAIN: "{{ shlink_domain_name }}"
           GEOLITE_LICENSE_KEY: "{{ shlink_geolite_license_key }}"
+          INITIAL_API_KEY: "{{ shlink_initial_api_key }}"
           DB_DRIVER: "postgres"
           DB_HOST: shlinkdb
@@ -167,7 +165,7 @@
         - shlink
 
     - name: Setup proxy site {{ shlink_domain_name }}
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ shlink_domain_name }}"
@@ -184,7 +182,7 @@
           msg: "prosody data dir is missing, please restore from the backup!"
       when: not prosody_dir.stat.exists
 
-    - name: Ensure prosody main config dir exists
+    - name: Ensure prosody config dir exists
       ansible.builtin.file:
         path: "{{ prosody_config_dir }}"
         state: directory
@@ -204,7 +202,7 @@
         state: directory
         mode: 0755
 
-    - name: Ensure prosody conf.d dir exists
+    - name: Ensure prosody certs dir exists
       ansible.builtin.file:
         path: "{{ prosody_config_dir }}/conf.d"
         state: directory
@@ -235,7 +233,7 @@
         - Restart prosody
 
     - name: Ensure container for prosody XMPP server is running
-      community.docker.docker_container:
+      docker_container:
         name: prosody
         image: "{{ prosody_image }}"
         pull: true
@@ -257,7 +255,7 @@
 
 
     - name: Ensure container for static XMPP website is running
-      community.docker.docker_container:
+      docker_container:
         name: jabber-static-website
         image: "{{ prosody_web_image }}"
         pull: true
@@ -272,18 +270,14 @@
           - "127.0.0.1:{{ jabber_host_port }}:80"
         volumes:
           - "{{ prosody_data_dir }}/var/www:/public:ro"
-      tags:
-        - prosody-web
 
     - name: Setup proxy site {{ prosody_domain_name }}
       # point to static website for now
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ prosody_domain_name }}"
         proxy_port: "{{ jabber_host_port }}"
-      tags:
-        - prosody-web
 
     - name: Check if hedgedoc data dir exists
       ansible.builtin.stat:
@@ -299,7 +293,7 @@
         - hedgedoc
 
     - name: Ensure the hedgedoc directories exist
-      ansible.builtin.file:
+      file:
         path: "{{ item.path }}"
         mode: "{{ item.mode }}"
         state: directory
@@ -312,7 +306,7 @@
         - hedgedoc
 
     - name: Setup docker network
-      community.docker.docker_network:
+      docker_network:
         name: hedgedocnet
         state: present
         internal: true
@@ -320,7 +314,7 @@
         - hedgedoc
 
     - name: Install HedgeDoc database container
-      community.docker.docker_container:
+      docker_container:
         name: hedgedocdb
         image: "{{ hedgedoc_db_image }}"
         pull: true
@@ -340,7 +334,7 @@
         - hedgedoc
 
     - name: Ensure container for hedgedoc is running
-      community.docker.docker_container:
+      docker_container:
         name: hedgedoc
         image: "{{ hedgedoc_image }}"
         pull: true
@@ -372,7 +366,7 @@
         - hedgedoc
 
     - name: Setup proxy site "{{ hedgedoc_domain_name }}"
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ hedgedoc_domain_name }}"
@@ -381,7 +375,7 @@
         - hedgedoc
 
     - name: Ensure the influxdb directories exist
-      ansible.builtin.file:
+      file:
         path: "{{ item }}"
         mode: 0700
         state: directory
@@ -391,7 +385,7 @@
         - "{{ data_dir }}/influxdb/cfg"
 
     - name: Ensure container for influxdb is running
-      community.docker.docker_container:
+      docker_container:
         name: influxdb
         image: "{{ influxdb_image }}"
         pull: true
@@ -411,7 +405,7 @@
         - "{{ data_dir }}/influxdb/cfg:/etc/influxdb2"
 
     - name: Setup proxy site {{ influxdb_domain_name }}
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ influxdb_domain_name }}"
@@ -436,7 +430,7 @@
         - redmine
 
     - name: Setup Redmine docker network
-      community.docker.docker_network:
+      docker_network:
         name: redminenet
         state: present
         internal: true
@@ -444,7 +438,7 @@
         - redmine
 
     - name: Setup Redmine MySQL container
-      community.docker.docker_container:
+      docker_container:
         name: redminedb
         image: "{{ redmine_mysql_image }}"
         pull: true
@@ -457,14 +451,13 @@
           MYSQL_DATABASE: "{{ redmine_database }}"
         volumes:
           - "{{ data_dir }}/redmine/mysql:/var/lib/mysql"
-          - "{{ data_dir }}/redmine/mysql-config:/etc/mysql/conf.d"
         networks:
           - name: redminenet
         tags:
           - redmine
 
     - name: Setup Redmine container
-      community.docker.docker_container:
+      docker_container:
         name: redmine
         image: "{{ redmine_image }}"
         pull: true
@@ -490,7 +483,7 @@
         - redmine
 
     - name: Setup proxy site "{{ redmine_domain_name }}"
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ redmine_domain_name }}"
@@ -499,7 +492,7 @@
         - redmine
 
     - name: Ensure the uptime-kuma directories exist
-      ansible.builtin.file:
+      file:
         path: "{{ item }}"
         mode: "0755"
         state: directory
@@ -509,7 +502,7 @@
         - uptimekuma
 
     - name: Ensure container for uptime-kuma is running
-      community.docker.docker_container:
+      docker_container:
         name: uptime-kuma
         image: "{{ uptimekuma_image }}"
         pull: true
@@ -526,7 +519,7 @@
         - uptimekuma
 
     - name: Setup proxy site "{{ uptimekuma_domain_name }}"
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ uptimekuma_domain_name }}"
@@ -535,7 +528,7 @@
         - uptimekuma
 
     - name: Ensure the grafana directories exist
-      ansible.builtin.file:
+      file:
         path: "{{ item.path }}"
         owner: "{{ item.owner | default('root') }}"
         mode: "{{ item.mode }}"
@@ -552,7 +545,7 @@
         - grafana
 
     - name: Ensure container for grafana is running
-      community.docker.docker_container:
+      docker_container:
         name: grafana
         image: "{{ grafana_image }}"
         pull: true
@@ -573,7 +566,7 @@
         - grafana
 
     - name: Setup proxy site "{{ grafana_domain_name }}"
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ grafana_domain_name }}"
@@ -583,7 +576,7 @@
         - grafana
 
     - name: Ensure the homebox directories exist
-      ansible.builtin.file:
+      file:
         path: "{{ item.path }}"
         owner: "{{ item.owner | default('root') }}"
         mode: "{{ item.mode }}"
@@ -597,7 +590,7 @@
         - homebox
 
     - name: Ensure container for homebox is running
-      community.docker.docker_container:
+      docker_container:
         name: homebox
         image: "{{ homebox_image }}"
         pull: true
@@ -617,7 +610,7 @@
         - homebox
 
     - name: Setup proxy site {{ homebox_domain_name }}
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ homebox_domain_name }}"
@@ -627,7 +620,7 @@
         - homebox
 
     - name: Setup proxy site spaceapi.n39.eu
-      ansible.builtin.template:
+      template:
         src: templates/pottwal/spaceapi-apache-site.j2
         dest: /etc/apache2/sites-available/spaceapi.n39.eu.conf
         mode: "0644"
@@ -635,16 +628,13 @@
         site_name: "spaceapi.n39.eu"
         proxy_preserve_host: "On"
       notify: Restart apache2
       tags:
         - spaceapi
 
-    # Renovate configuration is sourced from `renovate.json` in each repository
     - name: Ensure renovate bot cronjob is present
       ansible.builtin.template:
         src: templates/pottwal/renovate-cron.j2
         dest: /etc/cron.hourly/renovate-bot
         mode: "0700"
-      notify: Reload cron
+      notify: reload cron
       tags:
         - renovate
 
@@ -656,14 +646,14 @@
         restart: yes
 
     - name: Restart apache2
-      ansible.builtin.service:
+      service:
         name: apache2
         state: restarted
 
-    - name: Reload cron
+    - name: reload cron
       ansible.builtin.shell:
         cmd: service cron reload
       # Use the shell call because the task sometimes has problems finding the service state
-      # ansible.builtin.service:
+      # service:
      #   name: cron
      #   state: restarted
host-radon.yml

@@ -1,6 +1,5 @@
 ---
-- name: Setup things on host 'radon' (services for space automation)
-  hosts: radon.n39.eu
+- hosts: radon.n39.eu
   become: true
 
   vars:
@@ -8,13 +7,13 @@
 
     data_dir: "/srv/data"
 
-    mosquitto_image: eclipse-mosquitto:2.0.21
+    mosquitto_image: eclipse-mosquitto:2.0.18
     mosquitto_data: "{{ data_dir }}/mosquitto"
 
     nodered_image: nodered/node-red:3.0.1-1-18
     nodered_data: "{{ data_dir }}/nodered"
 
-    rabbitmq_image: bitnami/rabbitmq:4.0.7
+    rabbitmq_image: bitnami/rabbitmq:3.12.12
     rabbitmq_data: "{{ data_dir }}/rabbitmq"
 
     pwr_meter_pulse_gw_image: netz39/power-meter-pulse-gateway:0.3.0
@@ -22,10 +21,10 @@
     brotherql_host_port: 9004
 
   roles:
-    # role 'netz39.host_docker' applied through group 'docker_host'
+    # role 'docker_setup' applied through group 'docker_host'
     - role: apache
     - role: apache_letsencrypt  # Uses configuration from dehydrated setup
-    - role: 24367dfa.dehydrated
+    - role: ansible-role-dehydrated
       vars:
         dehydrated_contact_email: "{{ server_admin }}"
         dehydrated_domains:
@@ -33,12 +32,12 @@
           - name: rabbitmq.n39.eu
           - name: pwr-meter-pulse-gw-19i.svc.n39.eu
           - name: labelprinter.n39.eu
-    - role: penguineer.dehydrated_cron
+    - role: ansible-role-dehydrated_cron
 
 
   tasks:
     - name: Ensure the mosquitto directories exist
-      ansible.builtin.file:
+      file:
         path: "{{ item }}"
         mode: 0755
         state: directory
@@ -50,16 +49,16 @@
         - mosquitto
 
     - name: Make sure mosquitto config is there
-      ansible.builtin.template:
+      template:
         src: "templates/mosquitto.conf.j2"
         dest: "{{ mosquitto_data }}/config/mosquitto.conf"
         mode: 0644
-      notify: Restart mosquitto container
+      notify: restart mosquitto
       tags:
         - mosquitto
 
     - name: Ensure mosquitto is running
-      community.docker.docker_container:
+      docker_container:
         name: mosquitto
         image: "{{ mosquitto_image }}"
         pull: true
@@ -89,7 +88,7 @@
       when: not nodered_dir.stat.exists
 
     - name: Ensure nodered is running
-      community.docker.docker_container:
+      docker_container:
         name: nodered
         image: "{{ nodered_image }}"
         pull: true
@@ -109,7 +108,7 @@
         restart_policy: unless-stopped
 
     - name: Setup proxy site nodered.n39.eu
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "nodered.n39.eu"
@@ -130,7 +129,7 @@
         - rabbitmq
 
     - name: Ensure rabbitmq docker container is running
-      community.docker.docker_container:
+      docker_container:
         name: rabbitmq
         image: "{{ rabbitmq_image }}"
         ports:
@@ -151,7 +150,7 @@
         - rabbitmq
 
     - name: Setup proxy site rabbitmq.n39.eu
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "rabbitmq.n39.eu"
@@ -160,7 +159,7 @@
         - rabbitmq
 
     - name: Ensure Power Meter Pulse Gateway for 19i room is running
-      community.docker.docker_container:
+      docker_container:
         name: pwr-meter-pulse-gw-19i
         image: "{{ pwr_meter_pulse_gw_image }}"
         ports:
@@ -178,7 +177,7 @@
         restart_policy: unless-stopped
 
     - name: Setup proxy site pwr-meter-pulse-gw-19i.svc.n39.eu
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "pwr-meter-pulse-gw-19i.svc.n39.eu"
@@ -186,9 +185,9 @@
 
 
     - name: Setup docker container for BrotherQL Web UI printer
-      community.docker.docker_container:
+      docker_container:
         name: brotherql-web
-        image: dersimn/brother_ql_web:2.1.9-alpine
+        image: dersimn/brother_ql_web:2.1.7-alpine
         pull: true
         restart_policy: unless-stopped
         state: started
@@ -202,7 +201,7 @@
         - labelprinter
 
     - name: Setup proxy site labelprinter.n39.eu
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: labelprinter.n39.eu
@@ -211,9 +210,9 @@
         - labelprinter
 
     - name: Setup docker container for Grafana Screenshots
-      community.docker.docker_container:
+      docker_container:
         name: grafana-screenshot
-        image: mrtux/grafana-screenshot:0.1.3
+        image: mrtux/grafana-screenshot:0.1.0
         pull: true
         restart_policy: unless-stopped
         detach: yes
@@ -227,8 +226,8 @@
         - grafana-screenshot
 
   handlers:
-    - name: Restart mosquitto container
-      community.docker.docker_container:
+    - name: restart mosquitto
+      docker_container:
         name: mosquitto
         state: started
         restart: yes
host-tau.yml (43 changes)

@@ -1,6 +1,5 @@
 ---
-- name: Setup things on host 'tau' (vserver for wiki etc.)
-  hosts: tau.netz39.de
+- hosts: tau.netz39.de
   become: true
 
   vars:
@@ -15,18 +14,18 @@
     dokuwiki_port: 9005
     # This container is pinned, because there are issues
     # with backwards compatibility within the same tag!
-    dokuwiki_image: bitnami/dokuwiki:20240206.1.0
+    dokuwiki_image: bitnami/dokuwiki:20230404@sha256:37e5b6e946851fc1fbd0f37a11e4521fdaca7da698d461da5207168c7364c142
 
     discord_invite_domain: discord.netz39.de
 
   roles:
-    # role 'netz39.host_docker' applied through group 'docker_host'
+    # role 'docker_setup' applied through group 'docker_host'
     - role: apache
-    - role: penguineer.dehydrated_cron
+    - role: ansible-role-dehydrated_cron
 
   tasks:
     - name: Setup docker network
-      community.docker.docker_network:
+      docker_network:
         name: dockernet
         driver: bridge
         ipam_config:
@@ -35,8 +34,8 @@
         state: present
 
     - name: Setup Dehydrated
-      ansible.builtin.include_role:
-        name: 24367dfa.dehydrated
+      include_role:
+        name: ansible-role-dehydrated
       vars:
         dehydrated_contact_email: "{{ server_admin }}"
         dehydrated_domains:
@@ -52,14 +51,14 @@
       deploy_challenge_hook: "/bin/systemctl restart apache2"
 
     - name: Setup proxy site testredmine.netz39.de
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: testredmine.netz39.de
         proxy_port: 9004
 
     - name: Setup phpmyadmin
-      community.docker.docker_container:
+      docker_container:
         name: phpmyadmin
         state: started
         image: phpmyadmin:5.2
@@ -76,7 +75,7 @@
         - 9001:80
 
     - name: Setup proxy site mysql.adm.netz39.de
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: mysql.adm.netz39.de
@@ -86,20 +85,20 @@
     - name: Check if Docker Registry auth dir exists
       ansible.builtin.stat:
         path: "{{ data_dir }}/registry/auth"
-      register: docker_registry_auth_dir
-    - name: Fail if Docker Registry auth dir does not exist
+      register: docker_dir
+    - name: Fail if docker registry data dir does not exist
       ansible.builtin.fail:
         msg: "Docker Registry auth dir is missing, please restore from the backup!"
-      when: not docker_registry_auth_dir.stat.exists
+      when: not docker_dir.stat.exists
     - name: Ensure the Docker Registry data directory exists
       # This may not be part of the backup
-      ansible.builtin.file:
+      file:
         path: "{{ data_dir }}/registry/data"
         state: directory
         mode: "0755"
 
     - name: Setup Docker Registry Container
-      community.docker.docker_container:
+      docker_container:
         name: registry
         image: registry:2
         pull: true
@@ -118,7 +117,7 @@
         - "{{ data_dir }}/registry/auth:/auth:rw"
 
     - name: Setup proxy site for the Docker Registry
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ docker_registry_domain }}"
@@ -147,7 +146,7 @@
         - dokuwiki
 
     - name: Setup Dokuwiki Container
-      community.docker.docker_container:
+      docker_container:
         name: dokuwiki
         image: "{{ dokuwiki_image }}"
         pull: true
@@ -166,7 +165,7 @@
         - dokuwiki
 
     - name: Setup proxy site for Dokuwiki
-      ansible.builtin.include_role:
+      include_role:
         name: setup_http_site_proxy
       vars:
         site_name: "{{ dokuwiki_domain }}"
@@ -176,7 +175,7 @@
 
 
     - name: Setup container for secondary FFMD DNS
-      community.docker.docker_container:
+      docker_container:
         name: bind9-md-freifunk-net
         image: ffmd/bind9-md-freifunk-net:v2022122301
         pull: true
@@ -191,8 +190,8 @@
         - ffmd-dns
 
     - name: Setup forwarding for Discord invite
-      ansible.builtin.include_role:
-        name: setup_http_site_forward
+      include_role:
+        name: setup-http-site-forward
       vars:
         site_name: "{{ discord_invite_domain }}"
         # forward_to: "https://discord.com/invite/8FcDvAf"
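The two DokuWiki image references in the hunk above illustrate tag pinning versus digest pinning: a tag like `20240206.1.0` can be re-pushed by the registry, while the `@sha256:…` digest is immutable — which is what the "pinned, because there are issues with backwards compatibility within the same tag" comment is about. Shown with the two lines from this diff:

```yaml
# Tag pinning: follows whatever the registry currently serves under this tag.
dokuwiki_image: bitnami/dokuwiki:20240206.1.0
# Digest pinning: byte-identical image on every pull.
dokuwiki_image: bitnami/dokuwiki:20230404@sha256:37e5b6e946851fc1fbd0f37a11e4521fdaca7da698d461da5207168c7364c142
```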
host-unicorn.yml

@@ -1,20 +1,19 @@
 ---
 # this is for a dedicated vm just hosting the unifi controller.
-- name: Setup things on host 'unicorn' (vm for ubiquiti unifi controller)
-  hosts: unicorn.n39.eu
+- hosts: unicorn.n39.eu
   become: true
   vars:
     ansible_python_interpreter: /usr/bin/python3
     data_dir: "/srv/data"
 
   roles:
-    # role 'netz39.host_docker' applied through group 'docker_host'
+    # role 'docker_setup' applied through group 'docker_host'
 
   tasks:
    - name: Setup the docker container for unifi-controller
-      community.docker.docker_container:
+      docker_container:
        name: unifi-controller
-        image: jacobalberty/unifi:v9.0.114
+        image: jacobalberty/unifi:v8.0.26
        state: started
        restart_policy: unless-stopped
        container_default_behavior: no_defaults
@@ -23,13 +22,13 @@
        # These fixed ports are needed.
        # https://help.ui.com/hc/en-us/articles/218506997-UniFi-Ports-Used
        ports:
-          - "8080:8080/tcp"   # Device command/control
-          - "8443:8443/tcp"   # Web interface + API
-          - "8843:8843/tcp"   # HTTPS portal
-          - "8880:8880/tcp"   # HTTP portal
-          - "3478:3478/udp"   # STUN service
-          - "6789:6789/tcp"   # Speed Test (unifi5 only)
-          - "10001:10001/udp" # Used for device discovery.
+          - "8080:8080/tcp"   # Device command/control
+          - "8443:8443/tcp"   # Web interface + API
+          - "8843:8843/tcp"   # HTTPS portal
+          - "8880:8880/tcp"   # HTTP portal
+          - "3478:3478/udp"   # STUN service
+          - "6789:6789/tcp"   # Speed Test (unifi5 only)
+          - "10001:10001/udp" # Used for device discovery.
        volumes:
          - "{{ data_dir }}/unifi-controller/data:/unifi/data"
          - "{{ data_dir }}/unifi-controller/log:/unifi/log"
host-vyos.yml (new file, 78 lines)

@@ -0,0 +1,78 @@
+---
+- hosts: vyos.n39.eu
+  become: true
+
+  vars:
+    ansible_python_interpreter: /usr/bin/python3
+
+  roles:
+
+  tasks:
+
+    - name: Configure a DHCP Server
+      vyos.vyos.vyos_config:
+        lines:
+          - set system option keyboard-layout 'de'
+
+    - name: Replace provided configuration with device configuration
+      vyos.vyos.vyos_interfaces:
+        config:
+          - name: eth0
+            description: Trunk
+            vifs:
+              - vlan_id: 1
+                description: VIF 1 - mgmt
+              - vlan_id: 4
+                description: VIF 4 - user
+              - vlan_id: 7
+                description: VIF 7 - telekom pppoe
+              - vlan_id: 9
+                description: VIF 9 - dmz
+              - vlan_id: 71
+                description: VIF 71 - telekom status
+              - vlan_id: 100
+                description: VIF 100 - RAW Internet
+        state: replaced
+
+    - name: Merge provided configuration with device configuration
+      vyos.vyos.vyos_l3_interfaces:
+        config:
+          - name: eth0.1
+            ipv4:
+              - address: 172.23.63.2/24
+          - name: eth0.4
+            ipv4:
+              - address: 172.23.48.4/22
+          - name: eth0.9
+            ipv4:
+              - address: 172.23.52.2/22
+          - name: eth0.71
+            ipv4:
+              - address: 192.168.100.3/24
+          - name: eth0.100
+            ipv4:
+              - address: 192.168.39.1/24
+        state: replaced
+
+    - name: Configure a DHCP Server
+      vyos.vyos.vyos_config:
+        lines:
+          - set service dhcp-server disabled 'false'
+          - set service dhcp-server shared-network-name Client subnet 172.23.63.0/24 default-router '172.23.63.2'
+          - set service dhcp-server shared-network-name Client subnet 172.23.48.0/22 default-router '172.23.48.4' start '172.23.48.150' stop '172.23.51.250'
+          - set service dhcp-server shared-network-name Client subnet 172.23.52.0/22 default-router '172.23.52.2' start '172.23.53.0' stop '172.23.53.255'
+          - set service dhcp-server shared-network-name Client subnet 192.168.39.0/24 default-router '192.168.39.1'
+        save: yes
+
+    - name: And a DNS forwarder
+      vyos.vyos.vyos_config:
+        lines:
+          - set system static-host-mapping host-name ipv6.host.domain.loc inet xxxx:xxxx:xxxx:xxxx::x
+          - set system static-host-mapping host-name ipv6.host.domain.loc alias host.domain.loc
+        save: yes
+
+  handlers:
+    - name: restart
+      vyos.vyos.command:
+        commands:
+          - restart
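In the VyOS resource modules used above, `state: replaced` makes the listed attributes the complete configuration of each named interface, while the default `merged` only adds or updates what is listed; note that the task named "Merge provided configuration" actually requests `replaced`. A minimal sketch of the difference (hypothetical interface, not from this playbook):

```yaml
# merged: eth1 keeps existing attributes that are not mentioned here.
- vyos.vyos.vyos_interfaces:
    config:
      - name: eth1
        description: uplink
    state: merged

# replaced: eth1 ends up with exactly the attributes listed here.
- vyos.vyos.vyos_interfaces:
    config:
      - name: eth1
        description: uplink
    state: replaced
```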
host-wittgenstein.yml (deleted, 170 lines)

@@ -1,170 +0,0 @@
----
-- name: Setup things on host 'wittgenstein' (raspberry pi for ampel and spaceapi)
-  hosts: wittgenstein.n39.eu
-  become: true
-
-  roles:
-    - role: apache
-    - role: apache_letsencrypt  # Uses configuration from dehydrated setup
-    - role: 24367dfa.dehydrated
-      vars:
-        dehydrated_contact_email: "{{ server_admin }}"
-    - role: penguineer.dehydrated_cron
-
-  tasks:
-    - name: Install packages needed for the system
-      # This is a list of all packages,
-      # unless they are installed by a specific role
-      ansible.builtin.apt:
-        state: present
-        name:
-          # This is needed for the user-executed tasks
-          - acl
-          # Regular packages
-          - tmux
-          - git-core
-          - cmake
-          - build-essential
-          - libmosquitto-dev
-          - libconfig-dev
-          - mosquitto-clients
-          - python3-paho-mqtt
-          - i2c-tools
-
-
-    - name: Set MAC address for proper DHCP recognition
-      # Uses mac variable from inventory
-      ansible.builtin.template:
-        src: templates/network-interfaces-dhcp-mac.j2
-        dest: /etc/network/interfaces.d/wittgenstein-mac
-        owner: root
-        group: root
-        mode: '0644'
-
-    - name: Disable IPv6
-      # Because it is not working....
-      ansible.builtin.copy:
-        src: files/sysctl-no-ipv6.conf
-        dest: /etc/sysctl.d/99-systcl-no-ipv6.conf
-        owner: root
-        group: root
-        mode: '0644'
-
-
-    ### Gatekeeper user (pi for now)
-    #
-    # All the gatekeeping / door control stuff is here!
-
-    - name: Ensure gatekeeper user is there
-      ansible.builtin.user:
-        name: "{{ gatekeeper_user }}"
-        groups: dialout,audio,plugdev,input,netdev,i2c,gpio
-        append: yes
-
-    - name: Copy management scripts
-      ansible.builtin.copy:
-        src: "files/wittgenstein/{{ item }}"
-        dest: "/home/{{ gatekeeper_user }}/{{ item }}"
-        owner: "{{ gatekeeper_user }}"
-        group: "{{ gatekeeper_user }}"
-        mode: "0750"
-      loop:
-        - reboot.sh
-        - unstuck.sh
-        - switch-on.sh
-        - switch-off.sh
-
-    - name: Install start-up cron
-      ansible.builtin.cron:
-        name: Start the gatekeeper services
-        job: "/home/{{ gatekeeper_user }}/reboot.sh"
-        user: "{{ gatekeeper_user }}"
-        special_time: reboot
-
-
-    - name: Download wiringPi library
-      # WiringPi needs to be installed, but that library seems to be
-      # obsolete. We download something and hope it works...
-      ansible.builtin.get_url:
-        url: https://project-downloads.drogon.net/wiringpi-latest.deb
-        dest: "/home/{{ gatekeeper_user }}/wiringpi-latest.deb"
-        mode: "0644"
-        force: no
-      register: wiringPi_download
-
-    - name: Install wiringPi library  # noqa: no-handler
-      ansible.builtin.apt:
-        state: present
-        deb: "/home/{{ gatekeeper_user }}/wiringpi-latest.deb"
-      when: wiringPi_download.changed
-
-
-    ### Ampelsteuerung
-    - name: Clone netz39_space_notification initial checkout
-      # Do this as the gatekeeper user!
-      become: yes
-      become_user: "{{ gatekeeper_user }}"
-      ansible.builtin.git:
-        repo: https://github.com/netz39/space_notification.git
-        dest: "/home/{{ gatekeeper_user }}/netz39_space_notification"
-        clone: yes
-        update: no
-
-    - name: Compile ledcontrol agent
-      # Do this as the gatekeeper user!
-      become: yes
-      become_user: "{{ gatekeeper_user }}"
-      ansible.builtin.shell:
-        chdir: "/home/{{ gatekeeper_user }}/netz39_space_notification/raspberry/ledcontrol"
-        cmd: make
-        creates: "/home/{{ gatekeeper_user }}/netz39_space_notification/raspberry/ledcontrol/ledcontrol"
-
-    - name: Compile statusswitch agent
-      # Do this as the gatekeeper user!
-      become: yes
-      become_user: "{{ gatekeeper_user }}"
-      ansible.builtin.shell:
-        chdir: "/home/{{ gatekeeper_user }}/netz39_space_notification/raspberry/statusswitch"
-        cmd: make
-        creates: "/home/{{ gatekeeper_user }}/netz39_space_notification/raspberry/statusswitch/statusswitch"
-
-    ### Space API
-    - name: Setup the SpaceAPI Docker container
-      community.docker.docker_container:
-        name: spaceapi
-        image: "{{ spaceapi_image }}"
-        pull: true
-        state: started
-        detach: yes
-        restart_policy: unless-stopped
-        ports:
-          - "0.0.0.0:{{ spaceapi_host_port }}:8080"  # Must be reached by pottwal
-          # - "127.0.0.1:{{ spaceapi_host_port }}:8080"
-        env:
-          TZ: "{{ timezone }}"
-          MQTT_BROKER: "platon.n39.eu"
-          MQTT_TOPIC_STATUS: "{{ spaceapi_topic_status }}"
-          MQTT_TOPIC_LASTCHANGE: "{{ spaceapi_topic_lastchange }}"
-      tags:
-        - spaceapi
-
-    - name: Setup the Ampel Controller Docker container
-      community.docker.docker_container:
-        name: ampelcontroller
-        image: "{{ ampelcontroller_image }}"
-        pull: true
-        state: started
-        detach: yes
-        restart_policy: unless-stopped
-        env:
-          TZ: "{{ timezone }}"
-          MQTT_BROKER: "platon.n39.eu"
-          MQTT_LEVER_STATE_TOPIC: "{{ topic_lever_state }}"
-          MQTT_DOOR_EVENTS_TOPIC: "{{ topic_door_events }}"
-          MQTT_SPACESTATUS_ISOPEN_TOPIC: "{{ spaceapi_topic_status }}"
-          MQTT_SPACESTATUS_LASTCHANGE_TOPIC: "{{ spaceapi_topic_lastchange }}"
-          MQTT_TRAFFIC_LIGHT_TOPIC: "{{ topic_traffic_light }}"
-      tags:
-        - spaceapi
-
-  handlers:
host_vars/pottwal.n39.eu (vars)

@@ -12,24 +12,25 @@ cleanuri_amqp_vhost: "/cleanuri"
 forgejo_host_port: 9091
 forgejo_ssh_port: 2222
 forgejo_domain_name: git.n39.eu
-forgejo_image: codeberg.org/forgejo/forgejo:10.0.3
+forgejo_image: codeberg.org/forgejo/forgejo:1.21.4-0
 
 shlink_host_port: 8083
 shlink_domain_name: sl.n39.eu
-shlink_image: shlinkio/shlink:4.4.6
+shlink_image: shlinkio/shlink:3.7.3
 shlink_geolite_license_key: "{{ vault_shlink_geolite_license_key }}"
+shlink_initial_api_key: "{{ vault_shlink_initial_api_key }}"
 shlink_postgres_password: "{{ vault_shlink_postgres_password }}"
 
 hedgedoc_host_port: 8084
 hedgedoc_domain_name: pad.n39.eu
-hedgedoc_image: quay.io/hedgedoc/hedgedoc:1.10.2
-hedgedoc_db_image: postgres:16.8-alpine
+hedgedoc_image: quay.io/hedgedoc/hedgedoc:1.9.9
+hedgedoc_db_image: postgres:16.1-alpine
 hedgedoc_postgres_password: "{{ vault_hedgedoc_postgres_password }}"
 
 redmine_host_port: 8087
 redmine_domain_name: redmine.n39.eu
-redmine_image: redmine:6.0.4
-redmine_mysql_image: mysql:9.2
+redmine_image: redmine:5.1.1
+redmine_mysql_image: mysql:8.3
 redmine_database: redmine
 redmine_database_password: "{{ vault_redmine_database_password }}"
 
@@ -42,24 +43,24 @@ influxdb_init_password: "{{ vault_influxdb_init_password }}"
 jabber_host_port: 8086
 prosody_domain_name: jabber.n39.eu
 prosody_image: netz39/prosody:0.11
-prosody_web_image: joseluisq/static-web-server:2.36
+prosody_web_image: joseluisq/static-web-server:2.24
 prosody_config_dir: "/etc/prosody"
 prosody_data_dir: "{{ data_dir }}/prosody"
 
 uptimekuma_host_port: 8085
 uptimekuma_domain_name: uptime.n39.eu
-uptimekuma_image: louislam/uptime-kuma:1.23.16
+uptimekuma_image: louislam/uptime-kuma:1.23.11
 
 grafana_host_port: 8089
 grafana_domain_name: grafana.n39.eu
-grafana_image: grafana/grafana:11.6.0
+grafana_image: grafana/grafana:10.2.3
 grafana_admin_password: "{{ vault_grafana_admin_password }}"
 
 homebox_host_port: 8092
 homebox_domain_name: inventory.n39.eu
 homebox_image: ghcr.io/hay-kot/homebox:v0.10.3
 
-renovate_image: renovate/renovate:39.220.1
+renovate_image: renovate/renovate:37.107.0
 renovate_forgejo_pat: "{{ vault_renovate_forgejo_pat }}"
 renovate_github_pat: "{{ vault_renovate_github_pat }}"
-renovate_git_user: "Renovate Bot <accounts+renovatebot@netz39.de>"
+renovate_git_user: "Renovate Bot <accounts+renovatebot@netz39.de>"
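The new `shlink_initial_api_key` follows the vars/vault indirection used throughout this repository: the committed vars file holds only a reference, and the secret itself lives in the encrypted vault file that follows. Sketch of the pattern (hypothetical names):

```yaml
# host_vars/<host>/vars.yml — committed in plain text
example_api_key: "{{ vault_example_api_key }}"

# host_vars/<host>/vault — encrypted with ansible-vault, decrypted at run time:
# vault_example_api_key: "the-actual-secret"
```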
host_vars/pottwal.n39.eu (vault)

@@ -1,33 +1,35 @@
 $ANSIBLE_VAULT;1.1;AES256
-61323135656430613464613334653239613865623361363734306139383261653563373365306364
-3232353634356664323235393135653762383538353635660a363461393133376566613064366233
-32323065633164646535386461373261373461343961383333333063663831353961656265313836
-6231356666356266390a333230376264313537376461326331313134313737616137636465336430
-38616261333534393464343630616464326331653163616435613863616165633730353263656565
-66346536393737353962666438333661663636636339613633653363323438326635643738656430
-38313635323066376532396666653633393736633939306566333337336635386430373662666534
-64653662333832313964323039353838353638313337306631613564383933663166633164373132
-33326537366135613733386436663366613238636133343065376534323561656265613433336637
-64613330306530323238663738356133663166303730633735656562636139626232396130656337
-34323238326437303730643736646430646239383239613061333033343733663832656262383732
-66343236326537633539353230376365666462393264303532346431383838303963613731343263
-63656630623934643763636237366630386333646263336261386162656439323232653066393266
-36633239323638396639623734666466343164663539316165386632306235363435303139356433
-37633731366565393339326235616264616535363461653531613331356239666534653232376235
-36623431343136633964656330313833643161353738303564663662363062653631363661633333
-31663339643034333336313630356266393062323637333664646335363961386433303662343734
-32313338613064373966393163623863633037353564316361656162323234313435646532343231
-30356336626435306332316566323932313564626164316165646530656365363330643033376134
-32363530306536633531326535373136326364356237376264646130663430343838323834386264
-35306561353866346430393837346333396236356465666334656139373764653365396534613034
-36393239623930656266336130303236393336373063623738653939393563336130316461393535
-32313932396263306439356663373361393539633639343238393631343830306532336162616565
-32336264646333613238363065613130633966656164666333303332313536616334623639613630
-34323665366131663736623638636263616131393133346464653037366465633332363131316332
-65356563373036353432376234626262313266316435656562646365363539386361653966366465
-39383536313764663732613462383466616238363765633062333830373038656334363764643663
-61346664353064333238313038303333386436653738316630383237366532353765346633383862
-65666235666663666638656337303762626563663135613431616439633731383638653466623434
-62663164633032666638656464666130623566356636343330386236336266386263323936396330
-31613339623034663466613930613062343666633530306136623734393862333365646538326261
-63646439343565366463
+33383365663237393662386363396661323463396239653830396164316538323362376337326233
+3730346439376362396261333165616536616135366637660a323435346262353061323130303362
+66656533633333346536653638303463386334306461313830383631323331343332376435636631
+6662323136306538370a616563376364373737396134616161356134636663613262366234306530
+36396635383933393034613863386338613266333834616666393939393261613235366132663866
+65356162633934633066336532396662356163386335313833386230643165633464663065383131
+32366534366335623136363738653066336339623832636166343239613535623333343035633662
+39663638386364306361643861666530323730626265633865343534643564363961636463356431
+34333463383137363064323634656561313464303266653138383038303964363263326165643130
+63613162383030323263613964303932623339626139326161393439663763386462313562636330
+65323762356135333736316564623565316533646132303265623364386263383733306136326237
+36353638663936666633393332623436643436333164633566386130396638633637323538633033
+37326663636139646236636164353066626136396566633363333236313266656139646563386234
+66633735396532613838646561643563623538363264383763303363313266363134353232643666
+33333531386238356232306164346331353366636565656463323134333362353764663437663330
+63363065353430353861316134656533363533666335373163303661656134646237386664636564
+33643836386331363461383934666335336533353331363735346566323139646362343765383766
+38306462313033353831626439626235393036393765306139313161353163636233616139396430
+65373236336562343939356266333037643838363333396462336331303366333362393462313233
+61343632646431616461343861626437623362343762393265323461343263396338373434623065
+63316438323732326531316534613531366464623435623163663830343962343532353136366338
+36646663363763633432376632353164366463336166356465333238383637623633663039343938
+31613662666336316137376338393131393331653331663465373266313164303962363932656430
+63623733313362323166356435346634333431306431393633346430336461663965643362643634
+37666635633937323635373837333034653835363939623938306462366234343130386430303965
+39323563343033343137363065396134366434383637363661323538343135323336333962646131
+36356436396431646566613133336263653466633237373839626361393738383865313238363535
+66623031623562386634366238383632326238636231626637633436326336633335626433653363
+36626237333962653863613932376230396535303834323030353239623161653734303966633930
+38653338636431383730376262386630613062366231366239616233616439386531313338343166
+38656138303363643933633864616366343039626237616633643961346338623737313266633537
+36623731323639643530363766323530616462366264613265303861663863306461393834393565
+38656535353138306264386433323337343661326435316632323838303639353830613832613461
+6461613532393237393866613765396530633561663838613461
@ -6,8 +6,9 @@ pwr_meter_api_token: "{{ vault_pwr_meter_api_token }}"
brotherql_printer_host: "brotherql-720nw.n39.eu"

# URL for the grafana kiosk in our Bastelbereich
kiosk_grafana_url: "https://grafana.n39.eu/d/xpLj6UD4z/hobbes-space-monitor?orgId=1&kiosk"
kiosk_grafana_url: "https://grafana.n39.eu/d/xpLj6UD4z/hobbes-space-monitor?orgId=1"
kiosk_grafana_user: "{{ vault_kiosk_grafana_user }}"
kiosk_grafana_pass: "{{ vault_kiosk_grafana_pass }}"
kiosk_mqtt_host: "mqtt.n39.eu"
kiosk_mqtt_topic: "Netz39/Things/HackingDashboard/Screenshot"
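The two kiosk_grafana_url variants differ only in the trailing `&kiosk` query parameter, which switches Grafana into kiosk (TV) mode and hides the navigation chrome. A hedged sketch of how the kiosk_* vars could drive a viewer container; the task, container name, image, and env mapping are illustrative assumptions, not taken from this repo:

```yaml
# Hypothetical consumer of the kiosk_* vars (image and env names assumed):
- name: Ensure grafana kiosk viewer is running
  community.docker.docker_container:
    name: grafana-kiosk
    image: grafana/grafana-kiosk:latest  # assumed image reference
    restart_policy: unless-stopped
    env:
      KIOSK_URL: "{{ kiosk_grafana_url }}"
      KIOSK_USER: "{{ kiosk_grafana_user }}"
      KIOSK_PASS: "{{ kiosk_grafana_pass }}"
```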
44 host_vars/vyos.n39.eu/dns-dhcp.yml Normal file
@ -0,0 +1,44 @@
entries:
  - hostname: sw-1
    mac: 00:0f:cb:ed:5e:e0
    address: 172.23.63.34
    # aliases:
    #   - name:
  - hostname: sw-4
    mac: 00:1e:8c:74:79:62
    address: 172.23.63.35
    aliases:
      - name: gx-2124x.n39.eu
  - hostname: beaker-mgmt
    mac: 00:19:99:ec:07:5d
    address: 172.23.63.43
  - hostname: sw-2.n39.eu
    mac: 10:0d:7f:4a:c0:21
    address: 172.23.63.94
    aliases:
      - name: gs108tv2-1.n39.eu
  - hostname: sw-3.n39.eu
    mac: 20:0c:c8:4e:2c:56
    address: 172.23.63.104
    aliases:
      - name: gs105pe-1.n39.eu
  - hostname: pottwal.n39.eu
    mac: 52:82:de:fc:1e:94
    address: 172.23.52.44
    aliases:
      - name: apt-proxy.n39.eu
      - name: label.n39.eu
      - name: syslog.n39.eu
      - name: pad.n39.eu
      - name: git.n39.eu
      - name: uritools.n39.eu
      - name: uritools-api.n39.eu
      - name: uptime.n39.eu
      - name: redmine.n39.eu
      - name: influx.n39.eu
      - name: jabber.n39.eu
      - name: conference.jabber.n39.eu
      - name: grafana.n39.eu
      - name: inventory.n39.eu
      - name: sl.n39.eu
      - name: spaceapi.n39.eu
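Each entry carries exactly what a VyOS static DHCP mapping plus a local DNS alias needs. A minimal sketch of how the list could be rendered into router configuration, assuming the vyos.vyos collection; the shared-network name, subnet, and task wiring are assumptions for illustration, not taken from this repo:

```yaml
# Illustrative only: shared-network name and subnet are assumptions.
- name: Configure DHCP static mappings from dns-dhcp entries
  vyos.vyos.vyos_config:
    lines:
      - set service dhcp-server shared-network-name LAN subnet 172.23.48.0/20 static-mapping {{ item.hostname }} mac-address {{ item.mac }}
      - set service dhcp-server shared-network-name LAN subnet 172.23.48.0/20 static-mapping {{ item.hostname }} ip-address {{ item.address }}
  loop: "{{ entries }}"
```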
6 host_vars/vyos.n39.eu/vars.yml Normal file
@ -0,0 +1,6 @@
---
ansible_user: vyos
ansible_connection: ansible.netcommon.network_cli
ansible_network_os: vyos
vyos_user: "vyos"
vyos_password: "{{ vault_vyos_password }}"
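With ansible_connection set to ansible.netcommon.network_cli and ansible_network_os set to vyos, modules from the vyos.vyos collection talk to the router over SSH instead of running Python on the target. A minimal smoke test under that assumption; the play itself is illustrative, not part of this repo:

```yaml
# Hypothetical check play; requires the vyos.vyos collection.
- name: Verify VyOS reachability over network_cli
  hosts: vyos.n39.eu
  gather_facts: false
  tasks:
    - name: Read the installed VyOS version
      vyos.vyos.vyos_command:
        commands: show version
      register: version_out

    - name: Show the result
      ansible.builtin.debug:
        var: version_out.stdout_lines
```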
6 host_vars/vyos.n39.eu/vault Normal file
@ -0,0 +1,6 @@
$ANSIBLE_VAULT;1.1;AES256
61653838316339323134626339363035316239326637346534623263343732393865396538323063
6538373236396131653532633134333035376666383262640a343961313831326634393166653539
36376634353437346230323037656364636235336530396661653739626432373938646634663065
6466363965396130650a376339356330313763383936326662663361366535306463333934356238
37623336363637383233323966386231333437343435386462323532366461663364
@ -1,17 +0,0 @@
---
server_admin: "admin+wittgenstein@netz39.de"
mac: "b8:27:eb:48:f1:59"
ansible_python_interpreter: /usr/bin/python3
gatekeeper_user: pi
data_dir: "/srv/data"

spaceapi_host_port: 8001
spaceapi_domain_name: spaceapi.n39.eu
spaceapi_image: netz39/spaceapi-service:0.1.1
spaceapi_topic_status: "Netz39/SpaceAPI/isOpen"
spaceapi_topic_lastchange: "Netz39/SpaceAPI/lastchange"

ampelcontroller_image: netz39/ampel-controller:0.2.0
topic_lever_state: "Netz39/Things/StatusSwitch/Lever/State"
topic_door_events: "Netz39/Things/Door/Events"
topic_traffic_light: "Netz39/Things/Ampel/Light"
@ -12,15 +12,13 @@
    pottwal.n39.eu:
    radon.n39.eu:
    unicorn.n39.eu:
    wittgenstein.n39.eu:
    k3s-c1.n39.eu:
    k3s-c2.n39.eu:
    k3s-c3.n39.eu:
    k3s-w1.n39.eu:
    k3s-w2.n39.eu:
    k3s-w3.n39.eu:
    # Host rhodium.n39.eu is the OpenWRT router, but cannot be added here
    # as it would be treated like a Debian host
    # vyos.n39.eu:

  children:
    docker_host:
@ -30,7 +28,6 @@
        radon.n39.eu:
        tau.netz39.de:
        unicorn.n39.eu:
        wittgenstein.n39.eu:
    proxmox:
      hosts:
        holmium.n39.eu:
@ -65,6 +62,7 @@
        k3s-w1.n39.eu:
        k3s-w2.n39.eu:
        k3s-w3.n39.eu:
        rousseau.n39.eu:
    location_internet:
      hosts:
        tau.netz39.de:
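Group membership is what the playbooks key on, so a host only needs to appear once per concern. Inside a role, a task can branch on membership via the group_names magic variable; a small illustrative task, not taken from this repo:

```yaml
# Hypothetical guard: skip LAN-only configuration on hosts that live
# outside the space network (the location_internet group above).
- name: Apply LAN-only configuration
  ansible.builtin.debug:
    msg: "this host is inside the space network"
  when: "'location_internet' not in group_names"
```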
3 main.yml
@ -42,6 +42,3 @@

- name: Plumbum specific setup
  import_playbook: host-plumbum.yml

- name: Wittgenstein specific setup
  import_playbook: host-wittgenstein.yml
@ -11,14 +11,24 @@
      "matchStrings": [
        "image: (?<depName>.*?):(?<currentValue>.*?)(@(?<currentDigest>sha256:.*?))?\\s"
      ]
    },
    {
      "fileMatch": [
        "^roles/docker_setup/defaults/main.yml$"
      ],
      "datasourceTemplate": "github-release",
      "versioningTemplate": "semver",
      "depNameTemplate": "docker-compose",
      "matchStrings": [
        "docker_compose_version: (?<currentValue>.*?)\\s"
      ]
    }
  ],
  "packageRules": [
    {
      "matchDatasources": ["docker"],
      "matchPackageNames": ["renovate/renovate"],
      "schedule": [ "before 1am on friday" ],
      "automerge": true
      "schedule": [ "on friday" ]
    }
  ]
}

}
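Both custom managers work purely on regex captures: the first scans vars files for container image pins, the second tracks the docker-compose release used by docker_setup. Lines they are meant to match look like these (both taken from files in this compare; the digest remark is an assumption about how the optional capture group behaves):

```yaml
# Matched by the image regex: depName "netz39/spaceapi-service",
# currentValue "0.1.1"; an optional @sha256:... suffix would land in
# currentDigest.
spaceapi_image: netz39/spaceapi-service:0.1.1

# Matched by the docker-compose regex in roles/docker_setup/defaults/main.yml:
docker_compose_version: "1.25.4"
```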
@ -1,26 +1,18 @@
---
roles:
  - name: adriagalin.timezone
    src: git+https://github.com/adriagalin/ansible.timezone.git
  - src: hifis.unattended_upgrades
    version: v3.2.1
  - src: git+https://github.com/adriagalin/ansible.timezone.git
    version: 4.0.0
  - name: 24367dfa.dehydrated
    src: git+https://github.com/24367dfa/ansible-role-dehydrated.git
    version: 2.1.0
  - name: penguineer.dehydrated_cron
    src: https://github.com/penguineer/ansible-role-dehydrated_cron.git
  - src: git+https://github.com/24367dfa/ansible-role-dehydrated.git
    version: 2.0.0
  - src: https://github.com/penguineer/ansible-role-dehydrated_cron.git
    version: v1.1.0
  - name: maz3max.ble_keykeeper
    src: git+https://github.com/maz3max/ble-keykeeper-role.git
  - src: git+https://github.com/maz3max/ble-keykeeper-role.git
    version: v1.1.0
  - src: lespocky.telegraf_docker_in_docker
    version: v0.2.2
  - name: netz39.host_docker
    src: git+https://github.com/netz39/ansible-role-host-docker.git
    version: v0.5.0
    version: v0.2.1

collections:
  - name: community.grafana
    version: 2.1.0
  # for role 'hifis.toolkit.unattended_upgrades'
  - name: hifis.toolkit
    version: 5.3.0
    version: 1.6.1
@ -1,6 +1,6 @@
# Handlers for role apache
---
- name: Restart apache2
  ansible.builtin.service:
- name: restart apache2
  service:
    name: apache2
    state: restarted
@ -1,12 +1,12 @@
---
- name: Ensure Apache2 and modules are installed and up to date
  ansible.builtin.apt:
  apt:
    name:
      - apache2
    state: present

- name: Ensure necessary modules are enabled
  community.general.apache2_module:
  apache2_module:
    name: "{{ item }}"
    state: present
  with_items:
@ -23,7 +23,7 @@
    mode: "0644"
    owner: root
    group: root
  notify: Restart apache2
  notify: restart apache2

- name: Add symlink to enable configuration
  ansible.builtin.file:
@ -32,4 +32,4 @@
    state: link
    owner: root
    group: root
  notify: Restart apache2
  notify: restart apache2
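The recurring Restart apache2 / restart apache2 pairs across these roles are not cosmetic: notify resolves handlers by exact name, so renaming a handler without touching every notify silently orphans the restart. A minimal sketch of the coupling; the file layout and template name are illustrative:

```yaml
# handlers/main.yml
- name: Restart apache2
  ansible.builtin.service:
    name: apache2
    state: restarted

# tasks/main.yml -- the notify string must match the handler name verbatim
- name: Deploy a site configuration
  ansible.builtin.template:
    src: site.conf.j2            # illustrative template name
    dest: /etc/apache2/sites-available/site.conf
  notify: Restart apache2
```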
@ -1,6 +1,6 @@
# Handlers for role apache_letsencrypt
---
- name: Restart apache2
  ansible.builtin.service:
- name: restart apache2
  service:
    name: apache2
    state: restarted
@ -7,7 +7,7 @@
    mode: "0644"
    owner: root
    group: root
  notify: Restart apache2
  notify: restart apache2

- name: Add symlink to enable configuration
  ansible.builtin.file:
@ -17,4 +17,4 @@
    mode: "0644"
    owner: root
    group: root
  notify: Restart apache2
  notify: restart apache2
@ -19,7 +19,7 @@ cleanuri_amqp_canonizer: "canonizer"
cleanuri_amqp_retrieval: "extractor"

# Docker images
cleanuri_image_webui: mrtux/cleanuri-webui:0.2.2
cleanuri_image_apigateway: mrtux/cleanuri-apigateway:0.3.2
cleanuri_image_canonizer: mrtux/cleanuri-canonizer:0.5.3
cleanuri_image_extractor: mrtux/cleanuri-extractor:0.5.3
cleanuri_image_webui: mrtux/cleanuri-webui:0.2.0
cleanuri_image_apigateway: mrtux/cleanuri-apigateway:0.3.1
cleanuri_image_canonizer: mrtux/cleanuri-canonizer:0.4.0
cleanuri_image_extractor: mrtux/cleanuri-extractor:0.4.0
@ -1,7 +1,7 @@
# Tasks for the cleanuri role
---
- name: Ensure CleanURI WebUI is running
  community.docker.docker_container:
  docker_container:
    name: cleanuri-webui
    image: "{{ cleanuri_image_webui }}"
    pull: true
@ -15,7 +15,7 @@
      REACT_APP_API_GATEWAY: "https://{{ cleanuri_api_domain }}"

- name: Setup proxy site for the CleanURI WebUI
  ansible.builtin.include_role:
  include_role:
    name: setup_http_site_proxy
  vars:
    site_name: "{{ cleanuri_ui_domain }}"
@ -23,7 +23,7 @@


- name: Ensure CleanURI API Gateway is running
  community.docker.docker_container:
  docker_container:
    name: cleanuri-apigateway
    image: "{{ cleanuri_image_apigateway }}"
    pull: true
@ -35,14 +35,14 @@
    env:
      TZ: "{{ timezone }}"
      AMQP_HOST: "{{ cleanuri_amqp_host }}"
      AMQP_USER: "{{ cleanuri_amqp_user }}"
      AMQP_USER: "{{ cleanuri_amqp_user }}"
      AMQP_PASS: "{{ cleanuri_amqp_pass }}"
      AMQP_VHOST: "{{ cleanuri_amqp_vhost }}"
      GATEWAY_RESULT_QUEUE: "{{ cleanuri_amqp_results }}"
      GATEWAY_TASK_RK: "{{ cleanuri_amqp_canonizer }}"

- name: Ensure CleanURI Canonizer is running
  community.docker.docker_container:
  docker_container:
    name: cleanuri-canonizer
    image: "{{ cleanuri_image_canonizer }}"
    pull: true
@ -52,14 +52,14 @@
    env:
      TZ: "{{ timezone }}"
      AMQP_HOST: "{{ cleanuri_amqp_host }}"
      AMQP_USER: "{{ cleanuri_amqp_user }}"
      AMQP_USER: "{{ cleanuri_amqp_user }}"
      AMQP_PASS: "{{ cleanuri_amqp_pass }}"
      AMQP_VHOST: "{{ cleanuri_amqp_vhost }}"
      CANONIZER_TASK_QUEUE: "{{ cleanuri_amqp_canonizer }}"
      EXTRACTOR_TASK_RK: "{{ cleanuri_amqp_retrieval }}"

- name: Ensure CleanURI Extractor is running
  community.docker.docker_container:
  docker_container:
    name: cleanuri-extractor
    image: "{{ cleanuri_image_extractor }}"
    pull: true
@ -69,14 +69,14 @@
    env:
      TZ: "{{ timezone }}"
      AMQP_HOST: "{{ cleanuri_amqp_host }}"
      AMQP_USER: "{{ cleanuri_amqp_user }}"
      AMQP_USER: "{{ cleanuri_amqp_user }}"
      AMQP_PASS: "{{ cleanuri_amqp_pass }}"
      AMQP_VHOST: "{{ cleanuri_amqp_vhost }}"
      EXTRACTION_TASK_QUEUE: "{{ cleanuri_amqp_retrieval }}"


- name: Setup proxy site for the CleanURI API Gateway
  ansible.builtin.include_role:
  include_role:
    name: setup_http_site_proxy
  vars:
    site_name: "{{ cleanuri_api_domain }}"
@ -1,6 +1,6 @@
# handlers file for cron-dd24-dyndns
---
- name: Reload cron
- name: reload cron
  ansible.builtin.shell:
    cmd: service cron reload
    warn: no
@ -1,6 +1,6 @@
---
- name: Make sure cron and curl are installed
  ansible.builtin.apt:
  apt:
    name:
      - cron
      - curl
@ -13,6 +13,6 @@
    owner: root
    group: root
    mode: "0644"
  notify: Reload cron
  notify: reload cron
# There is ansible.builtin.cron, but this makes configuration much
# more complicated, so we stick to the template.
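For comparison, the ansible.builtin.cron equivalent the comment above alludes to would look roughly like this. Note that the template's `> >(...)` / `2> >(...)` redirections are bashisms that rely on SHELL=/bin/bash in the cron file; this sketch pipes to logger instead so it also works under /bin/sh. A sketch only; the roles deliberately stay with the template:

```yaml
# Hedged equivalent of the templated cron file (values as in the template).
- name: Renew DD24 DynDNS entry every five minutes
  ansible.builtin.cron:
    name: dd24-dyndns
    cron_file: dd24-dyndns
    user: root
    minute: "*/5"
    job: >-
      curl --silent --show-error
      "https://dynamicdns.key-systems.net/update.php?hostname={{ dyndns_domain }}&password={{ dyndns_password }}&ip={{ dyndns_ip }}"
      2>&1 | logger -p user.debug -t dd24
```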
@ -1,6 +1,6 @@
# /etc/cron.d/dd24-dyndns: Cron call to renew DynDNS entry

SHELL=/bin/bash
SHELL=/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin

*/5 * * * * root curl --silent --show-error "https://dynamicdns.key-systems.net/update.php?hostname={{dyndns_domain}}&password={{dyndns_password}}&ip={{dyndns_ip}}" > >(grep 'code\|description' | paste -d',' - - | logger -p user.debug -t dd24) 2> >(/usr/bin/logger -p user.error -t dd24)
*/5 * * * * root curl --silent --show-error "https://dynamicdns.key-systems.net/update.php?hostname={{dyndns_domain}}&password={{dyndns_password}}&ip={{dyndns_ip}}" > /dev/null 2> >(/usr/bin/logger -p user.error -t dd24)
@ -1,4 +0,0 @@
# desec.io Cron configuration
---
dyndns_domain: www.example.com
dyndns_token: yourtoken
@ -1,10 +0,0 @@
# handlers file for desec_dyndns_cron
---
- name: Reload cron
  ansible.builtin.shell:
    cmd: service cron reload
    warn: no
  # Use the shell call because the task sometimes has problems finding the service state
  # service:
  #   name: cron
  #   state: restarted
@ -1,18 +0,0 @@
---
- name: Make sure cron and curl are installed
  ansible.builtin.apt:
    name:
      - cron
      - curl
    state: present

- name: Setup cron file for desec.io updates
  ansible.builtin.template:
    src: "templates/desec-dyndns.cron.j2"
    dest: "/etc/cron.d/desec-dyndns"
    owner: root
    group: root
    mode: "0644"
  notify: Reload cron
# There is ansible.builtin.cron, but this makes configuration much
# more complicated, so we stick to the template.
@ -1,6 +0,0 @@
# /etc/cron.d/desec-dyndns: Cron call to renew DynDNS entry

SHELL=/bin/bash
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin

*/5 * * * * root curl --silent --show-error --user {{ dyndns_domain }}:{{ dyndns_token }} "https://update.dedyn.io/" > >(logger -p user.debug -t desec) 2> >(/usr/bin/logger -p user.error -t desec)
5 roles/docker_setup/defaults/main.yml Normal file
@ -0,0 +1,5 @@
---
docker_compose_version: "1.25.4"
docker_compose_path: /usr/local/bin/docker-compose
docker_data_root: "/var/lib/docker"
docker_storage_driver: "overlay2"
6 roles/docker_setup/handlers/main.yml Normal file
@ -0,0 +1,6 @@
---
- name: restart docker
  service:
    name: docker
    state: restarted
    enabled: yes
84 roles/docker_setup/tasks/main.yml Normal file
@ -0,0 +1,84 @@
# This file is a mash-up of:
# https://github.com/geerlingguy/ansible-role-docker/blob/master/tasks/docker-compose.yml
# https://www.digitalocean.com/community/tutorials/how-to-install-docker-compose-on-debian-9
# and our own stuff …
---
- name: Gather package facts
  package_facts:
    manager: "auto"

- name: Exit if docker.io is installed
  fail:
    msg: "Please remove docker.io (Debian vanilla docker package) first!"
  when: "'docker.io' in ansible_facts.packages"

- name: Install Docker APT deps
  package:
    name: "{{ packages }}"
    state: present
  vars:
    packages:
      - apt-transport-https
      - ca-certificates
      - gnupg2
      - software-properties-common

- name: add Docker apt-key
  apt_key:
    url: https://download.docker.com/linux/debian/gpg
    state: present

- name: add Docker's APT repository
  ansible.builtin.template:
    src: templates/docker.list.j2
    dest: /etc/apt/sources.list.d/docker.list
  register: apt_repo

- name: Update package cache  # noqa 503
  ansible.builtin.apt:
    update_cache: true
  when: apt_repo.changed

- name: install Docker
  package:
    name: "{{ packages }}"
    state: present
  vars:
    packages:
      - docker-ce
      - python3-docker

- name: Set docker configuration
  template:
    src: templates/daemon.json.j2
    dest: /etc/docker/daemon.json
    mode: "0644"
  notify: restart docker

- name: Check current docker-compose version.
  command: docker-compose --version
  register: docker_compose_current_version
  changed_when: false
  failed_when: false

- name: Delete existing docker-compose version if it's different.
  file:
    path: "{{ docker_compose_path }}"
    state: absent
  when: >
    docker_compose_current_version.stdout is defined
    and docker_compose_version not in docker_compose_current_version.stdout

- name: Install Docker Compose (if configured).
  get_url:
    url: https://github.com/docker/compose/releases/download/{{ docker_compose_version }}/docker-compose-Linux-x86_64
    dest: "{{ docker_compose_path }}"
    mode: "0755"

- name: Place admin users in docker group
  user:
    name: "{{ item.logname }}"
    groups: [docker]
    append: yes
  when: item.docker
  with_items: "{{ users }}"
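The role is self-contained: it refuses to run next to Debian's docker.io package, pins Docker's own APT repository, and manages the standalone docker-compose v1 binary pinned by docker_compose_version. A hedged sketch of wiring it into a play; the role and the docker_host group exist in this repo, the play wrapper itself is illustrative:

```yaml
# Hypothetical play; names taken from the inventory and roles above.
- name: Provision container hosts
  hosts: docker_host
  become: true
  roles:
    - docker_setup
```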
9 roles/docker_setup/templates/daemon.json.j2 Normal file
@ -0,0 +1,9 @@
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "data-root": "{{ docker_data_root }}",
  "storage-driver": "{{ docker_storage_driver }}"
}
2 roles/docker_setup/templates/docker.list.j2 Normal file
@ -0,0 +1,2 @@
deb https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable
@ -1,4 +1,4 @@
# Defaults for nfs_host
# Defaults for nfs-host
---
nfs_host_exports: []
# - directory: "/srv/nfs"
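The commented default hints at the expected shape of nfs_host_exports but stops after directory. An illustrative entry follows; only directory is confirmed by the tasks (mountpoint creation and mounting), the remaining keys are assumptions about what exports.j2 consumes:

```yaml
# Hypothetical values; `directory` is the only key visible in the role,
# `hosts` and `options` are assumed names for the exports.j2 template.
nfs_host_exports:
  - directory: "/srv/nfs"
    hosts: "172.23.48.0/21"
    options: "rw,sync,no_subtree_check"
```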
3 roles/nfs-host/handlers/main.yml Normal file
@ -0,0 +1,3 @@
---
- name: reload nfs
  command: 'exportfs -ra'
@ -3,9 +3,9 @@
  ansible.builtin.apt:
    state: present
    name:
      - nfs-kernel-server
      - nfs-common
      - parted
      - nfs-kernel-server
      - nfs-common
      - parted

- name: Create a new ext4 primary partition
  community.general.parted:
@ -14,7 +14,7 @@
    state: present
    fs_type: ext4

- name: Ensure nfs mountpoints exist
- name: ensure nfs mountpoints exist
  ansible.builtin.file:
    path: "{{ item.directory }}"
    state: directory
@ -30,11 +30,11 @@
    fstype: ext4
    state: present

- name: Put /etc/exports in place from template
- name: template /etc/exports
  ansible.builtin.template:
    src: templates/exports.j2
    dest: "/etc/exports"
  notify: Reload nfs
  notify: reload nfs

- name: Ensure nfs is running.
  ansible.builtin.service: "name=nfs-kernel-server state=started enabled=yes"
@ -1,3 +0,0 @@
---
- name: Reload nfs
  ansible.builtin.command: 'exportfs -ra'
@ -1,7 +1,7 @@
# Handlers for nginx-https-proxy
---
- name: Restart nginx
  ansible.builtin.service:
- name: restart nginx
  service:
    name: nginx
    state: restarted
    enabled: yes
@ -8,9 +8,9 @@
  ansible.builtin.apt:
    state: present
    name:
      - apt-transport-https
      - ca-certificates
      - gnupg2
      - apt-transport-https
      - ca-certificates
      - gnupg2

### Setup APT cache for the nginx repository
#
@ -18,7 +18,7 @@
# for SSL passthrough.

- name: Add nginx apt-key
  ansible.builtin.apt_key:
  apt_key:
    url: https://nginx.org/keys/nginx_signing.key
    state: present
@ -33,7 +33,7 @@
    src: files/apt-preference-99nginx
    dest: /etc/apt/preferences.d/99nginx

- name: Update package cache  # noqa: no-handler
- name: Update package cache  # noqa 503
  ansible.builtin.apt:
    update_cache: true
  when: apt_repo.changed
@ -45,7 +45,7 @@
    state: present
    name:
      # This version of nginx comes with the ngx_stream_core_module module
      - nginx
      - nginx


### Configuration
@ -56,7 +56,7 @@
    owner: root
    group: root
    mode: '0644'
  notify: Restart nginx
  notify: restart nginx

- name: Create directory for dehydrated forwardings
  ansible.builtin.file:
@ -74,7 +74,7 @@
    group: root
    mode: '0644'
  loop: "{{ ingress }}"
  notify: Restart nginx
  notify: restart nginx

- name: Setup nginx configuration
  # Note the order here: The nginx configuration _needs_ the dehydrated-hosts
@ -86,4 +86,4 @@
    owner: root
    group: root
    mode: '0644'
  notify: Restart nginx
  notify: restart nginx
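The role loops a per-host forwarding template over the ingress list, so each SNI-passthrough target is one list entry. The list's actual schema lives elsewhere in the repo; the entry below is only a guess at its shape, for illustration:

```yaml
# Hypothetical entry: both key names are assumptions, not taken from the role.
ingress:
  - server_name: example.n39.eu     # SNI name nginx matches on
    upstream: 192.0.2.10            # backend that terminates TLS itself
```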
5 roles/setup-http-site-forward/handlers/main.yml Normal file
@ -0,0 +1,5 @@
---
- name: restart apache2
  service:
    name: apache2
    state: restarted
@ -1,12 +1,12 @@
---
- name: Add or update Apache2 site
  ansible.builtin.template:
  template:
    src: templates/apache-docker-forward-site.j2
    dest: /etc/apache2/sites-available/{{ site_name }}.conf
  notify: Restart apache2
  notify: restart apache2

- name: Activate Apache2 site
  ansible.builtin.command: a2ensite {{ site_name }}
  command: a2ensite {{ site_name }}
  args:
    creates: /etc/apache2/sites-enabled/{{ site_name }}.conf
  notify: Restart apache2
  notify: restart apache2
@ -1,5 +0,0 @@
---
- name: Restart apache2
  ansible.builtin.service:
    name: apache2
    state: restarted
@ -1,5 +1,5 @@
---
- name: Restart apache2
  ansible.builtin.service:
- name: restart apache2
  service:
    name: apache2
    state: restarted
@ -1,13 +1,13 @@
---
- name: Add or update Apache2 site
  ansible.builtin.template:
  template:
    src: templates/apache-docker-proxy-site.j2
    dest: /etc/apache2/sites-available/{{ site_name }}.conf
    mode: "0644"
  notify: Restart apache2
  notify: restart apache2

- name: Activate Apache2 site
  ansible.builtin.command: a2ensite {{ site_name }}
  command: a2ensite {{ site_name }}
  args:
    creates: /etc/apache2/sites-enabled/{{ site_name }}.conf
  notify: Restart apache2
  notify: restart apache2
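Thanks to creates: the a2ensite call stays idempotent: once the symlink exists, the command is skipped on later runs. Other roles consume this role per site, as the cleanuri tasks above do; a condensed usage sketch with an illustrative domain:

```yaml
# Pattern as used by the cleanuri role; site_name is the role's interface.
- name: Setup proxy site for an internal service
  ansible.builtin.include_role:
    name: setup_http_site_proxy
  vars:
    site_name: "service.n39.eu"   # illustrative domain
```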
@ -1,3 +1,3 @@
---
- name: Update aliases
  ansible.builtin.shell: which newaliases && newaliases || true
  shell: which newaliases && newaliases || true
@ -1,12 +1,12 @@
---
- name: Ensure sudo is installed
  ansible.builtin.package:
  package:
    name:
      - sudo
    state: present

- name: Configure group sudo for sudoers without password
  ansible.builtin.lineinfile:
  lineinfile:
    path: /etc/sudoers
    state: present
    regexp: '^%sudo\s'
@ -14,7 +14,7 @@
  validate: /usr/sbin/visudo -cf %s

- name: Add users | create users' shell and home dir
  ansible.builtin.user:
  user:
    name: "{{ item.logname }}"
    shell: /bin/bash
    createhome: yes
@ -22,7 +22,7 @@
  with_items: "{{ users }}"

- name: Add authorized keys for user
  ansible.posix.authorized_key:
  authorized_key:
    user: "{{ item.0.logname }}"
    key: "{{ item.1 }}"
    state: present
@ -32,7 +32,7 @@
    - skip_missing: true

- name: Place user in sudo group
  ansible.builtin.user:
  user:
    name: "{{ item.logname }}"
    groups: [sudo]
    append: yes
@ -40,12 +40,12 @@
  with_items: "{{ users }}"

- name: Check if /etc/aliases exists
  ansible.builtin.stat:
  stat:
    path: /etc/aliases
  register: aliases

- name: Set system email alias
  ansible.builtin.lineinfile:
  lineinfile:
    path: /etc/aliases
    state: present
    regexp: "^{{ item.logname }}:"
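Across the users and docker_setup roles, each entry in the shared users list is read for logname, a docker flag, an SSH key list iterated via subelements, and a mail alias target. An illustrative entry; only logname and docker appear verbatim in the tasks, the key-list and alias field names are assumptions:

```yaml
# Hypothetical user entry; `ssh_keys` and `email` are assumed field names.
users:
  - logname: jdoe
    docker: true                  # read by docker_setup's group task
    email: jdoe@example.com       # target for the /etc/aliases entry
    ssh_keys:
      - "ssh-ed25519 AAAA...example jdoe@laptop"
```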
@ -1,21 +1,21 @@
---
- name: Configure local ssh to access n39 hosts
- name: configure local ssh to access n39 hosts
  hosts: localhost

  tasks:
    - name: Ensure $HOME/.ssh/config.d/ dir is present
    - name: ensure {{ lookup('env', 'HOME') }}/.ssh/config.d/ dir is present
      ansible.builtin.file:
        path: "{{ lookup('env', 'HOME') }}/.ssh/config.d/"
        state: directory
      delegate_to: localhost

    - name: Put ssh config for access to n39 internal systems in place
    - name: template ssh config for access to internal systems
      ansible.builtin.template:
        src: templates/ssh_config.j2
        dest: "{{ lookup('env', 'HOME') }}/.ssh/config.d/n39_config"
      delegate_to: localhost

    - name: Ensure that n39 access config is included
    - name: ensure that n39 access config is included
      ansible.builtin.lineinfile:
        path: ~/.ssh/config
        insertbefore: BOF
@ -44,9 +44,9 @@
    RequestHeader set "X-Forwarded-SSL" expr=%{HTTPS}
    ProxyPreserveHost {{ proxy_preserve_host | default("Off") }}

    ProxyPass /json http://172.23.48.7:8001/json
    ProxyPass /text http://172.23.48.7:8001/text
    ProxyPass /state.png http://172.23.48.7:8001/state.png
    ProxyPass /json http://172.23.48.7/spaceapi
    ProxyPass /text http://172.23.48.7/state.txt
    ProxyPass /state.png http://172.23.48.7/state.png
  </VirtualHost>
</IfFile>
</IfFile>
@ -25,11 +25,3 @@ Host {{ host }}
  Port 22

{% endfor %}

{# This is our router #}
Host rhodium.n39.eu
  Hostname rhodium.n39.eu
  IdentityFile {{ setup_ssh_key }}
  User root
  ProxyJump ssh.n39.eu
  Port 22