Migrate services part

Tobias Reisinger 2024-09-27 00:02:36 +02:00
parent 7c59e4ae57
commit 73bce8f6e5
Signed by: serguzim
GPG key ID: 13AD60C237A28DFE
157 changed files with 3883 additions and 9 deletions
roles
  _TEMPLATE
  acme_dns
  authentik
  backup
  caddy
  coder
  common
    handlers
    tasks
  docker/tasks
  extra_services
  faas
  forgejo
    files/templates/custom
    tasks
    templates
    vars
  forgejo_runner
  harbor
  healthcheck
  homebox
  immich
  influxdb
  jellyfin
  lego
  linkwarden
  mailcow
  minecraft_2
  minio
  ntfy
  reitanlage_oranienburg
  shlink
  synapse
  tandoor
  teamspeak_fallback


@ -0,0 +1,12 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ NAME_svc }}"
    env: "{{ NAME_env }}"
    compose: "{{ NAME_compose }}"
  block:
    - name: Import tasks to deploy common service
      ansible.builtin.import_tasks: tasks/deploy-common-service.yml


@ -0,0 +1,17 @@
---
NAME_svc:
  domain: NAME.serguzim.me
  name: NAME
  port: 80

NAME_env:
  EXAMPLE: value

NAME_compose:
  watchtower: true
  image:
  volumes:
    - data:/data
  file:
    volumes:
      data:
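
A new role can be stamped out of this template by copying the directory and replacing the NAME placeholder; a minimal sketch (the myservice name is hypothetical):

    cp -r roles/_TEMPLATE roles/myservice
    grep -rl NAME roles/myservice | xargs sed -i 's/NAME/myservice/g'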


@ -0,0 +1,37 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ acme_dns_svc }}"
    env: "{{ acme_dns_env }}"
    compose: "{{ acme_dns_compose }}"
  block:
    - name: Import prepare tasks for common service
      ansible.builtin.import_tasks: tasks/prepare-common-service.yml

    - name: Setting the service config path
      ansible.builtin.set_fact:
        config_path: "{{ (service_path, 'config') | path_join }}"

    - name: Create a service-config directory
      ansible.builtin.file:
        path: "{{ config_path }}"
        state: directory
        mode: "0700"

    - name: Template config
      ansible.builtin.template:
        src: config.cfg.j2
        dest: "{{ (config_path, 'config.cfg') | path_join }}"
        mode: "0600"
      register: cmd_result

    - name: Set the docker force-recreate flag
      ansible.builtin.set_fact:
        docker_force_recreate: --force-recreate
      when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.

    - name: Import start tasks for common service
      ansible.builtin.import_tasks: tasks/start-common-service.yml


@ -0,0 +1,32 @@
[general]
listen = "0.0.0.0:53"
protocol = "both"
domain = "{{ svc.domain }}"
nsname = "{{ svc.domain }}"
nsadmin = "{{ svc.nsadmin }}"
records = [
    "{{ svc.domain }}. A {{ svc.records.a }}",
    "{{ svc.domain }}. AAAA {{ svc.records.aaaa }}",
    "{{ svc.domain }}. NS {{ svc.domain }}.",
]
debug = false

[database]
engine = "postgres"
connection = "postgres://{{ svc.db.user }}:{{ svc.db.pass }}@{{ svc.db.host }}/{{ svc.db.db }}"

[api]
ip = "0.0.0.0"
disable_registration = false
port = "{{ svc.port }}"
tls = "none"
corsorigins = [
    "*"
]
use_header = true
header_name = "X-Forwarded-For"

[logconfig]
loglevel = "info"
logtype = "stdout"
logformat = "text"
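
For reference, clients bootstrap themselves against the acme-dns HTTP API before they can answer DNS-01 challenges; a sketch against this instance, assuming it is reachable as acme.serguzim.me (the placeholder values come from the /register response):

    # register a new account (open registration is enabled above)
    curl -s -X POST https://acme.serguzim.me/register
    # update the challenge TXT record with the returned credentials
    curl -s -X POST https://acme.serguzim.me/update \
        -H "X-Api-User: <username>" -H "X-Api-Key: <password>" \
        -d '{"subdomain": "<subdomain>", "txt": "<challenge-token>"}'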


@ -0,0 +1,28 @@
---
acme_dns_svc:
  domain: "{{ acme_dns.host }}"
  name: acme-dns
  port: 80
  nsadmin: "{{ admin_email | regex_replace('@', '.') }}"
  records:
    a: "{{ ansible_facts.default_ipv4.address }}"
    aaaa: "{{ ansible_facts.default_ipv6.address }}"
  db:
    host: "{{ postgres.host }}"
    port: "{{ postgres.port }}"
    user: "{{ vault_acmedns.db.user }}"
    pass: "{{ vault_acmedns.db.pass }}"
    db: acme_dns

acme_dns_compose:
  watchtower: true
  monitoring: true
  image: joohoi/acme-dns
  volumes:
    - ./config:/etc/acme-dns:ro
  file:
    services:
      app:
        ports:
          - "53:53"
          - "53:53/udp"


@ -0,0 +1,12 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ authentik_svc }}"
    env: "{{ authentik_env }}"
    compose: "{{ authentik_compose }}"
  block:
    - name: Import tasks to deploy common service
      ansible.builtin.import_tasks: tasks/deploy-common-service.yml


@ -0,0 +1,60 @@
---
authentik_svc:
  domain: auth.serguzim.me
  name: authentik
  port: 9000
  image_tag: 2024.2
  db:
    host: "{{ postgres.host }}"
    database: authentik
    user: "{{ vault_authentik.db.user }}"
    pass: "{{ vault_authentik.db.pass }}"

authentik_env:
  AUTHENTIK_SECRET_KEY: "{{ vault_authentik.secret_key }}"

  AUTHENTIK_EMAIL__HOST: "{{ mailer.host }}"
  AUTHENTIK_EMAIL__PORT: "{{ mailer.port }}"
  AUTHENTIK_EMAIL__USERNAME: "{{ vault_authentik.mail.user }}"
  AUTHENTIK_EMAIL__PASSWORD: "{{ vault_authentik.mail.pass }}"
  AUTHENTIK_EMAIL__USE_TLS: true
  AUTHENTIK_EMAIL__USE_SSL: false
  AUTHENTIK_EMAIL__TIMEOUT: 10
  AUTHENTIK_EMAIL__FROM: auth@serguzim.me

  AUTHENTIK_AVATARS: none

  AUTHENTIK_REDIS__HOST: redis

  AUTHENTIK_POSTGRESQL__HOST: "{{ svc.db.host }}"
  AUTHENTIK_POSTGRESQL__NAME: "{{ svc.db.database }}"
  AUTHENTIK_POSTGRESQL__USER: "{{ svc.db.user }}"
  AUTHENTIK_POSTGRESQL__PASSWORD: "{{ svc.db.pass }}"

authentik_compose:
  watchtower: false
  image: ghcr.io/goauthentik/server:{{ svc.image_tag }}
  file:
    services:
      app:
        command: server
        depends_on:
          - redis
      worker:
        image: ghcr.io/goauthentik/server:{{ svc.image_tag }}
        restart: always
        command: worker
        user: root
        volumes:
          - /var/run/docker.sock:/var/run/docker.sock
        env_file:
          - service.env
        depends_on:
          - redis
        networks:
          default:
      redis:
        image: redis:alpine
        restart: always
        networks:
          default:


@ -0,0 +1,3 @@
FROM restic/restic
RUN apk add curl


@ -0,0 +1,4 @@
[Timer]
OnCalendar=*-*-* 04:10:00
[Install]
WantedBy=timers.target


@ -0,0 +1,3 @@
export MAILCOW_BACKUP_LOCATION="$BACKUP_LOCATION/mailcow"
mkdir -p "$MAILCOW_BACKUP_LOCATION"
/opt/mailcow-dockerized/helper-scripts/backup_and_restore.sh backup all


@ -0,0 +1,5 @@
backup_path="$BACKUP_LOCATION/immich"
mkdir -p "$backup_path"
cd /opt/services/immich || exit
docker compose exec database sh -c 'pg_dump -U "$DB_USERNAME" "$DB_DATABASE"' | gzip >"$backup_path/immich.sql.gz"

View file

@ -0,0 +1,14 @@
mkdir -p "$BACKUP_LOCATION/postgres"
cd "$BACKUP_LOCATION/postgres" || exit

postgres_tables=$(sudo -u postgres psql -Atc "SELECT datname FROM pg_database WHERE datistemplate = false;")

for i in $postgres_tables
do
    printf "dumping %s ..." "$i"
    sudo -u postgres pg_dump "$i" | gzip >"pg_dump_$i.sql.gz"
    echo " done"
done

echo "dumping all"
sudo -u postgres pg_dumpall | gzip >"pg_dumpall.sql.gz"
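
Since these dumps are plain SQL piped through gzip, a restore is just the inverse pipeline; a sketch (the database name is illustrative):

    gunzip -c pg_dump_mydb.sql.gz | sudo -u postgres psql mydb
    # or replay the whole cluster:
    gunzip -c pg_dumpall.sql.gz | sudo -u postgres psql postgres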


@ -0,0 +1,3 @@
export MAILCOW_BACKUP_LOCATION="$BACKUP_LOCATION/mailcow"
mkdir -p "$MAILCOW_BACKUP_LOCATION"
/opt/mailcow-dockerized/helper-scripts/backup_and_restore.sh backup all


@ -0,0 +1,16 @@
---
- name: Set backup.d path
  ansible.builtin.set_fact:
    backup_d_path: "{{ (service_path, 'backup.d') | path_join }}"
- name: Create backup.d directory
  ansible.builtin.file:
    path: "{{ backup_d_path }}"
    state: directory
    mode: "0755"
- name: Copy the additional backup scripts
  ansible.builtin.copy:
    src: "{{ item }}"
    dest: "{{ backup_d_path }}"
    mode: "0755"
  with_fileglob:
    - "{{ ansible_facts.hostname }}/*"


@ -0,0 +1,12 @@
---
- name: Copy the Dockerfile
  ansible.builtin.copy:
    src: Dockerfile
    dest: "{{ (service_path, 'Dockerfile') | path_join }}"
    mode: "0644"
  register: cmd_result

- name: Set the docker rebuild flag
  ansible.builtin.set_fact:
    docker_rebuild: true
  when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.


@ -0,0 +1,39 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ backup_svc }}"
    env: "{{ backup_env }}"
    compose: "{{ backup_compose }}"
  block:
    - name: Import prepare tasks for common service
      ansible.builtin.import_tasks: tasks/prepare-common-service.yml

    - name: Copy the main backup script
      ansible.builtin.template:
        src: "backup.sh.j2"
        dest: "{{ (service_path, 'backup.sh') | path_join }}"
        mode: "0755"

    - name: Import tasks specific to docker
      ansible.builtin.import_tasks: docker.yml
    - name: Import tasks specific to the backup.d scripts
      ansible.builtin.import_tasks: backup.d.yml
    - name: Import tasks specific to systemd
      ansible.builtin.import_tasks: systemd.yml

    - name: Build service
      ansible.builtin.command:
        cmd: docker compose build --pull
        chdir: "{{ service_path }}"
      register: cmd_result
      when: docker_rebuild
      changed_when: true

    - name: Verify service
      ansible.builtin.command:
        cmd: docker compose run --rm app check
        chdir: "{{ service_path }}"
      changed_when: false


@ -0,0 +1,20 @@
---
- name: Copy the system service
  ansible.builtin.template:
    src: backup.service.j2
    dest: /etc/systemd/system/backup.service
    mode: "0644"
  become: true
- name: Copy the system timer
  ansible.builtin.copy:
    src: backup.timer
    dest: /etc/systemd/system/backup.timer
    mode: "0644"
  become: true
- name: Enable the system timer
  ansible.builtin.systemd_service:
    name: backup.timer
    state: started
    enabled: true
    daemon_reload: true
  become: true


@ -0,0 +1,11 @@
[Unit]
Description=Run backups
StartLimitIntervalSec=7200
StartLimitBurst=5

[Service]
Type=oneshot
ExecStart={{ service_path }}/backup.sh
WorkingDirectory={{ service_path }}
Restart=on-failure
RestartSec=15min


@ -0,0 +1,68 @@
#!/usr/bin/env bash

set -e
set -a
. "{{ service_path }}/service.env"
set +a

duration_start=$(date +%s)

_duration_get () {
    duration_end=$(date +%s)
    echo "$((duration_end - duration_start))"
}

hc_url="https://hc-ping.com/$HC_UID"
uptime_kuma_url="https://status.serguzim.me/api/push/$UPTIME_KUMA_TOKEN"

_hc_ping () {
    curl -fsSL --retry 3 "$hc_url$1" >/dev/null
}

_uptime_kuma_ping () {
    duration=$(_duration_get)
    curl -fsSL --retry 3 \
        --url-query "status=$1" \
        --url-query "msg=$2" \
        --url-query "ping=${duration}000" \
        "$uptime_kuma_url" >/dev/null
}

_fail () {
    _hc_ping "/fail"
    _uptime_kuma_ping "down" "$1"
    rm -rf "$BACKUP_LOCATION"
    exit 1
}

_success () {
    _hc_ping
    _uptime_kuma_ping "up" "backup successful"
}

_hc_ping "/start"

BACKUP_LOCATION="$(mktemp -d --suffix=-backup)"
export BACKUP_LOCATION
cd "$BACKUP_LOCATION" || _fail "failed to cd to $BACKUP_LOCATION"

shopt -s nullglob
for file in "{{ service_path }}/backup.d/"*
do
    file_name="$(basename "$file")"
    echo ""
    echo "running $file_name"
    time "$file" >"/tmp/$file_name.log" || _fail "error while running $file_name"
done || true

cd "{{ service_path }}"
docker compose run --rm -v "$BACKUP_LOCATION:/backup/misc" app backup /backup || _fail "error during restic backup"

_success

rm -rf "$BACKUP_LOCATION"

echo "forgetting old backups for {{ ansible_facts.hostname }}"
docker compose run --rm app forget --host "{{ ansible_facts.hostname }}" --prune \
    --keep-last 7 \
    --keep-daily 14 \
    --keep-weekly 16 \
    --keep-monthly 12 \
    --keep-yearly 2
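
Because the script is normally driven by the backup.timer unit defined earlier, a one-off test run is best routed through the same service so environment and logging stay identical:

    sudo systemctl start backup.service    # trigger a backup now
    journalctl -u backup.service -f        # follow its output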


@ -0,0 +1,59 @@
---
backup_image: registry.serguzim.me/services/backup

backup_svc:
  name: backup

backup_volumes_service: "{{ host_backup.volumes | map_backup_volumes_service }}"

backup_env:
  HC_UID: "{{ host_backup.hc_uid }}"
  UPTIME_KUMA_TOKEN: "{{ host_backup.uptime_kuma_token }}"
  RESTIC_REPOSITORY: "{{ vault_backup.restic.s3.repository }}"
  RESTIC_PASSWORD: "{{ vault_backup.restic.s3.password }}"
  AWS_ACCESS_KEY_ID: "{{ vault_backup.restic.s3.access_key_id }}"
  AWS_SECRET_ACCESS_KEY: "{{ vault_backup.restic.s3.secret_access_key }}"
  #RESTIC_S3_REPOSITORY: "{{ vault_backup.restic.s3.repository }}"
  #RESTIC_S3_PASSWORD: "{{ vault_backup.restic.s3.password }}"
  #RESTIC_S3_ACCESS_KEY_ID: "{{ vault_backup.restic.s3.access_key_id }}"
  #RESTIC_S3_SECRET_ACCESS_KEY: "{{ vault_backup.restic.s3.secret_access_key }}"
  #RESTIC_BORGBASE: "{{ vault_backup.restic.borgbase }}"

backup_compose:
  watchtower: false
  image: "{{ backup_image }}"
  volumes: "{{ backup_volumes_service }}"
  file:
    services:
      app:
        build:
          context: .
        entrypoint:
          - /usr/bin/restic
          - --retry-lock=1m
        restart: "no"
        hostname: "{{ ansible_facts.hostname }}"
      mount:
        build:
          context: .
        image: "{{ backup_image }}"
        restart: "no"
        hostname: "{{ ansible_facts.hostname }}"
        env_file:
          - service.env
        entrypoint:
          - /usr/bin/restic
          - --retry-lock=1m
        command:
          - mount
          - /mnt
        privileged: true
        devices:
          - /dev/fuse
    volumes: "{{ host_backup.volumes | map_backup_volumes }}"
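
The extra mount service defined here exposes the restic repository as a FUSE filesystem for ad-hoc browsing or restores; a sketch of its use on the host (assuming the role deploys to /opt/services/backup):

    cd /opt/services/backup
    docker compose run --rm mount    # blocks while the repo is mounted
    # inside the container, snapshots then appear under /mnt/snapshots/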


@ -0,0 +1 @@
caddy_ports_extra: []


@ -0,0 +1,8 @@
FROM caddy:2-builder AS builder

RUN xcaddy build \
    --with github.com/caddy-dns/acmedns@main

FROM caddy:2-alpine

COPY --from=builder /usr/bin/caddy /usr/bin/caddy


@ -0,0 +1,46 @@
(auth_serguzim_me) {
    # always forward outpost path to actual outpost
    reverse_proxy /outpost.goauthentik.io/* authentik:9000

    # forward authentication to outpost
    forward_auth authentik:9000 {
        uri /outpost.goauthentik.io/auth/caddy

        # capitalization of the headers is important, otherwise they will be empty
        copy_headers X-Authentik-Username X-Authentik-Groups X-Authentik-Email X-Authentik-Name X-Authentik-Uid X-Authentik-Jwt X-Authentik-Meta-Jwks X-Authentik-Meta-Outpost X-Authentik-Meta-Provider X-Authentik-Meta-App X-Authentik-Meta-Version

        # optional: this config trusts all private ranges; it should probably be set to the outpost's IP
        trusted_proxies private_ranges
    }
}

(default) {
    encode zstd gzip
}

(acmedns) {
    tls {
        dns acmedns {
            username "{$ACMEDNS_USER}"
            password "{$ACMEDNS_PASS}"
            subdomain "{$ACMEDNS_SUBD}"
            server_url "{$ACMEDNS_URL}"
        }
    }
}

(faas) {
    rewrite * /function/{args[0]}{uri}
    reverse_proxy https://faas.serguzim.me {
        header_up Host {http.reverse_proxy.upstream.hostport}
    }
}

(analytics) {
    handle_path /_a/* {
        reverse_proxy https://analytics.serguzim.me {
            header_up X-Analytics-IP {remote}
            header_up Host {http.reverse_proxy.upstream.hostport}
        }
    }
}


@ -0,0 +1,57 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ caddy_svc }}"
    env: "{{ caddy_env }}"
    compose: "{{ caddy_compose }}"
  block:
    - name: Import prepare tasks for common service
      ansible.builtin.import_tasks: tasks/prepare-common-service.yml

    - name: Copy the Dockerfile
      ansible.builtin.copy:
        src: Dockerfile
        dest: "{{ (service_path, 'Dockerfile') | path_join }}"
        mode: "0644"
      register: cmd_result

    - name: Set the docker rebuild flag
      ansible.builtin.set_fact:
        docker_rebuild: true
      when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.

    - name: Set caddy config path
      ansible.builtin.set_fact:
        config_path: "{{ (service_path, 'config') | path_join }}"

    - name: Create config directory
      ansible.builtin.file:
        path: "{{ config_path }}"
        state: directory
        mode: "0755"

    - name: Template caddyfile
      ansible.builtin.template:
        src: Caddyfile.j2
        dest: "{{ (config_path, 'Caddyfile') | path_join }}"
        mode: "0644"
      notify: Reload caddy

    - name: Copy snippets file
      ansible.builtin.copy:
        src: snippets
        dest: "{{ (config_path, 'snippets') | path_join }}"
        mode: "0644"
      notify: Reload caddy

    - name: Create sites-config directory
      ansible.builtin.file:
        path: "{{ caddy_config_path }}"
        state: directory
        mode: "0755"

    - name: Import start tasks for common service
      ansible.builtin.import_tasks: tasks/start-common-service.yml


@ -0,0 +1,11 @@
{
    email {{ admin_email }}

    servers {
        metrics
        strict_sni_host on
    }
}

import /etc/caddy/snippets
import /etc/caddy/conf.d/*.conf
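
Any change to this file is picked up by the "Reload caddy" handler further down, which validates before reloading; the same check can be run by hand:

    docker compose exec app caddy validate --config /etc/caddy/Caddyfile
    docker compose exec app caddy reload --config /etc/caddy/Caddyfile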

roles/caddy/vars/main.yml Normal file

@ -0,0 +1,40 @@
---
caddy_acmedns_user: "{{ vault_caddy.acmedns.user }}"
caddy_acmedns_pass: "{{ vault_caddy.acmedns.pass }}"
caddy_acmedns_subd: "{{ vault_caddy.acmedns.subd }}"
caddy_acmedns_url: "https://{{ acme_dns.host }}"

caddy_ports_default:
  - 80:80
  - 443:443
  - 443:443/udp
  - "{{ host_vpn.ip }}:2019:2019"
caddy_ports: "{{ caddy_ports_default | union(caddy_ports_extra) }}"

caddy_svc:
  name: caddy

caddy_env:
  CADDY_ADMIN: 0.0.0.0:2019
  ACMEDNS_USER: "{{ caddy_acmedns_user }}"
  ACMEDNS_PASS: "{{ caddy_acmedns_pass }}"
  ACMEDNS_SUBD: "{{ caddy_acmedns_subd }}"
  ACMEDNS_URL: "{{ caddy_acmedns_url }}"

caddy_compose:
  watchtower: false
  image: registry.serguzim.me/services/caddy:2-alpine
  volumes:
    - "./config:/etc/caddy/"
    - data:/data
  file:
    services:
      app:
        build:
          context: .
        ports: "{{ caddy_ports }}"
        extra_hosts:
          - host.docker.internal:host-gateway
    volumes:
      data:


@ -0,0 +1,12 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ coder_svc }}"
    env: "{{ coder_env }}"
    compose: "{{ coder_compose }}"
  block:
    - name: Import tasks to deploy common service
      ansible.builtin.import_tasks: tasks/deploy-common-service.yml

roles/coder/vars/main.yml Normal file

@ -0,0 +1,35 @@
---
coder_svc:
  domain: coder.serguzim.me
  additional_domains:
    - "*.coder.serguzim.me"
  caddy_extra: import acmedns
  name: coder
  port: 7080
  db:
    host: "{{ postgres.host }}"
    port: "{{ postgres.port }}"
  ssh_port: 22
  ssh_port_alt: 3022

coder_env:
  CODER_ADDRESS: "0.0.0.0:7080"
  CODER_ACCESS_URL: https://{{ svc.domain }}
  CODER_WILDCARD_ACCESS_URL: "*.{{ svc.domain }}"
  CODER_PG_CONNECTION_URL: postgres://{{ vault_coder.db.user }}:{{ vault_coder.db.pass }}@{{ svc.db.host }}:{{ svc.db.port }}/coder?sslmode=verify-full
  CODER_OIDC_ISSUER_URL: https://auth.serguzim.me/application/o/coder-serguzim-me/
  CODER_OIDC_CLIENT_ID: "{{ vault_coder.oidc_client.id }}"
  CODER_OIDC_CLIENT_SECRET: "{{ vault_coder.oidc_client.secret }}"

coder_compose:
  watchtower: true
  image: ghcr.io/coder/coder:latest
  volumes:
    - /var/run/docker.sock:/var/run/docker.sock
  file:
    services:
      app:
        group_add:
          - "972" # docker group on host


@ -0,0 +1,7 @@
---
- name: Reload caddy
  ansible.builtin.command:
    cmd: docker compose exec app sh -c "caddy validate --config /etc/caddy/Caddyfile && caddy reload --config /etc/caddy/Caddyfile"
    chdir: "{{ caddy_path }}"
  when: "'local-dev' != inventory_hostname"
  changed_when: true


@ -0,0 +1,9 @@
---
- name: Create the services directory
  ansible.builtin.file:
    path: "{{ services_path }}"
    state: directory
    mode: "0755"
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
  become: true


@ -0,0 +1,55 @@
- name: Install aptitude
  apt:
    name: aptitude
    state: latest
    update_cache: true
  become: true

- name: Install required system packages
  apt:
    pkg:
      - apt-transport-https
      - ca-certificates
      - curl
      - software-properties-common
      - python3-pip
      - virtualenv
      - python3-setuptools
    state: latest
    update_cache: true
  become: true

- name: Add Docker GPG apt Key
  apt_key:
    url: https://download.docker.com/linux/ubuntu/gpg
    state: present
  become: true

- name: Add Docker Repository
  apt_repository:
    repo: deb https://download.docker.com/linux/ubuntu focal stable
    state: present
  become: true

- name: Update apt and install docker packages
  apt:
    pkg:
      - docker-ce
      - docker-ce-cli
      - containerd.io
      - docker-buildx-plugin
      - docker-compose-plugin
    state: latest
    update_cache: true
  become: true

- name: Add user to the Docker group
  user:
    name: "{{ ansible_user }}"
    groups: docker
    append: yes
  become: true

- name: Create a network
  community.docker.docker_network:
    name: apps


@ -0,0 +1,11 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy extra services
  block:
    - name: Import tasks to template the site and functions for the reverse proxy
      ansible.builtin.include_tasks: tasks/steps/template-site-config.yml
      loop: "{{ extra_services_all }}"
      loop_control:
        loop_var: svc


@ -0,0 +1,14 @@
---
extra_services_default:
  - domain: cloud-old.serguzim.me
    docker_host: nextcloud
    port: 80
    caddy_extra: |
      redir /.well-known/host-meta /public.php?service=host-meta 301
      redir /.well-known/host-meta.json /public.php?service=host-meta-json 301
      redir /.well-known/webfinger /public.php?service=webfinger 301
      redir /.well-known/carddav /remote.php/dav/ 301
      redir /.well-known/caldav /remote.php/dav/ 301

extra_services_hidden: "{{ vault_extra_services }}"
extra_services_all: "{{ extra_services_default | union(extra_services_hidden) }}"

roles/faas/tasks/main.yml Normal file

@ -0,0 +1,10 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ faas_svc }}"
  block:
    - name: Import tasks to template the site and functions for the reverse proxy
      ansible.builtin.import_tasks: tasks/steps/template-site-config.yml

roles/faas/vars/main.yml Normal file

@ -0,0 +1,24 @@
---
faas_svc:
  name: faas
  domain: faas.serguzim.me
  docker_host: host.docker.internal
  port: 8080
  extra_svcs:
    - domain: serguzim.me
      faas_function: webpage-serguzim-me
      www_domain: true
      hsts: true
      caddy_extra: |
        header /.well-known/* Access-Control-Allow-Origin *

        handle /.well-known/webfinger {
          map {query.resource} {user} {
            acct:tobias@msrg.cc serguzim
            acct:serguzim@msrg.cc serguzim
          }
          rewrite * /.well-known/webfinger/{user}.json
          import faas webpage-msrg-cc
        }
    - domain: xn--sder-5qa.stream
      faas_function: webpage-soeder-stream


@ -0,0 +1 @@
<a class="item" href="https://www.serguzim.me/imprint/">Impressum</a>


@ -0,0 +1,39 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ forgejo_svc }}"
    env: "{{ forgejo_env }}"
    compose: "{{ forgejo_compose }}"
  block:
    - name: Import prepare tasks for common service
      ansible.builtin.import_tasks: tasks/prepare-common-service.yml

    - name: Copy the template files
      ansible.builtin.copy:
        src: templates/
        dest: "{{ (service_path, 'templates') | path_join }}"
        mode: "0644"
      register: cmd_result

    - name: Set the docker force-recreate flag
      ansible.builtin.set_fact:
        docker_force_recreate: --force-recreate
      when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.

    - name: Template the custom footer
      ansible.builtin.template:
        src: footer.tmpl.j2
        dest: "{{ (service_path, 'templates', 'custom', 'footer.tmpl') | path_join }}"
        mode: "0644"
      register: cmd_result

    - name: Set the docker force-recreate flag
      ansible.builtin.set_fact:
        docker_force_recreate: --force-recreate
      when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.

    - name: Import start tasks for common service
      ansible.builtin.import_tasks: tasks/start-common-service.yml


@ -0,0 +1 @@
<script async src="/_a/script.js" data-website-id="{{ vault_forgejo.umami }}"></script>


@ -0,0 +1,98 @@
---
forgejo_svc:
  domain: git.serguzim.me
  name: forgejo
  port: 3000
  caddy_extra: |
    import analytics
    header /attachments/* Access-Control-Allow-Origin *
  db:
    host: "{{ postgres.host }}"
    port: "{{ postgres.port }}"
  ssh_port: 22

forgejo_env:
  FORGEJO__database__DB_TYPE: postgres
  FORGEJO__database__HOST: "{{ svc.db.host }}:{{ svc.db.port }}"
  FORGEJO__database__NAME: forgejo
  FORGEJO__database__USER: "{{ vault_forgejo.db.user }}"
  FORGEJO__database__PASSWD: "{{ vault_forgejo.db.pass }}"
  FORGEJO__database__SSL_MODE: verify-full

  FORGEJO__repository__ENABLE_PUSH_CREATE_USER: true
  FORGEJO__repository__ENABLE_PUSH_CREATE_ORG: true
  FORGEJO__repository__DEFAULT_BRANCH: main

  FORGEJO__cors__ENABLED: true
  FORGEJO__cors__SCHEME: https

  FORGEJO__ui__DEFAULT_THEME: forgejo-dark

  FORGEJO__server__DOMAIN: "{{ svc.domain }}"
  FORGEJO__server__SSH_DOMAIN: "{{ svc.domain }}"
  FORGEJO__server__SSH_PORT: "{{ svc.ssh_port }}"
  FORGEJO__server__ROOT_URL: https://{{ svc.domain }}
  FORGEJO__server__OFFLINE_MODE: true
  FORGEJO__server__LFS_JWT_SECRET: "{{ vault_forgejo.server_lfs_jwt_secret }}"
  FORGEJO__server__LFS_START_SERVER: true

  FORGEJO__security__INSTALL_LOCK: true
  FORGEJO__security__INTERNAL_TOKEN: "{{ vault_forgejo.security_internal_token }}"
  FORGEJO__security__SECRET_KEY: "{{ vault_forgejo.security_secret_key }}"

  FORGEJO__openid__ENABLE_OPENID_SIGNUP: true
  FORGEJO__openid__ENABLE_OPENID_SIGNIN: false

  FORGEJO__service__ALLOW_ONLY_EXTERNAL_REGISTRATION: true
  FORGEJO__service__ENABLE_BASIC_AUTHENTICATION: false
  FORGEJO__service__DEFAULT_KEEP_EMAIL_PRIVATE: true
  FORGEJO__service__NO_REPLY_ADDRESS: discard.msrg.cc

  FORGEJO__webhook__DELIVER_TIMEOUT: 60

  FORGEJO__mailer__ENABLED: true
  FORGEJO__mailer__PROTOCOL: smtp+starttls
  FORGEJO__mailer__SMTP_ADDR: mail.serguzim.me
  FORGEJO__mailer__SMTP_PORT: 587
  FORGEJO__mailer__FROM: Forgejo <git@serguzim.me>
  FORGEJO__mailer__USER: git@serguzim.me
  FORGEJO__mailer__PASSWD: "{{ vault_forgejo.mailer_passwd }}"
  FORGEJO__mailer__SEND_AS_PLAIN_TEXT: true

  FORGEJO__picture__DISABLE_GRAVATAR: true

  FORGEJO__attachment__MAX_FILES: 10

  FORGEJO__oauth2__JWT_SECRET: "{{ vault_forgejo.oauth2_jwt_secret }}"

  FORGEJO__metrics__ENABLED: true
  FORGEJO__metrics__TOKEN: "{{ vault_metrics_token }}"

  FORGEJO__actions__ENABLED: true

  FORGEJO__storage__STORAGE_TYPE: minio
  FORGEJO__storage__MINIO_ENDPOINT: s3.nl-ams.scw.cloud
  FORGEJO__storage__MINIO_ACCESS_KEY_ID: "{{ opentofu.scaleway_service_keys.forgejo.access_key }}"
  FORGEJO__storage__MINIO_SECRET_ACCESS_KEY: "{{ opentofu.scaleway_service_keys.forgejo.secret_key }}"
  FORGEJO__storage__MINIO_BUCKET: forgejo.serguzim.me
  FORGEJO__storage__MINIO_LOCATION: nl-ams
  FORGEJO__storage__MINIO_USE_SSL: true

  FORGEJO__other__SHOW_FOOTER_VERSION: true
  FORGEJO__other__SHOW_FOOTER_TEMPLATE_LOAD_TIME: false

forgejo_compose:
  watchtower: true
  image: codeberg.org/forgejo/forgejo:7.0
  volumes:
    - data:/data
    - ./templates:/data/gitea/templates
    - /etc/timezone:/etc/timezone:ro
    - /etc/localtime:/etc/localtime:ro
  file:
    services:
      app:
        ports:
          - "{{ svc.ssh_port }}:{{ svc.ssh_port }}"
    volumes:
      data:


@ -0,0 +1,81 @@
log:
  # The level of logging, can be trace, debug, info, warn, error, fatal
  level: info

runner:
  # Where to store the registration result.
  file: /data/.runner
  # Execute how many tasks concurrently at the same time.
  capacity: 1
  # Extra environment variables to run jobs.
  #envs:
  #  A_TEST_ENV_NAME_1: a_test_env_value_1
  #  A_TEST_ENV_NAME_2: a_test_env_value_2
  # Extra environment variables to run jobs from a file.
  # It will be ignored if it's empty or the file doesn't exist.
  #env_file: .env
  # The timeout for a job to be finished.
  # Please note that the Forgejo instance also has a timeout (3h by default) for the job.
  # So the job could be stopped by the Forgejo instance if its timeout is shorter than this.
  timeout: 3h
  # Whether to skip verifying the TLS certificate of the Forgejo instance.
  insecure: false
  # The timeout for fetching the job from the Forgejo instance.
  fetch_timeout: 5s
  # The interval for fetching the job from the Forgejo instance.
  fetch_interval: 2s
  # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
  # Like: ["macos-arm64:host", "ubuntu-latest:docker://node:16-bullseye", "ubuntu-22.04:docker://node:16-bullseye"]
  # If it's empty when registering, it will ask for inputting labels.
  # If it's empty when executing `daemon`, it will use the labels in the `.runner` file.
  labels: []

cache:
  # Enable cache server to use actions/cache.
  enabled: true
  # The directory to store the cache data.
  # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
  dir: ""
  # The host of the cache server.
  # It's not for the address to listen, but the address to connect from job containers.
  # So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
  host: ""
  # The port of the cache server.
  # 0 means to use a random available port.
  port: 0

container:
  # Specifies the network to which the container will connect.
  # Could be host, bridge or the name of a custom network.
  # If it's empty, create a network automatically.
  network: ""
  # Whether to create networks with IPv6 enabled. Requires the Docker daemon to be set up accordingly.
  # Only takes effect if "network" is set to "".
  enable_ipv6: false
  # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
  privileged: false
  # And other options to be used when the container is started (eg, --add-host=my.forgejo.url:host-gateway).
  options:
  # The parent directory of a job's working directory.
  # If it's empty, /workspace will be used.
  workdir_parent:
  # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
  # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
  # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
  # valid_volumes:
  #   - data
  #   - /src/*.json
  # If you want to allow any volume, please use the following configuration:
  # valid_volumes:
  #   - '**'
  valid_volumes: []
  # Overrides the docker client host with the specified one.
  # If it's empty, act_runner will find an available docker host automatically.
  # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
  # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
  docker_host: ""

host:
  # The parent directory of a job's working directory.
  # If it's empty, $HOME/.cache/act/ will be used.
  workdir_parent:


@ -0,0 +1,42 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ forgejo_runner_svc }}"
    env: "{{ forgejo_runner_env }}"
    compose: "{{ forgejo_runner_compose }}"
  block:
    - name: Import tasks to create service directory
      ansible.builtin.import_tasks: tasks/steps/create-service-directory.yml
    - name: Import tasks to template docker compose file
      ansible.builtin.import_tasks: tasks/steps/template-docker-compose.yml

    - name: Copy the config
      ansible.builtin.copy:
        src: config.yml
        dest: "{{ (service_path, 'config.yml') | path_join }}"
        mode: "0755"

    - name: Check if service.env already exists
      ansible.builtin.stat:
        path: "{{ (service_path, 'service.env') | path_join }}"
      register: env_file

    - name: Import tasks to prompt for the registration token
      ansible.builtin.import_tasks: tasks/prompt-registration-token.yml
      when: not env_file.stat.exists or force_forgejo_runner_registration | default(False)

    - name: Import tasks create a service.env file
      ansible.builtin.import_tasks: tasks/steps/template-service-env.yml

    - name: Import start tasks for common service
      ansible.builtin.import_tasks: tasks/start-common-service.yml

    - name: Register runner
      ansible.builtin.command:
        cmd: docker compose run --rm -it app sh -c
          'forgejo-runner register --no-interactive --token ${FORGEJO_RUNNER_REGISTRATION_TOKEN} --instance ${FORGEJO_INSTANCE_URL}'
        chdir: "{{ service_path }}"
      when: not env_file.stat.exists or force_forgejo_runner_registration | default(False)
      changed_when: true # "when" checks enough. We are sure to change something here.


@ -0,0 +1,10 @@
---
- name: Input forgejo-runner registration token
  ansible.builtin.pause:
    prompt: Enter a secret
    echo: false
  register: prompt_registration_token

- name: Put registration token into env vars
  ansible.builtin.set_fact:
    forgejo_runner_env: "{{ forgejo_runner_env | combine({'FORGEJO_RUNNER_REGISTRATION_TOKEN': prompt_registration_token.user_input}, recursive=True) }}"


@ -0,0 +1,32 @@
---
forgejo_runner_svc:
  name: forgejo-runner

forgejo_runner_env:
  FORGEJO_INSTANCE_URL: https://git.serguzim.me/
  FORGEJO_RUNNER_REGISTRATION_TOKEN:
  DOCKER_HOST: tcp://docker-in-docker:2375

forgejo_runner_compose:
  watchtower: true
  image: code.forgejo.org/forgejo/runner:3.3.0
  volumes:
    - ./config.yml:/config/config.yml
    - data:/data
  file:
    services:
      app:
        hostname: "{{ ansible_facts.hostname }}"
        command: forgejo-runner --config /config/config.yml daemon
        depends_on:
          - docker-in-docker
        links:
          - docker-in-docker
      docker-in-docker:
        image: docker:dind
        privileged: true
        command: dockerd -H tcp://0.0.0.0:2375 --tls=false
        networks:
          default:
    volumes:
      data:


@ -0,0 +1,44 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ harbor_svc }}"
    env: "{{ harbor_env }}"
    yml: "{{ harbor_yml }}"
  block:
    - name: Import prepare tasks for common service
      ansible.builtin.import_tasks: tasks/prepare-common-service.yml

    - name: Import tasks to template the site for the reverse proxy
      ansible.builtin.import_tasks: tasks/steps/template-site-config.yml

    - name: Template config
      ansible.builtin.template:
        src: yml.j2
        dest: "{{ (service_path, 'harbor.yml') | path_join }}"
        mode: "0644"

    - name: Download harbor
      ansible.builtin.unarchive:
        src: https://github.com/goharbor/harbor/releases/download/v{{ svc.harbor_version }}/harbor-online-installer-v{{ svc.harbor_version }}.tgz
        dest: "{{ service_path }}"
        remote_src: true

    - name: Run the harbor prepare command
      ansible.builtin.command:
        cmd: "{{ service_path }}/harbor/prepare"
        chdir: "{{ service_path }}"
        creates: "{{ (service_path, 'docker-compose.yml') | path_join }}"
      environment:
        HARBOR_BUNDLE_DIR: "{{ service_path }}"

    - name: Run the harbor install command
      ansible.builtin.command:
        cmd: "{{ service_path }}/harbor/install.sh"
        chdir: "{{ service_path }}"
      environment:
        HARBOR_BUNDLE_DIR: "{{ service_path }}"
      become: true
      changed_when: true # TODO find way to recognize need to run install command

roles/harbor/vars/main.yml Normal file

@ -0,0 +1,103 @@
---
harbor_port_http: 20080
harbor_port_https: 20443
harbor_port_metrics: 29000

harbor_db_host: "{{ postgres.host }}"
harbor_db_port: "{{ postgres.port }}"
harbor_db_database: harbor
harbor_db_user: "{{ vault_harbor.db.user }}"
harbor_db_pass: "{{ vault_harbor.db.pass }}"

harbor_version: 2.9.0

harbor_svc:
  name: harbor
  domain: registry.serguzim.me
  caddy_extra: |
    reverse_proxy /metrics host.docker.internal:{{ harbor_port_metrics }}
    reverse_proxy host.docker.internal:{{ harbor_port_https }} {
      transport http {
        tls
        tls_server_name registry.serguzim.me
      }
    }
  caddy_default: false
  db:
    host: "{{ postgres.host }}"
    port: "{{ postgres.port }}"
    database: harbor
    user: "{{ vault_harbor.db.user }}"
    pass: "{{ vault_harbor.db.pass }}"
  harbor_version: 2.9.0

harbor_yml:
  hostname: "{{ harbor_svc.domain }}"
  http:
    port: "{{ harbor_port_http }}"
  https:
    port: "{{ harbor_port_https }}"
    certificate: "{{ (service_path, 'server.crt') | path_join }}"
    private_key: "{{ (service_path, 'server.key') | path_join }}"
  external_url: https://registry.serguzim.me
  harbor_admin_password: "{{ vault_harbor.admin_password }}"
  data_volume: "{{ (service_path, 'data') | path_join }}"
  storage_service:
    s3:
      accesskey: "{{ vault_harbor.minio.accesskey }}"
      secretkey: "{{ vault_harbor.minio.secretkey }}"
      region: de-contabo-1
      regionendpoint: https://s3.serguzim.me
      bucket: registry
      secure: true
  trivy:
    ignore_unfixed: false
    skip_update: false
    offline_scan: false
    security_check: vuln
    insecure: false
  jobservice:
    max_job_workers: 10
    job_loggers:
      - STD_OUTPUT
      - FILE
    logger_sweeper_duration: 1
  notification:
    webhook_job_max_retry: 3
    webhook_job_http_client_timeout: 3
  log:
    level: info
    local:
      rotate_count: 50
      rotate_size: 200M
      location: /var/log/harbor
  _version: "{{ harbor_version }}"
  external_database:
    harbor:
      host: "{{ harbor_db_host }}"
      port: "{{ harbor_db_port }}"
      db_name: "{{ harbor_db_database }}"
      username: "{{ harbor_db_user }}"
      password: "{{ harbor_db_pass }}"
      ssl_mode: verify-full
      max_idle_conns: 2
      max_open_conns: 0
  proxy:
    http_proxy:
    https_proxy:
    no_proxy:
    components:
      - core
      - jobservice
      - trivy
  metric:
    enabled: true
    port: "{{ harbor_port_metrics }}"
    path: /metrics
  upload_purging:
    enabled: true
    age: 168h
    interval: 24h
    dryrun: false
  cache:
    enabled: false
    expire_hours: 24


@ -0,0 +1,7 @@
FROM ubuntu

ENV DEBIAN_FRONTEND=noninteractive

RUN apt update -y \
    && apt install -y curl dnsutils msmtp gettext-base python3-pip python3-requests \
    && pip install matrix-nio


@ -0,0 +1,54 @@
#!/usr/bin/sh

cd /opt/ || exit

hc_url="https://hc-ping.com/$HTTP_HC_UID"

services_down=""
error=""

alias curl_hc='curl -LA "$USER_AGENT" --retry 3'

check_url ()
{
    url="https://$1$2"
    printf "checking url %s ." "$url"

    dig A "$1" >/dev/null
    printf "."

    result=$(curl -LsSfv --connect-timeout 30 --retry 3 "$url" 2>&1)
    code="$?"
    printf ".\n"

    #shellcheck disable=SC2181
    if [ "$code" = "0" ]
    then
        echo "... good"
    else
        services_down=$(printf "%s\n%s" "$services_down" "$1")
        error=$(printf "%s\n==========\n%s:\n%s" "$error" "$1" "$result")
        echo "... bad"
    fi
}

#check_url "acme.serguzim.me" "/health"
check_url "analytics.serguzim.me"
check_url "auth.serguzim.me"
check_url "ci.serguzim.me"
#check_url "cloud.serguzim.me" "/login?noredir=1"
check_url "git.serguzim.me"
check_url "hook.serguzim.me"
check_url "mail.serguzim.me"
#check_url "msrg.cc" # disabled because it keeps creating false alerts
check_url "registry.serguzim.me" "/account/sign-in"
check_url "rss.serguzim.me"
#check_url "serguzim.me" # disabled because it keeps creating false alerts
check_url "status.serguzim.me" "/status/serguzim-net"
check_url "tick.serguzim.me"
check_url "wiki.serguzim.me"
check_url "www.reitanlage-oranienburg.de"

if [ "$error" = "" ]
then
    curl_hc "$hc_url" >/dev/null
    echo "ALL GOOD"
else
    curl_hc --data-raw "$services_down$error" "$hc_url/fail" >/dev/null
fi


@ -0,0 +1,17 @@
#!/usr/bin/sh

cd /opt/ || exit

hc_url="https://hc-ping.com/$MAIL_HC_UID"

alias curl_hc='curl -LA "$USER_AGENT" --retry 3'

envsubst < template.msmtprc > /tmp/msmtprc
envsubst < mailcheck.template.mail > /tmp/mailcheck.mail

result=$(msmtp -C /tmp/msmtprc -a default "$MAIL_HC_UID@hc-ping.com" < /tmp/mailcheck.mail 2>&1)
if [ "$?" != "0" ]
then
    echo "$result"
    curl_hc --data-raw "$result" "$hc_url/fail" >/dev/null
fi


@ -0,0 +1,5 @@
To: ${MAIL_HC_UID}@hc-ping.com
From: ${MAIL_USER}
Subject: Healthcheck

Mailserver alive


@ -0,0 +1,45 @@
#!/usr/bin/python3
import datetime
import os
import requests
import sys

import asyncio
from nio import AsyncClient, RoomMessageNotice

healthcheck_url = "https://hc-ping.com/" + os.environ['MATRIX_HC_UID']


def send_ping(success, msg=""):
    url = healthcheck_url
    if not success:
        url += "/fail"

    requests.get(url, data=msg, headers={'user-agent': os.environ['USER_AGENT']})


async def main():
    try:
        client = AsyncClient(os.environ['MATRIX_SERVER'])
        client.access_token = os.environ['MATRIX_TOKEN']
        client.device_id = os.environ['USER_AGENT']

        await client.room_send(
            room_id = os.environ['MATRIX_ROOM'],
            message_type = "m.room.message",
            content = {
                "msgtype": "m.text",
                "body": "!ping"
            }
        )
    except Exception as e:
        print(e)
        print("exception during login or sending")
        send_ping(False, str(e))
        sys.exit(1)

    await client.close()

    send_ping(True)
    sys.exit(0)

asyncio.new_event_loop().run_until_complete(main())


@ -0,0 +1,13 @@
defaults
auth on
tls on
tls_trust_file /etc/ssl/certs/ca-certificates.crt
logfile /tmp/msmtp.log

account default
host ${MAIL_HOST}
port ${MAIL_PORT}
tls_starttls on
from ${MAIL_USER}
user ${MAIL_USER}
password ${MAIL_PASS}


@ -0,0 +1,24 @@
version: "3.7"

x-common-elements:
  &common-elements
  build:
    context: .
  image: registry.serguzim.me/services/healthcheck
  restart: "no"
  env_file:
    - service.env
  volumes:
    - ./data/:/opt
  network_mode: host

services:
  http:
    <<: *common-elements
    command: "/opt/http"
  matrix:
    <<: *common-elements
    command: "/opt/matrix"
  mail:
    <<: *common-elements
    command: "/opt/mail"


@ -0,0 +1,4 @@
[Timer]
OnCalendar=*:0/5
[Install]
WantedBy=timers.target


@ -0,0 +1,16 @@
---
- name: Copy the docker-compose file
  ansible.builtin.copy:
    src: docker-compose.yml
    dest: "{{ (service_path, 'docker-compose.yml') | path_join }}"
    mode: "0644"
- name: Copy the Dockerfile
  ansible.builtin.copy:
    src: Dockerfile
    dest: "{{ (service_path, 'Dockerfile') | path_join }}"
    mode: "0644"
- name: Copy the data files
  ansible.builtin.copy:
    src: data
    dest: "{{ service_path }}"
    mode: "0755"


@ -0,0 +1,28 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ healthcheck_svc }}"
    env: "{{ healthcheck_env }}"
  block:
    - name: Import tasks to create service directory
      ansible.builtin.import_tasks: tasks/steps/create-service-directory.yml

    - name: Import tasks specific to docker
      ansible.builtin.import_tasks: docker.yml
    - name: Import tasks specific to systemd
      ansible.builtin.import_tasks: systemd.yml

    - name: Import tasks create a service.env file
      ansible.builtin.import_tasks: tasks/steps/template-service-env.yml

    - name: Build service
      ansible.builtin.command:
        cmd: docker compose build --pull
        chdir: "{{ service_path }}"
      when:
        - "'local-dev' != inventory_hostname"
      register: cmd_result
      changed_when: true


@ -0,0 +1,21 @@
---
- name: Template the system service
  ansible.builtin.template:
    src: healthcheck@.service.j2
    dest: /etc/systemd/system/healthcheck@.service
    mode: "0644"
  become: true
- name: Copy the system timer
  ansible.builtin.copy:
    src: healthcheck@.timer
    dest: /etc/systemd/system/healthcheck@.timer
    mode: "0644"
  become: true
- name: Enable the system timer
  ansible.builtin.systemd_service:
    name: healthcheck@{{ item }}.timer
    state: started
    enabled: true
    daemon_reload: true
  loop: "{{ healthcheck_svc.checks }}"
  become: true


@ -0,0 +1,5 @@
[Service]
Type=simple
ExecStart=/usr/bin/docker compose run --rm %i
WorkingDirectory={{ service_path }}
RuntimeMaxSec=300


@ -0,0 +1,24 @@
---
healthcheck_svc:
  name: healthcheck
  checks:
    - http
    - mail
    - matrix

healthcheck_env:
  USER_AGENT: healthcheck-bot for serguzim.net

  HTTP_HC_UID: "{{ vault_healthcheck.hc_uid.http }}"

  MATRIX_SERVER: https://matrix.serguzim.me
  MATRIX_SERVER_FEDTESTER: msrg.cc
  MATRIX_HC_UID: "{{ vault_healthcheck.hc_uid.matrix }}"
  MATRIX_TOKEN: "{{ vault_healthcheck.matrix.token }}"
  MATRIX_ROOM: "{{ vault_healthcheck.matrix.room }}"

  MAIL_HC_UID: "{{ vault_healthcheck.hc_uid.mail }}"
  MAIL_HOST: "{{ mailer.host }}"
  MAIL_PORT: "{{ mailer.port }}"
  MAIL_USER: "{{ vault_healthcheck.mailer.user }}"
  MAIL_PASS: "{{ vault_healthcheck.mailer.pass }}"


@ -0,0 +1,12 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ homebox_svc }}"
    env: "{{ homebox_env }}"
    compose: "{{ homebox_compose }}"
  block:
    - name: Import tasks to deploy common service
      ansible.builtin.import_tasks: tasks/deploy-common-service.yml


@ -0,0 +1,23 @@
---
homebox_svc:
  domain: inventory.serguzim.me
  name: homebox
  port: 7745

homebox_env:
  HBOX_OPTIONS_ALLOW_REGISTRATION: false
  HBOX_MAILER_HOST: mail.serguzim.me
  HBOX_MAILER_PORT: 587
  HBOX_MAILER_USERNAME: inventory@serguzim.me
  HBOX_MAILER_PASSWORD: "{{ vault_homebox.mailer_passwd }}"
  HBOX_MAILER_FROM: Homebox <inventory@serguzim.me>
  HBOX_SWAGGER_SCHEMA: https

homebox_compose:
  watchtower: true
  image: ghcr.io/hay-kot/homebox:latest-rootless
  volumes:
    - data:/data
  file:
    volumes:
      data:


@ -0,0 +1,12 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ immich_svc }}"
    env: "{{ immich_env }}"
    compose: "{{ immich_compose }}"
  block:
    - name: Import tasks to deploy common service
      ansible.builtin.import_tasks: tasks/deploy-common-service.yml


@ -0,0 +1,74 @@
---
immich_db_host: database
immich_db_db: immich
immich_db_user: "{{ vault_immich.db.user }}"
immich_db_pass: "{{ vault_immich.db.pass }}"

immich_svc:
  domain: gallery.serguzim.me
  name: immich
  port: 3001
  version: release
  db:
    host: "{{ postgres.host }}"
    database: immich

immich_env:
  # IMMICH_CONFIG_FILE: /immich.json
  TZ: "{{ timezone }}"

  DB_HOSTNAME: "{{ immich_db_host }}"
  DB_DATABASE_NAME: "{{ immich_db_db }}"
  DB_USERNAME: "{{ immich_db_user }}"
  DB_PASSWORD: "{{ immich_db_pass }}"

  POSTGRES_DB: "{{ immich_db_db }}"
  POSTGRES_USER: "{{ immich_db_user }}"
  POSTGRES_PASSWORD: "{{ immich_db_pass }}"

  REDIS_HOSTNAME: redis

immich_compose:
  watchtower: false
  image: ghcr.io/immich-app/immich-server:release
  volumes:
    - upload:/usr/src/app/upload
  file:
    services:
      app:
        depends_on:
          - database
          - redis
      machine-learning:
        image: ghcr.io/immich-app/immich-machine-learning:release
        volumes:
          - model-cache:/cache
        env_file:
          - service.env
        restart: always
        networks:
          default:
      redis:
        image: redis:6.2-alpine
        restart: always
        networks:
          default:
      database:
        image: tensorchord/pgvecto-rs:pg16-v0.2.0
        env_file:
          - service.env
        volumes:
          - pgdata:/var/lib/postgresql/data
        restart: always
        networks:
          default:
    volumes:
      upload:
      pgdata:
      model-cache:


@ -0,0 +1,28 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ influxdb_svc }}"
    env: "{{ influxdb_env }}"
    compose: "{{ influxdb_compose }}"
    yml: "{{ influxdb_yml }}"
  block:
    - name: Import prepare tasks for common service
      ansible.builtin.import_tasks: tasks/prepare-common-service.yml

    - name: Template config
      ansible.builtin.template:
        src: yml.j2
        dest: "{{ (service_path, 'influxdb.yml') | path_join }}"
        mode: "0600"
      register: cmd_result

    - name: Set the docker force-recreate flag
      ansible.builtin.set_fact:
        docker_force_recreate: --force-recreate
      when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.

    - name: Import start tasks for common service
      ansible.builtin.import_tasks: tasks/start-common-service.yml


@ -0,0 +1,73 @@
---
influxdb_svc:
  domain: tick.serguzim.me
  name: influxdb
  port: 8086
  data_dir: /var/lib/influxdb2

influxdb_yml:
  assets-path: ""
  bolt-path: "{{ (svc.data_dir, 'influxd.bolt') | path_join }}"
  e2e-testing: false
  engine-path: "{{ (svc.data_dir, 'engine') | path_join }}"
  feature-flags: {}
  http-bind-address: "0.0.0.0:{{ svc.port }}"
  influxql-max-select-buckets: 0
  influxql-max-select-point: 0
  influxql-max-select-series: 0
  key-name: ""
  log-level: info
  nats-max-payload-bytes: 1048576
  nats-port: 4222
  no-tasks: false
  query-concurrency: 10
  query-initial-memory-bytes: 0
  query-max-memory-bytes: 0
  query-memory-bytes: 9223372036854775807
  query-queue-size: 10
  reporting-disabled: false
  secret-store: bolt
  session-length: 60
  session-renew-disabled: false
  storage-cache-max-memory-size: 1073741824
  storage-cache-snapshot-memory-size: 26214400
  storage-cache-snapshot-write-cold-duration: 10m0s
  storage-compact-full-write-cold-duration: 4h0m0s
  storage-compact-throughput-burst: 50331648
  storage-max-concurrent-compactions: 0
  storage-max-index-log-file-size: 1048576
  storage-retention-check-interval: 30m0s
  storage-series-file-max-concurrent-snapshot-compactions: 0
  storage-series-id-set-cache-size: 0
  storage-shard-precreator-advance-period: 30m0s
  storage-shard-precreator-check-interval: 10m0s
  storage-tsm-use-madv-willneed: false
  storage-validate-keys: false
  storage-wal-fsync-delay: "0s"
  store: bolt
  testing-always-allow-setup: false
  tls-cert: ""
  tls-key: ""
  tls-min-version: "1.2"
  tls-strict-ciphers: false
  tracing-type: ""
  vault-addr: ""
  vault-cacert: ""
  vault-capath: ""
  vault-client-cert: ""
  vault-client-key: ""
  vault-client-timeout: "0s"
  vault-max-retries: 0
  vault-skip-verify: false
  vault-tls-server-name: ""
  vault-token: ""

influxdb_compose:
  watchtower: false
  image: influxdb:2.7
  volumes:
    - ./influxdb.yml:/etc/influxdb2/config.yml
    - data:{{ svc.data_dir }}
  file:
    volumes:
      data:


@ -0,0 +1,12 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ jellyfin_svc }}"
    env: "{{ jellyfin_env }}"
    compose: "{{ jellyfin_compose }}"
  block:
    - name: Import tasks to deploy common service
      ansible.builtin.import_tasks: tasks/deploy-common-service.yml


@ -0,0 +1,27 @@
---
jellyfin_svc:
  domain: media.serguzim.me
  name: jellyfin
  port: 8096
  db:
    host: "{{ postgres.host }}"
    port: "{{ postgres.port }}"

jellyfin_env:
  JELLYFIN_PublishedServerUrl: https://{{ svc.domain }}

jellyfin_compose:
  watchtower: true
  image: jellyfin/jellyfin
  volumes:
    - config:/config
    - cache:/cache
    - media:/media
  file:
    services:
      app:
        user: 8096:8096
    volumes:
      config:
      cache:
      media:

roles/lego/files/hook.sh Normal file

@ -0,0 +1,6 @@
#!/usr/bin/env sh
cp -f "$LEGO_CERT_PATH" /certificates
cp -f "$LEGO_CERT_KEY_PATH" /certificates
exit 33 # special exit code to signal that the certificate has been updated

roles/lego/files/lego.sh Executable file

@ -0,0 +1,19 @@
#!/usr/bin/env sh

set -a
. ./service.env
set +a

domain="$1"
action="${2:-renew}"

docker compose run --rm app \
    --domains "$domain" \
    "$action" \
    "--$action-hook" "/config/hook.sh"

if [ "$?" = "33" ] && [ -x "./lego.d/$domain" ];
then
    echo "Running hook for $domain"
    "./lego.d/$domain"
fi
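
The lego@.service unit below passes the systemd instance name as the only argument, so manual runs look the same; lego's run subcommand handles the first issuance, while the default action renews:

    ./lego.sh db.serguzim.me run    # initial issuance for a new domain
    ./lego.sh db.serguzim.me        # renew (the default action)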


@ -0,0 +1,10 @@
[Unit]
Description=Renew certificates

[Timer]
Persistent=true
OnCalendar=*-*-* 01:15:00
RandomizedDelaySec=2h

[Install]
WantedBy=timers.target


@ -0,0 +1,16 @@
#!/usr/bin/env sh

domain="db.serguzim.me"

_install() {
    install --owner=postgres --group=postgres --mode=600 \
        "$CERTIFICATES_PATH/$domain.$1" \
        "/var/lib/postgres/data/server.$1"
}

_install crt
_install key

sudo -u postgres pg_ctl -D /var/lib/postgres/data/ reload

# vim: ft=sh


@ -0,0 +1,18 @@
#!/usr/bin/env sh

domain="msrg.cc"

tmpdir=$(mktemp -d)
trap 'rm -rf $tmpdir' EXIT

cp "$CERTIFICATES_PATH/$domain.crt" "$tmpdir/fullchain.pem"
cp "$CERTIFICATES_PATH/$domain.key" "$tmpdir/privkey.pem"

curl \
    -F submit="submit" \
    -F token="$WIUWIU_TOKEN" \
    -F "cert=@$tmpdir/fullchain.pem" \
    -F "key=@$tmpdir/privkey.pem" \
    https://cert-upload.wiuwiu.de/

# vim: ft=sh


@ -0,0 +1,17 @@
#!/usr/bin/env sh

domain="registry.serguzim.me"

_install() {
    install --owner=root --group=root --mode=600 \
        "$CERTIFICATES_PATH/$domain.$1" \
        "/opt/services/harbor/server.$1"
}

_install crt
_install key

export HARBOR_BUNDLE_DIR=/opt/services/harbor
$HARBOR_BUNDLE_DIR/harbor/install.sh

# vim: ft=sh


@ -0,0 +1,19 @@
---
- name: Set config path
  ansible.builtin.set_fact:
    config_path: "{{ (service_path, 'config') | path_join }}"
- name: Create config directory
  ansible.builtin.file:
    path: "{{ config_path }}"
    state: directory
    mode: "0755"
- name: Copy the acme-dns-accounts
  ansible.builtin.template:
    src: "json.j2"
    dest: "{{ (config_path, 'acme-dns-accounts.json') | path_join }}"
    mode: "0644"
- name: Copy the hook script
  ansible.builtin.copy:
    src: "hook.sh"
    dest: "{{ (config_path, 'hook.sh') | path_join }}"
    mode: "0755"


@ -0,0 +1,16 @@
---
- name: Set lego.d path
  ansible.builtin.set_fact:
    lego_d_path: "{{ (service_path, 'lego.d') | path_join }}"
- name: Create lego.d directory
  ansible.builtin.file:
    path: "{{ lego_d_path }}"
    state: directory
    mode: "0755"
- name: Copy the additional lego scripts
  ansible.builtin.copy:
    src: "{{ item }}"
    dest: "{{ lego_d_path }}"
    mode: "0755"
  with_fileglob:
    - "{{ ansible_facts.hostname }}/*"

roles/lego/tasks/main.yml Normal file

@ -0,0 +1,35 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ lego_svc }}"
    env: "{{ lego_env }}"
    json: "{{ vault_acmedns_registered | acmedns_to_lego }}"
    compose: "{{ lego_compose }}"
  block:
    - name: Import prepare tasks for common service
      ansible.builtin.import_tasks: tasks/prepare-common-service.yml

    - name: Create _certificates directory
      ansible.builtin.file:
        path: "{{ certificates_path }}"
        state: directory
        mode: "0755"

    - name: Import tasks specific to the config directory
      ansible.builtin.import_tasks: config.yml
    - name: Import tasks specific to lego.d
      ansible.builtin.import_tasks: lego.d.yml
    - name: Import tasks specific to systemd
      ansible.builtin.import_tasks: systemd.yml

    - name: Copy the run script
      ansible.builtin.copy:
        src: "lego.sh"
        dest: "{{ (service_path, 'lego.sh') | path_join }}"
        mode: "0755"

    - name: Import tasks create a service.env file
      ansible.builtin.import_tasks: tasks/steps/template-service-env.yml


@ -0,0 +1,24 @@
---
- name: Copy the system service
  ansible.builtin.template:
    src: lego@.service.j2
    dest: /etc/systemd/system/lego@.service
    mode: "0644"
  become: true
- name: Copy the system timer
  ansible.builtin.copy:
    src: lego@.timer
    dest: /etc/systemd/system/lego@.timer
    mode: "0644"
  become: true
- name: Enable the system timers
  ansible.builtin.systemd_service:
    name: lego@{{ item }}.timer
    state: started
    enabled: true
    daemon_reload: true
  loop:
    - msrg.cc
    - db.serguzim.me
    - registry.serguzim.me
  become: true


@ -0,0 +1,4 @@
[Service]
Type=oneshot
ExecStart={{ service_path }}/lego.sh %i
WorkingDirectory={{ service_path }}

roles/lego/vars/main.yml Normal file

@ -0,0 +1,34 @@
---
lego_svc:
  name: lego

lego_env:
  ACME_DNS_API_BASE: https://{{ acme_dns.host }}
  ACME_DNS_STORAGE_PATH: /config/acme-dns-accounts.json
  LEGO_EMAIL: "{{ admin_email }}"
  LEGO_PATH: /data
  CERTIFICATES_PATH: "{{ certificates_path }}"
  WIUWIU_TOKEN: "{{ vault_wiuwiu_token }}"

lego_compose:
  watchtower: false
  network: false
  image: goacme/lego
  volumes:
    - ./config:/config:ro
    - "{{ certificates_path }}:/certificates"
    - data:/data
  file:
    services:
      app:
        restart: "no"
        network_mode: "host"
        entrypoint:
          - /lego
          - --accept-tos
          - --email={{ admin_email }}
          - --dns=acme-dns
    volumes:
      data:


@ -0,0 +1,12 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ linkwarden_svc }}"
    env: "{{ linkwarden_env }}"
    compose: "{{ linkwarden_compose }}"
  block:
    - name: Import tasks to deploy common service
      ansible.builtin.import_tasks: tasks/deploy-common-service.yml


@ -0,0 +1,39 @@
---
linkwarden_secret: "{{ vault_linkwarden.secret }}"
linkwarden_db_host_port: "{{ postgres.host }}:{{ postgres.port }}"
linkwarden_db_user: "{{ vault_linkwarden.db.user }}"
linkwarden_db_pass: "{{ vault_linkwarden.db.pass }}"
linkwarden_db_database: linkwarden
linkwarden_s3_accesskey: "{{ opentofu.scaleway_service_keys.linkwarden.access_key }}"
linkwarden_s3_secretkey: "{{ opentofu.scaleway_service_keys.linkwarden.secret_key }}"

linkwarden_svc:
  domain: bookmarks.serguzim.me
  name: linkwarden
  port: 3000

linkwarden_env:
  NEXTAUTH_SECRET: "{{ linkwarden_secret }}"
  NEXTAUTH_URL: https://bookmarks.serguzim.me/api/v1/auth
  DATABASE_URL: postgres://{{ linkwarden_db_user }}:{{ linkwarden_db_pass }}@{{ linkwarden_db_host_port }}/{{ linkwarden_db_database }}

  SPACES_KEY: "{{ linkwarden_s3_accesskey }}"
  SPACES_SECRET: "{{ linkwarden_s3_secretkey }}"
  SPACES_ENDPOINT: https://s3.nl-ams.scw.cloud
  SPACES_BUCKET_NAME: linkwarden.serguzim.me
  SPACES_REGION: nl-ams
  SPACES_FORCE_PATH_STYLE: false

  NEXT_PUBLIC_DISABLE_REGISTRATION: true
  NEXT_PUBLIC_CREDENTIALS_ENABLED: true
  NEXT_PUBLIC_AUTHENTIK_ENABLED: false
  AUTHENTIK_CUSTOM_NAME: auth.serguzim.me
  AUTHENTIK_ISSUER: https://auth.serguzim.me/application/o/bookmarks-serguzim-me
  AUTHENTIK_CLIENT_ID: "{{ vault_linkwarden.oidc_client.id }}"
  AUTHENTIK_CLIENT_SECRET: "{{ vault_linkwarden.oidc_client.secret }}"

linkwarden_compose:
  watchtower: true
  image: ghcr.io/linkwarden/linkwarden:latest


@ -0,0 +1,10 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ mailcow_svc }}"
  block:
    - name: Import tasks to template the site for the reverse proxy
      ansible.builtin.import_tasks: tasks/steps/template-site-config.yml


@ -0,0 +1,7 @@
---
mailcow_svc:
  name: mailcow
  domain: mail.serguzim.me
  docker_host: host.docker.internal
  port: 3004
  additional_domains: "{{ ['autodiscover', 'autoconfig'] | product(vault_mailcow.domains) | map('join', '.') }}"
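
For example, with vault_mailcow.domains set to (say) ['msrg.cc'], the product/join pipeline above renders additional_domains as ['autodiscover.msrg.cc', 'autoconfig.msrg.cc'].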


@ -0,0 +1,12 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ minecraft_2_svc }}"
    env: "{{ minecraft_2_env }}"
    compose: "{{ minecraft_2_compose }}"
  block:
    - name: Import tasks to deploy common service
      ansible.builtin.import_tasks: tasks/deploy-common-service.yml


@ -0,0 +1,68 @@
---
minecraft_2_svc:
  name: minecraft-2

minecraft_2_env:
  ALLOW_FLIGHT: true
  ALLOW_NETHER: true
  ANNOUNCE_PLAYER_ACHIEVEMENTS: true
  BROADCAST_CONSOLE_TO_OPS: true
  BROADCAST_RCON_TO_OPS: true
  CONSOLE: false
  ENABLE_AUTOPAUSE: true
  ENABLE_COMMAND_BLOCK: true
  ENABLE_JMX: false
  ENABLE_RCON: true
  ENABLE_STATUS: true
  ENABLE_WHITELIST: true
  ENFORCE_WHITELIST: true
  ENTITY_BROADCAST_RANGE_PERCENTAGE: 100
  EULA: true
  FORCE_GAMEMODE: false
  FUNCTION_PERMISSION_LEVEL: 2
  GENERATE_STRUCTURES: true
  HARDCORE: false
  ICON:
  LEVEL_TYPE: DEFAULT
  MAX_BUILD_HEIGHT: 512
  MAX_MEMORY: 4G
  MAX_TICK_TIME: -1
  MAX_PLAYERS: 64
  MAX_WORLD_SIZE: 30000000
  MODE: survival
  MOTD:
  NETWORK_COMPRESSION_THRESHOLD: 256
  PVP: true
  SERVER_NAME: minecraft.serguzim.me
  SNOOPER_ENABLED: false
  SPAWN_ANIMALS: true
  SPAWN_MONSTERS: true
  SPAWN_NPCS: true
  SPAWN_PROTECTION: 0
  SYNC_CHUNK_WRITES: true
  TYPE: PAPER
  ONLINE_MODE: true
  OP_PERMISSION_LEVEL: 4
  OPS: "{{ vault_minecraft_2.ops }}"
  OVERRIDE_ICON: true
  OVERRIDE_SERVER_PROPERTIES: true
  PLAYER_IDLE_TIMEOUT: 0
  PREVENT_PROXY_CONNECTIONS: false
  SEED: "{{ vault_minecraft_2.seed }}"
  USE_NATIVE_TRANSPORT: true
  VERSION: LATEST
  VIEW_DISTANCE: 10
  WHITELIST: "{{ vault_minecraft_2.whitelist }}"

minecraft_2_compose:
  watchtower: false
  image: itzg/minecraft-server
  volumes:
    - data:/data
  file:
    services:
      app:
        ports:
          - 25565:25565
    volumes:
      data:


@ -0,0 +1,12 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ minio_svc }}"
    env: "{{ minio_env }}"
    compose: "{{ minio_compose }}"
  block:
    - name: Import tasks to deploy common service
      ansible.builtin.import_tasks: tasks/deploy-common-service.yml

roles/minio/vars/main.yml
View file

@ -0,0 +1,34 @@
---
minio_svc:
domain: s3.serguzim.me
name: minio
port: 9000
caddy_extra: |
@nocache {
query nocache=*
}
header @nocache "Cache-Control" "no-store, no-cache"
extra_svcs:
- domain: console.s3.serguzim.me
docker_host: minio
port: 9001
minio_env:
MINIO_SERVER_URL: https://{{ svc.domain }}/
MINIO_BROWSER_REDIRECT_URL: https://console.{{ svc.domain }}
MINIO_VOLUMES: /data
MINIO_ROOT_USER: "{{ vault_minio.user }}"
MINIO_ROOT_PASSWORD: "{{ vault_minio.pass }}"
minio_compose:
watchtower: true
image: minio/minio
volumes:
- data:/data
file:
services:
app:
command: server --console-address ":9001"
volumes:
data:
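
After a deploy, MinIO's unauthenticated liveness endpoint makes for a quick smoke test (a hypothetical ad-hoc task, not part of this commit):

- name: Probe MinIO liveness
  ansible.builtin.uri:
    url: "https://{{ minio_svc.domain }}/minio/health/live"
    status_code: 200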

roles/ntfy/tasks/main.yml
View file

@ -0,0 +1,12 @@
---
- name: Set common facts
ansible.builtin.import_tasks: tasks/set-default-facts.yml
- name: Deploy {{ svc.name }}
vars:
svc: "{{ ntfy_svc }}"
compose: "{{ ntfy_compose }}"
env: "{{ ntfy_env }}"
block:
- name: Import tasks to deploy common service
ansible.builtin.import_tasks: tasks/deploy-common-service.yml

roles/ntfy/vars/main.yml
View file

@ -0,0 +1,55 @@
---
ntfy_svc:
name: ntfy
domain: push.serguzim.me
port: 80
ntfy_env:
TZ: "{{ timezone }}"
NTFY_BASE_URL: "https://{{ ntfy_svc.domain }}"
NTFY_CACHE_FILE: /var/cache/ntfy/cache.db
NTFY_CACHE_DURATION: "12h"
NTFY_BEHIND_PROXY: true
NTFY_AUTH_FILE: /var/lib/ntfy/user.db
NTFY_AUTH_DEFAULT_ACCESS: "deny-all"
NTFY_ATTACHMENT_CACHE_DIR: "/var/cache/ntfy/attachments"
NTFY_ATTACHMENT_TOTAL_SIZE_LIMIT: "5G"
NTFY_ATTACHMENT_FILE_SIZE_LIMIT: "15M"
NTFY_ATTACHMENT_EXPIRY_DURATION: "3h"
NTFY_KEEPALIVE_INTERVAL: "45s"
NTFY_MANAGER_INTERVAL: "60m"
NTFY_ENABLE_SIGNUP: false
NTFY_ENABLE_LOGIN: true
NTFY_ENABLE_RESERVATIONS: true
NTFY_GLOBAL_TOPIC_LIMIT: 15000
NTFY_VISITOR_SUBSCRIPTION_LIMIT: 30
NTFY_VISITOR_REQUEST_LIMIT_BURST: 60
NTFY_VISITOR_REQUEST_LIMIT_REPLENISH: "5s"
NTFY_VISITOR_ATTACHMENT_TOTAL_SIZE_LIMIT: "100M"
NTFY_VISITOR_ATTACHMENT_DAILY_BANDWIDTH_LIMIT: "500M"
NTFY_ENABLE_METRICS: true
ntfy_compose:
watchtower: true
image: binwiederhier/ntfy
volumes:
- cache:/var/cache/ntfy
- data:/var/lib/ntfy
file:
services:
app:
command:
- serve
volumes:
cache:
data:
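
Because NTFY_AUTH_DEFAULT_ACCESS is deny-all, every account has to be provisioned explicitly with the ntfy CLI inside the container. A hypothetical one-off (the service name app, the vault variable, and the NTFY_PASSWORD hand-off are assumptions about this setup):

- name: Create an admin user for ntfy
  ansible.builtin.command:
    cmd: docker compose exec -e NTFY_PASSWORD={{ vault_ntfy.admin_pass }} app ntfy user add --role=admin admin
    chdir: "{{ service_path }}"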

View file

@ -0,0 +1,26 @@
---
- name: Set common facts
ansible.builtin.import_tasks: tasks/set-default-facts.yml
- name: Deploy {{ svc.name }}
vars:
svc: "{{ reitanlage_oranienburg_svc }}"
compose: "{{ reitanlage_oranienburg_compose }}"
block:
- name: Import prepare tasks for common service
ansible.builtin.import_tasks: tasks/prepare-common-service.yml
- name: Get the Dockerfile
ansible.builtin.get_url:
url: https://raw.githubusercontent.com/getgrav/docker-grav/master/Dockerfile
dest: "{{ (service_path, 'Dockerfile') | path_join }}"
mode: "0644"
register: cmd_result
- name: Set the docker rebuild flag
ansible.builtin.set_fact:
docker_rebuild: true
when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.
- name: Import start tasks for common service
ansible.builtin.import_tasks: tasks/start-common-service.yml
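
The docker_rebuild fact set above is presumably consumed by the shared start tasks; a minimal sketch of that hand-off, assuming start-common-service.yml forwards the flag to compose roughly like this:

- name: Start the service
  ansible.builtin.command:
    cmd: docker compose up -d{{ ' --build' if docker_rebuild | default(false) else '' }}
    chdir: "{{ service_path }}"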

View file

@ -0,0 +1,25 @@
---
reitanlage_oranienburg_svc:
name: reitanlage-oranienburg
domain: reitanlage-oranienburg.de
www_domain: true
port: 80
caddy_extra: |
import analytics
header /images/* Cache-Control "max-age=31536000"
header /assets/* Cache-Control "max-age=2629800"
header /user/themes/* Cache-Control "max-age=2629800"
reitanlage_oranienburg_compose:
watchtower: false
image: registry.serguzim.me/library/grav
volumes:
- data:/var/www/html/
file:
services:
app:
build:
context: .
volumes:
data:

View file

@ -0,0 +1,12 @@
---
- name: Set common facts
ansible.builtin.import_tasks: tasks/set-default-facts.yml
- name: Deploy {{ svc.name }}
vars:
svc: "{{ shlink_svc }}"
env: "{{ shlink_env }}"
compose: "{{ shlink_compose }}"
block:
- name: Import tasks to deploy common service
ansible.builtin.import_tasks: tasks/deploy-common-service.yml

View file

@ -0,0 +1,31 @@
---
shlink_svc:
domain: msrg.cc
additional_domains:
- "emgauwa.app"
name: shlink
port: 8080
shlink_env:
DEFAULT_DOMAIN: "{{ shlink_svc.domain }}"
IS_HTTPS_ENABLED: true
TIMEZONE: "{{ timezone }}"
DEFAULT_SHORT_CODES_LENGTH: 8
MULTI_SEGMENT_SLUGS_ENABLED: false
SHORT_URL_TRAILING_SLASH: true
REDIRECT_APPEND_EXTRA_PATH: true
DEFAULT_BASE_URL_REDIRECT: "https://www.serguzim.me/"
DB_DRIVER: postgres
DB_HOST: "{{ postgres.host }}"
DB_PORT: "{{ postgres.port }}"
DB_NAME: shlink
DB_USER: "{{ vault_shlink.db.user }}"
DB_PASSWORD: "{{ vault_shlink.db.pass }}"
GEOLITE_LICENSE_KEY: "{{ vault_shlink.geolite_key }}"
shlink_compose:
watchtower: true
image: shlinkio/shlink
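
Shlink is driven entirely through its REST API, so a first API key has to be minted with the bundled console command once the stack is up (a hypothetical one-off; the service name app is an assumption):

- name: Generate a Shlink API key
  ansible.builtin.command:
    cmd: docker compose exec app shlink api-key:generate
    chdir: "{{ service_path }}"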

View file

@ -0,0 +1,22 @@
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
handlers:
console:
class: logging.StreamHandler
formatter: precise
loggers:
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: INFO
root:
level: INFO
handlers: [console]
disable_existing_loggers: false
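
Should the SQL layer ever need inspecting, only that one logger should be raised, and only temporarily, given the warning above. An illustrative override:

loggers:
  synapse.storage.SQL:
    # beware: DEBUG will log sensitive data such as access tokens
    level: DEBUG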

View file

@ -0,0 +1,44 @@
---
- name: Set common facts
ansible.builtin.import_tasks: tasks/set-default-facts.yml
- name: Deploy {{ svc.name }}
vars:
svc: "{{ synapse_svc }}"
env: "{{ synapse_env }}"
compose: "{{ synapse_compose }}"
yml: "{{ synapse_yml }}"
block:
- name: Import prepare tasks for common service
ansible.builtin.import_tasks: tasks/prepare-common-service.yml
- name: Set synapse config path
ansible.builtin.set_fact:
config_path: "{{ (service_path, svc.config_path) | path_join }}"
- name: Create config directory
ansible.builtin.file:
path: "{{ config_path }}"
state: directory
mode: "0755"
- name: Template config
ansible.builtin.template:
src: yml.j2
dest: "{{ (config_path, 'homeserver.yaml') | path_join }}"
mode: "0644"
- name: Copy the log config
ansible.builtin.copy:
src: msrg.cc.log.config
dest: "{{ (config_path, 'msrg.cc.log.config') | path_join }}"
mode: "0644"
- name: Copy the signing key
ansible.builtin.copy:
content: "{{ vault_synapse.signing_key }}"
dest: "{{ (config_path, 'msrg.cc.signing.key') | path_join }}"
mode: "0644"
- name: Import start tasks for common service
ansible.builtin.import_tasks: tasks/start-common-service.yml
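
Synapse exposes an unauthenticated federation version endpoint on the 8448 listener, which makes for a handy post-deploy check (a hypothetical ad-hoc task, not part of this commit):

- name: Verify the federation listener
  ansible.builtin.uri:
    url: "https://matrix.serguzim.me:8448/_matrix/federation/v1/version"
    status_code: 200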

roles/synapse/vars/main.yml
View file

@ -0,0 +1,123 @@
---
synapse_svc:
name: synapse
domain: matrix.serguzim.me
docker_host: synapse-admin
port: 80
caddy_extra: |
handle /_matrix/* {
reverse_proxy synapse:8008
}
handle /_synapse/* {
reverse_proxy synapse:8008
}
extra_svcs:
- domain: matrix.serguzim.me:8448
additional_domains:
- serguzim.me:8448
docker_host: synapse
port: 8008
db:
host: "{{ postgres.host }}"
database: synapse
user: "{{ vault_synapse.db.user }}"
pass: "{{ vault_synapse.db.pass }}"
config_path: config
synapse_env:
SYNAPSE_CONFIG_PATH: "{{ ('/', svc.config_path) | path_join }}"
REACT_APP_SERVER: https://matrix.serguzim.me
synapse_yml:
server_name: msrg.cc
pid_file: "{{ (svc.config_path, 'homeserver.pid') | path_join }}"
public_baseurl: https://matrix.serguzim.me/
allow_public_rooms_without_auth: true
allow_public_rooms_over_federation: true
listeners:
- port: 8008
tls: false
type: http
x_forwarded: true
resources:
- names:
- client
- federation
- metrics
compress: false
admin_contact: mailto:{{ admin_email }}
acme:
enabled: false
database:
name: psycopg2
args:
user: "{{ svc.db.user }}"
password: "{{ svc.db.pass }}"
database: "{{ svc.db.database }}"
host: "{{ svc.db.host }}"
cp_min: 5
cp_max: 10
log_config: "{{ (svc.config_path, 'msrg.cc.log.config') | path_join }}"
media_store_path: /media_store
max_upload_size: 500M
enable_registration: false
enable_metrics: true
report_stats: true
macaroon_secret_key: "{{ vault_synapse.macaroon_secret_key }}"
form_secret: "{{ vault_synapse.form_secret }}"
signing_key_path: "{{ (svc.config_path, 'msrg.cc.signing.key') | path_join }}"
trusted_key_servers:
- server_name: matrix.org
suppress_key_server_warning: true
oidc_providers:
- idp_id: auth_serguzim_me
idp_name: auth.serguzim.me
issuer: https://auth.serguzim.me/application/o/matrix_serguzim_me/
client_id: "{{ vault_synapse.oidc_client.id }}"
client_secret: "{{ vault_synapse.oidc_client.secret }}"
scopes:
- openid
- profile
- email
user_mapping_provider:
config:
localpart_template: "{{ '{{ user.preferred_username }}' }}"
display_name_template: "{{ '{{ user.name }}' }}"
email:
smtp_host: mail.serguzim.me
smtp_port: 587
smtp_user: matrix@serguzim.me
smtp_pass: "{{ vault_synapse.mail.pass }}"
require_transport_security: true
notif_from: Matrix <matrix@serguzim.me>
synapse_compose:
watchtower: true
image: ghcr.io/element-hq/synapse:latest
volumes:
- ./config:/config
- media_store:/media_store
file:
services:
synapse-admin:
image: awesometechnologies/synapse-admin
restart: always
labels:
com.centurylinklabs.watchtower.enable: true
env_file:
- service.env
networks:
apps:
aliases:
- synapse-admin
volumes:
media_store:
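
The nested quoting in the user_mapping_provider templates above is deliberate: Ansible evaluates the outer expression and emits the inner braces literally, so Synapse receives its own Jinja placeholders untouched. The rendered homeserver.yaml therefore ends up containing:

user_mapping_provider:
  config:
    localpart_template: "{{ user.preferred_username }}"
    display_name_template: "{{ user.name }}"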

View file

@ -0,0 +1,12 @@
---
- name: Set common facts
ansible.builtin.import_tasks: tasks/set-default-facts.yml
- name: Deploy {{ svc.name }}
vars:
svc: "{{ tandoor_svc }}"
env: "{{ tandoor_env }}"
compose: "{{ tandoor_compose }}"
block:
- name: Import tasks to deploy common service
ansible.builtin.import_tasks: tasks/deploy-common-service.yml

View file

@ -0,0 +1,63 @@
---
tandoor_svc:
domain: recipes.serguzim.me
name: tandoor
port: 80
db:
host: "{{ postgres.host }}"
port: "{{ postgres.port }}"
database: tandoor
user: "{{ vault_tandoor.db.user }}"
pass: "{{ vault_tandoor.db.pass }}"
tandoor_env:
DEBUG: 0
SQL_DEBUG: 0
ALLOWED_HOSTS: recipes.serguzim.me
SECRET_KEY: "{{ vault_tandoor.secret_key }}"
TZ: "{{ timezone }}"
DB_ENGINE: django.db.backends.postgresql
DB_OPTIONS: '{"sslmode": "require"}'
POSTGRES_HOST: "{{ svc.db.host }}"
POSTGRES_PORT: "{{ svc.db.port }}"
POSTGRES_DB: "{{ svc.db.database }}"
POSTGRES_USER: "{{ svc.db.user }}"
POSTGRES_PASSWORD: "{{ svc.db.pass }}"
SHOPPING_MIN_AUTOSYNC_INTERVAL: 5
ENABLE_SIGNUP: 0
ENABLE_METRICS: 1
ENABLE_PDF_EXPORT: 1
SOCIAL_DEFAULT_ACCESS: 1
SOCIAL_DEFAULT_GROUP: guest
tandoor_compose:
watchtower: true
image: nginx:mainline-alpine
volumes:
- nginx_config:/etc/nginx/conf.d:ro
- staticfiles:/static
- mediafiles:/media
file:
services:
web_recipes:
image: vabene1111/recipes
restart: always
labels:
com.centurylinklabs.watchtower.enable: true
env_file:
- service.env
volumes:
- staticfiles:/opt/recipes/staticfiles
- nginx_config:/opt/recipes/nginx/conf.d
- mediafiles:/opt/recipes/mediafiles
networks:
default:
volumes:
nginx_config:
staticfiles:
mediafiles:
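
Since the nginx container only fronts the Django app and serves the static and media volumes, a cheap end-to-end check is to request the public domain and accept either the page or the login redirect (a hypothetical ad-hoc task, not part of this commit):

- name: Probe Tandoor through its nginx front
  ansible.builtin.uri:
    url: "https://{{ tandoor_svc.domain }}/"
    status_code: [200, 302]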

View file

@ -0,0 +1,19 @@
services:
teamspeak:
image: teamspeak
restart: always
ports:
- 9987:9987/udp
- 10011:10011
- 30033:30033
environment:
TS3SERVER_DB_PLUGIN: ts3db_sqlite3
TS3SERVER_DB_SQLCREATEPATH: create_sqlite
TS3SERVER_LICENSE: accept
volumes:
- data:/var/ts3server/
volumes:
data:
external: true
name: teamspeak-fallback-data
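
The data volume is declared external, so it must exist before the first docker compose up. A hypothetical provisioning step (not part of this commit):

- name: Ensure the fallback TeamSpeak volume exists
  community.docker.docker_volume:
    name: teamspeak-fallback-data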

View file

@ -0,0 +1,36 @@
---
- name: Set common facts
ansible.builtin.import_tasks: tasks/set-default-facts.yml
- name: Deploy {{ svc.name }}
vars:
svc: "{{ teamspeak_fallback_svc }}"
block:
- name: Import tasks to create service directory
ansible.builtin.import_tasks: tasks/steps/create-service-directory.yml
- name: Copy the docker-compose file
ansible.builtin.copy:
src: docker-compose.yml
dest: "{{ (service_path, 'docker-compose.yml') | path_join }}"
mode: "0644"
- name: Template the conditional-start script
ansible.builtin.template:
src: conditional-start.sh.j2
dest: "{{ (service_path, 'conditional-start.sh') | path_join }}"
mode: "0755"
- name: Copy the system service
ansible.builtin.template:
src: teamspeak-fallback.service.j2
dest: /etc/systemd/system/teamspeak-fallback.service
mode: "0644"
become: true
- name: Enable the system service
ansible.builtin.systemd_service:
name: teamspeak-fallback.service
state: started
enabled: true
daemon_reload: true
become: true

View file

@ -0,0 +1,18 @@
#!/usr/bin/env sh
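# Fallback watchdog: polls the primary TeamSpeak server (the templated
# teamspeak_fallback_check_server/_port pair) and keeps the local container
# running only while the primary is unreachable.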
while true
do
if nc -z -w 3 "{{ teamspeak_fallback_check_server }}" "{{ teamspeak_fallback_check_port }}"
then
if docker compose ps --services | grep teamspeak >/dev/null; then
echo "Stopping Server"
docker compose down
fi
else
if ! docker compose ps --services | grep teamspeak >/dev/null; then
echo "Starting Server"
docker compose up -d --pull=always
fi
fi
sleep 2
done

Some files were not shown because too many files have changed in this diff.