diff --git a/.gitignore b/.gitignore
index f12db07..65dbe42 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,6 @@ dns/hosts.json
secrets.auto.tfvars
.terraform
+
+inventory/group_vars/all/serguzim.net.yml
+inventory/group_vars/all/opentofu.yaml
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..36f7309
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,7 @@
+repos:
+ - repo: https://github.com/ansible/ansible-lint
+ rev: v6.22.1
+ hooks:
+ - id: ansible-lint
+ args:
+ - serguzim.net.yml
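For reference, the new hook can be exercised locally before pushing; a minimal sketch, assuming `pre-commit` is installed on the workstation:

```sh
pre-commit install          # wire the hook into .git/hooks
pre-commit run --all-files  # lint the whole tree once
```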
diff --git a/Makefile b/Makefile
index 5747dd4..58e10e2 100644
--- a/Makefile
+++ b/Makefile
@@ -3,20 +3,18 @@ SHELL := /bin/bash
include .env
export
-DNS_OUTPUT = "dns/hosts.js"
-SERVICES_OUTPUT = "services/inventory/group_vars/all/opentofu.yaml"
+DNS_OUTPUT = dns/hosts.json
+SERVICES_OUTPUT = inventory/group_vars/all/opentofu.yaml
$(DNS_OUTPUT):
- cd opentofu && \
- tofu output --json \
+ tofu output --json \
| jq 'with_entries(.value |= .value).hosts' \
- > ../dns/hosts.json
+ > $(DNS_OUTPUT)
$(SERVICES_OUTPUT):
- cd opentofu && \
- tofu output --json \
+ tofu output --json \
| yq -y '{opentofu: with_entries(.value |= .value)}' \
- > ../services/inventory/group_vars/all/opentofu.yaml
+ > $(SERVICES_OUTPUT)
outputs: $(DNS_OUTPUT) $(SERVICES_OUTPUT)
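With the quotes removed from the variables, the two generated files work as ordinary Make targets (quoted values would make the target names contain literal quote characters and never match the files actually written). A usage sketch, assuming `tofu`, `jq` and `yq` are on PATH and the OpenTofu state is initialized:

```sh
make outputs   # creates dns/hosts.json and inventory/group_vars/all/opentofu.yaml
# the targets have no prerequisites, so delete them to force a refresh:
rm -f dns/hosts.json inventory/group_vars/all/opentofu.yaml && make outputs
```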
diff --git a/README.md b/README.md
index 3a3beea..5343643 100644
--- a/README.md
+++ b/README.md
@@ -8,3 +8,17 @@
- enter credentials to ovh in .env file (copy from .env.example)
- check credentials with `make dns-check`
- run `make dns`
+
+## Ansible project to deploy services
+
+### Variables
+
+#### CLI variable overrides
+
+##### force_forgejo_runner_registration
+`-e force_forgejo_runner_registration=True`
+Force forgejo-runner to prompt for a new registration token.
+
+##### docker_update
+`-e docker_update=True`
+Pull and build the Docker Compose services.
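A hypothetical invocation combining the overrides with the playbooks and tags introduced below:

```sh
# re-register the runner on node002, then update all compose services everywhere
ansible-playbook playbooks/node002.yml -t forgejo-runner \
  -e force_forgejo_runner_registration=True
ansible-playbook playbooks/serguzim.net.yml -e docker_update=True
```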
diff --git a/ansible.cfg b/ansible.cfg
new file mode 100644
index 0000000..79a1afc
--- /dev/null
+++ b/ansible.cfg
@@ -0,0 +1,3 @@
+[defaults]
+inventory = ./inventory
+roles_path = ./roles
diff --git a/hcloud.tf b/hcloud.tf
index 97f74b0..281ec61 100644
--- a/hcloud.tf
+++ b/hcloud.tf
@@ -51,7 +51,7 @@ resource "hcloud_server" "nodes" {
ssh_keys,
user_data
]
- prevent_destroy = true
+ prevent_destroy = true
}
}
diff --git a/inventory/group_vars/all/compose_defaults.yml b/inventory/group_vars/all/compose_defaults.yml
new file mode 100644
index 0000000..4be859e
--- /dev/null
+++ b/inventory/group_vars/all/compose_defaults.yml
@@ -0,0 +1,37 @@
+compose_file_main:
+ services:
+ app:
+ image: "{{ compose.image }}"
+ restart: always
+ labels:
+ com.centurylinklabs.watchtower.enable: "{{ compose.watchtower | default(false) }}"
+
+compose_file_env:
+ services:
+ app:
+ env_file:
+ - service.env
+
+compose_file_networks:
+ services:
+ app:
+ networks:
+ default:
+ apps:
+ aliases:
+ - "{{ svc.name }}"
+ networks:
+ default:
+ apps:
+ external: true
+
+compose_file_volumes:
+ services:
+ app:
+ volumes: "{{ compose.volumes }}"
+
+compose_file_monitoring_label:
+ services:
+ app:
+ labels:
+ com.influxdata.telegraf.enable: true
diff --git a/inventory/group_vars/all/main.yml b/inventory/group_vars/all/main.yml
new file mode 100644
index 0000000..7afbfed
--- /dev/null
+++ b/inventory/group_vars/all/main.yml
@@ -0,0 +1,22 @@
+admin_email: tobias@msrg.cc
+timezone: Europe/Berlin
+
+postgres:
+ host: db.serguzim.me
+ port: 5432
+
+mailer:
+ host: mail.serguzim.me
+ port: 587
+
+acme_dns:
+ host: acme.serguzim.me
+
+
+services_path: /opt/services/
+
+caddy_path: "{{ (services_path, 'caddy') | path_join }}"
+caddy_config_path: "{{ (caddy_path, 'config', 'conf.d') | path_join }}"
+managed_sites: []
+
+certificates_path: "{{ (services_path, '_certificates') | path_join }}"
diff --git a/inventory/serguzim.net.yml b/inventory/serguzim.net.yml
new file mode 100644
index 0000000..f6caf17
--- /dev/null
+++ b/inventory/serguzim.net.yml
@@ -0,0 +1,58 @@
+all:
+ hosts:
+ local-dev:
+ ansible_connection: local
+
+ node001:
+ ansible_host: node001.vpn.serguzim.net
+ ansible_port: "{{ vault_node001.ansible_port }}"
+ ansible_user: "{{ vault_node001.ansible_user }}"
+ interactive_user: "{{ vault_node001.interactive_user }}"
+ host_vpn:
+ domain: node001.vpn.serguzim.net
+ ip: 100.64.0.1
+ host_backup:
+ hc_uid: "{{ vault_node001.backup.hc_uid }}"
+ uptime_kuma_token: "{{ vault_node001.backup.uptime_kuma_token }}"
+ volumes:
+ - minecraft-2_data
+
+ node002:
+ ansible_host: node002.vpn.serguzim.net
+ ansible_port: "{{ vault_node002.ansible_port }}"
+ ansible_user: "{{ vault_node002.ansible_user }}"
+ interactive_user: "{{ vault_node002.interactive_user }}"
+ host_vpn:
+ domain: node002.vpn.serguzim.net
+ ip: 100.64.0.2
+ host_backup:
+ hc_uid: "{{ vault_node002.backup.hc_uid }}"
+ uptime_kuma_token: "{{ vault_node002.backup.uptime_kuma_token }}"
+ volumes:
+ - forgejo_data
+ - homebox_data
+ - immich_upload
+ - influxdb_data
+ - jellyfin_config
+ #- jellyfin_media # TODO
+ - minio_data
+ - ntfy_data
+ - reitanlage-oranienburg_data
+ - synapse_media_store
+ - tandoor_mediafiles
+ - teamspeak-fallback-data
+ - uptime-kuma_data
+ - vikunja_data
+
+ node003:
+ ansible_host: node003.vpn.serguzim.net
+ ansible_port: "{{ vault_node003.ansible_port }}"
+ ansible_user: "{{ vault_node003.ansible_user }}"
+ interactive_user: "{{ vault_node003.interactive_user }}"
+ host_vpn:
+ domain: node003.vpn.serguzim.net
+ ip: 100.110.16.30
+ host_backup:
+ hc_uid: "{{ vault_node003.backup.hc_uid }}"
+ uptime_kuma_token: "{{ vault_node003.backup.uptime_kuma_token }}"
+ volumes: []
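All `vault_*` lookups above are expected to come from an encrypted vars file; judging by the new `.gitignore` entry, `inventory/group_vars/all/serguzim.net.yml` is the likely location, but that is an assumption. A sketch:

```sh
# assumed vault location (gitignored above); adjust to where the vault actually lives
ansible-vault edit inventory/group_vars/all/serguzim.net.yml
ansible-playbook playbooks/node002.yml --ask-vault-pass
```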
diff --git a/playbooks/change-password.yml b/playbooks/change-password.yml
new file mode 100644
index 0000000..b8ed866
--- /dev/null
+++ b/playbooks/change-password.yml
@@ -0,0 +1,14 @@
+---
+- name: Change password
+ hosts: all
+ become: true
+ tasks:
+ - name: Get new password
+ ansible.builtin.pause:
+ prompt: Enter the new password
+ echo: false
+ register: new_user_password
+ - name: Change password
+ ansible.builtin.user:
+ name: "{{ interactive_user }}"
+ password: "{{ new_user_password.user_input | password_hash('sha512') }}"
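A run sketch for the playbook above, limited to one host (the password prompt is interactive):

```sh
ansible-playbook playbooks/change-password.yml --limit node001
```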
diff --git a/playbooks/filter_plugins/acmedns_to_lego.py b/playbooks/filter_plugins/acmedns_to_lego.py
new file mode 100644
index 0000000..76a24cd
--- /dev/null
+++ b/playbooks/filter_plugins/acmedns_to_lego.py
@@ -0,0 +1,18 @@
+class FilterModule(object):
+ def filters(self):
+ return {
+ 'acmedns_to_lego': self.acmedns_to_lego,
+ }
+
+ def acmedns_to_lego(self, acmedns_registered):
+ result = {}
+ for (key, value) in acmedns_registered.items():
+ result[key] = {
+ "fulldomain": value["subd"] + "." + value["host"],
+ "subdomain": value["subd"],
+ "username": value["user"],
+ "password": value["pass"],
+ "server_url": "https://" + value["host"]
+ }
+
+ return result
diff --git a/playbooks/filter_plugins/map_backup_volumes.py b/playbooks/filter_plugins/map_backup_volumes.py
new file mode 100644
index 0000000..77c1fbc
--- /dev/null
+++ b/playbooks/filter_plugins/map_backup_volumes.py
@@ -0,0 +1,24 @@
+class FilterModule(object):
+ def filters(self):
+ return {
+ 'map_backup_volumes': self.map_backup_volumes,
+ 'map_backup_volumes_service': self.map_backup_volumes_service
+ }
+
+ def map_backup_volumes(self, volumes):
+ result = {}
+
+ for volume in volumes:
+ result[volume] = {
+ "external": True,
+ }
+
+ return result
+
+ def map_backup_volumes_service(self, volumes):
+ result = []
+
+ for volume in volumes:
+ result.append("{volume_name}:/backup/volumes/{volume_name}".format(volume_name=volume))
+
+ return result
diff --git a/playbooks/local-dev.yml b/playbooks/local-dev.yml
new file mode 100644
index 0000000..80afd91
--- /dev/null
+++ b/playbooks/local-dev.yml
@@ -0,0 +1,29 @@
+---
+- name: Run roles for local-dev
+ vars:
+ # Drop the trailing 'inventory' directory to get the repo root
+ base_path: "{{ inventory_dir.split('/')[0:-1] | join('/') }}"
+ services_path: "{{ (base_path, '_services') | path_join }}"
+ caddy_config_path: "{{ (services_path, 'caddy', 'config', 'conf.d') | path_join }}"
+
+ hosts: local-dev
+ roles:
+ - common
+
+ - acme_dns
+ - coder
+ - faas
+ - forgejo
+ - forgejo_runner
+ - healthcheck
+ - homebox
+ - influxdb
+ - jellyfin
+ - tandoor
+ - telegraf
+ - tinytinyrss
+ - umami
+ - uptime_kuma
+ - watchtower
+ - webdis
+ - wiki_js
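Because `local-dev` uses a local connection and overrides `services_path`, the whole stack can be rendered without touching a server, assuming the vault vars are available; a sketch:

```sh
# renders compose files, env files and caddy sites into _services/ at the repo root
ansible-playbook playbooks/local-dev.yml
ls _services/
```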
diff --git a/playbooks/node001.yml b/playbooks/node001.yml
new file mode 100644
index 0000000..639b913
--- /dev/null
+++ b/playbooks/node001.yml
@@ -0,0 +1,15 @@
+---
+- name: Run roles for node001
+ hosts: node001
+ roles:
+ - role: common
+ tags: [always]
+ - role: backup
+ tags: [backup]
+ - role: caddy
+ tags: [caddy, reverse-proxy, webserver]
+
+ - role: mailcow
+ tags: [mailcow, mail, communication]
+ - role: minecraft_2
+ tags: [minecraft-2, minecraft, games]
diff --git a/playbooks/node002.yml b/playbooks/node002.yml
new file mode 100644
index 0000000..a6cd6d8
--- /dev/null
+++ b/playbooks/node002.yml
@@ -0,0 +1,79 @@
+---
+- name: Run roles for node002
+ hosts: node002
+ roles:
+ - role: common
+ tags: [always]
+ - role: backup
+ tags: [backup]
+ - role: lego
+ tags: [lego, certificates]
+ - role: caddy
+ tags: [caddy, reverse-proxy, webserver]
+ vars:
+ caddy_ports_extra:
+ - 8448:8448
+
+
+ - role: acme_dns
+ tags: [acme-dns, certificates]
+ - role: authentik
+ tags: [authentik, authentication]
+ - role: coder
+ tags: [coder, development]
+ - role: extra_services
+ tags: [extra-services]
+ - role: faas
+ tags: [faas]
+ - role: forgejo
+ tags: [forgejo, git, development]
+ - role: forgejo_runner
+ tags: [forgejo-runner, ci, development]
+ - role: harbor
+ tags: [harbor, registry, development]
+ - role: healthcheck
+ tags: [healthcheck, monitoring]
+ - role: homebox
+ tags: [homebox, inventory]
+ - role: immich
+ tags: [immich, gallery]
+ - role: influxdb
+ tags: [influxdb, sensors, monitoring]
+ - role: jellyfin
+ tags: [jellyfin, media]
+ - role: linkwarden
+ tags: [linkwarden, bookmarks]
+ - role: minio
+ tags: [minio, storage]
+ - role: ntfy
+ tags: [ntfy, notifications, push]
+ - role: reitanlage_oranienburg
+ tags: [reitanlage-oranienburg, website]
+ - role: shlink
+ tags: [shlink, url-shortener]
+ - role: synapse
+ tags: [synapse, matrix, communication]
+ - role: tandoor
+ tags: [tandoor, recipes]
+ - role: teamspeak_fallback
+ tags: [teamspeak-fallback, communication]
+ - role: telegraf
+ tags: [telegraf, monitoring]
+ - role: tinytinyrss
+ tags: [tinytinyrss, news]
+ - role: umami
+ tags: [umami, analytics]
+ - role: uptime_kuma
+ tags: [uptime-kuma, monitoring]
+ - role: vikunja
+ tags: [vikunja, todo]
+ - role: watchtower
+ tags: [watchtower]
+ - role: webdis
+ tags: [webdis]
+ - role: webhook
+ tags: [webhook]
+ - role: wiki_js
+ tags: [wiki-js]
+ - role: woodpecker
+ tags: [woodpecker, ci, development]
diff --git a/playbooks/node003.yml b/playbooks/node003.yml
new file mode 100644
index 0000000..b61bfe2
--- /dev/null
+++ b/playbooks/node003.yml
@@ -0,0 +1,15 @@
+---
+- name: Run roles for node003
+ hosts: node003
+ roles:
+ - role: common
+ tags: [common]
+ - role: docker
+ tags: [common]
+ - role: backup
+ tags: [backup]
+ - role: caddy
+ tags: [caddy, reverse-proxy, webserver]
+
+ - role: mailcow
+ tags: [mailcow, mail, communication]
diff --git a/playbooks/serguzim.net.yml b/playbooks/serguzim.net.yml
new file mode 100644
index 0000000..69a1442
--- /dev/null
+++ b/playbooks/serguzim.net.yml
@@ -0,0 +1,6 @@
+---
+- name: Run playbook for node001
+ import_playbook: node001.yml
+
+- name: Run playbook for node002
+ import_playbook: node002.yml
diff --git a/playbooks/tasks/deploy-common-service.yml b/playbooks/tasks/deploy-common-service.yml
new file mode 100644
index 0000000..a4a372f
--- /dev/null
+++ b/playbooks/tasks/deploy-common-service.yml
@@ -0,0 +1,5 @@
+---
+- name: Import prepare tasks for common service
+ ansible.builtin.import_tasks: tasks/prepare-common-service.yml
+- name: Import start tasks for common service
+ ansible.builtin.import_tasks: tasks/start-common-service.yml
diff --git a/playbooks/tasks/prepare-common-service.yml b/playbooks/tasks/prepare-common-service.yml
new file mode 100644
index 0000000..ee588a9
--- /dev/null
+++ b/playbooks/tasks/prepare-common-service.yml
@@ -0,0 +1,11 @@
+---
+- name: Import tasks to create service directory
+ ansible.builtin.import_tasks: tasks/steps/create-service-directory.yml
+
+- name: Import tasks to template docker compose file
+ ansible.builtin.import_tasks: tasks/steps/template-docker-compose.yml
+ when: compose is defined
+
+- name: Import tasks to create a service.env file
+ ansible.builtin.import_tasks: tasks/steps/template-service-env.yml
+ when: env is defined
diff --git a/playbooks/tasks/set-default-facts.yml b/playbooks/tasks/set-default-facts.yml
new file mode 100644
index 0000000..30b9980
--- /dev/null
+++ b/playbooks/tasks/set-default-facts.yml
@@ -0,0 +1,6 @@
+---
+- name: Set common facts
+ ansible.builtin.set_fact:
+ service_path: "{{ (services_path, role_name | replace('_', '-')) | path_join }}"
+ docker_force_recreate: ""
+ docker_rebuild: false
diff --git a/playbooks/tasks/start-common-service.yml b/playbooks/tasks/start-common-service.yml
new file mode 100644
index 0000000..1b7323a
--- /dev/null
+++ b/playbooks/tasks/start-common-service.yml
@@ -0,0 +1,6 @@
+---
+- name: Import tasks to template the site for the reverse proxy
+ ansible.builtin.include_tasks: tasks/steps/template-site-config.yml
+ when: svc.domain is defined
+- name: Import tasks to start the service
+ ansible.builtin.import_tasks: tasks/steps/start-service.yml
diff --git a/playbooks/tasks/steps/create-service-directory.yml b/playbooks/tasks/steps/create-service-directory.yml
new file mode 100644
index 0000000..36b57ed
--- /dev/null
+++ b/playbooks/tasks/steps/create-service-directory.yml
@@ -0,0 +1,6 @@
+---
+- name: Create a service directory
+ ansible.builtin.file:
+ path: "{{ service_path }}"
+ state: directory
+ mode: "0755"
diff --git a/playbooks/tasks/steps/start-service.yml b/playbooks/tasks/steps/start-service.yml
new file mode 100644
index 0000000..90f4d57
--- /dev/null
+++ b/playbooks/tasks/steps/start-service.yml
@@ -0,0 +1,39 @@
+---
+- name: Rebuild service
+ ansible.builtin.command:
+ cmd: docker compose build --pull
+ chdir: "{{ service_path }}"
+ when:
+ - docker_rebuild
+ register: cmd_result
+ changed_when: true
+
+- name: Build service
+ ansible.builtin.command:
+ cmd: docker compose build --pull
+ chdir: "{{ service_path }}"
+ when:
+ - "'local-dev' != inventory_hostname"
+ - docker_update is defined
+ - docker_update
+ register: cmd_result
+ changed_when: true
+
+- name: Pull service
+ ansible.builtin.command:
+ cmd: docker compose pull --ignore-buildable
+ chdir: "{{ service_path }}"
+ when:
+ - "'local-dev' != inventory_hostname"
+ - docker_update is defined
+ - docker_update
+ register: cmd_result
+ changed_when: true
+
+- name: Start service
+ ansible.builtin.command:
+ cmd: docker compose up -d {{ docker_force_recreate }}
+ chdir: "{{ service_path }}"
+ when: "'local-dev' != inventory_hostname"
+ register: cmd_result
+ changed_when: cmd_result.stderr | regex_search('Started$')
diff --git a/playbooks/tasks/steps/template-docker-compose.yml b/playbooks/tasks/steps/template-docker-compose.yml
new file mode 100644
index 0000000..33ef724
--- /dev/null
+++ b/playbooks/tasks/steps/template-docker-compose.yml
@@ -0,0 +1,6 @@
+---
+- name: Template docker-compose
+ ansible.builtin.template:
+ src: docker-compose.yml.j2
+ dest: "{{ (service_path, 'docker-compose.yml') | path_join }}"
+ mode: "0644"
diff --git a/playbooks/tasks/steps/template-service-env.yml b/playbooks/tasks/steps/template-service-env.yml
new file mode 100644
index 0000000..61c37bb
--- /dev/null
+++ b/playbooks/tasks/steps/template-service-env.yml
@@ -0,0 +1,6 @@
+---
+- name: Template service.env file
+ ansible.builtin.template:
+ src: env.j2
+ dest: "{{ (service_path, 'service.env') | path_join }}"
+ mode: "0700"
diff --git a/playbooks/tasks/steps/template-site-config.yml b/playbooks/tasks/steps/template-site-config.yml
new file mode 100644
index 0000000..2788a00
--- /dev/null
+++ b/playbooks/tasks/steps/template-site-config.yml
@@ -0,0 +1,12 @@
+---
+- name: Template caddy site
+ ansible.builtin.template:
+ src: caddy_site.conf.j2
+ dest: "{{ (caddy_config_path, svc.domain + '.conf') | path_join }}"
+ mode: "0644"
+ notify:
+ - Reload caddy
+
+- name: Register caddy site
+ ansible.builtin.set_fact:
+ managed_sites: "{{ managed_sites + [svc.domain + '.conf'] }}"
diff --git a/playbooks/templates/caddy_site.conf.j2 b/playbooks/templates/caddy_site.conf.j2
new file mode 100644
index 0000000..967ba7a
--- /dev/null
+++ b/playbooks/templates/caddy_site.conf.j2
@@ -0,0 +1,43 @@
+{%- macro caddy_site_hsts(svc, for_www) -%}
+{%- if svc.hsts|default(false) and (svc.www_domain|default(false) == for_www) -%}
+header Strict-Transport-Security "max-age=31536000; includeSubdomains; preload"
+{%- endif -%}
+{%- endmacro -%}
+
+{% macro caddy_site(svc) %}
+{%- for domain in svc.additional_domains|default([]) %}
+{{ domain }},
+{% endfor -%}
+{{ "www." + svc.domain if svc.www_domain|default(false) else svc.domain }} {
+ import default
+ {{ caddy_site_hsts(svc, false) }}
+
+{{ svc.caddy_extra | indent(width='\t', first=True) if svc.caddy_extra|default(false) }}
+
+{% if svc.caddy_default|default(true) %}
+ handle {
+{% if svc.faas_function|default(false) %}
+ import faas {{ svc.faas_function }}
+{% elif svc.redirect|default(false) %}
+ redir "{{ svc.redirect }}"
+{% else %}
+ reverse_proxy {{ svc.docker_host|default(svc.name) }}:{{ svc.port }}
+{% endif %}
+ }
+{% endif %}
+}
+
+{% if svc.www_domain|default(false) %}
+{{ svc.domain }} {
+ import default
+ {{ caddy_site_hsts(svc, true) }}
+ redir https://www.{{ svc.domain }}{uri}
+}
+{% endif %}
+{% endmacro -%}
+
+{{ caddy_site(svc) }}
+
+{%- for extra_svc in svc.extra_svcs|default([]) %}
+{{ caddy_site(extra_svc) }}
+{% endfor %}
diff --git a/playbooks/templates/docker-compose.yml.j2 b/playbooks/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..52fbd05
--- /dev/null
+++ b/playbooks/templates/docker-compose.yml.j2
@@ -0,0 +1,20 @@
+{%- set compose_file = compose.file | default({}) -%}
+{%- set compose_file = compose_file_main | combine(compose_file, recursive=True) -%}
+
+{%- if env is defined -%}
+ {%- set compose_file = compose_file | combine(compose_file_env, recursive=True) -%}
+{%- endif -%}
+
+{%- if compose.network | default(True) -%}
+ {%- set compose_file = compose_file | combine(compose_file_networks, recursive=True) -%}
+{%- endif -%}
+
+{%- if compose.volumes | default(False) -%}
+ {%- set compose_file = compose_file | combine(compose_file_volumes, recursive=True) -%}
+{%- endif -%}
+
+{%- if compose.monitoring | default(False) -%}
+ {%- set compose_file = compose_file | combine(compose_file_monitoring_label, recursive=True) -%}
+{%- endif -%}
+
+{{ compose_file | to_nice_yaml }}
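One way to inspect the merge result of this template is to look at a file rendered by the `local-dev` playbook above; `acme-dns` is just an example service here:

```sh
cat _services/acme-dns/docker-compose.yml  # base + env + networks + per-role file merged
```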
diff --git a/playbooks/templates/env.j2 b/playbooks/templates/env.j2
new file mode 100644
index 0000000..1bb88aa
--- /dev/null
+++ b/playbooks/templates/env.j2
@@ -0,0 +1,7 @@
+{% for key, value in env.items() %}
+{% if value is boolean %}
+{{ key }}={{ value|lower }}
+{% else %}
+{{ key }}={{ value }}
+{% endif %}
+{% endfor %}
diff --git a/playbooks/templates/json.j2 b/playbooks/templates/json.j2
new file mode 100644
index 0000000..fdf149e
--- /dev/null
+++ b/playbooks/templates/json.j2
@@ -0,0 +1 @@
+{{ json | to_json }}
diff --git a/playbooks/templates/yml.j2 b/playbooks/templates/yml.j2
new file mode 100644
index 0000000..7654b9a
--- /dev/null
+++ b/playbooks/templates/yml.j2
@@ -0,0 +1 @@
+{{ yml | to_nice_yaml }}
diff --git a/roles/_TEMPLATE/tasks/main.yml b/roles/_TEMPLATE/tasks/main.yml
new file mode 100644
index 0000000..53f2d27
--- /dev/null
+++ b/roles/_TEMPLATE/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ NAME_svc }}"
+ env: "{{ NAME_env }}"
+ compose: "{{ NAME_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/_TEMPLATE/vars/main.yml b/roles/_TEMPLATE/vars/main.yml
new file mode 100644
index 0000000..a1db2a8
--- /dev/null
+++ b/roles/_TEMPLATE/vars/main.yml
@@ -0,0 +1,17 @@
+---
+NAME_svc:
+ domain: NAME.serguzim.me
+ name: NAME
+ port: 80
+
+NAME_env:
+ EXAMPLE: value
+
+NAME_compose:
+ watchtower: true
+ image:
+ volumes:
+ - data:/data
+ file:
+ volumes:
+ data:
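A hypothetical way to scaffold a new role from this template (GNU `sed` syntax; `myservice` is a placeholder name):

```sh
cp -r roles/_TEMPLATE roles/myservice
grep -rl NAME roles/myservice | xargs sed -i 's/NAME/myservice/g'
```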
diff --git a/roles/acme_dns/tasks/main.yml b/roles/acme_dns/tasks/main.yml
new file mode 100644
index 0000000..9eed97f
--- /dev/null
+++ b/roles/acme_dns/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ acme_dns_svc }}"
+ env: "{{ acme_dns_env }}"
+ compose: "{{ acme_dns_compose }}"
+ block:
+ - name: Import prepare tasks for common service
+ ansible.builtin.import_tasks: tasks/prepare-common-service.yml
+
+ - name: Setting the service config path
+ ansible.builtin.set_fact:
+ config_path: "{{ (service_path, 'config') | path_join }}"
+
+ - name: Create a service-config directory
+ ansible.builtin.file:
+ path: "{{ config_path }}"
+ state: directory
+ mode: "0700"
+
+ - name: Template config
+ ansible.builtin.template:
+ src: config.cfg.j2
+ dest: "{{ (config_path, 'config.cfg') | path_join }}"
+ mode: "0600"
+ register: cmd_result
+
+ - name: Set the docker force-recreate flag
+ ansible.builtin.set_fact:
+ docker_force_recreate: --force-recreate
+ when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.
+
+ - name: Import start tasks for common service
+ ansible.builtin.import_tasks: tasks/start-common-service.yml
diff --git a/roles/acme_dns/templates/config.cfg.j2 b/roles/acme_dns/templates/config.cfg.j2
new file mode 100644
index 0000000..6954179
--- /dev/null
+++ b/roles/acme_dns/templates/config.cfg.j2
@@ -0,0 +1,32 @@
+[general]
+listen = "0.0.0.0:53"
+protocol = "both"
+domain = "{{ svc.domain }}"
+nsname = "{{ svc.domain }}"
+nsadmin = "{{ svc.nsadmin }}"
+records = [
+ "{{ svc.domain }}. A {{ svc.records.a }}",
+ "{{ svc.domain }}. AAAA {{ svc.records.aaaa }}",
+ "{{ svc.domain }}. NS {{ svc.domain }}.",
+]
+debug = false
+
+[database]
+engine = "postgres"
+connection = "postgres://{{ svc.db.user }}:{{ svc.db.pass }}@{{ svc.db.host }}/{{ svc.db.db }}"
+
+[api]
+ip = "0.0.0.0"
+disable_registration = false
+port = "{{ svc.port }}"
+tls = "none"
+corsorigins = [
+ "*"
+]
+use_header = true
+header_name = "X-Forwarded-For"
+
+[logconfig]
+loglevel = "info"
+logtype = "stdout"
+logformat = "text"
diff --git a/roles/acme_dns/vars/main.yml b/roles/acme_dns/vars/main.yml
new file mode 100644
index 0000000..a2b483a
--- /dev/null
+++ b/roles/acme_dns/vars/main.yml
@@ -0,0 +1,28 @@
+---
+acme_dns_svc:
+ domain: "{{ acme_dns.host }}"
+ name: acme-dns
+ port: 80
+ nsadmin: "{{ admin_email | regex_replace('@', '.') }}"
+ records:
+ a: "{{ ansible_facts.default_ipv4.address }}"
+ aaaa: "{{ ansible_facts.default_ipv6.address }}"
+ db:
+ host: "{{ postgres.host }}"
+ port: "{{ postgres.port }}"
+ user: "{{ vault_acmedns.db.user }}"
+ pass: "{{ vault_acmedns.db.pass }}"
+ db: acme_dns
+
+acme_dns_compose:
+ watchtower: true
+ monitoring: true
+ image: joohoi/acme-dns
+ volumes:
+ - ./config:/etc/acme-dns:ro
+ file:
+ services:
+ app:
+ ports:
+ - "53:53"
+ - "53:53/udp"
diff --git a/roles/authentik/tasks/main.yml b/roles/authentik/tasks/main.yml
new file mode 100644
index 0000000..05c7ec4
--- /dev/null
+++ b/roles/authentik/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ authentik_svc }}"
+ env: "{{ authentik_env }}"
+ compose: "{{ authentik_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/authentik/vars/main.yml b/roles/authentik/vars/main.yml
new file mode 100644
index 0000000..77d83c0
--- /dev/null
+++ b/roles/authentik/vars/main.yml
@@ -0,0 +1,60 @@
+---
+authentik_svc:
+ domain: auth.serguzim.me
+ name: authentik
+ port: 9000
+ image_tag: 2024.2
+ db:
+ host: "{{ postgres.host }}"
+ database: authentik
+ user: "{{ vault_authentik.db.user }}"
+ pass: "{{ vault_authentik.db.pass }}"
+
+authentik_env:
+ AUTHENTIK_SECRET_KEY: "{{ vault_authentik.secret_key }}"
+
+ AUTHENTIK_EMAIL__HOST: "{{ mailer.host }}"
+ AUTHENTIK_EMAIL__PORT: "{{ mailer.port }}"
+ AUTHENTIK_EMAIL__USERNAME: "{{ vault_authentik.mail.user }}"
+ AUTHENTIK_EMAIL__PASSWORD: "{{ vault_authentik.mail.pass }}"
+ AUTHENTIK_EMAIL__USE_TLS: true
+ AUTHENTIK_EMAIL__USE_SSL: false
+ AUTHENTIK_EMAIL__TIMEOUT: 10
+ AUTHENTIK_EMAIL__FROM: auth@serguzim.me
+
+ AUTHENTIK_AVATARS: none
+
+ AUTHENTIK_REDIS__HOST: redis
+
+ AUTHENTIK_POSTGRESQL__HOST: "{{ svc.db.host }}"
+ AUTHENTIK_POSTGRESQL__NAME: "{{ svc.db.database }}"
+ AUTHENTIK_POSTGRESQL__USER: "{{ svc.db.user }}"
+ AUTHENTIK_POSTGRESQL__PASSWORD: "{{ svc.db.pass }}"
+
+authentik_compose:
+ watchtower: false
+ image: ghcr.io/goauthentik/server:{{ svc.image_tag }}
+ file:
+ services:
+ app:
+ command: server
+ depends_on:
+ - redis
+ worker:
+ image: ghcr.io/goauthentik/server:{{ svc.image_tag }}
+ restart: always
+ command: worker
+ user: root
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ env_file:
+ - service.env
+ depends_on:
+ - redis
+ networks:
+ default:
+ redis:
+ image: redis:alpine
+ restart: always
+ networks:
+ default:
diff --git a/roles/backup/files/Dockerfile b/roles/backup/files/Dockerfile
new file mode 100644
index 0000000..5cb0994
--- /dev/null
+++ b/roles/backup/files/Dockerfile
@@ -0,0 +1,3 @@
+FROM restic/restic
+
+RUN apk add curl
diff --git a/roles/backup/files/backup.timer b/roles/backup/files/backup.timer
new file mode 100644
index 0000000..d475963
--- /dev/null
+++ b/roles/backup/files/backup.timer
@@ -0,0 +1,4 @@
+[Timer]
+OnCalendar=*-*-* 04:10:00
+[Install]
+WantedBy=timers.target
diff --git a/roles/backup/files/node001/mailcow.sh b/roles/backup/files/node001/mailcow.sh
new file mode 100755
index 0000000..30a110f
--- /dev/null
+++ b/roles/backup/files/node001/mailcow.sh
@@ -0,0 +1,3 @@
+export MAILCOW_BACKUP_LOCATION="$BACKUP_LOCATION/mailcow"
+mkdir -p "$MAILCOW_BACKUP_LOCATION"
+/opt/mailcow-dockerized/helper-scripts/backup_and_restore.sh backup all
diff --git a/roles/backup/files/node002/immich.sh b/roles/backup/files/node002/immich.sh
new file mode 100755
index 0000000..c1b4a18
--- /dev/null
+++ b/roles/backup/files/node002/immich.sh
@@ -0,0 +1,5 @@
+backup_path="$BACKUP_LOCATION/immich"
+mkdir -p "$backup_path"
+
+cd /opt/services/immich || exit
+docker compose exec database sh -c 'pg_dump -U "$DB_USERNAME" "$DB_DATABASE"' | gzip >"$backup_path/immich.sql.gz"
diff --git a/roles/backup/files/node002/postgres.sh b/roles/backup/files/node002/postgres.sh
new file mode 100755
index 0000000..b4ddb73
--- /dev/null
+++ b/roles/backup/files/node002/postgres.sh
@@ -0,0 +1,14 @@
+mkdir -p "$BACKUP_LOCATION/postgres"
+cd "$BACKUP_LOCATION/postgres" || exit
+
+postgres_databases=$(sudo -u postgres psql -Atc "SELECT datname FROM pg_database WHERE datistemplate = false;")
+
+for i in $postgres_databases
+do
+ printf "dumping %s ..." "$i"
+ sudo -u postgres pg_dump "$i" | gzip >"pg_dump_$i.sql.gz"
+ echo " done"
+done
+
+echo "dumping all"
+sudo -u postgres pg_dumpall | gzip >"pg_dumpall.sql.gz"
diff --git a/roles/backup/files/node003/mailcow.sh b/roles/backup/files/node003/mailcow.sh
new file mode 100755
index 0000000..30a110f
--- /dev/null
+++ b/roles/backup/files/node003/mailcow.sh
@@ -0,0 +1,3 @@
+export MAILCOW_BACKUP_LOCATION="$BACKUP_LOCATION/mailcow"
+mkdir -p "$MAILCOW_BACKUP_LOCATION"
+/opt/mailcow-dockerized/helper-scripts/backup_and_restore.sh backup all
diff --git a/roles/backup/tasks/backup.d.yml b/roles/backup/tasks/backup.d.yml
new file mode 100644
index 0000000..fb28870
--- /dev/null
+++ b/roles/backup/tasks/backup.d.yml
@@ -0,0 +1,16 @@
+---
+- name: Set backup.d path
+ ansible.builtin.set_fact:
+ backup_d_path: "{{ (service_path, 'backup.d') | path_join }}"
+- name: Create backup.d directory
+ ansible.builtin.file:
+ path: "{{ backup_d_path }}"
+ state: directory
+ mode: "0755"
+- name: Copy the additional backup scripts
+ ansible.builtin.copy:
+ src: "{{ item }}"
+ dest: "{{ backup_d_path }}"
+ mode: "0755"
+ with_fileglob:
+ - "{{ ansible_facts.hostname }}/*"
diff --git a/roles/backup/tasks/docker.yml b/roles/backup/tasks/docker.yml
new file mode 100644
index 0000000..f5ae9f2
--- /dev/null
+++ b/roles/backup/tasks/docker.yml
@@ -0,0 +1,12 @@
+---
+- name: Copy the Dockerfile
+ ansible.builtin.copy:
+ src: Dockerfile
+ dest: "{{ (service_path, 'Dockerfile') | path_join }}"
+ mode: "0644"
+ register: cmd_result
+
+- name: Set the docker rebuild flag
+ ansible.builtin.set_fact:
+ docker_rebuild: true
+ when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.
diff --git a/roles/backup/tasks/main.yml b/roles/backup/tasks/main.yml
new file mode 100644
index 0000000..c165ce5
--- /dev/null
+++ b/roles/backup/tasks/main.yml
@@ -0,0 +1,39 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ backup_svc }}"
+ env: "{{ backup_env }}"
+ compose: "{{ backup_compose }}"
+ block:
+ - name: Import prepare tasks for common service
+ ansible.builtin.import_tasks: tasks/prepare-common-service.yml
+
+ - name: Copy the main backup script
+ ansible.builtin.template:
+ src: "backup.sh.j2"
+ dest: "{{ (service_path, 'backup.sh') | path_join }}"
+ mode: "0755"
+
+ - name: Import tasks specific to docker
+ ansible.builtin.import_tasks: docker.yml
+ - name: Import tasks specific to the backup.d scripts
+ ansible.builtin.import_tasks: backup.d.yml
+ - name: Import tasks specific to systemd
+ ansible.builtin.import_tasks: systemd.yml
+
+ - name: Build service
+ ansible.builtin.command:
+ cmd: docker compose build --pull
+ chdir: "{{ service_path }}"
+ register: cmd_result
+ when: docker_rebuild
+ changed_when: true
+
+ - name: Verify service
+ ansible.builtin.command:
+ cmd: docker compose run --rm app check
+ chdir: "{{ service_path }}"
+ changed_when: false
diff --git a/roles/backup/tasks/systemd.yml b/roles/backup/tasks/systemd.yml
new file mode 100644
index 0000000..4efcbd6
--- /dev/null
+++ b/roles/backup/tasks/systemd.yml
@@ -0,0 +1,20 @@
+---
+- name: Copy the system service
+ ansible.builtin.template:
+ src: backup.service.j2
+ dest: /etc/systemd/system/backup.service
+ mode: "0644"
+ become: true
+- name: Copy the system timer
+ ansible.builtin.copy:
+ src: backup.timer
+ dest: /etc/systemd/system/backup.timer
+ mode: "0644"
+ become: true
+- name: Enable the system timer
+ ansible.builtin.systemd_service:
+ name: backup.timer
+ state: started
+ enabled: true
+ daemon_reload: true
+ become: true
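Once deployed, the timer and a manual run can be checked with standard systemd tooling, nothing project-specific:

```sh
systemctl list-timers backup.timer    # next scheduled run
sudo systemctl start backup.service   # trigger a backup now (oneshot, blocks until done)
journalctl -u backup.service -f       # follow the log
```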
diff --git a/roles/backup/templates/backup.service.j2 b/roles/backup/templates/backup.service.j2
new file mode 100644
index 0000000..131b7d4
--- /dev/null
+++ b/roles/backup/templates/backup.service.j2
@@ -0,0 +1,11 @@
+[Unit]
+Description=Run the backup script for this host
+StartLimitIntervalSec=7200
+StartLimitBurst=5
+
+[Service]
+Type=oneshot
+ExecStart={{ service_path }}/backup.sh
+WorkingDirectory={{ service_path }}
+Restart=on-failure
+RestartSec=15min
diff --git a/roles/backup/templates/backup.sh.j2 b/roles/backup/templates/backup.sh.j2
new file mode 100755
index 0000000..cdfff87
--- /dev/null
+++ b/roles/backup/templates/backup.sh.j2
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+
+set -e
+
+set -a
+. "{{ service_path }}/service.env"
+set +a
+
+duration_start=$(date +%s)
+_duration_get () {
+ duration_end=$(date +%s)
+ echo "$((duration_end - duration_start))"
+}
+
+hc_url="https://hc-ping.com/$HC_UID"
+uptime_kuma_url="https://status.serguzim.me/api/push/$UPTIME_KUMA_TOKEN"
+_hc_ping () {
+ curl -fsSL --retry 3 "$hc_url$1" >/dev/null
+}
+_uptime_kuma_ping () {
+ duration=$(_duration_get)
+ curl -fsSL --retry 3 \
+ --url-query "status=$1" \
+ --url-query "msg=$2" \
+ --url-query "ping=${duration}000" \
+ "$uptime_kuma_url" >/dev/null
+}
+
+_fail () {
+ _hc_ping "/fail"
+ _uptime_kuma_ping "down" "$1"
+ rm -rf "$BACKUP_LOCATION"
+ exit 1
+}
+_success () {
+ _hc_ping
+ _uptime_kuma_ping "up" "backup successful"
+}
+
+_hc_ping "/start"
+
+BACKUP_LOCATION="$(mktemp -d --suffix=-backup)"
+export BACKUP_LOCATION
+cd "$BACKUP_LOCATION" || _fail "failed to cd to $BACKUP_LOCATION"
+
+shopt -s nullglob
+for file in "{{ service_path }}/backup.d/"*
+do
+ file_name="$(basename "$file")"
+ echo ""
+ echo "running $file_name"
+ time "$file" >"/tmp/$file_name.log" || _fail "error while running $file_name"
+done || true
+
+cd "{{ service_path }}"
+docker compose run --rm -v "$BACKUP_LOCATION:/backup/misc" app backup /backup || _fail "error during restic backup"
+
+_success
+
+rm -rf "$BACKUP_LOCATION"
+
+echo "forgetting old backups for {{ ansible_facts.hostname }}"
+docker compose run --rm app forget --host "{{ ansible_facts.hostname }}" --prune \
+ --keep-last 7 \
+ --keep-daily 14 \
+ --keep-weekly 16 \
+ --keep-monthly 12 \
+ --keep-yearly 2
diff --git a/roles/backup/vars/main.yml b/roles/backup/vars/main.yml
new file mode 100644
index 0000000..f678569
--- /dev/null
+++ b/roles/backup/vars/main.yml
@@ -0,0 +1,59 @@
+---
+
+backup_image: registry.serguzim.me/services/backup
+
+backup_svc:
+ name: backup
+
+backup_volumes_service: "{{ host_backup.volumes | map_backup_volumes_service }}"
+
+backup_env:
+ HC_UID: "{{ host_backup.hc_uid }}"
+ UPTIME_KUMA_TOKEN: "{{ host_backup.uptime_kuma_token }}"
+
+ RESTIC_REPOSITORY: "{{ vault_backup.restic.s3.repository }}"
+ RESTIC_PASSWORD: "{{ vault_backup.restic.s3.password }}"
+
+ AWS_ACCESS_KEY_ID: "{{ vault_backup.restic.s3.access_key_id }}"
+ AWS_SECRET_ACCESS_KEY: "{{ vault_backup.restic.s3.secret_access_key }}"
+
+ #RESTIC_S3_REPOSITORY: "{{ vault_backup.restic.s3.repository }}"
+ #RESTIC_S3_PASSWORD: "{{ vault_backup.restic.s3.password }}"
+ #RESTIC_S3_ACCESS_KEY_ID: "{{ vault_backup.restic.s3.access_key_id }}"
+ #RESTIC_S3_SECRET_ACCESS_KEY: "{{ vault_backup.restic.s3.secret_access_key }}"
+
+ #RESTIC_BORGBASE: "{{ vault_backup.restic.borgbase }}"
+
+backup_compose:
+ watchtower: false
+ image: "{{ backup_image }}"
+ volumes: "{{ backup_volumes_service }}"
+ file:
+ services:
+ app:
+ build:
+ context: .
+ entrypoint:
+ - /usr/bin/restic
+ - --retry-lock=1m
+ restart: "no"
+ hostname: "{{ ansible_facts.hostname }}"
+ mount:
+ build:
+ context: .
+ image: "{{ backup_image }}"
+ restart: "no"
+ hostname: "{{ ansible_facts.hostname }}"
+ env_file:
+ - service.env
+ entrypoint:
+ - /usr/bin/restic
+ - --retry-lock=1m
+ command:
+ - mount
+ - /mnt
+ privileged: true
+ devices:
+ - /dev/fuse
+
+ volumes: "{{ host_backup.volumes | map_backup_volumes }}"
diff --git a/roles/caddy/defaults/main.yml b/roles/caddy/defaults/main.yml
new file mode 100644
index 0000000..d55dc2b
--- /dev/null
+++ b/roles/caddy/defaults/main.yml
@@ -0,0 +1 @@
+caddy_ports_extra: []
diff --git a/roles/caddy/files/Dockerfile b/roles/caddy/files/Dockerfile
new file mode 100644
index 0000000..f383d18
--- /dev/null
+++ b/roles/caddy/files/Dockerfile
@@ -0,0 +1,8 @@
+FROM caddy:2-builder AS builder
+
+RUN xcaddy build \
+ --with github.com/caddy-dns/acmedns@main
+
+FROM caddy:2-alpine
+
+COPY --from=builder /usr/bin/caddy /usr/bin/caddy
diff --git a/roles/caddy/files/snippets b/roles/caddy/files/snippets
new file mode 100644
index 0000000..97f8661
--- /dev/null
+++ b/roles/caddy/files/snippets
@@ -0,0 +1,46 @@
+(auth_serguzim_me) {
+ # always forward outpost path to actual outpost
+ reverse_proxy /outpost.goauthentik.io/* authentik:9000
+
+ # forward authentication to outpost
+ forward_auth authentik:9000 {
+ uri /outpost.goauthentik.io/auth/caddy
+
+ # capitalization of the headers is important, otherwise they will be empty
+ copy_headers X-Authentik-Username X-Authentik-Groups X-Authentik-Email X-Authentik-Name X-Authentik-Uid X-Authentik-Jwt X-Authentik-Meta-Jwks X-Authentik-Meta-Outpost X-Authentik-Meta-Provider X-Authentik-Meta-App X-Authentik-Meta-Version
+
+ # Optional: this config trusts all private ranges; it should probably be set to the outpost's IP
+ trusted_proxies private_ranges
+ }
+}
+
+(default) {
+ encode zstd gzip
+}
+
+(acmedns) {
+ tls {
+ dns acmedns {
+ username "{$ACMEDNS_USER}"
+ password "{$ACMEDNS_PASS}"
+ subdomain "{$ACMEDNS_SUBD}"
+ server_url "{$ACMEDNS_URL}"
+ }
+ }
+}
+
+(faas) {
+ rewrite * /function/{args[0]}{uri}
+ reverse_proxy https://faas.serguzim.me {
+ header_up Host {http.reverse_proxy.upstream.hostport}
+ }
+}
+
+(analytics) {
+ handle_path /_a/* {
+ reverse_proxy https://analytics.serguzim.me {
+ header_up X-Analytics-IP {remote}
+ header_up Host {http.reverse_proxy.upstream.hostport}
+ }
+ }
+}
diff --git a/roles/caddy/tasks/main.yml b/roles/caddy/tasks/main.yml
new file mode 100644
index 0000000..bd9a6b5
--- /dev/null
+++ b/roles/caddy/tasks/main.yml
@@ -0,0 +1,57 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ caddy_svc }}"
+ env: "{{ caddy_env }}"
+ compose: "{{ caddy_compose }}"
+ block:
+ - name: Import prepare tasks for common service
+ ansible.builtin.import_tasks: tasks/prepare-common-service.yml
+
+ - name: Copy the Dockerfile
+ ansible.builtin.copy:
+ src: Dockerfile
+ dest: "{{ (service_path, 'Dockerfile') | path_join }}"
+ mode: "0644"
+ register: cmd_result
+
+ - name: Set the docker rebuild flag
+ ansible.builtin.set_fact:
+ docker_rebuild: true
+ when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.
+
+ - name: Set caddy config path
+ ansible.builtin.set_fact:
+ config_path: "{{ (service_path, 'config') | path_join }}"
+
+ - name: Create config directory
+ ansible.builtin.file:
+ path: "{{ config_path }}"
+ state: directory
+ mode: "0755"
+
+ - name: Template caddyfile
+ ansible.builtin.template:
+ src: Caddyfile.j2
+ dest: "{{ (config_path, 'Caddyfile') | path_join }}"
+ mode: "0644"
+ notify: Reload caddy
+
+ - name: Copy snippets file
+ ansible.builtin.copy:
+ src: snippets
+ dest: "{{ (config_path, 'snippets') | path_join }}"
+ mode: "0644"
+ notify: Reload caddy
+
+ - name: Create sites-config directory
+ ansible.builtin.file:
+ path: "{{ caddy_config_path }}"
+ state: directory
+ mode: "0755"
+
+ - name: Import start tasks for common service
+ ansible.builtin.import_tasks: tasks/start-common-service.yml
diff --git a/roles/caddy/templates/Caddyfile.j2 b/roles/caddy/templates/Caddyfile.j2
new file mode 100644
index 0000000..803ac06
--- /dev/null
+++ b/roles/caddy/templates/Caddyfile.j2
@@ -0,0 +1,11 @@
+{
+ email {{ admin_email }}
+
+ servers {
+ metrics
+ strict_sni_host on
+ }
+}
+
+import /etc/caddy/snippets
+import /etc/caddy/conf.d/*.conf
diff --git a/roles/caddy/vars/main.yml b/roles/caddy/vars/main.yml
new file mode 100644
index 0000000..a68e9a8
--- /dev/null
+++ b/roles/caddy/vars/main.yml
@@ -0,0 +1,40 @@
+---
+caddy_acmedns_user: "{{ vault_caddy.acmedns.user }}"
+caddy_acmedns_pass: "{{ vault_caddy.acmedns.pass }}"
+caddy_acmedns_subd: "{{ vault_caddy.acmedns.subd }}"
+caddy_acmedns_url: "https://{{ acme_dns.host }}"
+
+caddy_ports_default:
+ - 80:80
+ - 443:443
+ - 443:443/udp
+ - "{{ host_vpn.ip }}:2019:2019"
+caddy_ports: "{{ caddy_ports_default | union(caddy_ports_extra) }}"
+
+caddy_svc:
+ name: caddy
+
+caddy_env:
+ CADDY_ADMIN: 0.0.0.0:2019
+
+ ACMEDNS_USER: "{{ caddy_acmedns_user }}"
+ ACMEDNS_PASS: "{{ caddy_acmedns_pass }}"
+ ACMEDNS_SUBD: "{{ caddy_acmedns_subd }}"
+ ACMEDNS_URL: "{{ caddy_acmedns_url }}"
+
+caddy_compose:
+ watchtower: false
+ image: registry.serguzim.me/services/caddy:2-alpine
+ volumes:
+ - "./config:/etc/caddy/"
+ - data:/data
+ file:
+ services:
+ app:
+ build:
+ context: .
+ ports: "{{ caddy_ports }}"
+ extra_hosts:
+ - host.docker.internal:host-gateway
+ volumes:
+ data:
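With `metrics` enabled in the Caddyfile and the admin API published on the VPN IP, the metrics endpoint should be scrapeable from inside the VPN; a sketch, assuming node002's VPN address from the inventory:

```sh
curl -s http://100.64.0.2:2019/metrics | head
```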
diff --git a/roles/coder/tasks/main.yml b/roles/coder/tasks/main.yml
new file mode 100644
index 0000000..6c05ea0
--- /dev/null
+++ b/roles/coder/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ coder_svc }}"
+ env: "{{ coder_env }}"
+ compose: "{{ coder_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/coder/vars/main.yml b/roles/coder/vars/main.yml
new file mode 100644
index 0000000..bf8fabd
--- /dev/null
+++ b/roles/coder/vars/main.yml
@@ -0,0 +1,35 @@
+---
+coder_svc:
+ domain: coder.serguzim.me
+ additional_domains:
+ - "*.coder.serguzim.me"
+ caddy_extra: import acmedns
+ name: coder
+ port: 7080
+ db:
+ host: "{{ postgres.host }}"
+ port: "{{ postgres.port }}"
+ ssh_port: 22
+ ssh_port_alt: 3022
+
+coder_env:
+ CODER_ADDRESS: "0.0.0.0:7080"
+ CODER_ACCESS_URL: https://{{ svc.domain }}
+ CODER_WILDCARD_ACCESS_URL: "*.{{ svc.domain }}"
+
+ CODER_PG_CONNECTION_URL: postgres://{{ vault_coder.db.user }}:{{ vault_coder.db.pass }}@{{ svc.db.host }}:{{ svc.db.port }}/coder?sslmode=verify-full
+
+ CODER_OIDC_ISSUER_URL: https://auth.serguzim.me/application/o/coder-serguzim-me/
+ CODER_OIDC_CLIENT_ID: "{{ vault_coder.oidc_client.id }}"
+ CODER_OIDC_CLIENT_SECRET: "{{ vault_coder.oidc_client.secret }}"
+
+coder_compose:
+ watchtower: true
+ image: ghcr.io/coder/coder:latest
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ file:
+ services:
+ app:
+ group_add:
+ - "972" # docker group on host
diff --git a/roles/common/handlers/main.yml b/roles/common/handlers/main.yml
new file mode 100644
index 0000000..517225f
--- /dev/null
+++ b/roles/common/handlers/main.yml
@@ -0,0 +1,7 @@
+---
+- name: Reload caddy
+ ansible.builtin.command:
+ cmd: docker compose exec app sh -c "caddy validate --config /etc/caddy/Caddyfile && caddy reload --config /etc/caddy/Caddyfile"
+ chdir: "{{ caddy_path }}"
+ when: "'local-dev' != inventory_hostname"
+ changed_when: true
diff --git a/roles/common/tasks/main.yml b/roles/common/tasks/main.yml
new file mode 100644
index 0000000..1684048
--- /dev/null
+++ b/roles/common/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: Create the services directory
+ ansible.builtin.file:
+ path: "{{ services_path }}"
+ state: directory
+ mode: "0755"
+ owner: "{{ ansible_user }}"
+ group: "{{ ansible_user }}"
+ become: true
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
new file mode 100644
index 0000000..351facf
--- /dev/null
+++ b/roles/docker/tasks/main.yml
@@ -0,0 +1,55 @@
+- name: Install aptitude
+ ansible.builtin.apt:
+ name: aptitude
+ state: latest
+ update_cache: true
+ become: true
+
+- name: Install required system packages
+ ansible.builtin.apt:
+ pkg:
+ - apt-transport-https
+ - ca-certificates
+ - curl
+ - software-properties-common
+ - python3-pip
+ - virtualenv
+ - python3-setuptools
+ state: latest
+ update_cache: true
+ become: true
+
+- name: Add Docker GPG apt Key
+ ansible.builtin.apt_key:
+ url: https://download.docker.com/linux/ubuntu/gpg
+ state: present
+ become: true
+
+- name: Add Docker Repository
+ ansible.builtin.apt_repository:
+ repo: deb https://download.docker.com/linux/ubuntu focal stable
+ state: present
+ become: true
+
+- name: Update apt and install docker packages
+ ansible.builtin.apt:
+ pkg:
+ - docker-ce
+ - docker-ce-cli
+ - containerd.io
+ - docker-buildx-plugin
+ - docker-compose-plugin
+ state: latest
+ update_cache: true
+ become: true
+
+- name: Add user to the Docker group
+ ansible.builtin.user:
+ name: "{{ ansible_user }}"
+ groups: docker
+ append: true
+ become: true
+
+- name: Create a network
+ community.docker.docker_network:
+ name: apps
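A quick verification sketch for the role above, run on the target host:

```sh
docker network inspect apps --format '{{ .Name }}'  # network created by the last task
docker compose version                              # compose plugin installed
```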
diff --git a/roles/extra_services/tasks/main.yml b/roles/extra_services/tasks/main.yml
new file mode 100644
index 0000000..9c1c71f
--- /dev/null
+++ b/roles/extra_services/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy extra services
+ block:
+ - name: Import tasks to template the site and functions for the reverse proxy
+ ansible.builtin.include_tasks: tasks/steps/template-site-config.yml
+ loop: "{{ extra_services_all }}"
+ loop_control:
+ loop_var: svc
diff --git a/roles/extra_services/vars/main.yml b/roles/extra_services/vars/main.yml
new file mode 100644
index 0000000..79d4b33
--- /dev/null
+++ b/roles/extra_services/vars/main.yml
@@ -0,0 +1,14 @@
+---
+extra_services_default:
+ - domain: cloud-old.serguzim.me
+ docker_host: nextcloud
+ port: 80
+ caddy_extra: |
+ redir /.well-known/host-meta /public.php?service=host-meta 301
+ redir /.well-known/host-meta.json /public.php?service=host-meta-json 301
+ redir /.well-known/webfinger /public.php?service=webfinger 301
+ redir /.well-known/carddav /remote.php/dav/ 301
+ redir /.well-known/caldav /remote.php/dav/ 301
+
+extra_services_hidden: "{{ vault_extra_services }}"
+extra_services_all: "{{ extra_services_default | union(extra_services_hidden) }}"
diff --git a/roles/faas/tasks/main.yml b/roles/faas/tasks/main.yml
new file mode 100644
index 0000000..f2d6b02
--- /dev/null
+++ b/roles/faas/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ faas_svc }}"
+ block:
+ - name: Import tasks to template the site and functions for the reverse proxy
+ ansible.builtin.import_tasks: tasks/steps/template-site-config.yml
diff --git a/roles/faas/vars/main.yml b/roles/faas/vars/main.yml
new file mode 100644
index 0000000..69d7ca8
--- /dev/null
+++ b/roles/faas/vars/main.yml
@@ -0,0 +1,24 @@
+---
+faas_svc:
+ name: faas
+ domain: faas.serguzim.me
+ docker_host: host.docker.internal
+ port: 8080
+ extra_svcs:
+ - domain: serguzim.me
+ faas_function: webpage-serguzim-me
+ www_domain: true
+ hsts: true
+ caddy_extra: |
+ header /.well-known/* Access-Control-Allow-Origin *
+
+ handle /.well-known/webfinger {
+ map {query.resource} {user} {
+ acct:tobias@msrg.cc serguzim
+ acct:serguzim@msrg.cc serguzim
+ }
+ rewrite * /.well-known/webfinger/{user}.json
+ import faas webpage-msrg-cc
+ }
+ - domain: xn--sder-5qa.stream
+ faas_function: webpage-soeder-stream
diff --git a/roles/forgejo/files/templates/custom/extra_links_footer.tmpl b/roles/forgejo/files/templates/custom/extra_links_footer.tmpl
new file mode 100644
index 0000000..c53e88b
--- /dev/null
+++ b/roles/forgejo/files/templates/custom/extra_links_footer.tmpl
@@ -0,0 +1 @@
+Impressum
diff --git a/roles/forgejo/tasks/main.yml b/roles/forgejo/tasks/main.yml
new file mode 100644
index 0000000..bebe90e
--- /dev/null
+++ b/roles/forgejo/tasks/main.yml
@@ -0,0 +1,39 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ forgejo_svc }}"
+ env: "{{ forgejo_env }}"
+ compose: "{{ forgejo_compose }}"
+ block:
+ - name: Import prepare tasks for common service
+ ansible.builtin.import_tasks: tasks/prepare-common-service.yml
+
+ - name: Copy the template files
+ ansible.builtin.copy:
+ src: templates/
+ dest: "{{ (service_path, 'templates') | path_join }}"
+ mode: "0644"
+ register: cmd_result
+
+ - name: Set the docker force-recreate flag
+ ansible.builtin.set_fact:
+ docker_force_recreate: --force-recreate
+ when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.
+
+ - name: Template the custom footer
+ ansible.builtin.template:
+ src: footer.tmpl.j2
+ dest: "{{ (service_path, 'templates', 'custom', 'footer.tmpl') | path_join }}"
+ mode: "0644"
+ register: cmd_result
+
+ - name: Set the docker force-recreate flag
+ ansible.builtin.set_fact:
+ docker_force_recreate: --force-recreate
+ when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.
+
+ - name: Import start tasks for common service
+ ansible.builtin.import_tasks: tasks/start-common-service.yml
diff --git a/roles/forgejo/templates/footer.tmpl.j2 b/roles/forgejo/templates/footer.tmpl.j2
new file mode 100644
index 0000000..53fcdad
--- /dev/null
+++ b/roles/forgejo/templates/footer.tmpl.j2
@@ -0,0 +1 @@
+
diff --git a/roles/forgejo/vars/main.yml b/roles/forgejo/vars/main.yml
new file mode 100644
index 0000000..13f96a8
--- /dev/null
+++ b/roles/forgejo/vars/main.yml
@@ -0,0 +1,98 @@
+---
+forgejo_svc:
+ domain: git.serguzim.me
+ name: forgejo
+ port: 3000
+ caddy_extra: |
+ import analytics
+ header /attachments/* Access-Control-Allow-Origin *
+ db:
+ host: "{{ postgres.host }}"
+ port: "{{ postgres.port }}"
+ ssh_port: 22
+
+forgejo_env:
+ FORGEJO__database__DB_TYPE: postgres
+ FORGEJO__database__HOST: "{{ svc.db.host }}:{{ svc.db.port }}"
+ FORGEJO__database__NAME: forgejo
+ FORGEJO__database__USER: "{{ vault_forgejo.db.user }}"
+ FORGEJO__database__PASSWD: "{{ vault_forgejo.db.pass }}"
+ FORGEJO__database__SSL_MODE: verify-full
+
+ FORGEJO__repository__ENABLE_PUSH_CREATE_USER: true
+ FORGEJO__repository__ENABLE_PUSH_CREATE_ORG: true
+ FORGEJO__repository__DEFAULT_BRANCH: main
+
+ FORGEJO__cors__ENABLED: true
+ FORGEJO__cors__SCHEME: https
+
+ FORGEJO__ui__DEFAULT_THEME: forgejo-dark
+
+ FORGEJO__server__DOMAIN: "{{ svc.domain }}"
+ FORGEJO__server__SSH_DOMAIN: "{{ svc.domain }}"
+ FORGEJO__server__SSH_PORT: "{{ svc.ssh_port }}"
+ FORGEJO__server__ROOT_URL: https://{{ svc.domain }}
+ FORGEJO__server__OFFLINE_MODE: true
+ FORGEJO__server__LFS_JWT_SECRET: "{{ vault_forgejo.server_lfs_jwt_secret }}"
+ FORGEJO__server__LFS_START_SERVER: true
+
+ FORGEJO__security__INSTALL_LOCK: true
+ FORGEJO__security__INTERNAL_TOKEN: "{{ vault_forgejo.security_internal_token }}"
+ FORGEJO__security__SECRET_KEY: "{{ vault_forgejo.security_secret_key }}"
+
+ FORGEJO__openid__ENABLE_OPENID_SIGNUP: true
+ FORGEJO__openid__ENABLE_OPENID_SIGNIN: false
+
+ FORGEJO__service__ALLOW_ONLY_EXTERNAL_REGISTRATION: true
+ FORGEJO__service__ENABLE_BASIC_AUTHENTICATION: false
+ FORGEJO__service__DEFAULT_KEEP_EMAIL_PRIVATE: true
+ FORGEJO__service__NO_REPLY_ADDRESS: discard.msrg.cc
+
+ FORGEJO__webhook__DELIVER_TIMEOUT: 60
+
+ FORGEJO__mailer__ENABLED: true
+ FORGEJO__mailer__PROTOCOL: smtp+starttls
+ FORGEJO__mailer__SMTP_ADDR: mail.serguzim.me
+ FORGEJO__mailer__SMTP_PORT: 587
+ FORGEJO__mailer__FROM: Forgejo
+ FORGEJO__mailer__USER: git@serguzim.me
+ FORGEJO__mailer__PASSWD: "{{ vault_forgejo.mailer_passwd }}"
+ FORGEJO__mailer__SEND_AS_PLAIN_TEXT: true
+
+ FORGEJO__picture__DISABLE_GRAVATAR: true
+
+ FORGEJO__attachment__MAX_FILES: 10
+
+ FORGEJO__oauth2__JWT_SECRET: "{{ vault_forgejo.oauth2_jwt_secret }}"
+
+ FORGEJO__metrics__ENABLED: true
+ FORGEJO__metrics__TOKEN: "{{ vault_metrics_token }}"
+
+ FORGEJO__actions__ENABLED: true
+
+ FORGEJO__storage__STORAGE_TYPE: minio
+ FORGEJO__storage__MINIO_ENDPOINT: s3.nl-ams.scw.cloud
+ FORGEJO__storage__MINIO_ACCESS_KEY_ID: "{{ opentofu.scaleway_service_keys.forgejo.access_key }}"
+ FORGEJO__storage__MINIO_SECRET_ACCESS_KEY: "{{ opentofu.scaleway_service_keys.forgejo.secret_key }}"
+ FORGEJO__storage__MINIO_BUCKET: forgejo.serguzim.me
+ FORGEJO__storage__MINIO_LOCATION: nl-ams
+ FORGEJO__storage__MINIO_USE_SSL: true
+
+ FORGEJO__other__SHOW_FOOTER_VERSION: true
+ FORGEJO__other__SHOW_FOOTER_TEMPLATE_LOAD_TIME: false
+
+forgejo_compose:
+ watchtower: true
+ image: codeberg.org/forgejo/forgejo:7.0
+ volumes:
+ - data:/data
+ - ./templates:/data/gitea/templates
+ - /etc/timezone:/etc/timezone:ro
+ - /etc/localtime:/etc/localtime:ro
+ file:
+ services:
+ app:
+ ports:
+ - "{{ svc.ssh_port }}:{{ svc.ssh_port }}"
+ volumes:
+ data:
diff --git a/roles/forgejo_runner/files/config.yml b/roles/forgejo_runner/files/config.yml
new file mode 100644
index 0000000..3a46c2d
--- /dev/null
+++ b/roles/forgejo_runner/files/config.yml
@@ -0,0 +1,81 @@
+log:
+ # The level of logging, can be trace, debug, info, warn, error, fatal
+ level: info
+
+runner:
+ # Where to store the registration result.
+ file: /data/.runner
+ # Execute how many tasks concurrently at the same time.
+ capacity: 1
+ # Extra environment variables to run jobs.
+ #envs:
+ # A_TEST_ENV_NAME_1: a_test_env_value_1
+ # A_TEST_ENV_NAME_2: a_test_env_value_2
+ # Extra environment variables to run jobs from a file.
+ # It will be ignored if it's empty or the file doesn't exist.
+ #env_file: .env
+ # The timeout for a job to be finished.
+ # Please note that the Forgejo instance also has a timeout (3h by default) for the job.
+ # So the job could be stopped by the Forgejo instance if its timeout is shorter than this.
+ timeout: 3h
+ # Whether to skip verifying the TLS certificate of the Forgejo instance.
+ insecure: false
+ # The timeout for fetching the job from the Forgejo instance.
+ fetch_timeout: 5s
+ # The interval for fetching the job from the Forgejo instance.
+ fetch_interval: 2s
+ # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
+ # Like: ["macos-arm64:host", "ubuntu-latest:docker://node:16-bullseye", "ubuntu-22.04:docker://node:16-bullseye"]
+ # If it's empty when registering, it will ask for inputting labels.
+ # If it's empty when executing `daemon`, the labels in the `.runner` file will be used.
+ labels: []
+
+cache:
+ # Enable cache server to use actions/cache.
+ enabled: true
+ # The directory to store the cache data.
+ # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
+ dir: ""
+ # The host of the cache server.
+ # It's not for the address to listen, but the address to connect from job containers.
+ # So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
+ host: ""
+ # The port of the cache server.
+ # 0 means to use a random available port.
+ port: 0
+
+container:
+ # Specifies the network to which the container will connect.
+ # Could be host, bridge or the name of a custom network.
+ # If it's empty, create a network automatically.
+ network: ""
+ # Whether to create networks with IPv6 enabled. Requires the Docker daemon to be set up accordingly.
+ # Only takes effect if "network" is set to "".
+ enable_ipv6: false
+ # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
+ privileged: false
+ # And other options to be used when the container is started (eg, --add-host=my.forgejo.url:host-gateway).
+ options:
+ # The parent directory of a job's working directory.
+ # If it's empty, /workspace will be used.
+ workdir_parent:
+ # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
+ # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
+ # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
+ # valid_volumes:
+ # - data
+ # - /src/*.json
+ # If you want to allow any volume, please use the following configuration:
+ # valid_volumes:
+ # - '**'
+ valid_volumes: []
+ # overrides the docker client host with the specified one.
+ # If it's empty, act_runner will find an available docker host automatically.
+ # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
+ # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
+ docker_host: ""
+
+host:
+ # The parent directory of a job's working directory.
+ # If it's empty, $HOME/.cache/act/ will be used.
+ workdir_parent:
diff --git a/roles/forgejo_runner/tasks/main.yml b/roles/forgejo_runner/tasks/main.yml
new file mode 100644
index 0000000..911dfd1
--- /dev/null
+++ b/roles/forgejo_runner/tasks/main.yml
@@ -0,0 +1,42 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ forgejo_runner_svc }}"
+ env: "{{ forgejo_runner_env }}"
+ compose: "{{ forgejo_runner_compose }}"
+ block:
+ - name: Import tasks to create service directory
+ ansible.builtin.import_tasks: tasks/steps/create-service-directory.yml
+ - name: Import tasks to template docker compose file
+ ansible.builtin.import_tasks: tasks/steps/template-docker-compose.yml
+
+ - name: Copy the config
+ ansible.builtin.copy:
+ src: config.yml
+ dest: "{{ (service_path, 'config.yml') | path_join }}"
+ mode: "0755"
+
+ - name: Check if service.env already exists
+ ansible.builtin.stat:
+ path: "{{ (service_path, 'service.env') | path_join }}"
+ register: env_file
+
+ - name: Import tasks to prompt for the registration token
+ ansible.builtin.import_tasks: tasks/prompt-registration-token.yml
+ when: not env_file.stat.exists or force_forgejo_runner_registration | default(False)
+
+ - name: Import tasks to create a service.env file
+ ansible.builtin.import_tasks: tasks/steps/template-service-env.yml
+ - name: Import start tasks for common service
+ ansible.builtin.import_tasks: tasks/start-common-service.yml
+
+ - name: Register runner
+ ansible.builtin.command:
+ cmd: docker compose run --rm -it app sh -c
+ 'forgejo-runner register --no-interactive --token ${FORGEJO_RUNNER_REGISTRATION_TOKEN} --instance ${FORGEJO_INSTANCE_URL}'
+ chdir: "{{ service_path }}"
+ when: not env_file.stat.exists or force_forgejo_runner_registration | default(False)
+ changed_when: true # "when" checks enough. We are sure to change something here.
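+
+# Usage sketch (playbook name illustrative): force a re-registration on an existing
+# deployment with `ansible-playbook site.yml -e force_forgejo_runner_registration=True`.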
diff --git a/roles/forgejo_runner/tasks/prompt-registration-token.yml b/roles/forgejo_runner/tasks/prompt-registration-token.yml
new file mode 100644
index 0000000..95e14de
--- /dev/null
+++ b/roles/forgejo_runner/tasks/prompt-registration-token.yml
@@ -0,0 +1,10 @@
+---
+- name: Input forgejo-runner registration token
+ ansible.builtin.pause:
+ prompt: Enter the forgejo-runner registration token
+ echo: false
+ register: prompt_registration_token
+
+- name: Put registration token into env vars
+ ansible.builtin.set_fact:
+ forgejo_runner_env: "{{ forgejo_runner_env | combine({'FORGEJO_RUNNER_REGISTRATION_TOKEN': prompt_registration_token.user_input}, recursive=True) }}"
diff --git a/roles/forgejo_runner/vars/main.yml b/roles/forgejo_runner/vars/main.yml
new file mode 100644
index 0000000..2b13fbf
--- /dev/null
+++ b/roles/forgejo_runner/vars/main.yml
@@ -0,0 +1,32 @@
+---
+forgejo_runner_svc:
+ name: forgejo-runner
+
+forgejo_runner_env:
+ FORGEJO_INSTANCE_URL: https://git.serguzim.me/
+ FORGEJO_RUNNER_REGISTRATION_TOKEN:
+ DOCKER_HOST: tcp://docker-in-docker:2375
+
+forgejo_runner_compose:
+ watchtower: true
+ image: code.forgejo.org/forgejo/runner:3.3.0
+ volumes:
+ - ./config.yml:/config/config.yml
+ - data:/data
+ file:
+ services:
+ app:
+ hostname: "{{ ansible_facts.hostname }}"
+ command: forgejo-runner --config /config/config.yml daemon
+ depends_on:
+ - docker-in-docker
+ links:
+ - docker-in-docker
+ docker-in-docker:
+ image: docker:dind
+ privileged: true
+ command: dockerd -H tcp://0.0.0.0:2375 --tls=false
+ networks:
+ default:
+ volumes:
+ data:
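+
+# Note: DOCKER_HOST in the env above points job containers at the docker-in-docker
+# sidecar; dockerd listens unencrypted on 2375, reachable only on this compose network.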
diff --git a/roles/harbor/tasks/main.yml b/roles/harbor/tasks/main.yml
new file mode 100644
index 0000000..88a58a2
--- /dev/null
+++ b/roles/harbor/tasks/main.yml
@@ -0,0 +1,44 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ harbor_svc }}"
+ env: "{{ harbor_env }}"
+ yml: "{{ harbor_yml }}"
+ block:
+ - name: Import prepare tasks for common service
+ ansible.builtin.import_tasks: tasks/prepare-common-service.yml
+
+ - name: Import tasks to template the site for the reverse proxy
+ ansible.builtin.import_tasks: tasks/steps/template-site-config.yml
+
+ - name: Template config
+ ansible.builtin.template:
+ src: yml.j2
+ dest: "{{ (service_path, 'harbor.yml') | path_join }}"
+ mode: "0644"
+
+ - name: Download harbor
+ ansible.builtin.unarchive:
+ src: https://github.com/goharbor/harbor/releases/download/v{{ svc.harbor_version }}/harbor-online-installer-v{{ svc.harbor_version }}.tgz
+ dest: "{{ service_path }}"
+ remote_src: true
+
+ - name: Run the harbor prepare command
+ ansible.builtin.command:
+ cmd: "{{ service_path }}/harbor/prepare"
+ chdir: "{{ service_path }}"
+ creates: "{{ (service_path, 'docker-compose.yml') | path_join }}"
+ environment:
+ HARBOR_BUNDLE_DIR: "{{ service_path }}"
+
+ - name: Run the harbor install command
+ ansible.builtin.command:
+ cmd: "{{ service_path }}/harbor/install.sh"
+ chdir: "{{ service_path }}"
+ environment:
+ HARBOR_BUNDLE_DIR: "{{ service_path }}"
+ become: true
+ changed_when: true # TODO find way to recognize need to run install command
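+
+# Note: `prepare` renders docker-compose.yml from harbor.yml, and install.sh (re)creates
+# the stack; the certificate hook in roles/lego re-runs install.sh after renewals.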
diff --git a/roles/harbor/vars/main.yml b/roles/harbor/vars/main.yml
new file mode 100644
index 0000000..ef165e5
--- /dev/null
+++ b/roles/harbor/vars/main.yml
@@ -0,0 +1,103 @@
+---
+harbor_port_http: 20080
+harbor_port_https: 20443
+harbor_port_metrics: 29000
+
+harbor_db_host: "{{ postgres.host }}"
+harbor_db_port: "{{ postgres.port }}"
+harbor_db_database: harbor
+harbor_db_user: "{{ vault_harbor.db.user }}"
+harbor_db_pass: "{{ vault_harbor.db.pass }}"
+harbor_version: 2.9.0
+
+harbor_svc:
+ name: harbor
+ domain: registry.serguzim.me
+ caddy_extra: |
+ reverse_proxy /metrics host.docker.internal:{{ harbor_port_metrics }}
+ reverse_proxy host.docker.internal:{{ harbor_port_https }} {
+ transport http {
+ tls
+ tls_server_name registry.serguzim.me
+ }
+ }
+ caddy_default: false
+ db:
+ host: "{{ postgres.host }}"
+ port: "{{ postgres.port }}"
+ database: harbor
+ user: "{{ vault_harbor.db.user }}"
+ pass: "{{ vault_harbor.db.pass }}"
+ harbor_version: 2.9.0
+
+harbor_yml:
+ hostname: "{{ harbor_svc.domain }}"
+ http:
+ port: "{{ harbor_port_http }}"
+ https:
+ port: "{{ harbor_port_https }}"
+ certificate: "{{ (service_path, 'server.crt') | path_join }}"
+ private_key: "{{ (service_path, 'server.key') | path_join }}"
+ external_url: https://registry.serguzim.me
+ harbor_admin_password: "{{ vault_harbor.admin_password }}"
+ data_volume: "{{ (service_path, 'data') | path_join }}"
+ storage_service:
+ s3:
+ accesskey: "{{ vault_harbor.minio.accesskey }}"
+ secretkey: "{{ vault_harbor.minio.secretkey }}"
+ region: de-contabo-1
+ regionendpoint: https://s3.serguzim.me
+ bucket: registry
+ secure: true
+ trivy:
+ ignore_unfixed: false
+ skip_update: false
+ offline_scan: false
+ security_check: vuln
+ insecure: false
+ jobservice:
+ max_job_workers: 10
+ job_loggers:
+ - STD_OUTPUT
+ - FILE
+ logger_sweeper_duration: 1
+ notification:
+ webhook_job_max_retry: 3
+ webhook_job_http_client_timeout: 3
+ log:
+ level: info
+ local:
+ rotate_count: 50
+ rotate_size: 200M
+ location: /var/log/harbor
+ _version: "{{ harbor_version }}"
+ external_database:
+ harbor:
+ host: "{{ harbor_db_host }}"
+ port: "{{ harbor_db_port }}"
+ db_name: "{{ harbor_db_database }}"
+ username: "{{ harbor_db_user }}"
+ password: "{{ harbor_db_pass }}"
+ ssl_mode: verify-full
+ max_idle_conns: 2
+ max_open_conns: 0
+ proxy:
+ http_proxy:
+ https_proxy:
+ no_proxy:
+ components:
+ - core
+ - jobservice
+ - trivy
+ metric:
+ enabled: true
+ port: "{{ harbor_port_metrics }}"
+ path: /metrics
+ upload_purging:
+ enabled: true
+ age: 168h
+ interval: 24h
+ dryrun: false
+ cache:
+ enabled: false
+ expire_hours: 24
diff --git a/roles/healthcheck/files/Dockerfile b/roles/healthcheck/files/Dockerfile
new file mode 100644
index 0000000..de41acf
--- /dev/null
+++ b/roles/healthcheck/files/Dockerfile
@@ -0,0 +1,7 @@
+FROM ubuntu
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt update -y \
+ && apt install -y curl dnsutils msmtp gettext-base python3-pip python3-requests \
+ && pip install matrix-nio
diff --git a/roles/healthcheck/files/data/http b/roles/healthcheck/files/data/http
new file mode 100755
index 0000000..3fecf5e
--- /dev/null
+++ b/roles/healthcheck/files/data/http
@@ -0,0 +1,54 @@
+#!/usr/bin/sh
+
+cd /opt/ || exit
+
+hc_url="https://hc-ping.com/$HTTP_HC_UID"
+services_down=""
+error=""
+
+alias curl_hc='curl -LA "$USER_AGENT" --retry 3'
+
+check_url ()
+{
+ url="https://$1$2"
+ printf "checking url %s ." "$url"
+ dig A "$1" >/dev/null
+ printf "."
+ result=$(curl -LsSfv --connect-timeout 30 --retry 3 "$url" 2>&1)
+ code="$?"
+ printf ".\n"
+ #shellcheck disable=SC2181
+ if [ "$code" = "0" ]
+ then
+ echo "... good"
+ else
+ services_down=$(printf "%s\n%s" "$services_down" "$1")
+ error=$(printf "%s\n==========\n%s:\n%s" "$error" "$1" "$result")
+ echo "... bad"
+ fi
+}
+
+#check_url "acme.serguzim.me" "/health"
+check_url "analytics.serguzim.me"
+check_url "auth.serguzim.me"
+check_url "ci.serguzim.me"
+#check_url "cloud.serguzim.me" "/login?noredir=1"
+check_url "git.serguzim.me"
+check_url "hook.serguzim.me"
+check_url "mail.serguzim.me"
+#check_url "msrg.cc" # disabled because it keeps creating false alerts
+check_url "registry.serguzim.me" "/account/sign-in"
+check_url "rss.serguzim.me"
+#check_url "serguzim.me" # disabled because it keeps creating false alerts
+check_url "status.serguzim.me" "/status/serguzim-net"
+check_url "tick.serguzim.me"
+check_url "wiki.serguzim.me"
+check_url "www.reitanlage-oranienburg.de"
+
+if [ "$error" = "" ]
+then
+ curl_hc "$hc_url" >/dev/null
+ echo "ALL GOOD"
+else
+ curl_hc --data-raw "$services_down$error" "$hc_url/fail" >/dev/null
+fi
diff --git a/roles/healthcheck/files/data/mail b/roles/healthcheck/files/data/mail
new file mode 100755
index 0000000..650815d
--- /dev/null
+++ b/roles/healthcheck/files/data/mail
@@ -0,0 +1,17 @@
+#!/usr/bin/sh
+
+cd /opt/ || exit
+
+hc_url="https://hc-ping.com/$MAIL_HC_UID"
+
+alias curl_hc='curl -LA "$USER_AGENT" --retry 3'
+
+envsubst < template.msmtprc > /tmp/msmtprc
+envsubst < mailcheck.template.mail > /tmp/mailcheck.mail
+
+result=$(msmtp -C /tmp/msmtprc -a default "$MAIL_HC_UID@hc-ping.com" < /tmp/mailcheck.mail 2>&1)
+if [ "$?" != "0" ]
+then
+ echo "$result"
+ curl_hc --data-raw "$result" "$hc_url/fail" >/dev/null
+fi
diff --git a/roles/healthcheck/files/data/mailcheck.template.mail b/roles/healthcheck/files/data/mailcheck.template.mail
new file mode 100644
index 0000000..e6c8ebd
--- /dev/null
+++ b/roles/healthcheck/files/data/mailcheck.template.mail
@@ -0,0 +1,5 @@
+To: ${MAIL_HC_UID}@hc-ping.com
+From: ${MAIL_USER}
+Subject: Healthcheck
+
+Mailserver alive
diff --git a/roles/healthcheck/files/data/matrix b/roles/healthcheck/files/data/matrix
new file mode 100755
index 0000000..f2e4ac9
--- /dev/null
+++ b/roles/healthcheck/files/data/matrix
@@ -0,0 +1,45 @@
+#!/usr/bin/python3
+
+import os
+import requests
+import sys
+
+import asyncio
+from nio import AsyncClient
+
+healthcheck_url = "https://hc-ping.com/" + os.environ['MATRIX_HC_UID']
+
+def send_ping(success, msg=""):
+ url = healthcheck_url
+ if not success:
+ url += "/fail"
+
+ requests.get(url, data=msg, headers={'user-agent': os.environ['USER_AGENT']})
+
+async def main():
+ try:
+ client = AsyncClient(os.environ['MATRIX_SERVER'])
+ client.access_token = os.environ['MATRIX_TOKEN']
+ client.device_id = os.environ['USER_AGENT']
+ await client.room_send(
+ room_id = os.environ['MATRIX_ROOM'],
+ message_type = "m.room.message",
+ content = {
+ "msgtype": "m.text",
+ "body": "!ping"
+ }
+ )
+ except Exception as e:
+ print(e)
+ print("exception during login or sending")
+ send_ping(False, str(e))
+ sys.exit(1)
+ await client.close()
+
+ send_ping(True)
+ sys.exit(0)
+
+
+asyncio.run(main())
diff --git a/roles/healthcheck/files/data/template.msmtprc b/roles/healthcheck/files/data/template.msmtprc
new file mode 100644
index 0000000..4a95888
--- /dev/null
+++ b/roles/healthcheck/files/data/template.msmtprc
@@ -0,0 +1,13 @@
+defaults
+auth on
+tls on
+tls_trust_file /etc/ssl/certs/ca-certificates.crt
+logfile /tmp/msmtp.log
+
+account default
+host ${MAIL_HOST}
+port ${MAIL_PORT}
+tls_starttls on
+from ${MAIL_USER}
+user ${MAIL_USER}
+password ${MAIL_PASS}
diff --git a/roles/healthcheck/files/docker-compose.yml b/roles/healthcheck/files/docker-compose.yml
new file mode 100644
index 0000000..7400e74
--- /dev/null
+++ b/roles/healthcheck/files/docker-compose.yml
@@ -0,0 +1,24 @@
+version: "3.7"
+
+x-common-elements:
+ &common-elements
+ build:
+ context: .
+ image: registry.serguzim.me/services/healthcheck
+ restart: "no"
+ env_file:
+ - service.env
+ volumes:
+ - ./data/:/opt
+ network_mode: host
+
+services:
+ http:
+ <<: *common-elements
+ command: "/opt/http"
+ matrix:
+ <<: *common-elements
+ command: "/opt/matrix"
+ mail:
+ <<: *common-elements
+ command: "/opt/mail"
diff --git a/roles/healthcheck/files/healthcheck@.timer b/roles/healthcheck/files/healthcheck@.timer
new file mode 100644
index 0000000..1ecfb64
--- /dev/null
+++ b/roles/healthcheck/files/healthcheck@.timer
@@ -0,0 +1,4 @@
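+# Instantiated per check (see healthcheck_svc.checks); *:0/5 runs every five minutes.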
+[Timer]
+OnCalendar=*:0/5
+[Install]
+WantedBy=timers.target
diff --git a/roles/healthcheck/tasks/docker.yml b/roles/healthcheck/tasks/docker.yml
new file mode 100644
index 0000000..626d1fd
--- /dev/null
+++ b/roles/healthcheck/tasks/docker.yml
@@ -0,0 +1,16 @@
+---
+- name: Copy the docker-compose file
+ ansible.builtin.copy:
+ src: docker-compose.yml
+ dest: "{{ (service_path, 'docker-compose.yml') | path_join }}"
+ mode: "0644"
+- name: Copy the Dockerfile
+ ansible.builtin.copy:
+ src: Dockerfile
+ dest: "{{ (service_path, 'Dockerfile') | path_join }}"
+ mode: "0644"
+- name: Copy the data files
+ ansible.builtin.copy:
+ src: data
+ dest: "{{ service_path }}"
+ mode: "0755"
diff --git a/roles/healthcheck/tasks/main.yml b/roles/healthcheck/tasks/main.yml
new file mode 100644
index 0000000..9a04158
--- /dev/null
+++ b/roles/healthcheck/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ healthcheck_svc }}"
+ env: "{{ healthcheck_env }}"
+ block:
+ - name: Import tasks to create service directory
+ ansible.builtin.import_tasks: tasks/steps/create-service-directory.yml
+
+ - name: Import tasks specific to docker
+ ansible.builtin.import_tasks: docker.yml
+ - name: Import tasks specific to systemd
+ ansible.builtin.import_tasks: systemd.yml
+
+ - name: Import tasks to create a service.env file
+ ansible.builtin.import_tasks: tasks/steps/template-service-env.yml
+
+ - name: Build service
+ ansible.builtin.command:
+ cmd: docker compose build --pull
+ chdir: "{{ service_path }}"
+ when:
+ - "'local-dev' != inventory_hostname"
+ register: cmd_result
+ changed_when: true
diff --git a/roles/healthcheck/tasks/systemd.yml b/roles/healthcheck/tasks/systemd.yml
new file mode 100644
index 0000000..c5d6379
--- /dev/null
+++ b/roles/healthcheck/tasks/systemd.yml
@@ -0,0 +1,21 @@
+---
+- name: Template the system service
+ ansible.builtin.template:
+ src: healthcheck@.service.j2
+ dest: /etc/systemd/system/healthcheck@.service
+ mode: "0644"
+ become: true
+- name: Copy the system timer
+ ansible.builtin.copy:
+ src: healthcheck@.timer
+ dest: /etc/systemd/system/healthcheck@.timer
+ mode: "0644"
+ become: true
+- name: Enable the system timer
+ ansible.builtin.systemd_service:
+ name: healthcheck@{{ item }}.timer
+ state: started
+ enabled: true
+ daemon_reload: true
+ loop: "{{ healthcheck_svc.checks }}"
+ become: true
diff --git a/roles/healthcheck/templates/healthcheck@.service.j2 b/roles/healthcheck/templates/healthcheck@.service.j2
new file mode 100644
index 0000000..32fa199
--- /dev/null
+++ b/roles/healthcheck/templates/healthcheck@.service.j2
@@ -0,0 +1,5 @@
+[Service]
+Type=simple
+ExecStart=/usr/bin/docker compose run --rm %i
+WorkingDirectory={{ service_path }}
+RuntimeMaxSec=300
diff --git a/roles/healthcheck/vars/main.yml b/roles/healthcheck/vars/main.yml
new file mode 100644
index 0000000..4dd4f9e
--- /dev/null
+++ b/roles/healthcheck/vars/main.yml
@@ -0,0 +1,24 @@
+---
+healthcheck_svc:
+ name: healthcheck
+ checks:
+ - http
+ - mail
+ - matrix
+
+healthcheck_env:
+ USER_AGENT: healthcheck-bot for serguzim.net
+
+ HTTP_HC_UID: "{{ vault_healthcheck.hc_uid.http }}"
+
+ MATRIX_SERVER: https://matrix.serguzim.me
+ MATRIX_SERVER_FEDTESTER: msrg.cc
+ MATRIX_HC_UID: "{{ vault_healthcheck.hc_uid.matrix }}"
+ MATRIX_TOKEN: "{{ vault_healthcheck.matrix.token }}"
+ MATRIX_ROOM: "{{ vault_healthcheck.matrix.room }}"
+
+ MAIL_HC_UID: "{{ vault_healthcheck.hc_uid.mail }}"
+ MAIL_HOST: "{{ mailer.host }}"
+ MAIL_PORT: "{{ mailer.port }}"
+ MAIL_USER: "{{ vault_healthcheck.mailer.user }}"
+ MAIL_PASS: "{{ vault_healthcheck.mailer.pass }}"
diff --git a/roles/homebox/tasks/main.yml b/roles/homebox/tasks/main.yml
new file mode 100644
index 0000000..d4aed71
--- /dev/null
+++ b/roles/homebox/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ homebox_svc }}"
+ env: "{{ homebox_env }}"
+ compose: "{{ homebox_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/homebox/vars/main.yml b/roles/homebox/vars/main.yml
new file mode 100644
index 0000000..0376954
--- /dev/null
+++ b/roles/homebox/vars/main.yml
@@ -0,0 +1,23 @@
+---
+homebox_svc:
+ domain: inventory.serguzim.me
+ name: homebox
+ port: 7745
+
+homebox_env:
+ HBOX_OPTIONS_ALLOW_REGISTRATION: false
+ HBOX_MAILER_HOST: mail.serguzim.me
+ HBOX_MAILER_PORT: 587
+ HBOX_MAILER_USERNAME: inventory@serguzim.me
+ HBOX_MAILER_PASSWORD: "{{ vault_homebox.mailer_passwd }}"
+ HBOX_MAILER_FROM: Homebox
+ HBOX_SWAGGER_SCHEMA: https
+
+homebox_compose:
+ watchtower: true
+ image: ghcr.io/hay-kot/homebox:latest-rootless
+ volumes:
+ - data:/data
+ file:
+ volumes:
+ data:
diff --git a/roles/immich/tasks/main.yml b/roles/immich/tasks/main.yml
new file mode 100644
index 0000000..78fd542
--- /dev/null
+++ b/roles/immich/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ immich_svc }}"
+ env: "{{ immich_env }}"
+ compose: "{{ immich_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/immich/vars/main.yml b/roles/immich/vars/main.yml
new file mode 100644
index 0000000..0f4f92f
--- /dev/null
+++ b/roles/immich/vars/main.yml
@@ -0,0 +1,74 @@
+---
+immich_db_host: database
+immich_db_db: immich
+immich_db_user: "{{ vault_immich.db.user }}"
+immich_db_pass: "{{ vault_immich.db.pass }}"
+
+immich_svc:
+ domain: gallery.serguzim.me
+ name: immich
+ port: 3001
+ version: release
+ db:
+ host: "{{ postgres.host }}"
+ database: immich
+
+
+immich_env:
+ # IMMICH_CONFIG_FILE: /immich.json
+
+ TZ: "{{ timezone }}"
+
+ DB_HOSTNAME: "{{ immich_db_host }}"
+ DB_DATABASE_NAME: "{{ immich_db_db }}"
+ DB_USERNAME: "{{ immich_db_user }}"
+ DB_PASSWORD: "{{ immich_db_pass }}"
+
+ POSTGRES_DB: "{{ immich_db_db }}"
+ POSTGRES_USER: "{{ immich_db_user }}"
+ POSTGRES_PASSWORD: "{{ immich_db_pass }}"
+
+ REDIS_HOSTNAME: redis
+
+immich_compose:
+ watchtower: false
+ image: ghcr.io/immich-app/immich-server:release
+ volumes:
+ - upload:/usr/src/app/upload
+ file:
+ services:
+ app:
+ depends_on:
+ - database
+ - redis
+
+ machine-learning:
+ image: ghcr.io/immich-app/immich-machine-learning:release
+ volumes:
+ - model-cache:/cache
+ env_file:
+ - service.env
+ restart: always
+ networks:
+ default:
+
+ redis:
+ image: redis:6.2-alpine
+ restart: always
+ networks:
+ default:
+
+ database:
+ image: tensorchord/pgvecto-rs:pg16-v0.2.0
+ env_file:
+ - service.env
+ volumes:
+ - pgdata:/var/lib/postgresql/data
+ restart: always
+ networks:
+ default:
+
+ volumes:
+ upload:
+ pgdata:
+ model-cache:
diff --git a/roles/influxdb/tasks/main.yml b/roles/influxdb/tasks/main.yml
new file mode 100644
index 0000000..dfb6043
--- /dev/null
+++ b/roles/influxdb/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ influxdb_svc }}"
+ env: "{{ influxdb_env }}"
+ compose: "{{ influxdb_compose }}"
+ yml: "{{ influxdb_yml }}"
+ block:
+ - name: Import prepare tasks for common service
+ ansible.builtin.import_tasks: tasks/prepare-common-service.yml
+
+ - name: Template config
+ ansible.builtin.template:
+ src: yml.j2
+ dest: "{{ (service_path, 'influxdb.yml') | path_join }}"
+ mode: "0600"
+ register: cmd_result
+
+ - name: Set the docker force-recreate flag
+ ansible.builtin.set_fact:
+ docker_force_recreate: --force-recreate
+ when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.
+
+ - name: Import start tasks for common service
+ ansible.builtin.import_tasks: tasks/start-common-service.yml
diff --git a/roles/influxdb/vars/main.yml b/roles/influxdb/vars/main.yml
new file mode 100644
index 0000000..1517777
--- /dev/null
+++ b/roles/influxdb/vars/main.yml
@@ -0,0 +1,73 @@
+---
+influxdb_svc:
+ domain: tick.serguzim.me
+ name: influxdb
+ port: 8086
+ data_dir: /var/lib/influxdb2
+
+influxdb_yml:
+ assets-path: ""
+ bolt-path: "{{ (svc.data_dir, 'influxd.bolt') | path_join }}"
+ e2e-testing: false
+ engine-path: "{{ (svc.data_dir, 'engine') | path_join }}"
+ feature-flags: {}
+ http-bind-address: "0.0.0.0:{{ svc.port }}"
+ influxql-max-select-buckets: 0
+ influxql-max-select-point: 0
+ influxql-max-select-series: 0
+ key-name: ""
+ log-level: info
+ nats-max-payload-bytes: 1048576
+ nats-port: 4222
+ no-tasks: false
+ query-concurrency: 10
+ query-initial-memory-bytes: 0
+ query-max-memory-bytes: 0
+ query-memory-bytes: 9223372036854775807
+ query-queue-size: 10
+ reporting-disabled: false
+ secret-store: bolt
+ session-length: 60
+ session-renew-disabled: false
+ storage-cache-max-memory-size: 1073741824
+ storage-cache-snapshot-memory-size: 26214400
+ storage-cache-snapshot-write-cold-duration: 10m0s
+ storage-compact-full-write-cold-duration: 4h0m0s
+ storage-compact-throughput-burst: 50331648
+ storage-max-concurrent-compactions: 0
+ storage-max-index-log-file-size: 1048576
+ storage-retention-check-interval: 30m0s
+ storage-series-file-max-concurrent-snapshot-compactions: 0
+ storage-series-id-set-cache-size: 0
+ storage-shard-precreator-advance-period: 30m0s
+ storage-shard-precreator-check-interval: 10m0s
+ storage-tsm-use-madv-willneed: false
+ storage-validate-keys: false
+ storage-wal-fsync-delay: "0s"
+ store: bolt
+ testing-always-allow-setup: false
+ tls-cert: ""
+ tls-key: ""
+ tls-min-version: "1.2"
+ tls-strict-ciphers: false
+ tracing-type: ""
+ vault-addr: ""
+ vault-cacert: ""
+ vault-capath: ""
+ vault-client-cert: ""
+ vault-client-key: ""
+ vault-client-timeout: "0s"
+ vault-max-retries: 0
+ vault-skip-verify: false
+ vault-tls-server-name: ""
+ vault-token: ""
+
+influxdb_compose:
+ watchtower: false
+ image: influxdb:2.7
+ volumes:
+ - ./influxdb.yml:/etc/influxdb2/config.yml
+ - data:{{ svc.data_dir }}
+ file:
+ volumes:
+ data:
diff --git a/roles/jellyfin/tasks/main.yml b/roles/jellyfin/tasks/main.yml
new file mode 100644
index 0000000..112567d
--- /dev/null
+++ b/roles/jellyfin/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ jellyfin_svc }}"
+ env: "{{ jellyfin_env }}"
+ compose: "{{ jellyfin_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/jellyfin/vars/main.yml b/roles/jellyfin/vars/main.yml
new file mode 100644
index 0000000..17db44a
--- /dev/null
+++ b/roles/jellyfin/vars/main.yml
@@ -0,0 +1,27 @@
+---
+jellyfin_svc:
+ domain: media.serguzim.me
+ name: jellyfin
+ port: 8096
+ db:
+ host: "{{ postgres.host }}"
+ port: "{{ postgres.port }}"
+
+jellyfin_env:
+ JELLYFIN_PublishedServerUrl: https://{{ svc.domain }}
+
+jellyfin_compose:
+ watchtower: true
+ image: jellyfin/jellyfin
+ volumes:
+ - config:/config
+ - cache:/cache
+ - media:/media
+ file:
+ services:
+ app:
+ user: 8096:8096
+ volumes:
+ config:
+ cache:
+ media:
diff --git a/roles/lego/files/hook.sh b/roles/lego/files/hook.sh
new file mode 100644
index 0000000..b060634
--- /dev/null
+++ b/roles/lego/files/hook.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env sh
+
+cp -f "$LEGO_CERT_PATH" /certificates
+cp -f "$LEGO_CERT_KEY_PATH" /certificates
+
+exit 33 # special exit code to signal that the certificate has been updated
diff --git a/roles/lego/files/lego.sh b/roles/lego/files/lego.sh
new file mode 100755
index 0000000..98c7060
--- /dev/null
+++ b/roles/lego/files/lego.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env sh
+
+set -a
+. ./service.env
+set +a
+
+domain="$1"
+action="${2:-renew}"
+
+docker compose run --rm app \
+ --domains "$domain" \
+ "$action" \
+ "--$action-hook" "/config/hook.sh"
+
+if [ "$?" = "33" ] && [ -x "./lego.d/$domain" ];
+then
+ echo "Running hook for $domain"
+ "./lego.d/$domain"
+fi
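+
+# Usage sketch: `./lego.sh db.serguzim.me` renews the certificate; passing an explicit
+# action, e.g. `./lego.sh db.serguzim.me run`, requests the initial issuance.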
diff --git a/roles/lego/files/lego@.timer b/roles/lego/files/lego@.timer
new file mode 100644
index 0000000..284347f
--- /dev/null
+++ b/roles/lego/files/lego@.timer
@@ -0,0 +1,10 @@
+[Unit]
+Description=Renew certificates
+
+[Timer]
+Persistent=true
+OnCalendar=*-*-* 01:15:00
+RandomizedDelaySec=2h
+
+[Install]
+WantedBy=timers.target
diff --git a/roles/lego/files/node002/db.serguzim.me b/roles/lego/files/node002/db.serguzim.me
new file mode 100755
index 0000000..b411f33
--- /dev/null
+++ b/roles/lego/files/node002/db.serguzim.me
@@ -0,0 +1,16 @@
+#!/usr/bin/env sh
+
+domain="db.serguzim.me"
+
+_install() {
+ install --owner=postgres --group=postgres --mode=600 \
+ "$CERTIFICATES_PATH/$domain.$1" \
+ "/var/lib/postgres/data/server.$1"
+}
+
+_install crt
+_install key
+
+sudo -u postgres pg_ctl -D /var/lib/postgres/data/ reload
+
+# vim: ft=sh
diff --git a/roles/lego/files/node002/msrg.cc b/roles/lego/files/node002/msrg.cc
new file mode 100755
index 0000000..7797db0
--- /dev/null
+++ b/roles/lego/files/node002/msrg.cc
@@ -0,0 +1,18 @@
+#!/usr/bin/env sh
+
+domain="msrg.cc"
+
+tmpdir=$(mktemp -d)
+trap 'rm -rf $tmpdir' EXIT
+
+cp "$CERTIFICATES_PATH/$domain.crt" "$tmpdir/fullchain.pem"
+cp "$CERTIFICATES_PATH/$domain.key" "$tmpdir/privkey.pem"
+
+curl \
+ -F submit="submit" \
+ -F token="$WIUWIU_TOKEN" \
+ -F "cert=@$tmpdir/fullchain.pem" \
+ -F "key=@$tmpdir/privkey.pem" \
+ https://cert-upload.wiuwiu.de/
+
+# vim: ft=sh
diff --git a/roles/lego/files/node002/registry.serguzim.me b/roles/lego/files/node002/registry.serguzim.me
new file mode 100755
index 0000000..4f564c7
--- /dev/null
+++ b/roles/lego/files/node002/registry.serguzim.me
@@ -0,0 +1,17 @@
+#!/usr/bin/env sh
+
+domain="registry.serguzim.me"
+
+_install() {
+ install --owner=root --group=root --mode=600 \
+ "$CERTIFICATES_PATH/$domain.$1" \
+ "/opt/services/harbor/server.$1"
+}
+
+_install crt
+_install key
+
+export HARBOR_BUNDLE_DIR=/opt/services/harbor
+$HARBOR_BUNDLE_DIR/harbor/install.sh
+
+# vim: ft=sh
diff --git a/roles/lego/tasks/config.yml b/roles/lego/tasks/config.yml
new file mode 100644
index 0000000..266efcb
--- /dev/null
+++ b/roles/lego/tasks/config.yml
@@ -0,0 +1,19 @@
+---
+- name: Set config path
+ ansible.builtin.set_fact:
+ config_path: "{{ (service_path, 'config') | path_join }}"
+- name: Create config directory
+ ansible.builtin.file:
+ path: "{{ config_path }}"
+ state: directory
+ mode: "0755"
+- name: Copy the acme-dns-accounts
+ ansible.builtin.template:
+ src: "json.j2"
+ dest: "{{ (config_path, 'acme-dns-accounts.json') | path_join }}"
+ mode: "0644"
+- name: Copy the hook script
+ ansible.builtin.copy:
+ src: "hook.sh"
+ dest: "{{ (config_path, 'hook.sh') | path_join }}"
+ mode: "0755"
diff --git a/roles/lego/tasks/lego.d.yml b/roles/lego/tasks/lego.d.yml
new file mode 100644
index 0000000..04acb4b
--- /dev/null
+++ b/roles/lego/tasks/lego.d.yml
@@ -0,0 +1,16 @@
+---
+- name: Set lego.d path
+ ansible.builtin.set_fact:
+ lego_d_path: "{{ (service_path, 'lego.d') | path_join }}"
+- name: Create lego.d directory
+ ansible.builtin.file:
+ path: "{{ lego_d_path }}"
+ state: directory
+ mode: "0755"
+- name: Copy the additional lego scripts
+ ansible.builtin.copy:
+ src: "{{ item }}"
+ dest: "{{ lego_d_path }}"
+ mode: "0755"
+ with_fileglob:
+ - "{{ ansible_facts.hostname }}/*"
diff --git a/roles/lego/tasks/main.yml b/roles/lego/tasks/main.yml
new file mode 100644
index 0000000..3dc6de1
--- /dev/null
+++ b/roles/lego/tasks/main.yml
@@ -0,0 +1,35 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ lego_svc }}"
+ env: "{{ lego_env }}"
+ json: "{{ vault_acmedns_registered | acmedns_to_lego }}"
+ compose: "{{ lego_compose }}"
+ block:
+ - name: Import prepare tasks for common service
+ ansible.builtin.import_tasks: tasks/prepare-common-service.yml
+
+ - name: Create _certificates directory
+ ansible.builtin.file:
+ path: "{{ certificates_path }}"
+ state: directory
+ mode: "0755"
+
+ - name: Import tasks specific to the config directory
+ ansible.builtin.import_tasks: config.yml
+ - name: Import tasks specific to lego.d
+ ansible.builtin.import_tasks: lego.d.yml
+ - name: Import tasks specific to systemd
+ ansible.builtin.import_tasks: systemd.yml
+
+ - name: Copy the run script
+ ansible.builtin.copy:
+ src: "lego.sh"
+ dest: "{{ (service_path, 'lego.sh') | path_join }}"
+ mode: "0755"
+
+ - name: Import tasks to create a service.env file
+ ansible.builtin.import_tasks: tasks/steps/template-service-env.yml
diff --git a/roles/lego/tasks/systemd.yml b/roles/lego/tasks/systemd.yml
new file mode 100644
index 0000000..d31cb31
--- /dev/null
+++ b/roles/lego/tasks/systemd.yml
@@ -0,0 +1,24 @@
+---
+- name: Copy the system service
+ ansible.builtin.template:
+ src: lego@.service.j2
+ dest: /etc/systemd/system/lego@.service
+ mode: "0644"
+ become: true
+- name: Copy the system timer
+ ansible.builtin.copy:
+ src: lego@.timer
+ dest: /etc/systemd/system/lego@.timer
+ mode: "0644"
+ become: true
+- name: Enable the system timers
+ ansible.builtin.systemd_service:
+ name: lego@{{ item }}.timer
+ state: started
+ enabled: true
+ daemon_reload: true
+ loop:
+ - msrg.cc
+ - db.serguzim.me
+ - registry.serguzim.me
+ become: true
diff --git a/roles/lego/templates/lego@.service.j2 b/roles/lego/templates/lego@.service.j2
new file mode 100644
index 0000000..4b310f2
--- /dev/null
+++ b/roles/lego/templates/lego@.service.j2
@@ -0,0 +1,4 @@
+[Service]
+Type=oneshot
+ExecStart={{ service_path }}/lego.sh %i
+WorkingDirectory={{ service_path }}
diff --git a/roles/lego/vars/main.yml b/roles/lego/vars/main.yml
new file mode 100644
index 0000000..3ceec71
--- /dev/null
+++ b/roles/lego/vars/main.yml
@@ -0,0 +1,34 @@
+---
+lego_svc:
+ name: lego
+
+lego_env:
+ ACME_DNS_API_BASE: https://{{ acme_dns.host }}
+ ACME_DNS_STORAGE_PATH: /config/acme-dns-accounts.json
+
+ LEGO_EMAIL: "{{ admin_email }}"
+ LEGO_PATH: /data
+
+ CERTIFICATES_PATH: "{{ certificates_path }}"
+ WIUWIU_TOKEN: "{{ vault_wiuwiu_token }}"
+
+lego_compose:
+ watchtower: false
+ network: false
+ image: goacme/lego
+ volumes:
+ - ./config:/config:ro
+ - "{{ certificates_path }}:/certificates"
+ - data:/data
+ file:
+ services:
+ app:
+ restart: "no"
+ network_mode: "host"
+ entrypoint:
+ - /lego
+ - --accept-tos
+ - --email={{ admin_email }}
+ - --dns=acme-dns
+ volumes:
+ data:
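+
+# The entrypoint above is completed per invocation by lego.sh, which appends
+# --domains, the action (run/renew) and the matching "--<action>-hook" flag.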
diff --git a/roles/linkwarden/tasks/main.yml b/roles/linkwarden/tasks/main.yml
new file mode 100644
index 0000000..4b06747
--- /dev/null
+++ b/roles/linkwarden/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ linkwarden_svc }}"
+ env: "{{ linkwarden_env }}"
+ compose: "{{ linkwarden_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/linkwarden/vars/main.yml b/roles/linkwarden/vars/main.yml
new file mode 100644
index 0000000..ff86056
--- /dev/null
+++ b/roles/linkwarden/vars/main.yml
@@ -0,0 +1,39 @@
+---
+linkwarden_secret: "{{ vault_linkwarden.secret }}"
+
+linkwarden_db_host_port: "{{ postgres.host }}:{{ postgres.port }}"
+linkwarden_db_user: "{{ vault_linkwarden.db.user }}"
+linkwarden_db_pass: "{{ vault_linkwarden.db.pass }}"
+linkwarden_db_database: linkwarden
+
+linkwarden_s3_accesskey: "{{ opentofu.scaleway_service_keys.linkwarden.access_key }}"
+linkwarden_s3_secretkey: "{{ opentofu.scaleway_service_keys.linkwarden.secret_key }}"
+
+linkwarden_svc:
+ domain: bookmarks.serguzim.me
+ name: linkwarden
+ port: 3000
+
+linkwarden_env:
+ NEXTAUTH_SECRET: "{{ linkwarden_secret }}"
+ NEXTAUTH_URL: https://bookmarks.serguzim.me/api/v1/auth
+ DATABASE_URL: postgres://{{ linkwarden_db_user }}:{{ linkwarden_db_pass }}@{{ linkwarden_db_host_port }}/{{ linkwarden_db_database }}
+
+ SPACES_KEY: "{{ linkwarden_s3_accesskey }}"
+ SPACES_SECRET: "{{ linkwarden_s3_secretkey }}"
+ SPACES_ENDPOINT: https://s3.nl-ams.scw.cloud
+ SPACES_BUCKET_NAME: linkwarden.serguzim.me
+ SPACES_REGION: nl-ams
+ SPACES_FORCE_PATH_STYLE: false
+
+ NEXT_PUBLIC_DISABLE_REGISTRATION: true
+ NEXT_PUBLIC_CREDENTIALS_ENABLED: true
+ NEXT_PUBLIC_AUTHENTIK_ENABLED: false
+ AUTHENTIK_CUSTOM_NAME: auth.serguzim.me
+ AUTHENTIK_ISSUER: https://auth.serguzim.me/application/o/bookmarks-serguzim-me
+ AUTHENTIK_CLIENT_ID: "{{ vault_linkwarden.oidc_client.id }}"
+ AUTHENTIK_CLIENT_SECRET: "{{ vault_linkwarden.oidc_client.secret }}"
+
+linkwarden_compose:
+ watchtower: true
+ image: ghcr.io/linkwarden/linkwarden:latest
diff --git a/roles/mailcow/tasks/main.yml b/roles/mailcow/tasks/main.yml
new file mode 100644
index 0000000..7f8ffde
--- /dev/null
+++ b/roles/mailcow/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ mailcow_svc }}"
+ block:
+ - name: Import tasks to template the site for the reverse proxy
+ ansible.builtin.import_tasks: tasks/steps/template-site-config.yml
diff --git a/roles/mailcow/vars/main.yml b/roles/mailcow/vars/main.yml
new file mode 100644
index 0000000..5cccc3c
--- /dev/null
+++ b/roles/mailcow/vars/main.yml
@@ -0,0 +1,7 @@
+---
+mailcow_svc:
+ name: mailcow
+ domain: mail.serguzim.me
+ docker_host: host.docker.internal
+ port: 3004
+ additional_domains: "{{ ['autodiscover', 'autoconfig'] | product(vault_mailcow.domains) | map('join', '.') }}"
diff --git a/roles/minecraft_2/tasks/main.yml b/roles/minecraft_2/tasks/main.yml
new file mode 100644
index 0000000..fd5279c
--- /dev/null
+++ b/roles/minecraft_2/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ minecraft_2_svc }}"
+ env: "{{ minecraft_2_env }}"
+ compose: "{{ minecraft_2_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/minecraft_2/vars/main.yml b/roles/minecraft_2/vars/main.yml
new file mode 100644
index 0000000..e14e7a1
--- /dev/null
+++ b/roles/minecraft_2/vars/main.yml
@@ -0,0 +1,68 @@
+---
+minecraft_2_svc:
+ name: minecraft-2
+
+minecraft_2_env:
+ ALLOW_FLIGHT: true
+ ALLOW_NETHER: true
+ ANNOUNCE_PLAYER_ACHIEVEMENTS: true
+ BROADCAST_CONSOLE_TO_OPS: true
+ BROADCAST_RCON_TO_OPS: true
+ CONSOLE: false
+ ENABLE_AUTOPAUSE: true
+ ENABLE_COMMAND_BLOCK: true
+ ENABLE_JMX: false
+ ENABLE_RCON: true
+ ENABLE_STATUS: true
+ ENABLE_WHITELIST: true
+ ENFORCE_WHITELIST: true
+ ENTITY_BROADCAST_RANGE_PERCENTAGE: 100
+ EULA: true
+ FORCE_GAMEMODE: false
+ FUNCTION_PERMISSION_LEVEL: 2
+ GENERATE_STRUCTURES: true
+ HARDCORE: false
+ ICON:
+ LEVEL_TYPE: DEFAULT
+ MAX_BUILD_HEIGHT: 512
+ MAX_MEMORY: 4G
+ MAX_TICK_TIME: -1
+ MAX_PLAYERS: 64
+ MAX_WORLD_SIZE: 30000000
+ MODE: survival
+ MOTD:
+ NETWORK_COMPRESSION_THRESHOLD: 256
+ PVP: true
+ SERVER_NAME: minecraft.serguzim.me
+ SNOOPER_ENABLED: false
+ SPAWN_ANIMALS: true
+ SPAWN_MONSTERS: true
+ SPAWN_NPCS: true
+ SPAWN_PROTECTION: 0
+ SYNC_CHUNK_WRITES: true
+ TYPE: PAPER
+ ONLINE_MODE: true
+ OP_PERMISSION_LEVEL: 4
+ OPS: "{{ vault_minecraft_2.ops }}"
+ OVERRIDE_ICON: true
+ OVERRIDE_SERVER_PROPERTIES: true
+ PLAYER_IDLE_TIMEOUT: 0
+ PREVENT_PROXY_CONNECTIONS: false
+ SEED: "{{ vault_minecraft_2.seed }}"
+ USE_NATIVE_TRANSPORT: true
+ VERSION: LATEST
+ VIEW_DISTANCE: 10
+ WHITELIST: "{{ vault_minecraft_2.whitelist }}"
+
+minecraft_2_compose:
+ watchtower: false
+ image: itzg/minecraft-server
+ volumes:
+ - data:/data
+ file:
+ services:
+ app:
+ ports:
+ - 25565:25565
+ volumes:
+ data:
diff --git a/roles/minio/tasks/main.yml b/roles/minio/tasks/main.yml
new file mode 100644
index 0000000..17d9abb
--- /dev/null
+++ b/roles/minio/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ minio_svc }}"
+ env: "{{ minio_env }}"
+ compose: "{{ minio_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/minio/vars/main.yml b/roles/minio/vars/main.yml
new file mode 100644
index 0000000..ca82ffe
--- /dev/null
+++ b/roles/minio/vars/main.yml
@@ -0,0 +1,34 @@
+---
+minio_svc:
+ domain: s3.serguzim.me
+ name: minio
+ port: 9000
+ caddy_extra: |
+ @nocache {
+ query nocache=*
+ }
+ header @nocache "Cache-Control" "no-store, no-cache"
+ extra_svcs:
+ - domain: console.s3.serguzim.me
+ docker_host: minio
+ port: 9001
+
+minio_env:
+ MINIO_SERVER_URL: https://{{ svc.domain }}/
+ MINIO_BROWSER_REDIRECT_URL: https://console.{{ svc.domain }}
+ MINIO_VOLUMES: /data
+
+ MINIO_ROOT_USER: "{{ vault_minio.user }}"
+ MINIO_ROOT_PASSWORD: "{{ vault_minio.pass }}"
+
+minio_compose:
+ watchtower: true
+ image: minio/minio
+ volumes:
+ - data:/data
+ file:
+ services:
+ app:
+ command: server --console-address ":9001"
+ volumes:
+ data:
diff --git a/roles/ntfy/tasks/main.yml b/roles/ntfy/tasks/main.yml
new file mode 100644
index 0000000..4026612
--- /dev/null
+++ b/roles/ntfy/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ ntfy_svc }}"
+ compose: "{{ ntfy_compose }}"
+ env: "{{ ntfy_env }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/ntfy/vars/main.yml b/roles/ntfy/vars/main.yml
new file mode 100644
index 0000000..b881fe4
--- /dev/null
+++ b/roles/ntfy/vars/main.yml
@@ -0,0 +1,55 @@
+---
+ntfy_svc:
+ name: ntfy
+ domain: push.serguzim.me
+ port: 80
+
+ntfy_env:
+ TZ: "{{ timezone }}"
+
+ NTFY_BASE_URL: "https://{{ ntfy_svc.domain }}"
+
+ NTFY_CACHE_FILE: /var/cache/ntfy/cache.db
+ NTFY_CACHE_DURATION: "12h"
+
+ NTFY_BEHIND_PROXY: true
+
+ NTFY_AUTH_FILE: /var/lib/ntfy/user.db
+ NTFY_AUTH_DEFAULT_ACCESS: "deny-all"
+
+ NTFY_ATTACHMENT_CACHE_DIR: "/var/cache/ntfy/attachments"
+ NTFY_ATTACHMENT_TOTAL_SIZE_LIMIT: "5G"
+ NTFY_ATTACHMENT_FILE_SIZE_LIMIT: "15M"
+ NTFY_ATTACHMENT_EXPIRY_DURATION: "3h"
+
+ NTFY_KEEPALIVE_INTERVAL: "45s"
+ NTFY_MANAGER_INTERVAL: "60m"
+
+ NTFY_ENABLE_SIGNUP: false
+ NTFY_ENABLE_LOGIN: true
+ NTFY_ENABLE_RESERVATIONS: true
+
+ NTFY_GLOBAL_TOPIC_LIMIT: 15000
+
+ NTFY_VISITOR_SUBSCRIPTION_LIMIT: 30
+ NTFY_VISITOR_REQUEST_LIMIT_BURST: 60
+ NTFY_VISITOR_REQUEST_LIMIT_REPLENISH: "5s"
+ NTFY_VISITOR_ATTACHMENT_TOTAL_SIZE_LIMIT: "100M"
+ NTFY_VISITOR_ATTACHMENT_DAILY_BANDWIDTH_LIMIT: "500M"
+
+ NTFY_ENABLE_METRICS: true
+
+ntfy_compose:
+ watchtower: true
+ image: binwiederhier/ntfy
+ volumes:
+ - cache:/var/cache/ntfy
+ - data:/var/lib/ntfy
+ file:
+ services:
+ app:
+ command:
+ - serve
+ volumes:
+ cache:
+ data:
diff --git a/roles/reitanlage_oranienburg/tasks/main.yml b/roles/reitanlage_oranienburg/tasks/main.yml
new file mode 100644
index 0000000..a41843c
--- /dev/null
+++ b/roles/reitanlage_oranienburg/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ reitanlage_oranienburg_svc }}"
+ compose: "{{ reitanlage_oranienburg_compose }}"
+ block:
+ - name: Import prepare tasks for common service
+ ansible.builtin.import_tasks: tasks/prepare-common-service.yml
+
+ - name: Get the Dockerfile
+ ansible.builtin.get_url:
+ url: https://raw.githubusercontent.com/getgrav/docker-grav/master/Dockerfile
+ dest: "{{ (service_path, 'Dockerfile') | path_join }}"
+ mode: "0644"
+ register: cmd_result
+
+ - name: Set the docker rebuild flag
+ ansible.builtin.set_fact:
+ docker_rebuild: true
+ when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.
+
+ - name: Import start tasks for common service
+ ansible.builtin.import_tasks: tasks/start-common-service.yml
diff --git a/roles/reitanlage_oranienburg/vars/main.yml b/roles/reitanlage_oranienburg/vars/main.yml
new file mode 100644
index 0000000..ecc55da
--- /dev/null
+++ b/roles/reitanlage_oranienburg/vars/main.yml
@@ -0,0 +1,25 @@
+---
+reitanlage_oranienburg_svc:
+ name: reitanlage-oranienburg
+ domain: reitanlage-oranienburg.de
+ www_domain: true
+ port: 80
+ caddy_extra: |
+ import analytics
+
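+ # Cache lifetimes: one year for images; 2629800s is one average month for assets/themes.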
+ header /images/* Cache-Control "max-age=31536000"
+ header /assets/* Cache-Control "max-age=2629800"
+ header /user/themes/* Cache-Control "max-age=2629800"
+
+reitanlage_oranienburg_compose:
+ watchtower: false
+ image: registry.serguzim.me/library/grav
+ volumes:
+ - data:/var/www/html/
+ file:
+ services:
+ app:
+ build:
+ context: .
+ volumes:
+ data:
diff --git a/roles/shlink/tasks/main.yml b/roles/shlink/tasks/main.yml
new file mode 100644
index 0000000..bc0230e
--- /dev/null
+++ b/roles/shlink/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ shlink_svc }}"
+ env: "{{ shlink_env }}"
+ compose: "{{ shlink_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/shlink/vars/main.yml b/roles/shlink/vars/main.yml
new file mode 100644
index 0000000..beda88e
--- /dev/null
+++ b/roles/shlink/vars/main.yml
@@ -0,0 +1,31 @@
+---
+shlink_svc:
+ domain: msrg.cc
+ additional_domains:
+ - "emgauwa.app"
+ name: shlink
+ port: 8080
+
+shlink_env:
+ DEFAULT_DOMAIN: "{{ shlink_svc.domain }}"
+ IS_HTTPS_ENABLED: true
+ TIMEZONE: "{{ timezone }}"
+
+ DEFAULT_SHORT_CODES_LENGTH: 8
+ MULTI_SEGMENT_SLUGS_ENABLED: false
+ SHORT_URL_TRAILING_SLASH: true
+ REDIRECT_APPEND_EXTRA_PATH: true
+ DEFAULT_BASE_URL_REDIRECT: "https://www.serguzim.me/"
+
+ DB_DRIVER: postgres
+ DB_HOST: "{{ postgres.host }}"
+ DB_PORT: "{{ postgres.port }}"
+ DB_NAME: shlink
+ DB_USER: "{{ vault_shlink.db.user }}"
+ DB_PASSWORD: "{{ vault_shlink.db.pass }}"
+
+ GEOLITE_LICENSE_KEY: "{{ vault_shlink.geolite_key }}"
+
+shlink_compose:
+ watchtower: true
+ image: shlinkio/shlink
diff --git a/roles/synapse/files/msrg.cc.log.config b/roles/synapse/files/msrg.cc.log.config
new file mode 100644
index 0000000..03a08cb
--- /dev/null
+++ b/roles/synapse/files/msrg.cc.log.config
@@ -0,0 +1,22 @@
+version: 1
+
+formatters:
+ precise:
+ format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
+
+handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: precise
+
+loggers:
+ synapse.storage.SQL:
+ # beware: increasing this to DEBUG will make synapse log sensitive
+ # information such as access tokens.
+ level: INFO
+
+root:
+ level: INFO
+ handlers: [console]
+
+disable_existing_loggers: false
\ No newline at end of file
diff --git a/roles/synapse/tasks/main.yml b/roles/synapse/tasks/main.yml
new file mode 100644
index 0000000..ff17bed
--- /dev/null
+++ b/roles/synapse/tasks/main.yml
@@ -0,0 +1,44 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ synapse_svc }}"
+ env: "{{ synapse_env }}"
+ compose: "{{ synapse_compose }}"
+ yml: "{{ synapse_yml }}"
+ block:
+ - name: Import prepare tasks for common service
+ ansible.builtin.import_tasks: tasks/prepare-common-service.yml
+
+ - name: Set synapse config path
+ ansible.builtin.set_fact:
+ config_path: "{{ (service_path, svc.config_path) | path_join }}"
+
+ - name: Create config directory
+ ansible.builtin.file:
+ path: "{{ config_path }}"
+ state: directory
+ mode: "0755"
+
+ - name: Template config
+ ansible.builtin.template:
+ src: yml.j2
+ dest: "{{ (config_path, 'homeserver.yaml') | path_join }}"
+ mode: "0644"
+
+ - name: Copy the log config
+ ansible.builtin.copy:
+ src: msrg.cc.log.config
+ dest: "{{ (config_path, 'msrg.cc.log.config') | path_join }}"
+ mode: "0644"
+
+ - name: Copy the signing key
+ ansible.builtin.copy:
+ content: "{{ vault_synapse.signing_key }}"
+ dest: "{{ (config_path, 'msrg.cc.signing.key') | path_join }}"
+ mode: "0644"
+
+ - name: Import start tasks for common service
+ ansible.builtin.import_tasks: tasks/start-common-service.yml
diff --git a/roles/synapse/vars/main.yml b/roles/synapse/vars/main.yml
new file mode 100644
index 0000000..3e615d6
--- /dev/null
+++ b/roles/synapse/vars/main.yml
@@ -0,0 +1,123 @@
+---
+synapse_svc:
+ name: synapse
+ domain: matrix.serguzim.me
+ docker_host: synapse-admin
+ port: 80
+ caddy_extra: |
+ handle /_matrix/* {
+ reverse_proxy synapse:8008
+ }
+ handle /_synapse/* {
+ reverse_proxy synapse:8008
+ }
+ extra_svcs:
+ - domain: matrix.serguzim.me:8448
+ additional_domains:
+ - serguzim.me:8448
+ docker_host: synapse
+ port: 8008
+ db:
+ host: "{{ postgres.host }}"
+ database: synapse
+ user: "{{ vault_synapse.db.user }}"
+ pass: "{{ vault_synapse.db.pass }}"
+ config_path: config
+
+synapse_env:
+ SYNAPSE_CONFIG_PATH: "{{ ('/', svc.config_path) | path_join }}"
+ REACT_APP_SERVER: https://matrix.serguzim.me
+
+synapse_yml:
+ server_name: msrg.cc
+ pid_file: "{{ (svc.config_path, 'homeserver.pid') | path_join }}"
+ public_baseurl: https://matrix.serguzim.me/
+ allow_public_rooms_without_auth: true
+ allow_public_rooms_over_federation: true
+
+ listeners:
+ - port: 8008
+ tls: false
+ type: http
+ x_forwarded: true
+ resources:
+ - names:
+ - client
+ - federation
+ - metrics
+ compress: false
+
+ admin_contact: mailto:{{ admin_email }}
+
+ acme:
+ enabled: false
+
+ database:
+ name: psycopg2
+ args:
+ user: "{{ svc.db.user }}"
+ password: "{{ svc.db.pass }}"
+ database: "{{ svc.db.database }}"
+ host: "{{ svc.db.host }}"
+ cp_min: 5
+ cp_max: 10
+
+ log_config: "{{ (svc.config_path, 'msrg.cc.log.config') | path_join }}"
+ media_store_path: /media_store
+ max_upload_size: 500M
+ enable_registration: false
+ enable_metrics: true
+ report_stats: true
+
+ macaroon_secret_key: "{{ vault_synapse.macaroon_secret_key }}"
+ form_secret: "{{ vault_synapse.form_secret }}"
+ signing_key_path: "{{ (svc.config_path, 'msrg.cc.signing.key') | path_join }}"
+
+ trusted_key_servers:
+ - server_name: matrix.org
+ suppress_key_server_warning: true
+
+ oidc_providers:
+ - idp_id: auth_serguzim_me
+ idp_name: auth.serguzim.me
+ issuer: https://auth.serguzim.me/application/o/matrix_serguzim_me/
+ client_id: "{{ vault_synapse.oidc_client.id }}"
+ client_secret: "{{ vault_synapse.oidc_client.secret }}"
+ scopes:
+ - openid
+ - profile
+ - email
+ user_mapping_provider:
+ config:
+ localpart_template: "{{ '{{ user.preferred_username }}' }}"
+ display_name_template: "{{ '{{ user.name }}' }}"
+
+ email:
+ smtp_host: mail.serguzim.me
+ smtp_port: 587
+ smtp_user: matrix@serguzim.me
+ smtp_pass: "{{ vault_synapse.mail.pass }}"
+ require_transport_security: true
+ notif_from: Matrix
+
+synapse_compose:
+ watchtower: true
+ image: ghcr.io/element-hq/synapse:latest
+ volumes:
+ - ./config:/config
+ - media_store:/media_store
+ file:
+ services:
+ synapse-admin:
+ image: awesometechnologies/synapse-admin
+ restart: always
+ labels:
+ com.centurylinklabs.watchtower.enable: true
+ env_file:
+ - service.env
+ networks:
+ apps:
+ aliases:
+ - synapse-admin
+ volumes:
+ media_store:
diff --git a/roles/tandoor/tasks/main.yml b/roles/tandoor/tasks/main.yml
new file mode 100644
index 0000000..2c9b7fd
--- /dev/null
+++ b/roles/tandoor/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ tandoor_svc }}"
+ env: "{{ tandoor_env }}"
+ compose: "{{ tandoor_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/tandoor/vars/main.yml b/roles/tandoor/vars/main.yml
new file mode 100644
index 0000000..0694ebd
--- /dev/null
+++ b/roles/tandoor/vars/main.yml
@@ -0,0 +1,63 @@
+---
+tandoor_svc:
+ domain: recipes.serguzim.me
+ name: tandoor
+ port: 80
+ db:
+ host: "{{ postgres.host }}"
+ port: "{{ postgres.port }}"
+ database: tandoor
+ user: "{{ vault_tandoor.db.user }}"
+ pass: "{{ vault_tandoor.db.pass }}"
+
+tandoor_env:
+ DEBUG: 0
+ SQL_DEBUG: 0
+
+ ALLOWED_HOSTS: recipes.serguzim.me
+ SECRET_KEY: "{{ vault_tandoor.secret_key }}"
+ TZ: "{{ timezone }}"
+
+ DB_ENGINE: django.db.backends.postgresql
+ DB_OPTIONS: '{"sslmode": "require"}'
+ POSTGRES_HOST: "{{ svc.db.host }}"
+ POSTGRES_PORT: "{{ svc.db.port }}"
+ POSTGRES_DB: "{{ svc.db.database }}"
+ POSTGRES_USER: "{{ svc.db.user }}"
+ POSTGRES_PASSWORD: "{{ svc.db.pass }}"
+
+ SHOPPING_MIN_AUTOSYNC_INTERVAL: 5
+
+ ENABLE_SIGNUP: 0
+ ENABLE_METRICS: 1
+ ENABLE_PDF_EXPORT: 1
+
+ SOCIAL_DEFAULT_ACCESS: 1
+ SOCIAL_DEFAULT_GROUP: guest
+
+tandoor_compose:
+ watchtower: true
+ image: nginx:mainline-alpine
+ volumes:
+ - nginx_config:/etc/nginx/conf.d:ro
+ - staticfiles:/static
+ - mediafiles:/media
+ file:
+ services:
+ web_recipes:
+ image: vabene1111/recipes
+ restart: always
+ labels:
+ com.centurylinklabs.watchtower.enable: true
+ env_file:
+ - service.env
+ volumes:
+ - staticfiles:/opt/recipes/staticfiles
+ - nginx_config:/opt/recipes/nginx/conf.d
+ - mediafiles:/opt/recipes/mediafiles
+ networks:
+ default:
+ volumes:
+ nginx_config:
+ staticfiles:
+ mediafiles:
diff --git a/roles/teamspeak_fallback/files/docker-compose.yml b/roles/teamspeak_fallback/files/docker-compose.yml
new file mode 100644
index 0000000..32ff3d2
--- /dev/null
+++ b/roles/teamspeak_fallback/files/docker-compose.yml
@@ -0,0 +1,19 @@
+services:
+ teamspeak:
+ image: teamspeak
+ restart: always
+ ports:
+ - 9987:9987/udp
+ - 10011:10011
+ - 30033:30033
+ environment:
+ TS3SERVER_DB_PLUGIN: ts3db_sqlite3
+ TS3SERVER_DB_SQLCREATEPATH: create_sqlite
+ TS3SERVER_LICENSE: accept
+ volumes:
+ - data:/var/ts3server/
+
+volumes:
+ data:
+ external: true
+ name: teamspeak-fallback-data
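+
+# Note: the external volume must exist before the first start, e.g.
+# `docker volume create teamspeak-fallback-data`.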
diff --git a/roles/teamspeak_fallback/tasks/main.yml b/roles/teamspeak_fallback/tasks/main.yml
new file mode 100644
index 0000000..9aae8c9
--- /dev/null
+++ b/roles/teamspeak_fallback/tasks/main.yml
@@ -0,0 +1,36 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ teamspeak_fallback_svc }}"
+ block:
+ - name: Import tasks to create service directory
+ ansible.builtin.import_tasks: tasks/steps/create-service-directory.yml
+
+ - name: Copy the docker-compose file
+ ansible.builtin.copy:
+ src: docker-compose.yml
+ dest: "{{ (service_path, 'docker-compose.yml') | path_join }}"
+ mode: "0644"
+
+ - name: Template the conditional-start script
+ ansible.builtin.template:
+ src: conditional-start.sh.j2
+ dest: "{{ (service_path, 'conditional-start.sh') | path_join }}"
+ mode: "0755"
+
+ - name: Copy the system service
+ ansible.builtin.template:
+ src: teamspeak-fallback.service.j2
+ dest: /etc/systemd/system/teamspeak-fallback.service
+ mode: "0644"
+ become: true
+ - name: Enable the system service
+ ansible.builtin.systemd_service:
+ name: teamspeak-fallback.service
+ state: started
+ enabled: true
+ daemon_reload: true
+ become: true
diff --git a/roles/teamspeak_fallback/templates/conditional-start.sh.j2 b/roles/teamspeak_fallback/templates/conditional-start.sh.j2
new file mode 100644
index 0000000..e06184a
--- /dev/null
+++ b/roles/teamspeak_fallback/templates/conditional-start.sh.j2
@@ -0,0 +1,18 @@
+#!/usr/bin/env sh
+
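+# Watch the primary server; run the local fallback only while the primary is unreachable.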
+while true
+do
+ if nc -z -w 3 "{{ teamspeak_fallback_check_server }}" "{{ teamspeak_fallback_check_port }}"
+ then
+ if docker compose ps --services | grep teamspeak >/dev/null; then
+ echo "Stopping Server"
+ docker compose down
+ fi
+ else
+ if ! docker compose ps --services | grep teamspeak >/dev/null; then
+ echo "Starting Server"
+ docker compose up -d --pull=always
+ fi
+ fi
+ sleep 2
+done
diff --git a/roles/teamspeak_fallback/templates/teamspeak-fallback.service.j2 b/roles/teamspeak_fallback/templates/teamspeak-fallback.service.j2
new file mode 100644
index 0000000..6420535
--- /dev/null
+++ b/roles/teamspeak_fallback/templates/teamspeak-fallback.service.j2
@@ -0,0 +1,13 @@
+[Unit]
+Description=Teamspeak Fallback Starter
+After=network.target
+
+[Service]
+Type=simple
+ExecStart={{ service_path }}/conditional-start.sh
+WorkingDirectory={{ service_path }}
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/teamspeak_fallback/vars/main.yml b/roles/teamspeak_fallback/vars/main.yml
new file mode 100644
index 0000000..154cf00
--- /dev/null
+++ b/roles/teamspeak_fallback/vars/main.yml
@@ -0,0 +1,6 @@
+---
+teamspeak_fallback_check_server: ts.sneiso.eu
+teamspeak_fallback_check_port: 30033
+
+teamspeak_fallback_svc:
+ name: teamspeak-fallback
diff --git a/roles/telegraf/tasks/main.yml b/roles/telegraf/tasks/main.yml
new file mode 100644
index 0000000..7db7dc4
--- /dev/null
+++ b/roles/telegraf/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ telegraf_svc }}"
+ env: "{{ telegraf_env }}"
+ compose: "{{ telegraf_compose }}"
+ block:
+ - name: Import prepare tasks for common service
+ ansible.builtin.import_tasks: tasks/prepare-common-service.yml
+
+ - name: Template config
+ ansible.builtin.template:
+ src: telegraf.conf.j2
+ dest: "{{ (service_path, 'telegraf.conf') | path_join }}"
+ mode: "0664"
+ register: cmd_result
+
+ - name: Set the docker force-recreate flag
+ ansible.builtin.set_fact:
+ docker_force_recreate: --force-recreate
+ when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.
+
+ - name: Import start tasks for common service
+ ansible.builtin.import_tasks: tasks/start-common-service.yml
diff --git a/roles/telegraf/templates/telegraf.conf.j2 b/roles/telegraf/templates/telegraf.conf.j2
new file mode 100644
index 0000000..654b933
--- /dev/null
+++ b/roles/telegraf/templates/telegraf.conf.j2
@@ -0,0 +1,47 @@
+[agent]
+ interval = "60s"
+ round_interval = true
+ metric_batch_size = 1000
+ metric_buffer_limit = 10000
+ collection_jitter = "0s"
+ flush_interval = "10s"
+ flush_jitter = "0s"
+ precision = ""
+ hostname = "node002.serguzim.me"
+ omit_hostname = false
+
+[[outputs.influxdb_v2]]
+ urls = ["{{ svc.influxdb.url }}"]
+ token = "{{ svc.influxdb.token }}"
+ organization = "{{ svc.influxdb.organization }}"
+ bucket = "{{ svc.influxdb.bucket }}"
+
+[[inputs.prometheus]]
+ urls = [
+ {%- for url in svc.prometheus_unprotected.urls -%}
+ "{{ url }}",
+ {%- endfor -%}
+ ]
+
+[[inputs.prometheus]]
+ urls = [
+ {%- for url in svc.prometheus_protected.urls -%}
+ "{{ url }}",
+ {%- endfor -%}
+ ]
+
+ bearer_token_string = "{{ svc.prometheus_protected.bearer_token }}"
+
+[[inputs.postgresql]]
+ address = "postgres://{{ svc.postgresql.user }}:{{ svc.postgresql.pass }}@{{ svc.postgresql.host }}:{{ svc.postgresql.port }}/{{ svc.postgresql.database }}?sslmode=verify-full"
+ ignored_databases = ["postgres", "template0", "template1"]
+ prepared_statements = true
+
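+# Only collect logs from containers that opt in via the com.influxdata.telegraf.enable label.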
+[[inputs.docker_log]]
+ endpoint = "{{ svc.docker_log.endpoint }}"
+
+ docker_label_include = [
+ "com.influxdata.telegraf.enable"
+ ]
+
+ source_tag = {{ svc.docker_log.source_tag|lower }}
diff --git a/roles/telegraf/vars/main.yml b/roles/telegraf/vars/main.yml
new file mode 100644
index 0000000..363ed13
--- /dev/null
+++ b/roles/telegraf/vars/main.yml
@@ -0,0 +1,44 @@
+---
+telegraf_svc:
+ name: telegraf
+ influxdb:
+ url: https://tick.serguzim.me
+ token: "{{ vault_telegraf.influxdb_token }}"
+ organization: serguzim.net
+ bucket: metrics
+ prometheus_unprotected:
+ urls:
+ - http://node002.vpn.serguzim.net:2019/metrics
+ - https://matrix.serguzim.me/_synapse/metrics
+ - https://push.serguzim.me/metrics
+ - https://tick.serguzim.me/metrics
+ - https://todo.serguzim.me/api/v1/metrics
+ prometheus_protected:
+ urls:
+ - https://ci.serguzim.me/metrics
+ - https://git.serguzim.me/metrics
+ bearer_token: "{{ vault_metrics_token }}"
+ postgresql:
+ user: "{{ vault_telegraf.db.user }}"
+ pass: "{{ vault_telegraf.db.pass }}"
+ host: "{{ postgres.host }}"
+ port: "{{ postgres.port }}"
+ database: telegraf
+ docker_log:
+ endpoint: unix:///var/run/docker.sock
+ source_tag: false
+
+telegraf_compose:
+ watchtower: false
+ image: telegraf:1.28
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ - ./telegraf.conf:/etc/telegraf/telegraf.conf:ro
+ file:
+ services:
+ app:
+ user: telegraf
+ group_add:
+ - "972" # docker group on host
+ volumes:
+ data:
diff --git a/roles/tinytinyrss/tasks/main.yml b/roles/tinytinyrss/tasks/main.yml
new file mode 100644
index 0000000..be3db8a
--- /dev/null
+++ b/roles/tinytinyrss/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ tinytinyrss_svc }}"
+ env: "{{ tinytinyrss_env }}"
+ compose: "{{ tinytinyrss_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/tinytinyrss/vars/main.yml b/roles/tinytinyrss/vars/main.yml
new file mode 100644
index 0000000..771ef34
--- /dev/null
+++ b/roles/tinytinyrss/vars/main.yml
@@ -0,0 +1,61 @@
+---
+tinytinyrss_svc:
+ domain: rss.serguzim.me
+ name: tinytinyrss
+ port: 80
+ db:
+ host: "{{ postgres.host }}"
+ port: "{{ postgres.port }}"
+ database: tinytinyrss
+ user: "{{ vault_tinytinyrss.db.user }}"
+ pass: "{{ vault_tinytinyrss.db.pass }}"
+
+tinytinyrss_env:
+ TTRSS_DB_TYPE: pgsql
+ TTRSS_DB_HOST: "{{ svc.db.host }}"
+ TTRSS_DB_NAME: "{{ svc.db.database }}"
+ TTRSS_DB_USER: "{{ svc.db.user }}"
+ TTRSS_DB_PASS: "{{ svc.db.pass }}"
+
+ TTRSS_SELF_URL_PATH: https://{{ svc.domain }}/tt-rss/
+
+ APP_UPSTREAM: tt-rss
+
+ # Workaround for this bug:
+ # could not open certificate file "/root/.postgresql/postgresql.crt": Permission denied
+ PGSSLCERT: /tmp/postgresql.crt
+
+tinytinyrss_compose:
+ watchtower: false
+ image: cthulhoo/ttrss-web-nginx
+ volumes:
+ - app:/var/www/html:ro
+ file:
+ services:
+ app:
+ depends_on:
+ - tt-rss
+ tt-rss:
+ image: cthulhoo/ttrss-fpm-pgsql-static
+ restart: always
+ env_file:
+ - service.env
+ volumes:
+ - app:/var/www/html
+ networks:
+ default:
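+ # runs updater.sh to fetch feeds, sharing the app volume with tt-rss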
+ updater:
+ image: cthulhoo/ttrss-fpm-pgsql-static
+ restart: always
+ env_file:
+ - service.env
+ volumes:
+ - app:/var/www/html
+ depends_on:
+ - tt-rss
+ command: /opt/tt-rss/updater.sh
+ networks:
+ default:
+ volumes:
+ app:
diff --git a/roles/umami/tasks/main.yml b/roles/umami/tasks/main.yml
new file mode 100644
index 0000000..ede7369
--- /dev/null
+++ b/roles/umami/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ umami_svc }}"
+ env: "{{ umami_env }}"
+ compose: "{{ umami_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/umami/vars/main.yml b/roles/umami/vars/main.yml
new file mode 100644
index 0000000..4240443
--- /dev/null
+++ b/roles/umami/vars/main.yml
@@ -0,0 +1,25 @@
+---
+umami_db_host: "{{ postgres.host }}"
+umami_db_user: "{{ vault_umami.db.user }}"
+umami_db_pass: "{{ vault_umami.db.pass }}"
+umami_db_database: umami
+
+umami_hash_salt: "{{ vault_umami.hash_salt }}"
+
+umami_docker_image: docker.umami.dev/umami-software/umami:postgresql-latest
+
+umami_svc:
+ domain: analytics.serguzim.me
+ name: umami
+ port: 3000
+
+umami_env:
+ DATABASE_URL: postgres://{{ umami_db_user }}:{{ umami_db_pass }}@{{ umami_db_host }}/{{ umami_db_database }}
+ DATABASE_TYPE: postgresql
+ FORCE_SSL: 1
+ HASH_SALT: "{{ umami_hash_salt }}"
+ CLIENT_IP_HEADER: X-Analytics-IP
+
+umami_compose:
+ watchtower: true
+ image: "{{ umami_docker_image }}"
diff --git a/roles/uptime_kuma/tasks/main.yml b/roles/uptime_kuma/tasks/main.yml
new file mode 100644
index 0000000..d0e8e13
--- /dev/null
+++ b/roles/uptime_kuma/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ uptime_kuma_svc }}"
+ env: "{{ uptime_kuma_env }}"
+ compose: "{{ uptime_kuma_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/uptime_kuma/vars/main.yml b/roles/uptime_kuma/vars/main.yml
new file mode 100644
index 0000000..093005b
--- /dev/null
+++ b/roles/uptime_kuma/vars/main.yml
@@ -0,0 +1,16 @@
+---
+uptime_kuma_svc:
+ domain: status.serguzim.me
+ additional_domains:
+ - status.serguzim.net
+ name: uptime-kuma
+ port: 3001
+
+uptime_kuma_compose:
+ watchtower: true
+ image: louislam/uptime-kuma:1
+ volumes:
+ - data:/app/data
+ file:
+ volumes:
+ data:
diff --git a/roles/vikunja/tasks/main.yml b/roles/vikunja/tasks/main.yml
new file mode 100644
index 0000000..1e822ba
--- /dev/null
+++ b/roles/vikunja/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ vikunja_svc }}"
+ yml: "{{ vikunja_yml }}"
+ compose: "{{ vikunja_compose }}"
+ block:
+ - name: Import prepare tasks for common service
+ ansible.builtin.import_tasks: tasks/prepare-common-service.yml
+
+ - name: Template config
+ ansible.builtin.template:
+ src: yml.j2
+ dest: "{{ (service_path, 'config.yml') | path_join }}"
+ mode: "0600"
+ register: cmd_result
+
+ - name: Set the docker force-recreate flag
+ ansible.builtin.set_fact:
+ docker_force_recreate: --force-recreate
+ when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.
+
+ - name: Import start tasks for common service
+ ansible.builtin.import_tasks: tasks/start-common-service.yml
diff --git a/roles/vikunja/vars/main.yml b/roles/vikunja/vars/main.yml
new file mode 100644
index 0000000..66c7771
--- /dev/null
+++ b/roles/vikunja/vars/main.yml
@@ -0,0 +1,60 @@
+---
+vikunja_svc:
+ domain: todo.serguzim.me
+ name: vikunja
+ port: 3456
+ db:
+ host: "{{ postgres.host }}"
+ port: "{{ postgres.port }}"
+ database: vikunja
+ user: "{{ vault_vikunja.db.user }}"
+ pass: "{{ vault_vikunja.db.pass }}"
+
+vikunja_yml:
+ service:
+ JWTSecret: "{{ vault_vikunja.jwt_secret }}"
+ frontendurl: https://{{ svc.domain }}
+ enableregistration: false
+ timezone: "{{ timezone }}"
+
+ database:
+ type: postgres
+ sslmode: verify-full
+ host: "{{ svc.db.host }}"
+ database: "{{ svc.db.database }}"
+ user: "{{ svc.db.user }}"
+ password: "{{ svc.db.pass }}"
+
+ mailer:
+ enabled: true
+ host: "{{ mailer.host }}"
+ port: "{{ mailer.port }}"
+ username: "{{ vault_vikunja.mailer.user }}"
+ password: "{{ vault_vikunja.mailer.pass }}"
+ fromemail: "{{ vault_vikunja.mailer.user }}"
+
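+ # local login is disabled; users sign in via OIDC through auth.serguzim.me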
+ auth:
+ local:
+ enabled: false
+ openid:
+ enabled: true
+ providers:
+ - name: auth.serguzim.me
+ authurl: https://auth.serguzim.me/application/o/todo-serguzim-me/
+ logouturl: https://auth.serguzim.me/application/o/todo-serguzim-me/end-session/
+ clientid: "{{ vault_vikunja.oidc_client.id }}"
+ clientsecret: "{{ vault_vikunja.oidc_client.secret }}"
+
+ metrics:
+ enabled: true
+
+vikunja_compose:
+ watchtower: true
+ image: vikunja/vikunja
+ volumes:
+ - data:/app/vikunja/files
+ - ./config.yml:/app/vikunja/config.yml
+ file:
+ volumes:
+ data:
diff --git a/roles/watchtower/files/run-once.sh b/roles/watchtower/files/run-once.sh
new file mode 100644
index 0000000..535100a
--- /dev/null
+++ b/roles/watchtower/files/run-once.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env sh
+
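+# Trigger a single watchtower update check; the empty WATCHTOWER_NOTIFICATIONS
+# suppresses the usual e-mail notification for this manual run.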
+docker compose run --rm -e WATCHTOWER_RUN_ONCE=true -e WATCHTOWER_NOTIFICATIONS= app
diff --git a/roles/watchtower/tasks/main.yml b/roles/watchtower/tasks/main.yml
new file mode 100644
index 0000000..90df08b
--- /dev/null
+++ b/roles/watchtower/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ watchtower_svc }}"
+ env: "{{ watchtower_env }}"
+ compose: "{{ watchtower_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
+
+ - name: Copy the run-once script
+ ansible.builtin.copy:
+ src: run-once.sh
+ dest: "{{ (service_path, 'run-once.sh') | path_join }}"
+ mode: "0755"
diff --git a/roles/watchtower/vars/main.yml b/roles/watchtower/vars/main.yml
new file mode 100644
index 0000000..94aa577
--- /dev/null
+++ b/roles/watchtower/vars/main.yml
@@ -0,0 +1,32 @@
+---
+watchtower_svc:
+ name: watchtower
+
+watchtower_env:
+ WATCHTOWER_LABEL_ENABLE: true
+ WATCHTOWER_CLEANUP: true
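+ # 6-field cron with seconds: run daily at 20:27:00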
+ WATCHTOWER_SCHEDULE: "0 27 20 * * *"
+
+ # uncomment to stop watchtower from pulling new images
+ # WATCHTOWER_NO_PULL: true
+
+ WATCHTOWER_NOTIFICATIONS: email
+ WATCHTOWER_NOTIFICATION_EMAIL_FROM: "{{ svc.name }}@serguzim.me"
+ WATCHTOWER_NOTIFICATION_EMAIL_TO: "{{ admin_email }}"
+ WATCHTOWER_NOTIFICATION_EMAIL_SERVER: "{{ mailer.host }}"
+ WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PORT: "{{ mailer.port }}"
+ WATCHTOWER_NOTIFICATION_EMAIL_SERVER_USER: "{{ svc.name }}@serguzim.me"
+ WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PASSWORD: "{{ vault_watchtower.mailer.pass }}"
+ WATCHTOWER_NOTIFICATION_EMAIL_DELAY: 5
+
+watchtower_compose:
+ watchtower: false
+ image: containrrr/watchtower
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ file:
+ services:
+ app:
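+ # report the host's name in notifications instead of the container ID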
+ hostname: "{{ ansible_facts.hostname }}"
diff --git a/roles/webdis/files/webdis.json b/roles/webdis/files/webdis.json
new file mode 100644
index 0000000..25d95ac
--- /dev/null
+++ b/roles/webdis/files/webdis.json
@@ -0,0 +1,31 @@
+{
+ "redis_host": "redis",
+
+ "redis_port": 6379,
+ "redis_auth": null,
+
+ "http_host": "0.0.0.0",
+ "http_port": 7379,
+
+ "threads": 5,
+ "pool_size": 20,
+
+ "daemonize": false,
+ "websockets": false,
+
+ "database": 0,
+
+ "acl": [
+ {
+ "disabled": ["DEBUG"]
+ },
+
+ {
+ "http_basic_auth": "user:password",
+ "enabled": ["DEBUG"]
+ }
+ ],
+
+ "verbosity": 4,
+ "logfile": "/dev/stderr"
+}
diff --git a/roles/webdis/tasks/main.yml b/roles/webdis/tasks/main.yml
new file mode 100644
index 0000000..b151122
--- /dev/null
+++ b/roles/webdis/tasks/main.yml
@@ -0,0 +1,21 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ webdis_svc }}"
+ env: "{{ webdis_env }}"
+ compose: "{{ webdis_compose }}"
+ block:
+ - name: Import prepare tasks for common service
+ ansible.builtin.import_tasks: tasks/prepare-common-service.yml
+
+ - name: Copy the config
+ ansible.builtin.copy:
+ src: webdis.json
+ dest: "{{ (service_path, 'webdis.json') | path_join }}"
+ mode: "0755"
+
+ - name: Import start tasks for common service
+ ansible.builtin.import_tasks: tasks/start-common-service.yml
diff --git a/roles/webdis/vars/main.yml b/roles/webdis/vars/main.yml
new file mode 100644
index 0000000..d32512f
--- /dev/null
+++ b/roles/webdis/vars/main.yml
@@ -0,0 +1,24 @@
+---
+webdis_svc:
+ name: webdis
+ domain: webdis.huck.serguzim.me
+ port: 7379
+
+webdis_compose:
+ watchtower: true
+ image: nicolas/webdis
+ volumes:
+ - ./webdis.json:/config/webdis.json
+ file:
+ services:
+ app:
+ command: /usr/local/bin/webdis /config/webdis.json
+ depends_on:
+ - redis
+ redis:
+ image: redis:6.2.6
+ restart: always
+ labels:
+ com.centurylinklabs.watchtower.enable: true
+ networks:
+ default:
diff --git a/roles/webhook/files/teamspeak-fallback-db b/roles/webhook/files/teamspeak-fallback-db
new file mode 100755
index 0000000..7a3ad52
--- /dev/null
+++ b/roles/webhook/files/teamspeak-fallback-db
@@ -0,0 +1,6 @@
+#!/usr/bin/env sh
+
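+# Invoked by the webhook service: $WEBHOOK_DATA points to the decoded upload,
+# which replaces the TeamSpeak fallback database.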
+chown -R "${TEAMSPEAK_USER}:${TEAMSPEAK_GROUP}" /mnt/teamspeak-fallback-data
+install -o "${TEAMSPEAK_USER}" -g "${TEAMSPEAK_GROUP}" -m 644 "$WEBHOOK_DATA" "/mnt/teamspeak-fallback-data/ts3server.sqlitedb"
diff --git a/roles/webhook/tasks/main.yml b/roles/webhook/tasks/main.yml
new file mode 100644
index 0000000..5fa9415
--- /dev/null
+++ b/roles/webhook/tasks/main.yml
@@ -0,0 +1,44 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ webhook_svc }}"
+ compose: "{{ webhook_compose }}"
+ env: "{{ webhook_env }}"
+ yml: "{{ webhook_yml }}"
+ block:
+ - name: Import prepare tasks for common service
+ ansible.builtin.import_tasks: tasks/prepare-common-service.yml
+
+ - name: Set webhook config path
+ ansible.builtin.set_fact:
+ config_path: "{{ (service_path, 'config') | path_join }}"
+
+ - name: Create config directory
+ ansible.builtin.file:
+ path: "{{ config_path }}"
+ state: directory
+ mode: "0755"
+
+ - name: Template main config
+ ansible.builtin.template:
+ src: yml.j2
+ dest: "{{ (config_path, 'hooks.yml') | path_join }}"
+ mode: "0644"
+ register: cmd_result
+
+ - name: Set the docker force-recreate flag
+ ansible.builtin.set_fact:
+ docker_force_recreate: --force-recreate
+ when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.
+
+ - name: Copy the teamspeak-fallback-db script
+ ansible.builtin.copy:
+ src: teamspeak-fallback-db
+ dest: "{{ (config_path, 'teamspeak-fallback-db') | path_join }}"
+ mode: "0755"
+
+ - name: Import start tasks for common service
+ ansible.builtin.import_tasks: tasks/start-common-service.yml
diff --git a/roles/webhook/vars/main.yml b/roles/webhook/vars/main.yml
new file mode 100644
index 0000000..91e05a3
--- /dev/null
+++ b/roles/webhook/vars/main.yml
@@ -0,0 +1,46 @@
+---
+webhook_teamspeak_fallback_db_token: "{{ vault_webhook.teamspeak_fallback_db_token }}"
+webhook_teamspeak_user: 9987
+webhook_teamspeak_group: 9987
+
+webhook_svc:
+ name: webhook
+ domain: hook.serguzim.me
+ port: 9000
+
+webhook_env:
+ TEAMSPEAK_USER: "{{ webhook_teamspeak_user }}"
+ TEAMSPEAK_GROUP: "{{ webhook_teamspeak_group }}"
+
+webhook_yml:
+ - id: teamspeak-fallback-db
+ trigger-rule-mismatch-http-response-code: 400
+ execute-command: /config/teamspeak-fallback-db
+ pass-file-to-command:
+ - source: payload
+ name: data
+ envname: WEBHOOK_DATA
+ base64decode: true
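+ # only execute when the shared secret in the X-Webhook-Token header matches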
+ trigger-rule:
+ and:
+ - match:
+ type: value
+ value: "{{ webhook_teamspeak_fallback_db_token }}"
+ parameter:
+ source: header
+ name: X-Webhook-Token
+
+webhook_compose:
+ watchtower: true
+ image: ghcr.io/thecatlady/webhook
+ volumes:
+ - ./config:/config:ro
+ - teamspeak-fallback-data:/mnt/teamspeak-fallback-data
+ file:
+ services:
+ app:
+ command: ["-verbose", "-hooks=/config/hooks.yml"]
+ volumes:
+ teamspeak-fallback-data:
+ name: teamspeak-fallback-data
diff --git a/roles/wiki_js/tasks/main.yml b/roles/wiki_js/tasks/main.yml
new file mode 100644
index 0000000..a2b70d8
--- /dev/null
+++ b/roles/wiki_js/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ wiki_js_svc }}"
+ env: "{{ wiki_js_env }}"
+ compose: "{{ wiki_js_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/wiki_js/vars/main.yml b/roles/wiki_js/vars/main.yml
new file mode 100644
index 0000000..04dbc5f
--- /dev/null
+++ b/roles/wiki_js/vars/main.yml
@@ -0,0 +1,26 @@
+---
+wiki_js_svc:
+ domain: wiki.serguzim.me
+ name: wiki-js
+ port: 3000
+ caddy_extra: |
+ import analytics
+ db:
+ host: "{{ postgres.host }}"
+ port: "{{ postgres.port }}"
+ user: "{{ vault_wiki_js.db.user }}"
+ pass: "{{ vault_wiki_js.db.pass }}"
+ name: wikijs
+
+wiki_js_env:
+ DB_TYPE: postgres
+ DB_HOST: "{{ svc.db.host }}"
+ DB_PORT: "{{ svc.db.port }}"
+ DB_USER: "{{ svc.db.user }}"
+ DB_PASS: "{{ svc.db.pass }}"
+ DB_NAME: "{{ svc.db.name }}"
+ DB_SSL: 1
+
+wiki_js_compose:
+ watchtower: true
+ image: requarks/wiki
diff --git a/roles/woodpecker/tasks/main.yml b/roles/woodpecker/tasks/main.yml
new file mode 100644
index 0000000..dc6a26d
--- /dev/null
+++ b/roles/woodpecker/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Set common facts
+ ansible.builtin.import_tasks: tasks/set-default-facts.yml
+
+- name: Deploy {{ svc.name }}
+ vars:
+ svc: "{{ woodpecker_svc }}"
+ env: "{{ woodpecker_env }}"
+ compose: "{{ woodpecker_compose }}"
+ block:
+ - name: Import tasks to deploy common service
+ ansible.builtin.import_tasks: tasks/deploy-common-service.yml
diff --git a/roles/woodpecker/vars/main.yml b/roles/woodpecker/vars/main.yml
new file mode 100644
index 0000000..ddb85e1
--- /dev/null
+++ b/roles/woodpecker/vars/main.yml
@@ -0,0 +1,54 @@
+---
+woodpecker_svc:
+ domain: ci.serguzim.me
+ name: woodpecker
+ port: 8000
+ extra_svcs:
+ - domain: agents.ci.serguzim.me
+ docker_host: h2c://woodpecker
+ port: 9000
+ db:
+ host: "{{ postgres.host }}"
+ port: "{{ postgres.port }}"
+ database: woodpecker
+ user: "{{ vault_woodpecker.db.user }}"
+ pass: "{{ vault_woodpecker.db.pass }}"
+
+woodpecker_env:
+ WOODPECKER_OPEN: true
+ WOODPECKER_HOST: https://{{ svc.domain }}
+ WOODPECKER_ADMIN: serguzim
+ WOODPECKER_AGENT_SECRET: "{{ vault_woodpecker.agent_secret }}"
+ WOODPECKER_PROMETHEUS_AUTH_TOKEN: "{{ vault_metrics_token }}"
+
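+ # agents connect to the gRPC endpoint exposed via the extra service above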
+ WOODPECKER_SERVER: "{{ svc.extra_svcs[0].domain }}:443"
+ WOODPECKER_GRPC_SECURE: true
+
+ WOODPECKER_GITEA: true
+ WOODPECKER_GITEA_URL: https://git.serguzim.me
+ WOODPECKER_GITEA_CLIENT: "{{ vault_woodpecker.gitea.client }}"
+ WOODPECKER_GITEA_SECRET: "{{ vault_woodpecker.gitea.secret }}"
+
+ WOODPECKER_DATABASE_DRIVER: postgres
+ WOODPECKER_DATABASE_DATASOURCE: postgres://{{ svc.db.user }}:{{ svc.db.pass }}@{{ svc.db.host }}:{{ svc.db.port }}/{{ svc.db.database }}?sslmode=verify-full
+
+woodpecker_compose:
+ watchtower: true
+ image: woodpeckerci/woodpecker-server
+ file:
+ services:
+ agent:
+ image: woodpeckerci/woodpecker-agent:latest
+ restart: always
+ labels:
+ com.centurylinklabs.watchtower.enable: true
+ command: agent
+ env_file:
+ - service.env
+ depends_on:
+ - app
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ networks:
+ default: