Migrate services part

parent 7c59e4ae57
commit 73bce8f6e5

157 changed files with 3883 additions and 9 deletions

.gitignore (vendored) | 3 +++
@@ -5,3 +5,6 @@ dns/hosts.json
 secrets.auto.tfvars
 .terraform
+
+inventory/group_vars/all/serguzim.net.yml
+inventory/group_vars/all/opentofu.yaml

.pre-commit-config.yaml | 7 (new file)
@@ -0,0 +1,7 @@
repos:
  - repo: https://github.com/ansible/ansible-lint
    rev: v6.22.1
    hooks:
      - id: ansible-lint
        args:
          - serguzim.net.yml

Makefile | 10 ++++------
@@ -3,20 +3,18 @@ SHELL := /bin/bash
 include .env
 export
 
-DNS_OUTPUT = "dns/hosts.js"
+DNS_OUTPUT = "dns/hosts.json"
-SERVICES_OUTPUT = "services/inventory/group_vars/all/opentofu.yaml"
+SERVICES_OUTPUT = "inventory/group_vars/all/opentofu.yaml"
 
 $(DNS_OUTPUT):
-	cd opentofu && \
 	tofu output --json \
 		| jq 'with_entries(.value |= .value).hosts' \
-		> ../dns/hosts.json
+		> $(DNS_OUTPUT)
 
 $(SERVICES_OUTPUT):
-	cd opentofu && \
 	tofu output --json \
 		| yq -y '{opentofu: with_entries(.value |= .value)}' \
-		> ../services/inventory/group_vars/all/opentofu.yaml
+		> $(SERVICES_OUTPUT)
 
 outputs: $(DNS_OUTPUT) $(SERVICES_OUTPUT)
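
For readers unfamiliar with the jq filter used above, a hypothetical session (not part of the commit): `tofu output --json` wraps every output in a `{"value": ..., "type": ...}` envelope; `with_entries(.value |= .value)` unwraps the envelopes, and `.hosts` selects the one output the DNS target needs. The host name and IP below are made up.

    $ tofu output --json | jq .
    { "hosts": { "sensitive": false, "type": "...", "value": { "node001": "203.0.113.1" } } }
    $ tofu output --json | jq 'with_entries(.value |= .value).hosts'
    { "node001": "203.0.113.1" }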

README.md | 14 ++++++++++++++
@@ -8,3 +8,17 @@
 - enter credentials to ovh in .env file (copy from .env.example)
 - check credentials with `make dns-check`
 - run `make dns`
+
+## Ansible project to deploy services
+
+### Variables
+
+#### CLI variable overwrites
+
+##### force_forgejo_runner_registration
+`-e force_forgejo_runner_registration=True`
+Force forgejo-runner to prompt a new registration token.
+
+##### docker_update
+`-e docker_update=True`
+Pull and build the docker compose services
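
As a usage illustration (not part of the commit), the overwrites documented above would typically be passed to ansible-playbook like so; playbook paths and tags follow the files added below:

    $ ansible-playbook playbooks/node002.yml -t forgejo-runner -e force_forgejo_runner_registration=True
    $ ansible-playbook playbooks/node002.yml -t forgejo -e docker_update=True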

ansible.cfg | 3 (new file)
@@ -0,0 +1,3 @@
[defaults]
inventory = ./inventory
roles_path = ./roles

inventory/group_vars/all/compose_defaults.yml | 37 (new file)
@@ -0,0 +1,37 @@
compose_file_main:
  services:
    app:
      image: "{{ compose.image }}"
      restart: always
      labels:
        com.centurylinklabs.watchtower.enable: "{{ compose.watchtower | default(false) }}"

compose_file_env:
  services:
    app:
      env_file:
        - service.env

compose_file_networks:
  services:
    app:
      networks:
        default:
        apps:
          aliases:
            - "{{ svc.name }}"
  networks:
    default:
    apps:
      external: true

compose_file_volumes:
  services:
    app:
      volumes: "{{ compose.volumes }}"

compose_file_monitoring_label:
  services:
    app:
      labels:
        com.influxdata.telegraf.enable: true

inventory/group_vars/all/main.yml | 22 (new file)
@@ -0,0 +1,22 @@
admin_email: tobias@msrg.cc
timezone: Europe/Berlin

postgres:
  host: db.serguzim.me
  port: 5432

mailer:
  host: mail.serguzim.me
  port: 587

acme_dns:
  host: acme.serguzim.me


services_path: /opt/services/

caddy_path: "{{ (services_path, 'caddy') | path_join }}"
caddy_config_path: "{{ (caddy_path, 'config', 'conf.d') | path_join }}"
managed_sites: []

certificates_path: "{{ (services_path, '_certificates') | path_join }}"

inventory/serguzim.net.yml | 58 (new file)
@@ -0,0 +1,58 @@
all:
  hosts:
    local-dev:
      ansible_connection: local

    node001:
      ansible_host: node001.vpn.serguzim.net
      ansible_port: "{{ vault_node001.ansible_port }}"
      ansible_user: "{{ vault_node001.ansible_user }}"
      interactive_user: "{{ vault_node001.interactive_user }}"
      host_vpn:
        domain: node001.vpn.serguzim.net
        ip: 100.64.0.1
      host_backup:
        hc_uid: "{{ vault_node001.backup.hc_uid }}"
        uptime_kuma_token: "{{ vault_node001.backup.uptime_kuma_token }}"
        volumes:
          - minecraft-2_data

    node002:
      ansible_host: node002.vpn.serguzim.net
      ansible_port: "{{ vault_node002.ansible_port }}"
      ansible_user: "{{ vault_node002.ansible_user }}"
      interactive_user: "{{ vault_node002.interactive_user }}"
      host_vpn:
        domain: node002.vpn.serguzim.net
        ip: 100.64.0.2
      host_backup:
        hc_uid: "{{ vault_node002.backup.hc_uid }}"
        uptime_kuma_token: "{{ vault_node002.backup.uptime_kuma_token }}"
        volumes:
          - forgejo_data
          - homebox_data
          - immich_upload
          - influxdb_data
          - jellyfin_config
          #- jellyfin_media # TODO
          - minio_data
          - ntfy_data
          - reitanlage-oranienburg_data
          - synapse_media_store
          - tandoor_mediafiles
          - teamspeak-fallback-data
          - uptime-kuma_data
          - vikunja_data

    node003:
      ansible_host: node003.vpn.serguzim.net
      ansible_port: "{{ vault_node003.ansible_port }}"
      ansible_user: "{{ vault_node003.ansible_user }}"
      interactive_user: "{{ vault_node003.interactive_user }}"
      host_vpn:
        domain: node003.vpn.serguzim.net
        ip: 100.110.16.30
      host_backup:
        hc_uid: "{{ vault_node003.backup.hc_uid }}"
        uptime_kuma_token: "{{ vault_node003.backup.uptime_kuma_token }}"
        volumes: []
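
A hypothetical sanity check for the new inventory (not part of the commit); since ansible.cfg already points at ./inventory, no -i flag is needed:

    $ ansible-inventory --graph
    @all:
      |--@ungrouped:
      |  |--local-dev
      |  |--node001
      |  |--node002
      |  |--node003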

playbooks/change-password.yml | 14 (new file)
@@ -0,0 +1,14 @@
---
- name: Change password
  hosts: all
  become: true
  tasks:
    - name: Get new password
      ansible.builtin.pause:
        prompt: Enter the new password
        echo: false
      register: new_user_password
    - name: Change password
      ansible.builtin.user:
        name: "{{ interactive_user }}"
        password: "{{ new_user_password.user_input | password_hash('sha512') }}"

playbooks/filter_plugins/acmedns_to_lego.py | 18 (new file)
@@ -0,0 +1,18 @@
class FilterModule(object):
    def filters(self):
        return {
            'acmedns_to_lego': self.acmedns_to_lego,
        }

    def acmedns_to_lego(self, acmedns_registered):
        # Convert acme-dns registration entries into the account
        # format expected by lego's ACME-DNS provider.
        result = {}
        for (key, value) in acmedns_registered.items():
            result[key] = {
                "fulldomain": value["subd"] + "." + value["host"],
                "subdomain": value["subd"],
                "username": value["user"],
                "password": value["pass"],
                "server_url": "https://" + value["host"]
            }

        return result

playbooks/filter_plugins/map_backup_volumes.py | 24 (new file)
@@ -0,0 +1,24 @@
class FilterModule(object):
    def filters(self):
        return {
            'map_backup_volumes': self.map_backup_volumes,
            'map_backup_volumes_service': self.map_backup_volumes_service
        }

    def map_backup_volumes(self, volumes):
        # Volume names -> top-level compose "volumes" dict, each marked external.
        result = {}

        for volume in volumes:
            result[volume] = {
                "external": True,
            }

        return result

    def map_backup_volumes_service(self, volumes):
        # Volume names -> service mount strings "<name>:/backup/volumes/<name>".
        result = []

        for volume in volumes:
            result.append("{volume_name}:/backup/volumes/{volume_name}".format(volume_name=volume))

        return result

playbooks/local-dev.yml | 29 (new file)
@@ -0,0 +1,29 @@
---
- name: Run roles for local-dev
  vars:
    # Remove inventory
    base_path: "{{ inventory_dir.split('/')[0:-1] | join('/') }}"
    services_path: "{{ (base_path, '_services') | path_join }}"
    caddy_config_path: "{{ (services_path, 'caddy', 'config', 'conf.d') | path_join }}"

  hosts: local-dev
  roles:
    - common

    - acme-dns
    - coder
    - faas
    - forgejo
    - forgejo-runner
    - healthcheck
    - homebox
    - influxdb
    - jellyfin
    - tandoor
    - telegraf
    - tinytinyrss
    - umami
    - uptime-kuma
    - watchtower
    - webdis
    - wiki-js

playbooks/node001.yml | 15 (new file)
@@ -0,0 +1,15 @@
---
- name: Run roles for node001
  hosts: node001
  roles:
    - role: common
      tags: [always]
    - role: backup
      tags: [backup]
    - role: caddy
      tags: [caddy, reverse-proxy, webserver]

    - role: mailcow
      tags: [mailcow, mail, communication]
    - role: minecraft_2
      tags: [minecraft-2, minecraft, games]

playbooks/node002.yml | 79 (new file)
@@ -0,0 +1,79 @@
---
- name: Run roles for node002
  hosts: node002
  roles:
    - role: common
      tags: [always]
    - role: backup
      tags: [backup]
    - role: lego
      tags: [lego, certificates]
    - role: caddy
      tags: [caddy, reverse-proxy, webserver]
      vars:
        caddy_ports_extra:
          - 8448:8448


    - role: acme_dns
      tags: [acme-dns, certificates]
    - role: authentik
      tags: [authentik, authentication]
    - role: coder
      tags: [coder, development]
    - role: extra_services
      tags: [extra-services]
    - role: faas
      tags: [faas]
    - role: forgejo
      tags: [forgejo, git, development]
    - role: forgejo_runner
      tags: [forgejo-runner, ci, development]
    - role: harbor
      tags: [harbor, registry, development]
    - role: healthcheck
      tags: [healthcheck, monitoring]
    - role: homebox
      tags: [homebox, inventory]
    - role: immich
      tags: [immich, gallery]
    - role: influxdb
      tags: [influxdb, sensors, monitoring]
    - role: jellyfin
      tags: [jellyfin, media]
    - role: linkwarden
      tags: [linkwarden, booksmarks]
    - role: minio
      tags: [minio, storage]
    - role: ntfy
      tags: [ntfy, notifications, push]
    - role: reitanlage_oranienburg
      tags: [reitanlage-oranienburg, website]
    - role: shlink
      tags: [shlink, url-shortener]
    - role: synapse
      tags: [synapse, matrix, communication]
    - role: tandoor
      tags: [tandoor, recipes]
    - role: teamspeak_fallback
      tags: [teamspeak-fallback, communication]
    - role: telegraf
      tags: [telegraf, monitoring]
    - role: tinytinyrss
      tags: [tinytinyrss, news]
    - role: umami
      tags: [umami, analytics]
    - role: uptime_kuma
      tags: [uptime-kuma, monitoring]
    - role: vikunja
      tags: [vikunja, todo]
    - role: watchtower
      tags: [watchtower]
    - role: webdis
      tags: [webdis]
    - role: webhook
      tags: [webhook]
    - role: wiki_js
      tags: [wiki-js]
    - role: woodpecker
      tags: [woodpecker, ci, development]

playbooks/node003.yml | 15 (new file)
@@ -0,0 +1,15 @@
---
- name: Run roles for node003
  hosts: node003
  roles:
    - role: common
      tags: [common]
    - role: docker
      tags: [common]
    - role: backup
      tags: [backup]
    - role: caddy
      tags: [caddy, reverse-proxy, webserver]

    - role: mailcow
      tags: [mailcow, mail, communication]

playbooks/serguzim.net.yml | 6 (new file)
@@ -0,0 +1,6 @@
---
- name: Run playbook for node001
  import_playbook: node001.yml

- name: Run playbook for node002
  import_playbook: node002.yml

playbooks/tasks/deploy-common-service.yml | 5 (new file)
@@ -0,0 +1,5 @@
---
- name: Import prepare tasks for common service
  ansible.builtin.import_tasks: tasks/prepare-common-service.yml
- name: Import start tasks for common service
  ansible.builtin.import_tasks: tasks/start-common-service.yml

playbooks/tasks/prepare-common-service.yml | 11 (new file)
@@ -0,0 +1,11 @@
---
- name: Import tasks to create service directory
  ansible.builtin.import_tasks: tasks/steps/create-service-directory.yml

- name: Import tasks to template docker compose file
  ansible.builtin.import_tasks: tasks/steps/template-docker-compose.yml
  when: compose is defined

- name: Import tasks create a service.env file
  ansible.builtin.import_tasks: tasks/steps/template-service-env.yml
  when: env is defined

playbooks/tasks/set-default-facts.yml | 6 (new file)
@@ -0,0 +1,6 @@
---
- name: Set common facts
  ansible.builtin.set_fact:
    service_path: "{{ (services_path, role_name | replace('_', '-')) | path_join }}"
    docker_force_recreate: ""
    docker_rebuild: false

playbooks/tasks/start-common-service.yml | 6 (new file)
@@ -0,0 +1,6 @@
---
- name: Import tasks to template the site for the reverse proxy
  ansible.builtin.include_tasks: tasks/steps/template-site-config.yml
  when: svc.domain is defined
- name: Import tasks to start the service
  ansible.builtin.import_tasks: tasks/steps/start-service.yml

playbooks/tasks/steps/create-service-directory.yml | 6 (new file)
@@ -0,0 +1,6 @@
---
- name: Create a service directory
  ansible.builtin.file:
    path: "{{ service_path }}"
    state: directory
    mode: "0755"

playbooks/tasks/steps/start-service.yml | 39 (new file)
@@ -0,0 +1,39 @@
---
- name: Rebuild service
  ansible.builtin.command:
    cmd: docker compose build --pull
    chdir: "{{ service_path }}"
  when:
    - docker_rebuild
  register: cmd_result
  changed_when: true

- name: Build service
  ansible.builtin.command:
    cmd: docker compose build --pull
    chdir: "{{ service_path }}"
  when:
    - "'local-dev' != inventory_hostname"
    - docker_update is defined
    - docker_update
  register: cmd_result
  changed_when: true

- name: Pull service
  ansible.builtin.command:
    cmd: docker compose pull --ignore-buildable
    chdir: "{{ service_path }}"
  when:
    - "'local-dev' != inventory_hostname"
    - docker_update is defined
    - docker_update
  register: cmd_result
  changed_when: true

- name: Start service
  ansible.builtin.command:
    cmd: docker compose up -d {{ docker_force_recreate }}
    chdir: "{{ service_path }}"
  when: "'local-dev' != inventory_hostname"
  register: cmd_result
  changed_when: cmd_result.stderr | regex_search('Started$')
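
For reference, on a remote host with docker_update=True and the force-recreate flag set, the task sequence above boils down to roughly this shell session (illustrative; the service path is a placeholder):

    $ cd /opt/services/<service>
    $ docker compose build --pull             # "Build service"
    $ docker compose pull --ignore-buildable  # "Pull service"
    $ docker compose up -d --force-recreate   # "Start service"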

playbooks/tasks/steps/template-docker-compose.yml | 6 (new file)
@@ -0,0 +1,6 @@
---
- name: Template docker-compose
  ansible.builtin.template:
    src: docker-compose.yml.j2
    dest: "{{ (service_path, 'docker-compose.yml') | path_join }}"
    mode: "0644"

playbooks/tasks/steps/template-service-env.yml | 6 (new file)
@@ -0,0 +1,6 @@
---
- name: Template service.env file
  ansible.builtin.template:
    src: env.j2
    dest: "{{ (service_path, 'service.env') | path_join }}"
    mode: "0700"

playbooks/tasks/steps/template-site-config.yml | 12 (new file)
@@ -0,0 +1,12 @@
---
- name: Template caddy site
  ansible.builtin.template:
    src: caddy_site.conf.j2
    dest: "{{ (caddy_config_path, svc.domain + '.conf') | path_join }}"
    mode: "0644"
  notify:
    - Reload caddy

- name: Register caddy site
  ansible.builtin.set_fact:
    managed_sites: "{{ managed_sites + [svc.domain + '.conf'] }}"

playbooks/templates/caddy_site.conf.j2 | 43 (new file)
@@ -0,0 +1,43 @@
{%- macro caddy_site_hsts(svc, for_www) -%}
{%- if svc.hsts|default(false) and (svc.www_domain|default(false) == for_www) -%}
{{ 'header Strict-Transport-Security "max-age=31536000; includeSubdomains; preload"' if svc.hsts|default(false) }}
{%- endif -%}
{%- endmacro -%}

{% macro caddy_site(svc) %}
{%- for domain in svc.additional_domains|default([]) %}
{{ domain }},
{% endfor -%}
{{ "www." + svc.domain if svc.www_domain|default(false) else svc.domain }} {
	import default
	{{ caddy_site_hsts(svc, false) }}

	{{ svc.caddy_extra | indent(width='\t', first=True) if svc.caddy_extra|default(false) }}

{% if svc.caddy_default|default(true) %}
	handle {
{% if svc.faas_function|default(false) %}
		import faas {{ svc.faas_function }}
{% elif svc.redirect|default(false) %}
		redir "{{ svc.redirect }}"
{% else %}
		reverse_proxy {{ svc.docker_host|default(svc.name) }}:{{ svc.port }}
{% endif %}
	}
{% endif %}
}

{% if svc.www_domain|default(false) %}
{{ svc.domain }} {
	import default
	{{ caddy_site_hsts(svc, true) }}
	redir https://www.{{ svc.domain }}{uri}
}
{% endif %}
{% endmacro -%}

{{ caddy_site(svc) }}

{%- for extra_svc in svc.extra_svcs|default([]) %}
{{ caddy_site(extra_svc) }}
{% endfor %}
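
An illustrative render of the macro above for a minimal hypothetical service (domain: example.serguzim.me, name: example, port: 8080, no www/hsts/extra settings); this is an editor's sketch, not output from the commit:

    $ cat conf.d/example.serguzim.me.conf
    example.serguzim.me {
        import default

        handle {
            reverse_proxy example:8080
        }
    }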

playbooks/templates/docker-compose.yml.j2 | 20 (new file)
@@ -0,0 +1,20 @@
{%- set compose_file = compose.file | default({}) -%}
{%- set compose_file = compose_file_main | combine(compose_file, recursive=True) -%}

{%- if env is defined -%}
{%- set compose_file = compose_file | combine(compose_file_env, recursive=True) -%}
{%- endif -%}

{%- if compose.network | default(True) -%}
{%- set compose_file = compose_file | combine(compose_file_networks, recursive=True) -%}
{%- endif -%}

{%- if compose.volumes | default(False) -%}
{%- set compose_file = compose_file | combine(compose_file_volumes, recursive=True) -%}
{%- endif -%}

{%- if compose.monitoring | default(False) -%}
{%- set compose_file = compose_file | combine(compose_file_monitoring_label, recursive=True) -%}
{%- endif -%}

{{ compose_file | to_nice_yaml }}
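
As an illustration of the merge logic above (hypothetical role: image example/app:latest, env defined, default network on, no volumes or monitoring), the rendered file would look roughly like this; to_nice_yaml sorts keys alphabetically, so exact formatting may differ:

    $ cat /opt/services/example/docker-compose.yml
    networks:
      apps:
        external: true
      default: null
    services:
      app:
        env_file:
          - service.env
        image: example/app:latest
        labels:
          com.centurylinklabs.watchtower.enable: false
        networks:
          apps:
            aliases:
              - example
          default: null
        restart: always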

playbooks/templates/env.j2 | 7 (new file)
@@ -0,0 +1,7 @@
{% for key, value in env.items() %}
{% if value is boolean %}
{{ key }}={{ value|lower }}
{% else %}
{{ key }}={{ value }}
{% endif %}
{% endfor %}
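
An illustrative render of env.j2 for a hypothetical env dict of EXAMPLE: value and DEBUG: false; booleans are lowercased so the container runtime sees false rather than Python's False:

    $ cat /opt/services/example/service.env
    EXAMPLE=value
    DEBUG=false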

playbooks/templates/json.j2 | 1 (new file)
@@ -0,0 +1 @@
{{ json | to_json }}

playbooks/templates/yml.j2 | 1 (new file)
@@ -0,0 +1 @@
{{ yml | to_nice_yaml }}

roles/_TEMPLATE/tasks/main.yml | 12 (new file)
@@ -0,0 +1,12 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ NAME_svc }}"
    env: "{{ NAME_env }}"
    compose: "{{ NAME_compose }}"
  block:
    - name: Import tasks to deploy common service
      ansible.builtin.import_tasks: tasks/deploy-common-service.yml

roles/_TEMPLATE/vars/main.yml | 17 (new file)
@@ -0,0 +1,17 @@
---
NAME_svc:
  domain: NAME.serguzim.me
  name: NAME
  port: 80

NAME_env:
  EXAMPLE: value

NAME_compose:
  watchtower: true
  image:
  volumes:
    - data:/data
  file:
    volumes:
      data:

roles/acme_dns/tasks/main.yml | 37 (new file)
@@ -0,0 +1,37 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ acme_dns_svc }}"
    env: "{{ acme_dns_env }}"
    compose: "{{ acme_dns_compose }}"
  block:
    - name: Import prepare tasks for common service
      ansible.builtin.import_tasks: tasks/prepare-common-service.yml

    - name: Setting the service config path
      ansible.builtin.set_fact:
        config_path: "{{ (service_path, 'config') | path_join }}"

    - name: Create a service-config directory
      ansible.builtin.file:
        path: "{{ config_path }}"
        state: directory
        mode: "0700"

    - name: Template config
      ansible.builtin.template:
        src: config.cfg.j2
        dest: "{{ (config_path, 'config.cfg') | path_join }}"
        mode: "0600"
      register: cmd_result

    - name: Set the docker force-recreate flag
      ansible.builtin.set_fact:
        docker_force_recreate: --force-recreate
      when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.

    - name: Import start tasks for common service
      ansible.builtin.import_tasks: tasks/start-common-service.yml

roles/acme_dns/templates/config.cfg.j2 | 32 (new file)
@@ -0,0 +1,32 @@
[general]
listen = "0.0.0.0:53"
protocol = "both"
domain = "{{ svc.domain }}"
nsname = "{{ svc.domain }}"
nsadmin = "{{ svc.nsadmin }}"
records = [
    "{{ svc.domain }}. A {{ svc.records.a }}",
    "{{ svc.domain }}. AAAA {{ svc.records.aaaa }}",
    "{{ svc.domain }}. NS {{ svc.domain }}.",
]
debug = false

[database]
engine = "postgres"
connection = "postgres://{{ svc.db.user }}:{{ svc.db.pass }}@{{ svc.db.host }}/{{ svc.db.db }}"

[api]
ip = "0.0.0.0"
disable_registration = false
port = "{{ svc.port }}"
tls = "none"
corsorigins = [
    "*"
]
use_header = true
header_name = "X-Forwarded-For"

[logconfig]
loglevel = "info"
logtype = "stdout"
logformat = "text"

roles/acme_dns/vars/main.yml | 28 (new file)
@@ -0,0 +1,28 @@
---
acme_dns_svc:
  domain: "{{ acme_dns.host }}"
  name: acme-dns
  port: 80
  nsadmin: "{{ admin_email | regex_replace('@', '.') }}"
  records:
    a: "{{ ansible_facts.default_ipv4.address }}"
    aaaa: "{{ ansible_facts.default_ipv6.address }}"
  db:
    host: "{{ postgres.host }}"
    port: "{{ postgres.port }}"
    user: "{{ vault_acmedns.db.user }}"
    pass: "{{ vault_acmedns.db.pass }}"
    db: acme_dns

acme_dns_compose:
  watchtower: true
  monitoring: true
  image: joohoi/acme-dns
  volumes:
    - ./config:/etc/acme-dns:ro
  file:
    services:
      app:
        ports:
          - "53:53"
          - 53:53/udp

roles/authentik/tasks/main.yml | 12 (new file)
@@ -0,0 +1,12 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ authentik_svc }}"
    env: "{{ authentik_env }}"
    compose: "{{ authentik_compose }}"
  block:
    - name: Import tasks to deploy common service
      ansible.builtin.import_tasks: tasks/deploy-common-service.yml

roles/authentik/vars/main.yml | 60 (new file)
@@ -0,0 +1,60 @@
---
authentik_svc:
  domain: auth.serguzim.me
  name: authentik
  port: 9000
  image_tag: 2024.2
  db:
    host: "{{ postgres.host }}"
    database: authentik
    user: "{{ vault_authentik.db.user }}"
    pass: "{{ vault_authentik.db.pass }}"

authentik_env:
  AUTHENTIK_SECRET_KEY: "{{ vault_authentik.secret_key }}"

  AUTHENTIK_EMAIL__HOST: "{{ mailer.host }}"
  AUTHENTIK_EMAIL__PORT: "{{ mailer.port }}"
  AUTHENTIK_EMAIL__USERNAME: "{{ vault_authentik.mail.user }}"
  AUTHENTIK_EMAIL__PASSWORD: "{{ vault_authentik.mail.pass }}"
  AUTHENTIK_EMAIL__USE_TLS: true
  AUTHENTIK_EMAIL__USE_SSL: false
  AUTHENTIK_EMAIL__TIMEOUT: 10
  AUTHENTIK_EMAIL__FROM: auth@serguzim.me

  AUTHENTIK_AVATARS: none

  AUTHENTIK_REDIS__HOST: redis

  AUTHENTIK_POSTGRESQL__HOST: "{{ svc.db.host }}"
  AUTHENTIK_POSTGRESQL__NAME: "{{ svc.db.database }}"
  AUTHENTIK_POSTGRESQL__USER: "{{ svc.db.user }}"
  AUTHENTIK_POSTGRESQL__PASSWORD: "{{ svc.db.pass }}"

authentik_compose:
  watchtower: false
  image: ghcr.io/goauthentik/server:{{ svc.image_tag }}
  file:
    services:
      app:
        command: server
        depends_on:
          - redis
      worker:
        image: ghcr.io/goauthentik/server:{{ svc.image_tag }}
        restart: always
        command: worker
        user: root
        volumes:
          - /var/run/docker.sock:/var/run/docker.sock
        env_file:
          - service.env
        depends_on:
          - redis
        networks:
          default:
      redis:
        image: redis:alpine
        restart: always
        networks:
          default:

roles/backup/files/Dockerfile | 3 (new file)
@@ -0,0 +1,3 @@
FROM restic/restic

RUN apk add curl

roles/backup/files/backup.timer | 4 (new file)
@@ -0,0 +1,4 @@
[Timer]
OnCalendar=*-*-* 04:10:00
[Install]
WantedBy=timers.target

roles/backup/files/node001/mailcow.sh | 3 (new executable file)
@@ -0,0 +1,3 @@
export MAILCOW_BACKUP_LOCATION="$BACKUP_LOCATION/mailcow"
mkdir -p "$MAILCOW_BACKUP_LOCATION"
/opt/mailcow-dockerized/helper-scripts/backup_and_restore.sh backup all

roles/backup/files/node002/immich.sh | 5 (new executable file)
@@ -0,0 +1,5 @@
backup_path="$BACKUP_LOCATION/immich"
mkdir -p "$backup_path"

cd /opt/services/immich || exit
docker compose exec database sh -c 'pg_dump -U "$DB_USERNAME" "$DB_DATABASE"' | gzip >"$backup_path/immich.sql.gz"

roles/backup/files/node002/postgres.sh | 14 (new executable file)
@@ -0,0 +1,14 @@
mkdir -p "$BACKUP_LOCATION/postgres"
cd "$BACKUP_LOCATION/postgres" || exit

postgres_tables=$(sudo -u postgres psql -Atc "SELECT datname FROM pg_database WHERE datistemplate = false;")

for i in $postgres_tables
do
	printf "dumping %s ..." "$i"
	sudo -u postgres pg_dump "$i" | gzip >"pg_dump_$i.sql.gz"
	echo " done"
done

echo "dumping all"
sudo -u postgres pg_dumpall | gzip >"pg_dumpall.sql.gz"

roles/backup/files/node003/mailcow.sh | 3 (new executable file)
@@ -0,0 +1,3 @@
export MAILCOW_BACKUP_LOCATION="$BACKUP_LOCATION/mailcow"
mkdir -p "$MAILCOW_BACKUP_LOCATION"
/opt/mailcow-dockerized/helper-scripts/backup_and_restore.sh backup all

roles/backup/tasks/backup.d.yml | 16 (new file)
@@ -0,0 +1,16 @@
---
- name: Set backup.d path
  ansible.builtin.set_fact:
    backup_d_path: "{{ (service_path, 'backup.d') | path_join }}"
- name: Create backup.d directory
  ansible.builtin.file:
    path: "{{ backup_d_path }}"
    state: directory
    mode: "0755"
- name: Copy the additional backup scripts
  ansible.builtin.copy:
    src: "{{ item }}"
    dest: "{{ backup_d_path }}"
    mode: "0755"
  with_fileglob:
    - "{{ ansible_facts.hostname }}/*"

roles/backup/tasks/docker.yml | 12 (new file)
@@ -0,0 +1,12 @@
---
- name: Copy the Dockerfile
  ansible.builtin.copy:
    src: Dockerfile
    dest: "{{ (service_path, 'Dockerfile') | path_join }}"
    mode: "0644"
  register: cmd_result

- name: Set the docker rebuild flag
  ansible.builtin.set_fact:
    docker_rebuild: true
  when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.

roles/backup/tasks/main.yml | 39 (new file)
@@ -0,0 +1,39 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ backup_svc }}"
    env: "{{ backup_env }}"
    compose: "{{ backup_compose }}"
  block:
    - name: Import prepare tasks for common service
      ansible.builtin.import_tasks: tasks/prepare-common-service.yml

    - name: Copy the main backup script
      ansible.builtin.template:
        src: "backup.sh.j2"
        dest: "{{ (service_path, 'backup.sh') | path_join }}"
        mode: "0755"

    - name: Import tasks specific to docker
      ansible.builtin.import_tasks: docker.yml
    - name: Import tasks specific to the backup.d scripts
      ansible.builtin.import_tasks: backup.d.yml
    - name: Import tasks specific to systemd
      ansible.builtin.import_tasks: systemd.yml

    - name: Build service
      ansible.builtin.command:
        cmd: docker compose build --pull
        chdir: "{{ service_path }}"
      register: cmd_result
      when: docker_rebuild
      changed_when: true

    - name: Verify service
      ansible.builtin.command:
        cmd: docker compose run --rm app check
        chdir: "{{ service_path }}"
      changed_when: false

roles/backup/tasks/systemd.yml | 20 (new file)
@@ -0,0 +1,20 @@
---
- name: Copy the system service
  ansible.builtin.template:
    src: backup.service.j2
    dest: /etc/systemd/system/backup.service
    mode: "0644"
  become: true
- name: Copy the system timer
  ansible.builtin.copy:
    src: backup.timer
    dest: /etc/systemd/system/backup.timer
    mode: "0644"
  become: true
- name: Enable the system timer
  ansible.builtin.systemd_service:
    name: backup.timer
    state: started
    enabled: true
    daemon_reload: true
  become: true

roles/backup/templates/backup.service.j2 | 11 (new file)
@@ -0,0 +1,11 @@
[Unit]
Description=Autostart several tools and services
StartLimitIntervalSec=7200
StartLimitBurst=5

[Service]
Type=oneshot
ExecStart={{ service_path }}/backup.sh
WorkingDirectory={{ service_path }}
Restart=on-failure
RestartSec=15min

roles/backup/templates/backup.sh.j2 | 68 (new executable file)
@@ -0,0 +1,68 @@
#!/usr/bin/env bash

set -e

set -a
. "{{ service_path }}/service.env"
set +a

duration_start=$(date +%s)
_duration_get () {
	duration_end=$(date +%s)
	echo "$((duration_end - duration_start))"
}

hc_url="https://hc-ping.com/$HC_UID"
uptime_kuma_url="https://status.serguzim.me/api/push/$UPTIME_KUMA_TOKEN"
_hc_ping () {
	curl -fsSL --retry 3 "$hc_url$1" >/dev/null
}
_uptime_kuma_ping () {
	duration=$(_duration_get)
	curl -fsSL --retry 3 \
		--url-query "status=$1" \
		--url-query "msg=$2" \
		--url-query "ping=${duration}000" \
		"$uptime_kuma_url" >/dev/null
}

_fail () {
	_hc_ping "/fail"
	_uptime_kuma_ping "down" "$1"
	rm -rf "$BACKUP_LOCATION"
	exit 1
}
_success () {
	_hc_ping
	_uptime_kuma_ping "up" "backup successful"
}

_hc_ping "/start"

BACKUP_LOCATION="$(mktemp -d --suffix=-backup)"
export BACKUP_LOCATION
cd "$BACKUP_LOCATION" || _fail "failed to cd to $BACKUP_LOCATION"

shopt -s nullglob
for file in "{{ service_path }}/backup.d/"*
do
	file_name="$(basename "$file")"
	echo ""
	echo "running $file_name"
	time "$file" >"/tmp/$file_name.log" || _fail "error while running $file_name"
done || true

cd "{{ service_path }}"
docker compose run --rm -v "$BACKUP_LOCATION:/backup/misc" app backup /backup || _fail "error during restic backup"

_success

rm -rf "$BACKUP_LOCATION"

echo "forgetting old backups for {{ ansible_facts.hostname }}"
docker compose run --rm app forget --host "{{ ansible_facts.hostname }}" --prune \
	--keep-last 7 \
	--keep-daily 14 \
	--keep-weekly 16 \
	--keep-monthly 12 \
	--keep-yearly 2
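
A couple of hypothetical manual checks for this backup stack (not part of the commit), run from the service directory; both reuse the restic entrypoint defined in the compose file below:

    $ cd /opt/services/backup
    $ ./backup.sh                                                  # run a backup outside the timer schedule
    $ docker compose run --rm app snapshots --host "$(hostname)"   # list restic snapshots for this host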

roles/backup/vars/main.yml | 59 (new file)
@@ -0,0 +1,59 @@
---

backup_image: registry.serguzim.me/services/backup

backup_svc:
  name: backup

backup_volumes_service: "{{ host_backup.volumes | map_backup_volumes_service }}"

backup_env:
  HC_UID: "{{ host_backup.hc_uid }}"
  UPTIME_KUMA_TOKEN: "{{ host_backup.uptime_kuma_token }}"

  RESTIC_REPOSITORY: "{{ vault_backup.restic.s3.repository }}"
  RESTIC_PASSWORD: "{{ vault_backup.restic.s3.password }}"

  AWS_ACCESS_KEY_ID: "{{ vault_backup.restic.s3.access_key_id }}"
  AWS_SECRET_ACCESS_KEY: "{{ vault_backup.restic.s3.secret_access_key }}"

  #RESTIC_S3_REPOSITORY: "{{ vault_backup.restic.s3.repository }}"
  #RESTIC_S3_PASSWORD: "{{ vault_backup.restic.s3.password }}"
  #RESITC_S3_ACCESS_KEY_ID: "{{ vault_backup.restic.s3.access_key_id }}"
  #RESITC_S3_SECRET_ACCESS_KEY: "{{ vault_backup.restic.s3.secret_access_key }}"

  #RESTIC_BORGBASE: "{{ vault_backup.restic.borgbase }}"

backup_compose:
  watchtower: false
  image: "{{ backup_image }}"
  volumes: "{{ backup_volumes_service }}"
  file:
    services:
      app:
        build:
          context: .
        entrypoint:
          - /usr/bin/restic
          - --retry-lock=1m
        restart: never
        hostname: "{{ ansible_facts.hostname }}"
      mount:
        build:
          context: .
        image: "{{ backup_image }}"
        restart: never
        hostname: "{{ ansible_facts.hostname }}"
        env_file:
          - service.env
        entrypoint:
          - /usr/bin/restic
          - --retry-lock=1m
        command:
          - mount
          - /mnt
        privileged: true
        devices:
          - /dev/fuse

    volumes: "{{ host_backup.volumes | map_backup_volumes }}"

roles/caddy/defaults/main.yml | 1 (new file)
@@ -0,0 +1 @@
caddy_ports_extra: []

roles/caddy/files/Dockerfile | 8 (new file)
@@ -0,0 +1,8 @@
FROM caddy:2-builder AS builder

RUN xcaddy build \
    --with github.com/caddy-dns/acmedns@main

FROM caddy:2-alpine

COPY --from=builder /usr/bin/caddy /usr/bin/caddy

roles/caddy/files/snippets | 46 (new file)
@@ -0,0 +1,46 @@
(auth_serguzim_me) {
	# always forward outpost path to actual outpost
	reverse_proxy /outpost.goauthentik.io/* authentik:9000

	# forward authentication to outpost
	forward_auth authentik:9000 {
		uri /outpost.goauthentik.io/auth/caddy

		# capitalization of the headers is important, otherwise they will be empty
		copy_headers X-Authentik-Username X-Authentik-Groups X-Authentik-Email X-Authentik-Name X-Authentik-Uid X-Authentik-Jwt X-Authentik-Meta-Jwks X-Authentik-Meta-Outpost X-Authentik-Meta-Provider X-Authentik-Meta-App X-Authentik-Meta-Version

		# optional, in this config trust all private ranges, should probably be set to the outposts IP
		trusted_proxies private_ranges
	}
}

(default) {
	encode zstd gzip
}

(acmedns) {
	tls {
		dns acmedns {
			username "{$ACMEDNS_USER}"
			password "{$ACMEDNS_PASS}"
			subdomain "{$ACMEDNS_SUBD}"
			server_url "{$ACMEDNS_URL}"
		}
	}
}

(faas) {
	rewrite * /function/{args[0]}{uri}
	reverse_proxy https://faas.serguzim.me {
		header_up Host {http.reverse_proxy.upstream.hostport}
	}
}

(analytics) {
	handle_path /_a/* {
		reverse_proxy https://analytics.serguzim.me {
			header_up X-Analytics-IP {remote}
			header_up Host {http.reverse_proxy.upstream.hostport}
		}
	}
}
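
The snippets above are imported by name from the generated site configs; a hypothetical site using the authentication snippet could look like this (illustrative sketch, not in the commit):

    $ cat config/conf.d/protected.serguzim.me.conf
    protected.serguzim.me {
        import default
        import auth_serguzim_me
        reverse_proxy app:8080
    }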

roles/caddy/tasks/main.yml | 57 (new file)
@@ -0,0 +1,57 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ caddy_svc }}"
    env: "{{ caddy_env }}"
    compose: "{{ caddy_compose }}"
  block:
    - name: Import prepare tasks for common service
      ansible.builtin.import_tasks: tasks/prepare-common-service.yml

    - name: Copy the Dockerfile
      ansible.builtin.copy:
        src: Dockerfile
        dest: "{{ (service_path, 'Dockerfile') | path_join }}"
        mode: "0644"
      register: cmd_result

    - name: Set the docker rebuild flag
      ansible.builtin.set_fact:
        docker_rebuild: true
      when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.

    - name: Set caddy config path
      ansible.builtin.set_fact:
        config_path: "{{ (service_path, 'config') | path_join }}"

    - name: Create config directory
      ansible.builtin.file:
        path: "{{ config_path }}"
        state: directory
        mode: "0755"

    - name: Template caddyfile
      ansible.builtin.template:
        src: Caddyfile.j2
        dest: "{{ (config_path, 'Caddyfile') | path_join }}"
        mode: "0644"
      notify: Reload caddy

    - name: Copy snippets file
      ansible.builtin.copy:
        src: snippets
        dest: "{{ (config_path, 'snippets') | path_join }}"
        mode: "0644"
      notify: Reload caddy

    - name: Create sites-config directory
      ansible.builtin.file:
        path: "{{ caddy_config_path }}"
        state: directory
        mode: "0755"

    - name: Import start tasks for common service
      ansible.builtin.import_tasks: tasks/start-common-service.yml

roles/caddy/templates/Caddyfile.j2 | 11 (new file)
@@ -0,0 +1,11 @@
{
	email {{ admin_email }}

	servers {
		metrics
		strict_sni_host on
	}
}

import /etc/caddy/snippets
import /etc/caddy/conf.d/*.conf

roles/caddy/vars/main.yml | 40 (new file)
@@ -0,0 +1,40 @@
---
caddy_acmedns_user: "{{ vault_caddy.acmedns.user }}"
caddy_acmedns_pass: "{{ vault_caddy.acmedns.pass }}"
caddy_acmedns_subd: "{{ vault_caddy.acmedns.subd }}"
caddy_acmedns_url: "https://{{ acme_dns.host }}"

caddy_ports_default:
  - 80:80
  - 443:443
  - 443:443/udp
  - "{{ host_vpn.ip }}:2019:2019"
caddy_ports: "{{ caddy_ports_default | union(caddy_ports_extra) }}"

caddy_svc:
  name: caddy

caddy_env:
  CADDY_ADMIN: 0.0.0.0:2019

  ACMEDNS_USER: "{{ caddy_acmedns_user }}"
  ACMEDNS_PASS: "{{ caddy_acmedns_pass }}"
  ACMEDNS_SUBD: "{{ caddy_acmedns_subd }}"
  ACMEDNS_URL: "{{ caddy_acmedns_url }}"

caddy_compose:
  watchtower: false
  image: registry.serguzim.me/services/caddy:2-alpine
  volumes:
    - "./config:/etc/caddy/"
    - data:/data
  file:
    services:
      app:
        build:
          context: .
        ports: "{{ caddy_ports }}"
        extra_hosts:
          - host.docker.internal:host-gateway
    volumes:
      data:

roles/coder/tasks/main.yml | 12 (new file)
@@ -0,0 +1,12 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ coder_svc }}"
    env: "{{ coder_env }}"
    compose: "{{ coder_compose }}"
  block:
    - name: Import tasks to deploy common service
      ansible.builtin.import_tasks: tasks/deploy-common-service.yml

roles/coder/vars/main.yml | 35 (new file)
@@ -0,0 +1,35 @@
---
coder_svc:
  domain: coder.serguzim.me
  additional_domains:
    - "*.coder.serguzim.me"
  caddy_extra: import acmedns
  name: coder
  port: 7080
  db:
    host: "{{ postgres.host }}"
    port: "{{ postgres.port }}"
  ssh_port: 22
  ssh_port_alt: 3022

coder_env:
  CODER_ADDRESS: "0.0.0.0:7080"
  CODER_ACCESS_URL: https://{{ svc.domain }}
  CODER_WILDCARD_ACCESS_URL: "*.{{ svc.domain }}"

  CODER_PG_CONNECTION_URL: postgres://{{ vault_coder.db.user }}:{{ vault_coder.db.pass }}@{{ svc.db.host }}:{{ svc.db.port }}/coder?sslmode=verify-full

  CODER_OIDC_ISSUER_URL: https://auth.serguzim.me/application/o/coder-serguzim-me/
  CODER_OIDC_CLIENT_ID: "{{ vault_coder.oidc_client.id }}"
  CODER_OIDC_CLIENT_SECRET: "{{ vault_coder.oidc_client.secret }}"

coder_compose:
  watchtower: true
  image: ghcr.io/coder/coder:latest
  volumes:
    - /var/run/docker.sock:/var/run/docker.sock
  file:
    services:
      app:
        group_add:
          - "972" # docker group on host
roles/common/handlers/main.yml
Normal file
7
roles/common/handlers/main.yml
Normal file
|
@ -0,0 +1,7 @@
|
||||||
|
---
|
||||||
|
- name: Reload caddy
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: docker compose exec app sh -c "caddy validate --config /etc/caddy/Caddyfile && caddy reload --config /etc/caddy/Caddyfile"
|
||||||
|
chdir: "{{ caddy_path }}"
|
||||||
|
when: "'local-dev' != inventory_hostname"
|
||||||
|
changed_when: true
|

roles/common/tasks/main.yml | 9 (new file)
@@ -0,0 +1,9 @@
---
- name: Create the services directory
  ansible.builtin.file:
    path: "{{ services_path }}"
    state: directory
    mode: "0755"
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
  become: true

roles/docker/tasks/main.yml | 55 (new file)
@@ -0,0 +1,55 @@
- name: Install aptitude
  apt:
    name: aptitude
    state: latest
    update_cache: true
  become: true

- name: Install required system packages
  apt:
    pkg:
      - apt-transport-https
      - ca-certificates
      - curl
      - software-properties-common
      - python3-pip
      - virtualenv
      - python3-setuptools
    state: latest
    update_cache: true
  become: true

- name: Add Docker GPG apt Key
  apt_key:
    url: https://download.docker.com/linux/ubuntu/gpg
    state: present
  become: true

- name: Add Docker Repository
  apt_repository:
    repo: deb https://download.docker.com/linux/ubuntu focal stable
    state: present
  become: true

- name: Update apt and install docker packages
  apt:
    pkg:
      - docker-ce
      - docker-ce-cli
      - containerd.io
      - docker-buildx-plugin
      - docker-compose-plugin
    state: latest
    update_cache: true
  become: true

- name: Add user to the Docker group
  user:
    name: "{{ ansible_user }}"
    groups: docker
    append: yes
  become: true

- name: Create a network
  community.docker.docker_network:
    name: apps
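
Hypothetical post-run verification for the docker role above (editor's illustration, not part of the commit):

    $ docker --version                 # docker-ce installed
    $ docker network inspect apps      # the shared "apps" network exists
    $ id -nG "$USER" | grep -qw docker && echo "user is in the docker group"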

roles/extra_services/tasks/main.yml | 11 (new file)
@@ -0,0 +1,11 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy extra services
  block:
    - name: Import tasks to template the site and functions for the reverse proxy
      ansible.builtin.include_tasks: tasks/steps/template-site-config.yml
      loop: "{{ extra_services_all }}"
      loop_control:
        loop_var: svc

roles/extra_services/vars/main.yml | 14 (new file)
@@ -0,0 +1,14 @@
---
extra_services_default:
  - domain: cloud-old.serguzim.me
    docker_host: nextcloud
    port: 80
    caddy_extra: |
      redir /.well-known/host-meta /public.php?service=host-meta 301
      redir /.well-known/host-meta.json /public.php?service=host-meta-json 301
      redir /.well-known/webfinger /public.php?service=webfinger 301
      redir /.well-known/carddav /remote.php/dav/ 301
      redir /.well-known/caldav /remote.php/dav/ 301

extra_services_hidden: "{{ vault_extra_services }}"
extra_services_all: "{{ extra_services_default | union(extra_services_hidden) }}"

roles/faas/tasks/main.yml | 10 (new file)
@@ -0,0 +1,10 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ faas_svc }}"
  block:
    - name: Import tasks to template the site and functions for the reverse proxy
      ansible.builtin.import_tasks: tasks/steps/template-site-config.yml

roles/faas/vars/main.yml | 24 (new file)
@@ -0,0 +1,24 @@
---
faas_svc:
  name: faas
  domain: faas.serguzim.me
  docker_host: host.docker.internal
  port: 8080
  extra_svcs:
    - domain: serguzim.me
      faas_function: webpage-serguzim-me
      www_domain: true
      hsts: true
      caddy_extra: |
        header /.well-known/* Access-Control-Allow-Origin *

        handle /.well-known/webfinger {
          map {query.resource} {user} {
            acct:tobias@msrg.cc serguzim
            acct:serguzim@msrg.cc serguzim
          }
          rewrite * /.well-known/webfinger/{user}.json
          import faas webpage-msrg-cc
        }
    - domain: xn--sder-5qa.stream
      faas_function: webpage-soeder-stream

(file name truncated in source) | 1 (new file)
@@ -0,0 +1 @@
<a class="item" href="https://www.serguzim.me/imprint/">Impressum</a>

roles/forgejo/tasks/main.yml | 39 (new file)
@@ -0,0 +1,39 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ forgejo_svc }}"
    env: "{{ forgejo_env }}"
    compose: "{{ forgejo_compose }}"
  block:
    - name: Import prepare tasks for common service
      ansible.builtin.import_tasks: tasks/prepare-common-service.yml

    - name: Copy the template files
      ansible.builtin.copy:
        src: templates/
        dest: "{{ (service_path, 'templates') | path_join }}"
        mode: "0644"
      register: cmd_result

    - name: Set the docker force-recreate flag
      ansible.builtin.set_fact:
        docker_force_recreate: --force-recreate
      when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.

    - name: Template the custom footer
      ansible.builtin.template:
        src: footer.tmpl.j2
        dest: "{{ (service_path, 'templates', 'custom', 'footer.tmpl') | path_join }}"
        mode: "0644"
      register: cmd_result

    - name: Set the docker force-recreate flag
      ansible.builtin.set_fact:
        docker_force_recreate: --force-recreate
      when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.

    - name: Import start tasks for common service
      ansible.builtin.import_tasks: tasks/start-common-service.yml

roles/forgejo/templates/footer.tmpl.j2 | 1 (new file)
@@ -0,0 +1 @@
<script async src="/_a/script.js" data-website-id="{{ vault_forgejo.umami }}"></script>

roles/forgejo/vars/main.yml | 98 (new file)
@@ -0,0 +1,98 @@
---
forgejo_svc:
  domain: git.serguzim.me
  name: forgejo
  port: 3000
  caddy_extra: |
    import analytics
    header /attachments/* Access-Control-Allow-Origin *
  db:
    host: "{{ postgres.host }}"
    port: "{{ postgres.port }}"
  ssh_port: 22

forgejo_env:
  FORGEJO__database__DB_TYPE: postgres
  FORGEJO__database__HOST: "{{ svc.db.host }}:{{ svc.db.port }}"
  FORGEJO__database__NAME: forgejo
  FORGEJO__database__USER: "{{ vault_forgejo.db.user }}"
  FORGEJO__database__PASSWD: "{{ vault_forgejo.db.pass }}"
  FORGEJO__database__SSL_MODE: verify-full

  FORGEJO__repository__ENABLE_PUSH_CREATE_USER: true
  FORGEJO__repository__ENABLE_PUSH_CREATE_ORG: true
  FORGEJO__repository__DEFAULT_BRANCH: main

  FORGEJO__cors__ENABLED: true
  FORGEJO__cors__SCHEME: https

  FORGEJO__ui__DEFAULT_THEME: forgejo-dark

  FORGEJO__server__DOMAIN: "{{ svc.domain }}"
  FORGEJO__server__SSH_DOMAIN: "{{ svc.domain }}"
  FORGEJO__server__SSH_PORT: "{{ svc.ssh_port }}"
  FORGEJO__server__ROOT_URL: https://{{ svc.domain }}
  FORGEJO__server__OFFLINE_MODE: true
  FORGEJO__server__LFS_JWT_SECRET: "{{ vault_forgejo.server_lfs_jwt_secret }}"
  FORGEJO__server__LFS_START_SERVER: true

  FORGEJO__security__INSTALL_LOCK: true
  FORGEJO__security__INTERNAL_TOKEN: "{{ vault_forgejo.security_internal_token }}"
  FORGEJO__security__SECRET_KEY: "{{ vault_forgejo.security_secret_key }}"

  FORGEJO__openid__ENABLE_OPENID_SIGNUP: true
  FORGEJO__openid__ENABLE_OPENID_SIGNIN: false

  FORGEJO__service__ALLOW_ONLY_EXTERNAL_REGISTRATION: true
  FORGEJO__service__ENABLE_BASIC_AUTHENTICATION: false
  FORGEJO__service__DEFAULT_KEEP_EMAIL_PRIVATE: true
  FORGEJO__service__NO_REPLY_ADDRESS: discard.msrg.cc

  FORGEJO__webhook__DELIVER_TIMEOUT: 60

  FORGEJO__mailer__ENABLED: true
  FORGEJO__mailer__PROTOCOL: smtp+starttls
  FORGEJO__mailer__SMTP_ADDR: mail.serguzim.me
  FORGEJO__mailer__SMTP_PORT: 587
  FORGEJO__mailer__FROM: Forgejo <git@serguzim.me>
  FORGEJO__mailer__USER: git@serguzim.me
  FORGEJO__mailer__PASSWD: "{{ vault_forgejo.mailer_passwd }}"
  FORGEJO__mailer__SEND_AS_PLAIN_TEXT: true

  FORGEJO__picture__DISABLE_GRAVATAR: true

  FORGEJO__attachment__MAX_FILES: 10

  FORGEJO__oauth2__JWT_SECRET: "{{ vault_forgejo.oauth2_jwt_secret }}"

  FORGEJO__metrics__ENABLED: true
  FORGEJO__metrics__TOKEN: "{{ vault_metrics_token }}"

  FORGEJO__actions__ENABLED: true

  FORGEJO__storage__STORAGE_TYPE: minio
  FORGEJO__storage__MINIO_ENDPOINT: s3.nl-ams.scw.cloud
  FORGEJO__storage__MINIO_ACCESS_KEY_ID: "{{ opentofu.scaleway_service_keys.forgejo.access_key }}"
  FORGEJO__storage__MINIO_SECRET_ACCESS_KEY: "{{ opentofu.scaleway_service_keys.forgejo.secret_key }}"
  FORGEJO__storage__MINIO_BUCKET: forgejo.serguzim.me
  FORGEJO__storage__MINIO_LOCATION: ns-ams
  FORGEJO__storage__MINIO_USE_SSL: true

  FORGEJO__other__SHOW_FOOTER_VERSION: true
  FORGEJO__other__SHOW_FOOTER_TEMPLATE_LOAD_TIME: false

forgejo_compose:
  watchtower: true
  image: codeberg.org/forgejo/forgejo:7.0
  volumes:
    - data:/data
    - ./templates:/data/gitea/templates
    - /etc/timezone:/etc/timezone:ro
    - /etc/localtime:/etc/localtime:ro
  file:
    services:
      app:
        ports:
          - "{{ svc.ssh_port }}:{{ svc.ssh_port }}"
    volumes:
      data:
81 roles/forgejo_runner/files/config.yml Normal file
@@ -0,0 +1,81 @@
log:
  # The level of logging, can be trace, debug, info, warn, error, fatal
  level: info

runner:
  # Where to store the registration result.
  file: /data/.runner
  # Execute how many tasks concurrently at the same time.
  capacity: 1
  # Extra environment variables to run jobs.
  #envs:
  #  A_TEST_ENV_NAME_1: a_test_env_value_1
  #  A_TEST_ENV_NAME_2: a_test_env_value_2
  # Extra environment variables to run jobs from a file.
  # It will be ignored if it's empty or the file doesn't exist.
  #env_file: .env
  # The timeout for a job to be finished.
  # Please note that the Forgejo instance also has a timeout (3h by default) for the job.
  # So the job could be stopped by the Forgejo instance if its timeout is shorter than this.
  timeout: 3h
  # Whether to skip verifying the TLS certificate of the Forgejo instance.
  insecure: false
  # The timeout for fetching the job from the Forgejo instance.
  fetch_timeout: 5s
  # The interval for fetching the job from the Forgejo instance.
  fetch_interval: 2s
  # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
  # Like: ["macos-arm64:host", "ubuntu-latest:docker://node:16-bullseye", "ubuntu-22.04:docker://node:16-bullseye"]
  # If it's empty when registering, it will ask for inputting labels.
  # If it's empty when executing `daemon`, it will use the labels in the `.runner` file.
  labels: []

cache:
  # Enable cache server to use actions/cache.
  enabled: true
  # The directory to store the cache data.
  # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
  dir: ""
  # The host of the cache server.
  # It's not for the address to listen, but the address to connect from job containers.
  # So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
  host: ""
  # The port of the cache server.
  # 0 means to use a random available port.
  port: 0

container:
  # Specifies the network to which the container will connect.
  # Could be host, bridge or the name of a custom network.
  # If it's empty, create a network automatically.
  network: ""
  # Whether to create networks with IPv6 enabled. Requires the Docker daemon to be set up accordingly.
  # Only takes effect if "network" is set to "".
  enable_ipv6: false
  # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
  privileged: false
  # And other options to be used when the container is started (eg, --add-host=my.forgejo.url:host-gateway).
  options:
  # The parent directory of a job's working directory.
  # If it's empty, /workspace will be used.
  workdir_parent:
  # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
  # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
  # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
  # valid_volumes:
  #   - data
  #   - /src/*.json
  # If you want to allow any volume, please use the following configuration:
  # valid_volumes:
  #   - '**'
  valid_volumes: []
  # Overrides the docker client host with the specified one.
  # If it's empty, act_runner will find an available docker host automatically.
  # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
  # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
  docker_host: ""

host:
  # The parent directory of a job's working directory.
  # If it's empty, $HOME/.cache/act/ will be used.
  workdir_parent:
42 roles/forgejo_runner/tasks/main.yml Normal file
@@ -0,0 +1,42 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ forgejo_runner_svc }}"
    env: "{{ forgejo_runner_env }}"
    compose: "{{ forgejo_runner_compose }}"
  block:
    - name: Import tasks to create service directory
      ansible.builtin.import_tasks: tasks/steps/create-service-directory.yml
    - name: Import tasks to template docker compose file
      ansible.builtin.import_tasks: tasks/steps/template-docker-compose.yml

    - name: Copy the config
      ansible.builtin.copy:
        src: config.yml
        dest: "{{ (service_path, 'config.yml') | path_join }}"
        mode: "0755"

    - name: Check if service.env already exists
      ansible.builtin.stat:
        path: "{{ (service_path, 'service.env') | path_join }}"
      register: env_file

    - name: Import tasks to prompt for the registration token
      ansible.builtin.import_tasks: tasks/prompt-registration-token.yml
      when: not env_file.stat.exists or force_forgejo_runner_registration | default(False)

    - name: Import tasks to create a service.env file
      ansible.builtin.import_tasks: tasks/steps/template-service-env.yml
    - name: Import start tasks for common service
      ansible.builtin.import_tasks: tasks/start-common-service.yml

    - name: Register runner
      ansible.builtin.command:
        cmd: docker compose run --rm -it app sh -c
          'forgejo-runner register --no-interactive --token ${FORGEJO_RUNNER_REGISTRATION_TOKEN} --instance ${FORGEJO_INSTANCE_URL}'
        chdir: "{{ service_path }}"
      when: not env_file.stat.exists or force_forgejo_runner_registration | default(False)
      changed_when: true # "when" checks enough. We are sure to change something here.
10 roles/forgejo_runner/tasks/prompt-registration-token.yml Normal file
@@ -0,0 +1,10 @@
---
- name: Input forgejo-runner registration token
  ansible.builtin.pause:
    prompt: Enter the forgejo-runner registration token
    echo: false
  register: prompt_registration_token

- name: Put registration token into env vars
  ansible.builtin.set_fact:
    forgejo_runner_env: "{{ forgejo_runner_env | combine({'FORGEJO_RUNNER_REGISTRATION_TOKEN': prompt_registration_token.user_input}, recursive=True) }}"
32 roles/forgejo_runner/vars/main.yml Normal file
@@ -0,0 +1,32 @@
---
forgejo_runner_svc:
  name: forgejo-runner

forgejo_runner_env:
  FORGEJO_INSTANCE_URL: https://git.serguzim.me/
  FORGEJO_RUNNER_REGISTRATION_TOKEN:
  DOCKER_HOST: tcp://docker-in-docker:2375

forgejo_runner_compose:
  watchtower: true
  image: code.forgejo.org/forgejo/runner:3.3.0
  volumes:
    - ./config.yml:/config/config.yml
    - data:/data
  file:
    services:
      app:
        hostname: "{{ ansible_facts.hostname }}"
        command: forgejo-runner --config /config/config.yml daemon
        depends_on:
          - docker-in-docker
        links:
          - docker-in-docker
      docker-in-docker:
        image: docker:dind
        privileged: true
        command: dockerd -H tcp://0.0.0.0:2375 --tls=false
        networks:
          default:
    volumes:
      data:
44 roles/harbor/tasks/main.yml Normal file
@@ -0,0 +1,44 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ harbor_svc }}"
    env: "{{ harbor_env }}"
    yml: "{{ harbor_yml }}"
  block:
    - name: Import prepare tasks for common service
      ansible.builtin.import_tasks: tasks/prepare-common-service.yml

    - name: Import tasks to template the site for the reverse proxy
      ansible.builtin.import_tasks: tasks/steps/template-site-config.yml

    - name: Template config
      ansible.builtin.template:
        src: yml.j2
        dest: "{{ (service_path, 'harbor.yml') | path_join }}"
        mode: "0644"

    - name: Download harbor
      ansible.builtin.unarchive:
        src: https://github.com/goharbor/harbor/releases/download/v{{ svc.harbor_version }}/harbor-online-installer-v{{ svc.harbor_version }}.tgz
        dest: "{{ service_path }}"
        remote_src: true

    - name: Run the harbor prepare command
      ansible.builtin.command:
        cmd: "{{ service_path }}/harbor/prepare"
        chdir: "{{ service_path }}"
        creates: "{{ (service_path, 'docker-compose.yml') | path_join }}"
      environment:
        HARBOR_BUNDLE_DIR: "{{ service_path }}"

    - name: Run the harbor install command
      ansible.builtin.command:
        cmd: "{{ service_path }}/harbor/install.sh"
        chdir: "{{ service_path }}"
      environment:
        HARBOR_BUNDLE_DIR: "{{ service_path }}"
      become: true
      changed_when: true # TODO find way to recognize need to run install command
103 roles/harbor/vars/main.yml Normal file
@@ -0,0 +1,103 @@
---
harbor_port_http: 20080
harbor_port_https: 20443
harbor_port_metrics: 29000

harbor_db_host: "{{ postgres.host }}"
harbor_db_port: "{{ postgres.port }}"
harbor_db_database: harbor
harbor_db_user: "{{ vault_harbor.db.user }}"
harbor_db_pass: "{{ vault_harbor.db.pass }}"
harbor_version: 2.9.0

harbor_svc:
  name: harbor
  domain: registry.serguzim.me
  caddy_extra: |
    reverse_proxy /metrics host.docker.internal:{{ harbor_port_metrics }}
    reverse_proxy host.docker.internal:{{ harbor_port_https }} {
      transport http {
        tls
        tls_server_name registry.serguzim.me
      }
    }
  caddy_default: false
  db:
    host: "{{ postgres.host }}"
    port: "{{ postgres.port }}"
    database: harbor
    user: "{{ vault_harbor.db.user }}"
    pass: "{{ vault_harbor.db.pass }}"
  harbor_version: 2.9.0

harbor_yml:
  hostname: "{{ harbor_svc.domain }}"
  http:
    port: "{{ harbor_port_http }}"
  https:
    port: "{{ harbor_port_https }}"
    certificate: "{{ (service_path, 'server.crt') | path_join }}"
    private_key: "{{ (service_path, 'server.key') | path_join }}"
  external_url: https://registry.serguzim.me
  harbor_admin_password: "{{ vault_harbor.admin_password }}"
  data_volume: "{{ (service_path, 'data') | path_join }}"
  storage_service:
    s3:
      accesskey: "{{ vault_harbor.minio.accesskey }}"
      secretkey: "{{ vault_harbor.minio.secretkey }}"
      region: de-contabo-1
      regionendpoint: https://s3.serguzim.me
      bucket: registry
      secure: true
  trivy:
    ignore_unfixed: false
    skip_update: false
    offline_scan: false
    security_check: vuln
    insecure: false
  jobservice:
    max_job_workers: 10
    job_loggers:
      - STD_OUTPUT
      - FILE
    logger_sweeper_duration: 1
  notification:
    webhook_job_max_retry: 3
    webhook_job_http_client_timeout: 3
  log:
    level: info
    local:
      rotate_count: 50
      rotate_size: 200M
      location: /var/log/harbor
  _version: "{{ harbor_version }}"
  external_database:
    harbor:
      host: "{{ harbor_db_host }}"
      port: "{{ harbor_db_port }}"
      db_name: "{{ harbor_db_database }}"
      username: "{{ harbor_db_user }}"
      password: "{{ harbor_db_pass }}"
      ssl_mode: verify-full
      max_idle_conns: 2
      max_open_conns: 0
  proxy:
    http_proxy:
    https_proxy:
    no_proxy:
    components:
      - core
      - jobservice
      - trivy
  metric:
    enabled: true
    port: "{{ harbor_port_metrics }}"
    path: /metrics
  upload_purging:
    enabled: true
    age: 168h
    interval: 24h
    dryrun: false
  cache:
    enabled: false
    expire_hours: 24
7 roles/healthcheck/files/Dockerfile Normal file
@@ -0,0 +1,7 @@
FROM ubuntu

ENV DEBIAN_FRONTEND=noninteractive

RUN apt update -y \
    && apt install -y curl dnsutils msmtp gettext-base python3-pip python3-requests \
    && pip install matrix-nio
54 roles/healthcheck/files/data/http Executable file
@@ -0,0 +1,54 @@
#!/usr/bin/sh

cd /opt/ || exit

hc_url="https://hc-ping.com/$HTTP_HC_UID"
services_down=""
error=""

alias curl_hc='curl -LA "$USER_AGENT" --retry 3'

check_url ()
{
    url="https://$1$2"
    printf "checking url %s ." "$url"
    dig A "$1" >/dev/null
    printf "."
    result=$(curl -LsSfv --connect-timeout 30 --retry 3 "$url" 2>&1)
    code="$?"
    printf ".\n"
    #shellcheck disable=SC2181
    if [ "$code" = "0" ]
    then
        echo "... good"
    else
        services_down=$(printf "%s\n%s" "$services_down" "$1")
        error=$(printf "%s\n==========\n%s:\n%s" "$error" "$1" "$result")
        echo "... bad"
    fi
}

#check_url "acme.serguzim.me" "/health"
check_url "analytics.serguzim.me"
check_url "auth.serguzim.me"
check_url "ci.serguzim.me"
#check_url "cloud.serguzim.me" "/login?noredir=1"
check_url "git.serguzim.me"
check_url "hook.serguzim.me"
check_url "mail.serguzim.me"
#check_url "msrg.cc" # disabled because it keeps creating false alerts
check_url "registry.serguzim.me" "/account/sign-in"
check_url "rss.serguzim.me"
#check_url "serguzim.me" # disabled because it keeps creating false alerts
check_url "status.serguzim.me" "/status/serguzim-net"
check_url "tick.serguzim.me"
check_url "wiki.serguzim.me"
check_url "www.reitanlage-oranienburg.de"

if [ "$error" = "" ]
then
    curl_hc "$hc_url" >/dev/null
    echo "ALL GOOD"
else
    curl_hc --data-raw "$services_down$error" "$hc_url/fail" >/dev/null
fi
17 roles/healthcheck/files/data/mail Executable file
@@ -0,0 +1,17 @@
#!/usr/bin/sh

cd /opt/ || exit

hc_url="https://hc-ping.com/$MAIL_HC_UID"

alias curl_hc='curl -LA "$USER_AGENT" --retry 3'

envsubst < template.msmtprc > /tmp/msmtprc
envsubst < mailcheck.template.mail > /tmp/mailcheck.mail

result=$(msmtp -C /tmp/msmtprc -a default "$MAIL_HC_UID@hc-ping.com" < /tmp/mailcheck.mail 2>&1)
if [ "$?" != "0" ]
then
    echo "$result"
    curl_hc --data-raw "$result" "$hc_url/fail" >/dev/null
fi
5 roles/healthcheck/files/data/mailcheck.template.mail Normal file
@@ -0,0 +1,5 @@
To: ${MAIL_HC_UID}@hc-ping.com
From: ${MAIL_USER}
Subject: Healthcheck

Mailserver alive
45 roles/healthcheck/files/data/matrix Executable file
@@ -0,0 +1,45 @@
#!/usr/bin/python3

import datetime
import os
import requests
import sys

import asyncio
from nio import AsyncClient, RoomMessageNotice

healthcheck_url = "https://hc-ping.com/" + os.environ['MATRIX_HC_UID']

def send_ping(success, msg=""):
    url = healthcheck_url
    if not success:
        url += "/fail"

    requests.get(url, data=msg, headers={'user-agent': os.environ['USER_AGENT']})

async def main():
    try:
        client = AsyncClient(os.environ['MATRIX_SERVER'])
        client.access_token = os.environ['MATRIX_TOKEN']
        client.device_id = os.environ['USER_AGENT']
        await client.room_send(
            room_id = os.environ['MATRIX_ROOM'],
            message_type = "m.room.message",
            content = {
                "msgtype": "m.text",
                "body": "!ping"
            }
        )
    except Exception as e:
        print(e)
        print("exception during login or sending")
        send_ping(False, str(e))
        sys.exit(1)
    await client.close()

    send_ping(True)
    sys.exit(0)


asyncio.new_event_loop().run_until_complete(main())
13 roles/healthcheck/files/data/template.msmtprc Normal file
@@ -0,0 +1,13 @@
defaults
auth on
tls on
tls_trust_file /etc/ssl/certs/ca-certificates.crt
logfile /tmp/msmtp.log

account default
host ${MAIL_HOST}
port ${MAIL_PORT}
tls_starttls on
from ${MAIL_USER}
user ${MAIL_USER}
password ${MAIL_PASS}
24 roles/healthcheck/files/docker-compose.yml Normal file
@@ -0,0 +1,24 @@
version: "3.7"

x-common-elements:
  &common-elements
  build:
    context: .
  image: registry.serguzim.me/services/healthcheck
  restart: "no"
  env_file:
    - service.env
  volumes:
    - ./data/:/opt
  network_mode: host

services:
  http:
    <<: *common-elements
    command: "/opt/http"
  matrix:
    <<: *common-elements
    command: "/opt/matrix"
  mail:
    <<: *common-elements
    command: "/opt/mail"
4 roles/healthcheck/files/healthcheck@.timer Normal file
@@ -0,0 +1,4 @@
[Timer]
OnCalendar=*:0/5
[Install]
WantedBy=timers.target
16 roles/healthcheck/tasks/docker.yml Normal file
@@ -0,0 +1,16 @@
---
- name: Copy the docker-compose file
  ansible.builtin.copy:
    src: docker-compose.yml
    dest: "{{ (service_path, 'docker-compose.yml') | path_join }}"
    mode: "0644"
- name: Copy the Dockerfile
  ansible.builtin.copy:
    src: Dockerfile
    dest: "{{ (service_path, 'Dockerfile') | path_join }}"
    mode: "0644"
- name: Copy the data files
  ansible.builtin.copy:
    src: data
    dest: "{{ service_path }}"
    mode: "0755"
28 roles/healthcheck/tasks/main.yml Normal file
@@ -0,0 +1,28 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ healthcheck_svc }}"
    env: "{{ healthcheck_env }}"
  block:
    - name: Import tasks to create service directory
      ansible.builtin.import_tasks: tasks/steps/create-service-directory.yml

    - name: Import tasks specific to docker
      ansible.builtin.import_tasks: docker.yml
    - name: Import tasks specific to systemd
      ansible.builtin.import_tasks: systemd.yml

    - name: Import tasks to create a service.env file
      ansible.builtin.import_tasks: tasks/steps/template-service-env.yml

    - name: Build service
      ansible.builtin.command:
        cmd: docker compose build --pull
        chdir: "{{ service_path }}"
      when:
        - "'local-dev' != inventory_hostname"
      register: cmd_result
      changed_when: true
21 roles/healthcheck/tasks/systemd.yml Normal file
@@ -0,0 +1,21 @@
---
- name: Template the system service
  ansible.builtin.template:
    src: healthcheck@.service.j2
    dest: /etc/systemd/system/healthcheck@.service
    mode: "0644"
  become: true
- name: Copy the system timer
  ansible.builtin.copy:
    src: healthcheck@.timer
    dest: /etc/systemd/system/healthcheck@.timer
    mode: "0644"
  become: true
- name: Enable the system timer
  ansible.builtin.systemd_service:
    name: healthcheck@{{ item }}.timer
    state: started
    enabled: true
    daemon_reload: true
  loop: "{{ healthcheck_svc.checks }}"
  become: true
5 roles/healthcheck/templates/healthcheck@.service.j2 Normal file
@@ -0,0 +1,5 @@
[Service]
Type=simple
ExecStart=/usr/bin/docker compose run --rm %i
WorkingDirectory={{ service_path }}
RuntimeMaxSec=300
24 roles/healthcheck/vars/main.yml Normal file
@@ -0,0 +1,24 @@
---
healthcheck_svc:
  name: healthcheck
  checks:
    - http
    - mail
    - matrix

healthcheck_env:
  USER_AGENT: healthcheck-bot for serguzim.net

  HTTP_HC_UID: "{{ vault_healthcheck.hc_uid.http }}"

  MATRIX_SERVER: https://matrix.serguzim.me
  MATRIX_SERVER_FEDTESTER: msrg.cc
  MATRIX_HC_UID: "{{ vault_healthcheck.hc_uid.matrix }}"
  MATRIX_TOKEN: "{{ vault_healthcheck.matrix.token }}"
  MATRIX_ROOM: "{{ vault_healthcheck.matrix.room }}"

  MAIL_HC_UID: "{{ vault_healthcheck.hc_uid.mail }}"
  MAIL_HOST: "{{ mailer.host }}"
  MAIL_PORT: "{{ mailer.port }}"
  MAIL_USER: "{{ vault_healthcheck.mailer.user }}"
  MAIL_PASS: "{{ vault_healthcheck.mailer.pass }}"
12 roles/homebox/tasks/main.yml Normal file
@@ -0,0 +1,12 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ homebox_svc }}"
    env: "{{ homebox_env }}"
    compose: "{{ homebox_compose }}"
  block:
    - name: Import tasks to deploy common service
      ansible.builtin.import_tasks: tasks/deploy-common-service.yml
23 roles/homebox/vars/main.yml Normal file
@@ -0,0 +1,23 @@
---
homebox_svc:
  domain: inventory.serguzim.me
  name: homebox
  port: 7745

homebox_env:
  HBOX_OPTIONS_ALLOW_REGISTRATION: false
  HBOX_MAILER_HOST: mail.serguzim.me
  HBOX_MAILER_PORT: 587
  HBOX_MAILER_USERNAME: inventory@serguzim.me
  HBOX_MAILER_PASSWORD: "{{ vault_homebox.mailer_passwd }}"
  HBOX_MAILER_FROM: Homebox <inventory@serguzim.me>
  HBOX_SWAGGER_SCHEMA: https

homebox_compose:
  watchtower: true
  image: ghcr.io/hay-kot/homebox:latest-rootless
  volumes:
    - data:/data
  file:
    volumes:
      data:
12 roles/immich/tasks/main.yml Normal file
@@ -0,0 +1,12 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ immich_svc }}"
    env: "{{ immich_env }}"
    compose: "{{ immich_compose }}"
  block:
    - name: Import tasks to deploy common service
      ansible.builtin.import_tasks: tasks/deploy-common-service.yml
74 roles/immich/vars/main.yml Normal file
@@ -0,0 +1,74 @@
---
immich_db_host: database
immich_db_db: immich
immich_db_user: "{{ vault_immich.db.user }}"
immich_db_pass: "{{ vault_immich.db.pass }}"

immich_svc:
  domain: gallery.serguzim.me
  name: immich
  port: 3001
  version: release
  db:
    host: "{{ postgres.host }}"
    database: immich


immich_env:
  # IMMICH_CONFIG_FILE: /immich.json

  TZ: "{{ timezone }}"

  DB_HOSTNAME: "{{ immich_db_host }}"
  DB_DATABASE_NAME: "{{ immich_db_db }}"
  DB_USERNAME: "{{ immich_db_user }}"
  DB_PASSWORD: "{{ immich_db_pass }}"

  POSTGRES_DB: "{{ immich_db_db }}"
  POSTGRES_USER: "{{ immich_db_user }}"
  POSTGRES_PASSWORD: "{{ immich_db_pass }}"

  REDIS_HOSTNAME: redis

immich_compose:
  watchtower: false
  image: ghcr.io/immich-app/immich-server:release
  volumes:
    - upload:/usr/src/app/upload
  file:
    services:
      app:
        depends_on:
          - database
          - redis

      machine-learning:
        image: ghcr.io/immich-app/immich-machine-learning:release
        volumes:
          - model-cache:/cache
        env_file:
          - service.env
        restart: always
        networks:
          default:

      redis:
        image: redis:6.2-alpine
        restart: always
        networks:
          default:

      database:
        image: tensorchord/pgvecto-rs:pg16-v0.2.0
        env_file:
          - service.env
        volumes:
          - pgdata:/var/lib/postgresql/data
        restart: always
        networks:
          default:

    volumes:
      upload:
      pgdata:
      model-cache:
28 roles/influxdb/tasks/main.yml Normal file
@@ -0,0 +1,28 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ influxdb_svc }}"
    env: "{{ influxdb_env }}"
    compose: "{{ influxdb_compose }}"
    yml: "{{ influxdb_yml }}"
  block:
    - name: Import prepare tasks for common service
      ansible.builtin.import_tasks: tasks/prepare-common-service.yml

    - name: Template config
      ansible.builtin.template:
        src: yml.j2
        dest: "{{ (service_path, 'influxdb.yml') | path_join }}"
        mode: "0600"
      register: cmd_result

    - name: Set the docker force-recreate flag
      ansible.builtin.set_fact:
        docker_force_recreate: --force-recreate
      when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.

    - name: Import start tasks for common service
      ansible.builtin.import_tasks: tasks/start-common-service.yml
73 roles/influxdb/vars/main.yml Normal file
@@ -0,0 +1,73 @@
---
influxdb_svc:
  domain: tick.serguzim.me
  name: influxdb
  port: 8086
  data_dir: /var/lib/influxdb2

influxdb_yml:
  assets-path: ""
  bolt-path: "{{ (svc.data_dir, 'influxd.bolt') | path_join }}"
  e2e-testing: false
  engine-path: "{{ (svc.data_dir, 'engine') | path_join }}"
  feature-flags: {}
  http-bind-address: "0.0.0.0:{{ svc.port }}"
  influxql-max-select-buckets: 0
  influxql-max-select-point: 0
  influxql-max-select-series: 0
  key-name: ""
  log-level: info
  nats-max-payload-bytes: 1048576
  nats-port: 4222
  no-tasks: false
  query-concurrency: 10
  query-initial-memory-bytes: 0
  query-max-memory-bytes: 0
  query-memory-bytes: 9223372036854775807
  query-queue-size: 10
  reporting-disabled: false
  secret-store: bolt
  session-length: 60
  session-renew-disabled: false
  storage-cache-max-memory-size: 1073741824
  storage-cache-snapshot-memory-size: 26214400
  storage-cache-snapshot-write-cold-duration: 10m0s
  storage-compact-full-write-cold-duration: 4h0m0s
  storage-compact-throughput-burst: 50331648
  storage-max-concurrent-compactions: 0
  storage-max-index-log-file-size: 1048576
  storage-retention-check-interval: 30m0s
  storage-series-file-max-concurrent-snapshot-compactions: 0
  storage-series-id-set-cache-size: 0
  storage-shard-precreator-advance-period: 30m0s
  storage-shard-precreator-check-interval: 10m0s
  storage-tsm-use-madv-willneed: false
  storage-validate-keys: false
  storage-wal-fsync-delay: "0s"
  store: bolt
  testing-always-allow-setup: false
  tls-cert: ""
  tls-key: ""
  tls-min-version: "1.2"
  tls-strict-ciphers: false
  tracing-type: ""
  vault-addr: ""
  vault-cacert: ""
  vault-capath: ""
  vault-client-cert: ""
  vault-client-key: ""
  vault-client-timeout: "0s"
  vault-max-retries: 0
  vault-skip-verify: false
  vault-tls-server-name: ""
  vault-token: ""

influxdb_compose:
  watchtower: false
  image: influxdb:2.7
  volumes:
    - ./influxdb.yml:/etc/influxdb2/config.yml
    - data:{{ svc.data_dir }}
  file:
    volumes:
      data:
12 roles/jellyfin/tasks/main.yml Normal file
@@ -0,0 +1,12 @@
---
- name: Set common facts
  ansible.builtin.import_tasks: tasks/set-default-facts.yml

- name: Deploy {{ svc.name }}
  vars:
    svc: "{{ jellyfin_svc }}"
    env: "{{ jellyfin_env }}"
    compose: "{{ jellyfin_compose }}"
  block:
    - name: Import tasks to deploy common service
      ansible.builtin.import_tasks: tasks/deploy-common-service.yml
27 roles/jellyfin/vars/main.yml Normal file
@@ -0,0 +1,27 @@
---
jellyfin_svc:
  domain: media.serguzim.me
  name: jellyfin
  port: 8096
  db:
    host: "{{ postgres.host }}"
    port: "{{ postgres.port }}"

jellyfin_env:
  JELLYFIN_PublishedServerUrl: https://{{ svc.domain }}

jellyfin_compose:
  watchtower: true
  image: jellyfin/jellyfin
  volumes:
    - config:/config
    - cache:/cache
    - media:/media
  file:
    services:
      app:
        user: 8096:8096
    volumes:
      config:
      cache:
      media:
6 roles/lego/files/hook.sh Normal file
@@ -0,0 +1,6 @@
#!/usr/bin/env sh

cp -f "$LEGO_CERT_PATH" /certificates
cp -f "$LEGO_CERT_KEY_PATH" /certificates

exit 33 # special exit code to signal that the certificate has been updated
19 roles/lego/files/lego.sh Executable file
@@ -0,0 +1,19 @@
#!/usr/bin/env sh

set -a
. ./service.env
set +a

domain="$1"
action="${2:-renew}"

docker compose run --rm app \
    --domains "$domain" \
    "$action" \
    "--$action-hook" "/config/hook.sh"

if [ "$?" = "33" ] && [ -x "./lego.d/$domain" ];
then
    echo "Running hook for $domain"
    "./lego.d/$domain"
fi
10 roles/lego/files/lego@.timer Normal file
@@ -0,0 +1,10 @@
[Unit]
Description=Renew certificates

[Timer]
Persistent=true
OnCalendar=*-*-* 01:15:00
RandomizedDelaySec=2h

[Install]
WantedBy=timers.target
16 roles/lego/files/node002/db.serguzim.me Executable file
@@ -0,0 +1,16 @@
#!/usr/bin/env sh

domain="db.serguzim.me"

_install() {
    install --owner=postgres --group=postgres --mode=600 \
        "$CERTIFICATES_PATH/$domain.$1" \
        "/var/lib/postgres/data/server.$1"
}

_install crt
_install key

sudo -u postgres pg_ctl -D /var/lib/postgres/data/ reload

# vim: ft=sh
Some files were not shown because too many files have changed in this diff.