Replace backup script with autorestic

Tobias Reisinger 2024-10-06 01:59:46 +02:00
parent 13084e3558
commit ed51a86935
Signed by: serguzim
GPG key ID: 13AD60C237A28DFE
17 changed files with 180 additions and 224 deletions


@@ -32,8 +32,9 @@ all_services:
     dns:
       - domain: serguzim.me
         target: forgejo
-    volumes_backup:
-      - forgejo_data
+    backup:
+      - name: forgejo_data
+        type: docker

   - name: forgejo_runner
     host: node002
@@ -46,32 +47,38 @@ all_services:
     dns:
       - domain: serguzim.me
         target: inventory
-    volumes_backup:
-      - homebox_data
+    backup:
+      - name: homebox_data
+        type: docker

   - name: immich
     host: node002
     dns:
       - domain: serguzim.me
         target: gallery
-    volumes_backup:
-      - immich_upload
+    backup:
+      - name: immich_upload
+        type: docker
+      - name: immich_database
+        type: hook

   - name: influxdb
     host: node002
     dns:
       - domain: serguzim.me
         target: tick
-    volumes_backup:
-      - influxdb_data
+    backup:
+      - name: influxdb_data
+        type: docker

   - name: jellyfin
     host: node002
     dns:
       - domain: serguzim.me
         target: media
-    volumes_backup:
-      - jellyfin_config
+    backup:
+      - name: jellyfin_config
+        type: docker
       #- jellyfin_media # TODO

   - name: linkwarden
@@ -85,6 +92,9 @@ all_services:
     dns:
       - domain: serguzim.me
         target: mail
+    backup:
+      - name: mailcow
+        type: hook

   - name: minio
     host: node002
@@ -95,16 +105,24 @@ all_services:
         target: console.s3
         name: minio-console
         alias: minio
-    volumes_backup:
-      - minio_data
+    backup:
+      - name: minio_data
+        type: docker

   - name: ntfy
     host: node002
     dns:
       - domain: serguzim.me
         target: push
-    volumes_backup:
-      - ntfy_data
+    backup:
+      - name: ntfy_data
+        type: docker
+
+  - name: postgresql
+    host: node002
+    backup:
+      - name: postgresql
+        type: hook

   - name: reitanlage_oranienburg
     host: node002
@@ -115,8 +133,9 @@ all_services:
         target: www
         name: reitanlage_oranienburg-www
         alias: reitanlage_oranienburg
-    volumes_backup:
-      - reitanlage-oranienburg_data
+    backup:
+      - name: reitanlage-oranienburg_data
+        type: docker

   - name: shlink
     host: node002
@@ -137,8 +156,9 @@ all_services:
         target: matrix
         name: synapse_msrg
         alias: synapse
-    volumes_backup:
-      - synapse_media_store
+    backup:
+      - name: synapse_media_store
+        type: docker
     ports:
       - 8448:8448
@@ -147,16 +167,18 @@ all_services:
     dns:
       - domain: serguzim.me
         target: recipes
-    volumes_backup:
-      - tandoor_mediafiles
+    backup:
+      - name: tandoor_mediafiles
+        type: docker

   - name: teamspeak_fallback
     host: node002
     dns:
       - domain: serguzim.me
         target: ts
-    volumes_backup:
-      - teamspeak-fallback-data
+    backup:
+      - name: teamspeak-fallback-data
+        type: docker

   - name: telegraf
     host: node002
@@ -178,16 +200,18 @@ all_services:
     dns:
       - domain: serguzim.me
         target: status
-    volumes_backup:
-      - uptime-kuma_data
+    backup:
+      - name: uptime-kuma_data
+        type: docker

   - name: vikunja
     host: node002
     dns:
       - domain: serguzim.me
         target: todo
-    volumes_backup:
-      - vikunja_data
+    backup:
+      - name: vikunja_data
+        type: docker

   - name: webhook
     host: node002
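Each service's former flat `volumes_backup` list becomes a `backup` list of typed entries. Judging from the new filter plugin further down, three types are handled: `docker` (back up a named volume), `hook` (run a dump script into a staging directory first), and `directory` (back up an arbitrary path). A purely illustrative entry combining all three could look like this; no service in this inventory uses the `directory` type yet, and the path shown is made up:

  - name: example_service
    host: node002
    backup:
      - name: example_data          # docker volume backed up directly
        type: docker
      - name: example_database      # dumped by /opt/services/backup/hooks/example_database
        type: hook
      - name: example_config        # plain directory (illustrative path)
        type: directory
        path: /opt/services/example/config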


@@ -0,0 +1,38 @@
import copy


class FilterModule(object):
    def filters(self):
        return {
            'map_backup_locations': self.map_backup_locations
        }

    def map_backup_locations(self, locations, backends, hooks):
        result = {}
        backends_list = list(backends.keys())

        for location in locations:
            name = location["name"]
            new_location = {
                "to": backends_list,
                "forget": "yes",
                "hooks": copy.deepcopy(hooks),
            }

            if location["type"] == "docker":
                # back up the named docker volume directly
                new_location["from"] = name
                new_location["type"] = "volume"
            if location["type"] == "hook":
                # a hook script dumps its data into a staging directory first
                backup_dir = f"/opt/services/_backup/{name}"
                new_location["from"] = backup_dir
                if "before" not in new_location["hooks"]:
                    new_location["hooks"]["before"] = []
                new_location["hooks"]["before"].append(f"/opt/services/backup/hooks/{name} '{backup_dir}'")
            if location["type"] == "directory":
                new_location["from"] = location["path"]

            result[name.lower()] = new_location

        return result
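As a rough sketch of what the filter produces, assuming a single backend named s3 and abbreviated hook commands (the backend and hook values here are placeholders, not the real vault data):

locations = [
    {"name": "forgejo_data", "type": "docker"},
    {"name": "postgresql", "type": "hook"},
]
backends = {"s3": {"type": "s3", "path": "..."}}
hooks = {"failure": ["curl ... hc-ping ...", "curl ... uptime-kuma ..."]}

print(FilterModule().map_backup_locations(locations, backends, hooks))
# {
#   'forgejo_data': {'to': ['s3'], 'forget': 'yes', 'hooks': {'failure': [...]},
#                    'from': 'forgejo_data', 'type': 'volume'},
#   'postgresql':   {'to': ['s3'], 'forget': 'yes',
#                    'hooks': {'failure': [...],
#                              'before': ["/opt/services/backup/hooks/postgresql '/opt/services/_backup/postgresql'"]},
#                    'from': '/opt/services/_backup/postgresql'}
# }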


@@ -1,24 +0,0 @@
class FilterModule(object):
    def filters(self):
        return {
            'map_backup_volumes': self.map_backup_volumes,
            'map_backup_volumes_service': self.map_backup_volumes_service
        }

    def map_backup_volumes(self, volumes):
        result = {}

        for volume in volumes:
            result[volume] = {
                "external": True,
            }

        return result

    def map_backup_volumes_service(self, volumes):
        result = []

        for volume in volumes:
            result.append("{volume_name}:/backup/volumes/{volume_name}".format(volume_name=volume))

        return result


@@ -1,3 +0,0 @@
FROM restic/restic
RUN apk add curl


@@ -1,5 +1,6 @@
-backup_path="$BACKUP_LOCATION/immich"
-mkdir -p "$backup_path"
+#!/usr/bin/env bash
+
+backup_path="$1"

 cd /opt/services/immich || exit
 docker compose exec database sh -c 'pg_dump -U "$DB_USERNAME" "$DB_DATABASE"' | gzip >"$backup_path/immich.sql.gz"


@@ -0,0 +1,5 @@
#!/usr/bin/env bash
export MAILCOW_BACKUP_LOCATION="$1"
/opt/mailcow-dockerized/helper-scripts/backup_and_restore.sh backup all


@@ -1,5 +1,6 @@
-mkdir -p "$BACKUP_LOCATION/postgres"
-cd "$BACKUP_LOCATION/postgres" || exit
+#!/usr/bin/env bash
+
+cd "$1"

 postgres_tables=$(sudo -u postgres psql -Atc "SELECT datname FROM pg_database WHERE datistemplate = false;")


@@ -1,3 +0,0 @@
export MAILCOW_BACKUP_LOCATION="$BACKUP_LOCATION/mailcow"
mkdir -p "$MAILCOW_BACKUP_LOCATION"
/opt/mailcow-dockerized/helper-scripts/backup_and_restore.sh backup all


@@ -1,3 +0,0 @@
export MAILCOW_BACKUP_LOCATION="$BACKUP_LOCATION/mailcow"
mkdir -p "$MAILCOW_BACKUP_LOCATION"
/opt/mailcow-dockerized/helper-scripts/backup_and_restore.sh backup all


@@ -1,16 +0,0 @@
---
- name: Set backup.d path
  ansible.builtin.set_fact:
    backup_d_path: "{{ (service_path, 'backup.d') | path_join }}"
- name: Create backup.d directory
  ansible.builtin.file:
    path: "{{ backup_d_path }}"
    state: directory
    mode: "0755"
- name: Copy the additional backup scripts
  ansible.builtin.copy:
    src: "{{ item }}"
    dest: "{{ backup_d_path }}"
    mode: "0755"
  with_fileglob:
    - "{{ ansible_facts.hostname }}/*"


@@ -1,12 +0,0 @@
---
- name: Copy the Dockerfile
  ansible.builtin.copy:
    src: Dockerfile
    dest: "{{ (service_path, 'Dockerfile') | path_join }}"
    mode: "0644"
  register: cmd_result

- name: Set the docker rebuild flag
  ansible.builtin.set_fact:
    docker_rebuild: true
  when: cmd_result.changed # noqa: no-handler We need to handle the restart per service. Handlers don't support variables.


@@ -0,0 +1,23 @@
---
- name: Set hooks path
  ansible.builtin.set_fact:
    hooks_path: "{{ (service_path, 'hooks') | path_join }}"
- name: Create hooks directory
  ansible.builtin.file:
    path: "{{ hooks_path }}"
    state: directory
    mode: "0755"
- name: Copy the hooks
  ansible.builtin.copy:
    src: "{{ item }}"
    dest: "{{ hooks_path }}"
    mode: "0755"
  with_fileglob:
    - "hooks/*"
- name: Create the from directories
  ansible.builtin.file:
    path: "{{ ('/opt/services/_backup', item | basename) | path_join }}"
    state: directory
    mode: "0755"
  with_fileglob:
    - "hooks/*"


@@ -4,36 +4,31 @@
 - name: Deploy {{ svc.name }}
   vars:
-    svc: "{{ backup_svc }}"
-    env: "{{ backup_env }}"
-    compose: "{{ backup_compose }}"
+    yml: "{{ backup_yml }}"
   block:
     - name: Import prepare tasks for common service
       ansible.builtin.import_tasks: tasks/prepare-common-service.yml

-    - name: Copy the main backup script
+    - name: Template the main backup script
       ansible.builtin.template:
-        src: "backup.sh.j2"
+        src: backup.sh.j2
         dest: "{{ (service_path, 'backup.sh') | path_join }}"
         mode: "0755"

-    - name: Import tasks specific to docker
-      ansible.builtin.import_tasks: docker.yml
-
-    - name: Import tasks specific to the backup.d scripts
-      ansible.builtin.import_tasks: backup.d.yml
+    - name: Template autorestic.yml
+      ansible.builtin.template:
+        src: yml.j2
+        dest: "{{ (service_path, '.autorestic.yml') | path_join }}"
+        mode: "0644"
+
+    - name: Import tasks specific to the hooks scripts
+      ansible.builtin.import_tasks: hooks.yml

     - name: Import tasks specific to systemd
       ansible.builtin.import_tasks: systemd.yml

-    - name: Build service
-      ansible.builtin.command:
-        cmd: docker compose build --pull
-        chdir: "{{ service_path }}"
-      register: cmd_result
-      when: docker_rebuild
-      changed_when: true
-
     - name: Verify service
       ansible.builtin.command:
-        cmd: docker compose run --rm app check
+        cmd: autorestic -v check
         chdir: "{{ service_path }}"
       changed_when: false
+      become: true


@@ -1,11 +1,11 @@
 [Unit]
-Description=Autostart several tools and services
+Description=Run the backup script
 StartLimitIntervalSec=7200
 StartLimitBurst=5

 [Service]
 Type=oneshot
-ExecStart={{ service_path }}/backup.sh
+ExecStart={{ (service_path, 'backup.sh') | path_join }}
 WorkingDirectory={{ service_path }}
 Restart=on-failure
 RestartSec=15min
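The hunk only touches the service unit; the schedule presumably still comes from a timer installed by systemd.yml, which this commit does not show. Purely as an illustration, a matching timer unit might look like this (the unit description and daily schedule are assumptions, not part of the diff):

[Unit]
Description=Run the backup script on a schedule

[Timer]
# assumed schedule; the actual OnCalendar value is not shown in this commit
OnCalendar=daily
Persistent=true

[Install]
WantedBy=timers.target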

roles/backup/templates/backup.sh.j2 Executable file → Normal file

@@ -1,68 +1,12 @@
 #!/usr/bin/env bash
-set -e
-
-set -a
-. "{{ service_path }}/service.env"
-set +a
-
-duration_start=$(date +%s)
-
-_duration_get () {
-    duration_end=$(date +%s)
-    echo "$((duration_end - duration_start))"
-}
-
-hc_url="https://hc-ping.com/$HC_UID"
-uptime_kuma_url="https://status.serguzim.me/api/push/$UPTIME_KUMA_TOKEN"
-
-_hc_ping () {
-    curl -fsSL --retry 3 "$hc_url$1" >/dev/null
-}
-
-_uptime_kuma_ping () {
-    duration=$(_duration_get)
-    curl -fsSL --retry 3 \
-        --url-query "status=$1" \
-        --url-query "msg=$2" \
-        --url-query "ping=${duration}000" \
-        "$uptime_kuma_url" >/dev/null
-}
-
-_fail () {
-    _hc_ping "/fail"
-    _uptime_kuma_ping "down" "$1"
-    rm -rf "$BACKUP_LOCATION"
-    exit 1
-}
-
-_success () {
-    _hc_ping
-    _uptime_kuma_ping "up" "backup successful"
-}
-
-_hc_ping "/start"
-
-BACKUP_LOCATION="$(mktemp -d --suffix=-backup)"
-export BACKUP_LOCATION
-cd "$BACKUP_LOCATION" || _fail "failed to cd to $BACKUP_LOCATION"
-
-shopt -s nullglob
-for file in "{{ service_path }}/backup.d/"*
-do
-    file_name="$(basename "$file")"
-    echo ""
-    echo "running $file_name"
-    time "$file" >"/tmp/$file_name.log" || _fail "error while running $file_name"
-done || true
-
-cd "{{ service_path }}"
-docker compose run --rm -v "$BACKUP_LOCATION:/backup/misc" app backup /backup || _fail "error during restic backup"
-
-_success
-rm -rf "$BACKUP_LOCATION"
-
-echo "forgetting old backups for {{ ansible_facts.hostname }}"
-docker compose run --rm app forget --host "{{ ansible_facts.hostname }}" --prune \
-    --keep-last 7 \
-    --keep-daily 14 \
-    --keep-weekly 16 \
-    --keep-monthly 12 \
-    --keep-yearly 2
+
+{{ backup_hc_command_start }}
+
+if autorestic backup -av --ci
+then
+    {{ backup_hc_command_success }}
+    {{ backup_uk_command_success }}
+else
+    {{ backup_hc_command_fail }}
+    {{ backup_uk_command_fail }}
+fi
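After templating, the new backup.sh is just the monitoring pings wrapped around a single autorestic run. Substituting the command variables from the role vars further down, the rendered script comes out roughly like this (the healthchecks UID and Uptime Kuma token are redacted placeholders):

#!/usr/bin/env bash

curl -L -m 10 --retry 5 -X POST -H "Content-Type: text/plain" --data "Backup started" https://hc-ping.com/<uid>/start

if autorestic backup -av --ci
then
    curl -L -m 10 --retry 5 -X POST -H "Content-Type: text/plain" --data "Backup successful" https://hc-ping.com/<uid>
    curl -L -m 10 --retry 5 "https://status.serguzim.me/api/push/<token>?status=up&msg=Backup%20successful&ping="
else
    curl -L -m 10 --retry 5 -X POST -H "Content-Type: text/plain" --data "Backup failed" https://hc-ping.com/<uid>/fail
    curl -L -m 10 --retry 5 "https://status.serguzim.me/api/push/<token>?status=down&msg=Backup%20failed&ping="
fi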


@@ -1,60 +1,46 @@
 ---
-backup_image: "{{ (container_registry.public, 'services/backup') | path_join }}"
-
 backup_svc:
   name: backup

-backup_volumes_list: "{{ all_services | my_service_attributes(inventory_hostname, 'volumes_backup') }}"
-backup_volumes_service: "{{ backup_volumes_list | map_backup_volumes_service }}"
-
-backup_env:
-  HC_UID: "{{ host_backup.hc_uid }}"
-  UPTIME_KUMA_TOKEN: "{{ host_backup.uptime_kuma_token }}"
-
-  RESTIC_REPOSITORY: "{{ vault_backup.restic.s3.repository }}"
-  RESTIC_PASSWORD: "{{ vault_backup.restic.s3.password }}"
-
-  AWS_ACCESS_KEY_ID: "{{ vault_backup.restic.s3.access_key_id }}"
-  AWS_SECRET_ACCESS_KEY: "{{ vault_backup.restic.s3.secret_access_key }}"
-
-  #RESTIC_S3_REPOSITORY: "{{ vault_backup.restic.s3.repository }}"
-  #RESTIC_S3_PASSWORD: "{{ vault_backup.restic.s3.password }}"
-  #RESITC_S3_ACCESS_KEY_ID: "{{ vault_backup.restic.s3.access_key_id }}"
-  #RESITC_S3_SECRET_ACCESS_KEY: "{{ vault_backup.restic.s3.secret_access_key }}"
-  #RESTIC_BORGBASE: "{{ vault_backup.restic.borgbase }}"
-
-backup_compose:
-  watchtower: false
-  image: "{{ backup_image }}"
-  volumes: "{{ backup_volumes_service }}"
-
-  file:
-    services:
-      app:
-        build:
-          context: .
-        entrypoint:
-          - /usr/bin/restic
-          - --retry-lock=1m
-        restart: never
-        hostname: "{{ ansible_facts.hostname }}"
-      mount:
-        build:
-          context: .
-        image: "{{ backup_image }}"
-        restart: never
-        hostname: "{{ ansible_facts.hostname }}"
-        env_file:
-          - service.env
-        entrypoint:
-          - /usr/bin/restic
-          - --retry-lock=1m
-        command:
-          - mount
-          - /mnt
-        privileged: true
-        devices:
-          - /dev/fuse
-
-    volumes: "{{ backup_volumes_list | map_backup_volumes }}"
+backup_list: "{{ all_services | my_service_attributes(inventory_hostname, 'backup') }}"
+
+backup_msg_start: "Backup started"
+backup_msg_fail: "Backup failed"
+backup_msg_fail_location: "Backup failed for location: "
+backup_msg_success: "Backup successful"
+
+backup_curl_base: 'curl -L -m 10 --retry 5'
+backup_hc_curl_base: '{{ backup_curl_base }} -X POST -H "Content-Type: text/plain"'
+backup_uk_curl_base: '{{ backup_curl_base }}'
+
+backup_hc_url: 'https://hc-ping.com/{{ host_backup.hc_uid }}'
+backup_uk_url: 'https://status.serguzim.me/api/push/{{ host_backup.uptime_kuma_token }}'
+
+backup_hc_command_start: '{{ backup_hc_curl_base }} --data "{{ backup_msg_start }}" {{ backup_hc_url }}/start'
+backup_hc_command_success: '{{ backup_hc_curl_base }} --data "{{ backup_msg_success }}" {{ backup_hc_url }}'
+backup_uk_command_success: '{{ backup_uk_curl_base }} "{{ backup_uk_url }}?status=up&msg={{ backup_msg_success | urlencode }}&ping="'
+backup_hc_command_fail: '{{ backup_hc_curl_base }} --data "{{ backup_msg_fail }}" {{ backup_hc_url }}/fail'
+backup_uk_command_fail: '{{ backup_uk_curl_base }} "{{ backup_uk_url }}?status=down&msg={{ backup_msg_fail | urlencode }}&ping="'
+
+backup_default_hooks:
+  failure:
+    - '{{ backup_hc_curl_base }} --data "{{ backup_msg_fail_location }}${AUTORESTIC_LOCATION}" {{ backup_hc_url }}/fail'
+    - '{{ backup_uk_curl_base }} "{{ backup_uk_url }}?status=down&msg={{ backup_msg_fail_location | urlencode }}${AUTORESTIC_LOCATION}&ping="'
+
+backup_yml:
+  version: 2
+  backends: "{{ vault_backup.locations }}"
+
+  locations: "{{ backup_list | map_backup_locations(vault_backup.locations, backup_default_hooks ) }}"
+
+  global:
+    forget:
+      keep-last: 7
+      keep-daily: 14
+      keep-weekly: 16
+      keep-monthly: 12
+      keep-yearly: 2
+      host: "{{ ansible_facts.hostname }}"
+    backup:
+      host: "{{ ansible_facts.hostname }}"
