#!/bin/bash
# Back up each docker-compose container directory into its own borg repo,
# both locally (-b) and offsite over SSH (-r/-R). Containers are stopped only
# while their directory is copied, and pruning runs after everything is back up.
#
# Cronjob needs to be on one line, spaced out for example / in case running
# cronjob from a script that just runs the below
# e.g. 0 0 * * * /home/nathan/scripts/backup/borg/docker_backup.sh \
#          -c "duckdns homer" \
#          -d /home/samba/share/Docker_prod \
#          -b /home/nathan/testBack \
#          -r pi2 \
#          -R ~/backups/pi1/docker

# Flags
while getopts c:d:b:r:R: flag
do
    case "${flag}" in
        c) CONTAINER_DIRS=${OPTARG};; # Will accept an 'array', just individual names spaced within quotes. i.e. "homer npm vaultwarden"
        d) DIR=${OPTARG};;            # /home/nathan/docker typically
        b) BACKUP_DIR=${OPTARG};;     # Where the backup is on local host
        r) REMOTE=${OPTARG};;         # user@remote or alias (from SSH config), prefer to use alias, as it can deal with port differences
        R) REMOTE_DIR=${OPTARG};;     # Location of remote backup i.e. /backup/borg/HOSTNAME/docker (then /npm, /vaultwarden, etc.)
        *) printf 'Usage: %s -c "dirs" -d docker_dir -b backup_dir -r remote -R remote_dir\n' "$0" >&2
           exit 2;;
    esac
done

# Abort up front if a required flag is missing — otherwise the script would
# happily operate on empty paths (e.g. 'mkdir -p /', 'ssh "" ...').
: "${CONTAINER_DIRS:?-c is required}"
: "${DIR:?-d is required}"
: "${BACKUP_DIR:?-b is required}"
: "${REMOTE:?-r is required}"
: "${REMOTE_DIR:?-R is required}"

# Borg assistance: https://borgbackup.readthedocs.io/en/stable/quickstart.html
# export BORG_PASSPHRASE='' # If using encryption in borg, ignoring for now, to just have it work
# Will need permissions to read and enter directories
# sudo setfacl -Rdm "u:nathan:rX" /home/samba/share/Docker_prod && sudo setfacl -Rm "u:nathan:rX" /home/samba/share/Docker_prod
# Also need to be part of docker group to down/up: sudo groupadd docker && sudo usermod -aG docker nathan

# NOTE: 'set -e' is intentionally NOT used — a mid-loop failure must not skip
# the 'docker compose start' that brings a stopped container back up.

# Function(s)

# Back up $DIR/$i into the local borg repo at $1, initialising the repo first
# if it does not exist yet. (Reads the loop variable $i from the caller.)
borg_backup () {
    # $1 is the local repo path for this container
    if [ ! -d "$1" ]; then
        mkdir -p "$1"
        borg init --encryption=none "$1"
    fi
    export BORG_REPO="$1"
    borg create ::{hostname}-{now} "$DIR/$i"
}

# Prune the LOCAL repo at $1.
# Keeps LESS than the offsite policy to save local storage.
borg_prune () {
    # BUGFIX: previously $1 was ignored and BORG_REPO was never set here, so
    # this pruned whichever repo happened to be exported last (usually the
    # previous iteration's *remote* repo).
    export BORG_REPO="$1"
    borg prune \
        --glob-archives '{hostname}-*' \
        --keep-daily 5 \
        --keep-weekly 2
    # Then compact (actually clear up the disk space of the files removed via prune)
    borg compact
}

# Back up $DIR/$i into the remote repo at $REMOTE:$1
# (separate borg repo per container directory).
offsite_borg () {
    export BORG_REPO="$REMOTE:$1" # $1 is the remote repo path for this container
    borg create ::{hostname}-{now} "$DIR/$i"
}

# Prune the REMOTE repo at $REMOTE:$1 with a longer retention policy.
offsite_prune () {
    export BORG_REPO="$REMOTE:$1" # $1 is the remote repo path for this container
    # Prune on server, this may take ages though so I'm not sure about this...
    borg prune \
        --glob-archives '{hostname}-*' \
        --keep-daily 7 \
        --keep-weekly 4 \
        --keep-monthly 6 \
        --keep-yearly 1
    # Then compact (actually clear up the disk space of the files removed via prune)
    borg compact
}

# Script
# shellcheck disable=SC2206 — word-splitting of the space-separated -c value is intentional
DIRS=($CONTAINER_DIRS) # Put the CONTAINER_DIRS passed into an array that can be looped
# DIRS=(homer npm) to hardcode
#DIRS=( "$DOCKER"/*/ )

# Currently backs up each docker container in its own repo, may be more efficient to have
# them all in the same repo? But this makes sense to me in a backup directory
for i in "${DIRS[@]}"
do
    # Stop docker containers before backup in case any moving parts.
    # Skip this container if the directory is missing — otherwise
    # 'docker compose stop' would run against whatever directory we were in.
    cd "$DIR/$i" || { printf 'cannot cd to %s, skipping\n' "$DIR/$i" >&2; continue; }
    docker compose stop

    # Local: backup the container
    borg_backup "$BACKUP_DIR/$i"

    # Offsite: check if the directory/repo exists. Create if not, and backup
    if ssh "$REMOTE" ls "$REMOTE_DIR/$i" 2>/dev/null; then
        offsite_borg "$REMOTE_DIR/$i"
    else
        # Doesn't exist, create, and init repo first
        ssh "$REMOTE" mkdir -p "$REMOTE_DIR/$i"
        borg init --encryption=none "$REMOTE:$REMOTE_DIR/$i"
        offsite_borg "$REMOTE_DIR/$i"
    fi

    # Bring the container back up
    docker compose start
done

# To prevent hangs and the container(s) being down too long
# the prune occurs after all the containers are backed-up, and running/started
for i in "${DIRS[@]}"
do
    # BUGFIX: the local prune was passed $REMOTE_DIR — the local repo lives
    # under $BACKUP_DIR (see borg_backup above).
    borg_prune "$BACKUP_DIR/$i"
    offsite_prune "$REMOTE_DIR/$i"
done