diff --git a/backup/borg/directory_backup.sh b/backup/borg/directory_backup.sh
index f0c5a46..fccb9eb 100644
--- a/backup/borg/directory_backup.sh
+++ b/backup/borg/directory_backup.sh
@@ -17,10 +17,9 @@ do
     esac
 done

-# As this is using borg, DIR can be passed as "~/dir2 ~/dir2" rather than a single directory
+# As this is using borg, DIR can be passed as "~/dir1 ~/dir2" rather than a single directory
 # /var/spool/cron/crontabs
 # Cron backup
-
 # Function(s)
 borg_backup (){
     if [ ! -d "$BACKUP_DIR" ]; then
@@ -32,25 +31,30 @@ borg_backup (){
     borg create ::{hostname}-{now} $DIR
 }
 borg_prune () {
-    # Keep last 24 hours of backups, 7 daily backups (one a day/week), 4 weekly (one a week for a month)
+    # Keep last 8 hourly, 3 daily, and 2 weekly backups. LESS for local to save storage
     borg prune \
         --glob-archives '{hostname}-*' \
-        --keep-hourly 24 \
-        --keep-daily 7 \
-        --keep-weekly 4
+        --keep-hourly 8 \
+        --keep-daily 3 \
+        --keep-weekly 2
+    # Then compact (actually free the disk space of the archives removed via prune)
+    borg compact
 }
 offsite_borg () {
     export BORG_REPO=$REMOTE:$REMOTE_DIR
     borg create ::{hostname}-{now} $DIR # Backup of the directory itself (separate borg repo)
 }
 offsite_prune () {
+    export BORG_REPO=$REMOTE:$REMOTE_DIR
     # Prune on server, this may take ages though so I'm not sure about this...
-    # 3 hourly, is hopefully 8 hour intervals of the day
     borg prune \
         --glob-archives '{hostname}-*' \
-        --keep-hourly 3 \
+        --keep-hourly 24 \
         --keep-daily 7 \
-        --keep-weekly 4
+        --keep-weekly 4 \
+        --keep-monthly 3
+    # Then compact (actually free the disk space of the archives removed via prune)
+    borg compact
 }

 # Script
@@ -71,11 +75,3 @@ fi
 offsite_prune
 # Separate, for the docker script i.e. back up each container in a loop, and prune after ALL containers are looped and backed up
 # so as to not keep the containers down for excessively long amounts of time
-
-# OLD backup, where I rsynced an entire rdiff directory from local to offsite
-# Create the remote directory for backup if it doesn't exist
-# ssh $REMOTE mkdir -p $REMOTE_DIR
-# rsync -azh -e ssh \
-#     --delete \
-#     $BACKUP_DIR \
-#     $REMOTE:$REMOTE_DIR
\ No newline at end of file
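Before trusting the tighter retention above, it is worth a dry run: borg prune supports --dry-run and --list, which print the keep/prune decision per archive without deleting anything. A minimal sketch, assuming BORG_REPO points at a repo this script created (the path below is illustrative):

# Sanity-check the new retention policy without deleting anything
export BORG_REPO=/home/nathan/testBack/dir    # illustrative local repo path
borg list                                     # show the {hostname}-{now} archives currently present
borg prune --dry-run --list \
    --glob-archives '{hostname}-*' \
    --keep-hourly 8 \
    --keep-daily 3 \
    --keep-weekly 2
# --dry-run makes no changes; --list marks each archive as kept or would-be-pruned
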
diff --git a/backup/borg/docker_backup.sh b/backup/borg/docker_backup.sh
index 0e74dfd..6106e1b 100644
--- a/backup/borg/docker_backup.sh
+++ b/backup/borg/docker_backup.sh
@@ -6,8 +6,7 @@
 # -d /home/samba/share/Docker_prod \
 # -b /home/nathan/testBack \
 # -r pi2 \
-# -R ~/backups/pi1/docker \
-# -N 0
+# -R ~/backups/pi1/docker

 # Flags
 while getopts c:d:b:r:R: flag
 do
     b) BACKUP_DIR=${OPTARG};; # Where the backup is on the local host
     r) REMOTE=${OPTARG};; # user@remote or alias (from SSH config), prefer an alias, as it can deal with port differences
     R) REMOTE_DIR=${OPTARG};; # Location of remote backup i.e. /backup/borg/HOSTNAME/docker (then /npm, /vaultwarden, etc.)
-    # N) NOW=${OPTARG};; # 1/0 for yes/no do offsite backup now
     esac
 done

 # Borg assistance: https://borgbackup.readthedocs.io/en/stable/quickstart.html
 # export BORG_PASSPHRASE='' # If using encryption in borg, ignoring for now, to just have it work

-# Script
-DIRS=($CONTAINER_DIRS) # Put the CONTAINER_DIRS passed into an array that can be looped # DIRS=(homer npm) to hardcode
-#DIRS=( "$DOCKER"/*/ )
+# Will need permissions to read and enter directories
+# sudo setfacl -Rdm "u:nathan:rX" /home/samba/share/Docker_prod && sudo setfacl -Rm "u:nathan:rX" /home/samba/share/Docker_prod
+# Also need to be part of the docker group to down/up: sudo groupadd docker && sudo usermod -aG docker nathan

-for i in "${DIRS[@]}"
-do
-
-    # Stop docker containers before backup in case of any moving parts
-    echo $i
-    # If local directory doesn't exist for backup
-    if [ ! -d "$BACKUP_DIR/$i" ]; then
-        # Create new repo # --encryption=none # --encryption=repokey for encryption with key on server/in repo
-        borg init --encryption=none $BACKUP_DIR/$i # Will create repo if it doesn't exist, 'A repo already exists' error otherwise
-        # borg init $REMOTE:$REMOTE_DIR/$i # Will create repo if it doesn't exist EXAMPLE for future, will need to do a different check too ig
-        # --encryption=repokey after init if you want encryption
-        # TODO: If using encryption, backup the
+# Function(s)
+borg_backup (){
+    # $1 is the first variable passed to the function
+    if [ ! -d "$1" ]; then
+        mkdir -p $1
+        borg init --encryption=none $1
     fi
-    docker compose stop
-
-    # LOCAL
-    export BORG_REPO=$BACKUP_DIR/$i
+    export BORG_REPO=$1
     borg create ::{hostname}-{now} $DIR/$i
-    #rdiff-backup $DIR/$i $BACKUP_DIR/$i # If a directory doesn't exist, it gets created too
-
-    # Keep last 24 hours of backups, 7 daily backups (one a day/week), 4 weekly (one a week for a month), and 6 monthly, and 1 a year
-    # Not 100% on this, but will keep this for now
+}
+borg_prune () {
+    export BORG_REPO=$1 # $1 is the first variable passed to the function
+    # Keep last 5 daily and 2 weekly backups. LESS for local to save storage
+    borg prune \
+        --glob-archives '{hostname}-*' \
+        --keep-daily 5 \
+        --keep-weekly 2
+    # Then compact (actually free the disk space of the archives removed via prune)
+    borg compact
+}
+offsite_borg () {
+    export BORG_REPO=$REMOTE:$1 # $1 is the first variable passed to the function
+    borg create ::{hostname}-{now} $DIR/$i # Backup of the directory itself (separate borg repo)
+}
+offsite_prune () {
+    export BORG_REPO=$REMOTE:$1 # $1 is the first variable passed to the function
+    # Prune on server, this may take ages though so I'm not sure about this...
     borg prune \
-        --glob-archives '{hostname}-*' \
-        --keep-hourly 24 \
+        --glob-archives '{hostname}-*' \
         --keep-daily 7 \
         --keep-weekly 4 \
         --keep-monthly 6 \
         --keep-yearly 1
-    #rdiff-backup --force --remove-older-than 1M $BACKUP_DIR/$i # Keep 1 month worth of backups
+    # Then compact (actually free the disk space of the archives removed via prune)
+    borg compact
+}

-    # OFFSITE
-    # TODO: This will be better as an actual unique off-site borg backup
-
-    docker compose start
+# Script
+DIRS=($CONTAINER_DIRS) # Put the CONTAINER_DIRS argument into an array that can be looped # DIRS=(homer npm) to hardcode
+#DIRS=( "$DOCKER"/*/ )

-done
+# Currently backs up each docker container in its own repo, may be more efficient to have
+# them all in the same repo? But this makes sense to me in a backup directory
+for i in "${DIRS[@]}"
+do
+    # Stop docker containers before backup in case of any moving parts
+    cd $DIR/$i # Change into the container's directory (so it can be up/downed)
+    docker compose stop

-# Nightly backup offsite (TEMP FOR NOW!!)
-# If time is 00:00/midnight, rsync the entire directory of borg backups
-# Inefficient for borg, but for now it'll work, will need a seperate borg on remote in future
-# So in future, will just run a borg update straight to the server
-if [ "$TIME" = 0000 ] || [ "$NOW" = 1 ]
-then
-    # Create the remote directory for backup if it doesn't exist
-    ssh $REMOTE mkdir -p $REMOTE_DIR
+    # Local
+    # Backup the container
+    borg_backup "$BACKUP_DIR/$i"

-    # Copy the entire backup directory accross
+    # Offsite
+    # Check if the directory/repo exists. Create if not, and backup
+    if ssh $REMOTE ls $REMOTE_DIR/$i > /dev/null 2>&1
+    then
+        offsite_borg "$REMOTE_DIR/$i"
+    else
+        # Doesn't exist, create, and init repo first
+        ssh $REMOTE mkdir -p $REMOTE_DIR/$i
+        borg init --encryption=none $REMOTE:$REMOTE_DIR/$i
+        offsite_borg "$REMOTE_DIR/$i"
+    fi

-    # -e ssh makes it secure
-    rsync -azh -e ssh \
-        --delete \
-        $BACKUP_DIR/ \
-        $REMOTE:$REMOTE_DIR
-    # End-slash on BACKUP_DIR here, as we want to back up just the contents of the directory, not the directory itself
-fi
+    # Bring the container back up
+    docker compose start
+done

+# To prevent hangs and the container(s) being down too long,
+# the prune occurs after all the containers are backed up and running/started
+for i in "${DIRS[@]}"
+do
+    borg_prune "$BACKUP_DIR/$i"
+    offsite_prune "$REMOTE_DIR/$i"
+done
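The offsite branch above decides whether to run borg init by listing the remote path over ssh. A slightly tighter variant (a sketch, not what the script currently does) is to let ssh run test -d and branch on its exit status, which avoids producing and discarding ls output entirely:

# Sketch of an alternative existence check; test -d prints nothing and
# returns a clean exit status, so no output redirection is needed
if ssh "$REMOTE" test -d "$REMOTE_DIR/$i"
then
    offsite_borg "$REMOTE_DIR/$i"
else
    ssh "$REMOTE" mkdir -p "$REMOTE_DIR/$i"
    borg init --encryption=none "$REMOTE:$REMOTE_DIR/$i"
    offsite_borg "$REMOTE_DIR/$i"
fi
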
diff --git a/backup/borg/pi1/backup.sh b/backup/borg/pi1/backup.sh
new file mode 100644
index 0000000..bd9b04a
--- /dev/null
+++ b/backup/borg/pi1/backup.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+# RUN EACH BACKUP SCRIPT IN HERE AS ITS OWN CRONJOB!!!!!
+
+# Every hour # Don't add this script though, just as an example
+# 0 * * * * /home/nathan/scripts/backup/borg/pi1/backup.sh
+# At 2am
+# 0 2 * * * /home/nathan/scripts/backup/borg/pi1/backup.sh
+
+# Ensure borg_avps is in .ssh/config # and that the server has a borg user (with perms for the server directories)
+# Required installs: borg acl
+# Also follow the comments above each script
+
+# source /path/to/script    # Uses the current script's process/vars
+# /bin/bash /path/to/script # Runs the script as a separate process
+
+# DOCKER BACKUP # Will probably need rX permissions (r read, X execute on directories only, so they can be opened)
+# sudo setfacl -Rdm "u:nathan:rX" /home/samba/share/Docker_prod && sudo setfacl -Rm "u:nathan:rX" /home/samba/share/Docker_prod
+# Also need to be part of the docker group to down/up: sudo groupadd docker && sudo usermod -aG docker nathan
+/bin/bash /home/nathan/scripts/backup/borg/docker_backup.sh \
+    -c "duckdns homer" \
+    -d /home/samba/share/Docker_prod \
+    -b /home/nathan/testBack/docker \
+    -r pi2 \
+    -R ~/backups/pi1/docker
+
+# DIRECTORY BACKUP(S)
+/bin/bash /home/nathan/scripts/backup/borg/directory_backup.sh \
+    -d "/home/nathan/AA/A /home/nathan/AA/C" \
+    -b /home/nathan/testBack/dir \
+    -r pi2 \
+    -R ~/backups/pi1/dir
+
+# Cronjob backup, requires permissions on /var/spool/cron/crontabs (rX, X for directories only, so they can be opened)
+# sudo setfacl -Rdm "u:nathan:rX" /var/spool/cron/crontabs && sudo setfacl -Rm "u:nathan:rX" /var/spool/cron/crontabs
+/bin/bash /home/nathan/scripts/backup/borg/directory_backup.sh \
+    -d /var/spool/cron/crontabs \
+    -b /home/nathan/testBack/cron \
+    -r pi2 \
+    -R ~/backups/pi1/cron
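The two setfacl commands in the comments do different jobs: -Rdm sets the default ACL that newly created files and directories inherit, while -Rm patches the entries that already exist. A quick way to confirm both took effect (the grep and the expected output are illustrative):

# Verify the ACLs: expect both a normal and a "default:" entry for the user
getfacl -p /home/samba/share/Docker_prod | grep nathan
# Illustrative output, not captured from a real run:
#   user:nathan:r-x
#   default:user:nathan:r-x
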
diff --git a/backup/borg/pi1/pi_backup.sh b/backup/borg/pi1/pi_backup.sh
deleted file mode 100644
index 55bb1de..0000000
--- a/backup/borg/pi1/pi_backup.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-# DOCKER BACKUP
-# source /home/nathan/scripts/backup/borg/docker_backup.sh \
-#     -c "duckdns homer" \
-#     -d /home/samba/share/Docker_prod \
-#     -b /home/nathan/testBack \
-#     -r pi2 \
-#     -R ~/backups/pi1/docker \
-#     -N 0
-
-# DIRECTORY BACKUP
-source /home/nathan/scripts/backup/borg/directory_backup.sh \
-    -d "/home/nathan/AA/A /home/nathan/AA/C" \
-    -b /home/nathan/testBack/dir \
-    -r pi2 \
-    -R ~/backups/pi1/AA
-
-source /home/nathan/scripts/backup/borg/directory_backup.sh \
-    -d /var/spool/cron/crontabs \
-    -b /home/nathan/testBack/cron \
-    -r pi2 \
-    -R ~/backups/pi1/cron
diff --git a/backup/borg/rn3/backup.sh b/backup/borg/rn3/backup.sh
new file mode 100644
index 0000000..6114c08
--- /dev/null
+++ b/backup/borg/rn3/backup.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# RUN EACH BACKUP SCRIPT IN HERE AS ITS OWN CRONJOB!!!!!
+
+# Every hour # Don't add this script though, just as an example
+# 0 * * * * /home/nathan/git/scripts/backup/borg/rn3/backup.sh
+# At 2am
+# 0 2 * * * /home/nathan/git/scripts/backup/borg/rn3/backup.sh
+
+# Ensure borg_avps is in .ssh/config # and that the server has a borg user (with perms for the server directories)
+# Required installs: borg acl
+# Also follow the comments above each script
+
+# source /path/to/script    # Uses the current script's process/vars
+# /bin/bash /path/to/script # Runs the script as a separate process
+
+# DOCKER BACKUP
+# sudo setfacl -Rdm "u:nathan:rX" /home/samba/share/Docker_prod && sudo setfacl -Rm "u:nathan:rX" /home/samba/share/Docker_prod
+# sudo groupadd docker && sudo usermod -aG docker nathan
+/bin/bash /home/nathan/git/scripts/backup/borg/docker_backup.sh \
+    -c "anetwork.uk aney.co.uk dokuwiki gitea homepage mariadb npm planka uptimekuma vaultwarden" \
+    -d /home/nathan/docker \
+    -b /home/nathan/BACKUP \
+    -r borg_avps \
+    -R /home/nathan/BACKUP/rn3/docker
+
+# WEBSITE DIRECTORY # All sites together for this one (look at separating in future)
+# /bin/bash /home/nathan/git/scripts/backup/borg/directory_backup.sh \
+#     -d "/home/nathan/websites" \
+#     -b /home/nathan/BACKUP/dir \
+#     -r pi2 \
+#     -R /home/nathan/BACKUP/rn3/website
+
+# Maria/mysql database backup
+# /bin/bash /home/nathan/docker/mariadb/scripts/backup.txt
+
+# Cronjob backup
+# Requires permissions on /var/spool/cron/crontabs (rX, X for directories only, so they can be opened)
+# sudo setfacl -Rdm "u:nathan:rX" /var/spool/cron/crontabs && sudo setfacl -Rm "u:nathan:rX" /var/spool/cron/crontabs
+/bin/bash /home/nathan/git/scripts/backup/borg/directory_backup.sh \
+    -d /var/spool/cron/crontabs \
+    -b /home/nathan/BACKUP/cron \
+    -r pi2 \
+    -R /home/nathan/BACKUP/rn3/cron
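backup.sh resolves pi2 and borg_avps through ~/.ssh/config, which is what lets a single alias carry the user, port, and key so the scripts never hardcode them. A hypothetical entry for the borg_avps alias (HostName, User, Port, and IdentityFile are placeholders, not values from this repo):

# Hypothetical ~/.ssh/config entry for the borg_avps alias
Host borg_avps
    HostName vps.example.com
    User borg
    Port 22
    IdentityFile ~/.ssh/id_ed25519
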
diff --git a/backup/borg/rn3/rn3_backup.sh b/backup/borg/rn3/rn3_backup.sh
deleted file mode 100644
index b54d783..0000000
--- a/backup/borg/rn3/rn3_backup.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/bash
-
-HOME=/home/nathan
-
-DOCKER=$HOME/docker
-BACKUP=$HOME/backup/
-DIRS=('anetwork.uk' 'aney.co.uk' 'mariadb' 'npm' 'vaultwarden')
-#DIRS=('nginx' 'npm' 'umami' 'uptimekuma' 'vaultwarden')
-#DIRS=( "$DOCKER"/*/ )
-
-REMOTE=alphavps
-REMOTEBACKUP=/home/nathan/backups/docker/rn2
-
-# Run the mariadb/mysql backup script for the docker container
-/bin/bash /home/nathan/docker/mariadb/scripts/backup.txt
-
-# Backup the docker containers
-# requires rdiff-backup rsync docker-compose
-# also setup .ssh/config for alphavps, and add to a cronjob (as root)
-
-for i in "${DIRS[@]}"
-do
-    #echo $i
-    # Stop docker containers before backup incase any moving parts
-    cd $DOCKER/$i
-    docker compose stop
-    rdiff-backup $DOCKER/$i $BACKUP/$i
-    rdiff-backup --force --remove-older-than 2M $BACKUP/$i
-    docker compose start
-done
-
-# Create the remote directory for backup if it doesn't exist
-ssh $REMOTE mkdir -p $REMOTEBACKUP
-
-# Copy the backup accross
-# -e ssh makes it secure
-rsync -azh -e ssh \
-    --delete \
-    $BACKUP \
-    $REMOTE:$REMOTEBACKUP
-
-## Now backup the websites (one offs)
-REMOTEBACKUP=/home/nathan/backups/websites/rn2
-# Create the remote directory for backup if it doesn't exist
-ssh $REMOTE mkdir -p $REMOTEBACKUP
-
-rsync -azh -e ssh \
-    --delete \
-    $HOME/websites \
-    $REMOTE:$REMOTEBACKUP
-
-## Crontab (has the backup scripts called)
-REMOTEBACKUP=/home/nathan/backups/cron/rn2
-# Create the remote directory for backup if it doesn't exist
-ssh $REMOTE mkdir -p $REMOTEBACKUP
-
-rsync -azh -e ssh \
-    --delete \
-    /var/spool/cron/crontabs \
-    $REMOTE:$REMOTEBACKUP
-
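None of these scripts cover restores, so for completeness: with BORG_REPO pointed at any of the repos above, borg list finds an archive and borg extract unpacks it into the current directory. A sketch with an illustrative repo path and a hypothetical archive name:

# Sketch of a restore from the offsite docker repo; the repo path and
# archive name are illustrative. borg extract restores into the current directory
export BORG_REPO=borg_avps:/home/nathan/BACKUP/rn3/docker/npm
borg list                                # pick an archive, e.g. rn3-2024-01-01T02:00:00
mkdir -p /tmp/restore && cd /tmp/restore
borg extract ::rn3-2024-01-01T02:00:00   # hypothetical archive name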