Changes to backup scripts

master
Nathan Steel 9 months ago
parent c9c67dd9e9
commit 001cfda595

@@ -17,10 +17,9 @@ do
     esac
 done
 # As this is using borg, DIR can be passed as "~/dir1 ~/dir2" rather than a single directory
 # /var/spool/cron/crontabs # Cron backup
 # Function(s)
 borg_backup (){
     if [ ! -d "$BACKUP_DIR" ]; then
@@ -32,25 +31,30 @@ borg_backup (){
     borg create ::{hostname}-{now} $DIR
 }
 borg_prune () {
-    # Keep last 24 hours of backups, 7 daily backups (one a day/week), 4 weekly (one a week for a month)
+    # Keep last 8 hours of backups, last 3 days, and last 2 weeks. LESS for local to save storage
     borg prune \
         --glob-archives '{hostname}-*' \
-        --keep-hourly 24 \
-        --keep-daily 7 \
-        --keep-weekly 4
+        --keep-hourly 8 \
+        --keep-daily 3 \
+        --keep-weekly 2
+    # Then compact (actually clear up the disk space of the files removed via prune)
+    borg compact
 }
 offsite_borg () {
     export BORG_REPO=$REMOTE:$REMOTE_DIR
     borg create ::{hostname}-{now} $DIR # Backup of the directory itself (separate borg repo)
 }
 offsite_prune () {
+    export BORG_REPO=$REMOTE:$REMOTE_DIR
     # Prune on server, this may take ages though so I'm not sure about this...
-    # 3 hourly, is hopefully 8 hour intervals of the day
     borg prune \
         --glob-archives '{hostname}-*' \
-        --keep-hourly 3 \
+        --keep-hourly 24 \
         --keep-daily 7 \
-        --keep-weekly 4
+        --keep-weekly 4 \
+        --keep-monthly 3
+    # Then compact (actually clear up the disk space of the files removed via prune)
+    borg compact
 }
 # Script
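The retention numbers above are easiest to tune by asking borg what a policy would delete before running it for real. A minimal sketch, assuming the local repo layout used by the runner scripts below (the repo path is illustrative):

    # Preview a retention policy without deleting anything
    export BORG_REPO=/home/nathan/testBack/dir
    borg prune --dry-run --list \
        --glob-archives '{hostname}-*' \
        --keep-hourly 8 \
        --keep-daily 3 \
        --keep-weekly 2
    # --dry-run changes nothing; --list prints which archives would be kept or pruned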
@@ -71,11 +75,3 @@ fi
 offsite_prune # Separate, for docker script i.e. backup each container in a loop, and prune after ALL containers are looped and backed up
 # so as to not keep the containers down for excessively long amounts of time
-# OLD backup, where I rsynced an entire rdiff directory from local to offsite
-# Create the remote directory for backup if it doesn't exist
-# ssh $REMOTE mkdir -p $REMOTE_DIR
-# rsync -azh -e ssh \
-#     --delete \
-#     $BACKUP_DIR \
-#     $REMOTE:$REMOTE_DIR
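The removed rsync mirror is superseded by writing to a second borg repo over SSH, which the functions above do via BORG_REPO. The repo can equally be given inline on the command line; a sketch using the pi2 alias from the runner scripts below (repo path and directories illustrative):

    # One-off backup straight to the remote repo, without exporting BORG_REPO
    borg create pi2:backups/pi1/dir::'{hostname}-{now}' ~/dir1 ~/dir2
    # The SSH config alias (pi2) carries the user and any non-standard port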

@@ -6,8 +6,7 @@
 # -d /home/samba/share/Docker_prod \
 # -b /home/nathan/testBack \
 # -r pi2 \
-# -R ~/backups/pi1/docker \
-# -N 0
+# -R ~/backups/pi1/docker
 # Flags
 while getopts c:d:b:r:R: flag
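getopts as used here leaves a missed flag's variable empty and the script carries on regardless; one possible hardening, not part of this commit, is a guard after the loop (variable names are the ones this script already uses):

    # Hypothetical guard: refuse to run with missing required flags
    if [ -z "$CONTAINER_DIRS" ] || [ -z "$DIR" ] || [ -z "$BACKUP_DIR" ] || [ -z "$REMOTE" ] || [ -z "$REMOTE_DIR" ]; then
        echo "Usage: $0 -c \"containers\" -d docker_dir -b backup_dir -r remote -R remote_dir" >&2
        exit 1
    fi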
@@ -18,73 +17,91 @@ do
     b) BACKUP_DIR=${OPTARG};; # Where the backup is on local host
     r) REMOTE=${OPTARG};; # user@remote or alias (from SSH config), prefer to use alias, as it can deal with port differences
     R) REMOTE_DIR=${OPTARG};; # Location of remote backup i.e. /backup/borg/HOSTNAME/docker (then /npm, /vaultwarden, etc.)
-    # N) NOW=${OPTARG};; # 1/0 for yes/no do offsite backup now
     esac
 done
 # Borg assistance: https://borgbackup.readthedocs.io/en/stable/quickstart.html
 # export BORG_PASSPHRASE='' # If using encryption in borg, ignoring for now, to just have it work
-# Script
-DIRS=($CONTAINER_DIRS) # Put the CONTAINER_DIRS passed into an array that can be looped # DIRS=(homer npm) to hardcode
-#DIRS=( "$DOCKER"/*/ )
-for i in "${DIRS[@]}"
-do
-    # Stop docker containers before backup in case any moving parts
-    echo $i
-    # If local directory doesn't exist for backup
-    if [ ! -d "$BACKUP_DIR/$i" ]; then
-        # Create new repo
-        borg init --encryption=none $BACKUP_DIR/$i # Will create repo if it doesn't exist 'A repo already exist...' 'error' otherwise
-        # borg init $REMOTE:$REMOTE_DIR/$i # Will create repo if it doesn't exist EXAMPLE for future, will need to do a different check too ig
-        # --encryption=repokey after init if you want encryption
+# Will need permissions to read and enter directories
+# sudo setfacl -Rdm "u:nathan:rX" /home/samba/share/Docker_prod && sudo setfacl -Rm "u:nathan:rX" /home/samba/share/Docker_prod
+# Also need to be part of docker group to down/up: sudo groupadd docker && sudo usermod -aG docker nathan
+# Function(s)
+borg_backup (){
+    # $1 is the first variable passed to the function
+    if [ ! -d "$1" ]; then
+        mkdir -p $1
+        borg init --encryption=none $1
+        # --encryption=none # --encryption=repokey for encryption with key on server/in repo
+        # TODO: If using encryption, backup the
     fi
-    docker compose stop
-    # LOCAL
-    export BORG_REPO=$BACKUP_DIR/$i
+    export BORG_REPO=$1
     borg create ::{hostname}-{now} $DIR/$i
-    #rdiff-backup $DIR/$i $BACKUP_DIR/$i # If a directory doesn't exist, it gets created too
-    # Keep last 24 hours of backups, 7 daily backups (one a day/week), 4 weekly (one a week for a month), and 6 monthly, and 1 a year
-    # Not 100% on this, but will keep this for now
+}
+borg_prune () {
+    export BORG_REPO=$1 # $1 is the local repo to prune (otherwise the last exported, i.e. offsite, repo would be pruned)
+    # Keep last 5 daily and last 2 weekly backups. LESS for local to save storage
+    borg prune \
+        --glob-archives '{hostname}-*' \
+        --keep-daily 5 \
+        --keep-weekly 2
+    # Then compact (actually clear up the disk space of the files removed via prune)
+    borg compact
+}
+offsite_borg () {
+    export BORG_REPO=$REMOTE:$1 # $1 is the first variable passed to the function
+    borg create ::{hostname}-{now} $DIR/$i # Backup of the directory itself (separate borg repo)
+}
+offsite_prune () {
+    export BORG_REPO=$REMOTE:$1 # $1 is the first variable passed to the function
+    # Prune on server, this may take ages though so I'm not sure about this...
     borg prune \
         --glob-archives '{hostname}-*' \
-        --keep-hourly 24 \
         --keep-daily 7 \
         --keep-weekly 4 \
         --keep-monthly 6 \
         --keep-yearly 1
-    #rdiff-backup --force --remove-older-than 1M $BACKUP_DIR/$i # Keep 1 month worth of backups
-    # OFFSITE
-    # TODO: This will be better as an actual unique off-site borg backup
-    docker compose start
-done
-# Nightly backup offsite (TEMP FOR NOW!!)
-# If time is 00:00/midnight, rsync the entire directory of borg backups
-# Inefficient for borg, but for now it'll work, will need a separate borg on remote in future
-# So in future, will just run a borg update straight to the server
-if [ "$TIME" = 0000 ] || [ "$NOW" = 1 ]
-then
-    # Create the remote directory for backup if it doesn't exist
-    ssh $REMOTE mkdir -p $REMOTE_DIR
-    # Copy the entire backup directory across
-    # -e ssh makes it secure
-    rsync -azh -e ssh \
-        --delete \
-        $BACKUP_DIR/ \
-        $REMOTE:$REMOTE_DIR
-    # End-slash on backupDir here, as want to just backup the contents of the directory, not the directory itself
-fi
+    # Then compact (actually clear up the disk space of the files removed via prune)
+    borg compact
+}
+# Script
+DIRS=($CONTAINER_DIRS) # Put the CONTAINER_DIRS passed into an array that can be looped # DIRS=(homer npm) to hardcode
+#DIRS=( "$DOCKER"/*/ )
+# Currently backs up each docker container in its own repo, may be more efficient to have
+# them all in the same repo? But this makes sense to me in a backup directory
+for i in "${DIRS[@]}"
+do
+    # Stop docker containers before backup in case any moving parts
+    cd $DIR/$i # Change into the container's directory (so it can be up/downed)
+    docker compose stop
+    # Local
+    # Backup the container
+    borg_backup "$BACKUP_DIR/$i"
+    # Offsite
+    # Check if the directory/repo exists. Create if not, and backup
+    if ssh $REMOTE ls $REMOTE_DIR/$i > /dev/null 2>&1
+    then
+        offsite_borg "$REMOTE_DIR/$i"
+    else
+        # Doesn't exist, create, and init repo first
+        ssh $REMOTE mkdir -p $REMOTE_DIR/$i
+        borg init --encryption=none $REMOTE:$REMOTE_DIR/$i
+        offsite_borg "$REMOTE_DIR/$i"
+    fi
+    # Bring the container back up
+    docker compose start
+done
+# To prevent hangs and the container(s) being down too long,
+# the prune occurs after all the containers are backed up and running/started
+for i in "${DIRS[@]}"
+do
+    borg_prune "$BACKUP_DIR/$i"
+    offsite_prune "$REMOTE_DIR/$i"
+done
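The `ssh $REMOTE ls` existence test treats any listing failure as "repo missing"; borg itself can make that check, since `borg info` exits non-zero when it cannot open the repo. A sketch under the same variables (assumption: any `borg info` failure means an init is needed):

    # Alternative repo-existence check using borg itself
    if ! borg info $REMOTE:$REMOTE_DIR/$i > /dev/null 2>&1; then
        ssh $REMOTE mkdir -p $REMOTE_DIR/$i
        borg init --encryption=none $REMOTE:$REMOTE_DIR/$i
    fi
    offsite_borg "$REMOTE_DIR/$i"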

@@ -0,0 +1,39 @@
+#!/bin/bash
+# RUN EACH BACKUP SCRIPT IN HERE AS ITS OWN CRONJOB!!!!!
+# Every hour # Don't add this script though, just as an example
+# 0 * * * * /home/nathan/scripts/backup/borg/rn3/backup.sh
+# At 2am
+# 0 2 * * * /home/nathan/scripts/backup/borg/rn3/backup.sh
+# Ensure borg_avps is in .ssh/config # and that server has borg user (with perms for server directories)
+# Req installs: borg acl
+# Also follow comments above each script
+# source /path/to/script # Uses current script process/vars
+# /bin/bash /path/to/script # Runs script as separate process
+# DOCKER BACKUP # Probably will need rX permissions (read, and X to enter/open directories)
+# sudo setfacl -Rdm "u:nathan:rX" /home/samba/share/Docker_prod && sudo setfacl -Rm "u:nathan:rX" /home/samba/share/Docker_prod
+# Also need to be part of docker group to down/up: sudo groupadd docker && sudo usermod -aG docker nathan
+/bin/bash /home/nathan/scripts/backup/borg/docker_backup.sh \
+    -c "duckdns homer" \
+    -d /home/samba/share/Docker_prod \
+    -b /home/nathan/testBack/docker \
+    -r pi2 \
+    -R ~/backups/pi1/docker
+# DIRECTORY BACKUP(S)
+/bin/bash /home/nathan/scripts/backup/borg/directory_backup.sh \
+    -d "/home/nathan/AA/A /home/nathan/AA/C" \
+    -b /home/nathan/testBack/dir \
+    -r pi2 \
+    -R ~/backups/pi1/dir
+# Cronjob backup, requires permissions on /var/spool/cron/crontabs (rX; capital X is execute on directories only, so they can be opened - not sure this works on Unix)
+# sudo setfacl -Rdm "u:nathan:rX" /var/spool/cron/crontabs && sudo setfacl -Rm "u:nathan:rX" /var/spool/cron/crontabs
+/bin/bash /home/nathan/scripts/backup/borg/directory_backup.sh \
+    -d /var/spool/cron/crontabs \
+    -b /home/nathan/testBack/cron \
+    -r pi2 \
+    -R ~/backups/pi1/cron
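The setfacl lines above grant the backup user read/traverse access; getfacl (shipped in the same acl package) confirms the grant took effect, which is worth checking once before trusting the cron run:

    # Inspect both the effective and the default (inherited) ACL entries
    getfacl /var/spool/cron/crontabs
    # Expect a "user:nathan:r-x" line and a matching "default:user:nathan:r-x" line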

@@ -1,23 +0,0 @@
-#!/bin/bash
-# DOCKER BACKUP
-# source /home/nathan/scripts/backup/borg/docker_backup.sh \
-#     -c "duckdns homer" \
-#     -d /home/samba/share/Docker_prod \
-#     -b /home/nathan/testBack \
-#     -r pi2 \
-#     -R ~/backups/pi1/docker \
-#     -N 0
-# DIRECTORY BACKUP
-source /home/nathan/scripts/backup/borg/directory_backup.sh \
-    -d "/home/nathan/AA/A /home/nathan/AA/C" \
-    -b /home/nathan/testBack/dir \
-    -r pi2 \
-    -R ~/backups/pi1/AA
-source /home/nathan/scripts/backup/borg/directory_backup.sh \
-    -d /var/spool/cron/crontabs \
-    -b /home/nathan/testBack/cron \
-    -r pi2 \
-    -R ~/backups/pi1/cron
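This deleted caller used source, which runs each backup script inside the caller's own shell, so variables like BACKUP_DIR and REMOTE_DIR from one script leak into the next; the replacement callers above use /bin/bash instead. A minimal illustration (a.sh is hypothetical):

    # a.sh contains just: BACKUP_DIR=/tmp/a
    /bin/bash ./a.sh; echo "$BACKUP_DIR"   # prints an empty line - the child process keeps its vars to itself
    source ./a.sh;    echo "$BACKUP_DIR"   # prints /tmp/a - sourcing leaks vars into this shell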

@@ -0,0 +1,43 @@
+#!/bin/bash
+# RUN EACH BACKUP SCRIPT IN HERE AS ITS OWN CRONJOB!!!!!
+# Every hour # Don't add this script though, just as an example
+# 0 * * * * /home/nathan/git/scripts/backup/borg/rn3/backup.sh
+# At 2am
+# 0 2 * * * /home/nathan/git/scripts/backup/borg/rn3/backup.sh
+# Ensure borg_avps is in .ssh/config # and that server has borg user (with perms for server directories)
+# Req installs: borg acl
+# Also follow comments above each script
+# source /path/to/script # Uses current script process/vars
+# /bin/bash /path/to/script # Runs script as separate process
+# DOCKER BACKUP
+# sudo setfacl -Rdm "u:nathan:rX" /home/samba/share/Docker_prod && sudo setfacl -Rm "u:nathan:rX" /home/samba/share/Docker_prod
+# sudo groupadd docker && sudo usermod -aG docker nathan
+/bin/bash /home/nathan/git/scripts/backup/borg/docker_backup.sh \
+    -c "anetwork.uk aney.co.uk dokuwiki gitea homepage mariadb npm planka uptimekuma vaultwarden" \
+    -d /home/nathan/docker \
+    -b /home/nathan/BACKUP \
+    -r borg_avps \
+    -R /home/nathan/BACKUP/rn3/docker
+# WEBSITE DIRECTORY # All sites together for this one (look at separating in future)
+# /bin/bash /home/nathan/git/scripts/backup/borg/directory_backup.sh \
+#     -d "/home/nathan/websites" \
+#     -b /home/nathan/BACKUP/dir \
+#     -r pi2 \
+#     -R /home/nathan/BACKUP/rn3/website
+# Maria/mysql Database backup
+# /bin/bash /home/nathan/docker/mariadb/scripts/backup.txt
+# Cronjob backup
+# Requires permissions on /var/spool/cron/crontabs (rX; capital X is execute on directories only, so they can be opened)
+# sudo setfacl -Rdm "u:nathan:rX" /var/spool/cron/crontabs && sudo setfacl -Rm "u:nathan:rX" /var/spool/cron/crontabs
+/bin/bash /home/nathan/git/scripts/backup/borg/directory_backup.sh \
+    -d /var/spool/cron/crontabs \
+    -b /home/nathan/BACKUP/cron \
+    -r pi2 \
+    -R /home/nathan/BACKUP/rn3/cron
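These jobs stop containers and talk to remote repos, so an hourly run can still be going when the next one starts; one option, not in this commit, is to serialise each crontab entry with flock (the lock path is hypothetical):

    # Skip this run if the previous one is still holding the lock
    0 * * * * flock -n /tmp/rn3_backup.lock /bin/bash /home/nathan/git/scripts/backup/borg/rn3/backup.sh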

@@ -1,61 +0,0 @@
-#!/bin/bash
-HOME=/home/nathan
-DOCKER=$HOME/docker
-BACKUP=$HOME/backup/
-DIRS=('anetwork.uk' 'aney.co.uk' 'mariadb' 'npm' 'vaultwarden')
-#DIRS=('nginx' 'npm' 'umami' 'uptimekuma' 'vaultwarden')
-#DIRS=( "$DOCKER"/*/ )
-REMOTE=alphavps
-REMOTEBACKUP=/home/nathan/backups/docker/rn2
-# Run the mariadb/mysql backup script for the docker container
-/bin/bash /home/nathan/docker/mariadb/scripts/backup.txt
-# Backup the docker containers
-# requires rdiff-backup rsync docker-compose
-# also setup .ssh/config for alphavps, and add to a cronjob (as root)
-for i in "${DIRS[@]}"
-do
-    #echo $i
-    # Stop docker containers before backup incase any moving parts
-    cd $DOCKER/$i
-    docker compose stop
-    rdiff-backup $DOCKER/$i $BACKUP/$i
-    rdiff-backup --force --remove-older-than 2M $BACKUP/$i
-    docker compose start
-done
-# Create the remote directory for backup if it doesn't exist
-ssh $REMOTE mkdir -p $REMOTEBACKUP
-# Copy the backup accross
-# -e ssh makes it secure
-rsync -azh -e ssh \
-    --delete \
-    $BACKUP \
-    $REMOTE:$REMOTEBACKUP
-## Now backup the websites (one offs)
-REMOTEBACKUP=/home/nathan/backups/websites/rn2
-# Create the remote directory for backup if it doesn't exist
-ssh $REMOTE mkdir -p $REMOTEBACKUP
-rsync -azh -e ssh \
-    --delete \
-    $HOME/websites \
-    $REMOTE:$REMOTEBACKUP
-## Crontab (has the backup scripts called)
-REMOTEBACKUP=/home/nathan/backups/cron/rn2
-# Create the remote directory for backup if it doesn't exist
-ssh $REMOTE mkdir -p $REMOTEBACKUP
-rsync -azh -e ssh \
-    --delete \
-    /var/spool/cron/crontabs \
-    $REMOTE:$REMOTEBACKUP
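Whichever of the old rsync layout or the new per-repo layout is in place, the backups are only as good as a restore; listing a repo and extracting its newest archive into a scratch directory is a quick sanity test (paths illustrative):

    # Test-restore the newest archive from the cron repo
    export BORG_REPO=/home/nathan/testBack/cron
    borg list --short                                    # one archive name per line, oldest first
    mkdir -p /tmp/restore_test && cd /tmp/restore_test
    borg extract ::"$(borg list --short | tail -n 1)"    # extracts into the current directory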