Archive

Posts Tagged ‘backup’

Redis backup script

August 7, 2015
#!/bin/bash 
## Fri, 07 Aug 2015 14:04:57 +0300 
## redis backup every 15 minutes 
## */15 * * * * redis.backup.maintenance.sh >> /var/log/redis.backup.log 2>&1 
## at /etc/rc.local : 
## test -d /var/run/redis.backup.lock.dir && rm -rf /var/run/redis.backup.lock.dir 
## watch the job: 
## tail -f /var/log/redis.backup.log 

#redis-cli LASTSAVE | awk '{print $1}' | { read gmt ; date "+%Y-%m-%d %H:%M:%S" -d "@$gmt" ; } 
# 2015-08-07 01:25:54 

lockf="/var/run/redis.backup.lock.dir"

# check whether the script is already running
if [ -d "${lockf}" ]
then
 echo "$(date +%Y-%m-%d.%H.%M.%S) : ${lockf} exists, exiting"
 exit 0
else
 mkdir "${lockf}" && echo "$(date +%Y-%m-%d.%H.%M.%S) : created lock at ${lockf}"
fi


echo "$(date +%Y-%m-%d.%H.%M.%S) : redis backup start"
echo "$(date +%Y-%m-%d.%H.%M.%S) : clean up /redis_backups, keeping the last 6 backups"
find /redis_backups -maxdepth 1 -type f -name "dump.rdb.*" | sort -r | sed '7,$!d' | while read -r to_be_deleted; do rm -f "${to_be_deleted}" && echo "$(date +%Y-%m-%d.%H.%M.%S) : deleted ${to_be_deleted}"; done

last_save=$(redis-cli LASTSAVE | awk '{print $1}')
echo -n "$(date +%Y-%m-%d.%H.%M.%S) : executing redis-cli BGSAVE : "
redis-cli BGSAVE
while true
do
 if [ $(redis-cli LASTSAVE | awk '{print $1}') -eq ${last_save} ]
 then
 echo -n ". "
 sleep 2
 else
 echo ""
 echo "$(date +%Y-%m-%d.%H.%M.%S) : start ionice -c2 -n0 cp -vv /opt/redis/dump.rdb to /redis_backups/"
 ionice -c2 -n0 cp -vv /opt/redis/dump.rdb /redis_backups/dump.rdb.$(date +%Y-%m-%d.%H.%M.%S) && echo "$(date +%Y-%m-%d.%H.%M.%S) : backup completed"
 break
 fi
done


if [ -d "${lockf}" ]
then
 echo "$(date +%Y-%m-%d.%H.%M.%S) : removing the lock"
 rm -rf "${lockf}"
fi
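
For reference, restoring one of these dumps is roughly the reverse (a sketch, assuming the Redis data dir is /opt/redis as in the cp above and a SysV-style init script; the backup file name below is just an example):

#!/bin/bash
## restore sketch: stop redis, put the chosen dump back, start redis
## (Redis loads dump.rdb from its data dir at startup)
backup="/redis_backups/dump.rdb.2015-08-07.14.04.57"   # example file name
/etc/init.d/redis stop
cp -v "${backup}" /opt/redis/dump.rdb
chown redis:redis /opt/redis/dump.rdb   # assumes redis runs as user redis
/etc/init.d/redis start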
Categories: AWS, bash, NoSQL

MySQL, Percona, MariaDB long running processes clean up one liner

April 30, 2015

There are tools like pt-kill from the Percona Toolkit that can print or kill long-running transactions on MariaDB, MySQL, or Percona Server instances, but a lot of backup scripts are just a few simple bash lines.
So checking for long-running transactions before the backup is executed seems to be a step that gets missed a lot.

Here is a one-liner that can simply be added to any bash script before the backup is executed.
Variant 1: Just log all the processlist entries and calculate which ones have been running longer than TIMELIMIT:

$ export TIMELIMIT=70 && echo "$(date) : check for long running queries start:" >> /tmp/processlist.list.to.kill && mysql -BN -e 'show processlist;' | tee -a /tmp/processlist.list.to.kill | awk -vlongtime=${TIMELIMIT} '($6>longtime){print "kill "$1";"}' | tee -a /tmp/processlist.list.to.kill

Variant 2: Log all the processlist entries, calculate which processes are running longer than TIMELIMIT, and kill them before executing the backup:

$ export TIMELIMIT=70 && echo "$(date) : check for long running queries start:" >> /tmp/processlist.list.to.kill && mysql -BN -e 'show processlist;' | tee -a /tmp/processlist.list.to.kill | awk -vlongtime=${TIMELIMIT} '($6>longtime){print "kill "$1";"}' | tee -a /tmp/processlist.list.to.kill | mysql >> /tmp/processlist.list.to.kill 2>&1
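
The same logic unrolled into a short script, for readability (a sketch; like the one-liners above, it assumes the default show processlist column order, where column 6 is Time):

#!/bin/bash
## unrolled variant 2: log the processlist, kill everything running longer than TIMELIMIT
TIMELIMIT=70
LOG=/tmp/processlist.list.to.kill
echo "$(date) : check for long running queries start:" >> "${LOG}"
mysql -BN -e 'show processlist;' \
 | tee -a "${LOG}" \
 | awk -v longtime="${TIMELIMIT}" '($6 > longtime){print "kill "$1";"}' \
 | tee -a "${LOG}" \
 | mysql >> "${LOG}" 2>&1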

MySQL backup and cleanup bash scripts with mydumper

June 28, 2014

1. Backup script

#!/bin/bash
# Fri Jun 27 10:44:49 2014
# done by dragkh
# usage: 
# cat /etc/cron.d/backupmysql 
# 0  3  *  *  *       root    /root/bin/clean.backup.hyperion.mysql.mydumper.daily.sh >>  /var/log/clean.backup.${HOSTNAME}.mysql.mydumper.daily.log 2>&1
# 35  3  *  *  *       root    /root/bin/backup.hyperion.mysql.mydumper.daily.sh >> /var/log/backup.${HOSTNAME}.mysql.mydumper.daily.log 2>&1

ROOT_BACKUP_DIR="/home/mydumper"

seik_date () {
    if [ -z "$1" ]
    then
        # cdate=`date +%Y-%m-%d\ %H:%M:%S\ %Z`; export cdate; echo $cdate
        cdate=`date -R`; export cdate; echo $cdate
    else
        if [ -z "$2" ]
        then
            cdate=`date +%Y-%m-%d.%H.%M.%S`; export cdate; echo $cdate
        else
            cdate=`date "+%Y-%m-%d %H:%M:%S"`; export cdate; echo $cdate
        fi
    fi
}


function check_dir {
 test ! -d "${1}" && mkdir -p "${1}"
}


function set_cpu_threads {
    # set threads to one less than the CPU count (the last processor index)
    threads=$(grep processor /proc/cpuinfo | tail -1 | awk '{print $3}')
    test $threads -lt 1 && threads=1
}

function dump_schema {
    mysqldump -d --dump-date --all-databases > ${DATA_DIR}/${HOSTNAME}.only.sql
}


function dump_data {
    echo "$(seik_date f) : executing : mydumper -o $DATA_DIR --long-query-guard 120 -r 100000 -c -e -m -L ${DATA_DIR}/mysql-backup.log -t ${threads} -v 3"
    mydumper -o $DATA_DIR --long-query-guard 120 -r 100000 -c -e -m -L ${DATA_DIR}/mysql-backup.log -t ${threads} -v 3
}

DATA_DIR="${ROOT_BACKUP_DIR}/$(seik_date d)"
check_dir "${DATA_DIR}" && echo "$(seik_date f) : ${DATA_DIR} is missing, creating it now .."
set_cpu_threads
echo "$(seik_date f) : start dumping the schema at ${DATA_DIR}.."
dump_schema && echo "$(seik_date f) : end dumping the schema at ${DATA_DIR} .."
echo "$(seik_date f) : start dumping the data at ${DATA_DIR} via ${threads} parallel threads .."
dump_data && echo "$(seik_date f) : end dumping the data at ${DATA_DIR} via ${threads} parallel threads .."
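
For completeness, restoring such a dump is done with the matching myloader (a sketch; the date-stamped directory and the thread count are examples, and --overwrite-tables drops and recreates tables that already exist):

## restore sketch: myloader reads the directory mydumper wrote
myloader -d /home/mydumper/2014-06-28.03.35.00 -t 4 -o -v 3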

2. Cleanup script, always keeping the last 7 daily backup directories intact

#!/bin/bash
# Sat Jun 28 03:16:38 EEST 2014
# done by dragkh
# usage: 
# cat /etc/cron.d/backupmysql
# 0       3       *       *       *       root    /root/bin/clean.backup.hyperion.mysql.mydumper.daily.sh >> /var/log/clean.backup.${HOSTNAME}.mysql.mydumper.daily.log 2>&1
# 35      3      *       *       *       root    /root/bin/backup.hyperion.mysql.mydumper.daily.sh >> /var/log/backup.${HOSTNAME}.mysql.mydumper.daily.log 2>&1

ROOT_BACKUP_DIR="/home/mydumper"

seik_date () {
    if [ -z "$1" ]
    then
        cdate=`date -R`; export cdate; echo $cdate
    else
        if [ -z "$2" ]
        then
            cdate=`date +%Y-%m-%d.%H.%M.%S`; export cdate; echo $cdate
        else
            cdate=`date "+%Y-%m-%d %H:%M:%S"`; export cdate; echo $cdate
        fi
    fi
}

day_limit=7
dir_num=0
ls -t ${ROOT_BACKUP_DIR} | \
while read dir
do
    ((dir_num++))
    test $dir_num -gt $day_limit && test -d "${ROOT_BACKUP_DIR}/${dir}" && rm -rf "${ROOT_BACKUP_DIR}/${dir}" && echo "$(seik_date d) : removed [${dir_num}]::[${dir}]" && continue
    test -d "${ROOT_BACKUP_DIR}/${dir}" && echo "$(seik_date d) : skipping [${dir_num}]::[${dir}]"
done
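
An alternative sketch that prunes by age instead of by position in the ls -t output (assuming the backup directories are the only subdirectories of ROOT_BACKUP_DIR and day_limit is expressed in days):

## prune by modification time: list and remove directories older than 7 days
find "${ROOT_BACKUP_DIR}" -mindepth 1 -maxdepth 1 -type d -mtime +7 -print -exec rm -rf {} +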

How to compile mydumper 0.5.2 on Debian 6.0.8 and MySQL Percona Server 5.5.34-rel32.0-591.squeeze

December 8, 2013

How to start:
mydumper home page: https://launchpad.net/mydumper
how to build on Debian/Ubuntu: https://answers.launchpad.net/mydumper/+faq/349 , though it leaves out some prerequisites such as cmake and libpcre
The latest version is 0.5.2, released on 2012-11-19: https://launchpad.net/mydumper/0.5/0.5.2/+download/mydumper-0.5.2.tar.gz

Installed MySQL version: percona-server-server-5.5 : 5.5.34-rel32.0-591.squeeze
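
To save some of the back-and-forth below, all the build dependencies mentioned in this post can also be installed up front (a sketch; these are the Debian 6 package names used in the steps that follow):

sudo apt-get install cmake libglib2.0-dev zlib1g-dev libpcre3-dev libmysqlclient-dev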

1. Get the source:

test ! -d ~/installs/ && mkdir -p ~/installs/
cd ~/installs/
wget https://launchpad.net/mydumper/0.5/0.5.2/+download/mydumper-0.5.2.tar.gz
tar xvf mydumper-0.5.2.tar.gz

2. Install the dev packages:

sudo apt-get install libglib2.0-dev zlib1g-dev

3. Install cmake:

apt-get install cmake

4. Install the Percona MySQL client dev files:

apt-get install libmysqlclient-dev

5. Build it:

cd ~/installs/mydumper-0.5.2
cmake . -DCMAKE_INSTALL_PREFIX=~/bin/mydumper

In case you get a PCRE not found error:
6. Install the PCRE libs:

apt-get install libpcre3-dev

If you get no cmake errors, execute make:

make

In case you get this error:

 [ 20%] Building C object CMakeFiles/mydumper.dir/binlog.c.o
 [ 40%] Building C object CMakeFiles/mydumper.dir/server_detect.c.o
 [ 60%] Building C object CMakeFiles/mydumper.dir/g_unix_signal.c.o
 make[2]: *** No rule to make target `/usr/lib/libmysqlclient_r.so', needed by `mydumper'.  Stop.
 make[1]: *** [CMakeFiles/mydumper.dir/all] Error 2
 make: *** [all] Error 2

you need to fix /usr/lib/libmysqlclient_r.so; in my case it pointed to a non-existent libmysqlclient_r.so.18:

ls -lrth /usr/lib/libmysqlclient_r.so
lrwxrwxrwx 1 root root 22 Nov 18 07:24 /usr/lib/libmysqlclient_r.so -> libmysqlclient_r.so.18
cd /usr/lib/
rm libmysqlclient_r.so &&  ln -s libmysqlclient.so.18 libmysqlclient_r.so
root@www:[Sun Dec 08 22:01:19][/usr/lib]$ ls -lrt  | grep  libmysqlclient.so.18
-rw-r--r--  1 root root 3162144 Oct 25 08:35 libmysqlclient.so.18.0.0
-rw-r--r--  1 root root 3551104 Oct 25 09:04 libmysqlclient.so.18.1.0
lrwxrwxrwx  1 root root      24 Nov 18 07:20 libmysqlclient_r.so.18.0.0 -> libmysqlclient.so.18.0.0
lrwxrwxrwx  1 root root      24 Nov 18 07:24 libmysqlclient_r.so.18.1.0 -> libmysqlclient.so.18.1.0
lrwxrwxrwx  1 root root      20 Nov 18 07:24 libmysqlclient.so -> libmysqlclient.so.18
lrwxrwxrwx  1 root root      26 Nov 18 07:24 libmysqlclient.so.18 -> libmysqlclient_r.so.18.1.0
lrwxrwxrwx  1 root root      20 Dec  7 20:41 libmysqlclient_r.so -> libmysqlclient.so.18

Then run make again.
In case you get this error:

sql_common.h:26:18: fatal error: hash.h: No such file or directory

This is due to MySQL bug #70672.
To fix that, get the Percona source:

$ cd ~/installs/
 wget  http://www.percona.com/redir/downloads/Percona-Server-5.5/LATEST/source/Percona-Server-5.5.34-rel32.0.tar.gz
 tar xvf Percona-Server-5.5.34-rel32.0.tar.gz
 sudo cp ./Percona-Server-5.5.34-rel32.0/include/hash.h /usr/include/
 then try to build mydumper again:
 cd ~/installs/mydumper-0.5.2
 make
 Scanning dependencies of target mydumper
 [ 20%] Building C object CMakeFiles/mydumper.dir/mydumper.c.o
 [ 40%] Building C object CMakeFiles/mydumper.dir/binlog.c.o
 [ 60%] Building C object CMakeFiles/mydumper.dir/server_detect.c.o
 [ 80%] Building C object CMakeFiles/mydumper.dir/g_unix_signal.c.o
 Linking C executable mydumper
 [ 80%] Built target mydumper
 Scanning dependencies of target myloader
 [100%] Building C object CMakeFiles/myloader.dir/myloader.c.o
 Linking C executable myloader
 [100%] Built target myloader
 make install
 [ 80%] Built target mydumper
 [100%] Built target myloader
 Install the project...
 -- Install configuration: ""
 -- Installing: /home/seik/bin/mydumper/bin/mydumper
 -- Installing: /home/seik/bin/mydumper/bin/myloader
 export PATH=$PATH:/home/seik/bin/mydumper/bin/
 myloader --version
 myloader 0.5.2, built against MySQL 5.6.14

Seems to work.
So, how to back up the mysql database:

mkdir /home/.mydumper
mydumper -o /home/.mydumper -r 100000 -c -e -m -L mysql-backup.log -u root -p mypass -h localhost -t 2 -v 3 -B mysql 
mysqldump -u root -p -d -R --skip-triggers mysql  > /home/.mydumper/mysql.schema
mysqldump -u root -p -d -t  mysql  > mysql.triggers
seik@www:[Sun Dec 08 22:28:35][/home/.mydumper]$ ls -lrth
total 260K
-rw-r--r-- 1 seik seik  250 Dec  8 04:02 mysql.db.sql.gz
-rw-r--r-- 1 seik seik   78 Dec  8 04:02 mysql.columns_priv.sql.gz
-rw-r--r-- 1 seik seik   78 Dec  8 04:02 mysql.event.sql.gz
-rw-r--r-- 1 seik seik   78 Dec  8 04:02 mysql.general_log.sql.gz
-rw-r--r-- 1 seik seik  166 Dec  8 04:02 mysql.func.sql.gz
-rw-r--r-- 1 seik seik  614 Dec  8 04:02 mysql.help_category.sql.gz
-rw-r--r-- 1 seik seik 3.4K Dec  8 04:02 mysql.help_relation.sql.gz
-rw-r--r-- 1 seik seik   78 Dec  8 04:02 mysql.plugin.sql.gz
-rw-r--r-- 1 seik seik   78 Dec  8 04:02 mysql.ndb_binlog_index.sql.gz
-rw-r--r-- 1 seik seik   78 Dec  8 04:02 mysql.host.sql.gz
-rw-r--r-- 1 seik seik 3.3K Dec  8 04:02 mysql.help_keyword.sql.gz
-rw-r--r-- 1 seik seik   78 Dec  8 04:02 mysql.procs_priv.sql.gz
-rw-r--r-- 1 seik seik   78 Dec  8 04:02 mysql.proc.sql.gz
-rw-r--r-- 1 seik seik   78 Dec  8 04:02 mysql.servers.sql.gz
-rw-r--r-- 1 seik seik  173 Dec  8 04:02 mysql.proxies_priv.sql.gz
-rw-r--r-- 1 seik seik   78 Dec  8 04:02 mysql.time_zone_transition_type.sql.gz
-rw-r--r-- 1 seik seik   78 Dec  8 04:02 mysql.time_zone_transition.sql.gz
-rw-r--r-- 1 seik seik   78 Dec  8 04:02 mysql.time_zone_name.sql.gz
-rw-r--r-- 1 seik seik   78 Dec  8 04:02 mysql.time_zone_leap_second.sql.gz
-rw-r--r-- 1 seik seik   78 Dec  8 04:02 mysql.time_zone.sql.gz
-rw-r--r-- 1 seik seik   78 Dec  8 04:02 mysql.tables_priv.sql.gz
-rw-r--r-- 1 seik seik   78 Dec  8 04:02 mysql.slow_log.sql.gz
-rw-r--r-- 1 seik seik  539 Dec  8 04:02 mysql.user.sql.gz
-rw-r--r-- 1 seik seik 128K Dec  8 04:02 mysql.help_topic.sql.gz
-rw-r--r-- 1 seik seik   75 Dec  8 04:02 metadata
-rw-r--r-- 1 seik seik  25K Dec  8 04:08 mysql.schema
-rw-r--r-- 1 seik seik 1.3K Dec  8 04:08 mysql.triggers

Slackware4Life ! 🙂

How to store MySQL innobackupex backups at Google Cloud Storage

November 26, 2013

In general, I chose Google Cloud Storage to store the web sites' MySQL backups because of its price and its upload/download speed.

I used Google's native tool gsutil, innobackupex, and some bash.

In short: the /etc and local MySQL backup script:

#!/bin/sh
# Barcelona Tue Nov 22 17 16:30:36 CEST 2013

days_to_keep=3
NFS=/home/mysql.backups/
exportDate=`date +%Y-%m-%d.%H.%M.%S`
export_DIR=${NFS}/${HOSTNAME}.${exportDate}
test ! -d "${export_DIR}" && echo "$(date) : creating ${export_DIR}" && mkdir -p "${export_DIR}"
export_MySQL_DIR=${export_DIR}/mysql.bckp
export_ETC_DIR=${export_DIR}/etc.bckp
# backup the /etc directory
rsync -avh /etc ${export_ETC_DIR}
cat >> ${export_DIR}/README.restore.with.innobackupex << EOF
=========================================================================================================
HOW to restore this FULL mysql backup
=========================================================================================================
service mysql stop
ps aux | grep mysql
rsync -avh /var/lib/mysql /var/lib/mysql.BAD
rm -rf /var/lib/mysql
mkdir -p /var/lib/mysql && chown -R mysql:mysql /var/lib/mysql
innobackupex --copy-back ${export_MySQL_DIR}
=========================================================================================================
more info at http://www.percona.com/doc/percona-xtrabackup/2.1/innobackupex/restoring_a_backup_ibk.html
EOF
#cat /root/bin/README.restore.with.innobackupex >> ${export_DIR}/README.restore.with.innobackupex
innobackupex --ibbackup=xtrabackup --no-timestamp ${export_MySQL_DIR}
test $? -gt 0 && echo "$(date) : xtrabackup failed at ${export_MySQL_DIR}" && exit 1
innobackupex --apply-log ${export_MySQL_DIR}
find ${NFS}/ -daystart -maxdepth 1 -ctime +${days_to_keep} -type d -delete
find ${NFS}/ -daystart -maxdepth 1 -ctime +${days_to_keep} -type f -delete
nice tar cjvf ${export_DIR}.tar.bz2 ${export_DIR}
test $? -eq 0 && chown seik:seik ${export_DIR}.tar.bz2 && chmod 0700 ${export_DIR}.tar.bz2 && rm -rf ${export_DIR}
chown -R seik:seik "${NFS}"

The script that uploads the backups to Google Cloud Storage and keeps only the latest 5 remote backups:

#!/bin/bash
backupDir="/home/mysql.backups"
export PATH=${PATH}:$HOME/gsutil
gsUrl="gs://gsutil-test-test_default_cors-bucket-xxxxxx"
remoteBckpDir="tobedone.es"
for backup in `ls ${backupDir}`
do
 echo "$(date) : checking if ${backup} is stored"
 gsutil ls ${gsUrl}/${remoteBckpDir}/${backup} > /dev/null 2>&1
 if [ $? -gt 0 ]
 then
 echo "$(date) : ${backupDir}/${backup} is not stored, initiating upload"
 gsutil cp -R ${backupDir}/${backup} ${gsUrl}/${remoteBckpDir}/
 test $? -eq 0 && echo "$(date) : ${backup} is stored, deleting the local one" && rm ${backupDir}/${backup}
 else
 echo "$(date) : ${backup} is stored"
 test -f ${backupDir}/${backup} && echo "$(date) : ${backup} is stored, deleting the local one" && rm ${backupDir}/${backup}
 fi
done
# do some cleanup at google storage
gsutil ls -lrh ${gsUrl}/${remoteBckpDir}/ | sed '2,$!d;$d' | sort -r -k 3.12 | awk '{print $4}' | sed '6,$!d' | xargs -icrap gsutil rm crap
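
To chain the two scripts, a cron setup along these lines works (a sketch; the script paths and times are examples, in the spirit of the cron entries in the earlier posts):

# cat /etc/cron.d/backupmysql.gcs
# 30 3 * * * root /root/bin/backup.innobackupex.sh >> /var/log/backup.innobackupex.log 2>&1
# 30 5 * * * root /root/bin/upload.gcs.sh >> /var/log/upload.gcs.log 2>&1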

PostgreSQL: backup and compress only the schemas of a DB server

May 12, 2012
root@darkwater:[Sun May 13 00:27:32]:[/home/db.schema.backup]$ cat /home/db.schema.backup/bin/db.schema.dump.sh
#!/bin/sh

here=`pwd`
cd /home/db.schema.backup/


for db in `echo "\l" |  /opt/pgsql/bin/psql -U postgres  | grep -v template | sed '4,$!d' | awk '{print $1}' | grep -v "(" | sed 's/^$//'`
do
  
  echo "-- $(date): ${db} schema dump START" | gzip -9 -c >> ${db}.schema.only.sql.gz
  /opt/pgsql/bin/pg_dump -s -U postgres ${db} | /usr/bin/sed '/^$/d' | gzip -9 -c >> ${db}.schema.only.sql.gz
  echo "-- $(date): ${db} schema dump END" | gzip -9 -c >> ${db}.schema.only.sql.gz
done

cd "${here}"
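
Restoring one of these schema-only dumps is then just (a sketch; the target database must already exist, and the date markers the script writes are SQL comments, so psql ignores them):

## restore sketch: feed the gunzipped schema dump back to psql
gunzip -c mydb.schema.only.sql.gz | /opt/pgsql/bin/psql -U postgres mydb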