Redis backup script

August 7, 2015 2 comments
#!/bin/bash
## Fri, 07 Aug 2015 14:04:57 +0300
## redis backup every 15 minutes
## */15 * * * * redis.backup.maintenance.sh >> /var/log/redis.backup.log 2>&1
## at /etc/rc.local :
## test -d /var/run/redis.backup.lock.dir && rm -rf /var/run/redis.backup.lock.dir
## watch the job:
## tail -f /var/log/redis.backup.log

#redis-cli LASTSAVE | awk '{print $1}' | { read gmt ; date "+%Y-%m-%d %H:%M:%S" -d "@$gmt" ; }
# 2015-08-07 01:25:54

lockf="/var/run/redis.backup.lock.dir"

# check for running script start
# mkdir is atomic: it either creates the lock directory or fails because a
# concurrent run already holds it. The original stat-then-mkdir sequence was
# racy (two runs could both pass the -d test before either created the lock).
if ! mkdir "${lockf}" 2>/dev/null
then
 echo "$(date +%Y-%m-%d.%H.%M.%S) : ${lockf} exists, exiting"
 exit 0
fi
echo "$(date +%Y-%m-%d.%H.%M.%S) : created lock at ${lockf}"

# Always release the lock, even if the script dies mid-backup; a stale lock
# would otherwise block every following cron run until the rc.local cleanup.
trap 'echo "$(date +%Y-%m-%d.%H.%M.%S) : removing the lock"; rm -rf "${lockf}"' EXIT

echo "$(date +%Y-%m-%d.%H.%M.%S) : redis backup start"
echo "$(date +%Y-%m-%d.%H.%M.%S) : cleanup the /redis_backups and leave the last 6 backups"
# sort -r lists newest first (names embed a sortable timestamp); the sed
# expression keeps lines 7..end, i.e. everything older than the 6 newest
# dumps, which the loop then deletes.
find /redis_backups -maxdepth 1 -type f -name "dump.rdb.*" | sort -r | sed '7,$!d' | \
while IFS= read -r to_be_deleted
do
 rm -f -- "${to_be_deleted}" && echo "$(date +%Y-%m-%d.%H.%M.%S) : deleted ${to_be_deleted}"
done

# LASTSAVE reports the unix time of the last completed save. BGSAVE is
# asynchronous, so poll LASTSAVE until it changes before copying the dump.
last_save=$(redis-cli LASTSAVE | awk '{print $1}')
echo -n "$(date +%Y-%m-%d.%H.%M.%S) : executing redis-cli BGSAVE : "
redis-cli BGSAVE
while [ "$(redis-cli LASTSAVE | awk '{print $1}')" -eq "${last_save}" ]
do
 echo -n ". "
 sleep 2
done
echo ""
echo "$(date +%Y-%m-%d.%H.%M.%S) : start ionice -c2 -n0 cp -vv /opt/redis/dump.rdb to /redis_backups/"
# best-effort I/O priority; typo "comleted" fixed in the success message
ionice -c2 -n0 cp -vv /opt/redis/dump.rdb "/redis_backups/dump.rdb.$(date +%Y-%m-%d.%H.%M.%S)" && echo "$(date +%Y-%m-%d.%H.%M.%S) : backup completed"
Categories: AWS, bash, NoSQL Tags: , ,

MariaDB MySQL Percona list all indexes without using INFORMATION_SCHEMA.STATISTICS

July 8, 2015 1 comment

There is nothing more to be said:

-- Per-table column and key-column counts derived from
-- information_schema.columns.COLUMN_KEY, avoiding
-- INFORMATION_SCHEMA.STATISTICS entirely.
SELECT
  gen.TABLE_SCHEMA,
  gen.TABLE_NAME,
  -- total number of columns in the table
  (SELECT count(TABLE_NAME)
     FROM information_schema.columns idx
    WHERE idx.TABLE_SCHEMA = gen.TABLE_SCHEMA
      AND idx.TABLE_NAME = gen.TABLE_NAME) AS COLUMN_NUM,
  -- columns that participate in any kind of key
  (SELECT count(TABLE_NAME)
     FROM information_schema.columns idx
    WHERE idx.TABLE_SCHEMA = gen.TABLE_SCHEMA
      AND idx.TABLE_NAME = gen.TABLE_NAME
      AND COLUMN_KEY != "") AS INDEX_NUM_ALL,
  -- primary-key columns
  (SELECT count(TABLE_NAME)
     FROM information_schema.columns idx
    WHERE idx.TABLE_SCHEMA = gen.TABLE_SCHEMA
      AND idx.TABLE_NAME = gen.TABLE_NAME
      AND COLUMN_KEY = "PRI") AS INDEX_NUM_PRI,
  -- unique-key columns
  (SELECT count(TABLE_NAME)
     FROM information_schema.columns idx
    WHERE idx.TABLE_SCHEMA = gen.TABLE_SCHEMA
      AND idx.TABLE_NAME = gen.TABLE_NAME
      AND COLUMN_KEY = "UNI") AS INDEX_NUM_UNI,
  -- non-unique (multiple-value) key columns
  (SELECT count(TABLE_NAME)
     FROM information_schema.columns idx
    WHERE idx.TABLE_SCHEMA = gen.TABLE_SCHEMA
      AND idx.TABLE_NAME = gen.TABLE_NAME
      AND COLUMN_KEY = "MUL") AS INDEX_NUM_MUL
FROM information_schema.tables gen
WHERE gen.TABLE_SCHEMA NOT IN ('mysql', 'performance_schema', 'information_schema')
;
+-----------------+-----------------------+------------+---------------+---------------+---------------+---------------+
| TABLE_SCHEMA | TABLE_NAME | COLUMN_NUM | INDEX_NUM_ALL | INDEX_NUM_PRI | INDEX_NUM_UNI | INDEX_NUM_MUL |
+-----------------+-----------------------+------------+---------------+---------------+---------------+---------------+
Categories: MariaDB, MySQL Tags: , , ,

MySQL, Percona, MariaDB long running processes clean up one liner

April 30, 2015 Leave a comment

There are tools like pt-kill from the Percona Toolkit that can print or kill long running transactions on MariaDB, MySQL or Percona database instances, but a lot of backup scripts are just a few simple bash lines.
So checking for long running transactions before the backup to be executed seems to be a step that is missed a lot.

Here is a one-liner that can simply be added to every bash script before the backup is executed.
Variant 1. Just log all the processlist entries and calculate which ones were running longer than TIMELIMIT:

$ export TIMELIMIT=70 && echo "$(date) : check for long running queries start:" >> /tmp/processlist.list.to.kill && mysql -BN -e 'show processlist;' | tee -a /tmp/processlist.list.to.kill | awk -vlongtime=${TIMELIMIT} '($6>longtime){print "kill "$1";"}' | tee -a /tmp/processlist.list.to.kill

Variant 2: Log all the processlist entries, calculate which processes have been running longer than TIMELIMIT, and kill them before executing the backup:

$ export TIMELIMIT=70 && echo "$(date) : check for long running queries start:" >> /tmp/processlist.list.to.kill && mysql -BN -e 'show processlist;' | tee -a /tmp/processlist.list.to.kill | awk -vlongtime=${TIMELIMIT} '($6>longtime){print "kill "$1";"}' | tee -a /tmp/processlist.list.to.kill | mysql >> /tmp/processlist.list.to.kill 2>&1

Update bash at Slackware 11 with the latest patch against shellshock bugs CVE-2014-6271 CVE-2014-7169 CVE-2014-7186 CVE-2014-7187

October 1, 2014 1 comment

I still have Slackware 11 machines … so I had to recompile the bash

I took a moment to read this nice blog related to shellshock: http://chester.me/archives/2014/09/building-bash-from-source-shellshock-mitigation/
So in short what I did at my Slackware 11 machines:

root@DL-380:[Wed Oct 01 23:05:47]:[~]$ cat /etc/slackware-version 
Slackware 11.0.0
root@DL-380:[Wed Oct 01 23:05:47]:[/opt/installs]$ bash --version
GNU bash, version 3.1.17(2)-release (i486-slackware-linux-gnu)
Copyright (C) 2005 Free Software Foundation, Inc.

root@DL-380:[Wed Oct 01 23:06:02]:[/opt/installs]$ wget  http://ftp.gnu.org/gnu/bash/bash-3.1.tar.gz
root@DL-380:[Wed Oct 01 23:06:35]:[/opt/installs]$ lftp http://ftp.gnu.org/gnu/bash
cd: received redirection to `http://ftp.gnu.org/gnu/bash/'
cd ok, cwd=/gnu/bash                               
lftp ftp.gnu.org:/gnu/bash> mirror bash-3.1-patches 
Total: 1 directory, 44 files, 0 symlinks                  
New: 44 files, 0 symlinks
60375 bytes transferred in 5 seconds (11.3K/s)
lftp ftp.gnu.org:/gnu/bash> exit
root@DL-380:[Wed Oct 01 23:07:39]:[/opt/installs]$ rm bash-3.1-patches/*sig
root@DL-380:[Wed Oct 01 23:07:53]:[/opt/installs]$ tar xvf bash-3.1.tar.gz 
root@DL-380:[Wed Oct 01 23:08:54]:[/opt/installs]$ cd bash-3.1
root@DL-380:[Wed Oct 01 23:08:59]:[/opt/installs/bash-3.1]$ 
root@DL-380:[Wed Oct 01 23:08:59]:[/opt/installs/bash-3.1]$ for patch_file in `find /opt/installs/bash-3.1-patches/ -type f `;  do echo $patch_file && patch -p0 < $patch_file ; done
root@DL-380:[Wed Oct 01 23:09:23]:[/opt/installs/bash-3.1]$ tail patchlevel.h
#if !defined (_PATCHLEVEL_H_)
#define _PATCHLEVEL_H_

/* It's important that there be no other strings in this file that match the
   regexp `^#define[     ]*PATCHLEVEL', since that's what support/mkversion.sh
   looks for to find the patch level (for the sccs version string). */

#define PATCHLEVEL 21

#endif /* _PATCHLEVEL_H_ */
root@DL-380:[Wed Oct 01 23:09:44]:[/opt/installs/bash-3.1]$ ./configure 
root@DL-380:[Wed Oct 01 23:10:49]:[/opt/installs/bash-3.1]$ make -j3 
ls -l bash
-rwxr-xr-x 1 root root 1556950 2014-10-01 23:11 bash
size bash
   text       data        bss        dec        hex    filename
 634120      22840      19432     676392      a5228    bash
root@DL-380:[Wed Oct 01 23:11:19]:[/opt/installs/bash-3.1]$ 
root@DL-380:[Wed Oct 01 23:11:19]:[/opt/installs/bash-3.1]$ env x='() { :;}; echo vulnerable' bash -c "echo this is a test" 
vulnerable
this is a test
root@DL-380:[Wed Oct 01 23:12:36]:[/opt/installs/bash-3.1]$ env x='() { :;}; echo vulnerable' ./bash -c "echo this is a test" 
this is a test
root@DL-380:[Wed Oct 01 23:12:41]:[/opt/installs/bash-3.1]$ ./bash --version
GNU bash, version 3.1.21(2)-release (i686-pc-linux-gnu)
Copyright (C) 2005 Free Software Foundation, Inc.
root@DL-380:[Wed Oct 01 23:12:46]:[/opt/installs/bash-3.1]$ which bash
/usr/bin/bash
root@DL-380:[Wed Oct 01 23:13:11]:[/opt/installs/bash-3.1]$ file /usr/bin/bash
/usr/bin/bash: symbolic link to `/bin/bash'
root@DL-380:[Wed Oct 01 23:13:15]:[/opt/installs/bash-3.1]$ file /bin/bash
/bin/bash: ELF 32-bit LSB executable, Intel 80386, version 1 (SYSV), dynamically linked (uses shared libs), stripped
root@DL-380:[Wed Oct 01 23:13:18]:[/opt/installs/bash-3.1]$ cp -fp bash /bin/bash
root@DL-380:[Wed Oct 01 23:13:28]:[/opt/installs/bash-3.1]$ env x='() { :;}; echo vulnerable' bash -c "echo this is a test" 
this is a test
root@DL-380:[Wed Oct 01 23:13:43]:[/opt/installs/bash-3.1]$ (for x in {1..200} ; do echo "for x$x in ; do :"; done; for x in {1..200} ; do echo done ; done) | bash || echo "CVE-2014-7187 vulnerable, word_lineno" 
root@DL-380:[Wed Oct 01 23:13:53]:[/opt/installs/bash-3.1]$ bash -c 'true <<EOF <<EOF <<EOF <<EOF <<EOF <<EOF <<EOF <<EOF <<EOF <<EOF <<EOF <<EOF <<EOF <<EOF' || echo "CVE-2014-7186 vulnerable, redir_stack" 
root@DL-380:[Wed Oct 01 23:14:02]:[/opt/installs/bash-3.1]$ 

Done

Slackware4Life :)

How to relabel two identical TOSHIBA 1Tb USB 3.0 disks in a view to be auto mounted properly at XBMC

September 19, 2014 Leave a comment

I have two identical 1TB Toshiba USB 3.0 disks, and when I attach them to my RaspberryPI running XBMC, they appear as identical TOSHIBA EXT mounts at the GUI

After searching at the udev rules, I decided just to rename the partitions …

root@raspbmc:[Fri Sep 19 19:31:40][~]$ umount /media/TOSHIBA\ EXT 
root@raspbmc:[Fri Sep 19 19:31:56][~]$ ntfslabel /dev/sdb1 TOSHIBA_GABI
root@raspbmc:[Fri Sep 19 19:32:16][~]$ udevadm info -q all -n /dev/sdb1
P: /devices/platform/bcm2708_usb/usb1/1-1/1-1.2/1-1.2:1.0/host1/target1:0:0/1:0:0:0/block/sdb/sdb1
N: sdb1
S: disk/by-id/usb-TOSHIBA_External_USB_3.0_23183A059E8A-0:0-part1
S: disk/by-label/TOSHIBA_GABI
S: disk/by-path/platform-bcm2708_usb-usb-0:1.2:1.0-scsi-0:0:0:0-part1
S: disk/by-uuid/BC2CF6852CF639CA
E: DEVLINKS=/dev/disk/by-id/usb-TOSHIBA_External_USB_3.0_23183A059E8A-0:0-part1 /dev/disk/by-label/TOSHIBA_GABI /dev/disk/by-path/platform-bcm2708_usb-usb-0:1.2:1.0-scsi-0:0:0:0-part1 /dev/disk/by-uuid/BC2CF6852CF639CA
E: DEVNAME=/dev/sdb1
E: DEVPATH=/devices/platform/bcm2708_usb/usb1/1-1/1-1.2/1-1.2:1.0/host1/target1:0:0/1:0:0:0/block/sdb/sdb1
E: DEVTYPE=partition
E: ID_BUS=usb
E: ID_FS_LABEL=TOSHIBA_GABI
E: ID_FS_LABEL_ENC=TOSHIBA_GABI
E: ID_FS_TYPE=ntfs
E: ID_FS_USAGE=filesystem
E: ID_FS_UUID=BC2CF6852CF639CA
E: ID_FS_UUID_ENC=BC2CF6852CF639CA
E: ID_INSTANCE=0:0
E: ID_MODEL=External_USB_3.0
E: ID_MODEL_ENC=External\x20USB\x203.0
E: ID_MODEL_ID=a00d
E: ID_PART_ENTRY_DISK=8:16
E: ID_PART_ENTRY_NUMBER=1
E: ID_PART_ENTRY_OFFSET=2048
E: ID_PART_ENTRY_SCHEME=dos
E: ID_PART_ENTRY_SIZE=1953519616
E: ID_PART_ENTRY_TYPE=0x7
E: ID_PART_TABLE_TYPE=dos
E: ID_PATH=platform-bcm2708_usb-usb-0:1.2:1.0-scsi-0:0:0:0
E: ID_PATH_TAG=platform-bcm2708_usb-usb-0_1_2_1_0-scsi-0_0_0_0
E: ID_REVISION=5438
E: ID_SERIAL=TOSHIBA_External_USB_3.0_23183A059E8A-0:0
E: ID_SERIAL_SHORT=23183A059E8A
E: ID_TYPE=disk
E: ID_USB_DRIVER=usb-storage
E: ID_USB_INTERFACES=:080650:
E: ID_USB_INTERFACE_NUM=00
E: ID_VENDOR=TOSHIBA
E: ID_VENDOR_ENC=TOSHIBA\x20
E: ID_VENDOR_ID=0480
E: MAJOR=8
E: MINOR=17
E: SUBSYSTEM=block
E: UDEV_LOG=3
E: UDISKS_DISABLE_POLLING=1
E: UDISKS_PARTITION=1
E: UDISKS_PARTITION_ALIGNMENT_OFFSET=0
E: UDISKS_PARTITION_NUMBER=1
E: UDISKS_PARTITION_OFFSET=1048576
E: UDISKS_PARTITION_SCHEME=mbr
E: UDISKS_PARTITION_SIZE=1000202043392
E: UDISKS_PARTITION_SLAVE=/sys/devices/platform/bcm2708_usb/usb1/1-1/1-1.2/1-1.2:1.0/host1/target1:0:0/1:0:0:0/block/sdb
E: UDISKS_PARTITION_TYPE=0x07
E: UDISKS_PRESENTATION_NOPOLICY=0
E: USEC_INITIALIZED=691627146808

root@raspbmc:[Fri Sep 19 19:32:52][~]$ df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/mmcblk0p2  7.3G  2.1G  4.9G  30% /
/dev/mmcblk0p1   69M   50M   19M  73% /boot
/dev/sda1       932G  778G  155G  84% /media/TOSHIBA_ALL
/dev/sdb1       932G  775G  157G  84% /media/TOSHIBA_GABI
root@raspbmc:[Fri Sep 19 19:33:27][~]$ cat /scripts/upd_hist/build_info
raspbmc-rls-1.0-hardfp-b20140527-u20140527
root@raspbmc:[Fri Sep 19 19:35:33][~]$ df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/mmcblk0p2  7.3G  2.1G  4.9G  30% /
/dev/mmcblk0p1   69M   50M   19M  73% /boot
/dev/sda1       932G  778G  155G  84% /media/TOSHIBA_ALL
/dev/sdb1       932G  775G  157G  84% /media/TOSHIBA_GABI

slackware4life :)

Fedora release 20 (Heisenbug) enable client OpenVPN service on boot

August 21, 2014 Leave a comment

In short, I had to deal with the Fedora EVIL EVIL EVIL !!! Systemd
I found my openvpn startup script useless ..
Managed to set the VPN at the Network Manager just loading the openvpn config file, but I wanted to be started on boot … because out of the blue my Fedora 20 Gnome boot hanged on the blue logo !!! This is another story to be told.

Anyway, back to the OpenVPN issue — here are the steps to make it start on boot, following the instructions at http://fedoraproject.org/wiki/Openvpn

1. Set the keys and the config file at /etc/openvpn

root@outoutdragkh.f20:[Thu Aug 21 01:30:06][~]$ cd /etc/openvpn/
root@outdragkh.f20:[Thu Aug 21 01:30:10][/etc/openvpn]$ ls
keys  outdragkh.client.conf

2. Fix selinux context of the config files :

root@outdragkh.f20:[Thu Aug 21 01:30:10][/etc/openvpn]$  restorecon -Rv /etc/openvpn

3. Check the general openvpn service file:

root@outdragkh.f20:[Thu Aug 21 01:30:32][/etc/openvpn]$ ls /lib/systemd/system/openvpn\@.service
/lib/systemd/system/openvpn@.service
root@outdragkh.f20:[Thu Aug 21 01:30:51][/etc/openvpn]$ cat /lib/systemd/system/openvpn\@.service
[Unit]
Description=OpenVPN Robust And Highly Flexible Tunneling Application On %I
After=syslog.target network.target

[Service]
PrivateTmp=true
Type=forking
PIDFile=/var/run/openvpn/%i.pid
ExecStart=/usr/sbin/openvpn --daemon --writepid /var/run/openvpn/%i.pid --cd /etc/openvpn/ --config %i.conf

[Install]
WantedBy=multi-user.target

4. Set the systemd start scripts for the outoutdragkh.client.conf

root@outdragkh.f20:[Thu Aug 21 01:30:55][/etc/openvpn]$ ln -s /lib/systemd/system/openvpn\@.service /etc/systemd/system/multi-user.target.wants/openvpn\@outdragkh.client.service
root@outdragkh.f20:[Thu Aug 21 01:32:04][/etc/openvpn]$ ls /etc/systemd/system/multi-user.target.wants/openvpn@outdragkh.client.service 
/etc/systemd/system/multi-user.target.wants/openvpn@outdragkh.client.service

5. Enable the dragkh OpenVPN service

root@outdragkh.f20:[Thu Aug 21 01:32:12][/etc/openvpn]$ systemctl -f enable openvpn@outdragkh.client.service
rm '/etc/systemd/system/multi-user.target.wants/openvpn@outdragkh.client.service'
ln -s '/usr/lib/systemd/system/openvpn@.service' '/etc/systemd/system/multi-user.target.wants/openvpn@outdragkh.client.service'

6. Start the dragkh OpenVPN service

root@outdragkh.f20:[Thu Aug 21 01:32:30][/etc/openvpn]$ systemctl start openvpn@outdragkh.client.service 
root@outdragkh.f20:[Thu Aug 21 01:32:49][/etc/openvpn]$ systemctl status openvpn@outdragkh.client.service 
openvpn@outdragkh.client.service - OpenVPN Robust And Highly Flexible Tunneling Application On outdragkh.client
   Loaded: loaded (/usr/lib/systemd/system/openvpn@.service; enabled)
   Active: active (running) since Thu 2014-08-21 01:32:49 CEST; 5s ago
  Process: 3194 ExecStart=/usr/sbin/openvpn --daemon --writepid /var/run/openvpn/%i.pid --cd /etc/openvpn/ --config %i.conf (code=exited, status=0/SUCCESS)
 Main PID: 3195 (openvpn)
   CGroup: /system.slice/system-openvpn.slice/openvpn@outdragkh.client.service
           └─3195 /usr/sbin/openvpn --daemon --writepid /var/run/openvpn/outdragkh.client.pid --cd /etc/openvpn/ --config outdragkh.client.conf

Aug 21 01:32:49 dragkh.wordpress.com systemd[1]: Started OpenVPN Robust And Highly Flexible Tunneling Application On outdragkh.client.
root@outdragkh.f20:[Thu Aug 21 01:32:55][/etc/openvpn]$ systemctl status openvpn@outdragkh.client.service 

done …

Slackware2Life :)

MySQL backup and cleanup bash scripts with mydumper

June 28, 2014 1 comment

1. Backup script

#!/bin/sh
# Fri Jun 27 10:44:49 2014
# done by dragkh
# usage: 
# cat /etc/cron.d/backupmysql 
# 0  3  *  *  *       root    /root/bin/clean.backup.hyperion.mysql.mydumper.daily.sh >>  /var/log/clean.backup.${HOSTNAME}.mysql.mydumper.daily.log 2>&1
# 35  3  *  *  *       root    /root/bin/backup.hyperion.mysql.mydumper.daily.sh >> /var/log/backup.${HOSTNAME}.mysql.mydumper.daily.log 2>&1

ROOT_BACKUP_DIR="/home/mydumper"

# seik_date: print a timestamp for log lines and file names.
#   no args   -> RFC 2822 date (date -R)
#   one arg   -> file-name-safe stamp: %Y-%m-%d.%H.%M.%S
#   two args  -> human-readable stamp: "%Y-%m-%d %H:%M:%S"
# The value is also exported as ${cdate} for callers (kept for
# backward compatibility with the original script).
seik_date () {
  if [ -z "${1:-}" ]
  then
    cdate=$(date -R)
  else
    if [ -z "${2:-}" ]
    then
      cdate=$(date +%Y-%m-%d.%H.%M.%S)
    else
      cdate=$(date "+%Y-%m-%d %H:%M:%S")
    fi
  fi
  export cdate
  echo "$cdate"
}


# check_dir: create directory $1 (with parents) when it does not exist.
# Returns success only when the directory was just created, which lets the
# caller log "is missing, creating it now" exactly once.
# NOTE: the script's shebang is /bin/sh, so the bash-only 'function' keyword
# is replaced with the portable name() form.
check_dir () {
 test ! -d "${1}" && mkdir -p "${1}"
}


# set_cpu_threads: set the global ${threads} to one less than the CPU count
# (the last "processor : N" line in /proc/cpuinfo carries N = cpu_count - 1),
# but never below 1, leaving one core free for the database server.
# Portable name() form used because the script's shebang is /bin/sh.
set_cpu_threads () {
    # single awk pass replaces the original cat|grep|tail|awk pipeline
    threads=$(awk '/^processor/ {n = $3} END {print n}' /proc/cpuinfo 2>/dev/null)
    test "${threads:-0}" -lt 1 && threads=1
}

# dump_schema: write a schema-only (-d) dump of all databases, with the dump
# date recorded (--dump-date), to ${DATA_DIR}/${HOSTNAME}.only.sql.
# Reads globals: DATA_DIR, HOSTNAME. Portable name() form (shebang is /bin/sh).
dump_schema () {
    mysqldump -d --dump-date --all-databases > "${DATA_DIR}/${HOSTNAME}.only.sql"
}


# dump_data: run mydumper against the local server into ${DATA_DIR}.
# Flags: --long-query-guard 120 (abort on queries running >120s),
# -r 100000 (chunk rows), -c (compress), -e (include empty tables),
# -m (presumably skip schemas, already covered by dump_schema — confirm
# against the installed mydumper version), -t ${threads}, -v 3 (verbose).
# Reads globals: DATA_DIR, threads. Portable name() form (shebang is /bin/sh).
dump_data () {
    echo "$(seik_date f) : executing : mydumper -o ${DATA_DIR} --long-query-guard 120 -r 100000 -c -e -m -L ${DATA_DIR}/mysql-backup.log -t ${threads} -v 3"
    mydumper -o "${DATA_DIR}" --long-query-guard 120 -r 100000 -c -e -m -L "${DATA_DIR}/mysql-backup.log" -t "${threads}" -v 3
}

# Main flow: prepare today's backup directory, then dump schema and data.
DATA_DIR="${ROOT_BACKUP_DIR}/$(seik_date d)"
# check_dir succeeds only when it had to create the directory, so the log
# line fires exactly once per new backup dir.
check_dir "${DATA_DIR}" && echo "$(seik_date f) : ${DATA_DIR} is missing, creating it now .."
set_cpu_threads
# typo fix: "star dumping" -> "start dumping" (matches the data log line)
echo "$(seik_date f) : start dumping the schema at ${DATA_DIR}.."
dump_schema && echo "$(seik_date f) : end dumping the schema at ${DATA_DIR} .."
echo "$(seik_date f) : start dumping the data at ${DATA_DIR} via ${threads} parallel threads .."
dump_data && echo "$(seik_date f) : end dumping the data at ${DATA_DIR} via ${threads} parallel threads .."

2. Cleanup script that always keeps the most recent backup directories intact

#!/bin/bash
# Sat Jun 28 03:16:38 EEST 2014
# done by dragkh
# usage: 
# cat /etc/cron.d/backupmysql
# 0       3       *       *       *       root    /root/bin/clean.backup.hyperion.mysql.mydumper.daily.sh >> /var/log/clean.backup.${HOSTNAME}.mysql.mydumper.daily.log 2>&1
# 35      3      *       *       *       root    /root/bin/backup.hyperion.mysql.mydumper.daily.sh >> /var/log/backup.${HOSTNAME}.mysql.mydumper.daily.log 2>&1

ROOT_BACKUP_DIR="/home/mydumper"

# seik_date: print a timestamp for log lines and file names.
#   no args   -> RFC 2822 date (date -R)
#   one arg   -> file-name-safe stamp: %Y-%m-%d.%H.%M.%S
#   two args  -> human-readable stamp: "%Y-%m-%d %H:%M:%S"
# The value is also exported as ${cdate} for callers (kept for
# backward compatibility with the original script).
seik_date () {
  if [ -z "${1:-}" ]
  then
    cdate=$(date -R)
  else
    if [ -z "${2:-}" ]
    then
      cdate=$(date +%Y-%m-%d.%H.%M.%S)
    else
      cdate=$(date "+%Y-%m-%d %H:%M:%S")
    fi
  fi
  export cdate
  echo "$cdate"
}

# Keep the ${day_limit} newest backup directories and delete the rest.
day_limit=7
dir_num=0
# ls -t lists newest first; entries beyond ${day_limit} get removed.
# (Backup dir names are plain timestamps from seik_date, so parsing ls
# output is safe here.)
ls -t "${ROOT_BACKUP_DIR}" | \
while read -r dir
do
    dir_num=$((dir_num + 1))
    if [ "${dir_num}" -gt "${day_limit}" ] && [ -d "${ROOT_BACKUP_DIR}/${dir}" ]
    then
        # BUG FIX: the original ran rm -rf "${dir}" relative to the current
        # working directory; the backup lives under ${ROOT_BACKUP_DIR}.
        # ${ROOT_BACKUP_DIR:?} aborts instead of expanding to "/" if unset.
        rm -rf "${ROOT_BACKUP_DIR:?}/${dir}" && echo "$(seik_date d) : removed [${dir_num}]::[${dir}]"
        continue
    fi
    test -d "${ROOT_BACKUP_DIR}/${dir}" && echo "$(seik_date d) : skipping [${dir_num}]::[${dir}]"
done

Follow

Get every new post delivered to your Inbox.