Scripting
Zimbra (CLI) - Search & Delete messages from users
posted Sep 26, 2018, 4:43 AM by Chris Franklin [ updated Sep 28, 2018, 4:07 AM ]
This is a simple script that allows you to search either delete a message from a single person OR all users from the command line. A full help with examples is displayed if no or an invalid arguments are given. Most of the script is really about making the existing Zimbra cli tools more user friendly. By converting things like "(to:spam@nomadcf.com OR cc:spam@nomadcf.com)" to a simpler "-t spam@nomadcf.com". CODE#!/bin/bash ## Written by Chris Franklin ## CFranklin@NomadCF.com ## Vars MSG_LIST_MAX=999 LOG=/var/log/zimbra_remove_messages.$(date +%s).log ## Nothing to edit passed here function _help() { cat <<EOF $1 -a : Finds emails with any attachment $0 -a -A : attachment filename $0 -A bob.txt finds emai lwith wav attachments $0 -A .wav -c : Contains this word/phrase (very picky) $0 -c BPMCLASSROOM !! Won't find BPMCLASSROOM !! $0 -c BPMCLASS -C : Condition ("unread", "read", "flagged", "unflagged", "sent", "draft", "received", "replied", "unreplied", "forwarded", unforwarded", "anywhere", "remote" (in a shared folder), "local", "sent", "invite", "solo" (no other messages in conversation), "tome", "fromme", "ccme", "tofromme". "fromccme", "tofromccme" (to, from cc me, including my aliases) $0 -C unread -d : recived on date (must be as fallows mm/dd/yyyy) $0 -d 09/09/2017 -D : today $0 -D -e : Before this date (must be as fallows mm/dd/yyyy) $0 -e 09/09/2018 -E : After this date (must be as fallows mm/dd/yyyy) $0 -E 09/09/2017 -f : From address (Display name,emailaddress or domain) (Always Wild card searches!!) $0 -f cfranklin $0 -f "Chris Franklin" $0 -f @berea.k12.oh.us $0 -f cfranklin@berea.k12.oh.us -h : Header exists (Any value is * (wildcard)) $0 -h "X-Spammy" -H : Header Value (Requires -h) $0 -H "75" -i : Search only the inbox (Default is to search all folders OR -l messages) $0 -i -I : folder to search in (Default is to search all folders OR -l messages) $0 -I Sent -l : Limit List to this number newest messages $0 -l 5 -L : List user messages (max 999) $0 -L -m : Move Message to trash (Default is to show messages) $0 -m -s : Subject $0 -s "Taxes are due" -S : Messages total size including attachments is BIGGER than (in megabytes) $0 -S 5 -t : Message for (to or cc) $0 -t "bob@aol.com" -u : Username to check (Default is all usernames) $0 -u "cfranklin" -v : Verbose -Z : Deletes the matching messages -------------- Examples: Find emails with the fallowing 1. delivered "TODAY" 2. HEADER field called "Return-Path" WITH value of "geofdupo@savba.sk" 3. FROM a display name containing "BCSD" 4. Message Contains the phrase "BCSD Account will be De-activated" 5. Subject Contains "Technology Help Desk" 6. Search in the inbox 7. 
Search only the mailbox for USER cfranklin $0 -D -h "Return-Path" -H "geofdupo@savba.sk" -f "BCSD" -c "BCSD Account will be De-activated" -s "Technology Help Desk" -i -u cfranklin As as above, but now MOVE the email to the TRASH $0 -D -h "Return-Path" -H "geofdupo@savba.sk" -f "BCSD" -c "BCSD Account will be De-activated" -s "Technology Help Desk" -i -u cfranklin -m ------------- $1 EOF exit 1 } REQ=0 MSG_LIST=0 SEARCH=0 MOVE=0 DELETE=0 DEBUG=0 while getopts ":A:c:C:d:e:E:f:h:l:H:I:N:s:S:t:u:miLaDZv" opt; do case ${opt} in a) EMAIL_ATTACHMENT=1; SEARCH=1; REQ=1; ;; A) EMAIL_ATTACHMENT_NAME="${OPTARG}"; SEARCH=1; REQ=1; ;; t) SEARCH=1; EMAIL_TO="${OPTARG}"; REQ=1; ;; c) SEARCH=1; EMAIL_CONTAINS="${OPTARG}"; REQ=1; ;; C) SEARCH=1; EMAIL_CONDITION="${OPTARG}"; REQ=1; ;; l) MSG_LIST_MAX=${OPTARG}; ;; d) SEARCH=1; EMAIL_DATE="${OPTARG}"; REQ=1; ;; D) SEARCH=1; EMAIL_DATE="$(date +%m/%d/%Y)"; REQ=1; ;; e) SEARCH=1; EMAIL_DATE_BEFORE="${OPTARG}"; REQ=1; ;; E) SEARCH=1; EMAIL_DATE_AFTER="${OPTARG}"; REQ=1; ;; f) SEARCH=1; EMAIL_FROM="${OPTARG}"; REQ=1; ;; h) SEARCH=1; EMAIL_HEADER="#${OPTARG}"; REQ=1; ;; H) SEARCH=1; EMAIL_HEADER_VALUE="${OPTARG}"; REQ=1; ;; i) SEARCH_FOLDER="inbox"; REQ=1; ;; L) MSG_LIST=1; REQ=1; ;; I) SEARCH_FOLDER="${OPTARG}"; REQ=1; ;; m) MOVE=1; ;; s) SEARCH=1; EMAIL_SUBJECT="${OPTARG}"; REQ=1; ;; S) SEARCH=1; EMAIL_SIZE_BIGGER=">${OPTARG}mb"; REQ=1; ;; u) EMAIL_USER="${OPTARG}"; ;; v) DEBUG=1 ;; Z) DELETE=1 ;; \?) _help $opt ;; :) _help $opt ;; *) _help $opt ;; esac done shift $((OPTIND - 1)) if [ $REQ -eq 0 ] && [ ! -z "${EMAIL_USER}" ]; then MSG_LIST=1 REQ=1 fi if [ $REQ -eq 0 ]; then _help fi if [ -z "${EMAIL_USER}" ]; then EMAIL_USER="*" fi if [ ! -z "${EMAIL_DATE}" ]; then if [ ! -z "${EMAIL_DATE_BEFORE}" ] || [ ! -z "${EMAIL_DATE_AFTER}" ]; then _help "ERROR: -d CAN'T be used with -D OR -E " fi fi if [ $DELETE -eq 1 ] && [ $MOVE -eq 1 ]; then _help "ERROR: -Z CAN'T be used with -m" fi #convert LIST anything to listing with a search if [ $MSG_LIST -eq 1 ] && [ $SEARCH -eq 1 ]; then MSG_LIST=2 elif [ $MOVE -eq 0 ] && [ $DELETE -eq 0 ] && [ $MSG_LIST -eq 0 ]; then MSG_LIST=2 fi if [ $MSG_LIST -gt 0 ] && [ $MOVE -eq 1 ]; then _help "ERROR: -m and -L can NOT be used at the same time." fi if [ $MSG_LIST -gt 0 ] && [ $DELETE -eq 1 ]; then _help "ERROR: -Z and -L can NOT be used at the same time." fi if [ ! -z "${EMAIL_HEADER_VALUE}" ] && [ -z "${EMAIL_HEADER}" ]; then _help "ERROR: -H requires -h" fi if [ ! -z "${EMAIL_ATTACHMENT_NAME}" ] && [ ! -z "${EMAIL_ATTACHMENT}" ]; then _help "ERROR: -A Can't BOTH be used -a" fi START_DATE="$(date)" START_SECS=$(date +%s) ## Lookup zmlocalconfig ZMCONFIG=$(whereis zmlocalconfig | cut -d ' ' -f2) if [ ! -e "${ZMCONFIG}" ]; then echo "Could not find: zmlocalconfig" exit fi ## lookup ldapsearch LDAPSEARCH=$(whereis ldapsearch | cut -d ' ' -f2) if [ ! -e "${LDAPSEARCH}" ]; then echo "Could not find: ldapsearch" exit fi MAIL=$(which mail) if [ ! -e "${MAIL}" ]; then echo "Could not find: mail" exit fi EMAIL_SEARCH="" if [ ! -z "${EMAIL_FROM}" ]; then EMAIL_SEARCH="${EMAIL_SEARCH}from:${EMAIL_FROM} " fi if [ ! -z "${EMAIL_SUBJECT}" ]; then EMAIL_SEARCH="${EMAIL_SEARCH}subject:\"${EMAIL_SUBJECT}\" " fi if [ ! -z "${EMAIL_DATE}" ]; then EMAIL_SEARCH="${EMAIL_SEARCH}date:${EMAIL_DATE} " fi if [ ! -z "${EMAIL_DATE_BEFORE}" ]; then EMAIL_SEARCH="${EMAIL_SEARCH}before:${EMAIL_DATE_BEFORE} " fi if [ ! -z "${EMAIL_DATE_AFTER}" ]; then EMAIL_SEARCH="${EMAIL_SEARCH}after:${EMAIL_DATE_AFTER} " fi if [ ! 
-z "${SEARCH_FOLDER}" ]; then EMAIL_SEARCH="${EMAIL_SEARCH}in:${SEARCH_FOLDER} " fi if [ ! -z "${EMAIL_HEADER_VALUE}" ]; then EMAIL_SEARCH="${EMAIL_SEARCH} ${EMAIL_HEADER}:\"${EMAIL_HEADER_VALUE}\" " fi if [ ! -z "${EMAIL_TO}" ]; then EMAIL_SEARCH="${EMAIL_SEARCH} (to:${EMAIL_TO} OR cc:${EMAIL_TO}) " fi if [ ! -z "${EMAIL_ATTACHMENT}" ]; then EMAIL_SEARCH="${EMAIL_SEARCH}has:attachment " fi if [ ! -z "${EMAIL_ATTACHMENT_NAME}" ]; then EMAIL_SEARCH="${EMAIL_SEARCH}filename:\"${EMAIL_ATTACHMENT_NAME}\" " fi if [ ! -z "${EMAIL_CONTAINS}" ]; then EMAIL_SEARCH="${EMAIL_SEARCH}content:\"${EMAIL_CONTAINS}\" " fi if [ ! -z "${EMAIL_SIZE_BIGGER}" ]; then EMAIL_SEARCH="${EMAIL_SEARCH}larger:${EMAIL_SIZE_BIGGER} " fi if [ ! -z "${EMAIL_CONDITION}" ]; then EMAIL_SEARCH="${EMAIL_SEARCH}is:${EMAIL_CONDITION} " fi ## This is the default ldap lookup query LDAP_SEARCH="${LDAPSEARCH} -x -h $($ZMCONFIG ldap_host | cut -d '=' -f2) -D $($ZMCONFIG zimbra_ldap_userdn | awk '{print $3}') -w$($ZMCONFIG -s zimbra_ldap_password | cut -d ' ' -f3) -LLL -o ldif-wrap=no " echo "" if [ $DELETE -eq 1 ] || [ $MOVE -eq 1 ]; then echo "Log file: ${LOG}" fi echo "" $LDAP_SEARCH "(&(objectClass=zimbraAccount)(mail=${EMAIL_USER}@berea.k12.oh.us))" mail | while IFS=": " read TRASH EMAIL; do if [ ! -z "${EMAIL}" ] && [ "${TRASH}" == "mail" ]; then # MNUM = Message number in printed out list # MID = Message ID in zimbra # TRASH = Everything else about the email if [ $DELETE -eq 1 ] || [ $MOVE -eq 1 ]; then echo "${EMAIL}" | tee -a "${LOG}" else echo "${EMAIL}" fi if [ $MSG_LIST -eq 1 ]; then if [ "${DEBUG}" -eq 1 ]; then echo "/opt/zimbra/bin/zmmailbox -z -m '${EMAIL}' s -l ${MSG_LIST_MAX} -T" >> "${LOG}" fi /opt/zimbra/bin/zmmailbox -z -m "${EMAIL}" s -l ${MSG_LIST_MAX} -T | tee -a "${LOG}" elif [ $MSG_LIST -eq 2 ]; then if [ "${DEBUG}" -eq 1 ]; then echo "/opt/zimbra/bin/zmmailbox -z -m '${EMAIL}' s -l ${MSG_LIST_MAX} -t message '${EMAIL_SEARCH}'" >> "${LOG}" fi /opt/zimbra/bin/zmmailbox -z -m "${EMAIL}" s -l ${MSG_LIST_MAX} -t message "${EMAIL_SEARCH}" | tee -a "${LOG}" elif [ $MOVE -eq 1 ] || [ $DELETE -eq 1 ]; then if [ "${DEBUG}" -eq 1 ]; then echo "/opt/zimbra/bin/zmmailbox -z -m '${EMAIL}' s -l ${MSG_LIST_MAX} -t message '${EMAIL_SEARCH}'" | tee -a "${LOG}" fi /opt/zimbra/bin/zmmailbox -z -m "${EMAIL}" s -l ${MSG_LIST_MAX} -t message "${EMAIL_SEARCH}" | while IFS=" " read MNUM MID TRASH; do if [ ! -z "${MNUM}" ] && [ ! -z "${MID}" ]; then if [ "${MNUM//./}" != "${MNUM}" ]; then if [ $DELETE -eq 1 ]; then echo "#Deleting Match: ${EMAIL} => ${MNUM} => ${MID} => ${TRASH} " >> "${LOG}" echo "/opt/zimbra/bin/zmmailbox -z -m '${EMAIL}' deleteMessage '${MID}'" >> "${LOG}" echo "Deleting ${MID} for ${EMAIL}" /opt/zimbra/bin/zmmailbox -z -m "${EMAIL}" deleteMessage "${MID}" else echo "#Moving Match: ${EMAIL} => ${MNUM} => ${MID} => ${TRASH} " >> "${LOG}" echo "#undo: /opt/zimbra/bin/zmmailbox -z -m '${EMAIL}' mm '${MID}' /${SEARCH_FOLDER}" >> "${LOG}" echo /opt/zimbra/bin/zmmailbox -z -m "${EMAIL}" mm "${MID}" /Trash >> "${LOG}" echo "Moving msg ${MID} for ${EMAIL} to TRASH" /opt/zimbra/bin/zmmailbox -z -m "${EMAIL}" mm "${MID}" /Trash fi fi fi done fi fi done FINISHED_DATE="$(date)" FINISHED_SECS=$(date +%s) echo "" echo "" echo "Started: ${START_DATE}" | tee -a "${LOG}" echo "Finished: ${FINISHED_DATE}" | tee -a "${LOG}" if [ $DELETE -eq 1 ] || [ $MOVE -eq 1 ]; then echo "Run time: $((($FINISHED_SECS-$START_SECS)/60)) mins" | tee -a "${LOG}" else echo "Run time: $((($FINISHED_SECS-$START_SECS)/60)) mins" fi echo "" exit 0 |
Attachments:
zimbra_remove_messages.bash
Bash Zimbra - Export all users' complete mailboxes (calendar, emails, folders, etc.) individually
posted Jan 16, 2018, 7:13 AM by Chris Franklin [ updated Jan 16, 2018, 7:25 AM ]
We wanted to start backing up our users' complete mailboxes individually, but we had two issues. First, zmmailbox is a single-threaded Java "program", and second, we needed this job done during our "off hours" (who really has off hours any more?). To overcome this I needed multiple, but limited, copies of zmmailbox running at the same time. We quickly found that just feeding a list of users to zmmailbox one at a time resulted in a backup taking 13+ hours: 13 hours to back up 983 users. This script reduces that to about 3.1 hours (still not the hour to hour-and-a-half I was hoping for), using only the tools built into a standard box and without overwhelming it, provided you don't over-provision the number of jobs that can run at one time. It was written on a Red Hat Enterprise Linux Server release 6 box. A minimal sketch of the job-throttling idea comes first, followed by the full script.
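The core of the script is a crude job throttle: each worker creates a flag file in /dev/shm while it runs, and the dispatcher only starts a new worker when a free slot (a missing flag file) is found. Below is a minimal sketch of just that pattern, with the real zmmailbox work replaced by a sleep and a hypothetical userlist.txt standing in for the zmprov output:

#!/bin/bash
MAXJOBS=8

_worker() {
    SLOT=$1; EMAIL=$2
    ## ... the real per-user work (zmmailbox export, ldapsearch, etc.) goes here ...
    sleep 5
    rm "/dev/shm/backup.id.${SLOT}"              # free the slot when finished
}

while read EMAIL; do
    STARTED=0
    while [ $STARTED -eq 0 ]; do
        for SLOT in $(seq 0 $((MAXJOBS - 1))); do
            if [ ! -e "/dev/shm/backup.id.${SLOT}" ]; then
                touch "/dev/shm/backup.id.${SLOT}"   # claim the slot before forking
                ( _worker $SLOT "${EMAIL}" ) &
                STARTED=1
                break
            fi
        done
        [ $STARTED -eq 0 ] && sleep 1            # all slots busy; wait and retry
    done
done < userlist.txt
wait                                             # let the last workers finish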
zimbra_backup_mailboxes.bash#!/bin/bash MAXJOBS=8 # 8&10=same, 6=slower LOG=/var/log/zimbra.user-backup.log BACKUPS=/var/opt/zimbra/backup/mailboxes ZMBACKUPCMD=/var/opt/zimbra/bin/zmmailbox ZMCONFIG=$(whereis zmlocalconfig | cut -d ' ' -f2) LDAPSEARCH=$(whereis ldapsearch | cut -d ' ' -f2) LDAP_SEARCH="${LDAPSEARCH} -x -h $($ZMCONFIG ldap_host | cut -d '=' -f2) -D $($ZMCONFIG zimbra_ldap_userdn | awk '{print $3}') -w$($ZMCONFIG -s zimbra_ldap_password | cut -d ' ' -f3) -LLL -o ldif-wrap=no " ## Look for left over runtime files if [ ! -z "$(find /dev/shm -type f -name "zimbra.backup.id.*")" ]; then echo "Found existing RUNTIME files, ending." echo "Found existing RUNTIME files, ending." >> "${LOG}" exit fi ## Functions function _thejob() { THISID=$1 EMAIL=$2 USERN=${EMAIL%%@*} ## Create runtime file touch /dev/shm/zimbra.backup.id.$THISID ## Remvoe old status log files find "${BACKUPS}/" -name "${USERN}.ok" -o -name "${USERN}.bad" -delete $ZMBACKUPCMD -z -m "${EMAIL}" -t 0 getRestURL '/?fmt=tgz' > "${BACKUPS}/${USERN}.tgz" if [ $? -eq 0 ]; then cat <<EOF > "${BACKUPS}/${USERN}.ok" ## Simple restore: (Skips dups, does not recreate deleted calendar events) ${ZMBACKUPCMD} -z -m '${EMAIL}' -t 0 postRestURL '/?fmt=tgz&resolve=skip' '${BACKUPS}/${USERN}.tgz' ## Full Restore: ${ZMBACKUPCMD} -z -m '${EMAIL}' -t 0 postRestURL '/?fmt=tgz&resolve=reset' '${BACKUPS}/${USERN}.tgz' EOF else echo > "${BACKUPS}/${USERN}.bad" fi ## Backup LDAP info (although we can't restore it ... yet) $LDAP_SEARCH "(&(objectClass=zimbraAccount)(mail=${EMAIL}))" > "${BACKUPS}/${USERN}.ldif" ## Remove runtime file rm /dev/shm/zimbra.backup.id.$THISID } date date > "${LOG}" ( ## List all users /var/opt/zimbra/bin/zmprov -l gaa | grep cfranklin | while read EMAIL; do ## Ignore Archive accounts if [[ ${EMAIL} != *".archive" ]]; then echo "${EMAIL}" fi done ) | while read EMAIL; do echo "Working on : $EMAIL" >> "${LOG}" RUNNING=0 while [ $RUNNING -eq 0 ]; do CURJOBS=0 while [ $CURJOBS -lt $MAXJOBS ]; do if [ ! -e /dev/shm/zimbra.backup.id.$CURJOBS ]; then echo "Started : $EMAIL : $CURJOBS" >> "${LOG}" ( _thejob $CURJOBS "${EMAIL}" ) & sleep 1 if [ -e /dev/shm/zimbra.backup.id.$CURJOBS ]; then RUNNING=1 CURJOBS=$MAXJOBS fi else ((CURJOBS++)) fi done if [ $RUNNING -eq 0 ]; then if [ $CURJOBS -eq $MAXJOBS ]; then echo -n "Max Jobs running" >> "${LOG}" elif [ $CURJOBS -gt $MAXJOBS ]; then echo -n "." fi fi sleep 1 done echo done ## Waiting for jobs to finish RUNNING=0 while [ ! -z "$(find /dev/shm -type f -name "zimbra.backup.id.*")" ]; do if [ $RUNNING -eq 0 ]; then RUNNING=1 echo -n "Waiting for jobs to finish." else echo -n "." fi sleep 1 done echo date date >> "${LOG}" |
Google Sheets - Auto update/insert "Last Updated" column on change
posted May 5, 2017, 3:48 AM by Chris Franklin [ updated May 5, 2017, 3:49 AM ]
After a cell has been changed, the script looks through the header row for a column called "Last Updated" and writes the current date and time into that row's cell under that column.

Code
function columnnumber2letter(column) {
  var temp, letter = '';
  while (column > 0) {
    temp = (column - 1) % 26;
    letter = String.fromCharCode(temp + 65) + letter;
    column = (column - temp - 1) / 26;
  }
  return letter;
}

function onEdit() {
  var sheet = SpreadsheetApp.getActiveSheet();
  var activecell = sheet.getActiveCell();
  var temp = sheet.getDataRange().getValues();
  var lastupdatecolumn = temp[0].indexOf('Last Updated') + 1;
  if ( activecell.getRowIndex() > 1 ) {
    if ( activecell.getColumn() != lastupdatecolumn ) { // skip edits made to the "Last Updated" column itself
      var row = activecell.getRow();
      var time = new Date();
      time = Utilities.formatDate(time, "GMT-05:00", "yyyyMMdd HH:mm");
      sheet.getRange(columnnumber2letter(lastupdatecolumn) + row.toString()).setValue(time);
    }
  }
}
VMware - copy VMs while keeping thin provisioning & register them on a backup VM server
posted Mar 31, 2017, 6:54 AM by Chris Franklin [ updated Mar 31, 2017, 7:17 AM ]
I use ghettovcb to back up my VM hosts to my file servers via NFS. I do this because I can't quickly and easily copy the guests directly to my backup VM host(s): VMware refuses to acknowledge that its users want, and have been asking for, remote access to datastores since the beginning of time. So here is my stop-gap solution, using only the tools built into VMware (plus ghettovcb), to copy each backup from the NFS mount to the backup VM host, register the latest good backup, and unregister the old copy of that same guest.

The flow is: VM host (ghettovcb via cron) => NFS share => backup VM host (my scripts via cron). An illustrative cron layout is sketched below, followed by the first script, copy_vm_to_backup.sh, which does the copying and the thin-provision conversion.
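For reference, everything is driven by plain cron on both hosts. The entries below are only illustrative (times, paths and ghettoVCB flags are assumptions, not copied from my actual setup); note that on ESXi the root crontab lives under /var/spool/cron/crontabs/ and does not survive a reboot unless it is re-added from a startup script such as /etc/rc.local.d/local.sh.

## Production VM host: nightly ghettoVCB backup of all guests to the NFS-backed "backups" datastore
0 22 * * * /vmfs/volumes/backups/scripts/ghettoVCB.sh -a -g /vmfs/volumes/backups/scripts/ghettoVCB.conf

## Backup VM host: copy/convert the finished backups, then register the newest good copies
0 2 * * * /vmfs/volumes/backups/scripts/copy_vm_to_backup.sh
0 6 * * * /vmfs/volumes/backups/scripts/register_guests.sh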
copy_vm_to_backup.sh## config vars max_copies=10 vmstore_backup="backups" vmstore_restore="datastore1" log="/vmfs/volumes/${vmstore_backup}/scripts/logs/esxi-backups.$(date +%s).log" debug=1 vmkfstools="$(which vmkfstools)" ## functions logit() { if [ $debug -eq 1 ]; then echo "$1" fi echo "$1" >> "${log}" } logit "========= Starting Copy =========" if [ ! -d "${log}" ]; then mdkir -p "${log}" if [ $? -eq 0 ]; then debug=1 logit "ERROR : mdkir -p '${log}'" else logit "Created Logs dir : '${log}'" fi fi ## the work find /vmfs/volumes/${vmstore_backup}/ -type d -maxdepth 1 -mindepth 1 | while read vmdirs; do # Reset skip this vm vmskip=0 if [ -d "${vmdirs}" ]; then vmname=$(basename "${vmdirs}") logit "Found dir : ${vmdirs}" logit "VM name : ${vmname}" if [ "${vmdirs}" != "scripts" ]; then find "${vmdirs}" -type f -iname "STATUS.ok" | while read vmbackup; do logit "Found Completed backup file : ${vmbackup}" vmbackupdir=$(dirname "${vmbackup}") vmrestoredir=$(dirname "${vmbackup/${vmstore_backup}/${vmstore_restore}}") logit "vmbackupdir : ${vmbackupdir}" logit "vmrestoredir : ${vmrestoredir}" if [ -e "${vmrestoredir}/BACKUP.err" ]; then logit "Removing old ERROR file" rm -f "${vmrestoredir}/BACKUP.err" fi logit "Looking into : ${vmbackupdir}" find "${vmbackupdir}" -type f | while read vmbackupfiles; do vmbackupfilesdir=$(dirname "${vmbackupfiles}") vmbackupfilesname=$(basename "${vmbackupfiles}") vmbackupfilesextension="${vmbackupfilesname##*.}" vmrestorefiles="${vmbackupfiles/$vmstore_backup/$vmstore_restore}" logit "Found file : ${vmbackupfiles}" if [ ! -d "${vmrestoredir}" ]; then logit "Creating dir : ${vmrestoredir}" mkdir -p "${vmrestoredir}" if [ $? -ne 0 ]; then vmskip=1 logit "ERROR : mkdir -p '${vmrestoredir}'" fi fi if [ $vmskip -eq 0 ]; then case "${vmbackupfilesname}" in "STATUS.ok") ## Ignore the STATUS.ok file ignore=1 ;; *"-flat.vmdk") ## Ignore any -flat vhd files logit "checking if flat file is an orphan. : '${vmrestorefiles}' or '${vmrestorefiles/-flat.vmdk/.vmdk}.converted'" if [ -e "${vmrestorefiles}" ]; then logit "Found restored flat file" if [ ! -e "${vmrestorefiles/-flat.vmdk/.vmdk}.converted" ]; then logit "Found no converted flag though : '${vmrestorefiles/-flat.vmdk/.vmdk}.converted'" if [ -e "${vmrestorefiles/-flat.vmdk/.vmdk}.convert.log" ]; then if [ $(grep -c '100% done.' "${vmrestorefiles/-flat.vmdk/.vmdk}.convert.log") -eq 1 ]; then logit "Found convert.log & it reads the conver was 100%. Creating converted flag file : '${vmrestorefiles/-flat.vmdk/.vmdk}.converted'" touch "${vmrestorefiles/-flat.vmdk/.vmdk}.converted" else logit "ERROR : No converted flag found, convert file does NOT read 100% done. Leaving file" fi else logit "Found no converted log : '${vmrestorefiles/-flat.vmdk/.vmdk}.convert.log'" fi fi if [ -e "${vmrestorefiles/-flat.vmdk/.vmdk}.converted" ]; then logit "Found left over flat file" if [ ! -e "${vmbackupfiles/-flat.vmdk/.vmdk}" ]; then logit "Removing left over flat (orphan)" rm -f "${vmbackupfiles}" if [ $? -ne 0 ]; then logit "ERROR : Removing left over flat (orphan) : rm -f '${vmbackupfiles}'" fi fi fi fi ;; *".vmdk") ## Convert to thin (or at least keep thin provisioning) vmrestorefilesflat="${vmrestorefiles%.*}-flat.vmdk" if [ ! 
-e "${vmrestorefiles}.converted" ]; then logit "Looking for any preveous convert attemps : '${vmrestorefiles}' or '${vmrestorefilesflat}'" if [ -e "${vmrestorefiles}" ] || [ -e "${vmrestorefilesflat}" ]; then if [ -e "${vmrestorefiles}" ]; then logit "Removing preveous convert attemp : '${vmrestorefiles}'" rm -f "${vmrestorefiles}" if [ $? -ne 0 ]; then logit "ERROR : removing preveous convert attemp : rm -f '${vmrestorefiles}'" fi fi if [ -e "${vmrestorefilesflat}" ]; then logit "Removing preveous convert attemp (flat) : '${vmrestorefilesflat}'" rm -f "${vmrestorefilesflat}" if [ $? -ne 0 ]; then logit "ERROR : removing preveous convert attemp : rm -f '${vmrestorefilesflat}'" fi fi fi logit "Convert : Start : $(date)" logit "Converting : '${vmbackupfiles}' => '${vmrestorefiles}'" $vmkfstools -d thin -i "${vmbackupfiles}" "${vmrestorefiles}" > "${vmrestorefiles}.convert.log" if [ $? -eq 0 ] && [ $(grep -c '100% done.' "${vmrestorefiles}.convert.log") -eq 1 ]; then logit "Convert : End : $(date)" touch "${vmrestorefiles}.converted" logit "Created : ${vmrestorefiles}.converted" logit "Looking for vmdk file : '${vmbackupfiles}'" if [ -e "${vmbackupfiles}" ]; then echo "Removing Converted VHD : ${vmbackupfiles}" rm -f "${vmbackupfiles}" fi logit "Looking for flat file : '${vmbackupfilesdir}/${vmbackupfilesname%.*}-flat.vmdk'" if [ -e "${vmbackupfilesdir}/${vmbackupfilesname%.*}-flat.vmdk" ]; then echo "Removing Converted Flat VHD : ${vmbackupfiles%.*}-flat.vmdk" rm -f "${vmbackupfilesdir}/${vmbackupfilesname%.*}-flat.vmdk" fi else logit "Convert : End : $(date)" logit "ERROR : $vmkfstools -d thin -i '${vmbackupfiles}' '${vmrestorefiles}'" vmskip=1 fi else logit "Already Convereted found : '${vmrestorefiles}.converted'" if [ $(grep -c '100% done.' "${vmrestorefiles}.convert.log") -eq 1 ]; then logit "Converted log reports 100% complete : grep -c '100% done.' '${vmrestorefiles}.convert.log'" logit "Looking for left over vmdk file : '${vmbackupfiles}'" if [ -e "${vmbackupfiles}" ]; then echo "Removing Converted VHD : ${vmbackupfiles}" rm -f "${vmbackupfiles}" fi logit "Looking for left over flat file : '${vmbackupfilesdir}/${vmbackupfilesname%.*}-flat.vmdk'" if [ -e "${vmbackupfilesdir}/${vmbackupfilesname%.*}-flat.vmdk" ]; then echo "Removing Converted Flat VHD : ${vmbackupfiles}" rm -f "${vmbackupfilesdir}/${vmbackupfilesname%.*}-flat.vmdk" fi fi fi ;; *".vmx") ## Copy and modify base config vmbackupdate="${vmrestoredir%%_*}" vmbackupdate="${vmbackupdate##*/}" vmbackupdate="${vmbackupdate/${vmname}-/}" cp -f "${vmbackupfiles}" "${vmrestorefiles}.org" if [ $? -eq 0 ]; then logit "Copied Config : ${vmrestorefiles}.org" cat "${vmrestorefiles}.org" | sed -e 's/^displayName\ \=\ \".*"$/displayName\ \=\ \"'"${vmbackupdate} ${vmname}"'"/' > "${vmrestorefiles}" if [ $? -eq 0 ]; then logit "Mod'ed Config : ${vmrestorefiles}" rm -f "${vmbackupfiles}" else logit "ERROR : Mod'ing Config : ${vmrestorefiles}" vmskip=1 fi else logit "ERROR : Copying Config : ${vmbackupfiles} => ${vmrestorefiles}.org" vmskip=1 fi ;; *) ## Default copy any non known file if [ ! -d "${vmrestoredir}" ]; then mkdir -p "${vmrestoredir}" if [ $? -ne 0 ]; then logit "ERROR : mkdir -p '${vmrestoredir}'" vmskip=1 fi fi if [ -d "${vmrestoredir}" ]; then cp -f "${vmbackupfiles}" "${vmrestorefiles}" if [ $? 
-eq 0 ]; then logit "Copied MISC file : ${vmbackupfiles}" rm -f "${vmbackupfiles}" else logit "ERROR : Coping MISC file : cp -f '${vmbackupfiles}' '${vmrestorefiles}'" vmskip=1 fi else logit "ERROR : Coping MISC file : no dir '${vmrestorefiles}'" vmskip=1 fi ;; esac fi if [ $vmskip -eq 1 ]; then touch ${vmrestoredir}/BACKUP.err fi done if [ $(find "${vmbackupdir}" -type f | wc -l) -eq 1 ] && [ ! -e "${vmrestoredir}/BACKUP.err" ]; then logit "Found only remaning file : $(find '${vmbackupdir}' -type f)" touch "${vmrestoredir}/STATUS.ok" if [ $? -eq 0 ]; then logit "Created STATUS.ok file : ${vmrestoredir}/STATUS.ok" rm -f "${vmbackupdir}/STATUS.ok" if [ $? -ne 0 ]; then logit "ERROR : unable to delete backup source STATUS.ok : rm -f '${vmbackupdir}/STATUS.ok'" fi else logit "ERROR : Creating STATUS.ok : touch '${vmrestoredir}/STATUS.ok'" fi if [ $? -eq 0 ]; then rm -f "${vmbackupdir}/STATUS.ok" fi vmbackupdir="${vmbackupdir//../}" case "${vm2delete}" in *[!/]*/) vmbackupdir=${vmbackupdir%"${vmbackupdir##*[!/]}"} ;; esac if [ -d "vmbackupdir" ]; then if [ "${vmbackupdir}" != "/vmfs/volumes/${vmstore_backup}" ] && [ "${vmbackupdir}" != "/vmfs/volumes" ] && [ "${vmbackupdir}" != "/vmfs" ]; then rm -rf "${vmbackupdir}" if [ $? -eq 0 ]; then logit "Removed : '${vmbackupdir}'" else logit "ERROR : rm -rf '${vmbackupdir}'" fi fi fi else logit "ERROR : Found remaning files : $(find ${vmbackupdir} -type f)" fi done fi else logit "Found misc file : ${vmdirs}" fi done logit "======= Start Old Removal =======" find /vmfs/volumes/${vmstore_restore}/ -type d -maxdepth 1 -mindepth 1 -not -iname ".*" | while read vmbase; do foundvms=$(find "${vmbase}" -type f \( -name "STATUS.ok" -o -name "BACKUP.err" \) | wc -l) logit "Old Copies Count : ${foundvms} for ${vmbase}" if [ $foundvms -gt $max_copies ]; then logit "Over Max Copies : ${vmbase} by $(($foundvms - $max_copies))" ls -1cr "${vmbase}" | head -n $(($foundvms - $max_copies)) | while read vmdelete; do vm2delete="${vmbase}/${vmdelete}/" vm2delete="${vm2delete//../}" case "${vm2delete}" in *[!/]*/) vm2delete=${vm2delete%"${vm2delete##*[!/]}"} ;; esac logit "Removing Old Copy : ${vmbase}/${vmdelete}" if [ "${vm2delete}" != "/vmfs/volumes/${vmstore_restore}" ] && [ "${vm2delete}" != "/vmfs/volumes" ] && [ "${vm2delete}" != "/vmfs" ]; then echo rm -rf "${vm2delete}" if [ $? -eq 0 ]; then logit "Removed : ${vm2delete}" else logit "ERROR : Removing Old Copy : '${vmbase}/${vmdelete}'" fi fi done fi done logit "======= Completely Done =========" This script does all the registering and un-registering of the backups. register_guests.shvmstore_backup="datastore1" find "/vmfs/volumes/${vmstore_backup}/" -type d -maxdepth 1 -mindepth 1 -not -iname "scripts" | while read vmname; do ls -1r "${vmname}"/*/STATUS.ok | head -n 1 | while read vmstatusfile; do ## Setup some vars vmpath=$(dirname "${vmstatusfile}") vmversion=${vmpath##*/} vmname=${vmpath%/*} vmname=${vmname##*/} vmdate=${vmpath##*/} vmdate=${vmdate/${vmname}-/} vmdate=${vmdate%_*} #2017-03-27 vmdisplayname="${vmname}_${vmdate}" # See if this VM is already reg. 
if [ $(vim-cmd vmsvc/getallvms | grep -c -i "${vmname}") -eq 0 ]; then # if not reg it echo "Adding : ${vmversion}" find "${vmpath}" -iname "*.vmx" -exec vim-cmd solo/registervm '{}' \; else # test is this version is already reged, if not unreg the existing and reg this opne if [ $(vim-cmd vmsvc/getallvms | grep -c "${vmversion}") -eq 0 ]; then # get vmid of exiting vm guest vmid=$(vim-cmd vmsvc/getallvms | grep -i "${vmname}" | awk '{print $1}') # chec kthat it is not on. if it is, we must be using it and SHOULD NOT remove it if [ $(vim-cmd vmsvc/power.getstate ${vmid} | grep -c "Powered off") ]; then vim-cmd vmsvc/unregiste ${vmid} # check that it was removed if [ $(vim-cmd vmsvc/getallvms | grep -c -i "${vmname}") -eq 0 ]; then # add the new one echo "Adding : ${vmversion}" find "${vmpath}" -iname "*.vmx" -exec vim-cmd solo/registervm '{}' \; fi fi else echo "Skipping : ${vmversion}" fi fi done done |
Backup Samba user SIDs
posted Oct 21, 2016, 6:22 AM by Chris Franklin [ updated Oct 21, 2016, 6:25 AM ]
A simple bash script. Requirements: pdbedit and sqlite3 (both must be on the PATH).
samba_backup_users_sids.bash
#!/bin/bash
PDBEDIT=$(which pdbedit)
SQLITE=$(which sqlite3)
DB=/etc/samba/backup_users_sid.sqlite

function _create_db() {
    echo "CREATE TABLE users (uid INT primary key,sid);" | $SQLITE "${DB}"
}

if [ ! -e "${DB}" ]; then
    _create_db
fi

function _sid_lookup() {
    USERN=$1
    $PDBEDIT -L -v "${USERN}" | while IFS=":" read KEY VALUE; do
        case "${KEY}" in
            "User SID") echo "${VALUE// /}";;
        esac
    done
}

$PDBEDIT -L | while IFS=":" read USERN USERUID TRASH; do
    ## Skip machine accounts (usernames ending in $)
    if [ "${USERN}" == "${USERN/\$/}" ]; then
        SID=$(_sid_lookup "${USERN}")
        echo "INSERT OR IGNORE INTO users (uid,sid) VALUES ('${USERUID}','${SID}');" | $SQLITE "${DB}"
    fi
done

Example restore
if [ -e /etc/samba/backup_users_sid.sqlite ]; then
    SID=$(/usr/bin/sqlite3 /etc/samba/backup_users_sid.sqlite "SELECT sid FROM users WHERE uid=${uid}")
else
    SID=""
fi
if [ -z "${SID}" ]; then
    (echo "${password}"; echo "${password}";) | /usr/bin/pdbedit -t -a -u "${username}" > /dev/null 2>&1
else
    (echo "${password}"; echo "${password}";) | /usr/bin/pdbedit -t -a -U ${SID} -u "${username}" > /dev/null 2>&1
fi
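To spot-check the resulting database after a run (the uid 1001 below is just an example value):

## Show every saved uid/SID pair
sqlite3 /etc/samba/backup_users_sid.sqlite "SELECT uid, sid FROM users;"

## Look up a single user's SID by uid
sqlite3 /etc/samba/backup_users_sid.sqlite "SELECT sid FROM users WHERE uid=1001;"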
Bash - Auto Tailing new files in a directory
posted Dec 23, 2015, 9:07 AM by Chris Franklin [ updated Dec 23, 2015, 9:56 AM ]
I had a simple problem, I had multiple log files I needed to monitor and have scripts act on the results. But tail does stop looking at a file if it "disappears" and doesn't look for new files in a Dir once it starts. And after looking at the alternatives xtail and multitail i decided I was better off coming up with my own way to dealing with these issues. New version no grep inline with tail (little bit better) Version 2#!/bin/bash logs_name="*-sshd-*" logs_path=/var/log notify_hashs=/tmp/.notify if [[ ! -d "${notify_hashs}" ]]; then mkdir -p "${notify_hashs}" if [[ ! -d "${notify_hashs}" ]]; then echo "Could NOT create hash storage : ${notify_hashs}" exit fi fi ( while [ 1 ]; do ## We store the hashs in a flat file. The Hashs are just flags for lines that ## we have already send out notfies about. hashs="${notify_hashs}/$(date +%Y%m%d).hash" ## Why check if the file exists since touching is harmless... right? touch "${hashs}" ( ## This sub process watches for newer/deleted versions of the listed files. ## And if any are found it will exit thus forcing the tail process to also ## exit and restart. found_first=$(find ${logs_path} -maxdepth 1 -mindepth 1 -print0 -iname "${logs_name}") repeat=1 ## Loop forever unless we are told otherwise. ## During each loop we look for any changes in the files we are looking for. while [ ${repeat} -eq 1 ]; do sleep 0.5 ## Find will return all the found files in one (possibly) LONG string. ## Hashing would shorten the string and reduce memory memory usage abit but ## it would also require launching other process which would use more CPU ## and temporary memory until the hash is made... EVERY loop! found_current=$(find ${logs_path} -maxdepth 1 -mindepth 1 -print0 -iname "${logs_name}") if [[ $found_current != $found_first ]]; then repeat=0 fi done ) & ## Here is where we tail the files in question, sending erros to /dev/null and only grabbing the lines we care about. tail --pid="$!" -q -F ${logs_path}/${logs_name} 2>/dev/null | while read line; do ## Skip all lines that start with "==" OR blank if [[ ! "$line" =~ ^\=\= ]] && [[ ! -z "$line" ]]; then if [[ "$line" =~ :\ (Accepted|Failed) ]]; then if [[ ! "$line" =~ ((www.bbhcsd.org|files).*10.80.16.10) ]]; then hash=$(echo "${line}" | md5sum -) if [ $(grep -c -m 1 "${hash}" "${hashs}") -eq 0 ]; then echo $line echo "${hash}" >> "${hashs}" /scripts/notify_with_slack.bash -t "${line}" fi fi fi fi done done ) & Old Version with grep & tail logwatcher.sshd.bash#!/bin/bash logs_name="*-sshd-*" logs_path=/var/log notify_hashs=/tmp/.notify if [[ ! -d "${notify_hashs}" ]]; then mkdir -p "${notify_hashs}" if [[ ! -d "${notify_hashs}" ]]; then echo "Could NOT create hash storage : ${notify_hashs}" exit fi fi ( while [ 1 ]; do hashs="${notify_hashs}/$(date +%Y%m%d).hash" touch "${hashs}" ( ## This sub process watches for newer versions of the listed files. ## And if any are found it will exit thus forcing the tail process to also exit. found_first=$(find ${logs_path} -maxdepth 1 -mindepth 1 -print0 -iname "${logs_name}") repeat=1 while [ ${repeat} -eq 1 ]; do sleep 0.5 found_current=$(find ${logs_path} -maxdepth 1 -mindepth 1 -print0 -iname "${logs_name}") if [[ $found_current != $found_first ]]; then repeat=0 fi done ) & tail --pid="$!" -q -F ${logs_path}/${logs_name} 2>/dev/null | grep --line-buffer -E ": (Accepted|Failed)" | grep --line-buffer -v -E "((www.nomadcf.com|files).*10.80.16.10)" | while read line; do ## Skip all lines that start with "==" OR blank if [[ ! 
$line =~ ^\=\= ]] && [[ $line ]]; then hash=$(echo "${line}" | md5sum -) if [ $(grep -c -m 1 "${hash}" "${hashs}") -eq 0 ]; then echo "${hash}" >> "${hashs}" /scripts/notify_with_slack.bash -t "${line}" fi fi done done ) & |
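Stripped of the hashing and Slack notification, the trick both versions rely on is tail's --pid option: a background subshell polls the directory with find and exits as soon as the set of matching files changes, which makes tail --pid=$! exit too, and the outer loop then restarts tail against the new set of files. A bare-bones sketch of just that pattern (the log path and pattern are placeholders):

#!/bin/bash
logs_path=/var/log
logs_name="*-sshd-*"

while true; do
    (
        ## Watcher: exit as soon as the list of matching files changes
        first=$(find ${logs_path} -maxdepth 1 -mindepth 1 -iname "${logs_name}")
        while [ "$(find ${logs_path} -maxdepth 1 -mindepth 1 -iname "${logs_name}")" = "${first}" ]; do
            sleep 0.5
        done
    ) &
    ## tail follows all matching files and exits when the watcher ($!) exits,
    ## so the outer loop re-runs it with the refreshed file list
    tail --pid="$!" -q -F ${logs_path}/${logs_name} 2>/dev/null | while read line; do
        echo "${line}"        # act on each new log line here
    done
done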
PHP sockets - send authenticated email (Gmail)
posted Sep 10, 2015, 7:16 AM by Chris Franklin
Codefunction SendMail($ServerName, $Port, $Username, $Password, $ToEmail, $FromEmail, $Subject, $Body, $Attachments='' ) { /* Attachments work like this array('Attachment Name'=>'ATTACHMENT AS A STRING','Next Attachment Name'=>'ATTACHMENT 2 AS A STRING') */ $smtp = fsockopen($ServerName, $Port); $InputBuffer = fgets($smtp, 1024); $ErrorCode = 220; if(substr($InputBuffer,0,3) != "$ErrorCode") return "Failed Connect"; fputs($smtp, "HELO $ServerName\r\n"); $InputBuffer = fgets($smtp, 1024); $ErrorCode = 250; if(substr($InputBuffer,0,3) != "$ErrorCode") return "Failed Helo"; fputs($smtp, "AUTH LOGIN\r\n"); $InputBuffer = fgets($smtp, 1024); $ErrorCode = 334; if(substr($InputBuffer,0,3) != "$ErrorCode") return "Failed AUTH"; fputs($smtp, base64_encode($Username)."\r\n"); $InputBuffer = fgets($smtp, 1024); $ErrorCode = 334; if(substr($InputBuffer,0,3) != "$ErrorCode") return "Failed Username"; fputs($smtp, base64_encode($Password)."\r\n"); $InputBuffer = fgets($smtp, 1024); $ErrorCode = 235; if(substr($InputBuffer,0,3) != "$ErrorCode") return "Failed Password"; fputs($smtp, "MAIL From:<$FromEmail>\r\n"); $InputBuffer = fgets($smtp, 1024); $ErrorCode = 250; if(substr($InputBuffer,0,3) != "$ErrorCode") return "Failed MAIL"; fputs($smtp, "RCPT To:<$ToEmail>\r\n"); $InputBuffer = fgets($smtp, 1024); $ErrorCode = 250; if(substr($InputBuffer,0,3) != "$ErrorCode") return "Failed RCPT"; fputs($smtp, "DATA\r\n"); $InputBuffer = fgets($smtp, 1024); $ErrorCode = 354; if(substr($InputBuffer,0,3) != "$ErrorCode") return "Failed DATA"; fputs($smtp, "From: $FromEmail\r\n"); fputs($smtp, "To: $ToEmail\r\n"); if ($Attachments != '') { $ContentBoundry = '----=_NextPart_'.md5(date('U').uniqid('NCF',TRUE)); fputs($smtp, "MIME-Version: 1.0\r\n"); fputs($smtp, 'Content-Type: multipart/mixed; boundary="'.$ContentBoundry.'"'."\r\n"); } fputs($smtp, "Subject: $Subject\r\n\r\n"); if ($Attachments == '') { fputs($smtp, "$Body\r\n.\r\n"); } else { $NewBody = "\r\n"; $NewBody .= '--'.$ContentBoundry."\r\n"; $NewBody .= 'Content-Type: text/plain; charset="UTF-8"'."\r\n"; $NewBody .= 'Content-Transfer-Encoding: 8bit'."\r\n\r\n"; $NewBody .= $Body."\r\n"; foreach ($Attachments as $Filename => $Attachment) { $NewBody .= '--'.$ContentBoundry."\r\n"; $NewBody .= 'Content-Type: application/octet-stream; name="'.$Filename.'"'."\r\n"; $NewBody .= 'Content-Transfer-Encoding: base64'."\r\n"; $NewBody .= 'Content-Disposition: attachment; filename="'.$Filename.'"'."\r\n\r\n"; $NewBody .= chunk_split(base64_encode($Attachment)); $NewBody .= "\r\n"; } $NewBody .= $ContentBoundry."--\r\n\r\n"; fputs($smtp, $NewBody."\r\n.\r\n"); } $InputBuffer = fgets($smtp, 1024); $ErrorCode = 250; if(substr($InputBuffer,0,3) != "$ErrorCode") return "Failed BODY"; fputs($smtp, "QUIT\n"); $InputBuffer = fgets($smtp, 1024); $ErrorCode = 221; if(substr($InputBuffer,0,3) != "$ErrorCode") return "Failed QUIT"; fclose($smtp); return TRUE; } How to use it$TheAttachments = array('CurriculumReviewCycle.pdf'=>file_get_contents('/var/www/html/CurriculumReviewCycle.pdf')); // SendMail($ServerName, $Port, $Username, $Password, $ToEmail, $FromEmail, $Subject, $Body, $Attachments='' ) SendMail('smtp.gmail.com', 25, 'franklinc@fromdomain', 'Password', 'cfranklin@todomain', 'franklinc@fromdomain', 'Attach - Test - subject', 'What up dog! - the body', $TheAttachments); |
Bash & Mysql Transactions Example
posted Aug 14, 2015, 7:23 PM by Chris Franklin
Code - Simple
#!/bin/bash
(
echo "BEGIN;"
echo "USE database;"
echo "INSERT INTO table (column1,column2) VALUES ('1','bob');"
echo "INSERT INTO table (column1,column2) VALUES ('2','tom');"
echo "INSERT INTO table (column1,column2) VALUES ('3','bill');"
echo "DELETE FROM table WHERE column2='bob' LIMIT 2;"
echo "COMMIT;"
) | mysql
File - loadme.sql
BEGIN;
USE database;
INSERT INTO table (column1,column2) VALUES ('1','bob');
INSERT INTO table (column1,column2) VALUES ('2','tom');
INSERT INTO table (column1,column2) VALUES ('3','bill');
DELETE FROM table WHERE column2='bob' LIMIT 2;
COMMIT;

Code
#!/bin/bash
cat loadme.sql | mysql
File - loadme.sql
INSERT INTO table (column1,column2) VALUES ('1','bob');
INSERT INTO table (column1,column2) VALUES ('2','tom');
INSERT INTO table (column1,column2) VALUES ('3','bill');
DELETE FROM table WHERE column2='bob' LIMIT 2;

Code
#!/bin/bash
(
echo "BEGIN;"
cat loadme.sql
echo "COMMIT;"
) | mysql
File - loadme.csvid,first_name,last_name,email,country,ip_address 1,Michelle,George,mgeorge0@ebay.com,China,123.202.75.179 2,Sara,Evans,sevans1@hc360.com,Finland,134.70.196.70 3,Lawrence,Chavez,lchavez2@about.com,China,18.196.142.206 4,Roy,Carpenter,rcarpenter3@walmart.com,Albania,82.181.123.213 5,Shirley,Green,sgreen4@ucoz.ru,Libya,64.253.188.146 6,Adam,Wood,awood5@hhs.gov,Russia,221.26.34.134 7,Wayne,Stephens,wstephens6@nature.com,Russia,56.65.157.158 8,Theresa,Little,tlittle7@dmoz.org,Afghanistan,69.137.112.37 9,Angela,Sanchez,asanchez8@mayoclinic.com,Indonesia,105.61.90.239 10,Michael,Watson,mwatson9@nih.gov,China,252.233.46.123 11,Frank,Gonzalez,fgonzaleza@rambler.ru,Indonesia,93.237.180.19 12,Juan,Allen,jallenb@lycos.com,United States,218.237.224.77 13,Jack,Hall,jhallc@buzzfeed.com,Russia,4.72.91.28 14,Julie,Berry,jberryd@businesswire.com,China,217.110.87.152 15,Joshua,Simpson,jsimpsone@tinyurl.com,China,12.56.150.26 16,Mary,Hudson,mhudsonf@blogtalkradio.com,Tanzania,134.103.90.216 17,Johnny,Cunningham,jcunninghamg@ustream.tv,Malawi,240.198.146.11 18,Keith,Reynolds,kreynoldsh@huffingtonpost.com,Brazil,132.158.192.170 19,Tina,Bryant,tbryanti@so-net.ne.jp,Ukraine,241.208.105.145 20,David,Lawson,dlawsonj@vkontakte.ru,Finland,168.173.240.164 21,Mark,Nelson,mnelsonk@slate.com,China,184.186.226.254 22,Paul,Welch,pwelchl@businesswire.com,Sweden,13.152.202.44 23,Joyce,Jenkins,jjenkinsm@geocities.jp,Indonesia,8.103.77.179 24,Ernest,Morrison,emorrisonn@sakura.ne.jp,China,11.58.115.47 25,Brian,Bryant,bbryanto@imageshack.us,Poland,198.246.131.204 26,Carl,Fox,cfoxp@upenn.edu,Indonesia,1.89.229.49 27,Doris,Murray,dmurrayq@taobao.com,China,239.230.60.255 28,Doris,Bowman,dbowmanr@jiathis.com,Kazakhstan,71.87.138.11 29,Alan,Young,ayoungs@google.co.uk,Portugal,32.163.140.204 30,Evelyn,Montgomery,emontgomeryt@163.com,France,144.2.32.81 31,Alice,Daniels,adanielsu@quantcast.com,Ireland,247.130.69.39 32,Robert,Marshall,rmarshallv@fotki.com,Canada,116.30.129.9 33,Lois,Green,lgreenw@drupal.org,Indonesia,42.74.77.233 34,Arthur,Day,adayx@ucla.edu,North Korea,89.154.95.21 35,Patricia,Cox,pcoxy@ustream.tv,Turkmenistan,105.242.56.235 36,Antonio,Rice,aricez@jigsy.com,France,199.55.140.129 37,Billy,Simpson,bsimpson10@opensource.org,Mexico,209.115.2.51 38,Dorothy,Welch,dwelch11@list-manage.com,Ethiopia,198.175.147.200 39,Paul,Daniels,pdaniels12@ca.gov,Indonesia,227.236.178.238 40,Larry,Peters,lpeters13@vimeo.com,Jordan,226.138.52.216 41,Brandon,Williams,bwilliams14@skype.com,Russia,105.27.15.197 42,Elizabeth,Day,eday15@weebly.com,China,229.25.28.126 43,Ann,Henry,ahenry16@reference.com,China,42.97.38.8 44,Jimmy,Porter,jporter17@icq.com,France,223.26.225.195 45,Victor,Bowman,vbowman18@merriam-webster.com,Philippines,16.246.77.183 46,Brenda,Jackson,bjackson19@blog.com,Indonesia,128.43.154.62 47,Louis,Bryant,lbryant1a@mit.edu,Indonesia,95.22.26.179 48,Janet,Stephens,jstephens1b@usa.gov,Kazakhstan,231.160.86.102 49,Russell,Little,rlittle1c@joomla.org,Sweden,211.97.195.35 50,Keith,Ruiz,kruiz1d@sciencedaily.com,Israel,179.80.105.231 Code#!/bin/bash ( echo "BEGIN;" echo "USE database;" while IFS=',' read id first_name last_name email country ip_address; do echo "UPDATE table SET colum2='${email}' WHERE colum1='${id}';" done < loadme.csv echo "COMMIT;" ) | mysql
Code
#!/bin/bash
# using the < stops wc from printing the file name
totalrows=$(wc -l < loadme.csv)
(
echo "BEGIN;"
echo "USE database;"
echo "DELETE FROM table;"
while IFS=',' read id first_name last_name email country ip_address; do
    echo "INSERT INTO table (column1,column2) VALUES ('${id}','${email}');"
done < loadme.csv
echo "IF ((SELECT COUNT(*) FROM table) = ${totalrows}) THEN"
echo "COMMIT;"
echo "ELSE"
echo "ROLLBACK;"
echo "END IF;"
) | mysql
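One caveat on that last example: MySQL's IF ... END IF is a compound statement that is only valid inside stored programs, so the mysql client will reject it when it arrives over a plain pipe. A simpler variant (assuming the table uses a transactional engine such as InnoDB) leans on the client's default batch-mode behaviour of stopping at the first error, which leaves the transaction uncommitted so it is rolled back when the connection closes:

#!/bin/bash
(
echo "BEGIN;"
echo "USE database;"
echo "DELETE FROM table;"
while IFS=',' read id first_name last_name email country ip_address; do
    echo "INSERT INTO table (column1,column2) VALUES ('${id}','${email}');"
done < loadme.csv
echo "COMMIT;"
) | mysql
## If any statement fails, mysql exits before COMMIT is ever sent and the
## whole transaction is discarded when the connection closes.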