blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
5f52f20d0b81dbf69f97b4e557d73b16e186463d
|
Shell
|
JeffersonLab/trackingML
|
/benchmark/bm03/run_benchmark.sh
|
UTF-8
| 1,855
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Run the bm03 benchmark
#
# This script should be run from within the bm03 directory
# *AFTER* sourcing the python virtual environment script
# venv/bin/activate. One should also make sure the GPUS
# variable at the top of the script is set to the number
# of GPUs available on the VM
#
# This will run the benchmark and copy all relevant info
# into the archive bm03_{hostname}.tgz
#
GPUS=1
# Make directory to hold results ($(hostname) instead of backticks;
# -p so a re-run does not fail on an existing directory)
results_dir="bm03_$(hostname)"
mkdir -p "$results_dir"
# Download input data and unpack it; abort early if either step fails so
# we do not run the benchmark against missing data
curl -O https://www.jlab.org/12gev_phys/ML/data_sets/bm03_dataset.tgz || exit 1
tar xzf bm03_dataset.tgz || exit 1
mv bm03_dataset/* .
# Capture system info (best effort; a missing tool leaves an empty file
# but does not abort the benchmark)
cat /proc/cpuinfo > "$results_dir/cpuinfo.out"
free > "$results_dir/memory.out"
nvidia-smi > "$results_dir/nvidia-smi.out"
uname > "$results_dir/uname.out"
sysbench cpu run > "$results_dir/sysbench.out"
sudo stress-ng --cpu 1 --cpu-method all --perf -t 60 &> "$results_dir/stress-ng.out"
#-------------------------------------------------------------------
# Capture GPU stats while training. Remember the monitor's PID so we stop
# only our own dmon process: the original 'pkill -9 nvidia-smi' killed any
# nvidia-smi on the box and SIGKILL could lose dmon's buffered output.
nvidia-smi dmon -o DT -s puct -f "$results_dir/nvidia-smi-train.out" &
dmon_pid=$!
# Run training
./cnn_flow.py -m test -l testl -p testp -e 10 -g $GPUS &> "$results_dir/train.out"
# Stop GPU monitoring
kill "$dmon_pid" 2>/dev/null
#-------------------------------------------------------------------
# Capture GPU stats while testing
nvidia-smi dmon -o DT -s puct -f "$results_dir/nvidia-smi-test.out" &
dmon_pid=$!
# Run testing
./predict_batch.py -m test -i ./bm03_dataset/TEST_set -o predOutput &> "$results_dir/test.out"
# Stop GPU monitoring
kill "$dmon_pid" 2>/dev/null
#-------------------------------------------------------------------
# Move tensorboard logs and results images to results dir
mv logs *.png *.dat "$results_dir"
# Tar and gzip results dir
tar czf "${results_dir}.tgz" "$results_dir"
| true
|
7aed4f0e04b94814d3944e479b8b22e623f5af5f
|
Shell
|
brandly/bash-hunnid
|
/fibonacci
|
UTF-8
| 219
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Print the n-th Fibonacci number (1, 1, 2, 3, 5, ...) for n given as $1.
input=$1
# Iterative implementation: the original naive double recursion forked two
# subshells per call and was O(phi^n); this computes the same sequence
# (fibonacci 0 = fibonacci 1 = 1) in O(n) with no subshells.
fibonacci () {
  if [ "$1" -le 1 ]
  then
    echo 1
  else
    local a=1 b=1 i next
    for (( i = 2; i <= $1; i++ )); do
      next=$(( a + b ))
      a=$b
      b=$next
    done
    echo "$b"
  fi
}
fibonacci "$input"
| true
|
7fa31d1f14afee053316b1c06f28e1d0d98b0824
|
Shell
|
dlaststark/machine-learning-projects
|
/Programming Language Detection/Experiment-2/Dataset/Train/UNIX-Shell/old-lady-swallowed-a-fly.sh
|
UTF-8
| 1,030
| 3.078125
| 3
|
[] |
no_license
|
# Print the cumulative song "There Was an Old Lady Who Swallowed a Fly".
# animals[i] pairs with comments[i]; include[i] controls each verse:
#   0 - final verse (no recap, no "Perhaps she'll die")
#   1 - recap the food chain, but do not repeat that animal's comment
#   2 - recap AND repeat the comment line for that animal (fly, spider)
animals=(fly spider bird cat dog pig goat cow donkey horse)
comments=("I don't know why she swallowed that fly"
"That wriggled and jiggled and tickled inside her"
"Quite absurd, to swallow a bird"
"How about that, to swallow a cat"
"What a hog, to swallow a dog"
"Her mouth was so big to swallow a pig"
"She just opened her throat to swallow a goat."
"I don't know how she swallowed a cow."
"It was rather wonky to swallow a donkey"
"She's dead, of course!")
include=(2 2 1 1 1 1 1 1 1 0)
for (( i=0; i<${#animals[@]}; ++i )); do
echo "There was an old lady who swallowed a ${animals[i]}"
echo "${comments[i]}"
# Skip the recap entirely for the last verse (she's dead)
if (( include[i] )); then
# Nothing to recap on the very first verse
if (( i )); then
# Walk back down the food chain
for (( j=i-1; j>=0; --j )); do
echo "She swallowed the ${animals[j+1]} to catch the ${animals[j]}"
# Repeat the comment only for animals marked 2
if (( include[j] > 1 )); then
echo "${comments[j]}"
fi
done
fi
echo "Perhaps she'll die"
echo
fi
done
| true
|
27ac2d788ebfbf7c6680385fda6e24b4e5887583
|
Shell
|
ashithwilson/gdrive-backup
|
/install-gdrive-backup-script.sh
|
UTF-8
| 1,577
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Fetch the gdrive 2.1.0 release binary into /sbin and mark it executable.
# Aborts the whole installation if a binary is already present.
install_gdrive_binary()
{
    if [ -f "/sbin/gdrive" ]; then
        echo "Looks like /sbin/gdrive already exists. Aborting installation!"
        exit 1
    fi
    wget -qO /sbin/gdrive https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-x64
    chmod +x /sbin/gdrive
}
# Run one throwaway 'gdrive list' so the tool performs its first
# (interactive) authentication before the real backup script needs it.
dry_run_gdrive()
{
    printf 'Test running first Gdrive transaction\n\n'
    /sbin/gdrive list
    printf '\n\n'
}
# Create a "<host>-backups" folder on Google Drive and record its ID in the
# global $folder_id, which install_backup_script() substitutes into
# backup.sh. Uses $() instead of backticks and quotes the folder name.
configure_sync_folder()
{
    folder_id=$(/sbin/gdrive mkdir "$(hostname)-backups" | awk '{print $2}')
    echo "Created Gdrive folder $(hostname)-backups with ID $folder_id. The same would be used in backup script - /gdrive-backups/backup.sh"
}
# Download the backup script skeleton and bake the Drive folder ID
# (global $folder_id, set by configure_sync_folder) into it.
install_backup_script()
{
mkdir /gdrive-backups/
wget -qO /gdrive-backups/backup.sh http://raw.githubusercontent.com/ashithwilson/server-side/master/gdrive-backup.sh
# Replace the placeholder token with the real folder ID
sed -i "s|REPLACE_WITH_GDRIVE_FOLDER_ID|$folder_id|g" /gdrive-backups/backup.sh
chmod +x /gdrive-backups/backup.sh
}
# Create the (initially empty) backup list files and print a summary.
# FIX: the summary previously pointed the DB backup list at
# /gdrive-backups/backup-list.txt, but the file created for DB backups
# is mysql_backup_list.txt.
print_summary()
{
touch /gdrive-backups/mysql_backup_list.txt
touch /gdrive-backups/backup-list.txt
echo "
============
============
Backup script is installed at /gdrive-backups/backup.sh
Check gdrive details using command 'gdrive help'
Website files - backup list : /gdrive-backups/backup-list.txt
Db - backup list: /gdrive-backups/mysql_backup_list.txt
============
** Do not forget to set up daily/weekly cron for backups **
"
}
echo ""
# Confirm with the user, then run the installation steps in order.
read -p "Are you sure to install gdrive backups? [Y/n]: " opt
case "$opt" in
    [Yy])
        install_gdrive_binary
        dry_run_gdrive
        configure_sync_folder
        install_backup_script
        print_summary
        ;;
    *)
        echo "Aborting!!!"
        ;;
esac
| true
|
0de9ba98ef345dbe4fc3427d38b05da6489a93c4
|
Shell
|
debidroidcc/debidroidcc
|
/shutdown-outside-chroot.sh
|
UTF-8
| 1,082
| 3.265625
| 3
|
[] |
no_license
|
#!/system/bin/sh
# Stop the services running inside the Debian chroot and unmount its
# virtual filesystems. Run from Android, outside the chroot.
# break on errors:
#set -e
# display commands:
set -x
# All defaults may be overridden via the environment. The tests are quoted:
# the original unquoted '[ -z $var ]' only worked by accident for empty
# values and would break for values containing whitespace.
#busybox=/system/xbin/busybox
if [ -z "$busybox" ]; then
busybox=/data/data/de.tubs.ibr.distcc/app_bin/busybox
fi
if [ -z "$target_mountpoint" ]; then
target_mountpoint=/data # this one might be derived from the $target_dir via df or mount
fi
if [ -z "$target_dir" ]; then
target_dir=$target_mountpoint
fi
if [ -z "$debian_dir" ]; then
debian_dir=$target_dir/debian
fi
# set paths
export PATH=/usr/bin:/usr/sbin:/bin:$PATH
export HOME=/root
# kill all ssh sessions
"$busybox" chroot "$debian_dir" /bin/bash -c "/usr/bin/killall sshd"
# stop services
"$busybox" chroot "$debian_dir" /etc/init.d/ssh stop
"$busybox" chroot "$debian_dir" /etc/init.d/distcc stop
"$busybox" chroot "$debian_dir" /etc/init.d/dbus stop
# make sure it is really dead
"$busybox" chroot "$debian_dir" /usr/bin/killall distccd
"$busybox" chroot "$debian_dir" /usr/bin/killall dbus-daemon
# unmount virtual filesystems
"$busybox" chroot "$debian_dir" /bin/umount /dev/pts
"$busybox" chroot "$debian_dir" /bin/umount /proc
"$busybox" chroot "$debian_dir" /bin/umount /sys
| true
|
0b2ac25d75de276ebe2261226ca61ffd50db4769
|
Shell
|
possientis/Prog
|
/shell/color.sh
|
UTF-8
| 763
| 2.640625
| 3
|
[] |
no_license
|
# for sh not bash
# ANSI escape-sequence demo: flashes "Hello World!" on a red bar at the
# top of the terminal, then rebuilds the same banner piece by piece.
# NOTE(review): the \033 sequences are stored literally in the variables;
# whether 'echo -n' expands them depends on the shell (dash does, bash
# does not without -e) — hence the "for sh not bash" note above.
escseq="\033[s\033[0;0H\033[0;41m\033[K\033[1;33mHello World!\033[0m\033[u"
restore="\033[s\033[0;0H\033[0m\033[K\033[u"
store="\033[s" # save cursor location
home="\033[0;0H" # go to line 0 column 0
redback="\033[0;41m" # set red background color
clearline="\033[K" # clear current line (hence color it red)
yellowfont="\033[1;33m"
coloroff="\033[0m"
load="\033[u" # restore cursor location
# Show the whole banner for one second, then clear the top line
echo -n $escseq
sleep 1
echo -n $restore
# Write plain text at the top, then jump back to the saved position
echo -n $store
echo -n $home
echo -n "Oh I have left..."
echo -n $load
sleep 1
# Rebuild the banner from the individual sequences
echo -n $store
echo -n $home
echo -n $redback
echo -n $clearline
echo -n $yellowfont
echo -n "Hello again..."
echo -n $coloroff
echo -n $load
sleep 1
echo -n $restore
| true
|
75c13946ddf648de956be7d8cda2a6b67a79a5f5
|
Shell
|
nobu-g/dotfiles
|
/.zsh.d/linux/.zshrc
|
UTF-8
| 1,405
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
# Report the command about to run to iTerm2's status bar using the
# OSC 1337 SetUserVar escape sequence (value is base64-encoded).
set_iterm2_status_bar() {
mycmd=(${(s: :)${1}})   # zsh: split the command line on spaces into an array
printf "\e]1337;SetUserVar=%s=%s\a" lastcmd "$(echo $mycmd | tr -d '\n' | base64)"
}
# Run before every command execution
add-zsh-hook preexec set_iterm2_status_bar
# https://linuxfan.info/disable-ctrl-s
# Free Ctrl-S/Ctrl-Q from terminal flow control (interactive tty only)
if [[ -t 0 ]]; then
stty stop undef
stty start undef
fi
# install specified debian package to ${HOME}/usr/bin
# install specified debian package to ${HOME}/usr/bin
# Downloads the .deb into a temp dir and unpacks it under $HOME.
# FIX: the original used 'exit' on a failed cd — this file is sourced by
# interactive shells, so 'exit' would terminate the whole shell session.
# Use 'return' instead.
apt-user-install() {
    local cur_dir=$(pwd)
    cd "$(mktemp -d)" || return 1
    apt download "$1"
    # NOTE(review): assumes the temp dir now contains exactly one file,
    # the downloaded .deb — confirm for multi-file downloads
    dpkg -x "$(ls)" "${HOME}"
    cd "${cur_dir}" || return 1
}
# stderred
# if [[ -f "${HOMEBREW_PREFIX}/lib/libstderred.so" ]] && [[ $HOST != "moss110" ]]; then
# export STDERRED_ESC_CODE=$(echo -e "$(tput setaf 9)")
# export LD_PRELOAD="${HOMEBREW_PREFIX}/lib/libstderred.so${LD_PRELOAD:+:$LD_PRELOAD}"
# fi
# LESS man page colors (makes Man pages more readable).
# Strip LD_PRELOAD/MANPAGER for the man run, normalize with col, then
# colorize through bat's man syntax.
man() {
env -u LD_PRELOAD -u MANPAGER man "$@" | col -bx | bat -l man -p
}
# Per-user emacs server socket path
export EMACS_SERVER_SOCKET="${TMPDIR:-/tmp}/emacs$(id -u)"
# alias
# macOS-style clipboard names mapped onto zsh's clipcopy/clippaste
alias pbcopy='clipcopy'
alias pbpaste='clippaste'
alias gzcat='zcat'
# Export the local working directory to the remote side as LC_PWD
alias ssh='LC_PWD="${PWD}" /usr/bin/ssh -o SendEnv=LC_PWD'
[[ -x /usr/bin/git ]] && alias git='/usr/bin/git'
alias nv='nvidia-smi'
alias sc='systemctl'
# directory alias
hash -d larch="/mnt/larch/${USER}" # ~larch
hash -d hinoki="/mnt/hinoki/${USER}" # ~hinoki
hash -d elm="/mnt/elm/${USER}" # ~elm
hash -d zamia="/mnt/zamia/${USER}" # ~zamia
hash -d mint="/mnt/mint/${USER}" # ~mint
| true
|
fe6e57657eca1d38a20f50c48e92f41eb89c56df
|
Shell
|
seven5/tutorial
|
/container/init-tutorial.sh
|
UTF-8
| 1,237
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#
# create the tutorial
#
# bootstrap needs gb and gopherjs
cd /seven5/bootstrap
export GOPATH=/seven5/bootstrap
go get github.com/constabulary/gb/...
go get github.com/gopherjs/gopherjs
# now we are ready to get the source code
unset GOPATH
mkdir -p /seven5/tutorial/src/github.com/seven5
cd /seven5/tutorial/src/github.com/seven5
# FIX: this line read 'git tutorial <url>', which is not a git command —
# the tutorial repository was never cloned. It must be 'git clone' like
# the three lines below.
git clone https://github.com/seven5/tutorial.git
git clone https://github.com/seven5/seven5.git
git clone https://github.com/seven5/gb-seven5.git
git clone https://github.com/seven5/heroku-buildpack-seven5.git
# configuration
cp tutorial/manifest /seven5/tutorial/vendor/manifest
mv /enable-tutorial.sh /seven5/enable-tutorial
# inflate deps
cd /seven5/tutorial
gb vendor restore
# build source code
gb build github.com/seven5/tutorial/...
# create and init db: start a throwaway postgres on port 5433, create the
# user/db, run migrations, then shut it down again
su postgres -c "/usr/lib/postgresql/9.5/bin/postgres -p 5433 -D /var/lib/postgresql/9.5/main -c config_file=/etc/postgresql/9.5/main/postgresql.conf" &
echo $! > /tmp/postgres.pid
sleep 5
su postgres bash -c "psql -p 5433 -c \"CREATE USER root WITH PASSWORD '';\""
su postgres -c "createdb -p 5433 -O root fresno"
export DATABASE_URL="postgres://root@localhost:5433/fresno"
migrate --up
kill "$(cat /tmp/postgres.pid)"
rm -f /tmp/postgres.pid
| true
|
f2f280c7d3e2508d05c4e44ece2fa805dc13e45c
|
Shell
|
robobenklein/configs
|
/tests/command-check.bash
|
UTF-8
| 403
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Micro-benchmark: time N lookups of a command via each of the four common
# existence checks (type / hash / command -v / which).
N=100000
cmd=ls
# FIX/idiom: the original iterated 'for i in $(eval echo "{1..$N}")', which
# evals and materializes a ~700KB word list four times. A C-style for loop
# is equivalent, allocation-free, and needs no eval.
echo
echo "type:"
time(for ((i = 1; i <= N; i++)); do
type $cmd &>/dev/null
done)
echo
echo "hash:"
time(for ((i = 1; i <= N; i++)); do
hash $cmd &>/dev/null
done)
echo
echo "command -v:"
time(for ((i = 1; i <= N; i++)); do
command -v $cmd &>/dev/null
done)
echo
echo "which:"
time(for ((i = 1; i <= N; i++)); do
which $cmd &>/dev/null
done)
| true
|
31d8377bdcc734142b98882045896a1515c779dc
|
Shell
|
idiap/iss-dicts
|
/bdlex/CreateDicts.sh
|
UTF-8
| 768
| 3.140625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
#
# Copyright 2013 by Idiap Research Institute, http://www.idiap.ch
#
# See the file COPYING for the licence associated with this software.
#
# Author(s):
# Phil Garner, March 2013
#
# Build an HTK-format pronunciation dictionary from the BDLex 50k lexicon.
# Run from the directory containing local/ and media/.
cd local
# Scratch files on tmpfs
tmp1=/dev/shm/bdlex1.tmp
tmp2=/dev/shm/bdlex2.tmp
tmp3=/dev/shm/bdlex3.tmp
echo Creating monolithic dictionary $tmp1
cat ../media/BDL50/?.B50.flx > $tmp1
# The shell pipeline is kept for reference but disabled; the Python
# implementation below is the one actually used.
if false
then
# Shell-based method
# Slow and leaves odd symbols ('.', '_') in the phone list
echo Expanding optional phones to $tmp2
./bdlex50_expand_optionals.sh $tmp1 $tmp2
echo Converting to HTK format
sort -u $tmp2 > $tmp3
./bdlex50_htk_lexicon.sh $tmp3 dictionary.txt 0
else
# Python based method
./bdlex50_htk_lexicon.py $tmp1 dictionary.txt
fi
# Clean up scratch files
rm -f $tmp1 $tmp2 $tmp3
| true
|
9df85e4b5043748a6abce51e84ec7f830facdda1
|
Shell
|
catunlock/brother-archlinux
|
/install_printer.sh
|
UTF-8
| 1,011
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the Brother MFC-5490CN LPR driver and CUPS wrapper on Arch Linux.
# Brother ships only .deb packages, so each is unpacked manually with
# ar/tar and its payload copied into the root filesystem.

# Download the .deb at $1, unpack its data payload into /, and remove all
# intermediate files. Factored out: the original repeated this 10-line
# sequence verbatim for both packages.
install_brother_deb()
{
    local url="$1"
    local deb
    deb=$(basename "$url")
    curl -O "$url"
    ar vx "$deb"
    tar -zxvf data.tar.gz
    sudo cp -R usr /
    rm -R usr
    rm -R data.tar.gz
    rm -R control.tar.gz
    rm -R debian-binary
    rm -R "$deb"
}

echo "Instalando dependencias"
sudo pacman -Sy cups
echo "Instalando paquetes"
install_brother_deb http://download.brother.com/welcome/dlf006156/mfc5490cnlpr-1.1.2-2.i386.deb
install_brother_deb http://download.brother.com/welcome/dlf006158/mfc5490cncupswrapper-1.1.2-2.i386.deb
echo "Configurando el servidor de impresion"
sudo systemctl enable org.cups.cupsd.service
sudo systemctl start org.cups.cupsd.service
echo "Configurando la impresora"
sudo /usr/local/Brother/Printer/mfc5490cn/cupswrapper/cupswrappermfc5490cn
echo "Hecho, ve a http://127.0.0.1:631 i añade la impresora lpd://(Your printer's IP address)/binary_p1"
| true
|
82cfe63a077c0d73ff9dced4be25b5a5a1fb19d2
|
Shell
|
molotov-dmitry/font-monospace-config
|
/font-monospace-config.sh
|
UTF-8
| 9,654
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
# Tool name: used for the per-user config directory and the re-run hint.
TARGET='font-monospace-config'
#### Functions =================================================================
# Show an informational message: plain echo plus "press Enter" on a tty,
# a zenity info dialog otherwise.
showmessage()
{
local message="$1"
if tty -s
then
echo "${message}"
read -p "Press [Enter] to continue"
else
zenity --info --width 400 --text="${message}"
fi
}
# Ask a yes/no question; returns 0 for yes, 1 for no.
# On a tty, re-prompts until Y/y/Enter (yes) or N/n (no); otherwise uses a
# zenity question dialog.
# NOTE(review): callers pass extra arguments ("save"/"try another") which
# are ignored — only $1 is used.
showquestion()
{
local message="$1"
if tty -s
then
while true
do
read -p "${message} [Y/n] " RESULT
# Bare Enter counts as yes; ${RESULT,,} lower-cases the answer
if [[ -z "${RESULT}" || "${RESULT,,}" == 'y' ]]
then
return 0
fi
if [[ "${RESULT,,}" == 'n' ]]
then
return 1
fi
done
else
if zenity --question --width 400 --text="${message}"
then
return 0
else
return 1
fi
fi
}
# Present a list of options and echo the chosen one to stdout.
# $1 = dialog title (zenity only), $2 = prompt, remaining args = options.
# On a tty the bash 'select' builtin is used and the prompt goes to stderr
# so the caller can capture stdout; cancelling zenity yields an empty echo.
selectvalue()
{
local title="$1"
local prompt="$2"
shift
shift
local result=''
if tty -s
then
result=''
echo "${prompt}" >&2
select result in "$@"
do
# Accept bare Enter (empty REPLY) or a number within range
if [[ -z "${REPLY}" ]] || [[ ${REPLY} -gt 0 && ${REPLY} -le $# ]]
then
break
else
continue
fi
done
else
# Re-show the dialog until something is selected or it is cancelled
while true
do
result=$(zenity --title="$title" --text="$prompt" --list --column="Options" "$@") || break
if [[ -n "$result" ]]
then
break
fi
done
fi
echo "$result"
}
# Record that the initial configuration ran (so autostart is skipped on the
# next login) and tell the user how to re-run the tool by hand.
disableautostart()
{
    local confdir="${HOME}/.config/${TARGET}"
    showmessage "Configuration completed. You can re-configure monospace font by running '${TARGET}' command"
    mkdir -p "${confdir}"
    printf 'autostart=false\n' > "${confdir}/setup-done"
}
# Return 0 when dpkg knows package "$1", 1 otherwise (including when dpkg
# itself is unavailable).
function ispkginstalled()
{
    local pkg="$1"
    if dpkg -s "${pkg}" >/dev/null 2>&1; then
        return 0
    fi
    return 1
}
# Escape backslashes and forward slashes in $1 so the result can be
# embedded literally inside a '/'-delimited sed expression.
safestring()
{
    local raw="$1"
    printf '%s\n' "${raw}" | sed 's/\\/\\\\/g;s/\//\\\//g'
}
# Print the value of ${key} in INI section [${section}] of ${file};
# prints nothing if the file is unreadable or the key is absent.
# The sed address range runs from the section header to the next line
# starting with '['; safestring escapes key/section for sed embedding.
getconfigline()
{
local key="$1"
local section="$2"
local file="$3"
if [[ -r "$file" ]]
then
sed -n "/^[ \t]*\[$(safestring "${section}")\]/,/\[/s/^[ \t]*$(safestring "${key}")[ \t]*=[ \t]*//p" "${file}"
fi
}
# Set ${key}=${value} in INI section [${section}] of ${file}, creating the
# file and section if needed and replacing any existing definition.
addconfigline()
{
local key="$1"
local value="$2"
local section="$3"
local file="$4"
# Append the section header when it does not exist yet
if ! grep -F "[${section}]" "$file" 1>/dev/null 2>/dev/null
then
mkdir -p "$(dirname "$file")"
echo >> "$file"
echo "[${section}]" >> "$file"
fi
# Delete any existing "key = ..." line, but only within this section
sed -i "/^[[:space:]]*\[${section}\][[:space:]]*$/,/^[[:space:]]*\[.*/{/^[[:space:]]*$(safestring "${key}")[[:space:]]*=/d}" "$file"
# Insert the new assignment directly after the section header
sed -i "/\[${section}\]/a $(safestring "${key}=${value}")" "$file"
# Ensure the file ends with a newline
if [[ -n "$(tail -c1 "${file}")" ]]
then
echo >> "${file}"
fi
}
# Save a copy of ${file} as ${file}.old; no-op if the file does not exist.
# FIX: the original one-liner '[[ -f ... ]] && cp ...' returns 1 for a
# missing file, which aborts the whole script under 'set -e' at unguarded
# call sites (e.g. an app is installed but its config file was never
# created). Always return 0 instead.
backup()
{
    local file="$1"
    if [[ -f "${file}" ]]; then
        cp -f "${file}" "${file}.old"
    fi
}
# Undo backup(): if a .old copy exists move it back over ${file};
# otherwise delete ${file} entirely (it did not exist before this run).
restore()
{
    local target="$1"
    if [[ ! -f "${target}.old" ]]; then
        rm -f "${target}"
        return
    fi
    mv "${target}.old" "${target}"
}
#### Globals ===================================================================
unset options
declare -a options
# Candidate monospace fonts and point sizes offered to the user
options=('Ubuntu Mono' 'Fira Code' 'JetBrains Mono' 'Noto Sans Mono' 'Hack' 'Consolas')
sizes=('10' '12' '14' '16' '18')
#### Get system monospace fonts ================================================
# TODO
#### Sort and remove duplicates from fonts list ===============================
# NOTE(review): 'uniq' removes only *adjacent* duplicates and the list is
# not sorted first, so non-adjacent duplicates would survive.
readarray -t fonts < <(for a in "${options[@]}"; do echo "$a"; done | uniq)
#### Select and apply font =====================================================
# gsettings schemas and per-application config file locations
readonly schemagnome="org.gnome.desktop.interface monospace-font-name"
readonly filekde="${HOME}/.config/kdeglobals"
readonly schemabuilder="org.gnome.builder.editor font-name"
readonly fileqtcreator="${HOME}/.config/QtProject/QtCreator.ini"
readonly filekonsole="${HOME}/.local/share/konsole/UTF-8.profile"
readonly filekate="${HOME}/.config/kateschemarc"
readonly filesqlitebrowser="${HOME}/.config/sqlitebrowser/sqlitebrowser.conf"
readonly fileghostwriter="${HOME}/.config/ghostwriter/ghostwriter.conf"
### Apply settings =============================================================
# Main loop: ask for font and size, apply to every detected application,
# then offer to keep or revert. Reverting restores backups and loops again.
while true
do
newfont="$(selectvalue 'Monospace font' 'Please select font:' "${fonts[@]}")"
if [[ -n "$newfont" ]]
then
newsize="$(selectvalue 'Font size' 'Please select size:' "${sizes[@]}")"
fi
# KDE font spec trailer: style options and style name appended to
# "family,size" in kdeglobals/Konsole/Kate entries
newoptionskde="-1,5,50,0,0,0,0,0"
newtypekde="Regular"
if [[ -n "$newfont" && -n "$newsize" ]]
then
## Gnome/Cinnamon ------------------------------------------------------
# Remember the old value so it can be restored on "try another"
if gsettings writable $schemagnome 1>/dev/null 2>/dev/null
then
oldfontgnome="$(gsettings get $schemagnome)"
gsettings set $schemagnome "${newfont} ${newsize}"
fi
## KDE -----------------------------------------------------------------
if [[ -f "$filekde" ]]
then
backup "$filekde"
addconfigline 'fixed' "${newfont},${newsize},${newoptionskde},${newtypekde}" 'General' "$filekde"
fi
## Gnome Builder -------------------------------------------------------
if gsettings writable $schemabuilder 1>/dev/null 2>/dev/null
then
oldfontbuilder="$(gsettings get $schemabuilder)"
gsettings set $schemabuilder "${newfont} ${newsize}"
fi
## Qt Creator ----------------------------------------------------------
if ispkginstalled qtcreator
then
backup "$fileqtcreator"
addconfigline 'FontFamily' "${newfont}" 'TextEditor' "$fileqtcreator"
addconfigline 'FontSize' "${newsize}" 'TextEditor' "$fileqtcreator"
fi
## Konsole -------------------------------------------------------------
if ispkginstalled konsole
then
backup "$filekonsole"
addconfigline 'Font' "${newfont},${newsize},${newoptionskde},${newtypekde}" 'Appearance' "$filekonsole"
fi
## Kate ----------------------------------------------------------------
if ispkginstalled kate
then
backup "$filekate"
addconfigline 'Font' "${newfont},${newsize},${newoptionskde},${newtypekde}" 'Normal' "$filekate"
fi
## SQLite Browser ------------------------------------------------------
if ispkginstalled sqlitebrowser
then
backup "$filesqlitebrowser"
addconfigline 'font' "${newfont}" 'editor' "$filesqlitebrowser"
addconfigline 'fontsize' "${newsize}" 'editor' "$filesqlitebrowser"
addconfigline 'font' "${newfont}" 'databrowser' "$filesqlitebrowser"
fi
## Ghostwriter ---------------------------------------------------------
if ispkginstalled ghostwriter
then
backup "$fileghostwriter"
addconfigline 'font' "${newfont},${newsize},${newoptionskde}" 'Style' "$fileghostwriter"
fi
## ---------------------------------------------------------------------
# NOTE(review): showquestion only reads $1; the extra args are ignored
if showquestion "Save these settings?" "save" "try another"
then
break
else
### Reset settings =================================================
## Gnome/Cinnamon --------------------------------------------------
if gsettings writable $schemagnome 1>/dev/null 2>/dev/null
then
if [[ -n "${oldfontgnome}" ]]
then
gsettings set $schemagnome "${oldfontgnome}"
else
gsettings reset $schemagnome
fi
fi
## KDE -------------------------------------------------------------
restore "$filekde"
## Gnome Builder ---------------------------------------------------
if gsettings writable $schemabuilder 1>/dev/null 2>/dev/null
then
if [[ -n "${oldfontbuilder}" ]]
then
gsettings set $schemabuilder "${oldfontbuilder}"
else
gsettings reset $schemabuilder
fi
fi
## Qt Creator ------------------------------------------------------
restore "$fileqtcreator"
## Konsole ---------------------------------------------------------
restore "$filekonsole"
## Kate ------------------------------------------------------------
restore "$filekate"
## SQLite Browser --------------------------------------------------
restore "$filesqlitebrowser"
## Ghostwriter -----------------------------------------------------
restore "$fileghostwriter"
## -----------------------------------------------------------------
continue
fi
fi
break
done
#### Disable autostart =========================================================
disableautostart
| true
|
2b364d0e94a921e6a1544076a102b8caf58e8d07
|
Shell
|
laristra/portage
|
/app/msmapp/test/run.sh
|
UTF-8
| 352
| 2.53125
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# This file is part of the Ristra portage project.
# Please see the license file at the root of this repository, or at:
# https://github.com/laristra/portage/blob/master/LICENSE
set -e
set -x
# Fail fast with a clear message if the required environment is missing
# (previously an unset ROOT_DIR/COMPARE produced a cryptic exec failure).
: "${ROOT_DIR:?ROOT_DIR must be set to the build root}"
: "${COMPARE:?COMPARE must be set to the comparison utility}"
# Tolerance for the diagnostics comparison
epsilon=1.e-10
mpirun -np 1 "${ROOT_DIR}/msmapp" "${ROOT_DIR}/test/example_input"
"${COMPARE}" "gold_diagnostics.dat" "diagnostics.dat" "${epsilon}"
| true
|
e164b54e0db23dede7b976fd961db516ff070482
|
Shell
|
Azure/abfs-backport
|
/patch-cluster-node.sh
|
UTF-8
| 14,877
| 3.953125
| 4
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#!/bin/bash
# Script to patch legacy Hadoop with new ABFS driver (hadoop-azure.*.jar) that has been specifically backported
# for the targeted Hadoop distro and version.
# To fully patch a cluster, this script must be run on EVERY node to update the local filesystem. On ONE NODE ONLY
# the -a switch must be specified to patch the HDFS contents.
# Parameters
APPLY_HDFS_PATCH=0 # -a: also patch copies stored in HDFS (one node only)
HDFS_USER=hdfs # -u: user with HDFS superuser privileges
DIR_PREFIX=/usr/hdp/ # -p: local filesystem search root
HDFS_DIR_PREFIX=/hdp/apps/ # -P: HDFS search root
ROLLBACK=0 # -R: restore a previous backup instead of patching
TARGET_RELEASE="HDP-2.5.2" # -t: GitHub release tag carrying the patched jar
# Constants
export MATCHED_JAR_FILE_NAME=hadoop-azure
GITHUB_API_ROOT_URI=https://api.github.com/repos/Azure/abfs-backport
CURR_TIME=$(date "+%Y-%m-%d-%H-%M-%S")
BACKUP_SUFFIX=".original_${CURR_TIME}" # backups are tagged with the run timestamp
JAR_EXTENSION=".jar"
JAR_FIND_SUFFIX="" # becomes "*" in rollback mode so find matches backups too
# Log whether the immediately preceding command succeeded ($? == 0).
# Purely informational: the status is reported but never propagated.
checkstatus() {
    local rv=$?
    if [ "$rv" -ne 0 ]; then
        echo "DEBUG... $1 failed"
    else
        echo "DEBUG... $1 success"
    fi
}
# jq is required to parse the GitHub release metadata below.
# ('command -v' replaces 'which': builtin, portable, no extra process.)
command -v jq > /dev/null
if [ $? -ne 0 ]; then
echo "This script requires jq to run. Please install using preferred package manager"
exit 1
fi
# Parse command-line options.
# FIX: the optstring was ":a?hu:p:P:R:", which omits 't:'; the documented
# '-t TARGET_RELEASE' option was rejected by getopts and the 't)' case
# below was unreachable.
while getopts ":a?ht:u:p:P:R:" options
do
case "${options}" in
a)
APPLY_HDFS_PATCH=1
;;
u)
HDFS_USER=${OPTARG}
;;
t)
TARGET_RELEASE=${OPTARG}
;;
p)
DIR_PREFIX=${OPTARG}
;;
P)
HDFS_DIR_PREFIX=${OPTARG}
;;
R)
ROLLBACK=1
ROLLBACK_VERSION=${OPTARG}
# Quoted test: the unquoted form misbehaves for empty/whitespace values
if [ -z "${ROLLBACK_VERSION}" ]; then
echo "Exiting; Backup version parameter is required for rollback"
exit 4
fi
BACKUP_SUFFIX=".original_${ROLLBACK_VERSION}"
;;
*|?|h)
echo "Usage: $0 [-a] [-u HDFS_USER] [-t TARGET_VERSION] [-p DIRECTORY_PREFIX] [-P HDFS_DIRECTORY_PREFIX] [-R] [-?]"
echo ""
echo "Where:"
echo " -a Update HDFS contents. This switch should only be specified for ONE node in a cluster patch."
echo " -u HDFS_USER Specifies the user name with superuser privileges on HDFS. Applicable only if the -a switch is specified."
echo " -p DIRECTORY_PREFIX "
echo " Specifies a prefix that is specific to the Hadoop distro & version to search for files to patch."
echo " -P HDFS_DIRECTORY_PREFIX "
echo " Specifies a prefix that is specific to the Hadoop distro & version to search on HDFS for files to patch."
echo " -R ROLLBACK_VERSION "
echo " Rollback installation. Restores previously backed up versions of hadoop-azure jar file. Rollback for HDFS "
echo " should follow same model as deployment. Specify the backup version for the rollback. Ex: Specify 2020-06-07-10-10-10 "
echo " for the backup file named hadoop-azure.*.jar.original_2020-06-07-10-10-10"
exit 1
;;
esac
done
# Normalize prefixes to end with a single '/'
[[ "${DIR_PREFIX}" != */ ]] && DIR_PREFIX="${DIR_PREFIX}/"
[[ "${HDFS_DIR_PREFIX}" != */ ]] && HDFS_DIR_PREFIX="${HDFS_DIR_PREFIX}/"
# Confirm rollback: count the backup files carrying the requested version
# tag and refuse to proceed when none exist.
if [ $ROLLBACK -gt 0 ]; then
echo "find $DIR_PREFIX -name $MATCHED_JAR_FILE_NAME*.jar$BACKUP_SUFFIX -a ! -name *datalake* | wc -l"
JARCOUNT=$(find $DIR_PREFIX -name "$MATCHED_JAR_FILE_NAME*.jar$BACKUP_SUFFIX" -a ! -name "*datalake*" | wc -l)
echo "jar files found with rollback version: $JARCOUNT"
echo "find $DIR_PREFIX -name *.tar.gz${BACKUP_SUFFIX} -a ! -name *datalake* | wc -l"
# FIX: this find previously used "-name ${GZ}${BACKUP_SUFFIX}", but $GZ is
# not set at this point in the script, so the pattern degenerated and the
# archive count was wrong. Use the "*.tar.gz" pattern the echo advertises.
# Patterns are also quoted so the shell cannot glob-expand them in the cwd.
GZCOUNT=$(find $DIR_PREFIX -name "*.tar.gz${BACKUP_SUFFIX}" -a ! -name "*datalake*" | wc -l)
echo "Zip files found with rollback version: $GZCOUNT"
TOT=$(($JARCOUNT+$GZCOUNT))
echo "Number of files found for rollback : $TOT"
if [[ ${TOT} -eq 0 ]]; then
echo "Exiting. Backup version for rollback specified is not found."
exit 4
fi
echo "***************** NOTICE ****************************"
echo "This script will rollback previously applied changes."
echo "Multiple patches and rollbacks are NOT idempotent. Rolling back after applying multiple patches "
echo "may result in an unusable system."
echo "Only rollback if you are confident there is only one saved original version (*.jar.original)."
read -r -p "Are you sure you want to proceed with this operation? [y/N] " response
if [ "${response,,}" != "y" ]; then
exit 4
fi
JAR_FIND_SUFFIX="*"
fi
# Query the GitHub release for the target tag and pick out the patched jar
# asset (content type java-archive or octet-stream).
RELEASE_INFO=$(curl "${GITHUB_API_ROOT_URI}/releases/tags/${TARGET_RELEASE}")
JAR_ASSET=$(echo $RELEASE_INFO | jq -r '.assets[] | select(.content_type == "application/java-archive" or .content_type == "application/octet-stream") | .')
if [[ -z "$JAR_ASSET" ]]; then
echo "Unable to get information for .jar file associated with $TARGET_RELEASE release."
exit 4
fi
PATCHED_JAR_FILE_NAME=$(basename $(echo $JAR_ASSET | jq -r '.name') .jar)
REMOTE_PATCH_PATH=$(echo $JAR_ASSET | jq -r '.browser_download_url')
LOCAL_PATCH_PATH="/tmp/$PATCHED_JAR_FILE_NAME.new"
# Download the patched jar (patch mode only; rollback reuses backups)
if [ $ROLLBACK -eq 0 ]; then
if [ -e $LOCAL_PATCH_PATH ]; then
rm $LOCAL_PATCH_PATH;
checkstatus "rm $LOCAL_PATCH_PATH;"
fi
echo ""
echo "Downloading $REMOTE_PATCH_PATH to $LOCAL_PATCH_PATH"
wget $REMOTE_PATCH_PATH -O $LOCAL_PATCH_PATH
if [ $? -ne 0 ]; then
echo "ERROR: failed to download $REMOTE_PATCH_PATH to $LOCAL_PATCH_PATH"
exit 3
fi
fi
echo ""
echo "Locating all JAR files in $DIR_PREFIX*.tar.gz"
# Every *.tar.gz under the prefix whose member list mentions the jar name
GZs=$(find "$DIR_PREFIX" -name "*.tar.gz" -print0 | xargs -0 zgrep "$MATCHED_JAR_FILE_NAME" | tr ":" "\n" | grep .tar.gz)
for GZ in $GZs
do
echo $GZ
if [ $ROLLBACK -eq 0 ]; then
# Keep one timestamped backup per archive
if [[ ! -e "${GZ}${BACKUP_SUFFIX}" ]]; then
cp "$GZ" "${GZ}${BACKUP_SUFFIX}"
checkstatus "cp $GZ ${GZ}${BACKUP_SUFFIX}"
fi
# Unpack next to the archive; re-packed later once jars are replaced
ARCHIVE_DIR="${GZ}.dir"
if [[ -d $ARCHIVE_DIR ]]; then
rm -rf "$ARCHIVE_DIR"
checkstatus "rm -rf "$ARCHIVE_DIR""
fi
mkdir "$ARCHIVE_DIR"
checkstatus "mkdir $ARCHIVE_DIR"
echo " tar -C "$ARCHIVE_DIR" -zxf $GZ"
tar -C "$ARCHIVE_DIR" -zxf "$GZ"
checkstatus "tar -C $ARCHIVE_DIR -zxf $GZ"
else
# Rollback changes
if [[ -e "${GZ}${BACKUP_SUFFIX}" ]]; then
echo " cp ${GZ}${BACKUP_SUFFIX} $GZ"
cp "${GZ}${BACKUP_SUFFIX}" "$GZ"
checkstatus "cp ${GZ}${BACKUP_SUFFIX} $GZ"
rm "${GZ}${BACKUP_SUFFIX}"
checkstatus "rm ${GZ}${BACKUP_SUFFIX}"
fi
fi
done
echo ""
echo "Updating all JAR files with the same name in $DIR_PREFIX$MATCHED_JAR_FILE_NAME*.jar$JAR_FIND_SUFFIX"
# Patch (or roll back) every matching jar on the local filesystem.
for DST in $(find "$DIR_PREFIX" -name "$MATCHED_JAR_FILE_NAME*.jar$JAR_FIND_SUFFIX" -a ! -name "*datalake*")
do
echo $DST
if [ $ROLLBACK -eq 0 ]; then
# Backup original file (jar or symlink) if not already backed up
if [[ ! -e "${DST}${BACKUP_SUFFIX}" ]]; then
cp -P "$DST" "${DST}${BACKUP_SUFFIX}"
checkstatus "cp $DST ${DST}${BACKUP_SUFFIX}"
fi
# Different handling for symlink or real file
if [[ ! -h "$DST" ]]; then
# Replace with patched JAR
rm -f "$DST"
checkstatus "rm -f $DST"
DST="$(dirname "$DST")/$PATCHED_JAR_FILE_NAME.jar"
echo " cp $LOCAL_PATCH_PATH $DST"
cp "$LOCAL_PATCH_PATH" "$DST"
checkstatus "cp $LOCAL_PATCH_PATH $DST"
else
# For symlink, assume the target will be replaced with the correctly named file. Just update the link.
NEW_TARGET="$(dirname $(readlink "$DST"))/$PATCHED_JAR_FILE_NAME.jar"
ln -sfn "$NEW_TARGET" "$DST"
checkstatus "ln -sfn NEW_TARGET DST"
fi
else
# Rollback changes - need to handle 2 cases; hadoop-azure*.jar.original -> hadoop-azure*.jar & hadoop-azure*.jar -> rm
DST_FILENAME=$(basename "$DST")
DST_EXTENSION=.${DST_FILENAME##*.}
if [[ "$DST_EXTENSION" == "$BACKUP_SUFFIX" ]]; then
# hadoop-azure*.jar.original -> hadoop-azure*.jar
DST_ORIG=$(dirname "$DST")/$(basename "$DST" $BACKUP_SUFFIX)
echo " cp ${DST} $DST_ORIG"
cp -P "${DST}" "$DST_ORIG"
checkstatus "cp ${DST} $DST_ORIG"
rm "${DST}"
checkstatus "rm ${DST}"
elif [[ "$DST_EXTENSION" == "$JAR_EXTENSION" ]]; then
# hadoop-azure*.jar -> rm
echo " rm $DST"
rm "$DST"
checkstatus "rm $DST"
fi
fi
done
# HDFS update
# Same patch/rollback logic applied to jars stored directly in HDFS; only
# runs on the one node invoked with -a.
if [ $APPLY_HDFS_PATCH -gt 0 ]; then
echo ""
echo "Updating all JAR files on HDFS matching; $HDFS_DIR_PREFIX$MATCHED_JAR_FILE_NAME*.jar$JAR_FIND_SUFFIX"
for HDST in $(sudo -u $HDFS_USER hadoop fs -find "$HDFS_DIR_PREFIX" -name "$MATCHED_JAR_FILE_NAME*.jar$JAR_FIND_SUFFIX" | grep -v "datalake")
do
if [ $ROLLBACK -eq 0 ]; then
# '-test -e' status checked directly on the next line
sudo -u $HDFS_USER hadoop fs -test -e "${HDST}${BACKUP_SUFFIX}"
if [ $? -ne 0 ]; then
sudo -u $HDFS_USER hadoop fs -cp "$HDST" "${HDST}${BACKUP_SUFFIX}"
checkstatus "hadoop fs -cp $HDST ${HDST}${BACKUP_SUFFIX})"
fi
sudo -u $HDFS_USER hadoop fs -rm $HDST
checkstatus "hadoop fs -rm $HDST)"
HDST="$(dirname "$HDST")/$PATCHED_JAR_FILE_NAME.jar"
echo " hadoop fs -put -f $LOCAL_PATCH_PATH $HDST"
sudo -u $HDFS_USER hadoop fs -put -f "$LOCAL_PATCH_PATH" "$HDST"
checkstatus "hadoop fs -put -f $LOCAL_PATCH_PATH $HDST"
else
# Rollback changes - need to handle 2 cases; hadoop-azure*.jar.original -> hadoop-azure*.jar & hadoop-azure*.jar -> rm
HDST_FILENAME=$(basename "$HDST")
HDST_EXTENSION=.${HDST_FILENAME##*.}
if [[ "$HDST_EXTENSION" == "$BACKUP_SUFFIX" ]]; then
# hadoop-azure*.jar.original -> hadoop-azure*.jar
HDST_ORIG=$(dirname "$HDST")/$(basename "$HDST" $BACKUP_SUFFIX)
echo " hadoop fs -cp $HDST $HDST_ORIG"
sudo -u $HDFS_USER hadoop fs -cp "$HDST" "$HDST_ORIG"
checkstatus "hadoop fs -cp $HDST $HDST_ORIG"
sudo -u $HDFS_USER hadoop fs -rm "$HDST"
checkstatus "hadoop fs -rm $HDST"
elif [[ "$HDST_EXTENSION" == "$JAR_EXTENSION" ]]; then
# hadoop-azure*.jar -> rm
echo " hadoop fs -rm $HDST"
sudo -u $HDFS_USER hadoop fs -rm "$HDST"
checkstatus "hadoop fs -rm $HDST"
fi
fi
done
fi
# Re-pack the local archives unpacked earlier (patch mode only)
if [ $ROLLBACK -eq 0 ]; then
echo ""
echo "Updating all .tar.gz"
for GZ in $GZs
do
echo " tar -czf $GZ -C ${GZ}.dir"
tar -czf "$GZ" -C "${GZ}.dir" .
checkstatus " tar -czf $GZ -C ${GZ}.dir"
rm -rf "${GZ}.dir"
checkstatus "rm -rf ${GZ}.dir"
done
fi
# Patch (or roll back) the .tar.gz archives stored in HDFS that bundle the
# target jar. Only runs on the node invoked with -a.
if [ $APPLY_HDFS_PATCH -gt 0 ]; then
echo ""
echo "Updating all .tar.gz files on HDFS matching $HDFS_DIR_PREFIX*.tar.gz"
# Get list of *.tar.gz files from HDFS & filter for those containing our patch jar in 2 steps, so that we can correctly preserve the user context
HGZ_FILES_ALL=$(sudo -u $HDFS_USER hadoop fs -find "$HDFS_DIR_PREFIX" -name "*.tar.gz")
for HGZ in $(for f in $HGZ_FILES_ALL; do sudo -u $HDFS_USER hadoop fs -cat $f | tar -tzv | grep "$MATCHED_JAR_FILE_NAME" && echo $f; done | grep ".tar.gz")
do
echo "$HGZ"
if [ $ROLLBACK -eq 0 ]; then
# Create backup
sudo -u $HDFS_USER hadoop fs -test -e "${HGZ}${BACKUP_SUFFIX}"
if [ $? -ne 0 ]; then
sudo -u $HDFS_USER hadoop fs -cp "$HGZ" "${HGZ}${BACKUP_SUFFIX}"
checkstatus "hadoop fs -cp $HGZ ${HGZ}${BACKUP_SUFFIX}"
fi
# Get the archive, update it with the new jar, repackage the archive & copy it to HDFS
ARCHIVE_NAME=$(basename $HGZ)
ARCHIVE_DIR=/tmp/${ARCHIVE_NAME}.dir
LOCAL_TAR_FILE=/tmp/$ARCHIVE_NAME
if [[ -e $LOCAL_TAR_FILE ]]; then
rm -f $LOCAL_TAR_FILE;
checkstatus "rm -f $LOCAL_TAR_FILE;"
fi
sudo -u $HDFS_USER hadoop fs -copyToLocal "$HGZ" "$LOCAL_TAR_FILE"
checkstatus "sudo -u $HDFS_USER hadoop fs -copyToLocal $HGZ $LOCAL_TAR_FILE"
if [[ -d $ARCHIVE_DIR ]]; then
rm -rf $ARCHIVE_DIR
checkstatus "rm -rf $ARCHIVE_DIR"
fi
mkdir $ARCHIVE_DIR
checkstatus "mkdir $ARCHIVE_DIR"
tar -xzf $LOCAL_TAR_FILE -C $ARCHIVE_DIR
checkstatus "tar -xzf $LOCAL_TAR_FILE -C $ARCHIVE_DIR"
for DST in $(find $ARCHIVE_DIR -name "$MATCHED_JAR_FILE_NAME*.jar" -a ! -name "*datalake*")
do
# Backup original JAR if not already backed up
if [[ ! -e "${DST}${BACKUP_SUFFIX}" ]]; then
cp "$DST" "${DST}${BACKUP_SUFFIX}"
checkstatus "cp $DST ${DST}${BACKUP_SUFFIX}"
fi
rm -f "$DST"
checkstatus "rm -f $DST"
cp "$LOCAL_PATCH_PATH" "$(dirname "$DST")/$PATCHED_JAR_FILE_NAME.jar"
checkstatus "cp $LOCAL_PATCH_PATH $(dirname $DST)/$PATCHED_JAR_FILE_NAME.jar"
done
cd $ARCHIVE_DIR
checkstatus "cd $ARCHIVE_DIR"
tar -zcf $LOCAL_TAR_FILE *
checkstatus "tar -zcf $LOCAL_TAR_FILE *"
cd ..
checkstatus "cd .."
echo " hadoop fs -copyFromLocal -p -f $LOCAL_TAR_FILE $HGZ"
sudo -u $HDFS_USER hadoop fs -copyFromLocal -p -f "$LOCAL_TAR_FILE" "$HGZ"
checkstatus "sudo -u $HDFS_USER hadoop fs -copyFromLocal -p -f $LOCAL_TAR_FILE $HGZ"
rm -rf $ARCHIVE_DIR
checkstatus "rm -rf $ARCHIVE_DIR"
rm -f $LOCAL_TAR_FILE
checkstatus "rm -f $LOCAL_TAR_FILE"
else
# Rollback changes
# FIX: the original ran 'hadoop fs -test', then checkstatus, and only then
# tested $? — but by that point $? held checkstatus's own status (always
# 0), so the restore branch executed even when no backup existed. Test the
# command's status directly in the 'if'.
if sudo -u $HDFS_USER hadoop fs -test -e "${HGZ}${BACKUP_SUFFIX}"; then
echo " hadoop fs -cp ${HGZ}${BACKUP_SUFFIX} ${HGZ}"
sudo -u $HDFS_USER hadoop fs -cp "${HGZ}${BACKUP_SUFFIX}" "$HGZ"
checkstatus "sudo -u $HDFS_USER hadoop fs -cp ${HGZ}${BACKUP_SUFFIX} $HGZ"
sudo -u $HDFS_USER hadoop fs -rm "${HGZ}${BACKUP_SUFFIX}"
checkstatus "sudo -u $HDFS_USER hadoop fs -rm ${HGZ}${BACKUP_SUFFIX}"
fi
fi
done
fi
echo "Finished"
| true
|
ce23b8bbe90ccc8ea83b1cf867eaf1098ebe7df3
|
Shell
|
ryuuzaki42/php7-SlackBuild
|
/php7Build.sh
|
UTF-8
| 1,654
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
# Build and package mod_php on Slackware 14.02.
# by: Rumbler Soppa <rumbler.soppa@rellcom.com.br>
#
# All rights reserved.
#
# Redistribution and use of this script, with or without modification, is
# permitted provided that the following conditions are met:
#
# 1. Redistributions of this script must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
CWD=$(pwd)
echo "Enter root password!"
# The double-quoted block below is one command string run as root via
# 'su -c'. $CWD and the $( uname -m ) substitutions are expanded by the
# *outer* shell before root executes it.
# NOTE(review): the "find /etc -name '*php*' -delete" inside removes ANY
# /etc file matching *php* — confirm that is intended before running.
su -c "
# compile freetds
( cd $CWD/freetds ; ./freetds.SlackBuild || exit 1 ) || exit 1
upgradepkg --reinstall --install-new /tmp/freetds-1.00.9-$( uname -m )*.txz
#compile php
sh $CWD/php/php.SlackBuild
find /etc -name '*php*' -delete
#upgradepkg --reinstall --install-new /tmp/alpine-2.21-$( uname -m )*.txz
#upgradepkg --reinstall --install-new /tmp/imapd-2.21-$( uname -m )*.txz
upgradepkg --reinstall --install-new /tmp/php-7.1.9-$( uname -m )*.txz
exit 0
"
echo "Instalation Complete!"
| true
|
b572f2f1104d52e17b4020668988040b206fde38
|
Shell
|
mfkiwl/riscv-sfpu
|
/tools/install-systemc.sh
|
UTF-8
| 403
| 3.171875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Download, build, and install SystemC 2.3.3 into /opt/systemc.
# Requires sudo for the install prefix and network access for the tarball.
set -e

INST_PATH=/opt/systemc

# Recreate the install prefix from scratch and hand it to the current user.
if [ -d "$INST_PATH" ]
then
    sudo rm -rf "$INST_PATH"
fi
sudo mkdir "$INST_PATH"
sudo chown -R "$USER" "$INST_PATH/"

# Remove any stale source tree before unpacking a fresh copy.
if [ -d "systemc-2.3.3" ]; then
    rm -rf systemc-2.3.3
fi
wget https://www.accellera.org/images/downloads/standards/systemc/systemc-2.3.3.tar.gz
tar -xf systemc-2.3.3.tar.gz

# BUG FIX: guard the cd -- in the original, a failed download/unpack would
# leave us running configure/make in the wrong directory.
cd systemc-2.3.3 || exit 1
./configure --prefix="$INST_PATH"
make -j"$(nproc)"
make install
| true
|
9237ee3abb59c2c81de2a66c78b7c590955dd3e3
|
Shell
|
LuxAter/Fem
|
/gen_mesh.sh
|
UTF-8
| 1,125
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# Generate a triangular mesh from a planar straight-line graph with `triangle`,
# then collect the generated .poly/.ele/.node/.neigh files into an output dir.
OPTIND=1
QUALITY=
AREA=
OUT=
# Print usage information.
function show_help() {
  echo "gen_mesh.sh [-a AREA] [-q QUALITY] [-o OUT_FILE] INPUT"
  echo "  -a Maximum area of a triangular element"
  echo "  -q Minimum angle of the triangular element in degrees"
  echo "  -o Output file path"
}
while getopts "h?q:a:o:" opt; do
  case "$opt" in
  h|\?)
    show_help
    exit 0
    ;;
  q)
    QUALITY=$OPTARG
    ;;
  a)
    AREA=$OPTARG
    ;;
  o)
    OUT=$OPTARG
    ;;
  esac
done
shift $((OPTIND-1))
[ "${1:-}" == "--" ] && shift
# Run triangle with the flag set matching the supplied options
# (-p PSLG input, -z zero-based numbering, -n emit neighbour file).
# BUG FIX: option values and the input path are now quoted so arguments
# containing spaces are not word-split.
if [[ -n $QUALITY ]] && [[ -n $AREA ]]; then
  ./triangle -pznq"$QUALITY" -a"$AREA" "$1"
elif [[ -n $QUALITY ]]; then
  ./triangle -pznq"$QUALITY" "$1"
elif [[ -n $AREA ]]; then
  ./triangle -pzna"$AREA" "$1"
else
  ./triangle -pzn "$1"
fi
# triangle writes its outputs next to the input with a ".1" iteration suffix.
FILE="${1%.*}.1"
if [[ -z $OUT ]]; then
  OUT="$(basename "$1")"
  OUT="./.mesh/${OUT%.*}"
fi
if [[ ! -d "$OUT" ]]; then
  mkdir -p "$OUT"
fi
# BUG FIX: quote the basename substitution -- the original `$(basename $OUT)`
# word-split output directories containing spaces.
BASE="$(basename "$OUT")"
mv "$FILE.poly" "$OUT/$BASE.poly"
mv "$FILE.ele" "$OUT/$BASE.ele"
mv "$FILE.node" "$OUT/$BASE.node"
mv "$FILE.neigh" "$OUT/$BASE.neigh"
| true
|
a190295477c8781bf278f4f2c5b658955cc41733
|
Shell
|
brainsqueezer/scripts
|
/inotify_logs.sh
|
UTF-8
| 197
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/sh
# Watch /var/log/messages and pop a KDE dialog whenever the newest appended
# line mentions httpd. Blocks in inotifywait until the file is modified.
# NOTE(review): the loop terminates as soon as inotifywait exits non-zero
# (e.g. tool not installed or file missing) -- presumably intentional.
while inotifywait -e modify /var/log/messages; do
if tail -n1 /var/log/messages | grep httpd; then
kdialog --msgbox "Apache needs love!"
fi
done
| true
|
7ac06e102f83ad7f6168ca2d66547888dc889b7a
|
Shell
|
markbirss/Ubuntu-on-rpi-fixes
|
/scripts/zoom-install-64bit.sh
|
UTF-8
| 1,354
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the armhf Zoom client (run through box86) on 64-bit Ubuntu for
# Raspberry Pi: verify the system, (re)install box86, pull dependencies,
# unpack Zoom into the home directory and create launcher + desktop entry.

#check system info to determine if script can run
echo "checking system info to determine if script can run"
./sys-info-test.sh
echo "checking if box86 is installed..."
#check if box86 is installed
if [ -d ~/box86 ]; then
  read -p "box86 is already installed. Do you want to reinstall (recommended) (y/n)?" choice
  case "$choice" in
    y|Y ) echo "box86 will be reinstalled"; ./install-box86-ubuntu-64bit.sh ;;
    n|N ) echo "box86 won't be reinstalled" && exit ;;
    * ) echo "invalid";;
  esac
# BUG FIX: the original never closed this `if`, which made the whole script a
# syntax error ("unexpected end of file").
fi
echo "installing dependencies"
#install dependencies
sudo apt install libxcb-shape0:armhf libxcb-randr0:armhf libxcb-image:armhf libxcb-image0:armhf libxcb-xtest0:armhf libxcb-keysyms1:armhf libdbus-1-3:armhf -y
echo "downloading zoom..."
#download zoom
wget https://github.com/Itai-Nelken/Pi-Assistant/raw/main/apps/zoom/files/zoom.tar.xz
# BUG FIX: extract into the home directory with -C. The original passed the
# directory as a tar *member name* to extract, which fails.
tar -xf zoom.tar.xz -C "/home/$USER"
rm zoom.tar.xz
echo "creating launcher script"
# BUG FIX: drop the stray duplicate "wget" argument, and fetch the raw file --
# the github.com/.../blob/... URL returns an HTML page, not the script.
wget -q https://raw.githubusercontent.com/Itai-Nelken/Pi-Assistant/main/apps/zoom/startzoom.sh
mv startzoom.sh ~/zoom
echo "creating desktop shortcut, enter your password when asked"
# BUG FIX: same raw-URL and duplicate-argument fix for the desktop entry.
wget -q https://raw.githubusercontent.com/Itai-Nelken/Pi-Assistant/main/apps/zoom/files/zoom.desktop
sudo mv zoom.desktop /usr/share/applications
sleep 3
echo "
"
echo "script finished"
| true
|
42f1a401f2f450153e43c8a78e4bdd3b5d02c67c
|
Shell
|
kevyin/sunny
|
/scripts/compare_with_md5.sh
|
UTF-8
| 194
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Compare the md5 of a file ($2) against a recorded checksum file ($1).
# Prints a diff of the two digests (empty output means they match).
# BUG FIX: the original shebang was "#!/usr/bash", which is not a valid
# interpreter path on any standard system.
md5file=$1
file=$2
# Ensure the scratch directory exists before writing temp files into it
# (the original assumed ~/tmp was already present).
mkdir -p ~/tmp
tmp1=~/tmp/${file##*/}.tmp1
tmp2=~/tmp/${file##*/}.tmp2
# Digest of the actual file vs. the digest recorded in the checksum file.
md5sum "$file" | cut -f1 -d' ' > "$tmp1"
cut -f1 -d' ' "$md5file" > "$tmp2"
diff "$tmp1" "$tmp2"
rm "$tmp1" "$tmp2"
| true
|
1ad144fbd102f9bda1ba38f0722661a0b97efeae
|
Shell
|
af-go/misc
|
/CA/bin/pem2bundle.sh
|
UTF-8
| 400
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Concatenate the intermediate CA private key and certificate into one PEM,
# then wrap it as a {"pem_bundle": "..."} JSON document (e.g. for uploading
# a CA bundle to an API endpoint).
BASE_DIR=`cd $(dirname $0)/.. && pwd`
cat $BASE_DIR/certificates/intermediate/private/cakey.pem $BASE_DIR/certificates/intermediate/cacert.pem > $BASE_DIR/intermediate.pem
# Collapse the PEM onto a single line: drop blank lines (NF), strip CRs, and
# join the remaining lines with literal "\n" escapes so the result is a valid
# JSON string value.
data=$(awk 'NF {sub(/\r/, ""); printf "%s\\n",$0;}' $BASE_DIR/intermediate.pem)
echo "{" > $BASE_DIR/bundle.json
echo " \"pem_bundle\": \"${data}\"" >> $BASE_DIR/bundle.json
echo "}" >> $BASE_DIR/bundle.json
| true
|
0bc178cfc634db9d573bcd3eb4a912545517813b
|
Shell
|
jsdf/goose64
|
/src/disassemble_modern.sh
|
UTF-8
| 911
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -eu
# pass program counter address as arg to this script to see disassembly around
# the program counter
pc_address="${1:-''}"
# this file has original source code info but not final relocated symbol locations
executable_file="codesegment.o"
mips64-elf-objdump --disassemble --prefix-addresses --source --wide "$executable_file" > "${executable_file}.dasm"
# this file has final symbol locations but no source code. use offsets in functions to map instructions back to the codesegment.o file
# would be good if spicy had add an option to not strip this stuff out
executable_file="goose.out" # this should be the linked binary (not rom file)
mips64-elf-objdump --disassemble-all --source --prefix-addresses --wide --all-header "$executable_file" > "${executable_file}.dasm"
if [ -n "$pc_address" ]; then
grep --color=always -C 8 "${pc_address/0x/}" "${executable_file}.dasm"
fi
| true
|
21548c84f82fb1b4937d3190f2360616ad343eb3
|
Shell
|
datajaguar/jaguarqa
|
/sh/security2_login_user.sh
|
UTF-8
| 1,435
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
#: Script Name: security1_create_user.sh
#: Authors: Andrew Zhang
#: Date: 5/4/2018
#: Purpose: Security test 1: Create users.
#: Test cases:
# Use case 1:
# -------------------------------------------------------------------
#
#: Component/Sub Comp:
#: Owned by: Andrew Zhang
#: Reviewed by:
#: Tag:
#: Dependencies: QA_HOME framework and loginj
#: Runnable: true
#: Arguments: none
#: Memory/Disk: 200MB/200MB
#: SUC: 1
#: Created for:
#: Retired for:
#: Test time: within 10 sec
#: History:
# Note: In order to run test, we need to set two
#
# Limitation of this test: The test will alwasy generate a diff file as the UUID will
# create a different number everytime we run the script; check the log file for the
# result.
#
export FILE=security1_create_user
# All output is appended to this per-test log under the QA framework tree.
logf="$QA_HOME/work/${FILE}.log"
#
export ADMIN_PASSWORD=jaguarjaguarjaguar
# Clean old log:
if [ -f $logf ]
then mv $logf ${logf}_old
fi
# 1.Create table schema
echo -e "\n1. Create table and load data" 2>&1 | tee -a $logf
# BUG FIX: this banner was teed to the undefined variable "$l" in the
# original, so the separator never reached the log file; use $logf like
# every other line in the script.
echo -e "============================== \n" 2>&1 | tee -a $logf
loginj < $QA_HOME/sql/${FILE}.sql 2>&1 | tee -a $logf
wait
# 2. Check Correctness
echo -e "\n2. Check Correctness..." 2>&1 | tee -a $logf
echo -e "============================== \n" 2>&1 | tee -a $logf
compare_result $QA_HOME/work/${FILE}.out $QA_HOME/bas/${FILE}.bas 2>&1 | tee -a $logf
echo -e "\nEnd of test ${FILE}.sh" 2>&1 | tee -a $logf
| true
|
bf4a463481eef644b51d48c8a1bc5fe01b90f6c2
|
Shell
|
mikegwhit/servermanager
|
/lib/ubuntu/sh/user/create_user.sh
|
UTF-8
| 341
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Container build step: install nss-wrapper (plus gettext) so the image can be
# executed as an arbitrary non-root user, and hook the user-generation script
# into bash startup.
# NOTE(review): $saasworks is not defined in this script -- presumably a log
# prefix exported by the calling environment; confirm.
echo "$saasworks Install nss-wrapper to be able to execute image as non-root user"
apt-get update
apt-get install -y libnss-wrapper gettext
apt-get clean -y
echo "add 'source generate_container_user' to .bashrc"
# have to be added to hold all env vars correctly
echo 'source $STARTUPDIR/bash_user.sh' >> $HOME/.bashrc
| true
|
efa9da0429e705a3c91397144211bd931c446a5f
|
Shell
|
Fedtekansler/Compiler
|
/.svn/pristine/ef/efa9da0429e705a3c91397144211bd931c446a5f.svn-base
|
UTF-8
| 730
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash --norc
# Switch the tiger compiler build between the generated lexer and the bundled
# rescue lexer. Stale build products of the other variant are removed so the
# next build picks up the requested lexer.
binary=tigerc.x86-linux
lexer=tiger.lex.sml
rlexer=rescue-$lexer
id='>>> This is rescue-tiger.lex.sml <<<'

# Detect which lexer the current binary embeds by searching it for the
# rescue lexer's marker string. Exactly one of the two flags ends up set.
if strings $binary | grep "$id" >/dev/null; then
  isRescue="yes"
  unset isGenerated
else
  isGenerated="yes"
  unset isRescue
fi

case $1 in
  (generated)
    # Want the generated lexer: remove artifacts built from the rescue one.
    if [ "$isRescue" = "yes" ]; then
      echo "Removing compiler using rescue lexer"
      chmod u+w $binary $lexer
      rm -f $binary $lexer
    fi
    ;;
  (rescue)
    # Want the rescue lexer: remove generated artifacts and install the
    # rescue copy in their place.
    if [ "$isGenerated" = "yes" ]; then
      echo "Removing compiler not using rescue lexer"
      chmod u+w $binary $lexer
      rm -f $binary $lexer
      cp $rlexer $lexer
      touch $lexer
    fi
    ;;
esac
| true
|
cba137f5c8f08225df08223b531a40896a7bfd11
|
Shell
|
pranjalikawale/Shell-Script
|
/Programconstruct/forLoop/harmonic.sh
|
UTF-8
| 163
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Compute the n-th harmonic number H(n) = 1 + 1/2 + ... + 1/n, printed with
# two decimal places via bc.
read -p "enter the number" num
harmonicNum=0
for ((i=1;i<=num;i++))
do
# BUG FIX: the original added $((1/$i)) inside shell integer arithmetic,
# which truncates 1/i to 0 for every i > 1 (and then fails outright when
# the running bc total like "1.50" re-enters $((...))). Do the whole sum
# in bc instead.
harmonicNum=$(bc <<< "scale=2; $harmonicNum + 1/$i")
done
echo "$harmonicNum"
| true
|
ae36e97fab7be911eb78796a6fc4ec9a99353c6d
|
Shell
|
lucasdc6/docker-db-manager
|
/bin/mongo-up
|
UTF-8
| 321
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Launcher for a MongoDB container. The variables below are configuration
# consumed by the shared wrapper template sourced at the bottom.
DOCKERNAME="mongo"
NAME="mongo"
STANDAR_PORT=27017
PORT=$STANDAR_PORT
VERSION="latest"
# In-container data directory to back with a named volume.
DATA="/data/db"
# Create a fresh container, mounting a per-version named volume for the data
# directory. DEFAULT_OPTIONS and IMAGE_NAME are provided by the sourced
# docker_wrapper_template -- presumably set before this function is invoked.
function new_container {
IMAGE_EXTRA_OPTIONS="-v mongo-data-$VERSION:$DATA"
docker run $DEFAULT_OPTIONS $IMAGE_EXTRA_OPTIONS $IMAGE_NAME
}
source $DOCKERDBMANAGERPATH/libexec/docker_wrapper_template
|
df5d5683cb4e367697817351d9f118d9894f64c3
|
Shell
|
biocore/emp
|
/code/03-otu-picking-trees/deblur/debluremp.90.sh
|
UTF-8
| 759
| 3.015625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# deblur all emp studies (from per study post split-libraries files)
# For each study directory: split the filtered reads per sample, run the
# deblur cleaning step at trim length 90, collect per-sample reference
# sequences into all90/ and build a per-study table.
DATADIR=/projects/emp/02-adapter-clean-up
LOCDIR=/home/amam7564/data/emp/process
SCRIPTDIR=/home/amam7564/data/emp/scripts
FNAME=filtered_seqs.fna
cd $LOCDIR
mkdir all90
for cdir in $DATADIR/*
do
studyname=$(basename $cdir)
echo "processing study $studyname"
sfile=$cdir/$FNAME
cp $sfile .
split_sequence_file_on_sample_ids.py -i $FNAME -o splitdir
# Clean indels/errors per sample: -l 90 read length, error/indel thresholds
# as tuned for the EMP dataset.
$SCRIPTDIR/scripts/CleanIndelDirParallel.py splitdir -l 90 -e 0.02 -n 25 -m 0.005 -d 1,0.06,0.02,0.02,0.01,0.005,0.005,0.005,0.001,0.001,0.001,0.0005
cp splitdir/*.ref.fa all90
$SCRIPTDIR/scripts/CreateTable.py -l -s -d splitdir -o $studyname.90.clean
# Remove per-study scratch before the next iteration.
rm -r splitdir
rm $FNAME
echo "done processing $studyname"
done
| true
|
c5c524eea01d28abd88fccfe6e96a9e38934b081
|
Shell
|
smacpats/Freedom
|
/DEBIAN/prerm
|
UTF-8
| 424
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Package pre-removal hook: restore the stock hosts file saved at install
# time, restart the DNS responder so the change takes effect, and remove the
# package's state directory.
echo "Restoring Default Hosts File"
cp -a /etc/Freedom/defaulthosts /etc/hosts
echo "Restore Complete"
chown root:wheel /etc/hosts
chmod 644 /etc/hosts
echo "Killing discoveryd/mDNSResponder..."
# Both are killed; launchd respawns whichever one the OS version uses.
killall -9 discoveryd
killall -9 mDNSResponder
echo "Should have died and restarted by now..."
echo "Cleaning up the mess..."
rm -rf /etc/Freedom
echo "You should be reset back to the default hosts file now!"
exit 0
| true
|
090df9fd29edc002005c241f05346dd24f449bae
|
Shell
|
ahdiaz/archconf
|
/installer/install
|
UTF-8
| 1,092
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Arch Linux provisioning: sync pacman, remove a server-listed set of
# packages, then install a server-listed set via yaourt (AUR helper).
pacman_options="--needed --noconfirm"
packages_url=http://ahdiaz.euler.es/arch/installer
packages_install=$packages_url/packages.install
packages_uninstall=$packages_url/packages.uninstall
yaourt_url=https://aur.archlinux.org/packages/ya/yaourt/PKGBUILD
package_query_url=https://aur.archlinux.org/packages/pa/package-query/PKGBUILD
# Build and install an AUR package from its PKGBUILD if it is not already
# installed. $1 = package name, $2 = PKGBUILD URL.
aur_install ()
{
sudo pacman -S $pacman_options base-devel
package_name=$1
package_url=$2
installed=`pacman -Qs $package_name`
if [ -z "$installed" ]; then
mkdir -p abs/$package_name
curl $package_url -o abs/$package_name/PKGBUILD
cd abs/$package_name
# makepkg: clean, sync deps, install the built package, no prompts.
makepkg -csi --noconfirm
cd -
fi
}
#sudo pacman-key --init
#sudo pacman-key --populate archlinux
sudo pacman -Syy
sudo pacman -S $pacman_options pacman
# Uninstall everything listed remotely (comment lines starting with # are
# filtered out of the fetched list).
sudo pacman -Rs --noconfirm $(curl $packages_uninstall | grep -v "^#")
#aur_install "package-query" $package_query_url
#aur_install "yaourt" $yaourt_url
#sudo pacman -S $pacman_options yaourt
#yaourt -Sy
# Install the remote list from repos + AUR; assumes yaourt is already present.
yaourt -Sua $pacman_options $(curl $packages_install | grep -v "^#")
| true
|
edcb56a7e6c1f58be8673dc95be98531a04b9ddd
|
Shell
|
ZorgeR/repoZITO
|
/src/repoZITO/content/serv.sh
|
UTF-8
| 758
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Z-Mod E8
#
# Please don't modify this code.
#
# Copyright - Zorge.R - 2010 - motofan.ru
#
# Repository-server chooser for the on-phone repoZITO UI: show the current
# server, let the user enter a new one, then reload the main menu script.
rz_curserv=`cat $repoz_content/reposerv`
# showRadio presents the menu; its exit status selects the action below.
showRadio "$rz_HEAD_SERV" "$rz_curserv" "$rz_SERV_TAB_1"
ret=$?
# 0 = back: return to the main repoZITO menu.
[ $ret -eq 0 ] && . $repoz_content/repoZITO.sh
case $ret in
1)
# 1 = edit: prompt for a new server URL (E2 model uses a different
# showTextEntry signature), persist it, and confirm to the user.
if [ $rz_model = E2 ];then rz_newserv=`showTextEntry 0 "repoZITO" "Enter adress with http:// or ftp://"`;echo "$rz_newserv" > "$repoz_content/reposerv";else rz_newserv=`showTextEntry "$repoz_content/reposerv" "repoZITO" "$rz_newserv_enter" 0`;fi
rz_newserv_txt=`cat $repoz_content/reposerv`
showQ "$rz_newserv_compl" "$rz_newserv_compl_txt - $rz_newserv_txt" 2
;;
*)
;;
esac
# update curent server
rz_serv=`cat $repoz_content/reposerv`
. $repoz_content/repoZITO.sh
|
b6f34f13df2c6fa68b6388526e1f5346799c50be
|
Shell
|
Cloudxtreme/snltd-monitor
|
/checks/hardware/check_PSUs.sh
|
UTF-8
| 766
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/ksh
#=============================================================================
#
# check_PSUs.sh
# -------------
#
# Check all PSUs in the machine are powered up and functioning correctly.
# Reads a cached prtdiag-style report from $DIAG_CACHE: any P_PWR line that
# does not end in "okay" counts as a failed supply.
#
# Exit codes: 0 = all PSUs okay
#             2 = at least one PSU not okay (lines echoed when RUN_DIAG set)
#             4 = no usable diagnostic cache
#
# R Fisher 01/2009
#
#=============================================================================

# No diagnostic cache available -> cannot determine PSU state.
if [[ -z $DIAG_CACHE ]] || [[ ! -s $DIAG_CACHE ]]
then
	exit 4
fi

# Any power-supply line not reported "okay" means failure.
if [[ -n $(egrep P_PWR $DIAG_CACHE | egrep -v okay$) ]]
then
	# When the caller asked for diagnostics, echo the offending lines.
	[[ -n $RUN_DIAG ]] && egrep "P_PWR" $DIAG_CACHE | egrep -v okay$
	exit 2
fi

exit 0
| true
|
dd950b5205e0d1f72cc6cf08f4aa4f13bf2eb1d7
|
Shell
|
jonhare/nativelibs4java
|
/libraries/ScalaCL/Collections/sbazPackage
|
UTF-8
| 2,511
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the ScalaCL Collections library and compiler plugin with Maven, stage
# the jars into an sbaz layout, pack them as an sbaz package and (optionally)
# share the advert. Version is derived from pom.xml plus a date-stamped
# snapshot suffix unless SBAZ_VERSION is pre-set in the environment.
MVN_VERSION="`cat pom.xml | grep '<version' | head -n 1 | sed -e 's/.*<version>\(.*\)<\/version>.*/\1/g'`"
SBAZ_VERSION_AUTO="`echo $MVN_VERSION | sed -e "s/-SNAPSHOT/-SNAPSHOT-\`date '+%Y%m%d'\`/g"`"
SBAZ_VERSION=${SBAZ_VERSION:-$SBAZ_VERSION_AUTO}
LINK_BASE="http://nativelibs4java.sourceforge.net/sbaz/scalacl/"
#LINK_BASE="http://ochafik.com/sbaz/scalacl/"
SCALACL_HOME="`pwd`"
echo "###################################################"
echo "# ScalaCL : "
echo "# Maven version = $MVN_VERSION"
echo "# sbaz version = $SBAZ_VERSION"
echo "###################################################"
echo "# Building ScalaCL Collections"
mvn -Djarsigner.skip=false -Dstorepass=$KEYSTORE_PASS package -DskipTests
cd ../Plugin
echo "# Building ScalaCL Compiler Plugin"
mvn -Djarsigner.skip=false -Dstorepass=$KEYSTORE_PASS package -DskipTests
cd $SCALACL_HOME
# Stage directory mirrors the sbaz install layout (lib/ + misc/.../plugins).
TEMP_OUT=target/sbaz_out
LIB_JAR_OUT=$TEMP_OUT/lib
PLUGIN_JAR_OUT=$TEMP_OUT/misc/scala-devel/plugins
rm -fR $TEMP_OUT
mkdir -p $LIB_JAR_OUT
mkdir -p $PLUGIN_JAR_OUT
echo "# Copying libraries"
# DONT_USE_SHADED=1 ships the unshaded jar plus its nativelibs4java
# dependencies (development layout); otherwise ship the single shaded jar.
if [[ "$DONT_USE_SHADED" == "1" ]] ; then
	export SBAZ_VERSION="$SBAZ_VERSION-devel"
	NL4J_BASE="/Users/ochafik/.m2/repository/com/nativelibs4java"
	cp target/scalacl-$MVN_VERSION.jar $LIB_JAR_OUT
	cp $NL4J_BASE/bridj/0.5/bridj-5.jar $LIB_JAR_OUT
	cp $NL4J_BASE/opencl4java-bridj/1.0.0-RC1/opencl4java-bridj-1.0.0-RC1.jar $LIB_JAR_OUT
	cp $NL4J_BASE/javacl-core-bridj/1.0.0-RC1/javacl-core-bridj-1.0.0-RC1.jar $LIB_JAR_OUT
	cp $NL4J_BASE/javacl-bridj/1.0.0-RC1/javacl-bridj-1.0.0-RC1.jar $LIB_JAR_OUT
	cp $NL4J_BASE/libcl-bridj/1.0.0-RC1/libcl-bridj-1.0.0-RC1.jar $LIB_JAR_OUT ;
else
	cp target/scalacl-$MVN_VERSION-shaded.jar $LIB_JAR_OUT ;
fi
echo "# Copied the following libraries :"
ls -l $LIB_JAR_OUT
echo "# Copying compiler plugin libraries"
cp ../Plugin/target/scalacl-compiler-plugin-$MVN_VERSION.jar $PLUGIN_JAR_OUT
echo "# Packing for sbaz"
sbaz pack scalacl $TEMP_OUT \
	--linkbase $LINK_BASE \
	--version $SBAZ_VERSION \
	--outdir src/main/sbaz \
	--descfile ABOUT || ( echo "sbaz pack failed" && exit 1 )
mkdir -p src/main/sbaz
# Drop the locally-installed copy so the freshly-built one can be tested.
sbaz remove scalacl
sbaz compact
#sbaz retract scalacl/$SBAZ_VERSION
if [[ "$DONT_SHARE" == "1" ]] ; then
	echo "Not sharing the package (use SHARE=1 to share)"
	echo src/main/sbaz/scalacl-$SBAZ_VERSION.sbp ;
else
	sbaz share *.advert ;
fi
mv *.advert src/main/sbaz
open src/main/sbaz
echo "Now please copy 'src/main/sbaz/scalacl-$SBAZ_VERSION.sbp' to '$LINK_BASE'"
| true
|
bb53033fcf58679d2eacffb64d4fd15af4d49fb4
|
Shell
|
petronny/aur3-mirror
|
/stratuslab-cli-user/stratuslab-cli-setup.sh
|
UTF-8
| 461
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/sh
# Environment setup for the StratusLab CLI: append the StratusLab bin and
# python library directories to PATH and PYTHONPATH, seeding either variable
# if it is currently empty.
export STRATUSLAB_ROOT=/opt/stratuslab
echo "::: STRATUSLAB_ROOT=$STRATUSLAB_ROOT"

# Add the CLI bin directory to PATH.
if [ -z "$PATH" ]; then
    export PATH=${STRATUSLAB_ROOT}/bin
else
    export PATH=${PATH}:${STRATUSLAB_ROOT}/bin
    echo "==> [${STRATUSLAB_ROOT}/bin]"
fi

# Same treatment for PYTHONPATH so the stratuslab python package imports.
if [ -z "$PYTHONPATH" ]; then
    export PYTHONPATH=${STRATUSLAB_ROOT}/lib/stratuslab/python
else
    export PYTHONPATH=${PYTHONPATH}:${STRATUSLAB_ROOT}/lib/stratuslab/python
fi

echo "::: PATH: $PATH"
|
584d524b564b4d02a7dd4e88147180d856e592b0
|
Shell
|
Mati607/DetectionLab
|
/Vagrant/bootstrap.sh
|
UTF-8
| 14,297
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# DetectionLab logger-host provisioning. This prologue pins DNS and primes
# apt before the install functions defined below run.
# Override existing DNS Settings using netplan, but don't do it for Terraform builds
# (169.254.169.254 answering means we are on a cloud metadata service).
if ! curl -s 169.254.169.254 --connect-timeout 2 >/dev/null; then
echo -e "    eth1:\n      dhcp4: true\n      nameservers:\n        addresses: [8.8.8.8,8.8.4.4]" >>/etc/netplan/01-netcfg.yaml
netplan apply
fi
# Force Google DNS and lock the file (chattr +i) so nothing rewrites it later.
sed -i 's/nameserver 127.0.0.53/nameserver 8.8.8.8/g' /etc/resolv.conf && chattr +i /etc/resolv.conf
# Get a free Maxmind license here: https://www.maxmind.com/en/geolite2/signup
# Required for the ASNgen app to work: https://splunkbase.splunk.com/app/3531/
export MAXMIND_LICENSE=
if [ -n "$MAXMIND_LICENSE" ]; then
echo "Note: You have not entered a MaxMind license key on line 5 of bootstrap.sh, so the ASNgen Splunk app may not work correctly."
echo "However, it is not required and everything else should function correctly."
fi
# Non-interactive apt plus apt-fast parallel-download settings, and mirror
# lists injected into sources.list for faster package fetches.
export DEBIAN_FRONTEND=noninteractive
echo "apt-fast apt-fast/maxdownloads string 10" | debconf-set-selections
echo "apt-fast apt-fast/dlflag boolean true" | debconf-set-selections
sed -i "2ideb mirror://mirrors.ubuntu.com/mirrors.txt focal main restricted universe multiverse\ndeb mirror://mirrors.ubuntu.com/mirrors.txt focal-updates main restricted universe multiverse\ndeb mirror://mirrors.ubuntu.com/mirrors.txt focal-backports main restricted universe multiverse\ndeb mirror://mirrors.ubuntu.com/mirrors.txt focal-security main restricted universe multiverse" /etc/apt/sources.list
# Add the PPAs needed by later steps (apt-fast, yq) and install the build
# toolchain plus utilities used throughout provisioning.
apt_install_prerequisites() {
echo "[$(date +%H:%M:%S)]: Adding apt repositories..."
# Add repository for apt-fast
add-apt-repository -y ppa:apt-fast/stable
# Add repository for yq
add-apt-repository -y ppa:rmescandon/yq
# Install prerequisites and useful tools
echo "[$(date +%H:%M:%S)]: Running apt-get clean..."
apt-get clean
echo "[$(date +%H:%M:%S)]: Running apt-get update..."
apt-get -qq update
apt-get -qq install -y apt-fast
echo "[$(date +%H:%M:%S)]: Running apt-fast install..."
apt-fast -qq install -y jq whois git unzip htop yq python3-pip cmake make gcc g++ flex bison libpcap-dev libssl-dev python-dev swig zlib1g-dev emacs
}
# Customize the login banner: colored prompts, remove stock help text, and
# install the DetectionLab MOTD fragment.
modify_motd() {
echo "[$(date +%H:%M:%S)]: Updating the MOTD..."
# Force color terminal
sed -i 's/#force_color_prompt=yes/force_color_prompt=yes/g' /root/.bashrc
sed -i 's/#force_color_prompt=yes/force_color_prompt=yes/g' /home/vagrant/.bashrc
# Remove some stock Ubuntu MOTD content
chmod -x /etc/update-motd.d/10-help-text
# Copy the DetectionLab MOTD
cp /vagrant/resources/logger/20-detectionlab /etc/update-motd.d/
chmod +x /etc/update-motd.d/20-detectionlab
}
# Verify each prerequisite package is installed; attempt one reinstall on
# failure and abort provisioning if that also fails.
# NOTE(review): the list checks python-pip while apt_install_prerequisites
# installs python3-pip -- confirm which one downstream steps actually need.
test_prerequisites() {
for package in jq whois build-essential git unzip yq python-pip; do
echo "[$(date +%H:%M:%S)]: [TEST] Validating that $package is correctly installed..."
# Loop through each package using dpkg
if ! dpkg -S $package >/dev/null; then
# If dpkg returns a non-zero return code, try to re-install the package
echo "[-] $package was not found. Attempting to reinstall."
apt-get -qq update && apt-get install -y $package
# BUG FIX: re-check with dpkg -S, matching the initial check. The original
# re-checked with `which`, which always fails for meta-packages such as
# build-essential (no binary of that name), so provisioning aborted even
# after a successful reinstall.
if ! dpkg -S $package >/dev/null; then
# If the reinstall fails, give up
echo "[X] Unable to install $package even after a retry. Exiting."
exit 1
fi
else
echo "[+] $package was successfully installed!"
fi
done
}
# Pin eth1 to its expected static address (192.168.38.105) and wait for DNS.
# Skipped entirely on KVM hosts and on ESXi (identified by a fixed eth2 MAC).
fix_eth1_static_ip() {
USING_KVM=$(sudo lsmod | grep kvm)
if [ -n "$USING_KVM" ]; then
echo "[*] Using KVM, no need to fix DHCP for eth1 iface"
return 0
fi
if [ -f /sys/class/net/eth2/address ]; then
if [ "$(cat /sys/class/net/eth2/address)" == "00:50:56:a3:b1:c4" ]; then
echo "[*] Using ESXi, no need to change anything"
return 0
fi
fi
# There's a fun issue where dhclient keeps messing with eth1 despite the fact
# that eth1 has a static IP set. We workaround this by setting a static DHCP lease.
echo -e 'interface "eth1" {
  send host-name = gethostname();
  send dhcp-requested-address 192.168.38.105;
}' >>/etc/dhcp/dhclient.conf
netplan apply
# Fix eth1 if the IP isn't set correctly
ETH1_IP=$(ip -4 addr show eth1 | grep -oP '(?<=inet\s)\d+(\.\d+){3}' | head -1)
if [ "$ETH1_IP" != "192.168.38.105" ]; then
echo "Incorrect IP Address settings detected. Attempting to fix."
ifdown eth1
ip addr flush dev eth1
ifup eth1
# Re-read the address via ifconfig after the bounce.
ETH1_IP=$(ifconfig eth1 | grep 'inet addr' | cut -d ':' -f 2 | cut -d ' ' -f 1)
if [ "$ETH1_IP" == "192.168.38.105" ]; then
echo "[$(date +%H:%M:%S)]: The static IP has been fixed and set to 192.168.38.105"
else
echo "[$(date +%H:%M:%S)]: Failed to fix the broken static IP for eth1. Exiting because this will cause problems with other VMs."
exit 1
fi
fi
# Make sure we do have a DNS resolution
while true; do
if [ "$(dig +short @8.8.8.8 github.com)" ]; then break; fi
sleep 1
done
}
# Download and install Splunk Enterprise (latest resolvable version, with a
# hard-coded fallback), create indexes, install add-ons, configure inputs and
# UI defaults, then restart with boot-start enabled. Idempotent: no-op when
# /opt/splunk/bin/splunk already exists.
install_splunk() {
# Check if Splunk is already installed
if [ -f "/opt/splunk/bin/splunk" ]; then
echo "[$(date +%H:%M:%S)]: Splunk is already installed"
else
echo "[$(date +%H:%M:%S)]: Installing Splunk..."
# Get download.splunk.com into the DNS cache. Sometimes resolution randomly fails during wget below
dig @8.8.8.8 download.splunk.com >/dev/null
dig @8.8.8.8 splunk.com >/dev/null
dig @8.8.8.8 www.splunk.com >/dev/null
# Try to resolve the latest version of Splunk by parsing the HTML on the downloads page
echo "[$(date +%H:%M:%S)]: Attempting to autoresolve the latest version of Splunk..."
LATEST_SPLUNK=$(curl https://www.splunk.com/en_us/download/splunk-enterprise.html | grep -i deb | grep -Eo "data-link=\"................................................................................................................................" | cut -d '"' -f 2)
# Sanity check what was returned from the auto-parse attempt
if [[ "$(echo "$LATEST_SPLUNK" | grep -c "^https:")" -eq 1 ]] && [[ "$(echo "$LATEST_SPLUNK" | grep -c "\.deb$")" -eq 1 ]]; then
echo "[$(date +%H:%M:%S)]: The URL to the latest Splunk version was automatically resolved as: $LATEST_SPLUNK"
echo "[$(date +%H:%M:%S)]: Attempting to download..."
wget --progress=bar:force -P /opt "$LATEST_SPLUNK"
else
echo "[$(date +%H:%M:%S)]: Unable to auto-resolve the latest Splunk version. Falling back to hardcoded URL..."
# Download Hardcoded Splunk
wget --progress=bar:force -O /opt/splunk-8.0.2-a7f645ddaf91-linux-2.6-amd64.deb 'https://download.splunk.com/products/splunk/releases/8.0.2/linux/splunk-8.0.2-a7f645ddaf91-linux-2.6-amd64.deb&wget=true'
fi
# Abort provisioning if neither download produced a .deb.
if ! ls /opt/splunk*.deb 1>/dev/null 2>&1; then
echo "Something went wrong while trying to download Splunk. This script cannot continue. Exiting."
exit 1
fi
if ! dpkg -i /opt/splunk*.deb >/dev/null; then
echo "Something went wrong while trying to install Splunk. This script cannot continue. Exiting."
exit 1
fi
# First start seeds the admin password; then create indexes and add-ons.
/opt/splunk/bin/splunk start --accept-license --answer-yes --no-prompt --seed-passwd changeme
/opt/splunk/bin/splunk add index zeek -auth 'admin:changeme'
/opt/splunk/bin/splunk add index linux -auth 'admin:changeme'
/opt/splunk/bin/splunk install app /vagrant/resources/splunk_server/splunk-add-on-for-zeek-aka-bro_400.tgz -auth 'admin:changeme'
/opt/splunk/bin/splunk install app /vagrant/resources/splunk_server/splunk-add-on-for-unix-and-linux_820.tgz -auth 'admin:changeme'
# Add a Splunk TCP input on port 9997
echo -e "[splunktcp://9997]\nconnection_host = ip" >/opt/splunk/etc/apps/search/local/inputs.conf
# Add props.conf and transforms.conf
cp /vagrant/resources/splunk_server/props.conf /opt/splunk/etc/apps/search/local/
cp /vagrant/resources/splunk_server/transforms.conf /opt/splunk/etc/apps/search/local/
cp /opt/splunk/etc/system/default/limits.conf /opt/splunk/etc/system/local/limits.conf
# Bump the memtable limits to allow for the ASN lookup table
sed -i.bak 's/max_memtable_bytes = 10000000/max_memtable_bytes = 30000000/g' /opt/splunk/etc/system/local/limits.conf
# Skip Splunk Tour and Change Password Dialog
echo "[$(date +%H:%M:%S)]: Disabling the Splunk tour prompt..."
touch /opt/splunk/etc/.ui_login
mkdir -p /opt/splunk/etc/users/admin/search/local
echo -e "[search-tour]\nviewed = 1" >/opt/splunk/etc/system/local/ui-tour.conf
# Source: https://answers.splunk.com/answers/660728/how-to-disable-the-modal-pop-up-help-us-to-improve.html
if [ ! -d "/opt/splunk/etc/users/admin/user-prefs/local" ]; then
mkdir -p "/opt/splunk/etc/users/admin/user-prefs/local"
fi
echo '[general]
render_version_messages = 1
dismissedInstrumentationOptInVersion = 4
notification_python_3_impact = false
display.page.home.dashboardId = /servicesNS/nobody/search/data/ui/views/logger_dashboard' >/opt/splunk/etc/users/admin/user-prefs/local/user-prefs.conf
# Enable SSL Login for Splunk
echo -e "[settings]\nenableSplunkWebSSL = true" >/opt/splunk/etc/system/local/web.conf
# Copy over the Logger Dashboard
if [ ! -d "/opt/splunk/etc/apps/search/local/data/ui/views" ]; then
mkdir -p "/opt/splunk/etc/apps/search/local/data/ui/views"
fi
cp /vagrant/resources/splunk_server/logger_dashboard.xml /opt/splunk/etc/apps/search/local/data/ui/views || echo "Unable to find dashboard"
# Reboot Splunk to make changes take effect
/opt/splunk/bin/splunk restart
/opt/splunk/bin/splunk enable boot-start
fi
}
# Build Zeek 3.1.3 from source into /opt/zeek, load local scripts (including
# the zeek-agent framework and ja3), configure manager/proxy/worker nodes,
# start Zeek at boot, and wire its JSON logs into Splunk via the bro TA.
install_zeek() {
echo "[$(date +%H:%M:%S)]: Installing Zeek..."
# Environment variables
NODECFG=/opt/zeek/etc/node.cfg
## custom download ######################
cd ~
wget https://download.zeek.org/zeek-3.1.3.tar.gz
tar xzf zeek-3.1.3.tar.gz
cd zeek-3.1.3
./configure --prefix=/opt/zeek/
make -j4
sudo make install
# Update APT repositories
apt-get -qq -ym update
# Install crudini
apt-get -qq -ym install crudini
##############################
# sh -c "echo 'deb http://download.opensuse.org/repositories/security:/zeek/xUbuntu_20.04/ /' > /etc/apt/sources.list.d/security:zeek.list"
# wget -nv https://download.opensuse.org/repositories/security:zeek/xUbuntu_20.04/Release.key -O /tmp/Release.key
# apt-key add - </tmp/Release.key &>/dev/null
# # Update APT repositories
# apt-get -qq -ym update
# # Install tools to build and configure Zeek
# apt-get -qq -ym install zeek crudini
##############################
export PATH=$PATH:/opt/zeek/bin
# Expose the checked-out zeek-agent framework to Zeek's site scripts.
ln -s /home/vagrant/projects/zeek-agent-framework/zeek-agent /opt/zeek/share/zeek/site/zeek-agent
pip3 install zkg==2.1.1
zkg refresh
zkg autoconfig
zkg install --force salesforce/ja3
# Load Zeek scripts
echo '
@load protocols/ftp/software
@load protocols/smtp/software
@load protocols/ssh/software
@load protocols/http/software
@load tuning/json-logs
@load policy/integration/collective-intel
@load policy/frameworks/intel/do_notice
@load frameworks/intel/seen
@load frameworks/intel/do_notice
@load frameworks/files/hash-all-files
@load base/protocols/smb
@load policy/protocols/conn/vlan-logging
@load policy/protocols/conn/mac-logging
@load ja3
@load zeek-agent
@load zeek-agent/queries/auditd
redef Intel::read_files += {
  "/opt/zeek/etc/intel.dat"
};
' >>/opt/zeek/share/zeek/site/local.zeek
# Configure Zeek: single-host cluster (manager + proxy on localhost).
crudini --del $NODECFG zeek
crudini --set $NODECFG manager type manager
crudini --set $NODECFG manager host localhost
crudini --set $NODECFG proxy type proxy
crudini --set $NODECFG proxy host localhost
# Setup Zeek workers: one pf_ring-balanced worker per sniffed interface.
crudini --set $NODECFG worker-eth0 type worker
crudini --set $NODECFG worker-eth0 host localhost
crudini --set $NODECFG worker-eth0 interface eth0
crudini --set $NODECFG worker-eth0 lb_method pf_ring
crudini --set $NODECFG worker-eth0 lb_procs 1
crudini --set $NODECFG worker-eth1 type worker
crudini --set $NODECFG worker-eth1 host localhost
crudini --set $NODECFG worker-eth1 interface eth1
crudini --set $NODECFG worker-eth1 lb_method pf_ring
crudini --set $NODECFG worker-eth1 lb_procs 1
# Setup Zeek to run at boot
cp /vagrant/resources/zeek/zeek.service /lib/systemd/system/zeek.service
systemctl enable zeek
systemctl start zeek
# # Configure the Splunk inputs: monitor Zeek's JSON logs into the zeek index.
mkdir -p /opt/splunk/etc/apps/Splunk_TA_bro/local && touch /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf
crudini --set /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf monitor:///opt/zeek/spool/manager index zeek
crudini --set /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf monitor:///opt/zeek/spool/manager sourcetype bro:json
crudini --set /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf monitor:///opt/zeek/spool/manager whitelist '.*\.log$'
crudini --set /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf monitor:///opt/zeek/spool/manager blacklist '.*(communication|stderr)\.log$'
crudini --set /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf monitor:///opt/zeek/spool/manager disabled 0
# Ensure permissions are correct and restart splunk
chown -R splunk:splunk /opt/splunk/etc/apps/Splunk_TA_bro
/opt/splunk/bin/splunk restart
# Verify that Zeek is running
if ! pgrep -f zeek >/dev/null; then
echo "Zeek attempted to start but is not running. Exiting"
exit 1
fi
}
# Clone the zeek-agent framework (state-final branch) into the vagrant user's
# projects directory; install_zeek later symlinks it into Zeek's site scripts.
install_zeek_agent_framework() {
mkdir -p /home/vagrant/projects/
cd /home/vagrant/projects/
git clone https://github.com/Wajihulhassan/zeek-agent-framework.git
cd zeek-agent-framework/
git checkout state-final
cd /home/vagrant/
# Provisioning runs as root; give the checkout back to the vagrant user.
chown -R vagrant:vagrant projects
}
# Final shell-environment tweaks plus an anonymous usage ping, run after all
# install steps complete.
postinstall_tasks() {
# Include Splunk and Zeek in the PATH
echo export PATH="$PATH:/opt/zeek/bin" >>~/.bashrc
echo "export SPLUNK_HOME=/opt/splunk" >>~/.bashrc
# Include Zeekpath
echo export ZEEKPATH="/home/vagrant/projects/zeek-agent-framework/:$(zeek-config --zeekpath)" >>~/.bashrc
# Ping DetectionLab server for usage statistics
# BUG FIX: the original URL was "https:/ping..." (missing slash after the
# scheme), so the ping could never reach the host.
curl -s -A "DetectionLab-logger" "https://ping.detectionlab.network/logger" || echo "Unable to connect to ping.detectionlab.network"
}
# Provisioning entry point: run every setup step in dependency order
# (zeek-agent checkout must precede install_zeek, which symlinks into it;
# Splunk must precede install_zeek, which configures Splunk inputs).
main() {
apt_install_prerequisites
modify_motd
test_prerequisites
fix_eth1_static_ip
install_splunk
install_zeek_agent_framework
install_zeek
postinstall_tasks
}
main
exit 0
| true
|
d92387116f41741d4b7b718e88803c7e473cd3e0
|
Shell
|
tashkeev-alex/cdr3-QTL
|
/code/remove_public_clonotype.sh
|
UTF-8
| 764
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
# Per-sample TCR preprocessing: extract productive CDR3 amino-acid sequences
# tagged with their TRBV gene family, drop "public" clonotypes listed in
# pub_cdr3.txt, and bin the remaining CDR3s by length (12-18) into gzipped
# per-length files under cdr3/HP_nopub/<sample>/.
id=$1
dd=/data/srlab/kishigaki/data/TCR_robins
# id=HIP14156
mkdir -p cdr3/HP_nopub/$id
# Columns pulled from the Adaptive/immunoSEQ export: 2=amino_acid,
# 8=productive_frequency, 9/11=v_gene fields.
cat $dd/$id.tsv |
sed -e "1d" |
cut -f 2,8,9,11 |
awk 'BEGIN{FS="\t"}{
amino_acid=$1;
productive_frequency=$2;
v_gene=$4;
if( productive_frequency != "null" && v_gene != "unresolved" ) {
# Keep only the gene family token (e.g. "TCRBV07-06" -> "07"), then
# emit "<family>_<cdr3>" for the public-clonotype filter below.
v_gene = substr(v_gene,5,7);
split(v_gene, v_gene2, "-");
print v_gene2[1] "_" amino_acid }
}' |
grep -F -w -v -f cdr3/HP_nopub/pub_cdr3.txt - |
cut -d "_" -f 2 > cdr3/HP_nopub/$id/tmp_all.cdr3
for length in $(seq 12 18);do
cat cdr3/HP_nopub/$id/tmp_all.cdr3 |
awk -v L=$length '{if( length($1)==L ){ print }}' |
gzip -f -c - > cdr3/HP_nopub/$id/$length.txt.gz #target length cdr3
done
rm -f cdr3/HP_nopub/$id/tmp_all.cdr3
| true
|
571be3fb90288429add4a81abb916b482533c4b7
|
Shell
|
TrungNguyenBa/bug_fix_minimization
|
/scripts/D4J_comment_removed_diffs.sh
|
UTF-8
| 4,355
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
die() {
echo $1
exit 1
}
[ "$D4J_HOME" != "" ] || die "D4J_HOME is not set!"
[ "$BFM" != "" ] || die "BFM is not set!"
clean=$1
projects=$2
if [ projects == "" ] || [ projects == "ALL" ]; then
projects=(Closure Lang Math Time)
fi
current=$(pwd)
echo "living $current"
echo "moving to $BFM"
cd $BFM
for pid in $projects; do
dir_project="$D4J_HOME/framework/projects/$pid"
dir_patches="$dir_project/patches"
dir_original="raw_data/D4J_projects/$pid/remove_comment_patches"
dir_modified="raw_data/D4J_projects/$pid/raw_modified_files"
if [[ $clean == "--clean" ]]; then rm -rf $dir_original $dir_modified > /dev/null 2> /dev/null; fi
cmake -E make_directory $dir_original
cmake -E make_directory $dir_modified
echo directory is $dir_original
# Determine the number of bugs for this project
num_bugs=$(cat $dir_project/commit-db | wc -l)
# Iterate over all bugs for this project
echo "checking project $pid ($num_bugs bugs)"
for bid in $(seq 1 $num_bugs); do
#get the line
echo $dir_project/commit-db
defects4j checkout -p ${pid} -v ${bid}b -w /tmp/${pid}_${bid}_buggy
src_dir=$(grep "d4j.dir.src.classes=" /tmp/${pid}_${bid}_buggy/defects4j.build.properties | cut -f2 -d'=')
src_dir=$src_dir/
#specific src_dir for bug 22-27 of Time
if [[ $pid == "Time" ]]; then
if [ $bid == "22" ] || [ $bid == "23" ] || [ $bid == "24" ] || [ $bid == "25" ] || [ $bid == "26" ] || [ $bid == "27" ]; then
src_dir="JodaTime/"$src_dir
fi
fi
line=$(head -${bid} $dir_project/commit-db | tail -1 )
#splitting the line into string array
IFS=$',' read -r -a array <<< "$line"
#get the faulted version from the array
faulted=${array[1]}
fixed=${array[2]}
echo "bug $bid:"
echo "faulted version hash: $faulted"
echo "fixed version hash: $fixed"
#get the number of file changes
git -C /tmp/${pid}_${bid}_buggy whatchanged -1 $fixed $faulted --pretty=format:'%h : %s' > $dir_original/${bid}.changed_files_list.txt
num_files=$(cat $dir_original/${bid}.changed_files_list.txt | wc -l)
#iterate through the file
for fid in $(seq 2 $num_files); do
file_line=$(head -${fid} $dir_original/${bid}.changed_files_list.txt | tail -1 )
IFS=$' |\t|\\' read -r -a chunks <<< "$file_line"
echo file_line is $file_line
file_name=${chunks[${#chunks[@]}-1]}
if [[ $file_name == *".java" ]]; then
IFS=$'/' read -r -a fchunks <<< "$file_name"
#check if the file is from test folder
istest0=${fchunks[0]}
istest1=${fchunks[1]}
istest2=${fchunks[2]}
echo $istest0 $istest1 $istest2
if [ $istest0 != "test" ] && [ $istest1 != "test" ] && [ $istest2 != "test" ]; then
echo file_name is $file_name
#get the faulted file
git -C /tmp/${pid}_${bid}_buggy checkout $faulted -- "${file_name}"
file_name_base=$( echo ${file_name#$src_dir} | tr '/' '.')
echo file_name_base is $file_name_base
cp /tmp/${pid}_${bid}_buggy/${file_name} $dir_modified/${bid}_${file_name_base}_faulted
#get the fixed file
git -C /tmp/${pid}_${bid}_buggy checkout $fixed -- "${file_name}"
cp /tmp/${pid}_${bid}_buggy/${file_name} $dir_modified/${bid}_${file_name_base}_fixed
#removing the comments and white space from both version
python $BFM/scripts/remove_comment.py $dir_modified/${bid}_${file_name_base}_faulted
python $BFM/scripts/remove_comment.py $dir_modified/${bid}_${file_name_base}_fixed
#get the diff from both file
diff -w -b -B $dir_modified/${bid}_${file_name_base}_fixed_nospcm_ $dir_modified/${bid}_${file_name_base}_faulted_nospcm_ > $dir_original/${bid}.file_n_${fid}.dif
#get the stat of the diff
diffstat -m -t -R $dir_original/${bid}.file_n_${fid}.dif > $dir_original/${bid}.file_n_${fid}.dif.stat
fi
fi
done
rm -rf /tmp/${pid}_${bid}_buggy
done
done
cd $current
| true
|
15976200b0aa4ac3fe2734d5b4b37426509ccf38
|
Shell
|
OpuRahman1/Kubernetes-Master-Class
|
/disaster-recovery/broken-kube-ca/clean_nodes.sh
|
UTF-8
| 647
| 2.875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
echo "Cleanup current cluster"
rke remove --force --config cluster.yml
rm cluster.rkestate.org kube_config_cluster.yml.org
echo "Clean nodes.."
for node in `cat cluster.yml | grep ' address: ' | awk '{print $3}'`
do
echo "Node: $node"
ssh -o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null root@"$node" 'curl https://raw.githubusercontent.com/rancherlabs/support-tools/master/extended-rancher-2-cleanup/extended-cleanup-rancher2.sh | bash'
ssh -o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null root@"$node" 'systemctl restart docker'
done
| true
|
beb50fff4390e7fbe691537fd8d104b1562783f0
|
Shell
|
LeeActon/ErriezLKM1638
|
/.auto-build.sh
|
UTF-8
| 1,323
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
################################################################################
# Title : .auto-build.sh
# Date created : 2 August 2018
__AUTHOR__="Erriez"
#
# This script will start PlatformIO build.
#
################################################################################
################################################################################
##### Setup this script and get the current gh-pages branch. #####
echo 'Setting up the script...'
# Exit with nonzero exit code if anything fails
set -e
# Build sources
platformio ci --lib="." --project-conf=platformio/platformio.ini examples/Brightness/Brightness.ino
platformio ci --lib="." --project-conf=platformio/platformio.ini examples/Buttons/Buttons.ino
platformio ci --lib="." --project-conf=platformio/platformio.ini examples/Counter/Counter.ino
platformio ci --lib="." --project-conf=platformio/platformio.ini examples/Date/Date.ino
platformio ci --lib="." --project-conf=platformio/platformio.ini examples/Demo/Demo.ino
platformio ci --lib="." --project-conf=platformio/platformio.ini examples/Temperature/Temperature.ino
platformio ci --lib="." --project-conf=platformio/platformio.ini examples/TestLEDs/TestLEDs.ino
platformio ci --lib="." --project-conf=platformio/platformio.ini examples/Time/Time.ino
| true
|
c5c7a4d0470ee3a0524cb7f7c83ebe200b4421ee
|
Shell
|
winneryong/CloudHands
|
/install.sh
|
UTF-8
| 1,644
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
#By shajf
[ `id -u` -ne 0 ] && {
echo "You must run this by root" >&2
exit 1
}
dpdk_install_prefix=/usr/local/dpdk
nr_pages=1024
nodes_dir=/sys/devices/system/node
huge_pages_mnt=/mnt/huge_pages
if [[ ! -d build ]];then
mkdir build
fi
if [[ ! -d $dpdk_install_prefix ]];then
mkdir $dpdk_install_prefix
fi
#clear build dir
rm -rf build/*
#reverse huge pages memory for dpdk
for f in `ls $nodes_dir`
do
[[ $f == "node"* ]] &&{
echo $nr_pages > $nodes_dir/$f/hugepages/hugepages-2048kB/nr_hugepages
}
done
#mount huge pages
if [[ ! -d $huge_pages_mnt ]];then
mkdir -p $huge_pages_mnt
fi
mount -t hugetlbfs nodev $huge_pages_mnt
#install dpdk
function install_dpdk(){
echo 'install dpdk----------------------------------'
cp dpdk-2.0.0.zip build
cd build
unzip dpdk-2.0.0.zip
cp ../common_linuxapp dpdk-2.0.0/config
cd dpdk-2.0.0
make install T=x86_64-native-linuxapp-gcc
cp -r x86_64-native-linuxapp-gcc/* $dpdk_install_prefix
cp -r tools $dpdk_install_prefix
rm -rf $dpdk_install_prefix/build
cd ../../
}
function install_cloudhands(){
export RTE_SDK=`pwd`/build/dpdk-2.0.0
cp -r core build
cp -r util build
cp -r assemble build
cp -r app build
cp Makefile build
make -C build/
if [[ ! -d $dpdk_install_prefix/CloudHands ]];then
mkdir $dpdk_install_prefix/CloudHands
fi
cp -r build/core/build/app/* $dpdk_install_prefix/CloudHands
}
function prepare_run_env(){
modprobe uio
insmod $dpdk_install_prefix/kmod/igb_uio.ko
}
install_dpdk
install_cloudhands
prepare_run_env
| true
|
f2cca99754945336db1a92d845431dbf36357c95
|
Shell
|
marceltoben/evandrix.github.com
|
/projects/google-prediction-api/train.sh
|
UTF-8
| 314
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
DATA=$1
AUTH=`cat auth-token`
# Encode the model name.
model=`echo $DATA | perl -pe 's:/:%2F:g'`
# Train a model.
curl -X POST \
-H "Content-Type:application/json" \
-H "Authorization: GoogleLogin auth=$AUTH" \
-d "{data:{}}" \
https://www.googleapis.com/prediction/v1.1/training?data=$model
echo
| true
|
f2e383fb9ec554c360b0d4072cd71aae591b05d8
|
Shell
|
cr8ivecodesmith/server_build
|
/docker_install.sh
|
UTF-8
| 1,667
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Server Build Script
SCRIPT=$(basename $0)
SCRIPTPATH=$(readlink -f $0)
SCRIPTDIR=$(dirname $SCRIPTPATH)
FILENAME="${SCRIPT%.*}"
# This script will always install the latest version of docker.
# docker-compose should be updated accordingly as well. To check the latest
# version of compose go to:
# https://github.com/docker/compose/releases
DOCKER_VER=17.06.2~ce-0~ubuntu
COMPOSE_VER=1.15.0
function log {
level=$1
msg=$2
timestamp=$(date +"%Y-%m-%d %H:%M:%S")
echo "[${timestamp}] ${FILENAME} ${level} - ${msg}" \
>> $SCRIPTDIR/${FILENAME}.log
}
## MAIN ##
if ! [ $(id -u) = 0 ]; then
log ERROR "These commands have to run as root!"
exit 1
fi
log INFO "Updating initial packages"
apt -o Acquire::ForceIPv4=true update --fix-missing
log INFO "Installing initial packages"
apt -o Acquire::ForceIPv4=true install -y \
lsb-release \
apt-transport-https \
ca-certificates \
curl \
software-properties-common
log INFO "Adding docker repo"
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
log INFO "Installing docker"
apt -o Acquire::ForceIPv4=true update --fix-missing \
&& apt -o Acquire::ForceIPv4=true install -y docker-ce=$DOCKER_VER
log INFO "Installing docker-compose"
curl -L \
https://github.com/docker/compose/releases/download/$COMPOSE_VER/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
log INFO "Starting up docker"
service docker start
log INFO "Done"
| true
|
ce8db8e0f42d8ff8a312254e2911ad374194e882
|
Shell
|
jameskr97/etterna
|
/.ci/install_ldoc.sh
|
UTF-8
| 501
| 2.6875
| 3
|
[
"MIT",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
mkdir lua_tmp && cd lua_tmp
# Install various requirements
sudo apt install build-essential libreadline-dev unzip
# Acquire and install Lua
curl -R -O http://www.lua.org/ftp/lua-5.1.5.tar.gz
tar -zxf lua-5.1.5.tar.gz
cd lua-5.1.5
make linux
sudo make install
cd ..
# Acquire and install LuaRocks
wget https://luarocks.org/releases/luarocks-3.9.1.tar.gz
tar zxpf luarocks-3.9.1.tar.gz
cd luarocks-3.9.1
./configure && make
sudo make install
# Install ldoc
luarocks install --local ldoc
| true
|
6e0eeb982a02cf2489b6820423b12fe5b88c1f29
|
Shell
|
hhuangMITRE/dioptra
|
/docker/nginx/include/etc/securing-ai/docker/entrypoint-nginx.sh
|
UTF-8
| 8,545
| 3.359375
| 3
|
[
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
# This Software (Dioptra) is being made available as a public service by the
# National Institute of Standards and Technology (NIST), an Agency of the United
# States Department of Commerce. This software was developed in part by employees of
# NIST and in part by NIST contractors. Copyright in portions of this software that
# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
# to Title 17 United States Code Section 105, works of NIST employees are not
# subject to copyright protection in the United States. However, NIST may hold
# international copyright in software created by its employees and domestic
# copyright (or licensing rights) in portions of software that were assigned or
# licensed to NIST. To the extent that NIST holds copyright in this software, it is
# being made available under the Creative Commons Attribution 4.0 International
# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
# of the software developed or licensed by NIST.
#
# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
# https://creativecommons.org/licenses/by/4.0/legalcode
# Created by argbash-init v2.8.1
# ARG_OPTIONAL_SINGLE([ai-lab-host],[],[AI Lab Service host],[restapi])
# ARG_OPTIONAL_SINGLE([ai-lab-port],[],[AI Lab Service port],[5000])
# ARG_OPTIONAL_SINGLE([mlflow-tracking-host],[],[AI Lab Service host],[mlflow-tracking])
# ARG_OPTIONAL_SINGLE([mlflow-tracking-port],[],[AI Lab Service port],[5000])
# ARG_OPTIONAL_SINGLE([nginx-lab-port],[],[Nginx listening port],[30080])
# ARG_OPTIONAL_SINGLE([nginx-mlflow-port],[],[Nginx listening port],[35000])
# ARG_DEFAULTS_POS()
# ARGBASH_SET_INDENT([ ])
# ARG_HELP([Nginx Entry Point\n])"
# ARGBASH_GO()
# needed because of Argbash --> m4_ignore([
### START OF CODE GENERATED BY Argbash v2.10.0 one line above ###
# Argbash is a bash code generator used to get arguments parsing right.
# Argbash is FREE SOFTWARE, see https://argbash.io for more info
die()
{
local _ret="${2:-1}"
test "${_PRINT_HELP:-no}" = yes && print_help >&2
echo "$1" >&2
exit "${_ret}"
}
begins_with_short_option()
{
local first_option all_short_options='h'
first_option="${1:0:1}"
test "$all_short_options" = "${all_short_options/$first_option/}" && return 1 || return 0
}
# THE DEFAULTS INITIALIZATION - OPTIONALS
_arg_ai_lab_host="restapi"
_arg_ai_lab_port="5000"
_arg_mlflow_tracking_host="mlflow-tracking"
_arg_mlflow_tracking_port="5000"
_arg_nginx_lab_port="30080"
_arg_nginx_mlflow_port="35000"
print_help()
{
printf '%s\n' "Nginx Entry Point
"
printf 'Usage: %s [--ai-lab-host <arg>] [--ai-lab-port <arg>] [--mlflow-tracking-host <arg>] [--mlflow-tracking-port <arg>] [--nginx-lab-port <arg>] [--nginx-mlflow-port <arg>] [-h|--help]\n' "$0"
printf '\t%s\n' "--ai-lab-host: AI Lab Service host (default: 'restapi')"
printf '\t%s\n' "--ai-lab-port: AI Lab Service port (default: '5000')"
printf '\t%s\n' "--mlflow-tracking-host: AI Lab Service host (default: 'mlflow-tracking')"
printf '\t%s\n' "--mlflow-tracking-port: AI Lab Service port (default: '5000')"
printf '\t%s\n' "--nginx-lab-port: Nginx listening port (default: '30080')"
printf '\t%s\n' "--nginx-mlflow-port: Nginx listening port (default: '35000')"
printf '\t%s\n' "-h, --help: Prints help"
}
parse_commandline()
{
while test $# -gt 0
do
_key="$1"
case "$_key" in
--ai-lab-host)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_ai_lab_host="$2"
shift
;;
--ai-lab-host=*)
_arg_ai_lab_host="${_key##--ai-lab-host=}"
;;
--ai-lab-port)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_ai_lab_port="$2"
shift
;;
--ai-lab-port=*)
_arg_ai_lab_port="${_key##--ai-lab-port=}"
;;
--mlflow-tracking-host)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_mlflow_tracking_host="$2"
shift
;;
--mlflow-tracking-host=*)
_arg_mlflow_tracking_host="${_key##--mlflow-tracking-host=}"
;;
--mlflow-tracking-port)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_mlflow_tracking_port="$2"
shift
;;
--mlflow-tracking-port=*)
_arg_mlflow_tracking_port="${_key##--mlflow-tracking-port=}"
;;
--nginx-lab-port)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_nginx_lab_port="$2"
shift
;;
--nginx-lab-port=*)
_arg_nginx_lab_port="${_key##--nginx-lab-port=}"
;;
--nginx-mlflow-port)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_nginx_mlflow_port="$2"
shift
;;
--nginx-mlflow-port=*)
_arg_nginx_mlflow_port="${_key##--nginx-mlflow-port=}"
;;
-h|--help)
print_help
exit 0
;;
-h*)
print_help
exit 0
;;
*)
_PRINT_HELP=yes die "FATAL ERROR: Got an unexpected argument '$1'" 1
;;
esac
shift
done
}
parse_commandline "$@"
# OTHER STUFF GENERATED BY Argbash
### END OF CODE GENERATED BY Argbash (sortof) ### ])
# [ <-- needed because of Argbash
shopt -s extglob
set -euo pipefail
###########################################################################################
# Global parameters
###########################################################################################
readonly ai_lab_host="${_arg_ai_lab_host}"
readonly ai_lab_port="${_arg_ai_lab_port}"
readonly mlflow_tracking_host="${_arg_mlflow_tracking_host}"
readonly mlflow_tracking_port="${_arg_mlflow_tracking_port}"
readonly nginx_lab_port="${_arg_nginx_lab_port}"
readonly nginx_mlflow_port="${_arg_nginx_mlflow_port}"
readonly logname="Container Entry Point"
###########################################################################################
# Secure the container at runtime
#
# Globals:
# None
# Arguments:
# None
# Returns:
# None
###########################################################################################
secure_container() {
if [[ -f /usr/local/bin/secure-container.sh ]]; then
/usr/local/bin/secure-container.sh
else
echo "${logname}: ERROR - /usr/local/bin/secure-container.sh script missing" 1>&2
exit 1
fi
}
###########################################################################################
# Set nginx configuration variables
#
# Globals:
# ai_lab_host
# ai_lab_port
# mlflow_tracking_host
# mlflow_tracking_port
# nginx_lab_port
# nginx_mlflow_port
# Arguments:
# None
# Returns:
# None
###########################################################################################
set_nginx_variables() {
echo "${logname}: INFO - Set nginx variables | \
AI_LAB_HOST=${ai_lab_host} \
AI_LAB_PORT=${ai_lab_port} \
MLFLOW_TRACKING_HOST=${mlflow_tracking_host} \
MLFLOW_TRACKING_PORT=${mlflow_tracking_port} \
NGINX_LAB_PORT=${nginx_lab_port}\
NGINX_MLFLOW_PORT=${nginx_mlflow_port}"
sed -i -e 's/$AI_LAB_HOST/'"${ai_lab_host}"'/g' /etc/nginx/conf.d/default.conf
sed -i -e 's/$AI_LAB_PORT/'"${ai_lab_port}"'/g' /etc/nginx/conf.d/default.conf
sed -i -e 's/$MLFLOW_TRACKING_HOST/'"${mlflow_tracking_host}"'/g' \
/etc/nginx/conf.d/default.conf
sed -i -e 's/$MLFLOW_TRACKING_PORT/'"${mlflow_tracking_port}"'/g' \
/etc/nginx/conf.d/default.conf
sed -i -e 's/$NGINX_LAB_PORT/'"${nginx_lab_port}"'/g' /etc/nginx/conf.d/default.conf
sed -i -e 's/$NGINX_MLFLOW_PORT/'"${nginx_mlflow_port}"'/g' /etc/nginx/conf.d/default.conf
local default_conf=$(cat /etc/nginx/conf.d/default.conf)
echo "${logname}: INFO - Updated contents of /etc/nginx/conf.d/default.conf"
echo "${default_conf}"
}
###########################################################################################
# Start nginx server
#
# Globals:
# None
# Arguments:
# None
# Returns:
# None
###########################################################################################
start_nginx() {
echo "${logname}: INFO - Starting Nginx process"
/usr/sbin/nginx
}
###########################################################################################
# Main script
###########################################################################################
secure_container
set_nginx_variables
start_nginx
# ] <-- needed because of Argbash
| true
|
1a5b2537d0e323c5e2817a0da0ead63efa24db4c
|
Shell
|
sierdzio/tbus
|
/src/tbus-clone
|
UTF-8
| 264
| 3.359375
| 3
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# Clone, connect to existing repositories
. tbus-global
if [ "${1}" = "-h" ] || [ "${1}" = "--help" ]; then
echo "tbus-clone - used to connect to existing repository
At the moment this command is a synonym to git clone."
exit
fi
git clone "$@"
| true
|
3ae0cec6179914f7ed74cc5fbd576d59f169a564
|
Shell
|
panziqiang007/jenkins_demo
|
/deploy_jenkins.sh
|
UTF-8
| 1,042
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Program:
# deploy jenkins
# Version:
# 1.0.1
# History:
# Created on 2018/08/15
# Last modified on 2018/08/16
# 容器名称
# CONTAINER="jenkins_demo"
# 镜像名称(以日期时间为镜像标签,防止重复)
# IMAGE=$CONTAINER":"$(date -d "today" +"%Y%m%d_%H%M%S")
# 删除滚动更新残留的容器
# docker rm "docker ps -a | grep -w $CONTAINER'_'$CONTAINER | awk '{print $1}'"
# 强制删除滚动更新残留的镜像
# docker rmi --force "docker images | grep -w $CONTAINER | awk '{print $3}'"
# 创建新镜像
echo '创建新镜像'
pwd
docker build -t jenkins_demo:v1 .
# 删除 docker-compose.jenkins.yml 文件,防止使用相同镜像
# rm -rf docker-compose.jenkins.yml
# 复制 docker-compose.src.yml 文件,防止污染原文件
# cp docker-compose.src.yml docker-compose.jenkins.yml
# 替换镜像名标志位为最新镜像
# sed -i jenkins_demo:v1 docker-compose.jenkins.yml
# 使用 docker stack 启动服务
docker stack deploy -c docker-compose.jenkins.yml jenkins_demo
| true
|
c648ea4c195630ebcec1565be1c63d9fb3f5ad6d
|
Shell
|
rhenrhee/Whatsminer
|
/upgrade-rootfs/allwinner-rootfs/usr/bin/detect-voltage-info
|
UTF-8
| 2,881
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Script to detect voltage info. Info will be output to file /tmp/voltage-info. (Used by remote-daemon)
#
# Output format:
#
# [MINER_TYPE]_[MINER_VERSION],[POWER_VERSION],[POWER_VALUE_IN_CONFIG_FILE],[FREQ_VALUE_IN_CONFIG_FILE],[POWER_VALUE_IN_RUNNING],[GHsav]
#
# e.g.
#
# m3_v20,000107,17,618,15,15100
#
# Output info
OUTPUT_FILENAME="/tmp/voltage-info"
show_voltage_info() {
echo -n "Detected Voltage Info: "
cat $OUTPUT_FILENAME
echo ""
}
#
# Detect voltage info
#
# miner_type & miner_version
/usr/bin/detect-miner-info
minerinfo=`cat /tmp/miner-info`
miner_type=`echo $minerinfo | cut -d "-" -f2 | tr A-Z a-z`
hb_version=`echo $minerinfo | cut -d "-" -f5`
version_num=`echo ${hb_version##*HB}`
major_version_num=`expr substr "$version_num" 1 1`
miner_version="v""$major_version_num""0" # Force vnx to vn0, e.g.: v23 -> v20
# power_version
pvstr=`readpower | grep "Read power version ="` # e.g.: "Read power version = d0 00 00 00 01 07"
pvstr=`echo $pvstr | cut -d "=" -f2` # " d0 00 00 00 01 07"
power_version=`echo $pvstr | cut -d " " -f4``echo $pvstr | cut -d " " -f5``echo $pvstr | cut -d " " -f6` # "000107"
if [ "$power_version" = "" ]; then
miner_info=`detect-miner-info`
p=`echo $miner_info | grep "\-P1\-"`
if [ "$p" != "" ]; then
power_version="000004"
fi
p=`echo $miner_info | grep "\-P2\-"`
if [ "$p" != "" ]; then
power_version="000105"
fi
p=`echo $miner_info | grep "\-P3\-"`
if [ "$p" != "" ]; then
power_version="000106"
fi
p=`echo $miner_info | grep "\-P5\-"`
if [ "$p" != "" ]; then
power_version="000107"
fi
p=`echo $miner_info | grep "\-P6\-"`
if [ "$p" != "" ]; then
power_version="000022"
fi
fi
if [ "$power_version" != "" ]; then
# power_config_val,freq_config_val
configline=`grep $power_version /etc/config/powers` # "list 'power_limit' '4:000107:17:618:618:618:1:11329:10649:10196:600:7:1:6'"
configline=`echo $configline | cut -d " " -f3` # "'4:000107:17:618:618:618:1:11329:10649:10196:600:7:1:6'"
power_config_val=`echo $configline | cut -d ":" -f3`
freq_config_val=`echo $configline | cut -d ":" -f4`
fi
# power_running_val
cgminerapisummary=`cgminer-api -o summary`
str=`echo ${cgminerapisummary##*Voltage=}`
power_running_val=`echo $str | cut -d "," -f1`
# ghsav
str=`echo ${cgminerapisummary##*MHS av=}`
mhsav=`echo $str | cut -d "," -f1`
mhsav=`echo $mhsav | cut -d "." -f1`
ghsav=`expr $mhsav / 1000`
# Output to $OUTPUT_FILENAME
echo -n "$miner_type" > $OUTPUT_FILENAME
echo -n "_$miner_version" >> $OUTPUT_FILENAME
echo -n ",$power_version" >> $OUTPUT_FILENAME
echo -n ",$power_config_val" >> $OUTPUT_FILENAME
echo -n ",$freq_config_val" >> $OUTPUT_FILENAME
echo -n ",$power_running_val" >> $OUTPUT_FILENAME
echo -n ",$ghsav" >> $OUTPUT_FILENAME
show_voltage_info
| true
|
cbcaf0edbc511775c4ea2322808b690a646c9247
|
Shell
|
dtopuzov/setup
|
/java/install-java.sh
|
UTF-8
| 1,103
| 4.28125
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
########################################################
#
# This script will install Open JDK 1.8, 11 and 13.
# Default in .bash_profile will be `Open JDK 1.8`.
#
########################################################
# shellcheck disable=SC1090
# shellcheck disable=SC2140
# shellcheck disable=SC2181
source "$HOME"/.bash_profile
# Install JDKs
declare -a arr=("1.8" "11" "15")
for i in "${arr[@]}"; do
set +e
VERSION=$(echo $i | cut -d '.' -f2)
$(/usr/libexec/java_home -v "$i" 2>/dev/null | grep $VERSION >/dev/null 2>&1)
EXIT_CODE=$?
set -e
if [ $EXIT_CODE == 0 ]; then
echo "Open JDK $i found."
else
echo "Install Open JDK $VERSION."
{
brew tap adoptopenjdk/openjdk
brew install --cask adoptopenjdk"$VERSION"
} &>"$HOME"/logs/install-java.logs
fi
done
# Reset variables
echo "Set JAVA_HOME in ~/.bash_profile (use JDK 1.8)."
{
sed -i '' '/JAVA_HOME/d' "$HOME"/.bash_profile
echo "export JAVA_HOME=$(/usr/libexec/java_home -v 1.8)" >> "$HOME"/.bash_profile
source "$HOME"/.bash_profile
} &>"$HOME"/logs/install-java.logs
| true
|
d017e86c61e0bfef89b25e121e8bf40f6d1ea9c7
|
Shell
|
urbanautomaton/dotfiles
|
/install.sh
|
UTF-8
| 1,004
| 3.5625
| 4
|
[] |
no_license
|
#! /bin/bash
set -euo pipefail
case $(uname) in
'Darwin')
dircmd='ln -sfh'
;;
'Linux')
dircmd='ln -sfT'
;;
esac
readonly DOTDIRECTORIES="
vim
git_template
env_hooks
"
readonly DOTFILES="
bashrc
bash_aliases
bash_completion
env_hooker
inputrc
vimrc
irbrc
gemrc
pryrc
gitconfig
gitignore_global
gitattributes
gvimrc
ackrc
tmux.conf
tmux-osx.conf
terraformrc
"
for dir in $DOTDIRECTORIES; do
if [[ -n "$dir" ]]; then
$dircmd "${HOME}/dotfiles/${dir}" "${HOME}/.${dir}"
fi
done
for file in $DOTFILES; do
if [[ -n "$file" ]]; then
ln -sf "${HOME}/dotfiles/${file}" "${HOME}/.${file}"
fi
done
mkdir -p ~/.bashrc.d
for script_file in ~/dotfiles/bashrc.d/*; do
script_name=$(basename "${script_file}")
ln -sf "${script_file}" "${HOME}/.bashrc.d/${script_name}"
done
mkdir -p ~/bin
for script_file in ~/dotfiles/bin/*; do
script_name=$(basename "${script_file}")
ln -sf "${script_file}" "${HOME}/bin/${script_name}"
done
| true
|
c0ca48fc6f1e381a2cad12d76a1a057fbdbfc2ca
|
Shell
|
mbektas/gt-coar-lab
|
/templates/construct/post_install.sh.j2
|
UTF-8
| 483
| 3.171875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# {{ copyright }}
# {{ license }}
#
# {{ name }}-{{ variant.upper() }} {{ version }} defaults
#
set -x
export POST_INSTALL_LOG="${PREFIX}/post_install.log"
echo "0.0: start" >> "${POST_INSTALL_LOG}"
ls "${PREFIX}/{{ settings_path }}" || echo "probably ok"
mkdir -p "${PREFIX}/{{ settings_path }}" || echo "settings path probably exists"
cat << EOF > "${PREFIX}/{{ settings_path }}/overrides.json"
{{ overrides }}
EOF
echo "4.0: done" >> "${POST_INSTALL_LOG}"
| true
|
a83d4a426cd713c56972c476ffd5bf0b38b453e7
|
Shell
|
rvpatil1432/bridzelabz-sample
|
/archBckp.sh
|
UTF-8
| 139
| 2.65625
| 3
|
[] |
no_license
|
for file in *.txt *.pdf *.log
do
echo $file;
allFiles=`find var/log -mtime +7`;
echo $allFiles;
tar -cvf $allFiles.tar /Bckp;
done
| true
|
bbe200584607ab52e8ef45ec84efefcc4855098c
|
Shell
|
admin-zoujing/redis
|
/init-redis.sh
|
UTF-8
| 8,513
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
#安装centos7.4安装redis4.0.12主从脚本
#官网地址 https://redis.io/ 图形客户端下载地址:https://redisdesktop.com/download
#1、------------------------------RHEL7主服务器(master)-------------------------------
sourceinstall=/usr/local/src/redis
chmod -R 777 /usr/local/src/redis
#时间时区同步,修改主机名
ntpdate ntp1.aliyun.com
hwclock --systohc
echo "*/30 * * * * root ntpdate -s ntp1.aliyun.com" >> /etc/crontab
sed -i 's|SELINUX=.*|SELINUX=disabled|' /etc/selinux/config
sed -i 's|SELINUXTYPE=.*|#SELINUXTYPE=targeted|' /etc/selinux/config
sed -i 's|SELINUX=.*|SELINUX=disabled|' /etc/sysconfig/selinux
sed -i 's|SELINUXTYPE=.*|#SELINUXTYPE=targeted|' /etc/sysconfig/selinux
setenforce 0 && systemctl stop firewalld && systemctl disable firewalld
rm -rf /var/run/yum.pid
rm -rf /var/run/yum.pid
#查看系统版本号
cat /etc/redhat-release
yum -y install gcc gcc-c++ openssl-devel tcl cmake
#cd /usr/local/src/redis/rpm
#rpm -ivh /usr/local/src/redis/rpm/*.rpm --force --nodeps
#wget http://download.redis.io/releases/redis-4.0.12.tar.gz
#chmod 777 redis-4.0.12.tar.gz
groupadd redis
useradd -g redis -s /sbin/nologin redis
mkdir -pv /usr/local/redis
cd $sourceinstall
tar -zxvf redis-4.0.12.tar.gz -C /usr/local/redis
cd /usr/local/redis/redis-4.0.12
make PREFIX=/usr/local/redis install
make test
cp /usr/local/redis/redis-4.0.12/redis.conf /usr/local/redis
mkdir -pv /usr/local/redis/{logs,backup}
sed -i 's|bind 127.0.0.1|#bind 127.0.0.1|' /usr/local/redis/redis.conf
sed -i 's|protected-mode yes|protected-mode no|' /usr/local/redis/redis.conf
sed -i 's|dir ./|dir /usr/local/redis/backup|' /usr/local/redis/redis.conf
sed -i 's|daemonize no|daemonize yes|' /usr/local/redis/redis.conf
sed -i 's|pidfile /var/run/redis_6379.pid|pidfile /usr/local/redis/logs/redis_6379.pid|' /usr/local/redis/redis.conf
sed -i 's|logfile ""|logfile "/usr/local/redis/logs/redis.log"|' /usr/local/redis/redis.conf
sed -i 's|# requirepass foobared|requirepass sanxin|' /usr/local/redis/redis.conf
chown -Rf redis:redis /usr/local/redis
#二进制程序:
echo 'export PATH=/usr/local/redis/bin:$PATH' > /etc/profile.d/redis.sh
source /etc/profile.d/redis.sh
#头文件输出给系统:
#ln -sv /usr/local/redis/include /usr/include/redis
#库文件输出
#echo '/usr/local/redis/lib' > /etc/ld.so.conf.d/redis.conf
#让系统重新生成库文件路径缓存
#ldconfig
#导出man文件:
#echo 'MANDATORY_MANPATH /usr/local/redis/man' >> /etc/man_db.conf
#source /etc/profile.d/redis.sh
#sleep 5
#source /etc/profile.d/redis.sh
#开机启动脚本
#/usr/local/redis/bin/redis-server /usr/local/redis/redis.conf
#echo '/usr/local/redis/bin/redis-server /usr/local/redis/redis.conf' >> /etc/rc.d/rc.local
#chmod +x /etc/rc.d/rc.local
cat > /usr/lib/systemd/system/redis.service <<EOF
[Unit]
Description=Redis persistent key-value database
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
User=redis
Group=redis
Type=notify
LimitNOFILE=10240
PIDFile=/usr/local/redis/logs/redis_6379.pid
ExecStart=/usr/local/redis/bin/redis-server /usr/local/redis/redis.conf --supervised systemd
#RuntimeDirectory=redis
#RuntimeDirectoryMode=0755
ExecReload=/bin/kill -USR2 \$MAINPID
ExecStop=/bin/kill -SIGINT \$MAINPID
[Install]
WantedBy=multi-user.target
EOF
chmod 755 /usr/lib/systemd/system/redis.service
systemctl daemon-reload
systemctl enable redis.service
#优化了系统参数
cat >> /etc/sysctl.conf <<EOF
fs.file-max = 100000
vm.overcommit_memory = 1
net.core.somaxconn = 1024
EOF
sysctl -p
echo 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' >> /etc/rc.d/rc.local
chmod +x /etc/rc.d/rc.local
echo never > /sys/kernel/mm/transparent_hugepage/enabled
systemctl restart redis.service
#rdb备份脚本
mkdir -pv /home/redis_backup
cat > /usr/local/redis/backup/init-redisbackup.sh <<EOF
#!/bin/bash
PATH=/usr/local/redis/bin:\$PATH
redis-cli -a sanxin SAVE
time=\$(date +"%Y%m%d")
cp /usr/local/redis/backup/dump.rdb /home/redis_backup/\$time.rdb
echo "done!"
before=\$(date -d '2 day ago' +%Y%m%d)
rm -rf /home/redis_backup/\$before*
EOF
chmod 744 /usr/local/redis/backup/init-redisbackup.sh
echo "30 1 * * * root /usr/local/redis/backup/init-redisbackup.sh >/dev/null 2>&1" >> /etc/crontab
crontab /etc/crontab
crontab -l
rm -rf /usr/local/src/redis
#sshpass -p Root123456 scp /home/redis_backup/* root@192.168.1.101:/home/redis_backup
#客户端连接测试:redis-cli (-h 192.168.8.20 -a sanxin)
#-------------------------------安装主从时修改主配置文件-------------------------------
#systemctl stop redis-server.service && netstat -lanput
#cp /usr/local/redis/redis.conf{,.backup}
#sed -i 's|bind 127.0.0.1|bind 192.168.8.20|' /usr/local/redis/redis.conf
#systemctl daemon-reload && systemctl start redis-server.service && netstat -lanput
#1、------------------------------RHEL7从服务器(slave)-------------------------------
#sed -i 's|daemonize no|daemonize yes|' /usr/local/redis/redis.conf
#sed -i 's|logfile ""|logfile "/usr/local/redis/logs/redis.log"|' /usr/local/redis/redis.conf
#sed -i 's|# requirepass foobared|requirepass sanxin|' /usr/local/redis/redis.conf
#客户端连接测试:redis-cli (-h 192.168.8.21)
#-------------------------------安装主从时修改从配置文件-------------------------------
#systemctl stop redis-server.service && netstat -lanput
#cp /usr/local/redis/redis.conf{,.backup}
#sed -i 's|bind 127.0.0.1|bind 192.168.8.21|' /usr/local/redis/redis.conf
#sed -i '/^# slaveof <masterip> <masterport>/a\slaveof 192.168.8.20 6379' /usr/local/redis/redis.conf
#sed -i '/^# masterauth <master-password> /a\masterauth sanxin' /usr/local/redis/redis.conf
#systemctl daemon-reload && systemctl start redis-server.service && netstat -lanput
########------------------------远程连接设置--------------------------------#########
#systemctl stop redis-server.service && netstat -lanput
#sed -i 's|bind 127.0.0.1|#bind 127.0.0.1|' /usr/local/redis/redis.conf
#sed -i 's|protected-mode yes|protected-mode no|' /usr/local/redis/redis.conf
#systemctl daemon-reload && systemctl start redis-server.service && netstat -lanput
#######------------------------从服务器开启AOF备份-------------------------###########
#systemctl stop redis-server.service && netstat -lanput
#sed -i 's|appendonly no|appendonly yes|' /usr/local/redis/redis.conf
#aof与dump备份不同
#aof文件备份与dump文件备份不同。dump文件的编码格式和存储格式与数据库一致,而且dump文件中备份的是数据库的当前快照,意思就是,不管数据之前什么样,只要BGSAVE了,dump文件就会刷新成当前数据库数据。
#当redis重启时,会按照以下优先级进行启动:
# 如果只配置AOF,重启时加载AOF文件恢复数据;
# 如果同时 配置了RBD和AOF,启动是只加载AOF文件恢复数据;
# 如果只配置RBD,启动时将加载dump文件恢复数据。
#注意:只要配置了aof,但是没有aof文件,这个时候启动的数据库会是空的
#在linux环境运行Redis时,如果系统的内存比较小,这个时候自动备份会有可能失败,需要修改系统的vm.overcommit_memory 参数,它有三个选值,是linux系统的内存分配策略:
# 0, 表示内核将检查是否有足够的可用内存供应用进程使用;如果有足够的可用内存,内存申请允许;否则,内存申请失败,并把错误返回给应用进程。
# 1, 表示内核允许分配所有的物理内存,而不管当前的内存状态如何。
# 2, 表示内核允许分配超过所有物理内存和交换空间总和的内存
#Redis官方的说明是,建议将vm.overcommit_memory的值修改为1,可以用下面几种方式进行修改:
# (1)编辑/etc/sysctl.conf ,改vm.overcommit_memory=1,然后sysctl -p 使配置文件生效
# (2)sysctl vm.overcommit_memory=1
# (3)echo 1 > /proc/sys/vm/overcommit_memory
#1.启动redis进入redis目录
#redis-cli
#2.数据备份
#redis 127.0.0.1:6379> auth "yourpassword"
#redis 127.0.0.1:6379> SAVE
#3.恢复数据
##获取备份目录
#redis 127.0.0.1:6379> CONFIG GET dir
#1) "dir"
#2) "/usr/local/redis/backup"
#以上命令 CONFIG GET dir 输出的 redis 备份目录为 /usr/local/redis/backup。
##停止redis服务
##拷贝备份文件到 /usr/local/redis/backup目录下
##重新启动redis服务
| true
|
cb019e36afac37e65468f69929afe15132b800bc
|
Shell
|
handankel/pistuff
|
/init/fan
|
UTF-8
| 1,013
| 4.03125
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#! /bin/bash
# init.d-style control script for the fan control daemon (run-fan.py).
#
# Usage: fan {status|start|stop|restart}
# Fixes over the original:
#  - the daemon directory was assigned to PATH, clobbering the shell's
#    command search path (cat/ps/grep/rm would all fail)
#  - "cd PATH" changed directory to a literal folder named PATH
#  - "2>$1" truncated a file named after the action instead of merging stderr
#  - the stop branch killed an unset $PID; it now reads the pidfile first
SCRIPT_DIR="/opt/scripts/"
DAEMON="run-fan.py"
NAME="fan"
DESC="Fan control"
PIDFILE=/var/run/$NAME.pid
case "$1" in
start)
printf "%-50s" "Starting $NAME..."
cd "$SCRIPT_DIR" || { printf "%s\n" "Cannot cd to $SCRIPT_DIR"; exit 1; }
# Launch detached; fold stderr into stdout and record the background PID.
PID=$("./$DAEMON" > /dev/null 2>&1 & echo $!)
if [ -z "$PID" ]; then
printf "%s\n" "Failed to start $NAME."
else
echo "$PID" > "$PIDFILE"
printf "%s\n" "$NAME started."
fi
;;
status)
printf "%-50s" "Checking $NAME..."
if [ -f "$PIDFILE" ]; then
PID=$(cat "$PIDFILE")
if [ -z "$(ps axf | grep "${PID}" | grep -v grep)" ]; then
printf "%s\n" "Process dead but pidfile exists" "$PIDFILE"
else
echo "Running"
fi
else
printf "%s\n" "$NAME not running"
fi
;;
stop)
printf "%-50s" "Stopping $NAME"
if [ -f "$PIDFILE" ]; then
# Read the recorded PID before signalling (the original never did).
PID=$(cat "$PIDFILE")
kill -HUP "$PID"
printf "%s\n" "Ok"
rm -f "$PIDFILE"
else
printf "%s\n" "pidfile not found"
fi
;;
restart)
$0 stop
$0 start
;;
*)
echo "Usage: $0 {status|start|stop|restart}"
exit 1
esac
| true
|
b5a11a91016bc235062bc436ae7be93c8915ea5d
|
Shell
|
vanthaiunghoa/hts_for_vietnamese
|
/src/scripts/data/sptk_wav.sh
|
UTF-8
| 1,035
| 3.125
| 3
|
[] |
no_license
|
#! /bin/bash
#
# run.sh
# Copyright (C) 2017 truongdo <truongdo@vais.vn>
#
# Distributed under terms of the modified-BSD license.
#
# Resynthesize waveforms from HTS parameter files (.mgc spectral features and
# .lf0 log-F0) using the SPTK toolchain, writing .wav files into $2.
# Usage: sptk_wav.sh <datadir> <outdir>
# NOTE(review): $MGCWINDIM, $LF0WINDIM, $SAMPFREQ, $SAMPKHZ, $MGCORDER,
# $FREQWARP and presumably $fs (frame shift?) come from Config.cfg — confirm.
root=`dirname $0`
source Config.cfg
# Bytes per analysis frame (4-byte floats). NOTE(review): computed but never
# used below — possibly leftover from an earlier version.
BYTEPERFRAME=`expr 4 \* \( $MGCWINDIM + $LF0WINDIM \)`
datadir=$1
odir=$2
mkdir -p $odir
mgcdir=$datadir/mgc
for mgc in `find $datadir -iname "*.mgc*" | sort`;
do
base=`basename $mgc .mgc`
echo $base
# Derive the matching lf0 path by substituting "mgc" -> "lf0" in the name.
lf0=`echo $mgc | sed "s/mgc/lf0/g"`
# Convert log-F0 to pitch period; -1.0E+10 is the unvoiced magic value.
sopr -magic -1.0E+10 -EXP -INV -m $SAMPFREQ -MAGIC 0.0 $lf0 > $odir/${base}.pit || exit 1
# Low-pass / high-pass filter coefficients for mixed excitation.
lfil=`perl $root/../makefilter.pl $SAMPFREQ 0`
hfil=`perl $root/../makefilter.pl $SAMPFREQ 1`
# Unvoiced component: zero pitch -> noise excitation, high-pass filtered.
sopr -m 0 $odir/${base}.pit | excite -n -p $fs | dfs -b $hfil > $odir/${base}.unv || exit 1
# Voiced + unvoiced excitation through the MGLSA synthesis filter, then
# float->short conversion into raw audio.
excite -n -p $fs $odir/${base}.pit | dfs -b $lfil | vopr -a $odir/${base}.unv | \
mglsadf -P 5 -m ${MGCORDER} -p $fs -a $FREQWARP -c 0 $mgc | \
x2x +fs -o > $odir/${base}.raw || exit 1
raw2wav -s $SAMPKHZ -d $odir $odir/${base}.raw || exit 1
# Drop intermediates, keeping only the .wav produced by raw2wav.
rm $odir/${base}.unv $odir/${base}.pit $odir/${base}.raw
done
| true
|
46be3219e4096f9001edbbe10060487b8778224c
|
Shell
|
BuriedInTheGround/nixpkgs-config
|
/wm/polybar/scripts/auto-toggle-polybar.sh
|
UTF-8
| 604
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Poll bspwm twice a second and auto-hide polybar while the focused node is
# fullscreen, restoring it (and the 32px top padding) when fullscreen ends.
# State is persisted in ~/.auto-toggle-polybar: "" = visible, "auto-hidden".
state_file=~/.auto-toggle-polybar
while :; do
    state=$(head -n 1 "$state_file")
    fullscreen=$(bspc query -N -n focused.fullscreen)
    if [[ $state == "" && -n $fullscreen ]]; then
        # Entering fullscreen: hide the bar and reclaim its padding.
        polybar-msg cmd hide
        bspc config top_padding 0
        echo "auto-hidden" > "$state_file"
    elif [[ $state == "auto-hidden" && -z $fullscreen ]]; then
        # Leaving fullscreen: show the bar again.
        polybar-msg cmd show
        bspc config top_padding 32
        echo "" > "$state_file"
    fi
    sleep 0.5
done
| true
|
f2484f6bd92f278233d6d318b97ea569db199ff3
|
Shell
|
leizhilong/my-scripts
|
/fedora-init-script/init-fedora.sh
|
UTF-8
| 2,068
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
##########################install###############################################
# Fedora workstation bootstrap: strip unwanted desktop apps, refresh packages,
# enable third-party repos (Chrome, VS Code), and install common dev tooling.
# Must be run as root (uses dnf/rpm/setenforce directly, without sudo).
dnf erase libreoffice* qemu* libvirt* cheese rhythmbox gnome-maps gnome-photos -y
dnf autoremove
dnf clean all
dnf check-update
dnf update -y
dnf install fedora-workstation-repositories -y
dnf config-manager --set-enabled google-chrome
# add vscode repo
rpm --import https://packages.microsoft.com/keys/microsoft.asc
sh -c 'echo -e "[code]\nname=Visual Studio Code\nbaseurl=https://packages.microsoft.com/yumrepos/vscode\nenabled=1\ngpgcheck=1\ngpgkey=https://packages.microsoft.com/keys/microsoft.asc" > /etc/yum.repos.d/vscode.repo'
# install tools which are usually used in working
# NOTE(review): package names "cvs"/"svn" — "svn" is usually packaged as
# "subversion" on Fedora; confirm these resolve.
dnf install ctags ack dos2unix cvs svn \
git gitg htop tig zsh terminator \
wireshark telnet httpd filezilla \
golang rust nodejs \
vim code meld pacmanager \
google-chrome-stable thunderbird samba \
gcc gcc-c++ expect the_silver_searcher -y
##########################config###############################################
# disable selinux
# Permanently disable via config, then switch to permissive for this session.
sed -i 's@SELINUX=enforcing@SELINUX=disabled@g' /etc/selinux/config
setenforce 0
# close unuseful services
systemctl disable firewalld && sudo systemctl stop firewalld
# git config
git config --global user.name "Zhilong Lei"
git config --global user.email "leizhilong@me.com"
git config --global color.ui true
git config --global alias.co checkout
git config --global alias.ci commit
git config --global alias.st status
git config --global alias.br branch
git config --global alias.up "pull --rebase"
###############################################################################
# zsh
sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
# cp ./zshrc ~/.zshrc
# The here-doc below only PRINTS manual follow-up instructions for ~/.zshrc;
# it does not modify any file.
cat << EOF
## TODO add these to ~/.zshrc
ZSH_THEME="gentoo"
...
plugins=(
git
mvn
docker
npm
nvm
pip
python
kubectl
helm
man
python
golang
rsync
systemd
yarn
)
...
alias l='ls -ltr'
alias c='clear'
alias f='find . -iname'
alias p='ps -ef|grep java'
alias grep='grep --color=always'
EOF
echo "All done, Good luck!"
| true
|
eec5157d1dcbdbbf48214ed5afbb47de34bd7eb1
|
Shell
|
petusa/petusa.github.io
|
/ds/ds-wp-preview-on-gbucket.sh
|
UTF-8
| 3,820
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Mirror a WordPress site with httrack, rewrite its links into a versioned
# folder, and upload the result to a Google Cloud Storage preview bucket.
# Usage: ds-wp-preview-on-gbucket.sh <source-domain> <target-bucket-domain>
website=$1
[ ! "$website" ] && echo "Please provide an existing WP website domain, e.g. gapps-in.doctusoft.com" && exit 1
target_bucket_root=$2
[ ! "$target_bucket_root" ] && echo "Please provide a target domain where your WP website should be uploaded, e.g. gapps.doctusoft.com" && exit 1
target_version_folder=v
workdir=/srv/ds_wp_websites
additional_links_file=wp-additional-links.txt
httrack_work_folder=httrack-work-folder
echo "workdir set to $workdir"
echo ""
echo "1. Grabbing static content from source website..."
echo ""
START=$(date +%s)
httrack --update -X -C2 http://$website -c16 -A2500000 -O $workdir/$httrack_work_folder
END=$(date +%s)
DIFF=$(($END - $START))
echo ""
echo "finished (in $DIFF seconds)"
echo ""
# Fixed: the original echoed the undefined variable $additional_link_file
# (note the missing "s"), so the filename never appeared in the message.
echo "2. Grabbing additional files (robots.txt, ...) listed in $additional_links_file"
echo ""
# Prefix every listed path with the source domain and fetch each file.
sed -e "s/^/http:\/\/${website}\//" $additional_links_file | xargs wget -N --directory-prefix=$workdir/$httrack_work_folder/$website
echo ""
echo "finished"
echo ""
echo "3. Grabbing sitemap files..."
echo ""
echo "TODO change sitemap file to a non WP/Yoast SEO style"
# Pull every sub-sitemap referenced by the Yoast sitemap index.
wget --quiet "http://${website}/sitemap_index.xml" --output-document - | egrep -o "https?://[^<]+" | wget -i - -N --directory-prefix=$workdir/$httrack_work_folder/$website
echo ""
echo "finished"
echo ""
echo "4. Creating versioned folder..."
echo ""
now=$(date +%Y_%m_%d_%H%M%S)
destination_folder=$target_bucket_root/$target_version_folder/$now
mkdir -p $workdir/$destination_folder
cp -a $workdir/httrack-work-folder/$website/* $workdir/$destination_folder
echo "folder created: '$destination_folder'"
echo ""
echo "5. Making extra transformations on versioned folder content..."
echo ""
# Rewrite absolute source-domain URLs to point at the versioned bucket path.
echo 'grep -rl "http://${website}" $workdir/$website/$now/ | xargs sed -i "s/http:\/\/${website}/http:\/\/${target_bucket_root}\/${target_version_folder}\/${now}/g"'
grep -rl "http://${website}" $workdir/$destination_folder/ | xargs sed -i "s/http:\/\/${website}/http:\/\/${target_bucket_root}\/${target_version_folder}\/${now}/g"
echo ""
echo "TODO: temporarily rewriting js/gapps/elision specific theme root"
grep -rli --include \*.js "theme_root = 'wp-content" $workdir/$destination_folder/ | xargs sed -i "s/theme_root = 'wp-content\/themes\/elision\/index.html'/theme_root = 'http:\/\/${target_bucket_root}\/${target_version_folder}\/${now}\/wp-content\/themes\/elision\/'/g"
echo "transformations done"
echo ""
echo "Fixing canonical urls in index.html pages"
echo ""
current=$(pwd)
cd $workdir
# creates: <link rel="canonical" href="index.html" /> ==>> <link rel="canonical" href="http://gapps.doctusoft.com/v/2016_04_04_155037/biztonsag/" />
grep -rli --include index.html "<link rel=\"canonical\"" $destination_folder/ | sed "s/index.html$//" | xargs -I {INPUT} sed -i "s|<link rel=\"canonical\" href=\"index.html\"|<link rel=\"canonical\" href=\"http:\/\/{INPUT}\"|g" ${workdir}/{INPUT}/index.html #sed -i "s|<link rel=\"canonical\" href=|<link rel=\"canonical\" href=\"http:\/\/{INPUT}\"|g" ${${workdir}/{INPUT}/index.html}
cd $current
echo ""
echo "finished"
echo
echo "TODO: temporarily copying wp-content from local server folder"
echo ""
echo $workdir
echo $destination_folder
/root/gsutil/gsutil -m rsync -C -r /var/www/html/googleapps.doctusoft.com/www/wp-content/uploads/ $workdir/$destination_folder/wp-content/uploads
echo ""
echo "6. Uploading versioned folder to a target Google test bukcet..."
echo ""
START=$(date +%s)
/root/gsutil/gsutil -m rsync -r $workdir/$target_bucket_root/$target_version_folder/ gs://$target_bucket_root/$target_version_folder
END=$(date +%s)
DIFF=$(($END - $START))
echo ""
echo "finished (in $DIFF seconds)"
echo ""
echo "Please check the website hosted from test bucket: $destination_folder. Set it as the default if no error found."
echo ""
CONTEXT=$destination_folder
| true
|
29090999dff3df6dac7a3e0b158897a142b7ae05
|
Shell
|
jdanders/homedir
|
/bash-completion/share/bash-completion/completions/aptitude
|
UTF-8
| 4,219
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
# Debian aptitude(1) completion -*- shell-script -*-
# Pick the fast grep-status implementation when available, otherwise fall
# back to scraping /var/lib/dpkg/status directly. Both variants list
# packages currently on hold whose names start with $1.
_have grep-status && {
    _comp_dpkg_hold_packages()
    {
        grep-status -P -e "^$1" -a -FStatus 'hold' -n -s Package
    }
} || {
    _comp_dpkg_hold_packages()
    {
        command grep -B 2 'hold' /var/lib/dpkg/status |
            awk "/Package: $1/ { print \$2 }"
    }
}
# Completion entry point for aptitude; registered via `complete` below.
_aptitude()
{
    local cur prev words cword
    _init_completion || return
    # Scan the command line for the aptitude sub-command already typed, so
    # we can complete package names appropriate to that sub-command.
    local special i
    for ((i = 1; i < ${#words[@]} - 1; i++)); do
        if [[ ${words[i]} == @(@(|re)install|@(|un)hold|@(|un)markauto|@(dist|full|safe)-upgrade|download|show|forbid-version|purge|remove|changelog|why@(|-not)|keep@(|-all)|build-dep|@(add|remove)-user-tag|versions) ]]; then
            special=${words[i]}
            break
        fi
    done
    if [[ -n $special ]]; then
        case $special in
            install | hold | markauto | unmarkauto | dist-upgrade | full-upgrade | \
                safe-upgrade | download | show | changelog | why | why-not | build-dep | \
                add-user-tag | remove-user-tag | versions)
                COMPREPLY=($(_xfunc apt-cache _apt_cache_packages))
                return
                ;;
            purge | remove | reinstall | forbid-version)
                # Only installed packages make sense for these.
                COMPREPLY=(
                    $(_xfunc dpkg _comp_dpkg_installed_packages "$cur"))
                return
                ;;
            unhold)
                COMPREPLY=($(_comp_dpkg_hold_packages "$cur"))
                return
                ;;
        esac
    fi
    # Option-argument completions keyed on the previous word.
    case $prev in
        # don't complete anything if these options are found
        autoclean | clean | forget-new | search | upgrade | update | keep-all)
            return
            ;;
        -!(-*)S)
            _filedir
            return
            ;;
        --display-format | --width | -!(-*)[wFo])
            return
            ;;
        --sort | -!(-*)O)
            COMPREPLY=($(compgen -W 'installsize installsizechange debsize
                name priority version' -- "$cur"))
            return
            ;;
        --target-release | --default-release | -!(-*)t)
            # Derive release names (stable/testing/...) from apt-cache policy.
            COMPREPLY=($(apt-cache policy |
                command grep "release.o=Debian,a=$cur" |
                command sed -e "s/.*a=\(\w*\).*/\1/" | uniq 2>/dev/null))
            return
            ;;
    esac
    if [[ $cur == -* ]]; then
        local opts=" $($1 --help 2>&1 | command sed -e \
            's/--with(out)-recommends/--without-recommends\n--with-recommends/' |
            _parse_help - | tr '\n' ' ') "
        # Exclude some mutually exclusive options
        for i in "${words[@]}"; do
            [[ $i == -u ]] && opts=${opts/ -i / }
            [[ $i == -i ]] && opts=${opts/ -u / }
        done
        # Do known short -> long replacements; at least up to 0.8.12, --help
        # outputs mostly only short ones.
        COMPREPLY=($opts)
        for i in "${!COMPREPLY[@]}"; do
            case ${COMPREPLY[i]} in
                -h) COMPREPLY[i]=--help ;;
                -s) COMPREPLY[i]=--simulate ;;
                -d) COMPREPLY[i]=--download-only ;;
                -P) COMPREPLY[i]=--prompt ;;
                -y) COMPREPLY[i]=--assume-yes ;;
                -F) COMPREPLY[i]=--display-format ;;
                -O) COMPREPLY[i]=--sort ;;
                -W) COMPREPLY[i]=--show-why ;;
                -w) COMPREPLY[i]=--width ;;
                -V) COMPREPLY[i]=--show-versions ;;
                -D) COMPREPLY[i]=--show-deps ;;
                -v) COMPREPLY[i]=--verbose ;;
                -t) COMPREPLY[i]=--target-release ;;
                -q) COMPREPLY[i]=--quiet ;;
            esac
        done
        COMPREPLY=($(compgen -W '${COMPREPLY[@]}' -- "$cur"))
    else
        # No sub-command yet: offer the full list of aptitude actions.
        COMPREPLY=($(compgen -W 'update upgrade safe-upgrade forget-new
            clean autoclean install reinstall remove hold unhold purge markauto
            unmarkauto why why-not dist-upgrade full-upgrade download search
            show forbid-version changelog keep keep-all build-dep add-user-tag
            remove-user-tag versions' -- "$cur"))
    fi
} &&
    complete -F _aptitude -o default aptitude aptitude-curses
# ex: filetype=sh
| true
|
e2a5ea59f91fe9bac65b31979041eb8bc77175e2
|
Shell
|
sandeep-majumdar/Arius
|
/install/install.sh
|
UTF-8
| 463
| 3.015625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Download and unpack the latest release.zip of woutervanranst/arius.
# From https://blog.markvincze.com/download-artifacts-from-a-latest-github-release-in-sh-and-powershell/
# Hardened: abort on any failure (the original would happily wget a garbage
# URL if the curl/sed step failed) and quote all expansions.
set -euo pipefail
LATEST_RELEASE=$(curl -L -s -H 'Accept: application/json' https://github.com/woutervanranst/arius/releases/latest)
# NOTE(review): extracting tag_name from JSON with sed is fragile; prefer jq
# if it is available on target machines.
LATEST_VERSION=$(echo "$LATEST_RELEASE" | sed -e 's/.*"tag_name":"\([^"]*\)".*/\1/')
[ -n "$LATEST_VERSION" ] || { echo "Could not determine latest version" >&2; exit 1; }
ARTIFACT_URL="https://github.com/woutervanranst/arius/releases/download/$LATEST_VERSION/release.zip"
wget "$ARTIFACT_URL"
unzip release.zip
| true
|
a822c18d474278a08f6443c15fcb45d25ceaa248
|
Shell
|
jeferrb/AzureTemplates
|
/scripts_gpu_gramos/1-setup_gpu_env.sh
|
UTF-8
| 3,942
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision an Azure NV-series GPU VM (Ubuntu 16.04): NVIDIA GRID driver,
# CUDA, Singularity, an Azure Files mount, and seismic-processing payloads.
# Intended to be run once as a VM custom-script extension; $1 was apparently
# meant to be the storage-account key (see NOTE near the bottom).
export DEBIAN_FRONTEND=noninteractive
sudo apt-get update
sudo apt-get upgrade -y
sudo apt-get dist-upgrade -y
sudo apt-get install build-essential ubuntu-desktop make -y tmux zsh clinfo
sudo apt-get install build-essential gcc make -y
# sudo sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
# Disable the Nouveau kernel driver, which is incompatible with the NVIDIA driver. (Only use the NVIDIA driver on NV VMs.) To do this, create a file in /etc/modprobe.d named nouveau.conf
# with the following contents:
sudo bash -c 'echo "blacklist nouveau" >> /etc/modprobe.d/nouveau.conf'
sudo bash -c 'echo "blacklist lbm-nouveau" >> /etc/modprobe.d/nouveau.conf'
#Reboot the VM and reconnect. Exit X server:
sudo systemctl stop lightdm.service
# GRID driver 384.73, fetched via Microsoft's redirect link.
wget -O NVIDIA-Linux-x86_64-384.73-grid.run https://go.microsoft.com/fwlink/?linkid=849941
chmod +x NVIDIA-Linux-x86_64-384.73-grid.run
sudo ./NVIDIA-Linux-x86_64-384.73-grid.run --ui=none --no-questions --accept-license --disable-nouveau
# When you're asked whether you want to run the nvidia-xconfig utility to update your X configuration file, select Yes.
sudo cp /etc/nvidia/gridd.conf.template /etc/nvidia/gridd.conf
#Add the following to /etc/nvidia/gridd.conf:
sudo bash -c 'echo "IgnoreSP=TRUE" >> /etc/nvidia/gridd.conf'
#Cuda:
CUDA_REPO_PKG=cuda-repo-ubuntu1604_9.0.176-1_amd64.deb
wget -O /tmp/${CUDA_REPO_PKG} http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/${CUDA_REPO_PKG}
sudo dpkg -i /tmp/${CUDA_REPO_PKG}
sudo apt-key adv --fetch-keys http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/7fa2af80.pub
rm -f /tmp/${CUDA_REPO_PKG}
sudo apt-get update
sudo apt-get install -y cuda-drivers cuda
# NOTE(review): the repo package above pins CUDA 9.0 but these PATH entries
# reference cuda-9.1 — confirm which version actually lands on the VM.
bash -c 'echo "export PATH=/usr/local/cuda-9.1/bin${PATH:+:${PATH}}" >> .bashrc'
bash -c 'echo "export LD_LIBRARY_PATH=/usr/local/cuda-9.1/lib64\${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}" >> .bashrc'
sudo apt install -y ocl-icd-opencl-dev
# sudo yum install ocl-icd ocl-icd-devel mesa-libGL-devel -y
# sudo reboot now
# Optionally downgrade to gcc 4.8 (needed by older CUDA toolchains) when the
# caller exports OLD_GCC before running this script.
if [[ $OLD_GCC ]]; then
#statements
sudo apt-get -y purge gcc
sudo apt-get -y autoclean
sudo apt-get -y autoremove
sudo apt-get -y install python-software-properties
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
sudo apt-get -y update
sudo apt-get -y install gcc-4.8
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 50
fi
#Reboot the VM and proceed to verify the installation.
sudo apt-get install -y wget make gcc libgfortran3 tmux htop git sysstat libibnetdisc-dev openmpi-bin libopenmpi-dev libhdf5-openmpi-dev bc automake m4 cmake
# Installing singularity
VERSION=2.4.2
wget -q https://github.com/singularityware/singularity/releases/download/$VERSION/singularity-$VERSION.tar.gz
tar xvf singularity-$VERSION.tar.gz
cd singularity-$VERSION
echo "libgomp.so" >> etc/nvliblist.conf
./configure --prefix=/usr/local
make -j
sudo make install
sudo mkdir /home/username/mymountpoint
echo "${1}" > pass
# SECURITY NOTE(review): the line below immediately overwrites the key passed
# as $1 with a hardcoded storage-account key committed to source control.
# This credential should be rotated and supplied via $1 / a secret store only.
echo "gGEn7CeoUxlkf/EY6sUlrZFg4ebJw3ZkjJ0QvZ5viW0ES+bRDllVwLQy17M9PcWaM4PoRGhqycd9BFE7OadAqg==" > pass
sudo bash -c 'echo "//test1diag281.file.core.windows.net/shared-fs /home/username/mymountpoint cifs nofail,vers=3.0,username=test1diag281,password=`cat pass`,dir_mode=0777,file_mode=0777,serverino" >> /etc/fstab'
rm pass
sudo mount -a
#For updates:
# sudo apt-get update
# sudo apt-get upgrade -y
# sudo apt-get dist-upgrade -y
# sudo apt-get install cuda-drivers
# sudo reboot
# Put the files located at "mymountpoint" in Home
cd
cp -r ~/mymountpoint/OpenCL-seismic-processing-tiago .
cp -r ~/mymountpoint/Data .
# pull the opencl imagem from singularity
singularity pull docker://nvidia/opencl
# changing the opencl image name
mv opencl.simg opencl.img
| true
|
7147b9aeb66ea412a3f4e2207fa52d15dd47e346
|
Shell
|
jakutis/dotfiles
|
/bin/syncandroid
|
UTF-8
| 661
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Two-way sync of Music and Audiobooks between the desktop and an Android
# device mounted at /mnt/android (toggled via the external `mymount` helper).
ROOT="/mnt/android"
# Refuse to run if something already appears under the mount point: the
# script expects to perform the mount itself.
if [ -n "$(ls --almost-all "$ROOT")" ]
then
    echo "Error: android has to be unmounted" 1>&2
    exit 1
fi
# Keep the rsync command and its options in an array instead of a
# whitespace-split string, so the invocation is splitting-safe.
RSYNC=(rsync --recursive --verbose --progress --size-only --inplace)
mymount android || exit 1
mkdir --parents "$ROOT/sdcard1/Music" || exit 1
mkdir --parents "$ROOT/sdcard1/Audiobooks" || exit 1
# Sync in both directions so additions on either side propagate.
"${RSYNC[@]}" "$HOME/Desktop/sync/music/" "$ROOT/sdcard1/Music/" || exit 1
"${RSYNC[@]}" "$ROOT/sdcard1/Music/" "$HOME/Desktop/sync/music/" || exit 1
"${RSYNC[@]}" "$HOME/Desktop/sync/audiobook/" "$ROOT/sdcard1/Audiobooks/" || exit 1
"${RSYNC[@]}" "$ROOT/sdcard1/Audiobooks/" "$HOME/Desktop/sync/audiobook/" || exit 1
# Second mymount toggles the mount off again.
mymount android || exit 1
| true
|
113e7af896e02d7ff1690c061c776f5812d39465
|
Shell
|
pld-linux/onelogin-ldap_connector
|
/ol-ldapc.init
|
UTF-8
| 2,429
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/sh
#
# ol-ldapc Onelogin Directory Integration
# chkconfig: 345 <start_level> <stop_level>
# description: ol-ldapc Onelogin Directory Integration
# processname: java
# config: /etc/sysconfig/ol-ldapc
# pidfile: /var/run/ol-ldapc.pid
#
# PLD-Linux SysV init script: msg_* and daemon/killproc helpers come from
# /etc/rc.d/init.d/functions.
# Source function library
. /etc/rc.d/init.d/functions
# Get network config
. /etc/sysconfig/network
# Check that networking is up.
if is_yes "${NETWORKING}"; then
if [ ! -f /var/lock/subsys/network -a "$1" != stop -a "$1" != status ]; then
msg_network_down "OneLogin LDAP Connector"
exit 1
fi
else
exit 0
fi
# Connector defaults; all of these may be overridden by the sysconfig file.
BASEDN="dc=example,dc=org"
TOKEN=""
SYNC_INTERVAL=30
OBJECT_CLASSES="posixAccount"
JAVA_OPTIONS="-XX:-UseGCOverheadLimit -Xmx64M -Xms64M"
USER="http"
OPTIONS=""
# Get service config - may override defaults
[ -r /etc/sysconfig/ol-ldapc ] && . /etc/sysconfig/ol-ldapc
appdir="/usr/lib/ol-ldapc"
pidfile="/var/run/ol-ldapc.pid"
logdir="/var/log/ol-ldapc"
# Start the connector JVM as $USER, writing its PID to $pidfile.
start() {
# Check if the service is already running?
if [ -f /var/lock/subsys/ol-ldapc ]; then
msg_already_running "OneLogin LDAP Connector"
return
fi
msg_starting "OneLogin LDAP Connector"
# make pid and log dir accessilbe for changed user
touch "$pidfile"
chown "$USER" "$pidfile"
chown "$USER" "$logdir"
# using ssd loses pid. so force rc-logging=yes, also when using ssd need to pass --fork
RC_LOGGING=yes
daemon \
--chdir "$appdir" \
--user "$USER" \
--pidfile "$pidfile" \
--makepid \
java -jar $appdir/ldap-connector.jar "$@" \
--sync-interval "$SYNC_INTERVAL" \
--object-classes "$OBJECT_CLASSES" \
--base-dn "$BASEDN" \
--token "$TOKEN" \
$OPTIONS
RETVAL=$?
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/ol-ldapc
}
# Stop the connector and remove the subsys lock.
stop() {
if [ ! -f /var/lock/subsys/ol-ldapc ]; then
msg_not_running "OneLogin LDAP Connector"
return
fi
# Stop daemons.
msg_stopping "OneLogin LDAP Connector"
killproc --pidfile $pidfile java
rm -f /var/lock/subsys/ol-ldapc
}
# Restart only if the service is currently running; otherwise return $1.
condrestart() {
if [ ! -f /var/lock/subsys/ol-ldapc ]; then
msg_not_running "OneLogin LDAP Connector"
RETVAL=$1
return
fi
stop
start
}
RETVAL=0
# See how we were called.
case "$1" in
  start)
	start
	;;
  stop)
	stop
	;;
  restart)
	stop
	start
	;;
  try-restart)
	condrestart 0
	;;
  force-reload)
	condrestart 7
	;;
  status)
	status --pidfile $pidfile ol-ldapc java
	RETVAL=$?
	;;
  *)
	msg_usage "$0 {start|stop|restart|try-restart|reload|force-reload|status}"
	exit 3
esac
exit $RETVAL
| true
|
e3095cef51b4c79b8b1381526a2fce66d167e1f4
|
Shell
|
Rich143/dotfiles
|
/install.sh
|
UTF-8
| 5,186
| 3.34375
| 3
|
[] |
no_license
|
# Entries symlinked into $HOME with a leading dot (e.g. vimrc -> ~/.vimrc).
DOTS="vim vimrc tmux.conf zshrc gitconfig ag-ignore ctags gitignore hammerspoon"
# Entries symlinked into $HOME without a leading dot.
NON_DOTS="bin"
# macOS preference plists linked into ~/Library/Preferences.
PREFERENCES="com.googlecode.iterm2.plist"
PREFERENCES_DEST_DIR=~/Library/Preferences
check_exists() {
    # Ensure the path $1 is clear before linking. If something already exists
    # there, interactively offer to delete it or move it aside as "$1.bak".
    # Returns 0 when the path is clear (caller may link), 1 when kept.
    local file=$1
    if [ -L "$file" -o -f "$file" -o -d "$file" ]; then
        echo "File exists already: $file"
        read -p "Do you want to delete it? " -n 1 -r
        echo
        if [[ $REPLY =~ ^[Yy]$ ]];
        then
            # -r so directories (matched by -d above) are removed too; the
            # original's plain `rm -f` silently failed on directories.
            rm -rf -- "$file"
        else
            read -p "Do you want to save a backup and replace it? " -n 1 -r
            echo
            if [[ $REPLY =~ ^[Yy]$ ]];
            then
                echo "Moving $file to $file.bak"
                echo "mv $file $file.bak"
                mv -- "$file" "$file.bak"
            else
                echo "Skipping $file"
                return 1
            fi
        fi
    fi
}
link_file() {
    # Create a symlink from $1 (source) to $2 (destination), logging it.
    local src=$1 dst=$2
    echo "linking $src to $dst"
    # Quote both paths (the original left them unquoted, which broke on
    # paths containing spaces); -- guards against dash-prefixed names.
    ln -s -- "$src" "$dst"
}
install_homebrew() {
    # Run Homebrew's official installer unless `brew` is already on PATH.
    echo "Checking if homebrew installed"
    if ! command -v brew >/dev/null 2>&1; then
        /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
    else
        echo "Homebrew already installed"
    fi
}
install_oh_my_zsh() {
    # Run the official oh-my-zsh installer unconditionally.
    # NOTE(review): assumed the installer is idempotent when ~/.oh-my-zsh
    # already exists — confirm before re-running on a provisioned machine.
    sh -c "$(curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
}
# Print "<label> installed already" and return 1 when formula $1 is present;
# return 0 (meaning "go ahead and install") when it is missing.
#   $1 - Homebrew formula name
#   $2 - optional display label for the message (defaults to the formula)
brew_formula_missing() {
    local formula=$1
    local label=${2:-$1}
    if brew ls --versions "$formula" > /dev/null; then
        echo "$label installed already"
        return 1
    fi
    return 0
}

# Install every Homebrew package this setup depends on, skipping ones that
# are already present. Fixes the original's `brew ls --version` typo (missing
# trailing "s") on the node, fzf, tree and pyenv-virtualenv checks, which
# queried brew's own version (always succeeding) instead of the formula.
install_homebrew_packages() {
    if brew_formula_missing the_silver_searcher; then
        brew install the_silver_searcher
    fi
    if brew_formula_missing universal-ctags; then
        brew tap universal-ctags/universal-ctags
        #brew install --with-jansson --HEAD universal-ctags/universal-ctags/universal-ctags
        brew install --HEAD universal-ctags/universal-ctags/universal-ctags
    fi
    if brew_formula_missing macvim; then
        brew install macvim
    fi
    if brew_formula_missing llvm; then
        brew install llvm && echo "llvm installed, to setup clangd, symlink clangd into your path"
    fi
    # hammerspoon is a cask; detect it via its config directory instead.
    if [ -d ~/.hammerspoon ]; then
        echo "hammerspoon installed already"
    else
        brew install hammerspoon
    fi
    if brew_formula_missing pyenv; then
        brew install pyenv
    fi
    if brew_formula_missing font-fira-mono-nerd-font "fira-mono nerf font"; then
        brew tap homebrew/cask-fonts
        brew install font-fira-mono-nerd-font
    fi
    if brew_formula_missing speedtest_cli; then
        brew install speedtest_cli
    fi
    if brew_formula_missing pipx; then
        brew install pipx
        pipx ensurepath
    fi
    if brew_formula_missing doxygen; then
        brew install doxygen
    fi
    if brew_formula_missing node "node.js"; then
        brew install node
    fi
    if brew_formula_missing fzf; then
        brew install fzf
        # fzf ships an extra post-install step for shell keybindings.
        $(brew --prefix)/opt/fzf/install
    fi
    if brew_formula_missing tree; then
        brew install tree
    fi
    if brew_formula_missing pyenv-virtualenv; then
        brew install pyenv-virtualenv
    fi
}
install_zinit() {
    # Bootstrap zinit via its official installer when ~/.zinit is absent.
    ZINIT_DIRECTORY=~/.zinit
    [ -d "$ZINIT_DIRECTORY" ] && return 0
    sh -c "$(curl -fsSL https://git.io/zinit-install)"
}
# Install node_js, required for coc.nvim
install_node_js() {
    # Skip the download entirely when a `node` binary is already on PATH.
    echo "Checking if node.js installed"
    if ! command -v node >/dev/null 2>&1; then
        curl -sL install-node.now.sh/lts | zsh
    else
        echo "node.js already installed"
    fi
}
install_python_packages() {
    # Pin Python 3.10.0 as the global pyenv version and install the
    # virtualenv tooling into it.
    local py_version=3.10.0
    pyenv install "$py_version"
    pyenv global "$py_version"
    python -m pip install virtualenv
    python -m pip install virtualenvwrapper
}
# Link each whitespace-separated entry of $1 from $PWD to the destination
# prefix $2 (e.g. "$HOME/." for dotfiles, "$HOME/" for plain entries),
# prompting via check_exists when the target already exists.
link_entries() {
    local entries=$1 dest_prefix=$2
    local file dst src res
    for file in $entries; do
        dst="${dest_prefix}${file}"
        src=$PWD/$file
        echo "checking: $dst"
        check_exists "$dst"
        res=$?
        if [ $res -eq 0 ]; then
            link_file "$src" "$dst"
        fi
    done
}

main() {
    install_homebrew
    install_homebrew_packages
    install_oh_my_zsh
    install_zinit
    install_python_packages
    # The three link passes below replace three copy-pasted loops that
    # differed only in destination prefix.
    link_entries "$DOTS" "$HOME/."
    link_entries "$NON_DOTS" "$HOME/"
    link_entries "$PREFERENCES" "$PREFERENCES_DEST_DIR/"
}

main
| true
|
86d35dda577010064647bfa248e80dc0b5c3bb9f
|
Shell
|
google/filament
|
/build.sh
|
UTF-8
| 31,682
| 3.765625
| 4
|
[
"Apache-2.0",
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Top-level build driver for Filament: desktop, Android, iOS and WebGL.
set -e
# Host tools required by Android, WebGL, and iOS builds
MOBILE_HOST_TOOLS="matc resgen cmgen filamesh uberz"
WEB_HOST_TOOLS="${MOBILE_HOST_TOOLS} mipgen filamesh"
function print_help {
    # Print full usage/options/examples text for this build script to stdout.
    local self_name=$(basename "$0")
    echo "Usage:"
    echo "    $self_name [options] <build_type1> [<build_type2> ...] [targets]"
    echo ""
    echo "Options:"
    echo "    -h"
    echo "        Print this help message."
    echo "    -a"
    echo "        Generate .tgz build archives, implies -i."
    echo "    -c"
    echo "        Clean build directories."
    echo "    -C"
    echo "        Clean build directories and revert android/ to a freshly sync'ed state."
    echo "        All (and only) git-ignored files under android/ are deleted."
    echo "        This is sometimes needed instead of -c (which still misses some clean steps)."
    echo "    -d"
    echo "        Enable matdbg."
    echo "    -f"
    echo "        Always invoke CMake before incremental builds."
    echo "    -g"
    echo "        Disable material optimization."
    echo "    -i"
    echo "        Install build output"
    echo "    -m"
    echo "        Compile with make instead of ninja."
    echo "    -p platform1,platform2,..."
    echo "        Where platformN is [desktop|android|ios|webgl|all]."
    echo "        Platform(s) to build, defaults to desktop."
    echo "        Building for iOS will automatically perform a partial desktop build."
    echo "    -q abi1,abi2,..."
    echo "        Where platformN is [armeabi-v7a|arm64-v8a|x86|x86_64|all]."
    echo "        ABIs to build when the platform is Android. Defaults to all."
    echo "    -u"
    echo "        Run all unit tests, will trigger a debug build if needed."
    echo "    -v"
    echo "        Exclude Vulkan support from the Android build."
    echo "    -s"
    echo "        Add iOS simulator support to the iOS build."
    echo "    -t"
    echo "        Enable SwiftShader support for Vulkan in desktop builds."
    echo "    -e"
    echo "        Enable EGL on Linux support for desktop builds."
    echo "    -l"
    echo "        Build arm64/x86_64 universal libraries."
    echo "        For iOS, this builds universal binaries for devices and the simulator (implies -s)."
    echo "        For macOS, this builds universal binaries for both Apple silicon and Intel-based Macs."
    echo "    -w"
    echo "        Build Web documents (compiles .md.html files to .html)."
    echo "    -k sample1,sample2,..."
    echo "        When building for Android, also build select sample APKs."
    echo "        sampleN is an Android sample, e.g., sample-gltf-viewer."
    echo "        This automatically performs a partial desktop build and install."
    echo "    -b"
    echo "        Enable Address and Undefined Behavior Sanitizers (asan/ubsan) for debugging."
    echo "        This is only for the desktop build."
    echo ""
    echo "Build types:"
    echo "    release"
    echo "        Release build only"
    echo "    debug"
    echo "        Debug build only"
    echo ""
    echo "Targets:"
    echo "    Any target supported by the underlying build system"
    echo ""
    echo "Examples:"
    echo "    Desktop release build:"
    echo "        \$ ./$self_name release"
    echo ""
    echo "    Desktop debug and release builds:"
    echo "        \$ ./$self_name debug release"
    echo ""
    echo "    Clean, desktop debug build and create archive of build artifacts:"
    echo "        \$ ./$self_name -c -a debug"
    echo ""
    echo "    Android release build type:"
    echo "        \$ ./$self_name -p android release"
    echo ""
    echo "    Desktop and Android release builds, with installation:"
    echo "        \$ ./$self_name -p desktop,android -i release"
    echo ""
    echo "    Desktop matc target, release build:"
    echo "        \$ ./$self_name release matc"
    echo ""
    echo "    Build gltf_viewer:"
    echo "        \$ ./$self_name release gltf_viewer"
    echo ""
}
function print_matdbg_help {
    # Print post-build setup instructions shown when matdbg (-d) is enabled.
    echo "matdbg is enabled in the build, but some extra steps are needed."
    echo ""
    echo "FOR DESKTOP BUILDS:"
    echo ""
    echo "Please set the port environment variable before launching. e.g., on macOS do:"
    echo "   export FILAMENT_MATDBG_PORT=8080"
    echo ""
    echo "FOR ANDROID BUILDS:"
    echo ""
    echo "1) For Android Studio builds, make sure to set:"
    echo "       -Pcom.google.android.filament.matdbg"
    echo "   option in Preferences > Build > Compiler > Command line options."
    echo ""
    echo "2) The port number is hardcoded to 8081 so you will need to do:"
    echo "       adb forward tcp:8081 tcp:8081"
    echo ""
    echo "3) Be sure to enable INTERNET permission in your app's manifest file."
    echo ""
}
# Unless explicitly specified, NDK version will be selected as highest available version within same major release chain
FILAMENT_NDK_VERSION=${FILAMENT_NDK_VERSION:-$(cat `dirname $0`/build/android/ndk.version | cut -f 1 -d ".")}
# Requirements
CMAKE_MAJOR=3
CMAKE_MINOR=19
# Internal variables
# ISSUE_* flags record which actions/platforms were requested on the command
# line; the getopts/argument parsing elsewhere in the script flips them.
ISSUE_CLEAN=false
ISSUE_CLEAN_AGGRESSIVE=false
ISSUE_DEBUG_BUILD=false
ISSUE_RELEASE_BUILD=false
# Default: build desktop only
ISSUE_ANDROID_BUILD=false
ISSUE_IOS_BUILD=false
ISSUE_DESKTOP_BUILD=true
ISSUE_WEBGL_BUILD=false
# Default: all
ABI_ARMEABI_V7A=true
ABI_ARM64_V8A=true
ABI_X86=true
ABI_X86_64=true
ABI_GRADLE_OPTION="all"
ISSUE_ARCHIVES=false
BUILD_JS_DOCS=false
ISSUE_CMAKE_ALWAYS=false
ISSUE_WEB_DOCS=false
ANDROID_SAMPLES=()
BUILD_ANDROID_SAMPLES=false
RUN_TESTS=false
# Empty means "do not run the install step" in build_desktop_target.
INSTALL_COMMAND=
# CMake/gradle feature toggles, adjusted by the option flags (-v, -t, -e, -d, -g, -b).
VULKAN_ANDROID_OPTION="-DFILAMENT_SUPPORTS_VULKAN=ON"
VULKAN_ANDROID_GRADLE_OPTION=""
SWIFTSHADER_OPTION="-DFILAMENT_USE_SWIFTSHADER=OFF"
EGL_ON_LINUX_OPTION="-DFILAMENT_SUPPORTS_EGL_ON_LINUX=OFF"
MATDBG_OPTION="-DFILAMENT_ENABLE_MATDBG=OFF"
MATDBG_GRADLE_OPTION=""
MATOPT_OPTION=""
MATOPT_GRADLE_OPTION=""
ASAN_UBSAN_OPTION=""
IOS_BUILD_SIMULATOR=false
BUILD_UNIVERSAL_LIBRARIES=false
# Ninja by default; -m switches to make.
BUILD_GENERATOR=Ninja
BUILD_COMMAND=ninja
BUILD_CUSTOM_TARGETS=
UNAME=$(uname)
LC_UNAME=$(echo "${UNAME}" | tr '[:upper:]' '[:lower:]')
# Functions
function build_clean {
    # Remove the CMake output tree and each Android module's build artifacts.
    echo "Cleaning build directories..."
    rm -Rf out
    local module
    for module in filament-android filamat-android gltfio-android filament-utils-android; do
        rm -Rf "android/${module}/build" "android/${module}/.externalNativeBuild" "android/${module}/.cxx"
    done
}
function build_clean_aggressive {
    # Like build_clean, but additionally deletes every git-ignored file under
    # android/ (git clean -X), restoring it to a freshly sync'ed state.
    echo "Cleaning build directories..."
    rm -Rf out
    git clean -qfX android
}
function build_desktop_target {
    # Configure (if needed), build, and optionally install/archive one desktop
    # configuration.
    #   $1 - CMake build type ("Debug" or "Release")
    #   $2 - space-separated build targets; falls back to BUILD_CUSTOM_TARGETS
    local lc_target=$(echo "$1" | tr '[:upper:]' '[:lower:]')
    local build_targets=$2
    if [[ ! "${build_targets}" ]]; then
        build_targets=${BUILD_CUSTOM_TARGETS}
    fi
    echo "Building ${lc_target} in out/cmake-${lc_target}..."
    mkdir -p "out/cmake-${lc_target}"
    cd "out/cmake-${lc_target}"
    # On macOS, set the deployment target to 10.15.
    local lc_name=$(echo "${UNAME}" | tr '[:upper:]' '[:lower:]')
    if [[ "${lc_name}" == "darwin" ]]; then
        local deployment_target="-DCMAKE_OSX_DEPLOYMENT_TARGET=10.15"
        if [[ "${BUILD_UNIVERSAL_LIBRARIES}" == "true" ]]; then
            local architectures="-DCMAKE_OSX_ARCHITECTURES=arm64;x86_64"
        fi
    fi
    # Re-run CMake only on first configure or when -f forced it.
    if [[ ! -d "CMakeFiles" ]] || [[ "${ISSUE_CMAKE_ALWAYS}" == "true" ]]; then
        cmake \
            -G "${BUILD_GENERATOR}" \
            -DIMPORT_EXECUTABLES_DIR=out \
            -DCMAKE_BUILD_TYPE="$1" \
            -DCMAKE_INSTALL_PREFIX="../${lc_target}/filament" \
            ${SWIFTSHADER_OPTION} \
            ${EGL_ON_LINUX_OPTION} \
            ${MATDBG_OPTION} \
            ${MATOPT_OPTION} \
            ${ASAN_UBSAN_OPTION} \
            ${deployment_target} \
            ${architectures} \
            ../..
    fi
    ${BUILD_COMMAND} ${build_targets}
    if [[ "${INSTALL_COMMAND}" ]]; then
        echo "Installing ${lc_target} in out/${lc_target}/filament..."
        ${BUILD_COMMAND} ${INSTALL_COMMAND}
    fi
    # With -a, tar up the installed tree as filament-<type>-<os>.tgz.
    if [[ -d "../${lc_target}/filament" ]]; then
        if [[ "${ISSUE_ARCHIVES}" == "true" ]]; then
            echo "Generating out/filament-${lc_target}-${LC_UNAME}.tgz..."
            cd "../${lc_target}"
            tar -czvf "../filament-${lc_target}-${LC_UNAME}.tgz" filament
        fi
    fi
    cd ../..
}
# Build the enabled desktop configurations (Debug and/or Release), forwarding
# the optional target list in $1 to build_desktop_target.
function build_desktop {
    local config
    for config in Debug Release; do
        case "${config}" in
            Debug)   [[ "${ISSUE_DEBUG_BUILD}" == "true" ]] || continue ;;
            Release) [[ "${ISSUE_RELEASE_BUILD}" == "true" ]] || continue ;;
        esac
        build_desktop_target "${config}" "$1"
    done
}
# Configure and build one WebGL (Emscripten) configuration.
#   $1 - CMake build type ("Debug" or "Release")
# Requires EMSDK to point at an emsdk install; the emscripten environment is
# sourced inside a subshell so it does not leak into this script.
function build_webgl_with_target {
    local lc_target=$(echo "$1" | tr '[:upper:]' '[:lower:]')
    echo "Building WebGL ${lc_target}..."
    mkdir -p "out/cmake-webgl-${lc_target}"
    cd "out/cmake-webgl-${lc_target}"
    if [[ ! "${BUILD_TARGETS}" ]]; then
        BUILD_TARGETS=${BUILD_CUSTOM_TARGETS}
        ISSUE_CMAKE_ALWAYS=true
    fi
    if [[ ! -d "CMakeFiles" ]] || [[ "${ISSUE_CMAKE_ALWAYS}" == "true" ]]; then
        # Apply the emscripten environment within a subshell.
        (
        # shellcheck disable=SC1090
        source "${EMSDK}/emsdk_env.sh"
        cmake \
            -G "${BUILD_GENERATOR}" \
            -DIMPORT_EXECUTABLES_DIR=out \
            -DCMAKE_TOOLCHAIN_FILE="${EMSDK}/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake" \
            -DCMAKE_BUILD_TYPE="$1" \
            -DCMAKE_INSTALL_PREFIX="../webgl-${lc_target}/filament" \
            -DWEBGL=1 \
            ../..
        ${BUILD_COMMAND} ${BUILD_TARGETS}
        )
    fi
    if [[ -d "web/filament-js" ]]; then
        # Optional JS documentation (-w plus this build path).
        if [[ "${BUILD_JS_DOCS}" == "true" ]]; then
            echo "Generating JavaScript documentation..."
            local DOCS_FOLDER="web/docs"
            local DOCS_SCRIPT="../../web/docs/build.py"
            python3 ${DOCS_SCRIPT} --disable-demo \
                --output-folder "${DOCS_FOLDER}" \
                --build-folder "${PWD}"
        fi
        # Archive the three emitted artifacts (js, wasm, d.ts) into one tgz:
        # build a tar first (-c create, -r append), then gzip it.
        if [[ "${ISSUE_ARCHIVES}" == "true" ]]; then
            echo "Generating out/filament-${lc_target}-web.tgz..."
            cd web/filament-js
            tar -cvf "../../../filament-${lc_target}-web.tar" filament.js
            tar -rvf "../../../filament-${lc_target}-web.tar" filament.wasm
            tar -rvf "../../../filament-${lc_target}-web.tar" filament.d.ts
            cd -
            gzip -c "../filament-${lc_target}-web.tar" > "../filament-${lc_target}-web.tgz"
            rm "../filament-${lc_target}-web.tar"
        fi
    fi
    cd ../..
}
# Build the WebGL flavor: first the host tools (desktop, Release, no install),
# then the WebGL Debug/Release configurations as requested.
function build_webgl {
    # For the host tools, suppress install and always use Release.
    # Save/restore the globals so the caller's settings are preserved.
    local old_install_command=${INSTALL_COMMAND}; INSTALL_COMMAND=
    local old_issue_debug_build=${ISSUE_DEBUG_BUILD}; ISSUE_DEBUG_BUILD=false
    local old_issue_release_build=${ISSUE_RELEASE_BUILD}; ISSUE_RELEASE_BUILD=true
    build_desktop "${WEB_HOST_TOOLS}"
    INSTALL_COMMAND=${old_install_command}
    ISSUE_DEBUG_BUILD=${old_issue_debug_build}
    ISSUE_RELEASE_BUILD=${old_issue_release_build}
    if [[ "${ISSUE_DEBUG_BUILD}" == "true" ]]; then
        build_webgl_with_target "Debug"
    fi
    if [[ "${ISSUE_RELEASE_BUILD}" == "true" ]]; then
        build_webgl_with_target "Release"
    fi
}
# Cross-compile Filament for one Android configuration/architecture.
#   $1 - CMake build type ("Debug" or "Release")
#   $2 - architecture token used to select build/toolchain-<arch>-linux-android.cmake
#        (e.g. "aarch64", "arm7", "x86_64", "x86")
function build_android_target {
    local lc_target=$(echo "$1" | tr '[:upper:]' '[:lower:]')
    local arch=$2
    echo "Building Android ${lc_target} (${arch})..."
    mkdir -p "out/cmake-android-${lc_target}-${arch}"
    cd "out/cmake-android-${lc_target}-${arch}"
    if [[ ! -d "CMakeFiles" ]] || [[ "${ISSUE_CMAKE_ALWAYS}" == "true" ]]; then
        cmake \
            -G "${BUILD_GENERATOR}" \
            -DIMPORT_EXECUTABLES_DIR=out \
            -DCMAKE_BUILD_TYPE="$1" \
            -DFILAMENT_NDK_VERSION="${FILAMENT_NDK_VERSION}" \
            -DCMAKE_INSTALL_PREFIX="../android-${lc_target}/filament" \
            -DCMAKE_TOOLCHAIN_FILE="../../build/toolchain-${arch}-linux-android.cmake" \
            ${MATDBG_OPTION} \
            ${MATOPT_OPTION} \
            ${VULKAN_ANDROID_OPTION} \
            ../..
    fi
    # We must always install Android libraries to build the AAR
    ${BUILD_COMMAND} install
    cd ../..
}
# Build the given Android architecture for every enabled configuration.
#   $1 - architecture token forwarded to build_android_target
function build_android_arch {
    local target_arch=$1
    local config
    for config in Debug Release; do
        case "${config}" in
            Debug)   [[ "${ISSUE_DEBUG_BUILD}" == "true" ]] || continue ;;
            Release) [[ "${ISSUE_RELEASE_BUILD}" == "true" ]] || continue ;;
        esac
        build_android_target "${config}" "${target_arch}"
    done
}
# Archive the installed Android build (out/android-<config>/filament) into
# out/filament-android-<config>-<os>.tgz when archiving was requested (-a).
#   $1 - configuration name ("Debug" or "Release"); lowercased for the path
function archive_android {
    local lc_target=$(echo "$1" | tr '[:upper:]' '[:lower:]')
    if [[ -d "out/android-${lc_target}/filament" ]]; then
        if [[ "${ISSUE_ARCHIVES}" == "true" ]]; then
            echo "Generating out/filament-android-${lc_target}-${LC_UNAME}.tgz..."
            cd "out/android-${lc_target}"
            tar -czvf "../filament-android-${lc_target}-${LC_UNAME}.tgz" filament
            cd ../..
        fi
    fi
}
# Validate the environment needed for an Android build:
#  - ANDROID_HOME must be set (SDK root)
#  - an NDK side-by-side install matching FILAMENT_NDK_VERSION must exist
#  - cmake must be at least version CMAKE_MAJOR.CMAKE_MINOR
# Exits with status 1 on any failure.
function ensure_android_build {
    if [[ "${ANDROID_HOME}" == "" ]]; then
        echo "Error: ANDROID_HOME is not set, exiting"
        exit 1
    fi
    # shellcheck disable=SC2012
    if [[ -z $(ls "${ANDROID_HOME}/ndk/" | sort -V | grep "^${FILAMENT_NDK_VERSION}") ]]; then
        echo "Error: Android NDK side-by-side version ${FILAMENT_NDK_VERSION} or compatible must be installed, exiting"
        exit 1
    fi
    local cmake_version=$(cmake --version)
    if [[ "${cmake_version}" =~ ([0-9]+)\.([0-9]+)\.[0-9]+ ]]; then
        # BUG FIX: the old check compared major and minor independently, so a
        # newer major with a smaller minor (e.g. 4.0 vs. required 3.19) was
        # wrongly rejected. Compare (major, minor) lexicographically instead.
        if ! version_at_least "${BASH_REMATCH[1]}" "${BASH_REMATCH[2]}" "${CMAKE_MAJOR}" "${CMAKE_MINOR}"; then
            echo "Error: cmake version ${CMAKE_MAJOR}.${CMAKE_MINOR}+ is required," \
                "${BASH_REMATCH[1]}.${BASH_REMATCH[2]} installed, exiting"
            exit 1
        fi
    fi
}

# Returns 0 when version $1.$2 is >= version $3.$4 (numeric comparison).
function version_at_least {
    local major=$1 minor=$2 req_major=$3 req_minor=$4
    if (( major > req_major )); then return 0; fi
    if (( major < req_major )); then return 1; fi
    (( minor >= req_minor ))
}
# Full Android build: host tools, per-ABI native libraries, tgz archives, and
# the Gradle AARs (plus optional sample APKs). Must be run from the repo root.
function build_android {
    ensure_android_build
    # Suppress intermediate desktop tools install
    local old_install_command=${INSTALL_COMMAND}
    INSTALL_COMMAND=
    build_desktop "${MOBILE_HOST_TOOLS}"
    # When building the samples, we need to partially "install" the host tools so Gradle can see
    # them.
    if [[ "${BUILD_ANDROID_SAMPLES}" == "true" ]]; then
        if [[ "${ISSUE_DEBUG_BUILD}" == "true" ]]; then
            mkdir -p out/debug/filament/bin
            for tool in ${MOBILE_HOST_TOOLS}; do
                cp out/cmake-debug/tools/${tool}/${tool} out/debug/filament/bin/
            done
        fi
        if [[ "${ISSUE_RELEASE_BUILD}" == "true" ]]; then
            mkdir -p out/release/filament/bin
            for tool in ${MOBILE_HOST_TOOLS}; do
                cp out/cmake-release/tools/${tool}/${tool} out/release/filament/bin/
            done
        fi
    fi
    INSTALL_COMMAND=${old_install_command}
    # Cross-compile each ABI enabled via -q (default: all).
    if [[ "${ABI_ARM64_V8A}" == "true" ]]; then
        build_android_arch "aarch64" "aarch64-linux-android"
    fi
    if [[ "${ABI_ARMEABI_V7A}" == "true" ]]; then
        build_android_arch "arm7" "arm-linux-androideabi"
    fi
    if [[ "${ABI_X86_64}" == "true" ]]; then
        build_android_arch "x86_64" "x86_64-linux-android"
    fi
    if [[ "${ABI_X86}" == "true" ]]; then
        build_android_arch "x86" "i686-linux-android"
    fi
    if [[ "${ISSUE_DEBUG_BUILD}" == "true" ]]; then
        archive_android "Debug"
    fi
    if [[ "${ISSUE_RELEASE_BUILD}" == "true" ]]; then
        archive_android "Release"
    fi
    cd android
    if [[ "${ISSUE_DEBUG_BUILD}" == "true" ]]; then
        # filamat-android is assembled separately because it does not take the
        # vulkan/matdbg/matopt Gradle flags.
        ./gradlew \
            -Pcom.google.android.filament.dist-dir=../out/android-debug/filament \
            -Pcom.google.android.filament.abis=${ABI_GRADLE_OPTION} \
            ${VULKAN_ANDROID_GRADLE_OPTION} \
            ${MATDBG_GRADLE_OPTION} \
            ${MATOPT_GRADLE_OPTION} \
            :filament-android:assembleDebug \
            :gltfio-android:assembleDebug \
            :filament-utils-android:assembleDebug
        ./gradlew \
            -Pcom.google.android.filament.dist-dir=../out/android-debug/filament \
            -Pcom.google.android.filament.abis=${ABI_GRADLE_OPTION} \
            :filamat-android:assembleDebug
        if [[ "${BUILD_ANDROID_SAMPLES}" == "true" ]]; then
            for sample in ${ANDROID_SAMPLES}; do
                ./gradlew \
                    -Pcom.google.android.filament.dist-dir=../out/android-debug/filament \
                    -Pcom.google.android.filament.abis=${ABI_GRADLE_OPTION} \
                    ${MATOPT_GRADLE_OPTION} \
                    :samples:${sample}:assembleDebug
            done
        fi
        # Copy the AARs/APKs into out/ when install was requested (-i / -a).
        if [[ "${INSTALL_COMMAND}" ]]; then
            echo "Installing out/filamat-android-debug.aar..."
            cp filamat-android/build/outputs/aar/filamat-android-full-debug.aar ../out/filamat-android-debug.aar
            echo "Installing out/filament-android-debug.aar..."
            cp filament-android/build/outputs/aar/filament-android-debug.aar ../out/
            echo "Installing out/gltfio-android-debug.aar..."
            cp gltfio-android/build/outputs/aar/gltfio-android-full-debug.aar ../out/gltfio-android-debug.aar
            echo "Installing out/filament-utils-android-debug.aar..."
            cp filament-utils-android/build/outputs/aar/filament-utils-android-debug.aar ../out/filament-utils-android-debug.aar
            if [[ "${BUILD_ANDROID_SAMPLES}" == "true" ]]; then
                for sample in ${ANDROID_SAMPLES}; do
                    echo "Installing out/${sample}-debug.apk"
                    cp samples/${sample}/build/outputs/apk/debug/${sample}-debug-unsigned.apk \
                        ../out/${sample}-debug.apk
                done
            fi
        fi
    fi
    if [[ "${ISSUE_RELEASE_BUILD}" == "true" ]]; then
        ./gradlew \
            -Pcom.google.android.filament.dist-dir=../out/android-release/filament \
            -Pcom.google.android.filament.abis=${ABI_GRADLE_OPTION} \
            ${VULKAN_ANDROID_GRADLE_OPTION} \
            ${MATDBG_GRADLE_OPTION} \
            ${MATOPT_GRADLE_OPTION} \
            :filament-android:assembleRelease \
            :gltfio-android:assembleRelease \
            :filament-utils-android:assembleRelease
        ./gradlew \
            -Pcom.google.android.filament.dist-dir=../out/android-release/filament \
            -Pcom.google.android.filament.abis=${ABI_GRADLE_OPTION} \
            :filamat-android:assembleRelease
        if [[ "${BUILD_ANDROID_SAMPLES}" == "true" ]]; then
            for sample in ${ANDROID_SAMPLES}; do
                ./gradlew \
                    -Pcom.google.android.filament.dist-dir=../out/android-release/filament \
                    -Pcom.google.android.filament.abis=${ABI_GRADLE_OPTION} \
                    ${MATOPT_GRADLE_OPTION} \
                    :samples:${sample}:assembleRelease
            done
        fi
        if [[ "${INSTALL_COMMAND}" ]]; then
            # Release additionally ships the "lite" filamat variant as-is.
            echo "Installing out/filamat-android-release.aar..."
            cp filamat-android/build/outputs/aar/filamat-android-lite-release.aar ../out/
            cp filamat-android/build/outputs/aar/filamat-android-full-release.aar ../out/filamat-android-release.aar
            echo "Installing out/filament-android-release.aar..."
            cp filament-android/build/outputs/aar/filament-android-release.aar ../out/
            echo "Installing out/gltfio-android-release.aar..."
            cp gltfio-android/build/outputs/aar/gltfio-android-full-release.aar ../out/gltfio-android-release.aar
            echo "Installing out/filament-utils-android-release.aar..."
            cp filament-utils-android/build/outputs/aar/filament-utils-android-release.aar ../out/filament-utils-android-release.aar
            if [[ "${BUILD_ANDROID_SAMPLES}" == "true" ]]; then
                for sample in ${ANDROID_SAMPLES}; do
                    echo "Installing out/${sample}-release.apk"
                    cp samples/${sample}/build/outputs/apk/release/${sample}-release-unsigned.apk \
                        ../out/${sample}-release.apk
                done
            fi
        fi
    fi
    cd ..
}
# Cross-compile Filament for one iOS configuration/architecture/platform.
#   $1 - CMake build type ("Debug" or "Release")
#   $2 - architecture ("arm64" or "x86_64")
#   $3 - platform SDK name ("iphoneos" or "iphonesimulator")
function build_ios_target {
    local lc_target=$(echo "$1" | tr '[:upper:]' '[:lower:]')
    local arch=$2
    local platform=$3
    echo "Building iOS ${lc_target} (${arch}) for ${platform}..."
    mkdir -p "out/cmake-ios-${lc_target}-${arch}"
    cd "out/cmake-ios-${lc_target}-${arch}"
    if [[ ! -d "CMakeFiles" ]] || [[ "${ISSUE_CMAKE_ALWAYS}" == "true" ]]; then
        cmake \
            -G "${BUILD_GENERATOR}" \
            -DIMPORT_EXECUTABLES_DIR=out \
            -DCMAKE_BUILD_TYPE="$1" \
            -DCMAKE_INSTALL_PREFIX="../ios-${lc_target}/filament" \
            -DIOS_ARCH="${arch}" \
            -DPLATFORM_NAME="${platform}" \
            -DIOS=1 \
            -DCMAKE_TOOLCHAIN_FILE=../../third_party/clang/iOS.cmake \
            ${MATDBG_OPTION} \
            ${MATOPT_OPTION} \
            ../..
    fi
    ${BUILD_COMMAND}
    if [[ "${INSTALL_COMMAND}" ]]; then
        echo "Installing ${lc_target} in out/${lc_target}/filament..."
        ${BUILD_COMMAND} ${INSTALL_COMMAND}
    fi
    cd ../..
}
# Archive the installed iOS build (out/ios-<config>/filament) into
# out/filament-<config>-ios.tgz when archiving was requested (-a).
function archive_ios {
    local lc_target
    lc_target=$(echo "$1" | tr '[:upper:]' '[:lower:]')
    [[ -d "out/ios-${lc_target}/filament" ]] || return 0
    [[ "${ISSUE_ARCHIVES}" == "true" ]] || return 0
    echo "Generating out/filament-${lc_target}-ios.tgz..."
    cd "out/ios-${lc_target}"
    tar -czvf "../filament-${lc_target}-ios.tgz" filament
    cd ../..
}
# Full iOS build: host tools first (install suppressed), then device (arm64)
# and optionally simulator (x86_64) targets, optionally merged into universal
# (fat) libraries, then archived.
function build_ios {
    # Suppress intermediate desktop tools install
    local old_install_command=${INSTALL_COMMAND}
    INSTALL_COMMAND=
    build_desktop "${MOBILE_HOST_TOOLS}"
    INSTALL_COMMAND=${old_install_command}
    # In theory, we could support iPhone architectures older than arm64, but
    # only arm64 devices support OpenGL 3.0 / Metal
    if [[ "${ISSUE_DEBUG_BUILD}" == "true" ]]; then
        build_ios_target "Debug" "arm64" "iphoneos"
        if [[ "${IOS_BUILD_SIMULATOR}" == "true" ]]; then
            build_ios_target "Debug" "x86_64" "iphonesimulator"
        fi
        # Merge device + simulator libs into lib/universal, then drop the
        # per-arch trees so the archive only ships the fat libraries.
        if [[ "${BUILD_UNIVERSAL_LIBRARIES}" == "true" ]]; then
            build/ios/create-universal-libs.sh \
                -o out/ios-debug/filament/lib/universal \
                out/ios-debug/filament/lib/arm64 \
                out/ios-debug/filament/lib/x86_64
            rm -rf out/ios-debug/filament/lib/arm64
            rm -rf out/ios-debug/filament/lib/x86_64
        fi
        archive_ios "Debug"
    fi
    if [[ "${ISSUE_RELEASE_BUILD}" == "true" ]]; then
        build_ios_target "Release" "arm64" "iphoneos"
        if [[ "${IOS_BUILD_SIMULATOR}" == "true" ]]; then
            build_ios_target "Release" "x86_64" "iphonesimulator"
        fi
        if [[ "${BUILD_UNIVERSAL_LIBRARIES}" == "true" ]]; then
            build/ios/create-universal-libs.sh \
                -o out/ios-release/filament/lib/universal \
                out/ios-release/filament/lib/arm64 \
                out/ios-release/filament/lib/x86_64
            rm -rf out/ios-release/filament/lib/arm64
            rm -rf out/ios-release/filament/lib/x86_64
        fi
        archive_ios "Release"
    fi
}
# Render the Markdeep documentation (Filament.md.html, Materials.md.html)
# using the npm markdeep-rasterizer package, installed into out/web-docs.
function build_web_docs {
    echo "Building Web documents..."
    mkdir -p out/web-docs
    # package.json drives which npm packages get installed below.
    cp -f docs/web-docs-package.json out/web-docs/package.json
    cd out/web-docs
    npm install > /dev/null
    # Generate documents
    npx markdeep-rasterizer ../../docs/Filament.md.html ../../docs/Materials.md.html ../../docs/
    cd ../..
}
# Verify the tools required by the selected build are available, degrading
# from ninja to make when possible. Runs with `set +e` so the `command -v`
# probes do not abort the script, and restores `set -e` on exit.
# May rewrite the globals BUILD_GENERATOR / BUILD_COMMAND.
function validate_build_command {
    set +e
    # Make sure CMake is installed
    local cmake_binary=$(command -v cmake)
    if [[ ! "${cmake_binary}" ]]; then
        echo "Error: could not find cmake, exiting"
        exit 1
    fi
    # Make sure Ninja is installed
    if [[ "${BUILD_COMMAND}" == "ninja" ]]; then
        local ninja_binary=$(command -v ninja)
        if [[ ! "${ninja_binary}" ]]; then
            echo "Warning: could not find ninja, using make instead"
            BUILD_GENERATOR="Unix Makefiles"
            BUILD_COMMAND="make"
        fi
    fi
    # Make sure Make is installed
    if [[ "${BUILD_COMMAND}" == "make" ]]; then
        local make_binary=$(command -v make)
        if [[ ! "${make_binary}" ]]; then
            echo "Error: could not find make, exiting"
            exit 1
        fi
    fi
    # If building a WebAssembly module, ensure we know where Emscripten lives.
    if [[ "${EMSDK}" == "" ]] && [[ "${ISSUE_WEBGL_BUILD}" == "true" ]]; then
        echo "Error: EMSDK is not set, exiting"
        exit 1
    fi
    # Web documents require node and npm for processing
    if [[ "${ISSUE_WEB_DOCS}" == "true" ]]; then
        local node_binary=$(command -v node)
        local npm_binary=$(command -v npm)
        local npx_binary=$(command -v npx)
        if [[ ! "${node_binary}" ]] || [[ ! "${npm_binary}" ]] || [[ ! "${npx_binary}" ]]; then
            echo "Error: Web documents require node, npm and npx to be installed"
            exit 1
        fi
    fi
    set -e
}
# Run one gtest binary (path relative to out/cmake-debug, possibly followed
# by extra arguments) and write its XML report under out/test-results.
function run_test {
    local test=$1
    # The input string might contain arguments, so we use "set -- $test" to replace $1 with the
    # first whitespace-separated token in the string.
    # shellcheck disable=SC2086
    set -- ${test}
    local test_name=$(basename "$1")
    # ${test} stays unquoted on purpose so embedded arguments word-split.
    # shellcheck disable=SC2086
    ./out/cmake-debug/${test} --gtest_output="xml:out/test-results/${test_name}/sponge_log.xml"
}
# Run the test suite: a TypeScript type-check for WebGL builds, otherwise
# every gtest binary listed in build/common/test_list.txt.
function run_tests {
    if [[ "${ISSUE_WEBGL_BUILD}" == "true" ]]; then
        # NOTE(review): `echo` essentially always succeeds, so this negated
        # condition means the tsc --noEmit check below is effectively never
        # executed — confirm whether the intent was to test that `tsc` itself
        # is installed instead.
        if ! echo "TypeScript $(tsc --version)" ; then
            tsc --noEmit \
                third_party/gl-matrix/gl-matrix.d.ts \
                web/filament-js/filament.d.ts \
                web/filament-js/test.ts
        fi
    else
        while read -r test; do
            run_test "${test}"
        done < build/common/test_list.txt
    fi
}
# Beginning of the script
# Run from the repository root regardless of the caller's cwd.
pushd "$(dirname "$0")" > /dev/null

# NOTE(review): the optstring declares a "j" flag that no case arm handles,
# so -j is silently accepted and ignored — confirm whether it is intentional.
while getopts ":hacCfgijmp:q:uvslwtedk:b" opt; do
    case ${opt} in
        h)
            print_help
            exit 1
            ;;
        a)
            # Archive builds; implies install so there is something to pack.
            ISSUE_ARCHIVES=true
            INSTALL_COMMAND=install
            ;;
        c)
            ISSUE_CLEAN=true
            ;;
        C)
            ISSUE_CLEAN_AGGRESSIVE=true
            ;;
        d)
            # NOTE(review): the comma after ...MATDBG=ON looks unintended — it
            # becomes part of the define's value ("ON,"); verify against the
            # CMake options this flag is meant to set.
            PRINT_MATDBG_HELP=true
            MATDBG_OPTION="-DFILAMENT_ENABLE_MATDBG=ON, -DFILAMENT_BUILD_FILAMAT=ON"
            MATDBG_GRADLE_OPTION="-Pcom.google.android.filament.matdbg"
            ;;
        f)
            ISSUE_CMAKE_ALWAYS=true
            ;;
        g)
            MATOPT_OPTION="-DFILAMENT_DISABLE_MATOPT=ON"
            MATOPT_GRADLE_OPTION="-Pcom.google.android.filament.matnopt"
            ;;
        i)
            INSTALL_COMMAND=install
            ;;
        m)
            BUILD_GENERATOR="Unix Makefiles"
            BUILD_COMMAND="make"
            ;;
        p)
            # Comma-separated platform list; "all" notably leaves webgl off.
            ISSUE_DESKTOP_BUILD=false
            platforms=$(echo "${OPTARG}" | tr ',' '\n')
            for platform in ${platforms}
            do
                case ${platform} in
                    desktop)
                        ISSUE_DESKTOP_BUILD=true
                        ;;
                    android)
                        ISSUE_ANDROID_BUILD=true
                        ;;
                    ios)
                        ISSUE_IOS_BUILD=true
                        ;;
                    webgl)
                        ISSUE_WEBGL_BUILD=true
                        ;;
                    all)
                        ISSUE_ANDROID_BUILD=true
                        ISSUE_IOS_BUILD=true
                        ISSUE_DESKTOP_BUILD=true
                        ISSUE_WEBGL_BUILD=false
                        ;;
                esac
            done
            ;;
        q)
            # Comma-separated Android ABI list; raw value is also forwarded
            # to Gradle via ABI_GRADLE_OPTION.
            ABI_ARMEABI_V7A=false
            ABI_ARM64_V8A=false
            ABI_X86=false
            ABI_X86_64=false
            ABI_GRADLE_OPTION="${OPTARG}"
            abis=$(echo "${OPTARG}" | tr ',' '\n')
            for abi in ${abis}
            do
                case ${abi} in
                    armeabi-v7a)
                        ABI_ARMEABI_V7A=true
                        ;;
                    arm64-v8a)
                        ABI_ARM64_V8A=true
                        ;;
                    x86)
                        ABI_X86=true
                        ;;
                    x86_64)
                        ABI_X86_64=true
                        ;;
                    all)
                        ABI_ARMEABI_V7A=true
                        ABI_ARM64_V8A=true
                        ABI_X86=true
                        ABI_X86_64=true
                        ;;
                esac
            done
            ;;
        u)
            # Run unit tests; tests are built in the debug configuration.
            ISSUE_DEBUG_BUILD=true
            RUN_TESTS=true
            ;;
        v)
            VULKAN_ANDROID_OPTION="-DFILAMENT_SUPPORTS_VULKAN=OFF"
            VULKAN_ANDROID_GRADLE_OPTION="-Pcom.google.android.filament.exclude-vulkan"
            echo "Disabling support for Vulkan in the core Filament library."
            echo "Consider using -c after changing this option to clear the Gradle cache."
            ;;
        s)
            IOS_BUILD_SIMULATOR=true
            echo "iOS simulator support enabled."
            ;;
        t)
            SWIFTSHADER_OPTION="-DFILAMENT_USE_SWIFTSHADER=ON"
            echo "SwiftShader support enabled."
            ;;
        e)
            EGL_ON_LINUX_OPTION="-DFILAMENT_SUPPORTS_EGL_ON_LINUX=ON -DFILAMENT_SKIP_SDL2=ON -DFILAMENT_SKIP_SAMPLES=ON"
            echo "EGL on Linux support enabled; skipping SDL2."
            ;;
        l)
            # Universal libraries require the simulator slice as well.
            IOS_BUILD_SIMULATOR=true
            BUILD_UNIVERSAL_LIBRARIES=true
            echo "Building universal libraries."
            ;;
        w)
            ISSUE_WEB_DOCS=true
            ;;
        k)
            BUILD_ANDROID_SAMPLES=true
            ANDROID_SAMPLES=$(echo "${OPTARG}" | tr ',' '\n')
            ;;
        b)  ASAN_UBSAN_OPTION="-DFILAMENT_ENABLE_ASAN_UBSAN=ON"
            echo "Enabled ASAN/UBSAN"
            ;;
        \?)
            echo "Invalid option: -${OPTARG}" >&2
            echo ""
            print_help
            exit 1
            ;;
        :)
            echo "Option -${OPTARG} requires an argument." >&2
            echo ""
            print_help
            exit 1
            ;;
    esac
done

# At least one argument (option or target) is required.
if [[ "$#" == "0" ]]; then
    print_help
    exit 1
fi

shift $((OPTIND - 1))

# Remaining args select the configuration(s); anything else is treated as a
# custom build target passed through to the build tool.
for arg; do
    if [[ "${arg}" == "release" ]]; then
        ISSUE_RELEASE_BUILD=true
    elif [[ "${arg}" == "debug" ]]; then
        ISSUE_DEBUG_BUILD=true
    else
        BUILD_CUSTOM_TARGETS="${BUILD_CUSTOM_TARGETS} ${arg}"
    fi
done

validate_build_command

# Dispatch the requested actions in a fixed order: clean, builds, docs, tests.
if [[ "${ISSUE_CLEAN}" == "true" ]]; then
    build_clean
fi

if [[ "${ISSUE_CLEAN_AGGRESSIVE}" == "true" ]]; then
    build_clean_aggressive
fi

if [[ "${ISSUE_DESKTOP_BUILD}" == "true" ]]; then
    build_desktop
fi

if [[ "${ISSUE_ANDROID_BUILD}" == "true" ]]; then
    build_android
fi

if [[ "${ISSUE_IOS_BUILD}" == "true" ]]; then
    build_ios
fi

if [[ "${ISSUE_WEBGL_BUILD}" == "true" ]]; then
    build_webgl
fi

if [[ "${ISSUE_WEB_DOCS}" == "true" ]]; then
    build_web_docs
fi

if [[ "${RUN_TESTS}" == "true" ]]; then
    run_tests
fi

if [[ "${PRINT_MATDBG_HELP}" == "true" ]]; then
    print_matdbg_help
fi
| true
|
86fe57bc97cf4f1c46996ab47ee67b4679f9f572
|
Shell
|
rohitrehan/docker-utils
|
/wordpress/start-wordpress.sh
|
UTF-8
| 337
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# to host wordpress locally

# Create a data directory if it is not there yet, announcing it like the
# original script did ($1 = path, $2 = human-readable label).
ensure_data_dir() {
    if [ ! -d "$1" ]; then
        echo "creating $2 directory..."
        mkdir -p "$1"
    fi
}

ensure_data_dir wordpress_data/db "db"
ensure_data_dir wordpress_data/content "wp content"

echo "starting wordpress containers..."
docker-compose up -d
| true
|
0e4eac82d1cfb104028ee8b1199c9aaf20f63c3f
|
Shell
|
ytyaru/Shell.HatenaLink.20200323171932
|
/src/lib/cli/showvartable.sh
|
UTF-8
| 263
| 3.703125
| 4
|
[
"CC0-1.0"
] |
permissive
|
# Print a NAME=VALUE table, column-aligned with column(1), for every variable
# name passed as an argument.
# SECURITY FIX: the original used `eval echo '$'"$v"`, which executes command
# substitutions embedded in a variable's *value* (e.g. value '$(rm ...)').
# Bash indirect expansion ${!v} reads the value without re-evaluating it, and
# printf avoids echo -e mangling backslash sequences in values.
ShowVarTable() {
    local text='' v
    for v in "$@"; do
        text+=$'\n'"${v}=${!v}"
    done
    # text starts with a newline; tail -n +2 drops that empty first line.
    printf '%s\n' "$text" | tail -n +2 | column -t -s=
}
| true
|
cdf40aa380b2bd83b70f13fd07fdd916cda3fb6e
|
Shell
|
skinforhair/.skinforhair_settings
|
/bin/Wheatley/episodes/check_downloads
|
UTF-8
| 3,110
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
### Simple interface for checking status of deluge torrents

# Color variables (white, red, green, ...) come from this sourced file;
# pLIB/pBIN are expected in the environment.
source "$pLIB/colors"

# Second whitespace-separated field of the vpn tool's status output,
# e.g. "Connected".
vpnStatus=$("$pBIN/Wheatley/vpn_tools/vpn" "status")
vpnStatus=`echo "$vpnStatus" |awk '{print $2}'`

# Scratch file for the parsed deluge-console output; removed at script end.
tmpfile="/tmp/dl_list.txt"
if [ -f $tmpfile ]; then rm -f $tmpfile; fi

usage() {
    echo " Usage: $0 <options>
    Options:
        -d [D]elete a torrent. Must be followed by a number
        -h [H]elp
    "
} #usage

# -d takes the list index (as shown by this script) of the torrent to remove.
DeleteID=0
while getopts ":hd:" OPTION; do
    case $OPTION in
        d) DeleteID=$OPTARG;;
        h) usage;exit 0;;
    esac
done

CONSOLE="/usr/bin/deluge-console"
# Query deluge-console and parse its "info" output into the parallel global
# arrays NameList/StateList/ProgList/idList/SizeList, indexed per torrent
# (blank lines separate torrents). Exits 0 when nothing is downloading.
# Also sets the global counter c to the last torrent index.
display_running_torrents() {
    bob=$($CONSOLE info)
    if [ "$bob" == "" ]; then
        echo -e "---${white}NO DOWNLOADS CURRENTLY RUNNING${NC}---"
        exit 0;
    fi
    # Strip the brackets deluge prints around torrent IDs.
    bob=`echo "$bob" | tr -d '['`
    bob=`echo "$bob" | tr -d ']'`
    echo "$bob" > $tmpfile
    c=0
    while read line; do
        if [ "$line" == "" ]; then
            c=$((c+1))
        fi
        # Dispatch on the first five characters of each line ("Name:",
        # "State", "Progr", "Size:", or an ID line).
        attrib=${line:0:5}
        if [ "${attrib:0:2}" == "ID" ]; then attrib="ID"; fi
        case "$attrib" in
            "Name:") NameList[$c]="${line:6}";;
            "State") StateList[$c]="${line:7}";;
            "Progr") ProgList[$c]="${line:10}";;
            "ID" ) idList[$c]="${line:4}";;
            "Size:" ) SizeList[$c]="${line:6}";;
        esac
    done < $tmpfile
} #display_running_torrents
# Initial listing pass (fills the global arrays).
display_running_torrents

# If -d was given, remove the selected torrent by its deluge ID, then
# refresh the listing.
if [ "$DeleteID" != "0" ]; then
    DID=${idList[$DeleteID]}
    DN=${NameList[$DeleteID]}
    if [ "$DID" != "" ]; then
        echo -e "Removing ${yellow}$DN...${NC}"
        bob=$($CONSOLE rm $DID)
    fi
    display_running_torrents
fi

# Warn when torrents run outside the VPN.
if [ "$vpnStatus" != "Connected" ]; then
    echo -e "${red}WARNING - VPN is $vpnStatus${NC}"
    echo ""
fi
echo ""
# Compare downloaded vs. total size for torrent $1 (index into SizeList).
# Entries look like "699.9 MiB/700.0 MiB ...". Prints
# "<downloaded> / <total>" (numbers only) and returns 1 when they are equal
# (download complete), 0 otherwise.
function checkSize() {
    local entry="${SizeList[$1]}"
    local first="${entry%% *}"      # number before the first space
    local rest="${entry#*/}"        # everything after the first slash
    local second="${rest%% *}"      # number before the next space
    echo "$first / $second"
    if [ "$first" == "$second" ]; then
        return 1
    else
        return 0
    fi
} #checkSize
# Describe a queued torrent's state from its progress string.
#   $1 - the ProgList entry (e.g. "42.1% ..."), may be empty
#   $2 - torrent index, used to fall back to a size comparison when $1 is empty
# Sentinels: d=100 means complete (queued to seed), d=101 means error.
function QueueState() {
    input="$1"
    if [ "$input" == "" ]; then
        # No progress string: infer completeness from SizeList via checkSize
        # (returns 1 when downloaded == total).
        checkSize "$2"
        if [ "$?" == "1" ]; then
            d="100"
        else
            d="101"
        fi
    else
        # Split "NN.N% rest" at the percent sign.
        j=`expr index "$input" %`
        if [ $j -gt 0 ]; then
            j=$((j-1))
        fi
        echo "input: $input j: $j"
        d="${input:0:$j}"
        # t="${input:$j:$((${#input}-$j))}"
        # NOTE(review): t is extracted but only used by the dead `if false`
        # branch below — presumably leftover from an older display format.
        t="${input:$((j+1)):$j}"
    fi
    if [ "$d" == "101" ]; then
        echo -e "${red}ERROR${NC}"
    elif [ "$d" == "100" ]; then
        echo -e "${green}Complete - Queued to Seed ${NC}"
    else
        echo -e "${yellow}Queued for Download${NC}"
    fi
    # Dead code kept by the author (condition is `false`).
    if false; then
        if [ "$d" == "$t" ]; then
            echo -e "${green}Complete - Seed Queued${NC} ($d / $t)"
        else
            echo -e "${yellow}Queued to download${NC}"
        fi
    fi
} #QueueState
# Final report: one line per torrent with a colorized state, dispatching on
# the first four characters of the deluge state ("Seed", "Queu", "Down").
i=0
while [ $i -le $c ]; do
    if [ "${NameList[i]}" != "" ]; then
        echo -en "${White}$i: ${Blue}${NameList[i]}${NC} - "
        stringZ=${StateList[i]:0:4}
        case "$stringZ" in
            "Seed") echo -e "${green}Seeding${NC}";;
            "Queu") QueueState "${ProgList[i]}" "$i";;
            "Down") echo -e "${red}Downloading${NC}";;
        esac
        # Print the raw progress line unless it starts with "Down"(loading).
        if [ "${ProgList[i]:0:4}" != "Down" ]; then
            echo " ${ProgList[i]}"
            echo ""
        fi
    fi
    i=$((i+1))
done

rm $tmpfile
| true
|
a391a3324663ad3d5e7246fbef5e5e211e8e66ae
|
Shell
|
tobw/meta-jens
|
/recipes-rdm/hp2sm/files/hp2sm.run
|
UTF-8
| 406
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Launch the hp2sm Plack application under Starman via start-stop-daemon,
# replacing this shell (exec). Writes a pidfile to /var/run/hp2sm.pid.
PLACK_APP="/opt/rdm/hp2sm/bin/app.pl"
PLACK_PORT=8081
PLACK_ENV="production"
STARMAN_WORKERS=2
OPTIONS="--env ${PLACK_ENV} --listen :${PLACK_PORT} --workers ${STARMAN_WORKERS} ${PLACK_APP}"
PIDFILE="/var/run/hp2sm.pid"
export SYSTEM_IMAGE_UPDATE_DIR=/data/.update
# BUG FIX: use -p so a pre-existing update directory (e.g. on service restart)
# does not make mkdir fail with an error.
mkdir -p "${SYSTEM_IMAGE_UPDATE_DIR}"
# OPTIONS stays unquoted on purpose: start-stop-daemon must receive each
# starman flag as a separate argument.
exec start-stop-daemon -m --start --quiet --pidfile "$PIDFILE" --exec starman -- $OPTIONS 2>&1
| true
|
f48df22957721ad2b93887fabea6291a9579d0c0
|
Shell
|
ebriand/conf-cilium
|
/setup-local.sh
|
UTF-8
| 583
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Keep three tunnels alive (restarted by watch): an SSH tunnel to the remote
# k8s API server, and two kubectl port-forwards for the demo services.
# NOTE(review): wrapping long-running commands in `watch` is used here as a
# crude auto-restart; confirm that is the intent before changing it.

# Drop any previously started tunnels.
killall watch
watch "gcloud compute ssh --ssh-flag=\"-L 6443:localhost:6443 -N\" --zone europe-west1-c controller-0" 2>&1 > gcloud.log &
echo "Forwarding distant cluster to :6443"

# Port forward events
watch "kubectl port-forward -n events service/events-frontend" 8080:80 2>&1 > events.log &
echo "Forwarding to http://events.shield.com:8080"

# Port forward internal
watch "kubectl port-forward -n internal service/internal-frontend" 8081:80 2>&1 > internal.log &
echo "Forwarding to http://internal.shield.com:8081"

echo "To stop just launch: killall watch"
| true
|
c72f64f3d4d14ac4275c66ef7c05e2cc769df818
|
Shell
|
Banshee1221/SCC-Housekeeping
|
/comp_apps/ompi/hpcc/atlas.sh
|
UTF-8
| 330
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Download and unpack the ATLAS (math-atlas) source tarball.

# BUG FIX: there is no yum package named "bunzip2"; the bunzip2 binary is
# shipped by the "bzip2" package on CentOS/RHEL.
sudo yum install -y bzip2

ATLAS_VER="3.10.3"
ATLAS_PKG="atlas"
ATLAS_EXT="tar.bz2"
ATLAS_PKG_NAME=$ATLAS_PKG$ATLAS_VER
ATLAS_PKG_FULL=$ATLAS_PKG_NAME.$ATLAS_EXT
# e.g. http://downloads.sourceforge.net/project/math-atlas/Stable/3.10.3/atlas3.10.3.tar.bz2
ATLAS_URL="http://downloads.sourceforge.net/project/math-atlas/Stable/$ATLAS_VER/$ATLAS_PKG_FULL"

wget "$ATLAS_URL"
# Decompress and extract; tar's m flag skips restoring archived mtimes.
bunzip2 -c "$ATLAS_PKG_FULL" | tar xfm -
| true
|
83431d11c61ce69459da1d83d228baf3d422bcb4
|
Shell
|
bastiango97/fedora_ros_setup
|
/ros_scripts/ros_install.sh
|
UTF-8
| 799
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Bootstrap a ROS desktop-full source build (wet/catkin packages only).
#   $1 - name of a normal (not root) user (validated only; see commented
#        rosdep block below)
#   $2 - the ROS distro to generate the rosinstall for (e.g. "kinetic")
set -e

# BUG FIX: quote "$1"/"$2" — the unquoted [ -z $1 ] breaks (or silently
# misbehaves) when an argument contains whitespace.
if [ -z "$1" ] ; then
    echo "First argument needed: a name of a normal (not root) user "
    exit 1
fi

if [ -z "$2" ] ; then
    echo "Second argument needed: the ros distro to install "
    exit 1
fi

#if [ sudo -u "$1 rosdep update" ] ; then
# echo "rosdep update done"
#else
# echo "rosdep existed"
#fi

# NOTE(review): the workspace path and the sourced setup.bash hard-code
# "kinetic" even though the distro comes from $2 — confirm whether the path
# should be derived from "$2" instead.
mkdir -p /opt/ros/kinetic/ros_catkin_ws
cd /opt/ros/kinetic/ros_catkin_ws

# Generate the rosinstall once, then initialize the workspace from it.
if [ ! -f "${2}-desktop-full-wet.rosinstall" ] ; then
    rosinstall_generator-2.7 desktop_full --rosdistro "$2" --deps --wet-only --tar > "${2}-desktop-full-wet.rosinstall"
    wstool init -j4 src "${2}-desktop-full-wet.rosinstall"
fi

./src/catkin/bin/catkin_make_isolated --install -DCMAKE_BUILD_TYPE=Release
echo "source /opt/ros/kinetic/ros_catkin_ws/devel_isolated/setup.bash" >> /etc/bashrc
| true
|
8984057fb6245924c92059006ecb10cd31f13d2c
|
Shell
|
fargusplumdoodle/BorDNS
|
/scripts/docker_build_version.sh
|
UTF-8
| 199
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the BorDNS docker image tagged with the given version.
# Usage: ./scripts/build_version.sh <version>

# BUG FIX: validate arguments *before* announcing the version (the original
# first printed "Building version " with an empty version), and exit with a
# portable status code (exit takes 0-255; "exit -1" is not portable).
if [ $# -ne 1 ];
then
    echo "must supply version"
    echo "./scripts/build_version.sh <version>"
    exit 1
fi

echo "Building version $1"
docker build -t "bordns:$1" -f docker/Dockerfile .
| true
|
53a2869b44465e9299528af8ad72e4cb939fa4b5
|
Shell
|
kendas/dotfiles
|
/zsh/.zshrc
|
UTF-8
| 4,377
| 2.84375
| 3
|
[] |
no_license
|
# The following lines were added by compinstall
zstyle ':completion:*' completer _complete _ignored
zstyle :compinstall filename '/home/kaarel/.zshrc'

autoload -Uz compinit
compinit
# End of lines added by compinstall
# Lines configured by zsh-newuser-install
HISTFILE=~/.histfile
HISTSIZE=1000
SAVEHIST=1000
bindkey -e
# End of lines configured by zsh-newuser-install

# Base prompt theme (overridden further down by the git prompt).
autoload promptinit
promptinit
prompt walters

############################################################
# Python stuff
############################################################
export WORKON_HOME=~/.virtualenvs
export PROJECT_HOME=~/projects
export PATH="$PATH":"$HOME/.local/bin"

# Virtualenvwrapper
# Ubuntu/Debian
if [[ -e /usr/share/virtualenvwrapper/virtualenvwrapper.sh ]]
then
    source /usr/share/virtualenvwrapper/virtualenvwrapper.sh
fi
# Arch Linux
if [[ -e /usr/bin/virtualenvwrapper.sh ]]
then
    source /usr/bin/virtualenvwrapper.sh
fi

############################################################
# Dart stuff
############################################################
if [[ -e $HOME/.pub-cache/bin ]]
then
    export PATH="$PATH":"$HOME/.pub-cache/bin"
fi

############################################################
# Rust stuff
############################################################
if [[ -e $HOME/.cargo/bin ]]
then
    export PATH="$HOME/.cargo/bin":"$PATH"
fi

############################################################
# JavaScript stuff
############################################################
# yarn binaries
if [[ -e $HOME/.yarn/bin ]]
then
    export PATH="$PATH":"$HOME/.yarn/bin"
fi

############################################################
# Project management stuff
############################################################
# prm
export RPROMPT=$RPS1
# prm must run in the current shell (it changes cwd/env), hence the alias
# sources it instead of executing it.
alias prm=". $HOME/dotfiles/prm/prm.sh"
#alias prm=". $HOME/dotfiles/prm/prm.sh"

# git
GIT_PROMPT_EXECUTABLE="haskell"
source /home/kaarel/dotfiles/zsh/zsh-git-prompt/zshrc.sh
PROMPT='%B%(?..[%?] )%b%n@%U%m%u$(git_super_status)> '

############################################################
# General ease of use stuff
############################################################
export EDITOR="vim"
# Random alphanumeric password; length from $1, default 16.
randpw(){ < /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c${1:-16};echo;}
if [[ -e /home/kaarel/.dir_colors/dircolors ]]
then
    eval `dircolors /home/kaarel/.dir_colors/dircolors`
fi
setopt PROMPT_SP

############################################################
# Keybinding stuff
############################################################
# Resolve special keys from terminfo so bindings work across terminals.
typeset -g -A key

key[Home]="${terminfo[khome]}"
key[End]="${terminfo[kend]}"
key[Insert]="${terminfo[kich1]}"
key[Backspace]="${terminfo[kbs]}"
key[Delete]="${terminfo[kdch1]}"
key[Up]="${terminfo[kcuu1]}"
key[Down]="${terminfo[kcud1]}"
key[Left]="${terminfo[kcub1]}"
key[Right]="${terminfo[kcuf1]}"
key[PageUp]="${terminfo[kpp]}"
key[PageDown]="${terminfo[knp]}"
key[ShiftTab]="${terminfo[kcbt]}"

# setup key accordingly
[[ -n "${key[Home]}"      ]] && bindkey -- "${key[Home]}"      beginning-of-line
[[ -n "${key[End]}"       ]] && bindkey -- "${key[End]}"       end-of-line
[[ -n "${key[Insert]}"    ]] && bindkey -- "${key[Insert]}"    overwrite-mode
[[ -n "${key[Backspace]}" ]] && bindkey -- "${key[Backspace]}" backward-delete-char
[[ -n "${key[Delete]}"    ]] && bindkey -- "${key[Delete]}"    delete-char
[[ -n "${key[Up]}"        ]] && bindkey -- "${key[Up]}"        up-line-or-history
[[ -n "${key[Down]}"      ]] && bindkey -- "${key[Down]}"      down-line-or-history
[[ -n "${key[Left]}"      ]] && bindkey -- "${key[Left]}"      backward-char
[[ -n "${key[Right]}"     ]] && bindkey -- "${key[Right]}"     forward-char
[[ -n "${key[PageUp]}"    ]] && bindkey -- "${key[PageUp]}"    beginning-of-buffer-or-history
[[ -n "${key[PageDown]}"  ]] && bindkey -- "${key[PageDown]}"  end-of-buffer-or-history
[[ -n "${key[ShiftTab]}"  ]] && bindkey -- "${key[ShiftTab]}"  reverse-menu-complete

# Finally, make sure the terminal is in application mode, when zle is
# active. Only then are the values from $terminfo valid.
if (( ${+terminfo[smkx]} && ${+terminfo[rmkx]} )); then
    autoload -Uz add-zle-hook-widget
    function zle_application_mode_start {
        echoti smkx
    }
    function zle_application_mode_stop {
        echoti rmkx
    }
    add-zle-hook-widget -Uz zle-line-init zle_application_mode_start
    add-zle-hook-widget -Uz zle-line-finish zle_application_mode_stop
fi
| true
|
01988368867199cdbeacd28f1f615edb04c6ca0f
|
Shell
|
atlanmod/hadoop-cluster-docker
|
/hadoop-master/files/dist/dist-trans/experiments-data/boxplotter.sh
|
UTF-8
| 296
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Merge per-DAG timing files into one table for box plots, then split it
# one line per output file.
# NOTE(review): "cut.exe" suggests this was written for a Windows/Cygwin
# environment — confirm before running elsewhere.
for i in `seq 0 5`
do
    # Drop the first column (node names) from each cut file.
    cut.exe -f2,3,4,5,6,7,8,9,10,11,12,13,14,15 timings-dag-$i-cut.txt > timings-dag-$i-cut-no-nodes.txt
done
# NOTE(review): &> also captures paste's stderr into the data file — confirm
# whether plain > was intended.
paste timings-dag-?-cut-no-nodes.txt &> timings-all-merged-boxplot.txt
# One output file per line, single-letter suffixes (.a, .b, ...).
split -l 1 -a 1 timings-all-merged-boxplot.txt timings-all-merged-boxplot.txt.
| true
|
f48af976c5961836d3b89c493bd37bd51c99162c
|
Shell
|
krixkrix/olive
|
/configure
|
UTF-8
| 1,202
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Very simple configure for Olive
#
# Parses an optional --prefix=DIR argument and writes build/config.make,
# which the Makefiles include.
VERSION=0.2
prefix=NONE
test -e "$CONFIG_SITE" && . "$CONFIG_SITE"
test "$prefix" = NONE && prefix=/usr/local

while test x$1 != x; do
    case $1 in
        --prefix=*)
            prefix=`echo $1 | sed 's/--prefix=//'`
            ;;
        --prefix)
            echo --prefix needs an argument: --prefix=directory >&2
            ;;
        *)
            echo Unknown argument $1 >&2
    esac
    shift
done

# ensure pc files are regenerated with make
# since the prefix may have changed
touch data/*.in

# Normalize VERSION into a four-component a.b.c.d version for Mono,
# splitting an over-long first component after four digits.
export VERSION
MONO_VERSION=$(awk 'BEGIN {
    split (ENVIRON["VERSION"] ".0.0.0", vsplit, ".")
    if(length(vsplit [1]) > 4) {
        split (substr(ENVIRON["VERSION"], 0, 4) "." substr(ENVIRON["VERSION"], 5) ".0.0", vsplit, ".")
    }
    print vsplit [1] "." vsplit [2] "." vsplit [3] "." vsplit [4]
}')

# Emit the make fragment consumed by the build.
echo "prefix=$prefix" > build/config.make
echo "exec_prefix=\${prefix}" >> build/config.make
echo "mono_libdir=\${exec_prefix}/lib" >> build/config.make
echo "MCS_FLAGS=-debug+" >> build/config.make
echo "RUNTIME=mono" >> build/config.make
echo "OLIVE_VERSION=$VERSION" >> build/config.make
echo "MONO_VERSION=$MONO_VERSION" >> build/config.make

echo "Olive $VERSION"
echo
echo " Install Prefix: ${prefix}"
echo
|
3cef17e06bfd48b498ab74131870395a5aa60c8e
|
Shell
|
dflick-pivotal/sentimentr-release
|
/jobs/broker-deregistrar/templates/errand.sh.erb
|
UTF-8
| 760
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# BOSH errand (ERB template): deregister the service broker from Cloud
# Foundry. The <%= p(...) %> placeholders are substituted from job properties
# when BOSH renders this template.
set -eu

CF_API_URL='<%= p("cf.api_url") %>'
CF_ADMIN_USERNAME='<%= p("cf.admin_username") %>'
CF_ADMIN_PASSWORD='<%= p("cf.admin_password") %>'
CF_SKIP_SSL_VALIDATION='<%= p("cf.skip_ssl_validation") %>'
BROKER_NAME='<%= p("broker.name") %>'

# Use only the bundled cf CLI.
export PATH=/var/vcap/packages/cf-cli/bin

echo "CF_API_URL=${CF_API_URL}"
echo "CF_SKIP_SSL_VALIDATION=${CF_SKIP_SSL_VALIDATION}"
echo "CF_ADMIN_USERNAME=${CF_ADMIN_USERNAME}"
echo "BROKER_NAME=${BROKER_NAME}"

if [[ ${CF_SKIP_SSL_VALIDATION} == "true" ]]; then
    cf api ${CF_API_URL} --skip-ssl-validation
else
    cf api ${CF_API_URL}
fi

cf auth \
    ${CF_ADMIN_USERNAME} \
    ${CF_ADMIN_PASSWORD}

# Purge the offering first so no service instances block broker deletion.
cf purge-service-offering \
    ${BROKER_NAME} \
    -f

cf delete-service-broker \
    ${BROKER_NAME} \
    -f
|
8f07900999422df3c2e74aa7e68c76e50d981bd8
|
Shell
|
asokolsky/libunistd
|
/cmaker/cmaker_class.sh
|
UTF-8
| 1,850
| 3.921875
| 4
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"MIT"
] |
permissive
|
#!/bin/bash
# cmaker_class.sh
# Created by Robin Rowe 2019/1/10
# License MIT open source
# Generates a C++ class skeleton (header, source, test) per class name given
# on the command line, from the *.h/.cpp/.test.cpp templates next to $0,
# and registers it in CMakeLists.txt / sources.cmake.
#set -x

# Template files are named after this script ($0 = script path).
h_file=$0.h
cpp_file=$0.cpp
test_file=$0.test.cpp
outstream=$0.OutStream.h
cmakelist=CMakeLists.txt
sources=sources.cmake
date=$(date +%Y-%m-%d)
args=("$@")
# Load the first line of ./LICENSE into the global $license (used by the
# file-header template) and echo it. Aborts the script when no LICENSE
# file exists in the current directory.
ReadLicenseFile()
{
	[[ -e LICENSE ]] || { echo "Missing LICENSE file"; exit 1; }
	read -r license < LICENSE
	echo "License: ${license}"
}
# Lower-case the first character of the global $lower (in place).
# BUG FIX: the original unconditionally added 40 (octal) to the character
# code; for input that already started with a lower-case letter this pushed
# the code past the octal digit range (e.g. 'a' -> '\181', an invalid octal
# escape) and corrupted the string. Only convert ASCII upper-case letters.
LowerCase()
{
	local c=${lower:0:1}
	local body=${lower:1}
	if [[ ${c} == [A-Z] ]]; then
		# 'X' in printf yields the octal code point; +40 (octal 0x20) maps
		# A-Z onto a-z without a carry across octal digits.
		local octal=$(printf '%o' "'${c}")
		octal=$((octal + 40))
		c=$(printf '%b' '\'${octal})
	fi
	lower="${c}${body}"
}
# In-place sed wrapper: apply expression $1 to file $2 via a temporary copy
# (portable sed has no uniform -i flag).
Sed()
{
	local expression=$1
	local target=$2
	sed "${expression}" ${target} > ${target}.tmp
	mv -f ${target}.tmp ${target}
}
# Instantiate a template: copy template $1 to output $2 and substitute the
# placeholders for class name $3 (OBJECT = lower-cased name, CLASS = name as
# given) plus the global ${date}, ${AUTHOR} and ${license} values.
# Sets the global $lower as a side effect (via LowerCase).
CreateFile()
{
	local template=$1
	local output=$2
	local class_name=$3
	local substitution
	lower=$3
	echo Creating ${output}...
	LowerCase
	cp ${template} ${output}
	# Each entry is "PLACEHOLDER|replacement"; Sed rewrites the file in place.
	for substitution in "OBJECT|${lower}" "CLASS|${class_name}" "DATE|${date}" "AUTHOR|${AUTHOR}" "LICENSE|${license}"; do
		Sed "s|${substitution}|g" ${output}
	done
}
# Append an add_executable/add_test pair for class $1 to the global
# ${cmakelist} file.
UpdateCmakeList()
{
	local class_name=$1
	echo "Updating ${cmakelist} with $class_name..."
	{
		printf 'add_executable(test_%s test/test_%s.cpp)\n' "$class_name" "$class_name"
		printf 'add_test(test_%s test_%s)\n' "$class_name" "$class_name"
	} >> ${cmakelist}
}
# Record class $1's .h/.cpp pair in the global ${sources} list file,
# creating the file on first use (seeded with its own name, matching the
# original behaviour).
UpdateCmakeSources()
{
	local class_name=$1
	[[ -e ${sources} ]] || echo ${sources} > ${sources}
	printf '%s.h\n%s.cpp\n' "${class_name}" "${class_name}" >> ${sources}
}
# Entry point: for every class name on the command line, generate the
# .h/.cpp/test files from the templates and register them with CMake.
# Requires $AUTHOR in the environment and a ./LICENSE file.
main()
{
	# $AUTHOR feeds the generated file headers; refuse to run without it.
	if [ -z "$AUTHOR" ]; then
		echo "In bash set your name: % export AUTHOR=\"Your Name\""
		exit 1
	fi
	ReadLicenseFile
	[[ -e test ]] || mkdir test
	local arg
	for arg in "${args[@]}"; do
		# Never clobber an existing class.
		if [[ -e ${arg}.h ]]; then
			echo "Skipping... ${arg}.h already exists!"
			continue
		fi
		CreateFile ${h_file} "./${arg}.h" ${arg}
		CreateFile ${cpp_file} "./${arg}.cpp" ${arg}
		CreateFile ${test_file} "./test/test_${arg}.cpp" ${arg}
		UpdateCmakeList $arg
		UpdateCmakeSources $arg
	done
}
main
| true
|
6617df0fdd84a50ba6d9acd373a63ef3cc952d83
|
Shell
|
NJCtony/vagrant-centos
|
/python36_setup.sh
|
UTF-8
| 558
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Provision a CentOS 7 box with Python 3.6 (from the IUS community repo)
# and create a project virtualenv at /srv/venv owned by the vagrant user.

echo "Updating yum..."
sudo yum -y update
sudo yum -y install yum-utils

echo "Installing CentOS Development Tools..."
# Compiler toolchain, needed later to build C extensions with pip.
sudo yum -y groupinstall development

echo "Installing Python 3.6..."
# The IUS release RPM adds the repository that ships python36u for CentOS 7.
sudo yum -y install https://centos7.iuscommunity.org/ius-release.rpm
sudo yum -y install python36u

echo "Installing pip..."
sudo yum -y install python36u-pip
# Headers required to compile native wheels inside the venv.
sudo yum -y install python36u-devel

echo "Creating Virtual Environment..."
cd /srv
sudo python3.6 -m venv venv
# sudo chmod -R 777 venv
# Grant the vagrant user ownership instead of a world-writable tree.
sudo chown vagrant venv/
sudo chmod -R ug+rwx venv/
| true
|
25a6cf3e23f29109a318852c373575f5ed43df71
|
Shell
|
ivo922/wow
|
/scripts/lint
|
UTF-8
| 440
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Lint addon sources: run luacheck on every *.lua and xmllint (validating
# against UI.xsd in the current directory) on every *.xml file below each
# directory given on the command line.
set -euo pipefail

if test -z "$*"; then
  echo "$0: missing argument. e.g. $0 <directory>..." >&2
  exit 1
fi

for arg in "$@"; do
  # BUG FIX: $arg was unquoted, so paths containing whitespace or glob
  # characters were word-split/expanded before reaching realpath.
  directory="$(realpath "$arg")"
  if ! test -d "$directory"; then
    echo "$directory is not a directory." >&2
    exit 1
  fi
  # -exec ... + batches files per invocation; a lint failure fails the
  # script via set -e.
  find "$directory" -name "*.lua" -exec luacheck {} +
  find "$directory" -name "*.xml" -exec xmllint --noout --schema UI.xsd {} +
done
| true
|
2333e6f5ccbdf2f2d6fb1ec1ebbfdd66566e3010
|
Shell
|
UCL-RITS/rcps-buildscripts
|
/star-2.7.3a_install
|
UTF-8
| 790
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash

###############################################
# Installing STAR 2.7.3a
#
# by Owain Kenway, 2015
# Updated for 2.7.3a January 2020

# Every setting can be overridden from the environment.
APPNAME=${APPNAME:-star}
VERSION=${VERSION:-2.7.3a}
COMPILER_TAG=${COMPILER_TAG:-gnu-4.9.2}
INSTALL_PREFIX=${INSTALL_PREFIX:-/shared/ucl/apps/$APPNAME/$VERSION/$COMPILER_TAG}
SRC_ARCHIVE=${SRC_ARCHIVE:-https://github.com/alexdobin/STAR/archive/$VERSION.tar.gz}

set -e

# Source the shared module_maker/require helpers that live in ./includes
# next to this script (includes_dir may be pre-set by the caller).
for i in ${includes_dir:=$(dirname $0 2>/dev/null)/includes}/{module_maker,require}_inc.sh
do
    . $i
done

# Load the compiler environment modules needed for the build.
require gcc-libs/4.9.2
require compilers/gnu/4.9.2

export PATH=$INSTALL_PREFIX/bin:$PATH

# Download, unpack and build inside the install prefix itself.
mkdir -p $INSTALL_PREFIX
cd $INSTALL_PREFIX

wget $SRC_ARCHIVE
tar -xvf $VERSION.tar.gz

cd STAR-$VERSION/source
make STAR
# Replace any previously shipped binaries with the freshly built one.
rm -rf ../bin/*
mv STAR ../bin

echo "Done."
| true
|
4604df9660a56522dfc52f8d34aa4b84107cc6cb
|
Shell
|
wickedviking/php56
|
/assets/provision.sh
|
UTF-8
| 724
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#!/bin/bash
# Docker image provisioning: build the PHP ldap and pdo_mysql extensions,
# install composer, then remove the provisioning assets.
# NOTE(review): the second shebang above is inert (bash treats it as a
# comment); only the first line selects the interpreter.

apt-get update
# libldap2-dev provides the headers needed to compile the ldap extension.
apt-get -y install libldap2-dev
rm -rf /var/lib/apt/lists/*

docker-php-ext-configure ldap --with-libdir=lib/x86_64-linux-gnu/
docker-php-ext-install -j$(nproc) ldap pdo_mysql

# Install composer, verifying the downloaded installer against a pinned
# SHA-384. NOTE(review): the hash pins one specific installer release and
# will fail verification (aborting the chain) once a new one ships.
php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');" \
    && php -r "if (hash_file('SHA384', 'composer-setup.php') === '544e09ee996cdf60ece3804abc52599c22b1f40f4323403c44d44fdfdd586475ca9813a858088ffbc1f233e9b180f061') { echo 'Installer verified'; } else { echo 'Installer corrupt'; unlink('composer-setup.php'); } echo PHP_EOL;" \
    && php composer-setup.php \
    && php -r "unlink('composer-setup.php');" \
    && mv composer.phar /usr/local/bin/composer

# clean up
rm -rf /assets
| true
|
4d967ed06d4196aea8c681c068c0e4eca69bffd4
|
Shell
|
Cokemonkey11/wurstdoktor
|
/publish/publish.sh
|
UTF-8
| 378
| 2.796875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build wurstdoktor and run it over the first 100 .wurst files of the
# sibling WurstStdlib2 checkout, producing one .yaml per source file while
# mirroring the package directory layout.
set -euxo pipefail

pushd ..
cargo build --release
popd

# For each input file: recreate its directory (path components 5 onward,
# minus the file name), log the file name to stderr, then pipe the file
# through wurstdoktor into a matching .yaml.
# NOTE(review): invokes the Windows-style `wurstdoktor.exe` binary name and
# hard-codes the `-f5-` path offset — both depend on the local layout.
find ../../WurstStdlib2/ -name '*.wurst' | \
	head -n 100 | \
	xargs -I {} bash -c " \
		mkdir -p \$( echo '{}' | cut -d'/' -f5- | rev | cut -d'/' -f2- | rev ) && \
		echo '{}' >&2 && \
		../target/release/wurstdoktor.exe < {} > \$( echo '{}' | cut -d '/' -f5- ).yaml \
	"
| true
|
55ed99b8a551a5439df8c955708e057b063add24
|
Shell
|
afonsopacifer/friday
|
/scripts/move.sh
|
UTF-8
| 822
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# Colors (ANSI escape sequences, stored as literal \033 text; render with
# printf %b, since plain bash `echo` does not interpret escapes)
NC='\033[0m'
GREEN='\033[0;32m'
GRAY='\033[0;37m'

# log
# BUG FIX: the original used plain `echo "\n${GRAY}..."`, which printed the
# backslash sequences literally instead of a newline and colors.
printf '%b' "\n${GRAY}Desktop files moved to ~/Documents/${NC}\n"
# move to Documents
# Move files into ~/Documents/<category>.
# Usage: move <file...> <category> — callers pass a glob that the shell
# expands at the call site, so the function may receive several files; the
# LAST argument is always the destination sub-directory.
# BUG FIX: the original used only $1/$2, so with a multi-match glob $2 was
# the second matched FILE (not the category) and only one file ever moved;
# it also mv'd into a possibly missing directory, silently renaming the file.
function move {
	local dest="${!#}"          # last argument: category sub-directory
	local file
	local moved=false
	# Everything except the last argument is a candidate file.
	for file in "${@:1:$#-1}"; do
		# An unmatched glob reaches us as the literal pattern; skip it.
		[ -e "$file" ] || continue
		mkdir -p ~/Documents/"$dest"
		mv -- "$file" ~/Documents/"$dest"
		moved=true
	done
	if [ "$moved" = true ]; then
		printf '%b\n' "All ${dest} files have been transferred to ${GREEN}~/Documents/${dest}${NC}"
	fi
}
# Each call below hands `move` a glob (expanded by the shell at the call
# site) followed by the destination sub-directory under ~/Documents/.
# Move all music files
move ~/Desktop/*.mp3 musics

# Move all image files
move ~/Desktop/*.jpg images
move ~/Desktop/*.png images
move ~/Desktop/*.gif images
move ~/Desktop/*.svg images

# Move all video files
move ~/Desktop/*.mkv videos
move ~/Desktop/*.mp4 videos

# Move all note files
move ~/Desktop/*.md notes
move ~/Desktop/*.txt notes

# Move all book files
move ~/Desktop/*.pdf books

# Move all psd files
move ~/Desktop/*.psd psd

# Move all others files
# Runs last, so it only catches files no earlier category claimed.
move ~/Desktop/*.* others
| true
|
03cb6c6d3fffe6cf53eda56ae003723407d85238
|
Shell
|
ShipSoft/FairShip
|
/housekeeping/create-coverage-report.sh
|
UTF-8
| 416
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Creates coverage report in separate directory `coverage`
# Arguments:
#   $1 - Path to the project root directory
#   $2 - Path to the current build directory

# Fail fast: previously a failed cmake step was silently ignored and the
# script still exited with popd's status.
set -e

PROJECT_ROOT_DIR=$1
BUILD_DIR=$2

# Quote path expansions so directories containing spaces do not word-split.
pushd "$BUILD_DIR"
# The directory may already exist from a previous run; that is fine.
mkdir coverage 2>/dev/null || true
cd coverage
cmake -Dcoverage=on "$PROJECT_ROOT_DIR" >/dev/null
cmake --build . --target runFairShipTests >/dev/null
cmake --build . --target coverage >/dev/null
popd
| true
|
f7ada167a0b2c6217c89b010f4cfd18e23c1c4ae
|
Shell
|
NadavTasher/Nockio
|
/git/configurations/git/hooks/post-update
|
UTF-8
| 442
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This hook script is used to deploy an application on Nockio.
#
# This hook is loaded by default.

# Build the application
# Executed from the repository's hooks directory, hence the ../ paths;
# compose output is captured into a log file next to the compose file.
echo "Nockio: Building Application"
docker-compose --file ../.compose.nockio up --build --detach --no-color 1> ../.log.compose.nockio

# Restart the proxy
# Talks directly to the Docker daemon over its unix socket (REST API).
echo "Nockio: Restarting Proxy"
curl --silent --unix-socket /var/run/docker.sock --request POST http://localhost/containers/proxy/restart 1> /dev/null
| true
|
133fd230cc5b4ed79be972500d6dc29281fab8fc
|
Shell
|
martainius/moa-project
|
/bash-files/analyse.sh
|
UTF-8
| 638
| 3.484375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# This is a short script which reads a given class file and writes each
# variable type into a separate file, allowing more in-depth analysis of
# the results.
# Usage: analyse.sh <class-file>

filename=$1

# Make sure the per-type output directory exists before redirecting into it
# (redirections into a missing ./gen failed silently before).
mkdir -p ./gen

for i in "1:cepheid" "2:cepheid_" "3:anocephe" "4:LPV" "5:DPV" "6:RR_Lyra" "7:Del_Sct" "8:EB"; do
	search_string=$i
	IFS=':' read -a split_string <<< "$search_string"
	echo ${split_string[0]}
	echo ${split_string[1]}
	export vartype=${split_string[1]}
	export search_string=$i
	# BUG FIX: the original had the corrupted token `$(unknown)` here; it is
	# restored to the input file. $filename is quoted so an empty/missing
	# argument cannot make awk fall back to reading stdin (which hung).
	echo "Searching for ${search_string} in $filename"
	awk '$3==ENVIRON["search_string"]{print}' "$filename" > ./gen/${vartype}.dat
done
| true
|
eae75d1d7cadd8144feb4680df9ac33b4f8a216f
|
Shell
|
Zhuvikin/voyager
|
/hack/docker/haproxy/1.7.6/sv/reloader/restart
|
UTF-8
| 484
| 3.203125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# runit "restart" script: rebuild the haproxy PEM bundle for every TLS
# secret, validate the config, then ask runit to reload haproxy.
set -o errexit
set -o nounset
set -o pipefail

source /etc/envvars

CERT_DIR=/etc/ssl/private/haproxy
mkdir -p /etc/ssl/private/haproxy

# http://stackoverflow.com/a/2108296
for dir in /srv/haproxy/secrets/*/
do
	# remove trailing /
	dir=${dir%*/}
	# just basename
	secret=${dir##*/}
	# haproxy expects certificate + key concatenated into a single PEM.
	cat $dir/tls.crt > $CERT_DIR/$secret.pem
	# BUG FIX: `echo '\n'` wrote a literal backslash-n line into the PEM
	# (bash echo does not interpret escapes); emit a real newline so the
	# certificate and key blocks are separated correctly.
	printf '\n' >> $CERT_DIR/$secret.pem
	cat $dir/tls.key >> $CERT_DIR/$secret.pem
done

# Validate the generated configuration before reloading (set -e aborts on
# failure, leaving the running haproxy untouched).
haproxy -c -f /etc/haproxy/haproxy.cfg
sv reload haproxy
| true
|
dd017cf68160caa37a12ae08717794475309cc71
|
Shell
|
toddself/dotfiles
|
/setup/linux/arch/install.sh
|
UTF-8
| 633
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
# Arch provisioning: install official packages from pkglist.txt, bootstrap
# pacaur from the AUR (building its auracle dependency first), then install
# the AUR packages listed in aur-pkglist.txt.
# NOTE(review): pushd/popd and <(...) are bashisms, so this effectively
# requires /bin/sh to be bash.

echo "-> Installing packages from arch"
# comm -12 keeps only names present both in the sync repos and pkglist.txt,
# so unknown entries cannot abort pacman.
sudo pacman -Sy --needed $(comm -12 <(pacman -Slq|sort) <(sort pkglist.txt)) > /dev/null

# ~/.pacaur doubles as the "already bootstrapped" marker.
if [ ! -d ${HOME}/.pacaur ]; then
  echo "-> Installing pacaur"
  mkdir $HOME/.pacaur
  pushd $HOME/.pacaur
  git clone https://aur.archlinux.org/auracle-git.git > /dev/null
  pushd $HOME/.pacaur/auracle-git
  # makepkg -sic: sync deps, install, clean build files.
  makepkg -sic > /dev/null
  popd
  git clone https://aur.archlinux.org/pacaur.git > /dev/null
  pushd $HOME/.pacaur/pacaur
  makepkg -sic > /dev/null
  popd
  popd
fi

echo "-> Installing packages from AUR"
pacaur -S --noedit --noconfirm --needed $(cat aur-pkglist.txt) > /dev/null
| true
|
0f73880f5a77d67a132bf653f68b841a3faa0334
|
Shell
|
gppfusco/mdw-api-registry
|
/mdw-api-registry.sh
|
UTF-8
| 1,007
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Print command-line usage (the expected positional arguments, one per
# line) to stdout.
function usage_error() {
	cat << 'EOF'
Usage: mdw-api-registry.sh
mdw_api_registry_jar_file
osb_conf_file
esb_conf_file
report_conf_file
repository_conf_file
EOF
}
# Run the full registry pipeline with the given jar and config files:
# init repository -> harvest OSB registry -> harvest ESB registry ->
# generate report -> update repository. The && chain stops the pipeline
# at the first failing java step.
# Arguments: $1 jar, $2 osb conf, $3 esb conf, $4 report conf, $5 repo conf.
# NOTE(review): the assignments below are intentionally global (no `local`).
function run_mdw_api_registry() {
	mdw_api_registry_jar=$1
	osb_conf=$2
	esb_conf=$3
	report_conf=$4
	repository_conf=$5

	echo "Starting with the following configuration:"
	echo "mdw_api_registry_jar = $mdw_api_registry_jar"
	echo "osb conf = $osb_conf"
	echo "esb conf = $esb_conf"
	echo "report conf = $report_conf"
	echo "repository conf = $repository_conf"

	java -jar $mdw_api_registry_jar repository -c=$repository_conf -a=init &&
	java -jar $mdw_api_registry_jar registry -c=$osb_conf -r=OSBApiRegistry -f &&
	java -jar $mdw_api_registry_jar registry -c=$esb_conf -r=ESBApiRegistry -f &&
	java -jar $mdw_api_registry_jar report -c=$report_conf &&
	java -jar $mdw_api_registry_jar repository -c=$repository_conf -a=update
}
# Dispatch: the tool needs exactly five arguments (jar + four config files).
if [ $# -eq 5 ]
then
	# BUG FIX: the original passed `$*`, which re-splits any argument that
	# contains whitespace; "$@" forwards each argument as a single word.
	run_mdw_api_registry "$@"
else
	usage_error
fi
| true
|
41e66b1d8a87d9c67e0d55dc9d6fbad932e7c57d
|
Shell
|
deajan/obackup
|
/dev/n_obackup.sh
|
UTF-8
| 75,883
| 3.453125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
#TODO: do we rotate encrypted files too or only temp files in storage dir (pull / local question)

###### Remote push/pull (or local) backup script for files & databases
PROGRAM="obackup"
AUTHOR="(C) 2013-2019 by Orsiris de Jong"
CONTACT="http://www.netpower.fr/obackup - ozy@netpower.fr"
PROGRAM_VERSION=2.1-dev-postRC1
PROGRAM_BUILD=2020050402
IS_STABLE=true

CONFIG_FILE_REVISION_REQUIRED=2.1

#### Execution order							#__WITH_PARANOIA_DEBUG
# GetLocalOS							#__WITH_PARANOIA_DEBUG
# InitLocalOSDependingSettings					#__WITH_PARANOIA_DEBUG
# CheckRunningInstances						#__WITH_PARANOIA_DEBUG
# PreInit							#__WITH_PARANOIA_DEBUG
# Init								#__WITH_PARANOIA_DEBUG
# CheckEnvironment						#__WITH_PARANOIA_DEBUG
# Postinit							#__WITH_PARANOIA_DEBUG
# CheckCurrentConfig						#__WITH_PARANOIA_DEBUG
# GetRemoteOS							#__WITH_PARANOIA_DEBUG
# InitRemoteOSDependingSettings					#__WITH_PARANOIA_DEBUG
# RunBeforeHook							#__WITH_PARANOIA_DEBUG
# Main								#__WITH_PARANOIA_DEBUG
#	ListDatabases						#__WITH_PARANOIA_DEBUG
#	ListRecursiveBackupDirectories				#__WITH_PARANOIA_DEBUG
#	GetDirectoriesSize					#__WITH_PARANOIA_DEBUG
#	CreateSrorageDirectories				#__WITH_PARANOIA_DEBUG
#	CheckDiskSpace						#__WITH_PARANOIA_DEBUG
#	RotateBackups						#__WITH_PARANOIA_DEBUG
#	BackupDatabases						#__WITH_PARANOIA_DEBUG
#	RotateBackups						#__WITH_PARANOIA_DEBUG
#	RsyncPatterns						#__WITH_PARANOIA_DEBUG
#	FilesBackup						#__WITH_PARANOIA_DEBUG

# `include` lines are expanded by the project's bootstrap/merge step, which
# splices the ofunctions library subsets into the final script.
include #### OFUNCTIONS FULL SUBSET ####

# If using "include" statements, make sure the script does not get executed unless it's loaded by bootstrap
include #### _OFUNCTIONS_BOOTSTRAP SUBSET ####
[ "$_OFUNCTIONS_BOOTSTRAP" != true ] && echo "Please use bootstrap.sh to load this dev version of $(basename $0)" && exit 1

_LOGGER_PREFIX="time"

## Working directory for partial downloads
PARTIAL_DIR=".obackup_workdir_partial"

## File extension for encrypted files
CRYPT_FILE_EXTENSION=".$PROGRAM.gpg"

# List of runtime created global variables
# $SQL_DISK_SPACE, disk space available on target for sql backups
# $FILE_DISK_SPACE, disk space available on target for file backups
# $SQL_BACKUP_TASKS, list of all databases to backup, space separated
# $SQL_EXCLUDED_TASKS, list of all database to exclude from backup, space separated
# $FILE_BACKUP_TASKS list of directories to backup, found in config file
# $FILE_RECURSIVE_BACKUP_TASKS, list of directories to backup, computed from config file recursive list
# $FILE_RECURSIVE_EXCLUDED_TASKS, list of all directories excluded from recursive list
# $FILE_SIZE_LIST, list of all directories to include in GetDirectoriesSize, enclosed by escaped doublequotes

# Assume that anything can be backed up unless proven otherwise
CAN_BACKUP_SQL=true
CAN_BACKUP_FILES=true
# Interrupt handler: warn that a manual abort may leave backups half-done,
# then exit with the "warnings" status code.
TrapStop() {
	Logger "/!\ Manual exit of backup script. Backups may be in inconsistent state." "WARN"
	exit 2
}
# EXIT handler: aggregate warn/error flags raised by subprocesses (Logger
# drops flag files in RUN_DIR), send an alert e-mail when needed, run the
# post hook, remove the instance lock file and kill remaining children.
# Exit codes: 0 = clean, 1 = finished with errors, 2 = finished with warnings.
function TrapQuit {
	local exitcode

	# Get ERROR / WARN alert flags from subprocesses that call Logger
	if [ -f "$RUN_DIR/$PROGRAM.Logger.warn.$SCRIPT_PID.$TSTAMP" ]; then
		WARN_ALERT=true
	fi
	if [ -f "$RUN_DIR/$PROGRAM.Logger.error.$SCRIPT_PID.$TSTAMP" ]; then
		ERROR_ALERT=true
	fi

	if [ $ERROR_ALERT == true ]; then
		# RUN_AFTER_CMD_ON_ERROR decides whether the post hook still runs
		# on a failed backup.
		if [ "$RUN_AFTER_CMD_ON_ERROR" == true ]; then
			RunAfterHook
		fi
		Logger "$PROGRAM finished with errors." "ERROR"
		SendAlert
		exitcode=1
	elif [ $WARN_ALERT == true ]; then
		if [ "$RUN_AFTER_CMD_ON_ERROR" == true ]; then
			RunAfterHook
		fi
		Logger "$PROGRAM finished with warnings." "WARN"
		SendAlert
		exitcode=2
	else
		RunAfterHook
		Logger "$PROGRAM finshed." "ALWAYS"
		exitcode=0
	fi

	# Release the single-instance lock written by CheckRunningInstances.
	if [ -f "$RUN_DIR/$PROGRAM.$INSTANCE_ID" ]; then
		rm -f "$RUN_DIR/$PROGRAM.$INSTANCE_ID"
	fi

	CleanUp
	KillChilds $$ > /dev/null 2>&1

	exit $exitcode
}
# Verify the external programs required by the configured backup mode.
# Missing ssh/sshpass/pgrep abort immediately; a missing mysql/mysqldump or
# rsync only clears CAN_BACKUP_SQL / CAN_BACKUP_FILES so the other half of
# the backup can still run. Encryption tooling is checked separately.
function CheckEnvironment {
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	if [ "$REMOTE_OPERATION" == true ]; then
		if ! type ssh > /dev/null 2>&1 ; then
			Logger "ssh not present. Cannot start backup." "CRITICAL"
			exit 1
		fi

		if [ "$SSH_PASSWORD_FILE" != "" ] && ! type sshpass > /dev/null 2>&1 ; then
			Logger "sshpass not present. Cannot use password authentication." "CRITICAL"
			exit 1
		fi
	else
		# Local mode needs the mysql client tools on this host; in remote
		# mode they are checked on the remote end instead.
		if [ "$SQL_BACKUP" != false ]; then
			if ! type mysqldump > /dev/null 2>&1 ; then
				Logger "mysqldump not present. Cannot backup SQL." "CRITICAL"
				CAN_BACKUP_SQL=false
			fi
			if ! type mysql > /dev/null 2>&1 ; then
				Logger "mysql not present. Cannot backup SQL." "CRITICAL"
				CAN_BACKUP_SQL=false
			fi
		fi
	fi

	if [ "$FILE_BACKUP" != false ]; then
		if ! type rsync > /dev/null 2>&1 ; then
			Logger "rsync not present. Cannot backup files." "CRITICAL"
			CAN_BACKUP_FILES=false
		fi
	fi

	if [ "$ENCRYPTION" == true ]; then
		CheckCryptEnvironnment
	fi

	# pgrep is used by the process management helpers; hard requirement.
	if ! type pgrep > /dev/null 2>&1 ; then
		Logger "pgrep not present. $0 cannot start." "CRITICAL"
		exit 1
	fi
}
# Select the GPG binary used for backup encryption.
# Prefers gpg2, falls back to gpg, and disables file backups when neither
# is installed. Sets the global CRYPT_TOOL; may clear CAN_BACKUP_FILES.
function CheckCryptEnvironnment {
	if type gpg2 > /dev/null 2>&1 ; then
		CRYPT_TOOL=gpg2
	elif type gpg > /dev/null 2>&1 ; then
		Logger "Program gpg2 not present, falling back to gpg." "NOTICE"
		CRYPT_TOOL=gpg
	else
		Logger "Programs gpg2 nor gpg not present. Cannot encrypt backup files." "CRITICAL"
		CAN_BACKUP_FILES=false
	fi
}
# Validate the loaded configuration file: presence of INSTANCE_ID, sane
# boolean values (yes/no/true/false), numeric settings, storage paths and
# authentication material. Fatal problems exit 1; a missing directory list
# merely clears CAN_BACKUP_FILES.
function CheckCurrentConfig {
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	local test
	local booleans
	local num_vars

	if [ "$INSTANCE_ID" == "" ]; then
		Logger "No INSTANCE_ID defined in config file." "CRITICAL"
		exit 1
	fi

	# v2 config will use true / false instead of yes / no
	# Check all variables that should contain "yes" or "no", true or false
	declare -a booleans=(SQL_BACKUP FILE_BACKUP ENCRYPTION CREATE_DIRS KEEP_ABSOLUTE_PATHS GET_BACKUP_SIZE SSH_COMPRESSION SSH_IGNORE_KNOWN_HOSTS REMOTE_HOST_PING SUDO_EXEC DATABASES_ALL PRESERVE_PERMISSIONS PRESERVE_OWNER PRESERVE_GROUP PRESERVE_EXECUTABILITY PRESERVE_ACL PRESERVE_XATTR COPY_SYMLINKS KEEP_DIRLINKS PRESERVE_HARDLINKS RSYNC_COMPRESS PARTIAL DELETE_VANISHED_FILES DELTA_COPIES ROTATE_SQL_BACKUPS ROTATE_FILE_BACKUPS STOP_ON_CMD_ERROR RUN_AFTER_CMD_ON_ERROR)
	# The check is built as a string with the variable NAME interpolated
	# and its VALUE left escaped, then eval'd so $i is dereferenced.
	for i in "${booleans[@]}"; do
		test="if [ \"\$$i\" != \"yes\" ] && [ \"\$$i\" != \"no\" ] && [ \"\$$i\" != true ] && [ \"\$$i\" != false ]; then Logger \"Bogus $i value [\$$i] defined in config file. Correct your config file or update it with the update script if using and old version.\" \"CRITICAL\"; exit 1; fi"
		eval "$test"
	done

	if [ "$BACKUP_TYPE" != "local" ] && [ "$BACKUP_TYPE" != "pull" ] && [ "$BACKUP_TYPE" != "push" ]; then
		Logger "Bogus BACKUP_TYPE value in config file." "CRITICAL"
		exit 1
	fi

	# Check all variables that should contain a numerical value >= 0
	declare -a num_vars=(BACKUP_SIZE_MINIMUM SQL_WARN_MIN_SPACE FILE_WARN_MIN_SPACE SOFT_MAX_EXEC_TIME_DB_TASK HARD_MAX_EXEC_TIME_DB_TASK COMPRESSION_LEVEL SOFT_MAX_EXEC_TIME_FILE_TASK HARD_MAX_EXEC_TIME_FILE_TASK BANDWIDTH SOFT_MAX_EXEC_TIME_TOTAL HARD_MAX_EXEC_TIME_TOTAL ROTATE_SQL_COPIES ROTATE_FILE_COPIES KEEP_LOGGING MAX_EXEC_TIME_PER_CMD_BEFORE MAX_EXEC_TIME_PER_CMD_AFTER)
	# NOTE: $(IsNumericExpand ...) runs while the test string is being
	# BUILT (command substitution inside double quotes), not at eval time.
	for i in "${num_vars[@]}"; do
		test="if [ $(IsNumericExpand \"\$$i\") -eq 0 ]; then Logger \"Bogus $i value [\$$i] defined in config file. Correct your config file or update it with the update script if using and old version.\" \"CRITICAL\"; exit 1; fi"
		eval "$test"
	done

	if [ "$FILE_BACKUP" == true ]; then
		if [ "$DIRECTORY_LIST" == "" ] && [ "$RECURSIVE_DIRECTORY_LIST" == "" ]; then
			Logger "No directories specified in config file, no files to backup." "ERROR"
			CAN_BACKUP_FILES=false
		fi
	fi

	# NOTE(review): this check exits when the key file is missing even if a
	# password file is configured, making the combined key-or-password
	# check at the bottom of this function unreachable — confirm intended.
	if [ "$REMOTE_OPERATION" == true ] && [ ! -f "$SSH_RSA_PRIVATE_KEY" ]; then
		Logger "Cannot find rsa private key [$SSH_RSA_PRIVATE_KEY]. Cannot connect to remote system." "CRITICAL"
		exit 1
	fi

	#WIP: Encryption use key file instead of recipient ?
	#if [ ! -f "$ENCRYPT_GPG_PYUBKEY" ]; then
	#	Logger "Cannot find gpg pubkey [$ENCRYPT_GPG_PUBKEY]. Cannot encrypt backup files." "CRITICAL"
	#	exit 1
	#fi

	if [ "$SQL_BACKUP" == true ] && [ "$SQL_STORAGE" == "" ]; then
		Logger "SQL_STORAGE not defined." "CRITICAL"
		exit 1
	fi

	if [ "$FILE_BACKUP" == true ] && [ "$FILE_STORAGE" == "" ]; then
		Logger "FILE_STORAGE not defined." "CRITICAL"
		exit 1
	fi

	if [ "$ENCRYPTION" == true ]; then
		if [ "$CRYPT_STORAGE" == "" ]; then
			Logger "CRYPT_STORAGE not defined." "CRITICAL"
			exit 1
		fi

		if [ "$GPG_RECIPIENT" == "" ]; then
			Logger "No GPG recipient defined." "CRITICAL"
			exit 1
		fi
	fi

	if [ "$REMOTE_OPERATION" == true ] && ([ ! -f "$SSH_RSA_PRIVATE_KEY" ] && [ ! -f "$SSH_PASSWORD_FILE" ]); then
		Logger "Cannot find rsa private key [$SSH_RSA_PRIVATE_KEY] nor password file [$SSH_PASSWORD_FILE]. No authentication method provided." "CRITICAL"
		exit 1
	fi
}
# v2 config compatibility shim: rewrite every boolean config variable that
# still holds the legacy "yes"/"no" values to true/false, in place via the
# variable name. Unset variables and values already true/false are left
# untouched.
function UpdateBooleans {
	local varName

	local -a booleans=(SQL_BACKUP FILE_BACKUP ENCRYPTION CREATE_DIRS KEEP_ABSOLUTE_PATHS GET_BACKUP_SIZE SSH_COMPRESSION SSH_IGNORE_KNOWN_HOSTS REMOTE_HOST_PING SUDO_EXEC DATABASES_ALL PRESERVE_PERMISSIONS PRESERVE_OWNER PRESERVE_GROUP PRESERVE_EXECUTABILITY PRESERVE_ACL PRESERVE_XATTR COPY_SYMLINKS KEEP_DIRLINKS PRESERVE_HARDLINKS RSYNC_COMPRESS PARTIAL DELETE_VANISHED_FILES DELTA_COPIES ROTATE_SQL_BACKUPS ROTATE_FILE_BACKUPS STOP_ON_CMD_ERROR RUN_AFTER_CMD_ON_ERROR)

	for varName in "${booleans[@]}"; do
		# ${!varName} dereferences the name; printf -v writes back to it.
		case "${!varName}" in
			"yes") printf -v "$varName" '%s' "true" ;;
			"no")  printf -v "$varName" '%s' "false" ;;
		esac
	done
}
# Ensure no other obackup run with the same INSTANCE_ID is active.
# Reads the previous PID from the lock file and exits 1 if that PID is
# still alive; otherwise records the current SCRIPT_PID in the lock file.
# Globals read: RUN_DIR, PROGRAM, INSTANCE_ID, SCRIPT_PID.
function CheckRunningInstances {
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	if [ -f "$RUN_DIR/$PROGRAM.$INSTANCE_ID" ]; then
		pid="$(head -c16384 "$RUN_DIR/$PROGRAM.$INSTANCE_ID")"
		# BUG FIX: the old `ps aux | awk | grep $pid` matched substrings
		# (pid 123 matched 1234) and an empty lock file matched everything.
		# Query the exact PID instead, guarding against an empty value.
		if [ "$pid" != "" ] && ps -p "$pid" > /dev/null 2>&1; then
			Logger "Another instance [$INSTANCE_ID] of obackup is already running." "CRITICAL"
			exit 1
		fi
	fi
	echo $SCRIPT_PID > "$RUN_DIR/$PROGRAM.$INSTANCE_ID"
}
# List local MySQL schemas with their sizes (KiB), one "<name> <size>"
# pair per line, into the function's run file. Returns 1 on failure.
function _ListDatabasesLocal {
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	local retval
	local sqlCmd

	# -Bse: batch, silent, execute — tab-separated rows without headers.
	sqlCmd="mysql -u $SQL_USER -Bse 'SELECT table_schema, round(sum( data_length + index_length ) / 1024) FROM information_schema.TABLES GROUP by table_schema;' > \"$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP\" 2>&1"
	Logger "Launching command [$sqlCmd]." "DEBUG"
	eval "$sqlCmd" &
	# Run in background under the generic soft/hard timeout supervisor.
	ExecTasks $! "${FUNCNAME[0]}" false 0 0 $SOFT_MAX_EXEC_TIME_TOTAL $HARD_MAX_EXEC_TIME_TOTAL true $SLEEP_TIME $KEEP_LOGGING
	retval=$?
	if [ $retval -eq 0 ]; then
		Logger "Listing databases succeeded." "NOTICE"
	else
		Logger "Listing databases failed." "ERROR"
		_LOGGER_SILENT=true Logger "Command was [$sqlCmd]." "WARN"
		if [ -f "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" ]; then
			Logger "Truncated output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP")" "ERROR"
		fi
		return 1
	fi
}
# Remote counterpart of _ListDatabasesLocal: run the same schema/size query
# over ssh and capture the output into this function's run file.
# Returns the ssh command's exit code (non-zero on failure).
function _ListDatabasesRemote {
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	local sqlCmd
	local retval

	CheckConnectivity3rdPartyHosts
	CheckConnectivityRemoteHost
	# _REMOTE_TOKEN authenticates this invocation against the remote helper.
	sqlCmd="$SSH_CMD \"env _REMOTE_TOKEN=$_REMOTE_TOKEN mysql -u $SQL_USER -Bse 'SELECT table_schema, round(sum( data_length + index_length ) / 1024) FROM information_schema.TABLES GROUP by table_schema;'\" > \"$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP\" 2>&1"
	Logger "Command output: $sqlCmd" "DEBUG"
	eval "$sqlCmd" &
	ExecTasks $! "${FUNCNAME[0]}" false 0 0 $SOFT_MAX_EXEC_TIME_TOTAL $HARD_MAX_EXEC_TIME_TOTAL true $SLEEP_TIME $KEEP_LOGGING
	retval=$?
	if [ $retval -eq 0 ]; then
		Logger "Listing databases succeeded." "NOTICE"
	else
		Logger "Listing databases failed." "ERROR"
		Logger "Command output: $sqlCmd" "WARN"
		if [ -f "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" ]; then
			Logger "Truncated output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP")" "ERROR"
		fi
		return $retval
	fi
}
# Build the database work lists from the local/remote schema listing:
# fills SQL_BACKUP_TASKS (space separated), SQL_EXCLUDED_TASKS and
# TOTAL_DATABASES_SIZE according to DATABASES_ALL / DATABASES_LIST /
# DATABASES_ALL_EXCLUDE_LIST. Clears CAN_BACKUP_SQL when listing fails;
# logs CRITICAL for explicitly requested databases that are missing.
function ListDatabases {
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	local outputFile	# Return of subfunction
	local dbName
	local dbSize
	local dbBackup
	local missingDatabases=false

	local dbArray

	if [ $CAN_BACKUP_SQL == false ]; then
		Logger "Cannot list databases." "ERROR"
		return 1
	fi

	Logger "Listing databases." "NOTICE"

	# push backups list on the local side, pull backups list remotely.
	if [ "$BACKUP_TYPE" == "local" ] || [ "$BACKUP_TYPE" == "push" ]; then
		_ListDatabasesLocal
		if [ $? -ne 0 ]; then
			outputFile=""
		else
			outputFile="$RUN_DIR/$PROGRAM._ListDatabasesLocal.$SCRIPT_PID.$TSTAMP"
		fi
	elif [ "$BACKUP_TYPE" == "pull" ]; then
		_ListDatabasesRemote
		if [ $? -ne 0 ]; then
			outputFile=""
		else
			outputFile="$RUN_DIR/$PROGRAM._ListDatabasesRemote.$SCRIPT_PID.$TSTAMP"
		fi
	fi

	if [ -f "$outputFile" ] && [ $CAN_BACKUP_SQL == true ]; then
		while read -r line; do
			# Each line is "<schema> <size-in-KiB>".
			while read -r name size; do dbName=$name; dbSize=$size; done <<< "$line"

			if [ "$DATABASES_ALL" == true ]; then
				# Back up everything except entries from the exclude list.
				dbBackup=true
				IFS=$PATH_SEPARATOR_CHAR read -r -a dbArray <<< "$DATABASES_ALL_EXCLUDE_LIST"
				for j in "${dbArray[@]}"; do
					if [ "$dbName" == "$j" ]; then
						dbBackup=false
					fi
				done
			else
				# Back up only the explicitly listed databases.
				dbBackup=false
				IFS=$PATH_SEPARATOR_CHAR read -r -a dbArray <<< "$DATABASES_LIST"
				for j in "${dbArray[@]}"; do
					if [ "$dbName" == "$j" ]; then
						dbBackup=true
					fi
				done
				if [ $dbBackup == false ]; then
					missingDatabases=true
				fi
			fi

			if [ $dbBackup == true ]; then
				if [ "$SQL_BACKUP_TASKS" != "" ]; then
					SQL_BACKUP_TASKS="$SQL_BACKUP_TASKS $dbName"
				else
					SQL_BACKUP_TASKS="$dbName"
				fi
				TOTAL_DATABASES_SIZE=$((TOTAL_DATABASES_SIZE+dbSize))
			else
				SQL_EXCLUDED_TASKS="$SQL_EXCLUDED_TASKS $dbName"
			fi
		done < "$outputFile"

		# Warn about explicitly requested databases absent from the server.
		if [ $missingDatabases == true ]; then
			IFS=$PATH_SEPARATOR_CHAR read -r -a dbArray <<< "$DATABASES_LIST"
			for i in "${dbArray[@]}"; do
				if ! grep "$i" "$outputFile" > /dev/null 2>&1; then
					Logger "Missing database [$i]." "CRITICAL"
				fi
			done
		fi

		Logger "Database backup list: $SQL_BACKUP_TASKS" "DEBUG"
		Logger "Database exclude list: $SQL_EXCLUDED_TASKS" "DEBUG"
	else
		Logger "Will not execute database backup." "ERROR"
		CAN_BACKUP_SQL=false
	fi
}
# Enumerate first-level subdirectories of every entry of
# RECURSIVE_DIRECTORY_LIST on the local host, appending them to this
# function's run file. Returns 0 = all succeeded, 2 = partial failures,
# 1 = everything failed.
function _ListRecursiveBackupDirectoriesLocal {
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	local cmd
	local directories
	local directory
	local retval
	local successfulRun=false
	local failuresPresent=false

	IFS=$PATH_SEPARATOR_CHAR read -r -a directories <<< "$RECURSIVE_DIRECTORY_LIST"
	for directory in "${directories[@]}"; do
		# Make sure there is only one trailing slash
		directory="${directory%/}/"
		# No sudo here, assuming you should have all necessary rights for local checks
		# -L follows symlinks; depth 1/1 yields only the immediate children.
		cmd="$FIND_CMD -L $directory -mindepth 1 -maxdepth 1 -type d >> \"$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP\" 2> \"$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP\""
		Logger "Launching command [$cmd]." "DEBUG"
		eval "$cmd"
		retval=$?
		if  [ $retval -ne 0 ]; then
			Logger "Could not enumerate directories in [$directory]." "ERROR"
			_LOGGER_SILENT=true Logger "Command was [$cmd]." "WARN"
			if [ -f "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" ]; then
				Logger "Truncated output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP")" "ERROR"
			fi
			if [ -f "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP" ]; then
				Logger "Truncated error output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP")" "ERROR"
			fi
			failuresPresent=true
		else
			successfulRun=true
		fi
	done

	# Collapse the per-directory outcomes into the 0/2/1 convention.
	if [ $successfulRun == true ] && [ $failuresPresent == true ]; then
		return 2
	elif [ $successfulRun == true ] && [ $failuresPresent == false ]; then
		return 0
	else
		return 1
	fi
}
# Remote counterpart of _ListRecursiveBackupDirectoriesLocal: ship a
# self-contained script over ssh (the quoted 'ENDSSH' heredoc, executed by
# bash, optionally under sudo) that enumerates first-level subdirectories
# of RECURSIVE_DIRECTORY_LIST. Remote stdout/stderr are captured into this
# function's run files; returns the remote 0/2/1 status.
function _ListRecursiveBackupDirectoriesRemote {
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	local retval

	# Environment needed by the remote subset is passed via env; the
	# heredoc delimiter is quoted so nothing expands locally.
$SSH_CMD env _REMOTE_TOKEN=$_REMOTE_TOKEN \
env _DEBUG="'$_DEBUG'" env _PARANOIA_DEBUG="'$_PARANOIA_DEBUG'" env _LOGGER_SILENT="'$_LOGGER_SILENT'" env _LOGGER_VERBOSE="'$_LOGGER_VERBOSE'" env _LOGGER_PREFIX="'$_LOGGER_PREFIX'" env _LOGGER_ERR_ONLY="'$_LOGGER_ERR_ONLY'" \
env _REMOTE_EXECUTION="true" env PROGRAM="'$PROGRAM'" env SCRIPT_PID="'$SCRIPT_PID'" env TSTAMP="'$TSTAMP'" \
env RECURSIVE_DIRECTORY_LIST="'$RECURSIVE_DIRECTORY_LIST'" env PATH_SEPARATOR_CHAR="'$PATH_SEPARATOR_CHAR'" \
env REMOTE_FIND_CMD="'$REMOTE_FIND_CMD'" $COMMAND_SUDO' bash -s' << 'ENDSSH' > "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" 2> "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP"
_REMOTE_TOKEN="(o_0)"
include #### DEBUG SUBSET ####
include #### TrapError SUBSET ####
include #### RemoteLogger SUBSET ####

function _ListRecursiveBackupDirectoriesRemoteSub {
	local directories
	local directory
	local retval
	local successfulRun=false
	local failuresPresent=false
	local cmd

	IFS=$PATH_SEPARATOR_CHAR read -r -a directories <<< "$RECURSIVE_DIRECTORY_LIST"
	for directory in "${directories[@]}"; do
		# Make sure there is only one trailing slash
		directory="${directory%/}/"
		cmd="$REMOTE_FIND_CMD -L \"$directory\" -mindepth 1 -maxdepth 1 -type d"
		Logger "Launching command [$cmd]." "DEBUG"
		eval $cmd
		retval=$?
		if  [ $retval -ne 0 ]; then
			RemoteLogger "Could not enumerate directories in [$directory]." "ERROR"
			RemoteLogger "Command was [$cmd]." "WARN"
			failuresPresent=true
		else
			successfulRun=true
		fi
	done
	if [ $successfulRun == true ] && [ $failuresPresent == true ]; then
		return 2
	elif [ $successfulRun == true ] && [ $failuresPresent == false ]; then
		return 0
	else
		return 1
	fi
}

	_ListRecursiveBackupDirectoriesRemoteSub
	exit $?
ENDSSH
	retval=$?
	if [ $retval -ne 0 ]; then
		if [ -f "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" ]; then
			Logger "Truncated output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP")" "ERROR"
		fi
		if [ -f "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP" ]; then
			Logger "Truncated error output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP")" "ERROR"
		fi
	fi
	return $retval
}
# Build the file-backup work lists: expand RECURSIVE_DIRECTORY_LIST via the
# local/remote enumerators (filtering RECURSIVE_EXCLUDE_LIST) and append
# the plain DIRECTORY_LIST. Fills the globals FILE_RECURSIVE_BACKUP_TASKS,
# FILE_RECURSIVE_EXCLUDED_TASKS, FILE_BACKUP_TASKS and FILE_SIZE_LIST
# (the latter single-quoted for later du invocation).
function ListRecursiveBackupDirectories {
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	local output_file
	local file_exclude
	local excluded
	local fileArray

	if [ "$RECURSIVE_DIRECTORY_LIST" != "" ]; then
		# Return values from subfunctions can be 0 (no error), 1 (only errors) or 2 (some errors). Do process output except on 1 return code
		Logger "Listing directories to backup." "NOTICE"
		if [ "$BACKUP_TYPE" == "local" ] || [ "$BACKUP_TYPE" == "push" ]; then
			_ListRecursiveBackupDirectoriesLocal &
			ExecTasks $! "${FUNCNAME[0]}" false 0 0 $SOFT_MAX_EXEC_TIME_TOTAL $HARD_MAX_EXEC_TIME_TOTAL true $SLEEP_TIME $KEEP_LOGGING
			if [ $? -eq 1 ]; then
				output_file=""
			else
				output_file="$RUN_DIR/$PROGRAM._ListRecursiveBackupDirectoriesLocal.$SCRIPT_PID.$TSTAMP"
			fi
		elif [ "$BACKUP_TYPE" == "pull" ]; then
			_ListRecursiveBackupDirectoriesRemote &
			ExecTasks $! "${FUNCNAME[0]}" false 0 0 $SOFT_MAX_EXEC_TIME_TOTAL $HARD_MAX_EXEC_TIME_TOTAL true $SLEEP_TIME $KEEP_LOGGING
			if [ $? -eq 1 ]; then
				output_file=""
			else
				output_file="$RUN_DIR/$PROGRAM._ListRecursiveBackupDirectoriesRemote.$SCRIPT_PID.$TSTAMP"
			fi
		fi

		if [ -f "$output_file" ]; then
			# One enumerated directory per line; drop exact matches from
			# the exclude list, route the rest into the task/size lists.
			while read -r line; do
				file_exclude=0
				IFS=$PATH_SEPARATOR_CHAR read -r -a fileArray <<< "$RECURSIVE_EXCLUDE_LIST"
				for excluded in "${fileArray[@]}"; do
					if [ "$excluded" == "$line" ]; then
						file_exclude=1
					fi
				done

				if [ $file_exclude -eq 0 ]; then
					if [ "$FILE_RECURSIVE_BACKUP_TASKS" == "" ]; then
						FILE_SIZE_LIST="'$line'"
						FILE_RECURSIVE_BACKUP_TASKS="$line"
					else
						FILE_SIZE_LIST="$FILE_SIZE_LIST '$line'"
						FILE_RECURSIVE_BACKUP_TASKS="$FILE_RECURSIVE_BACKUP_TASKS$PATH_SEPARATOR_CHAR$line"
					fi
				else
					FILE_RECURSIVE_EXCLUDED_TASKS="$FILE_RECURSIVE_EXCLUDED_TASKS$PATH_SEPARATOR_CHAR$line"
				fi
			done < "$output_file"
		fi
	fi

	# The non-recursive directories are appended verbatim.
	if [ "$DIRECTORY_LIST" != "" ]; then
		IFS=$PATH_SEPARATOR_CHAR read -r -a fileArray <<< "$DIRECTORY_LIST"
		for directory in "${fileArray[@]}"; do
			if [ "$FILE_SIZE_LIST" == "" ]; then
				FILE_SIZE_LIST="'$directory'"
			else
				FILE_SIZE_LIST="$FILE_SIZE_LIST '$directory'"
			fi

			if [ "$FILE_BACKUP_TASKS" == "" ]; then
				FILE_BACKUP_TASKS="$directory"
			else
				FILE_BACKUP_TASKS="$FILE_BACKUP_TASKS$PATH_SEPARATOR_CHAR$directory"
			fi
		done
	fi
}
# Compute the cumulated size (KiB) of the given directory list with
# `du -cs`, storing the grand total in the global TOTAL_FILES_SIZE
# (-1 when it could not be determined).
# $1: space-separated, single-quoted directory list (see FILE_SIZE_LIST).
function _GetDirectoriesSizeLocal {
	local dirList="${1}"
	__CheckArguments 1 $# "$@"	#__WITH_PARANOIA_DEBUG

	local cmd
	local retval

	# No sudo here, assuming you should have all the necessary rights
	# This is not pretty, but works with all supported systems
	cmd="du -cs $dirList | tail -n1 | cut -f1 > \"$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP\" 2> \"$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP\""
	Logger "Launching command [$cmd]." "DEBUG"
	eval "$cmd" &
	ExecTasks $! "${FUNCNAME[0]}" false 0 0 $SOFT_MAX_EXEC_TIME_TOTAL $HARD_MAX_EXEC_TIME_TOTAL true $SLEEP_TIME $KEEP_LOGGING
	# $cmd will return 0 even if some errors found, so we need to check if there is an error output
	retval=$?
	if  [ $retval -ne 0 ] || [ -s "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP" ]; then
		Logger "Could not get files size for some or all local directories." "ERROR"
		_LOGGER_SILENT=true Logger "Command was [$cmd]." "WARN"
		if [ -f "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" ]; then
			Logger "Truncated output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP")" "ERROR"
		fi
		if [ -f "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP" ]; then
			Logger "Truncated error output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP")" "ERROR"
		fi
	else
		Logger "File size fetched successfully." "NOTICE"
	fi

	# A partial du run may still have produced a usable total.
	if [ -s "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" ]; then
		TOTAL_FILES_SIZE="$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP")"
		if [ $(IsInteger "$TOTAL_FILES_SIZE") -eq 0 ]; then
			TOTAL_FILES_SIZE="$(HumanToNumeric "$TOTAL_FILES_SIZE")"
		fi
	else
		TOTAL_FILES_SIZE=-1
	fi
}
# Remote counterpart of _GetDirectoriesSizeLocal: runs "du -cs" on the remote
# host over SSH and publishes the cumulated size in the global
# TOTAL_FILES_SIZE (-1 when the size could not be determined).
# dirList: space separated list of quoted remote paths.
function _GetDirectoriesSizeRemote {
local dirList="${1}"
__CheckArguments 1 $# "$@" #__WITH_PARANOIA_DEBUG
local cmd
local retval
# Error output is different from stdout because not all files in list may fail at once
# The quoted heredoc below is executed remotely; needed values are passed
# via env. NOTE(review): the 'include ####' lines appear to be placeholders
# substituted with shared helper code at build time -- confirm.
$SSH_CMD env _REMOTE_TOKEN=$_REMOTE_TOKEN \
env _DEBUG="'$_DEBUG'" env _PARANOIA_DEBUG="'$_PARANOIA_DEBUG'" env _LOGGER_SILENT="'$_LOGGER_SILENT'" env _LOGGER_VERBOSE="'$_LOGGER_VERBOSE'" env _LOGGER_PREFIX="'$_LOGGER_PREFIX'" env _LOGGER_ERR_ONLY="'$_LOGGER_ERR_ONLY'" \
env _REMOTE_EXECUTION="true" env PROGRAM="'$PROGRAM'" env SCRIPT_PID="'$SCRIPT_PID'" env TSTAMP="'$TSTAMP'" \
env dirList="\"$dirList\"" \
$COMMAND_SUDO' bash -s' << 'ENDSSH' > "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" 2> "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP" &
_REMOTE_TOKEN="(o_0)"
include #### DEBUG SUBSET ####
include #### TrapError SUBSET ####
include #### RemoteLogger SUBSET ####
cmd="du -cs $dirList | tail -n1 | cut -f1"
eval "$cmd"
retval=$?
if [ $retval != 0 ]; then
RemoteLogger "Command was [$cmd]." "WARN"
fi
exit $retval
ENDSSH
# $cmd will return 0 even if some errors found, so we need to check if there is an error output
# Monitor the backgrounded SSH job and enforce execution time limits
ExecTasks $! "${FUNCNAME[0]}" false 0 0 $SOFT_MAX_EXEC_TIME_TOTAL $HARD_MAX_EXEC_TIME_TOTAL true $SLEEP_TIME $KEEP_LOGGING
retval=$?
if [ $retval -ne 0 ] || [ -s "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP" ]; then
Logger "Could not get files size for some or all remote directories." "ERROR"
if [ -f "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" ]; then
Logger "Truncated output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP")" "ERROR"
fi
if [ -f "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP" ]; then
Logger "Truncated error output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP")" "ERROR"
fi
else
Logger "File size fetched successfully." "NOTICE"
fi
# Publish the result via the global; -1 signals failure to the caller
if [ -s "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" ]; then
TOTAL_FILES_SIZE="$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP")"
# Normalize non integer (human readable) du output to a plain number
if [ $(IsInteger "$TOTAL_FILES_SIZE") -eq 0 ]; then
TOTAL_FILES_SIZE="$(HumanToNumeric "$TOTAL_FILES_SIZE")"
fi
else
TOTAL_FILES_SIZE=-1
fi
}
function GetDirectoriesSize {
	# Measure the total size of the configured file backup directories,
	# locally or remotely depending on where the source files live.
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	Logger "Getting files size" "NOTICE"

	case "$BACKUP_TYPE" in
		local|push)
			# Source files are on this machine
			if [ "$FILE_BACKUP" != false ]; then
				_GetDirectoriesSizeLocal "$FILE_SIZE_LIST"
			fi
			;;
		pull)
			# Source files live on the remote host
			if [ "$FILE_BACKUP" != false ]; then
				_GetDirectoriesSizeRemote "$FILE_SIZE_LIST"
			fi
			;;
	esac
}
function _CreateDirectoryLocal {
	# Creates dirToCreate (and parents) on the local system if missing.
	# Returns non zero when the directory could not be created.
	local dirToCreate="${1}"
	__CheckArguments 1 $# "$@"	#__WITH_PARANOIA_DEBUG

	local retval
	local outputFile="$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP"

	# Nothing to do when the directory already exists
	if [ -d "$dirToCreate" ]; then
		return 0
	fi

	# No sudo, you should have all necessary rights
	mkdir -p "$dirToCreate" > "$outputFile" 2>&1 &
	ExecTasks $! "${FUNCNAME[0]}" false 0 0 720 1800 true $SLEEP_TIME $KEEP_LOGGING
	retval=$?
	if [ $retval -eq 0 ]; then
		return 0
	fi

	Logger "Cannot create directory [$dirToCreate]" "CRITICAL"
	if [ -f "$outputFile" ]; then
		Logger "Truncated output: $(head -c16384 "$outputFile")" "ERROR"
	fi
	return $retval
}
# Creates dirToCreate (and parents) on the remote host over SSH, optionally
# via sudo. Returns non zero when the directory could not be created.
function _CreateDirectoryRemote {
local dirToCreate="${1}"
__CheckArguments 1 $# "$@" #__WITH_PARANOIA_DEBUG
# cmd is declared for symmetry with sibling functions but unused here
local cmd
local retval
CheckConnectivity3rdPartyHosts
CheckConnectivityRemoteHost
# Quoted heredoc: nothing expands locally; values are passed via env.
# NOTE(review): the 'include ####' lines appear to be placeholders
# substituted with shared helper code at build time -- confirm.
$SSH_CMD env _REMOTE_TOKEN=$_REMOTE_TOKEN \
env _DEBUG="'$_DEBUG'" env _PARANOIA_DEBUG="'$_PARANOIA_DEBUG'" env _LOGGER_SILENT="'$_LOGGER_SILENT'" env _LOGGER_VERBOSE="'$_LOGGER_VERBOSE'" env _LOGGER_PREFIX="'$_LOGGER_PREFIX'" env _LOGGER_ERR_ONLY="'$_LOGGER_ERR_ONLY'" \
env _REMOTE_EXECUTION="true" env PROGRAM="'$PROGRAM'" env SCRIPT_PID="'$SCRIPT_PID'" env TSTAMP="'$TSTAMP'" \
env dirToCreate="'$dirToCreate'" $COMMAND_SUDO' bash -s' << 'ENDSSH' > "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" 2>&1 &
_REMOTE_TOKEN="(o_0)"
include #### DEBUG SUBSET ####
include #### TrapError SUBSET ####
include #### RemoteLogger SUBSET ####
if [ ! -d "$dirToCreate" ]; then
# No sudo, you should have all necessary rights
mkdir -p "$dirToCreate"
retval=$?
if [ $retval -ne 0 ]; then
RemoteLogger "Cannot create directory [$dirToCreate]" "CRITICAL"
exit $retval
fi
fi
exit 0
ENDSSH
# Wait for the backgrounded SSH job, bounded by 720s soft / 1800s hard limits
ExecTasks $! "${FUNCNAME[0]}" false 0 0 720 1800 true $SLEEP_TIME $KEEP_LOGGING
retval=$?
if [ $retval -ne 0 ]; then
Logger "Truncated output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP")" "ERROR"
return $retval
fi
}
function CreateStorageDirectories {
	# Creates the enabled storage directories (SQL / file / crypt).
	# SQL and file storage are created on whichever side receives the
	# backup; crypt storage is always local because encryption happens
	# before transfer. On failure the corresponding CAN_BACKUP_* global
	# is flipped to false so later steps skip that backup kind.
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	local createCmd

	if [ "$BACKUP_TYPE" == "local" ] || [ "$BACKUP_TYPE" == "pull" ]; then
		createCmd=_CreateDirectoryLocal
	elif [ "$BACKUP_TYPE" == "push" ]; then
		createCmd=_CreateDirectoryRemote
	else
		return 0
	fi

	if [ "$SQL_BACKUP" != false ]; then
		$createCmd "$SQL_STORAGE"
		if [ $? -ne 0 ]; then
			CAN_BACKUP_SQL=false
		fi
	fi
	if [ "$FILE_BACKUP" != false ]; then
		$createCmd "$FILE_STORAGE"
		if [ $? -ne 0 ]; then
			CAN_BACKUP_FILES=false
		fi
	fi
	if [ "$ENCRYPTION" == true ]; then
		# Crypt storage is created locally even for push backups
		_CreateDirectoryLocal "$CRYPT_STORAGE"
		if [ $? -ne 0 ]; then
			CAN_BACKUP_FILES=false
		fi
	fi
}
function GetDiskSpaceLocal {
	# Queries free disk space for pathToCheck via $DF_CMD and publishes the
	# results through globals:
	#   DISK_SPACE - available space (4th field of df's last line)
	#   DRIVE      - device name (1st field of df's last line)
	# Returns 1 when the path does not exist.
	local pathToCheck="${1}"
	__CheckArguments 1 $# "$@"	#__WITH_PARANOIA_DEBUG

	local retval
	local outputFile="$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP"

	if [ ! -d "$pathToCheck" ]; then
		Logger "Storage path [$pathToCheck] does not exist." "CRITICAL"
		return 1
	fi

	# Not elegant solution to make df silent on errors
	# No sudo on local commands, assuming you should have all the necesarry rights
	# to check backup directories sizes
	$DF_CMD "$pathToCheck" > "$outputFile" 2>&1 &
	ExecTasks $! "${FUNCNAME[0]}" false 0 0 $SOFT_MAX_EXEC_TIME_TOTAL $HARD_MAX_EXEC_TIME_TOTAL true $SLEEP_TIME $KEEP_LOGGING
	retval=$?
	if [ $retval -ne 0 ]; then
		DISK_SPACE=0
		Logger "Cannot get disk space in [$pathToCheck] on local system." "ERROR"
		Logger "Truncated output:\n$(head -c16384 "$outputFile")" "ERROR"
		return
	fi

	# Last line of df output carries the figures for the filesystem
	DISK_SPACE=$(tail -1 "$outputFile" | awk '{print $4}')
	DRIVE=$(tail -1 "$outputFile" | awk '{print $1}')
	# Normalize human readable sizes to plain integers
	if [ $(IsInteger "$DISK_SPACE") -eq 0 ]; then
		DISK_SPACE="$(HumanToNumeric "$DISK_SPACE")"
	fi
}
# Remote counterpart of GetDiskSpaceLocal: runs $DF_CMD on the remote host
# over SSH and publishes results through globals DISK_SPACE (available
# space, 4th field of df's last line) and DRIVE (device, 1st field).
# Returns non zero (and sets DISK_SPACE=0) on failure.
function GetDiskSpaceRemote {
# USE GLOBAL VARIABLE DISK_SPACE to pass variable to parent function
local pathToCheck="${1}"
__CheckArguments 1 $# "$@" #__WITH_PARANOIA_DEBUG
local cmd
local retval
# Quoted heredoc executed remotely; values passed via env. NOTE(review):
# the 'include ####' lines appear to be build-time placeholders -- confirm.
$SSH_CMD env _REMOTE_TOKEN=$_REMOTE_TOKEN \
env _DEBUG="'$_DEBUG'" env _PARANOIA_DEBUG="'$_PARANOIA_DEBUG'" env _LOGGER_SILENT="'$_LOGGER_SILENT'" env _LOGGER_VERBOSE="'$_LOGGER_VERBOSE'" env _LOGGER_PREFIX="'$_LOGGER_PREFIX'" env _LOGGER_ERR_ONLY="'$_LOGGER_ERR_ONLY'" \
env _REMOTE_EXECUTION="true" env PROGRAM="'$PROGRAM'" env SCRIPT_PID="'$SCRIPT_PID'" env TSTAMP="'$TSTAMP'" \
env DF_CMD="'$DF_CMD'" \
env pathToCheck="'$pathToCheck'" $COMMAND_SUDO' bash -s' << 'ENDSSH' > "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" 2> "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP" &
_REMOTE_TOKEN="(o_0)"
include #### DEBUG SUBSET ####
include #### TrapError SUBSET ####
include #### RemoteLogger SUBSET ####
function _GetDiskSpaceRemoteSub {
if [ -d "$pathToCheck" ]; then
# Not elegant solution to make df silent on errors
# No sudo on local commands, assuming you should have all the necesarry rights
# to check backup directories sizes
cmd="$DF_CMD \"$pathToCheck\""
eval $cmd
if [ $? != 0 ]; then
RemoteLogger "Error getting [$pathToCheck] size." "CRITICAL"
RemoteLogger "Command was [$cmd]." "WARN"
return 1
else
return 0
fi
else
RemoteLogger "Storage path [$pathToCheck] does not exist." "CRITICAL"
return 1
fi
}
_GetDiskSpaceRemoteSub
exit $?
ENDSSH
# Monitor the backgrounded SSH job and enforce execution time limits
ExecTasks $! "${FUNCNAME[0]}" false 0 0 $SOFT_MAX_EXEC_TIME_TOTAL $HARD_MAX_EXEC_TIME_TOTAL true $SLEEP_TIME $KEEP_LOGGING
retval=$?
if [ $retval -ne 0 ]; then
DISK_SPACE=0
Logger "Cannot get disk space in [$pathToCheck] on remote system." "ERROR"
Logger "Truncated output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP")" "ERROR"
Logger "Truncated output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP")" "ERROR"
return $retval
else
# Parse the captured df output: last line, 4th field = available, 1st = device
DISK_SPACE=$(tail -1 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" | awk '{print $4}')
DRIVE=$(tail -1 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" | awk '{print $1}')
# Normalize human readable sizes to plain integers
if [ $(IsInteger "$DISK_SPACE") -eq 0 ]; then
DISK_SPACE="$(HumanToNumeric "$DISK_SPACE")"
fi
fi
}
function CheckDiskSpace {
	# Verifies that each enabled storage destination (SQL / file / crypt)
	# has enough free space for the previously measured backup sizes, and
	# logs warnings when space looks insufficient or below the configured
	# minimums. When crypt storage space cannot be determined, both backup
	# kinds are disabled since encryption is a prerequisite for them.
	# FIX: corrected typo in log message ("encrypt Sfiles" -> "encrypt files").
	# USE OF GLOBAL VARIABLES TOTAL_DATABASES_SIZE, TOTAL_FILES_SIZE, BACKUP_SIZE_MINIMUM, STORAGE_WARN_SIZE, STORAGE_SPACE
	# Reads:  BACKUP_TYPE, SQL_BACKUP, FILE_BACKUP, ENCRYPTION, *_STORAGE, *_WARN_MIN_SPACE, GET_BACKUP_SIZE
	# Writes: SQL_DISK_SPACE/SQL_DRIVE, FILE_DISK_SPACE/FILE_DRIVE, CRYPT_DISK_SPACE/CRYPT_DRIVE, CAN_BACKUP_SQL, CAN_BACKUP_FILES
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	if [ "$BACKUP_TYPE" == "local" ] || [ "$BACKUP_TYPE" == "pull" ]; then
		if [ "$SQL_BACKUP" != false ]; then
			GetDiskSpaceLocal "$SQL_STORAGE"
			if [ $? -ne 0 ]; then
				SQL_DISK_SPACE=0
				CAN_BACKUP_SQL=false
			else
				SQL_DISK_SPACE=$DISK_SPACE
				SQL_DRIVE=$DRIVE
			fi
		fi
		if [ "$FILE_BACKUP" != false ]; then
			GetDiskSpaceLocal "$FILE_STORAGE"
			if [ $? -ne 0 ]; then
				FILE_DISK_SPACE=0
				CAN_BACKUP_FILES=false
			else
				FILE_DISK_SPACE=$DISK_SPACE
				FILE_DRIVE=$DRIVE
			fi
		fi
		if [ "$ENCRYPTION" != false ]; then
			GetDiskSpaceLocal "$CRYPT_STORAGE"
			if [ $? -ne 0 ]; then
				CRYPT_DISK_SPACE=0
				CAN_BACKUP_FILES=false
				CAN_BACKUP_SQL=false
			else
				CRYPT_DISK_SPACE=$DISK_SPACE
				CRYPT_DRIVE=$DRIVE
			fi
		fi
	elif [ "$BACKUP_TYPE" == "push" ]; then
		if [ "$SQL_BACKUP" != false ]; then
			GetDiskSpaceRemote "$SQL_STORAGE"
			if [ $? -ne 0 ]; then
				SQL_DISK_SPACE=0
			else
				SQL_DISK_SPACE=$DISK_SPACE
				SQL_DRIVE=$DRIVE
			fi
		fi
		if [ "$FILE_BACKUP" != false ]; then
			GetDiskSpaceRemote "$FILE_STORAGE"
			if [ $? -ne 0 ]; then
				FILE_DISK_SPACE=0
			else
				FILE_DISK_SPACE=$DISK_SPACE
				FILE_DRIVE=$DRIVE
			fi
		fi
		if [ "$ENCRYPTION" != false ]; then
			# Crypt storage is always local, even for push backups
			GetDiskSpaceLocal "$CRYPT_STORAGE"
			if [ $? -ne 0 ]; then
				CRYPT_DISK_SPACE=0
				CAN_BACKUP_FILES=false
				CAN_BACKUP_SQL=false
			else
				CRYPT_DISK_SPACE=$DISK_SPACE
				CRYPT_DRIVE=$DRIVE
			fi
		fi
	fi

	# -1 marks "size unknown" so the comparisons below never block a backup
	if [ "$TOTAL_DATABASES_SIZE" == "" ]; then
		TOTAL_DATABASES_SIZE=-1
	fi
	if [ "$TOTAL_FILES_SIZE" == "" ]; then
		TOTAL_FILES_SIZE=-1
	fi

	if [ "$SQL_BACKUP" != false ] && [ $CAN_BACKUP_SQL == true ]; then
		if [ $SQL_DISK_SPACE -eq 0 ]; then
			Logger "Storage space in [$SQL_STORAGE] reported to be 0Ko." "WARN"
		fi
		if [ $SQL_DISK_SPACE -lt $TOTAL_DATABASES_SIZE ]; then
			Logger "Disk space in [$SQL_STORAGE] may be insufficient to backup SQL ($SQL_DISK_SPACE Ko available in $SQL_DRIVE) (non compressed databases calculation)." "WARN"
		fi
		if [ $SQL_DISK_SPACE -lt $SQL_WARN_MIN_SPACE ]; then
			Logger "Disk space in [$SQL_STORAGE] is lower than warning value [$SQL_WARN_MIN_SPACE Ko]." "WARN"
		fi
		Logger "SQL storage Space: $SQL_DISK_SPACE Ko - Databases size: $TOTAL_DATABASES_SIZE Ko" "NOTICE"
	fi
	if [ "$FILE_BACKUP" != false ] && [ $CAN_BACKUP_FILES == true ]; then
		if [ $FILE_DISK_SPACE -eq 0 ]; then
			Logger "Storage space in [$FILE_STORAGE] reported to be 0 Ko." "WARN"
		fi
		if [ $FILE_DISK_SPACE -lt $TOTAL_FILES_SIZE ]; then
			Logger "Disk space in [$FILE_STORAGE] may be insufficient to backup files ($FILE_DISK_SPACE Ko available in $FILE_DRIVE)." "WARN"
		fi
		if [ $FILE_DISK_SPACE -lt $FILE_WARN_MIN_SPACE ]; then
			Logger "Disk space in [$FILE_STORAGE] is lower than warning value [$FILE_WARN_MIN_SPACE Ko]." "WARN"
		fi
		Logger "File storage space: $FILE_DISK_SPACE Ko - Files size: $TOTAL_FILES_SIZE Ko" "NOTICE"
	fi
	if [ "$ENCRYPTION" == true ]; then
		if [ "$SQL_BACKUP" != false ]; then
			# When crypt and SQL storage share a drive, the data passes
			# through both, so only half the space is effectively usable
			if [ "$SQL_DRIVE" == "$CRYPT_DRIVE" ]; then
				if [ $((SQL_DISK_SPACE/2)) -lt $((TOTAL_DATABASES_SIZE)) ]; then
					Logger "Disk space in [$SQL_STORAGE] and [$CRYPT_STORAGE] may be insufficient to backup SQL ($SQL_DISK_SPACE Ko available in $SQL_DRIVE) (non compressed databases calculation + crypt storage space)." "WARN"
				fi
			else
				if [ $((CRYPT_DISK_SPACE)) -lt $((TOTAL_DATABASES_SIZE)) ]; then
					Logger "Disk space in [$CRYPT_STORAGE] may be insufficient to encrypt SQL ($CRYPT_DISK_SPACE Ko available in $CRYPT_DRIVE) (non compressed databases calculation)." "WARN"
				fi
			fi
		fi
		if [ "$FILE_BACKUP" != false ]; then
			if [ "$FILE_DRIVE" == "$CRYPT_DRIVE" ]; then
				if [ $((FILE_DISK_SPACE/2)) -lt $((TOTAL_FILES_SIZE)) ]; then
					Logger "Disk space in [$FILE_STORAGE] and [$CRYPT_STORAGE] may be insufficient to encrypt files ($FILE_DISK_SPACE Ko available in $FILE_DRIVE)." "WARN"
				fi
			else
				if [ $((CRYPT_DISK_SPACE)) -lt $((TOTAL_FILES_SIZE)) ]; then
					Logger "Disk space in [$CRYPT_STORAGE] may be insufficient to encrypt files ($CRYPT_DISK_SPACE Ko available in $CRYPT_DRIVE)." "WARN"
				fi
			fi
		fi
		Logger "Crypt storage space: $CRYPT_DISK_SPACE Ko" "NOTICE"
	fi
	if [ $BACKUP_SIZE_MINIMUM -gt $((TOTAL_DATABASES_SIZE+TOTAL_FILES_SIZE)) ] && [ "$GET_BACKUP_SIZE" != false ]; then
		Logger "Backup size is smaller than expected." "WARN"
	fi
}
function _BackupDatabaseLocalToLocal {
	# Dumps one local MySQL database into $SQL_STORAGE, optionally piping
	# through the configured compression program and/or GPG encryption.
	# Returns non zero when mysqldump produced any error output.
	# FIX: declared encryptExtension local (it leaked to the global scope,
	# inconsistently with the _BackupDatabaseLocalToRemote /
	# _BackupDatabaseRemoteToLocal siblings) and removed the redundant
	# duplicate 'local' re-declarations of drySqlCmd / sqlCmd.
	local database="${1}"		# Database to backup
	local exportOptions="${2}"	# export options
	local encrypt="${3:-false}"	# Does the file need to be encrypted ?

	__CheckArguments 3 $# "$@"	#__WITH_PARANOIA_DEBUG

	local encryptOptions
	local encryptExtension
	local drySqlCmd
	local sqlCmd
	local retval

	if [ $encrypt == true ]; then
		encryptOptions="| $CRYPT_TOOL --encrypt --recipient=\"$GPG_RECIPIENT\""
		encryptExtension="$CRYPT_FILE_EXTENSION"
	fi

	# Dry run still executes mysqldump (so options/credentials are exercised)
	# but discards the dump itself; stderr goes to the error run file either way
	drySqlCmd="mysqldump -u $SQL_USER $exportOptions --databases $database $COMPRESSION_PROGRAM $COMPRESSION_OPTIONS $encryptOptions > /dev/null 2> \"$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP\""
	sqlCmd="mysqldump -u $SQL_USER $exportOptions --databases $database $COMPRESSION_PROGRAM $COMPRESSION_OPTIONS $encryptOptions > \"$SQL_STORAGE/$database.sql$COMPRESSION_EXTENSION$encryptExtension\" 2> \"$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP\""

	if [ $_DRYRUN == false ]; then
		Logger "Launching command [$sqlCmd]." "DEBUG"
		eval "$sqlCmd" &
	else
		Logger "Launching command [$drySqlCmd]." "DEBUG"
		eval "$drySqlCmd" &
	fi
	ExecTasks $! "${FUNCNAME[0]}" false 0 0 $SOFT_MAX_EXEC_TIME_DB_TASK $HARD_MAX_EXEC_TIME_DB_TASK true $SLEEP_TIME $KEEP_LOGGING
	retval=$?
	if [ -s "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP" ]; then
		if [ $_DRYRUN == false ]; then
			_LOGGER_SILENT=true Logger "Command was [$sqlCmd]." "WARN"
		else
			_LOGGER_SILENT=true Logger "Command was [$drySqlCmd]." "WARN"
		fi
		Logger "Truncated error output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP")" "ERROR"
		# Dirty fix for mysqldump return code not honored
		retval=1
	fi
	return $retval
}
# Dumps one local MySQL database and streams it over SSH onto the remote
# host via 'tee' (optionally compressed and/or GPG encrypted on the local
# side first). Returns non zero when any error output was produced.
function _BackupDatabaseLocalToRemote {
local database="${1}" # Database to backup
local exportOptions="${2}" # export options
local encrypt="${3:-false}" # Does the file need to be encrypted
__CheckArguments 3 $# "$@" #__WITH_PARANOIA_DEBUG
local encryptOptions
local encryptExtension
local drySqlCmd
local sqlCmd
local retval
CheckConnectivity3rdPartyHosts
CheckConnectivityRemoteHost
if [ $encrypt == true ]; then
encryptOptions="| $CRYPT_TOOL --encrypt --recipient=\"$GPG_RECIPIENT\""
encryptExtension="$CRYPT_FILE_EXTENSION"
fi
# Dry run executes mysqldump locally but never contacts the remote host;
# the real command pipes the dump into 'tee' on the remote side so it can
# be written with sudo rights if needed
local drySqlCmd="mysqldump -u $SQL_USER $exportOptions --databases $database $COMPRESSION_PROGRAM $COMPRESSION_OPTIONS $encryptOptions > /dev/null 2> \"$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP\""
local sqlCmd="mysqldump -u $SQL_USER $exportOptions --databases $database $COMPRESSION_PROGRAM $COMPRESSION_OPTIONS $encryptOptions | $SSH_CMD 'env _REMOTE_TOKEN=$_REMOTE_TOKEN $COMMAND_SUDO tee \"$SQL_STORAGE/$database.sql$COMPRESSION_EXTENSION$encryptExtension\" > /dev/null' 2> \"$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP\""
if [ $_DRYRUN == false ]; then
Logger "Launching command [$sqlCmd]." "DEBUG"
eval "$sqlCmd" &
else
Logger "Launching command [$drySqlCmd]." "DEBUG"
eval "$drySqlCmd" &
fi
ExecTasks $! "${FUNCNAME[0]}" false 0 0 $SOFT_MAX_EXEC_TIME_DB_TASK $HARD_MAX_EXEC_TIME_DB_TASK true $SLEEP_TIME $KEEP_LOGGING
retval=$?
# Any content in the error file marks the backup as failed, regardless of
# the exit code (see "dirty fix" note below)
if [ -s "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP" ]; then
if [ $_DRYRUN == false ]; then
_LOGGER_SILENT=true Logger "Command was [$sqlCmd]." "WARN"
else
_LOGGER_SILENT=true Logger "Command was [$drySqlCmd]." "WARN"
fi
Logger "Truncated error output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP")" "ERROR"
# Dirty fix for mysqldump return code not honored
retval=1
fi
return $retval
}
# Runs mysqldump on the remote host over SSH and writes the dump (optionally
# compressed and/or GPG encrypted on the remote side) into the local
# $SQL_STORAGE. Returns non zero when any error output was produced.
function _BackupDatabaseRemoteToLocal {
local database="${1}" # Database to backup
local exportOptions="${2}" # export options
local encrypt="${3:-false}" # Does the file need to be encrypted ?
__CheckArguments 3 $# "$@" #__WITH_PARANOIA_DEBUG
local encryptOptions
local encryptExtension
local drySqlCmd
local sqlCmd
local retval
CheckConnectivity3rdPartyHosts
CheckConnectivityRemoteHost
if [ $encrypt == true ]; then
# Double escaped quotes: this string travels inside the remotely executed
# double quoted command below
encryptOptions="| $CRYPT_TOOL --encrypt --recipient=\\\"$GPG_RECIPIENT\\\""
encryptExtension="$CRYPT_FILE_EXTENSION"
fi
# The mysqldump pipeline runs remotely (inside the SSH double quotes);
# redirection to the local dump / error files happens on this side
local drySqlCmd=$SSH_CMD' "env _REMOTE_TOKEN=$_REMOTE_TOKEN mysqldump -u '$SQL_USER' '$exportOptions' --databases '$database' '$COMPRESSION_PROGRAM' '$COMPRESSION_OPTIONS' '$encryptOptions'" > /dev/null 2> "'$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP'"'
local sqlCmd=$SSH_CMD' "env _REMOTE_TOKEN=$_REMOTE_TOKEN mysqldump -u '$SQL_USER' '$exportOptions' --databases '$database' '$COMPRESSION_PROGRAM' '$COMPRESSION_OPTIONS' '$encryptOptions'" > "'$SQL_STORAGE/$database.sql$COMPRESSION_EXTENSION$encryptExtension'" 2> "'$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP'"'
if [ $_DRYRUN == false ]; then
Logger "Launching command [$sqlCmd]." "DEBUG"
eval "$sqlCmd" &
else
Logger "Launching command [$drySqlCmd]." "DEBUG"
eval "$drySqlCmd" &
fi
ExecTasks $! "${FUNCNAME[0]}" false 0 0 $SOFT_MAX_EXEC_TIME_DB_TASK $HARD_MAX_EXEC_TIME_DB_TASK true $SLEEP_TIME $KEEP_LOGGING
retval=$?
# Any content in the error file marks the backup as failed, regardless of
# the exit code (see "dirty fix" note below)
if [ -s "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP" ]; then
if [ $_DRYRUN == false ]; then
_LOGGER_SILENT=true Logger "Command was [$sqlCmd]." "WARN"
else
_LOGGER_SILENT=true Logger "Command was [$drySqlCmd]." "WARN"
fi
Logger "Truncated error output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP")" "ERROR"
# Dirty fix for mysqldump return code not honored
retval=1
fi
return $retval
}
function BackupDatabase {
	# Backs up a single database, dispatching to the local/pull/push
	# implementation according to BACKUP_TYPE, and logs the outcome.
	local database="${1}"
	__CheckArguments 1 $# "$@"	#__WITH_PARANOIA_DEBUG

	local mysqlOptions
	local encrypt=false

	# Hack to prevent warning on table mysql.events, some mysql versions
	# don't support --skip-events, prefer using --ignore-table
	mysqlOptions="$MYSQLDUMP_OPTIONS"
	if [ "$database" == "mysql" ]; then
		mysqlOptions="$MYSQLDUMP_OPTIONS --ignore-table=mysql.event"
	fi

	if [ "$ENCRYPTION" == true ]; then
		encrypt=true
		Logger "Backing up encrypted database [$database]." "NOTICE"
	else
		Logger "Backing up database [$database]." "NOTICE"
	fi

	case "$BACKUP_TYPE" in
		local)
			_BackupDatabaseLocalToLocal "$database" "$mysqlOptions" $encrypt
			;;
		pull)
			_BackupDatabaseRemoteToLocal "$database" "$mysqlOptions" $encrypt
			;;
		push)
			_BackupDatabaseLocalToRemote "$database" "$mysqlOptions" $encrypt
			;;
	esac
	if [ $? -ne 0 ]; then
		Logger "Backup failed." "ERROR"
	else
		Logger "Backup succeeded." "NOTICE"
	fi
}
function BackupDatabases {
	# Backs up every database listed in SQL_BACKUP_TASKS, checking the
	# overall execution time budget after each one.
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	local db

	# SQL_BACKUP_TASKS is a space separated list; word splitting is intended
	for db in $SQL_BACKUP_TASKS; do
		BackupDatabase $db
		CheckTotalExecutionTime
	done
}
function EncryptFiles {
	# Encrypts every non encrypted file under filePath into destPath with
	# $CRYPT_TOOL for the given GPG recipient. Runs sequentially, or in
	# batch mode via ExecTasks when PARALLEL_ENCRYPTION_PROCESSES is a
	# number greater than 1. Returns the number of failed encryptions.
	local filePath="${1}"		# Path of files to encrypt
	local destPath="${2}"		# Path to store encrypted files
	local recipient="${3}"		# GPG recipient
	local recursive="${4:-true}"	# Is recursive ?
	local keepFullPath="${5:-false}" # Should destpath become destpath + sourcepath ?

	__CheckArguments 5 $# "$@"	#__WITH_PARANOIA_DEBUG

	local successCounter=0
	local errorCounter=0
	local cryptFileExtension="$CRYPT_FILE_EXTENSION"
	local recursiveArgs=""

	if [ ! -d "$destPath" ]; then
		mkdir -p "$destPath"
		if [ $? -ne 0 ]; then
			Logger "Cannot create crypt storage path [$destPath]." "ERROR"
			return 1
		fi
	fi
	if [ ! -w "$destPath" ]; then
		Logger "Cannot write to crypt storage path [$destPath]." "ERROR"
		return 1
	fi

	if [ $recursive == false ]; then
		recursiveArgs="-mindepth 1 -maxdepth 1"
	fi

	Logger "Encrypting files in [$filePath]." "NOTICE"
	while IFS= read -r -d $'\0' sourceFile; do
		# Get path of sourcefile
		path="$(dirname "$sourceFile")"
		if [ $keepFullPath == false ]; then
			# Remove source path part
			path="${path#$filePath}"
		fi
		# Remove ending slash if there is one
		path="${path%/}"
		# Add new path
		path="$destPath/$path"

		# Get filename
		file="$(basename "$sourceFile")"
		if [ ! -d "$path" ]; then
			mkdir -p "$path"
		fi

		Logger "Encrypting file [$sourceFile] to [$path/$file$cryptFileExtension]." "VERBOSE"
		if [ $(IsNumeric "$PARALLEL_ENCRYPTION_PROCESSES") -eq 1 ] && [ "$PARALLEL_ENCRYPTION_PROCESSES" != "1" ]; then
			# Batch mode: queue the command for later parallel execution
			echo "$CRYPT_TOOL --batch --yes --out \"$path/$file$cryptFileExtension\" --recipient=\"$recipient\" --encrypt \"$sourceFile\" >> \"$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP\" 2>&1" >> "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.parallel.$SCRIPT_PID.$TSTAMP"
		else
			$CRYPT_TOOL --batch --yes --out "$path/$file$cryptFileExtension" --recipient="$recipient" --encrypt "$sourceFile" > "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" 2>&1
			if [ $? -ne 0 ]; then
				Logger "Cannot encrypt [$sourceFile]." "ERROR"
				Logger "Truncated output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP")" "DEBUG"
				errorCounter=$((errorCounter+1))
			else
				successCounter=$((successCounter+1))
			fi
		fi
	#TODO: This redirection does not work with busybox since there is no subshell support
	done < <($FIND_CMD "$filePath" $recursiveArgs -type f ! -name "*$cryptFileExtension" -print0)

	if [ $(IsNumeric "$PARALLEL_ENCRYPTION_PROCESSES") -eq 1 ] && [ "$PARALLEL_ENCRYPTION_PROCESSES" != "1" ]; then
		# Handle batch mode where SOFT /HARD MAX EXEC TIME TOTAL is not defined
		if [ $(IsNumeric "$SOFT_MAX_EXEC_TIME_TOTAL") -eq 1 ]; then
			softMaxExecTime="$SOFT_MAX_EXEC_TIME_TOTAL"
		else
			softMaxExecTime=0
		fi
		if [ $(IsNumeric "$HARD_MAX_EXEC_TIME_TOTAL") -eq 1 ]; then
			hardMaxExecTime="$HARD_MAX_EXEC_TIME_TOTAL"
		else
			hardMaxExecTime=0
		fi
		# FIX: read the queued command batch file (readFromFile=true) and use
		# the sanitized softMaxExecTime/hardMaxExecTime computed above,
		# mirroring DecryptFiles. The previous call monitored a stale "$!"
		# (no background job is started in batch mode) and passed the raw,
		# possibly non numeric *_MAX_EXEC_TIME_TOTAL values, leaving the two
		# locals above dead.
		ExecTasks "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.parallel.$SCRIPT_PID.$TSTAMP" "${FUNCNAME[0]}" true 0 0 $softMaxExecTime $hardMaxExecTime true $SLEEP_TIME $KEEP_LOGGING true false false $PARALLEL_ENCRYPTION_PROCESSES
		retval=$?
		if [ $retval -ne 0 ]; then
			Logger "Encryption error." "ERROR"
			# Output file is defined in ParallelExec
			Logger "Truncated output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.ExecTasks.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP")" "DEBUG"
		fi
		# One queued line per file; failures are reported via retval
		successCounter=$(($(wc -l < "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.parallel.$SCRIPT_PID.$TSTAMP") - retval))
		errorCounter=$retval
	fi

	if [ $successCounter -gt 0 ]; then
		Logger "Encrypted [$successCounter] files successfully." "NOTICE"
	elif [ $successCounter -eq 0 ] && [ $errorCounter -eq 0 ]; then
		Logger "There were no files to encrypt." "WARN"
	fi
	if [ $errorCounter -gt 0 ]; then
		Logger "Failed to encrypt [$errorCounter] files." "CRITICAL"
	fi
	return $errorCounter
}
function DecryptFiles {
	# Decrypts every "*$CRYPT_FILE_EXTENSION" file under filePath in place
	# (writing the clear file next to it and deleting the encrypted one on
	# success), using either a passphrase file or a literal passphrase.
	# Returns the number of failed decryptions; exits on unusable input.
	# FIX: removed a stray debug 'echo "$CRYPT_TOOL is $cryptToolVersion"'
	# that referenced cryptToolVersion before it was computed (it always
	# printed an empty version); the version is logged via Logger below.
	local filePath="${1}"		# Path to files to decrypt
	local passphraseFile="${2}"	# Passphrase file to decrypt files
	local passphrase="${3}"		# Passphrase to decrypt files

	__CheckArguments 3 $# "$@"	#__WITH_PARANOIA_DEBUG

	local options
	local secret
	local successCounter=0
	local errorCounter=0
	local cryptToolVersion
	local cryptToolMajorVersion
	local cryptToolSubVersion
	local cryptToolMinorVersion
	local cryptFileExtension="$CRYPT_FILE_EXTENSION"
	local retval

	if [ ! -w "$filePath" ]; then
		Logger "Path [$filePath] is not writable or does not exist. Cannot decrypt files." "CRITICAL"
		exit 1
	fi

	# Detect if GnuPG >= 2.1 that does not allow automatic pin entry anymore
	cryptToolVersion=$("$CRYPT_TOOL" --version | head -1 | awk '{print $3}')
	cryptToolMajorVersion=${cryptToolVersion%%.*}
	cryptToolSubVersion=${cryptToolVersion#*.}
	cryptToolSubVersion=${cryptToolSubVersion%.*}
	cryptToolMinorVersion=${cryptToolVersion##*.}

	Logger "Running with gpg $cryptToolMajorVersion$cryptToolSubVersion$cryptToolMinorVersion" "NOTICE"
	if [ $cryptToolMajorVersion -eq 2 ] && [ $cryptToolSubVersion -ge 1 ]; then
		if [ $cryptToolMinorVersion -gt 11 ]; then
			additionalParameters="--pinentry-mode loopback"
		elif [ $cryptToolMinorVersion -eq 11 ]; then
			Logger "GPG automatism via --pinentry-mode loopback not supported in gpg version 2.1.11. Please add allow-loopback-pinentry to your gpg-agent.conf file." "NOTICE"
		fi
	fi

	if [ -f "$passphraseFile" ]; then
		secret="--passphrase-file $passphraseFile"
	elif [ "$passphrase" != "" ]; then
		secret="--passphrase $passphrase"
	else
		Logger "The given passphrase file or passphrase are inexistent." "CRITICAL"
		exit 1
	fi

	if [ "$CRYPT_TOOL" == "gpg2" ]; then
		options="--batch --yes"
	elif [ "$CRYPT_TOOL" == "gpg" ]; then
		options="--no-use-agent --batch"
	fi

	while IFS= read -r -d $'\0' encryptedFile; do
		Logger "Decrypting [$encryptedFile]." "VERBOSE"

		if [ $(IsNumeric "$PARALLEL_ENCRYPTION_PROCESSES") -eq 1 ] && [ "$PARALLEL_ENCRYPTION_PROCESSES" != "1" ]; then
			# Batch mode: queue the command for later parallel execution
			echo "$CRYPT_TOOL $options --out \"${encryptedFile%%$cryptFileExtension}\" $additionalParameters $secret --decrypt \"$encryptedFile\" >> \"$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP\" 2>&1" >> "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.parallel.$SCRIPT_PID.$TSTAMP"
		else
			$CRYPT_TOOL $options --out "${encryptedFile%%$cryptFileExtension}" $additionalParameters $secret --decrypt "$encryptedFile" > "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" 2>&1
			retval=$?
			if [ $retval -ne 0 ]; then
				Logger "Cannot decrypt [$encryptedFile]." "ERROR"
				Logger "Truncated output\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP")" "NOTICE"
				errorCounter=$((errorCounter+1))
			else
				successCounter=$((successCounter+1))
				rm -f "$encryptedFile"
				if [ $? -ne 0 ]; then
					Logger "Cannot delete original file [$encryptedFile] after decryption." "ERROR"
				fi
			fi
		fi
	done < <($FIND_CMD "$filePath" -type f -name "*$cryptFileExtension" -print0)

	if [ $(IsNumeric "$PARALLEL_ENCRYPTION_PROCESSES") -eq 1 ] && [ "$PARALLEL_ENCRYPTION_PROCESSES" != "1" ]; then
		# Handle batch mode where SOFT /HARD MAX EXEC TIME TOTAL is not defined
		if [ $(IsNumeric "$SOFT_MAX_EXEC_TIME_TOTAL") -eq 1 ]; then
			softMaxExecTime=$SOFT_MAX_EXEC_TIME_TOTAL
		else
			softMaxExecTime=0
		fi
		if [ $(IsNumeric "$HARD_MAX_EXEC_TIME_TOTAL") -eq 1 ]; then
			hardMaxExecTime=$HARD_MAX_EXEC_TIME_TOTAL
		else
			hardMaxExecTime=0
		fi
		ExecTasks "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.parallel.$SCRIPT_PID.$TSTAMP" "${FUNCNAME[0]}" true 0 0 $softMaxExecTime $hardMaxExecTime true $SLEEP_TIME $KEEP_LOGGING true false $PARALLEL_ENCRYPTION_PROCESSES
		retval=$?
		if [ $retval -ne 0 ]; then
			Logger "Decrypting error.." "ERROR"
			# Output file is defined in ParallelExec
			# NOTE(review): this references EncryptFiles' run file rather than
			# this function's -- looks wrong, but left as-is pending
			# confirmation of ParallelExec's actual file naming scheme.
			Logger "Truncated output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.ParallelExec.EncryptFiles.$SCRIPT_PID.$TSTAMP")" "DEBUG"
		fi
		# One queued line per file; failures are reported via retval
		successCounter=$(($(wc -l < "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.parallel.$SCRIPT_PID.$TSTAMP") - retval))
		errorCounter=$retval
	fi

	if [ $successCounter -gt 0 ]; then
		Logger "Decrypted [$successCounter] files successfully." "NOTICE"
	elif [ $successCounter -eq 0 ] && [ $errorCounter -eq 0 ]; then
		Logger "There were no files to decrypt." "WARN"
	fi
	if [ $errorCounter -gt 0 ]; then
		Logger "Failed to decrypt [$errorCounter] files." "CRITICAL"
	fi
	return $errorCounter
}
# Copies sourceDir to destinationDir with rsync, locally or over SSH
# depending on BACKUP_TYPE. When recursive is false, only the top level of
# the directory is backed up. Returns rsync's exit code.
function Rsync {
local sourceDir="${1}" # Source directory
local destinationDir="${2}" # Destination directory
local recursive="${3:-true}" # Backup only files at toplevel of directory
__CheckArguments 3 $# "$@" #__WITH_PARANOIA_DEBUG
local rsyncCmd
local retval
local rsyncArgs
## Manage to backup recursive directories lists files only (not recursing into subdirectories)
if [ $recursive == false ]; then
# Fixes symlinks to directories in target cannot be deleted when backing up root directory without recursion
rsyncArgs="$RSYNC_DEFAULT_ARGS -f '- /*/*/'"
else
rsyncArgs="$RSYNC_DEFAULT_ARGS"
fi
# Creating subdirectories because rsync cannot handle multiple subdirectory creation
# local: plain copy; pull: remote source; push: remote destination.
# For the remote variants, _REMOTE_TOKEN is injected via --rsync-path.
if [ "$BACKUP_TYPE" == "local" ]; then
_CreateDirectoryLocal "$destinationDir"
rsyncCmd="$(type -p $RSYNC_EXECUTABLE) $rsyncArgs $RSYNC_DRY_ARG $RSYNC_ATTR_ARGS $RSYNC_TYPE_ARGS $RSYNC_NO_RECURSE_ARGS $RSYNC_DELETE $RSYNC_PATTERNS $RSYNC_PARTIAL_EXCLUDE --rsync-path=\"$RSYNC_PATH\" \"$sourceDir\" \"$destinationDir\" > \"$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP\" 2>&1"
elif [ "$BACKUP_TYPE" == "pull" ]; then
_CreateDirectoryLocal "$destinationDir"
CheckConnectivity3rdPartyHosts
CheckConnectivityRemoteHost
rsyncCmd="$(type -p $RSYNC_EXECUTABLE) $rsyncArgs $RSYNC_DRY_ARG $RSYNC_ATTR_ARGS $RSYNC_TYPE_ARGS $RSYNC_NO_RECURSE_ARGS $RSYNC_DELETE $RSYNC_PATTERNS $RSYNC_PARTIAL_EXCLUDE --rsync-path=\"env _REMOTE_TOKEN=$_REMOTE_TOKEN $RSYNC_PATH\" -e \"$RSYNC_SSH_CMD\" \"$REMOTE_USER@$REMOTE_HOST:'$sourceDir'\" \"$destinationDir\" > \"$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP\" 2>&1"
elif [ "$BACKUP_TYPE" == "push" ]; then
_CreateDirectoryRemote "$destinationDir"
CheckConnectivity3rdPartyHosts
CheckConnectivityRemoteHost
rsyncCmd="$(type -p $RSYNC_EXECUTABLE) $rsyncArgs $RSYNC_DRY_ARG $RSYNC_ATTR_ARGS $RSYNC_TYPE_ARGS $RSYNC_NO_RECURSE_ARGS $RSYNC_DELETE $RSYNC_PATTERNS $RSYNC_PARTIAL_EXCLUDE --rsync-path=\"env _REMOTE_TOKEN=$_REMOTE_TOKEN $RSYNC_PATH\" -e \"$RSYNC_SSH_CMD\" \"$sourceDir\" \"$REMOTE_USER@$REMOTE_HOST:'$destinationDir'\" > \"$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP\" 2>&1"
fi
Logger "Launching command [$rsyncCmd]." "DEBUG"
# Run in background; ExecTasks monitors the job and enforces time limits
eval "$rsyncCmd" &
ExecTasks $! "${FUNCNAME[0]}" false 0 0 $SOFT_MAX_EXEC_TIME_FILE_TASK $HARD_MAX_EXEC_TIME_FILE_TASK true $SLEEP_TIME $KEEP_LOGGING
retval=$?
if [ $retval -ne 0 ]; then
Logger "Failed to backup [$sourceDir] to [$destinationDir]." "ERROR"
_LOGGER_SILENT=true Logger "Command was [$rsyncCmd]." "WARN"
Logger "Truncated output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP")" "ERROR"
else
Logger "Truncated output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP")" "VERBOSE"
Logger "File backup succeed." "NOTICE"
fi
return $retval
}
function _FilesBackupResolveDirs {
	# Compute destinationDir and encryptDir (caller-scope variables) for one
	# backup task, honoring KEEP_ABSOLUTE_PATHS.
	local backupTask="${1}"

	if [ "$KEEP_ABSOLUTE_PATHS" != false ]; then
		# Fix for backup of '/'
		if [ "${backupTask#/}/" == "/" ]; then
			destinationDir="$FILE_STORAGE/"
		else
			destinationDir=$(dirname "$FILE_STORAGE/${backupTask#/}/")
		fi
		encryptDir="$FILE_STORAGE/${backupTask#/}"
	else
		destinationDir="$FILE_STORAGE"
		encryptDir="$FILE_STORAGE"
	fi
}

function _FilesBackupRunTask {
	# Back up a single task. Depending on configuration:
	#  - encryption on, local/push: encrypt first, then rsync the ciphertext
	#  - encryption on, pull:       rsync first, then encrypt the pulled copy
	#  - encryption off:            plain rsync
	# $1 = source path
	# $2 = recursion flag ("true"/"false") forwarded to EncryptFiles / Rsync
	# Uses destinationDir / encryptDir set by _FilesBackupResolveDirs in the
	# caller's scope, then enforces the global execution-time budget.
	local backupTask="${1}"
	local recursive="${2}"

	if [ "$ENCRYPTION" == true ] && ([ "$BACKUP_TYPE" == "local" ] || [ "$BACKUP_TYPE" == "push" ]); then
		EncryptFiles "$backupTask" "$CRYPT_STORAGE" "$GPG_RECIPIENT" "$recursive" true
		if [ $? -eq 0 ]; then
			Rsync "$CRYPT_STORAGE/$backupTask" "$destinationDir" "$recursive"
		else
			Logger "backup failed." "ERROR"
		fi
	elif [ "$ENCRYPTION" == true ] && [ "$BACKUP_TYPE" == "pull" ]; then
		Rsync "$backupTask" "$destinationDir" "$recursive"
		if [ $? -eq 0 ]; then
			EncryptFiles "$encryptDir" "$CRYPT_STORAGE/$backupTask" "$GPG_RECIPIENT" "$recursive" false
		fi
	else
		Rsync "$backupTask" "$destinationDir" "$recursive"
	fi
	CheckTotalExecutionTime
}

function FilesBackup {
	# Run the file backup over the three task lists:
	#  FILE_BACKUP_TASKS           - plain directories (copied recursively)
	#  RECURSIVE_DIRECTORY_LIST    - recursive parents (copied WITHOUT recursion)
	#  FILE_RECURSIVE_BACKUP_TASKS - children of recursive parents (recursive)
	# Lists are PATH_SEPARATOR_CHAR-separated strings built earlier in the run.
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	local backupTask
	local backupTasks
	local destinationDir
	local encryptDir

	# Backup directories from simple list
	IFS=$PATH_SEPARATOR_CHAR read -r -a backupTasks <<< "$FILE_BACKUP_TASKS"
	for backupTask in "${backupTasks[@]}"; do
		_FilesBackupResolveDirs "$backupTask"
		Logger "Beginning file backup of [$backupTask] to [$destinationDir] as $BACKUP_TYPE backup." "NOTICE"
		_FilesBackupRunTask "$backupTask" true
	done

	# Backup recursive directories without recursion
	IFS=$PATH_SEPARATOR_CHAR read -r -a backupTasks <<< "$RECURSIVE_DIRECTORY_LIST"
	for backupTask in "${backupTasks[@]}"; do
		_FilesBackupResolveDirs "$backupTask"
		Logger "Beginning non recursive file backup of [$backupTask] to [$destinationDir] as $BACKUP_TYPE backup." "NOTICE"
		_FilesBackupRunTask "$backupTask" false
	done

	# Backup sub directories of recursive directories
	IFS=$PATH_SEPARATOR_CHAR read -r -a backupTasks <<< "$FILE_RECURSIVE_BACKUP_TASKS"
	for backupTask in "${backupTasks[@]}"; do
		_FilesBackupResolveDirs "$backupTask"
		Logger "Beginning recursive child file backup of [$backupTask] to [$destinationDir] as $BACKUP_TYPE backup." "NOTICE"
		_FilesBackupRunTask "$backupTask" true
	done
}
function CheckTotalExecutionTime {
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	# Warn (and send an alert) once the soft time limit for the whole run
	# is exceeded; the run itself continues.
	if (( SECONDS > SOFT_MAX_EXEC_TIME_TOTAL )); then
		Logger "Max soft execution time of the whole backup exceeded." "WARN"
		SendAlert true
	fi

	# A hard limit of 0 means "no hard limit"; otherwise abort the whole script.
	if (( HARD_MAX_EXEC_TIME_TOTAL != 0 )) && (( SECONDS > HARD_MAX_EXEC_TIME_TOTAL )); then
		Logger "Max hard execution time of the whole backup exceeded, stopping backup process." "CRITICAL"
		exit 1
	fi
}
function _RotateBackupsLocal {
# Rotate local backup copies inside backupPath:
# foo -> foo.$PROGRAM.1 -> ... -> foo.$PROGRAM.N, deleting the oldest copy (.N)
# before shifting, so at most rotateCopies rotated copies are kept per entry.
local backupPath="${1}"
local rotateCopies="${2}"
__CheckArguments 2 $# "$@" #__WITH_PARANOIA_DEBUG
local backup
local copy
local cmd
local path
# Iterate only over entries that are NOT already rotated copies (no
# ".$PROGRAM.<n>" suffix). NUL-delimited so names with spaces/newlines survive.
# NOTE: the while body runs in a pipeline subshell, so variables set inside
# are not visible after the loop.
$FIND_CMD "$backupPath" -mindepth 1 -maxdepth 1 ! -regex ".*\.$PROGRAM\.[0-9]+" -print0 | while IFS= read -r -d $'\0' backup; do
copy=$rotateCopies
# Shift existing copies up by one, highest index first.
while [ $copy -gt 1 ]; do
if [ $copy -eq $rotateCopies ]; then
# Oldest copy falls off the end: delete it before shifting into its slot.
path="$backup.$PROGRAM.$copy"
if [ -f "$path" ] || [ -d "$path" ]; then
cmd="rm -rf \"$path\""
Logger "Launching command [$cmd]." "DEBUG"
eval "$cmd" &
ExecTasks $! "${FUNCNAME[0]}" false 0 0 3600 0 true $SLEEP_TIME $KEEP_LOGGING
if [ $? -ne 0 ]; then
Logger "Cannot delete oldest copy [$path]." "ERROR"
_LOGGER_SILENT=true Logger "Command was [$cmd]." "WARN"
fi
fi
fi
path="$backup.$PROGRAM.$((copy-1))"
if [ -f "$path" ] || [ -d "$path" ]; then
cmd="mv \"$path\" \"$backup.$PROGRAM.$copy\""
Logger "Launching command [$cmd]." "DEBUG"
eval "$cmd" &
ExecTasks $! "${FUNCNAME[0]}" false 0 0 3600 0 true $SLEEP_TIME $KEEP_LOGGING
if [ $? -ne 0 ]; then
Logger "Cannot move [$path] to [$backup.$PROGRAM.$copy]." "ERROR"
_LOGGER_SILENT=true Logger "Command was [$cmd]." "WARN"
fi
fi
copy=$((copy-1))
done
# TODO: Describe new behavior: Always copy instead of move in order to make delta copies
# Latest file backup will not be moved if script configured for remote backup so next rsync execution will only do delta copy instead of full one
# Whereas sql files will always be moved because we don't do deltas on sql files
if [[ $backup == *.sql.* ]]; then
# SQL dumps: move into slot .1 (no delta copies are made on sql files).
cmd="mv \"$backup\" \"$backup.$PROGRAM.1\""
Logger "Launching command [$cmd]." "DEBUG"
eval "$cmd" &
ExecTasks $! "${FUNCNAME[0]}" false 0 0 3600 0 true $SLEEP_TIME $KEEP_LOGGING
if [ $? -ne 0 ]; then
Logger "Cannot move [$backup] to [$backup.$PROGRAM.1]." "ERROR"
_LOGGER_SILENT=true Logger "Command was [$cmd]." "WARN"
fi
else
#elif [ "$REMOTE_OPERATION" == true ]; then
# File backups: copy (keep the original in place) so the next rsync run
# can do a delta transfer against it instead of a full copy.
cmd="cp -R \"$backup\" \"$backup.$PROGRAM.1\""
Logger "Launching command [$cmd]." "DEBUG"
eval "$cmd" &
ExecTasks $! "${FUNCNAME[0]}" false 0 0 3600 0 true $SLEEP_TIME $KEEP_LOGGING
if [ $? -ne 0 ]; then
Logger "Cannot copy [$backup] to [$backup.$PROGRAM.1]." "ERROR"
_LOGGER_SILENT=true Logger "Command was [$cmd]." "WARN"
fi
#else
# cmd="mv \"$backup\" \"$backup.$PROGRAM.1\""
# Logger "Launching command [$cmd]." "DEBUG"
# eval "$cmd" &
# ExecTasks $! "${FUNCNAME[0]}" false 0 0 3600 0 true $SLEEP_TIME $KEEP_LOGGING
# if [ $? -ne 0 ]; then
# Logger "Cannot move [$backup] to [$backup.$PROGRAM.1]." "ERROR"
# _LOGGER_SILENT=true Logger "Command was [$cmd]." "WARN"
# fi
fi
done
}
function _RotateBackupsRemote {
# Same rotation scheme as _RotateBackupsLocal, but executed on the remote host:
# the worker function is shipped inline over ssh via the quoted heredoc below.
local backupPath="${1}"
local rotateCopies="${2}"
__CheckArguments 2 $# "$@" #__WITH_PARANOIA_DEBUG
# Forward logger/debug context and the rotation parameters to the remote shell
# as environment variables (the heredoc is quoted, so nothing expands locally).
$SSH_CMD env _REMOTE_TOKEN=$_REMOTE_TOKEN \
env _DEBUG="'$_DEBUG'" env _PARANOIA_DEBUG="'$_PARANOIA_DEBUG'" env _LOGGER_SILENT="'$_LOGGER_SILENT'" env _LOGGER_VERBOSE="'$_LOGGER_VERBOSE'" env _LOGGER_PREFIX="'$_LOGGER_PREFIX'" env _LOGGER_ERR_ONLY="'$_LOGGER_ERR_ONLY'" \
env _REMOTE_EXECUTION="true" env PROGRAM="'$PROGRAM'" env SCRIPT_PID="'$SCRIPT_PID'" env TSTAMP="'$TSTAMP'" \
env REMOTE_FIND_CMD="'$REMOTE_FIND_CMD'" env rotateCopies="'$rotateCopies'" env backupPath="'$backupPath'" \
$COMMAND_SUDO' bash -s' << 'ENDSSH' > "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP" 2> "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.error.$SCRIPT_PID.$TSTAMP"
_REMOTE_TOKEN="(o_0)"
# The 'include' lines are build-time markers replaced by the bundler with
# shared helper code (DEBUG, TrapError, RemoteLogger); they are not run as-is.
include #### DEBUG SUBSET ####
include #### TrapError SUBSET ####
include #### RemoteLogger SUBSET ####
function _RotateBackupsRemoteSSH {
local backup
local copy
local cmd
local path
# Mirror of the local rotation loop; see _RotateBackupsLocal for the scheme.
$REMOTE_FIND_CMD "$backupPath" -mindepth 1 -maxdepth 1 ! -regex ".*\.$PROGRAM\.[0-9]+" -print0 | while IFS= read -r -d $'\0' backup; do
copy=$rotateCopies
while [ $copy -gt 1 ]; do
if [ $copy -eq $rotateCopies ]; then
path="$backup.$PROGRAM.$copy"
if [ -f "$path" ] || [ -d "$path" ]; then
cmd="rm -rf \"$path\""
RemoteLogger "Launching command [$cmd]." "DEBUG"
eval "$cmd"
if [ $? -ne 0 ]; then
RemoteLogger "Cannot delete oldest copy [$path]." "ERROR"
RemoteLogger "Command was [$cmd]." "WARN"
fi
fi
fi
path="$backup.$PROGRAM.$((copy-1))"
if [ -f "$path" ] || [ -d "$path" ]; then
cmd="mv \"$path\" \"$backup.$PROGRAM.$copy\""
RemoteLogger "Launching command [$cmd]." "DEBUG"
eval "$cmd"
if [ $? -ne 0 ]; then
RemoteLogger "Cannot move [$path] to [$backup.$PROGRAM.$copy]." "ERROR"
RemoteLogger "Command was [$cmd]." "WARN"
fi
fi
copy=$((copy-1))
done
# Latest file backup will not be moved if script configured for remote backup so next rsync execution will only do delta copy instead of full one
if [[ $backup == *.sql.* ]]; then
cmd="mv \"$backup\" \"$backup.$PROGRAM.1\""
RemoteLogger "Launching command [$cmd]." "DEBUG"
eval "$cmd"
if [ $? -ne 0 ]; then
RemoteLogger "Cannot move [$backup] to [$backup.$PROGRAM.1]." "ERROR"
RemoteLogger "Command was [$cmd]." "WARN"
fi
else
#elif [ "$REMOTE_OPERATION" == true ]; then
cmd="cp -R \"$backup\" \"$backup.$PROGRAM.1\""
RemoteLogger "Launching command [$cmd]." "DEBUG"
eval "$cmd"
if [ $? -ne 0 ]; then
RemoteLogger "Cannot copy [$backup] to [$backup.$PROGRAM.1]." "ERROR"
RemoteLogger "Command was [$cmd]." "WARN"
fi
#else
# cmd="mv \"$backup\" \"$backup.$PROGRAM.1\""
# RemoteLogger "Launching command [$cmd]." "DEBUG"
# eval "$cmd"
# if [ $? -ne 0 ]; then
# RemoteLogger "Cannot move [$backup] to [$backup.$PROGRAM.1]." "ERROR"
# RemoteLogger "Command was [$cmd]." "WARN"
# fi
fi
done
}
_RotateBackupsRemoteSSH
ENDSSH
# NOTE(review): the ssh command above is not backgrounded, so "$!" below refers
# to the most recent *earlier* background job, not this ssh pipeline — confirm
# whether a trailing '&' was lost (sibling functions launch eval'd commands
# with '&' before calling ExecTasks "$!").
ExecTasks $! "${FUNCNAME[0]}" false 0 0 1800 0 true $SLEEP_TIME $KEEP_LOGGING
if [ $? -ne 0 ]; then
Logger "Could not rotate backups in [$backupPath]." "ERROR"
Logger "Truncated output:\n$(head -c16384 "$RUN_DIR/$PROGRAM.${FUNCNAME[0]}.$SCRIPT_PID.$TSTAMP")" "ERROR"
else
Logger "Remote rotation succeed." "NOTICE"
fi ## Need to add a trivial sleep time to give ssh time to log to local file
#sleep 5
}
#TODO: test find cmd for backup rotation with regex on busybox / mac
function RotateBackups {
	# Dispatch backup rotation to the side that actually stores the copies:
	# local/pull keep backups on this machine, push keeps them remotely.
	local backupPath="${1}"
	local rotateCopies="${2}"

	__CheckArguments 2 $# "$@"	#__WITH_PARANOIA_DEBUG

	case "$BACKUP_TYPE" in
		local|pull)
			Logger "Rotating local backups in [$backupPath] for [$rotateCopies] copies." "NOTICE"
			_RotateBackupsLocal "$backupPath" "$rotateCopies"
			;;
		push)
			Logger "Rotating remote backups in [$backupPath] for [$rotateCopies] copies." "NOTICE"
			_RotateBackupsRemote "$backupPath" "$rotateCopies"
			;;
	esac
}
function Init {
	# Parse REMOTE_SYSTEM_URI (ssh://[user@]host[:port]/path) into the
	# REMOTE_USER / REMOTE_HOST / REMOTE_PORT globals, then assemble the
	# run-time additions to RSYNC_TYPE_ARGS.
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	local uri
	local hostUriAndPath
	local hostUri

	## Test if target dir is a ssh uri, and if yes, break it down it its values
	if [[ "$REMOTE_SYSTEM_URI" == ssh://* ]] && [ "$BACKUP_TYPE" != "local" ]; then
		REMOTE_OPERATION=true

		# strip the leading 'ssh://' scheme
		uri="${REMOTE_SYSTEM_URI#ssh://}"
		if [[ "$uri" == *"@"* ]]; then
			# user part: everything before the last '@'
			REMOTE_USER="${uri%@*}"
		else
			REMOTE_USER="$LOCAL_USER"
		fi

		# Assume that there might exist a standard rsa key when neither a key
		# nor a password file is configured
		if [ "$SSH_RSA_PRIVATE_KEY" == "" ] && [ ! -f "$SSH_PASSWORD_FILE" ]; then
			SSH_RSA_PRIVATE_KEY=~/.ssh/id_rsa
		fi

		# host[:port] is everything between the '@' and the first '/'
		hostUriAndPath="${uri#*@}"
		hostUri="${hostUriAndPath%%/*}"
		if [[ "$hostUri" == *":"* ]]; then
			REMOTE_PORT="${hostUri##*:}"
		else
			REMOTE_PORT=22
		fi
		REMOTE_HOST="${hostUri%%:*}"
	fi

	## Add update to default RSYNC_TYPE_ARGS
	RSYNC_TYPE_ARGS="$RSYNC_TYPE_ARGS -u"
	[ "$_LOGGER_VERBOSE" == true ] && RSYNC_TYPE_ARGS="$RSYNC_TYPE_ARGS -i"
	[ "$DELETE_VANISHED_FILES" == true ] && RSYNC_TYPE_ARGS="$RSYNC_TYPE_ARGS --delete"
	[ "$stats" == true ] && RSYNC_TYPE_ARGS="$RSYNC_TYPE_ARGS --stats"

	## Fix for symlink to directories on target cannot get updated
	RSYNC_TYPE_ARGS="$RSYNC_TYPE_ARGS --force"
}
function Main {
# Top-level backup orchestration: enumerate what to back up, prepare storage,
# rotate previous copies, then run the SQL and file backups.
__CheckArguments 0 $# "$@" #__WITH_PARANOIA_DEBUG
if [ "$SQL_BACKUP" != false ] && [ $CAN_BACKUP_SQL == true ]; then
ListDatabases
fi
if [ "$FILE_BACKUP" != false ] && [ $CAN_BACKUP_FILES == true ]; then
ListRecursiveBackupDirectories
if [ "$GET_BACKUP_SIZE" != false ]; then
GetDirectoriesSize
else
# -1 marks "size unknown" (size evaluation disabled via --dontgetsize)
TOTAL_FILES_SIZE=-1
fi
fi
# Expand ~ if exists
FILE_STORAGE="${FILE_STORAGE/#\~/$HOME}"
SQL_STORAGE="${SQL_STORAGE/#\~/$HOME}"
SSH_RSA_PRIVATE_KEY="${SSH_RSA_PRIVATE_KEY/#\~/$HOME}"
SSH_PASSWORD_FILE="${SSH_PASSWORD_FILE/#\~/$HOME}"
ENCRYPT_PUBKEY="${ENCRYPT_PUBKEY/#\~/$HOME}"
if [ "$CREATE_DIRS" != false ]; then
CreateStorageDirectories
fi
CheckDiskSpace
# Actual backup process
if [ "$SQL_BACKUP" != false ] && [ $CAN_BACKUP_SQL == true ]; then
# Rotation only happens on real runs, never on --dry
if [ $_DRYRUN == false ] && [ "$ROTATE_SQL_BACKUPS" == true ]; then
RotateBackups "$SQL_STORAGE" "$ROTATE_SQL_COPIES"
fi
BackupDatabases
fi
if [ "$FILE_BACKUP" != false ] && [ $CAN_BACKUP_FILES == true ]; then
if [ $_DRYRUN == false ] && [ "$ROTATE_FILE_BACKUPS" == true ]; then
RotateBackups "$FILE_STORAGE" "$ROTATE_FILE_COPIES"
fi
## Add Rsync include / exclude patterns
RsyncPatterns
FilesBackup
fi
}
function Usage {
	# Print program banner, option reference and batch-mode examples, then
	# exit with status 128 (also used as the "bad invocation" path).
	__CheckArguments 0 $# "$@"	#__WITH_PARANOIA_DEBUG

	if [ "$IS_STABLE" != true ]; then
		echo -e "\e[93mThis is an unstable dev build. Please use with caution.\e[0m"
	fi

	cat <<EOF
$PROGRAM $PROGRAM_VERSION $PROGRAM_BUILD
$AUTHOR
$CONTACT

General usage: $0 /path/to/backup.conf [OPTIONS]

OPTIONS:
--dry will run $PROGRAM without actually doing anything, just testing
--no-prefix Will suppress time / date suffix from output
--silent will run $PROGRAM without any output to stdout, usefull for cron backups
--errors-only Output only errors (can be combined with silent or verbose)
--verbose adds command outputs
--stats Adds rsync transfer statistics to verbose output
--partial Allows rsync to keep partial downloads that can be resumed later (experimental)
--no-maxtime disables any soft and hard execution time checks
--delete Deletes files on destination that vanished on source
--dontgetsize Does not try to evaluate backup size
--parallel=ncpu Use n cpus to encrypt / decrypt files. Works in normal and batch processing mode.

Batch processing usage:
EOF
	echo -e "\e[93mDecrypt\e[0m a backup encrypted with $PROGRAM"
	cat <<EOF
$0 --decrypt=/path/to/encrypted_backup --passphrase-file=/path/to/passphrase
$0 --decrypt=/path/to/encrypted_backup --passphrase=MySecretPassPhrase (security risk)

Batch encrypt directories in separate gpg files
$0 --encrypt=/path/to/files --destination=/path/to/encrypted/files --recipient="Your Name"
EOF
	exit 128
}
#### SCRIPT ENTRY POINT ####
# TrapQuit (defined earlier in the script) performs cleanup/alerting on exit.
trap TrapQuit EXIT
# Command line argument flags
# Defaults for the runtime flags set by GetCommandlineArguments below.
_DRYRUN=false
no_maxtime=false
stats=false
partial_transfers=false
delete_vanished=false
dont_get_backup_size=false
# Batch modes: decrypt/encrypt an existing backup instead of running a backup.
_DECRYPT_MODE=false
DECRYPT_PATH=""
_ENCRYPT_MODE=false
function GetCommandlineArguments {
	# Parse runtime flags into the global flag variables. The first positional
	# argument (the config file path) is tolerated as-is; any later argument
	# that matches no known option is fatal.
	local isFirstArgument=true
	local argument

	if [ $# -eq 0 ]; then
		Usage
	fi

	while [ $# -gt 0 ]; do
		argument="${1}"
		case "$argument" in
			--dry)
				_DRYRUN=true
				;;
			--silent)
				_LOGGER_SILENT=true
				;;
			--verbose)
				_LOGGER_VERBOSE=true
				;;
			--stats)
				stats=true
				;;
			--partial)
				partial_transfers=true
				;;
			--no-maxtime)
				no_maxtime=true
				;;
			--delete)
				delete_vanished=true
				;;
			--dontgetsize)
				dont_get_backup_size=true
				;;
			--help|-h|--version|-v)
				Usage
				;;
			--decrypt=*)
				_DECRYPT_MODE=true
				DECRYPT_PATH="${argument##*=}"
				;;
			--passphrase=*)
				PASSPHRASE="${argument##*=}"
				;;
			--passphrase-file=*)
				PASSPHRASE_FILE="${argument##*=}"
				;;
			--encrypt=*)
				_ENCRYPT_MODE=true
				CRYPT_SOURCE="${argument##*=}"
				;;
			--destination=*)
				CRYPT_STORAGE="${argument##*=}"
				;;
			--recipient=*)
				GPG_RECIPIENT="${argument##*=}"
				;;
			--errors-only)
				_LOGGER_ERR_ONLY=true
				;;
			--no-prefix)
				_LOGGER_PREFIX=""
				;;
			--parallel=*)
				PARALLEL_ENCRYPTION_PROCESSES="${argument##*=}"
				if [ $(IsNumeric "$PARALLEL_ENCRYPTION_PROCESSES") -ne 1 ]; then
					Logger "Bogus --parallel value. Using only one CPU." "WARN"
				fi
				;;
			*)
				if [ $isFirstArgument == false ]; then
					Logger "Unknown option '$argument'" "CRITICAL"
					Usage
				fi
				;;
		esac
		isFirstArgument=false
		shift
	done
}
GetCommandlineArguments "$@"
# Batch decrypt mode: operates directly on an encrypted backup, then exits.
if [ "$_DECRYPT_MODE" == true ]; then
CheckCryptEnvironnment
GetLocalOS
InitLocalOSDependingSettings
Logger "$DRY_WARNING$PROGRAM v$PROGRAM_VERSION decrypt mode begin." "ALWAYS"
DecryptFiles "$DECRYPT_PATH" "$PASSPHRASE_FILE" "$PASSPHRASE"
exit $?
fi
# Batch encrypt mode: encrypts a directory tree per gpg recipient, then exits.
if [ "$_ENCRYPT_MODE" == true ]; then
CheckCryptEnvironnment
GetLocalOS
InitLocalOSDependingSettings
Logger "$DRY_WARNING$PROGRAM v$PROGRAM_VERSION encrypt mode begin." "ALWAYS"
EncryptFiles "$CRYPT_SOURCE" "$CRYPT_STORAGE" "$GPG_RECIPIENT" true false
exit $?
fi
LoadConfigFile "$1"
# Reload GetCommandlineArguments to override config file with runtime arguments
GetCommandlineArguments "$@"
# Pick a writable log location: config LOGFILE wins; otherwise /var/log,
# then $HOME, then the current directory. LOG_FILE is the effective path.
if [ "$LOGFILE" == "" ]; then
if [ -w /var/log ]; then
LOG_FILE="/var/log/$PROGRAM.$INSTANCE_ID.log"
elif ([ "${HOME}" != "" ] && [ -w "${HOME}" ]); then
LOG_FILE="${HOME}/$PROGRAM.$INSTANCE_ID.log"
else
LOG_FILE=./$PROGRAM.$INSTANCE_ID.log
fi
else
LOG_FILE="$LOGFILE"
fi
# v2.3 config syntax compatibility
UpdateBooleans
if [ ! -w "$(dirname "$LOG_FILE")" ]; then
echo "Cannot write to log [$(dirname "$LOG_FILE")]."
else
Logger "Script begin, logging to [$LOG_FILE]." "DEBUG"
fi
# --no-maxtime: disable per-task and total execution time limits (0 = unlimited).
if [ $no_maxtime == true ]; then
SOFT_MAX_EXEC_TIME_DB_TASK=0
SOFT_MAX_EXEC_TIME_FILE_TASK=0
HARD_MAX_EXEC_TIME_DB_TASK=0
HARD_MAX_EXEC_TIME_FILE_TASK=0
HARD_MAX_EXEC_TIME_TOTAL=0
fi
# Map the remaining CLI flags onto their config-file counterparts.
if [ $partial_transfers == true ]; then
PARTIAL=true
fi
if [ $delete_vanished == true ]; then
DELETE_VANISHED_FILES=true
fi
if [ $dont_get_backup_size == true ]; then
GET_BACKUP_SIZE=false
fi
if [ "$IS_STABLE" != true ]; then
Logger "This is an unstable dev build [$PROGRAM_BUILD]. Please use with caution." "WARN"
fi
DATE=$(date)
Logger "--------------------------------------------------------------------" "NOTICE"
Logger "$DRY_WARNING$DATE - $PROGRAM v$PROGRAM_VERSION $BACKUP_TYPE script begin." "ALWAYS"
Logger "--------------------------------------------------------------------" "NOTICE"
Logger "Backup instance [$INSTANCE_ID] launched as $LOCAL_USER@$LOCAL_HOST (PID $SCRIPT_PID)" "NOTICE"
# Environment discovery, config sanity checks, hooks, then the actual backup.
GetLocalOS
InitLocalOSDependingSettings
CheckRunningInstances
PreInit
Init
CheckEnvironment
PostInit
CheckCurrentConfig
GetRemoteOS
InitRemoteOSDependingSettings
RunBeforeHook
Main
| true
|
cb39a58881ba46bae11a2c60845023baf02520b8
|
Shell
|
FicusCarica308/holberton-system_engineering-devops
|
/0x0C-web_server/1-install_nginx_web_server
|
UTF-8
| 561
| 2.96875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# installs nginx on the server this script is run on <set to listen on port 80> <returns "Holberton School" on curl>
# installing nginx https://www.digitalocean.com/community/tutorials/how-to-install-nginx-on-ubuntu-16-04
# setup https://www.digitalocean.com/community/tutorials/how-to-set-up-nginx-server-blocks-virtual-hosts-on-ubuntu-16-04
sudo apt-get -y update
sudo apt-get -y install nginx
sudo ufw allow 'Nginx HTTP' # enables firewall to port 80
# tee (instead of a plain '>') so the file write itself runs with root privileges
echo "Holberton School" | sudo tee /var/www/html/index.html
sudo service nginx restart
| true
|
72d30cdc68049dd24b75fe83f2c122674bc2991b
|
Shell
|
AlexYangYu/os-salt
|
/states/base/ifup_interfaces.sh
|
UTF-8
| 689
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash -
# Bring the management, storage and data interfaces up when they are not
# already UP, and make the default route match the configured gateway.
# '{{ ... }}' placeholders are rendered by the salt/jinja templating step.

# ifup the management interface
if ! ip a | grep {{ network.mgt_interface }} | grep UP
then
	ifup {{ network.mgt_interface }}
fi

# set up the default router
current_default_gw=$(ip r | grep default | awk '{ print $3 }')
if [[ '{{ network.default_route }}' != $current_default_gw ]]
then
	ip route del default
	ip route add default via {{ network.default_route }} dev {{ network.default_interface }}
fi

# ifup the storage interface
if ! ip a | grep {{ network.storage_interface }} | grep UP
then
	ifup {{ network.storage_interface }}
fi

# ifup the data interface
if ! ip a | grep {{ network.data_interface }} | grep UP
then
	ifup {{ network.data_interface }}
fi
| true
|
025de1acb97e1599fbf3adcc2de4d09e2897bbf3
|
Shell
|
zhaorizhao/suitup
|
/script/cd.sh
|
UTF-8
| 238
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
# Re-enter the current directory through its resolved physical path and print
# it (useful after the directory has been replaced underneath the shell).
function suitup-cd-realpath {
	cd "$(realpath .)" || return
	pwd
}

# Print the resolved physical path of the current directory.
function suitup-realpath {
	realpath .
}

# Jump to the suitup installation directory ($_suitup_path is set by suitup).
function suitup-cd-suitup {
	cd "$_suitup_path" || return
}

# Jump to the gems directory of the currently active rvm gemset.
function suitup-cd-gemset {
	local gem_name
	gem_name=$(rvm current)
	cd ~/.rvm/gems/"$gem_name"/gems || return
}
| true
|
8957ce1f8c7b161f62ea7f4f878518b925df728e
|
Shell
|
uscauv-legacy/old-uscauv-ros-pkg
|
/uscauv_scripts/bash/uscauv-add-license
|
UTF-8
| 7,218
| 3.21875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
###########################################################################
# scripts/add_license.sh
# --------------------
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Edward T. Kaszubski ( ekaszubski@gmail.com ), Dylan Foster
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of usc-ros-pkg nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
# Print the one-line usage synopsis, padded with blank lines.
usage()
{
	printf '\nUsage: uscauv-add-license [-p project] [-u user] [-f] files\n\n'
}
# No arguments: show usage and stop.
if [ $# -le 0 ]; then
usage
exit
fi
force="0"
# Hand-rolled option parser: -p project, -u user (repeatable, comma-joined),
# -f force; everything else is collected into $targets.
while [ "$1" != "" ]; do
case $1 in
-p ) shift
project=$1
shift
;;
-f ) shift;
force="1"
;;
-u ) shift
if [ "$users" == "" ]; then
users="$1"
else
users="$users, $1"
fi
shift
;;
--help ) usage
exit
;;
* ) if [ "$1" != "" ] && [ "$1" != "-p" ] && [ "$1" != "--help" ] && [ "$1" != "-u" ]; then
targets="$targets $1"
shift
fi
;;
esac
done
# Defaults: project name, then author list from $USCAUV_USER or the login name.
if [ "$project" == "" ]; then project="USC AUV"; fi
if [ "$users" == "" ]; then
if [ -n "$USCAUV_USER" ]; then
users="$USCAUV_USER"
else
users=`whoami`
fi
fi
echo ""
echo "Adding license with authors { $users } and project \"$project\" to files { $targets }"
echo ""
# NOTE(review): assumes `date`'s 6th whitespace field is the year — this is
# locale/format dependent; `date +%Y` would be robust. Confirm before changing.
year=`date | awk '{print $6;}'`
# C-style license header, split around the file name which gets inserted
# between part1 and part2.
license_c_part1="/***************************************************************************
* "
license_c_part2="
* --------------------
*
* Software License Agreement (BSD License)
*
* Copyright (c) $year, $users
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of $project nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************/
"
# '#'-comment license header for scripts, same split around the file name.
license_script_part1="###########################################################################
# "
license_script_part2="
# --------------------
#
# Copyright (c) $year, $users
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# # Neither the name of $project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
"
# Extensions that get '#'-style headers vs. C-style block-comment headers.
script_ext="py yaml yml bash sh cmake txt rviz"
c_ext="c cpp h hpp cc cxx hxx"

for target in $targets; do
	# NOTE: '#*.' strips up to the FIRST dot, so "a.tar.gz" yields "tar.gz";
	# such compound extensions fall through to the script-style default below.
	ext="${target#*.}"

	# Match extensions case-insensitively. The original code set
	# 'shopt -s nocasematch' here, but that only affects [[ ]]/case, not grep;
	# grep -i is what actually makes the comparison case-insensitive.
	if grep -qiw -- "$ext" <<<"$script_ext"; then
		license_part1=$license_script_part1
		license_part2=$license_script_part2
	elif grep -qiw -- "$ext" <<<"$c_ext"; then
		license_part1=$license_c_part1
		license_part2=$license_c_part2
	else
		# Unknown extension: default to '#'-style comments.
		license_part1=$license_script_part1
		license_part2=$license_script_part2
	fi

	file_contents=$(cat -- "$target")

	# Dump the original file contents in case there is an error,
	# unless -f (force) was given or the file was empty.
	if [ "$force" != "1" ] && [ -n "$file_contents" ]; then
		printf '%s\n' "$file_contents" > "$target~"
	fi

	# Prepend the license: part1 + file name + part2 + original contents.
	printf '%s\n' "$license_part1$target$license_part2$file_contents" > "$target"
done

echo "done"
| true
|
a40ca6c5a5c3e0cf17d709860cdd8c0d8066b08f
|
Shell
|
devopscenter/dcUtils
|
/monitoring/dcWatcher/start-dcWatcher.sh
|
UTF-8
| 1,837
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#===============================================================================
#
# FILE: start-dcWatcher.sh
#
# USAGE: ./start-dcWatcher.sh
#
# DESCRIPTION: start dcWatcher script that can be executed by docker when the
# container started. This way the dcWatcher.py can run and then
# a separate process will keep the image running and not terminate
# as soon as the dcWatcher.py finishes spawning off the watchmedo
# processes.
#
# NOTE: this is NOT intended to be used for running in an instance
# or a local host (ie, anything except running in a container)
#
# OPTIONS: ---
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: Gregg Jensen (), gjensen@devops.center
# Bob Lozano (), bob@devops.center
# ORGANIZATION: devops.center
# CREATED: 11/02/2016 15:45:23
# REVISION: ---
#
# Copyright 2014-2017 devops.center llc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#===============================================================================
#set -o nounset # Treat unset variables as an error
#set -o errexit # exit immediately if command exits with a non-zero status
#set -x # essentially debug mode
# Launch the watcher; it returns after spawning its watchmedo processes.
./dcWatcher.py
# Block forever so the container's main process stays alive after dcWatcher.py
# returns (otherwise docker would consider the container finished).
tail -f /dev/null
|
269b1441cf7a3e0bfc938de348919a0be8002fcc
|
Shell
|
zydronium/bankoi-webhostpanel-linux
|
/usr/local/webhostpanel/admin/htdocs/server/cpustats.sh
|
UTF-8
| 1,307
| 3.328125
| 3
|
[] |
no_license
|
# Snapshot CPU load, memory, swap, and ext3 disk usage by parsing one batch
# of `top` output and one run of `df`.
# NOTE(review): all field positions below assume a specific `top` output
# layout (line 0 = load averages, line 3 = memory, line 4 = swap) — verify
# against the target system's `top` version before trusting the numbers.
filename="/tmp/.cpstats"
# Capture a single batch of top output into the scratch file.
top -n0 > $filename
# Remember the controlling terminal so stdin can be restored later.
term=`tty`
exec < $filename
i=0
# Examine only the first five header lines of the top output.
while [ $i -le 4 ]
do
read line
# Split the line into positional parameters $1..$N on whitespace.
set $line
case $i in
0)
# Load-average line: strip trailing commas from each field.
uptime=`echo $4 | tr "," " "`
cpu1=`echo $9 | tr "," " "`
# After shift 9, $1/$2 are the original fields 10 and 11 (5- and 15-min load).
shift 9
cpu5=`echo $1 | tr "," " "`
cpu15=`echo $2 | tr "," " "`
echo uptime=$uptime
echo cpu 1 min = $cpu1
echo cpu 5 min = $cpu5
echo cpu 15 min= $cpu15
;;
3)
# Memory line: strip the trailing "K" unit suffix from each value.
mem_total=`echo $2 | tr "K" " "`
mem_used=`echo $4 | tr "K" " "`
mem_free=`echo $6 | tr "K" " "`
mem_shared=`echo $8 | tr "K" " "`
# After shift 1, $9 is the original field 10 (buffers) — TODO confirm layout.
shift 1
mem_buff=`echo $9 | tr "K" " "`
echo $mem_total KB
echo $mem_used KB
echo $mem_free KB
echo $mem_shared KB
echo $mem_buff KB
;;
4)
# Swap line: same "K"-suffix stripping as the memory line.
swp_total=`echo $2 | tr "K" " "`
swp_used=`echo $4 | tr "K" " "`
swp_free=`echo $6 | tr "K" " "`
cache_mem=`echo $8 | tr "K" " "`
echo Total Swap= $swp_total KB
echo Swap Used= $swp_used KB
echo Swap Free= $swp_free KB
echo Cache memory= $cache_mem KB
;;
esac
i=`expr $i + 1`
done
i=0
# Restore stdin to the terminal before the next capture.
exec < $term
echo -e "Filesystem\tTotal Size\tUsed\t\tAvailable\tCapacity\tMount Point"
# Only ext3 filesystems are reported; other fs types are ignored.
df --type=ext3 -h > $filename
exec < $filename
while read line
do
# Skip the df header row (i == 0), print every data row re-tabulated.
if [ $i -ne 0 ]
then
set $line
file_system=$1
total_siz=$2
used=$3
avail=$4
capacity=$5
mounted_on=$6
echo -e "$file_system\t$total_siz\t\t$used\t\t$avail\t\t$capacity\t\t$mounted_on"
fi
i=`expr $i + 1`
done
# Restore stdin and remove the scratch file.
exec < $term
rm -f $filename
| true
|
990332876f324bdb55101394c2fd316178e8bc4b
|
Shell
|
huangyaling/caffe_boost
|
/examples/rfcn/run_all_online_sc.sh
|
UTF-8
| 3,163
| 3.78125
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Run the RFCN online single-core test for each configured network.
# Parameter1 quantize_option: 0-int16; 1-int8
# Parameter2 mlu_option: 1-MLU; 2-MFUS
# Parameter3 core_version: MLU270; MLU220

# Print command-line help.
# BUG FIX: the script requires three arguments, but the help text only
# described the first two; parameter3 (core version) is now documented.
usage()
{
    echo "Usage:"
    echo "  $0 [0|1] [1|2] [MLU270 | MLU220]"
    echo ""
    echo "  Parameter description:"
    echo "    parameter1: int8 mode or int16 mode. 0:int16, 1:int8"
    echo "    parameter2: layer by layer or fusion. 1:layer by layer; 2:fusion"
    echo "    parameter3: hardware core version. MLU270 or MLU220"
}
# checkFile PATH — succeed (status 0) when PATH is an existing regular
# file, fail (status 1) otherwise.  Used to skip missing models/prototxts.
checkFile()
{
    [ -f "$1" ]
}
# Exactly three positional arguments are required; otherwise print help
# and abort.
if (( $# != 3 )); then
    echo "[ERROR] Unknown parameter."
    usage
    exit 1
fi

# Bangop toggle.  NOTE(review): the value is always 1 here even though the
# comment in the original header called the default "disabled" — confirm.
bang_option=1
core_version=$3

# Networks to exercise in this run.
network_list=(rfcn)
# Run one online single-core inference test plus its mAP check.
# Globals read: proto_file, model_file, CAFFE_DIR, CURRENT_DIR, FILE_LIST,
#   mlu_option, core_version, bang_option, SUFFIX, VOC_PATH.
# Outputs: per-run log file next to this script; progress lines on stdout.
do_run()
{
# Clear image/result artifacts left over from a previous network run.
/bin/rm *.jpg &> /dev/null
/bin/rm 200*.txt &> /dev/null
echo "----------------------"
echo "single core"
echo "using prototxt: $proto_file"
echo "using model: $model_file"
# Derive the log name from the prototxt: swap the extension, drop the path.
log_file=$(echo $proto_file | sed 's/prototxt$/log/' | sed 's/^.*\///')
# Truncate/create the log file.
echo > $CURRENT_DIR/$log_file
# Build the inference command as a string so it can be logged before eval.
run_cmd="$CAFFE_DIR/build/examples/rfcn/rfcn_online_singlecore$SUFFIX \
-model $proto_file \
-weights $model_file \
-images $CURRENT_DIR/$FILE_LIST \
-outputdir $CURRENT_DIR \
-mmode $mlu_option \
-mcore $core_version \
-Bangop $bang_option &>> $CURRENT_DIR/$log_file"
# Accuracy check: compute mean AP over the VOC ground truth.
check_cmd="python $CAFFE_DIR/scripts/meanAP_VOC.py $CURRENT_DIR/$FILE_LIST $CURRENT_DIR/ $VOC_PATH &>> $CURRENT_DIR/$log_file"
# Record both command lines in the log for reproducibility.
echo "run_cmd: $run_cmd" &>> $CURRENT_DIR/$log_file
echo "check_cmd: $check_cmd" &>> $CURRENT_DIR/$log_file
echo "running online test..."
eval "$run_cmd"
# Surface the timing summary from the log on stdout.
grep "^Total execution time:" -A 2 $CURRENT_DIR/$log_file
eval "$check_cmd"
# The last log line holds the mAP result produced by check_cmd.
tail -n 1 $CURRENT_DIR/$log_file
}
# Directory containing this script; logs and outputs are written here.
CURRENT_DIR=$(dirname "$(readlink -f "$0")")

# Validate CAFFE_DIR before using it.
# BUG FIX: the original branch assigned CAFFE_DIR to itself when it was
# empty (a no-op), so the script went on to source "/scripts/..." and
# failed with a confusing error.  Fail fast with a clear message instead.
if [ -z "$CAFFE_DIR" ]; then
    echo "[ERROR] CAFFE_DIR is not set. Please export CAFFE_DIR."
    exit 1
else
    if [ ! -d "$CAFFE_DIR" ]; then
        echo "[ERROR] Please check CAFFE_DIR."
        exit 1
    fi
fi

# Pulls in CAFFE_MODELS_DIR, FILE_LIST, SUFFIX, VOC_PATH, etc.
. $CAFFE_DIR/scripts/set_caffe_module_env.sh

# Map parameter2 to the runtime mode: layer-by-layer (MLU) or fusion (MFUS).
mlu_option=""
if [[ $2 -eq 1 ]]; then
    mlu_option="MLU"
elif [[ $2 -eq 2 ]]; then
    mlu_option="MFUS"
else
    echo "[ERROR] Unknown parameter."
    usage
    exit 1
fi

# Map parameter1 to the quantization suffix used in model/prototxt names.
quantize_type=$1
ds_name=""
if [[ $quantize_type -eq 1 ]]; then
    ds_name="int8"
elif [[ $quantize_type -eq 0 ]]; then
    ds_name="int16"
else
    echo "[ERROR] Unknown parameter."
    usage
    exit 1
fi

# Remove artifacts from any previous run.
/bin/rm *.jpg &> /dev/null
/bin/rm 200*.txt &> /dev/null
/bin/rm *.log &> /dev/null

# Run every network whose model and prototxt files are present on disk;
# missing files are skipped silently (checkFile returns 1).
for network in "${network_list[@]}"; do
    model_file=$CAFFE_MODELS_DIR/${network}/${network}_${ds_name}_dense.caffemodel
    checkFile $model_file
    if [ $? -eq 1 ]; then
        continue
    fi
    echo "===================================================="
    echo "running ${network} online - ${ds_name}, Bangop..."
    bang_option=1
    for proto_file in $CAFFE_MODELS_DIR/${network}/${network}_${ds_name}*dense_1batch.prototxt; do
        checkFile $proto_file
        if [ $? -eq 1 ]; then
            continue
        fi
        do_run
    done
done
| true
|
09c505c9767f759574fb992b148e7c457205b324
|
Shell
|
josepablocam/aquery2q
|
/src/visualize/run_viz.sh
|
UTF-8
| 828
| 3.234375
| 3
|
[] |
no_license
|
# Takes port for q process as argument.
# Launches an AQuery-backed q process plus an R Shiny visualization app;
# the q process is killed when the R session exits.
# FIX: use the standard "-ne" numeric test (the original "[ ! $# -eq 1 ]"
# works but is non-idiomatic), quote all expansions, and check cd results.
if [ "$#" -ne 1 ]
then
    echo "Usage: <port-number-for-q-process>"
    exit 1
fi

# AQuery compiler: a2q lives in the parent directory of this script's cwd.
base_dir=$(pwd)
cd .. || exit 1
export A2Q="$(pwd)/a2q"
cd "$base_dir" || exit 1

# Port for q process
PORT=$1
AQUERYDIR=./aquery/

# if there is no folder for q, copy over what we brought
if [ ! -d "$HOME/q/" ]
then
    cp -r ./q/ "$HOME/q/"
fi
Q=$HOME/q/m32/q

# compile aquery file
"$A2Q" -a 1 -c -o "${AQUERYDIR}/setup.q" "${AQUERYDIR}/setup.a"

# launch q process with port (runs in background and is killed upon exit
# of the R process)
"$Q" "${AQUERYDIR}/setup.q" -p "$PORT" &

# save down PID to kill
q_pid=$!

# launch R shiny application (blocks until the user closes it)
R -e '.libPaths(c("./Rdeps",.libPaths())); shiny::runApp(appDir = "R/", launch.browser = TRUE)'

# terminate q process
kill "$q_pid"
| true
|
c6b0b19309e947f12e0b19e241e6b3920d51be6e
|
Shell
|
Kitware/trame
|
/docker/scripts/run.sh
|
UTF-8
| 1,143
| 4.09375
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# This script is used to start the trame server.
# If the `TRAME_USE_HOST` environment variable is set, this
# will replace `USE_HOST` in the launcher json file. If it contains
# `://`, it will replace `ws://USE_HOST` instead.

if [ ! -d /deploy/server ]; then
  echo "ERROR: The server directory must be in the container at '/deploy/server'"
  exit 1
fi

# First, activate the venv
. /opt/trame/activate_venv.sh

# We will copy the launcher and make any needed edits to it
LAUNCHER_TEMPLATE_PATH=/deploy/server/launcher.json
LAUNCHER_PATH=/opt/trame/config.json

OUTPUT=$(<"${LAUNCHER_TEMPLATE_PATH}")

if [[ -n $TRAME_USE_HOST ]]; then
  REPLACEMENT_STRING="USE_HOST"
  if [[ $TRAME_USE_HOST == *"://"* ]]; then
    # If the string contains "://", then we are replacing the "ws://" at
    # the beginning as well
    REPLACEMENT_STRING="ws://$REPLACEMENT_STRING"
  fi

  OUTPUT="${OUTPUT//$REPLACEMENT_STRING/$TRAME_USE_HOST}"
fi

# BUG FIX: `echo -e` interprets backslash escapes (\n, \t, \\) that can
# legitimately appear inside JSON string values, corrupting the launcher
# config.  printf '%s\n' writes the content verbatim.
printf '%s\n' "$OUTPUT" > "${LAUNCHER_PATH}"

# Run the launcher in the foreground so this script doesn't end
echo "Starting the wslink launcher at"
python -m wslink.launcher "${LAUNCHER_PATH}"
| true
|
4b951d98fb3806ea85e37f1f52cbf163035155c3
|
Shell
|
martiege/TTK4155-Industrielle-og-innbygde-datasystemers-konstruksjon-Byggern
|
/microbit-master/auto_setup_script/auto_setup_tools
|
UTF-8
| 360
| 3.765625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# BUG FIX: the original shebang was "#!/bin/sh", but the script relies on
# bash-only features (BASH_SOURCE, [[ ]], the `function` keyword), so it
# breaks under dash/ash.  Run it under bash explicitly.

# Directory containing this script.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Create a temporary working directory inside the script's directory.
WORK_DIR="$( mktemp -d -p "$DIR" )"
if [[ ! "$WORK_DIR" || ! -d "$WORK_DIR" ]]; then
  echo "Could not create a working directory"
  exit 1
fi

# Cleanup hook on any exit path.  Deleting WORK_DIR is intentionally left
# disabled, as in the original — TODO confirm whether it should be removed.
function cleanup {
  # rm -rf "$WORK_DIR"
  echo "Done."
}
trap cleanup EXIT

# Download tools
cd "$WORK_DIR"
echo "$(pwd)"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.