blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a2d19524fa46091db9131935d19450abb137b678
|
Shell
|
shell909090/utils
|
/7z/7zdir
|
UTF-8
| 77
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Compress each argument into its own .7z archive (solid mode off) and
# delete the original afterwards.
# Fix: only remove the source when 7z succeeded — previously a failed or
# interrupted compression still ran `rm -rf`, destroying the input.
for i in "$@"
do
  7z a -ms=off "$i.7z" "$i" && rm -rf "$i"
done
| true
|
a3c6880ccef1a2aa31dcdb0d7e099c1a61346548
|
Shell
|
martin34/codewars
|
/shell/latest/wait_for_new_file.sh
|
UTF-8
| 295
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh
# Resolve the directory this script lives in (readlink -f canonicalizes
# symlinks) and pull in the shared argument helpers.
# Fix: quote all expansions — the original broke on paths with spaces.
SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
. "$SCRIPT_DIR/input_args.sh"

# Block until some file inside the target directory is modified.
# $1 - forwarded to set_TARGET_DIR_from_input, which sets $TARGET_DIR.
# Returns 0 once inotifywait reports a modify event.
wait_for_new_file_in()
{
  set_TARGET_DIR_from_input "$1"
  echo "Wait for new file in $TARGET_DIR"
  # requires inotify-tools: sudo apt-get install inotify-tools
  inotifywait -e modify "$TARGET_DIR"
  echo "New file was created"
  return 0
}
| true
|
2ae679e66822a88a3723e2feb5bed83cb3387d89
|
Shell
|
conp-solutions/automated-reasoner-builds
|
/building/build-aigbmc.sh
|
UTF-8
| 2,184
| 4.09375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Copyright Norbert Manthey, 2019
#
# This script builds aigbmc as a statically linked binary
# in case something goes wrong, notify!
set -ex
# name of the tool we build
declare -r TOOL="aigbmc"
declare -r TOOL_URL="" # does not come with git repository (yet?)
# declare a suffix for the binary
declare -r BUILD_SUFFIX="_static"
# the final binary should be moved here
SCRIPT_DIR=$(dirname "$0")
declare -r BINARY_DIRECTORY="$(readlink -e "$SCRIPT_DIR")/binaries"
# commit we might want to build, defaults to empty, if none is specified
declare -r COMMIT=${AIGBMC_COMMIT:-}
# specific instructions to build the tool: aigbmc
# this function is called in the source directory of the tool
# Build aigbmc and its SAT back-ends as a statically linked binary.
# Must be called from inside the tool source directory (see build() below),
# which is expected to contain picosat/, lingeling/ and aiger-1.9.9/.
# The final binary is copied to $BINARY_DIRECTORY/${TOOL}${BUILD_SUFFIX}.
# Build order matters: aigbmc links against both SAT solvers, so picosat
# and lingeling are built first.
build_tool ()
{
# build picosat
pushd picosat
./configure.sh
make -j $(nproc)
popd
# build lingeling
pushd lingeling
./configure.sh
make -j $(nproc)
popd
# build aiger-1.9.9
pushd aiger-1.9.9
./configure.sh
make aigbmc -j $(nproc) CFLAGS=-static
# check file properties
file aigbmc
# make executable for everybody
chmod oug+rwx aigbmc
# store created binary in destination directory, with given suffix
cp aigbmc "$BINARY_DIRECTORY"/"${TOOL}${BUILD_SUFFIX}"
popd
}
#
# this part of the script should be rather independent of the actual tool
#
# build the tool
# Enter the tool's source directory, run the tool-specific build steps,
# and return to the previous directory. Runs under `set -e`, so any
# failure inside build_tool aborts the script.
build()
{
pushd "$TOOL"
build_tool
popd
}
# get the tool via the given URL
# Download the aiger sources plus the SAT back-ends into "$TOOL" (skipped
# when that directory already exists), then optionally pin to $COMMIT.
get()
{
# get the solver, store in directory "$TOOL"
if [ ! -d "$TOOL" ]
then
mkdir -p "$TOOL"
pushd "$TOOL"
# get actual aiger package
wget http://fmv.jku.at/aiger/aiger-1.9.9.tar.gz
tar xzf aiger-1.9.9.tar.gz
rm -rf aiger-1.9.9.tar.gz
# get SAT backends
git clone https://github.com/arminbiere/lingeling.git # we will use lingeling as SAT backend
wget http://fmv.jku.at/picosat/picosat-965.tar.gz # picosat is required to build successfully
tar xzf picosat-965.tar.gz
rm -f picosat-965.tar.gz
ln -sf picosat-965 picosat
popd
fi
# in case there is a specific commit, jump to this commit
# NOTE(review): "$TOOL" itself is a plain download directory, not a git
# checkout (only lingeling/ inside it is cloned) — confirm that the
# fetch/reset below targets the intended repository when COMMIT is set.
if [ -n "$COMMIT" ]
then
pushd "$TOOL"
git fetch origin
git reset --hard "$COMMIT"
# no submodules are used in aigbmc
popd
fi
}
mkdir -p "$BINARY_DIRECTORY"
get
build
| true
|
e564dac8f3a5e5cbe9bd9ad6ab0a87f20905d035
|
Shell
|
tipsi/tipsi-dropdown
|
/scripts/post-link-ios.sh
|
UTF-8
| 654
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Post-link hook: register the TPSDropDown pod in the host app's iOS
# project and install pods. Only meaningful on macOS.
if [ "$(uname)" != "Darwin" ]; then
  echo "Current OS is not macOS, skip iOS linking"
  exit 0
fi

ios_dir=`pwd`/ios

# Nothing to link when the project has no ios/ directory.
# Fix: the original tested `[ -d ios_dir ]` — the literal string, missing
# both the "$" and the negation — so this guard could never fire.
if [ ! -d "$ios_dir" ]; then
  exit 0
fi

podfile="$ios_dir/Podfile"
pod_dep="pod 'TPSDropDown', '~> 1.0'"

echo "Checking Podfile in iOS project ($podfile)"
if [ ! -f "$podfile" ]; then
  echo "Adding Podfile to iOS project"
  cd ios
  pod init >/dev/null 2>&1
  cd ..
else
  echo "Found an existing Podfile"
fi

# Append the dependency exactly once (grep -q keeps this idempotent).
if ! grep -q "$pod_dep" "$podfile"; then
  echo "Adding the following pod to Podfile:"
  echo ""
  echo "$pod_dep"
  echo ""
  echo "$pod_dep" >> "$podfile"
fi

echo "Installing Pods"
pod install --project-directory=ios
| true
|
8e38df083a2acbc9124d6998c2c9ec4e8922afbd
|
Shell
|
daavvis/vendor_samsung_degaswifi
|
/proprietary/bin/run_composite.sh
|
UTF-8
| 5,394
| 3.328125
| 3
|
[] |
no_license
|
#!/system/bin/sh
setprop sys.telephony.default.loglevel 8
MODULE_DIR=/lib/modules
insmod $MODULE_DIR/msocketk.ko
insmod $MODULE_DIR/cploaddev.ko
insmod $MODULE_DIR/seh.ko
# load cp and mrd image and release cp
/system/bin/cploader
ret="$?"
if [ ! -e $NVM_ROOT_DIR ]; then
mkdir -p $NVM_ROOT_DIR
chown system.system $NVM_ROOT_DIR
fi
if [ ! -e $MARVELL_RW_DIR ]; then
mkdir -p $MARVELL_RW_DIR
chown system.system $MARVELL_RW_DIR
chmod 0755 $MARVELL_RW_DIR
fi
case "$ret" in
"-1")
rmmod seh
rmmod cploaddev
rmmod msocketk
stop ril-daemon
exit -1
;;
"1")
rmmod seh
rmmod cploaddev
rmmod msocketk
stop ril-daemon
start nvm-aponly
start diag-aponly
insmod $MODULE_DIR/citty.ko
start atcmdsrv-aponly
exit 1
;;
*)
;;
esac
# $1 src file
# $2 dst file
# Copy $1 to $2 unless $2 already exists, then make the copy world-writable
# and owned by system.system.
# $1 - source file
# $2 - destination file
# Fix: quote the path expansions — the original word-split paths that
# contain spaces.
function copy_if_not_exist()
{
    if [ -f "${2}" ]; then
        echo "existing ${2}";
    else
        if [ -f "${1}" ]; then
            cp "${1}" "${2}"
            chmod 666 "${2}"
            chown system.system "${2}"
            echo "cp: ${1} -> ${2}"
        fi
    fi
}
kernel_cmdline=`cat /proc/cmdline`
T7_BOARDID="board_id=0x7"
# Report via exit status whether the kernel command line captured at
# startup identifies a T7 board: returns 0 when $kernel_cmdline contains
# the $T7_BOARDID marker, 1 otherwise.
function is_t7_board()
{
    case "$kernel_cmdline" in
        *"$T7_BOARDID"*)
            return 0
            ;;
        *)
            return 1
            ;;
    esac
}
# copy correct RF config file for CP
# pxa986 T7 board -> T7 specific
cputype=`cat /sys/devices/system/cpu/cpu0/cputype`
case "$cputype" in
"pxa986ax"|"pxa986zx"|"pxa1088")
if is_t7_board; then
rfcfg_src="rfcfg/LyraConfig_T7.nvm"
rfcfg_dst="LyraConfig.nvm"
else
rfcfg_src=""
rfcfg_dst=""
fi
;;
*)
rfcfg_src=""
rfcfg_dst=""
;;
esac
# copy COMCfg.csv
string=`getprop`;
if [[ $string == *SM-T232* ]]
then
nvm_src="ttc/degas3gjv/COMCfg.csv"
else
nvm_src="ttc/COMCfg.csv"
fi
nvm_dst="COMCfg.csv"
copy_if_not_exist "/etc/tel/${nvm_src}" "${NVM_ROOT_DIR}/${nvm_dst}"
chown system system $NVM_ROOT_DIR/$nvm_dst
chmod 0666 $NVM_ROOT_DIR/$nvm_dst
# copy DipChannelChange.nvm file for CP
nvm_src="ttc/DipChannelChange.nvm"
nvm_dst="DipChannelChange.nvm"
copy_if_not_exist "/etc/tel/${nvm_src}" "${NVM_ROOT_DIR}/${nvm_dst}"
chown system system $NVM_ROOT_DIR/$nvm_dst
chmod 0644 $NVM_ROOT_DIR/$nvm_dst
# copy DipChannelChange.nvm file for CP
nvm_src="ttc/LyraConfig.nvm"
nvm_dst="LyraConfig.nvm"
copy_if_not_exist "/etc/tel/${nvm_src}" "${NVM_ROOT_DIR}/${nvm_dst}"
chown system system $NVM_ROOT_DIR/$nvm_dst
chmod 0644 $NVM_ROOT_DIR/$nvm_dst
# copy TTPCom_NRAM2_ABMM_WRITEABLE_DATA.gki file for CP
nvm_src="ttc/TTPCom_NRAM2_ABMM_WRITEABLE_DATA.gki"
nvm_dst="TTPCom_NRAM2_ABMM_WRITEABLE_DATA.gki"
copy_if_not_exist "/etc/tel/${nvm_src}" "${NVM_ROOT_DIR}/${nvm_dst}"
chown system system $NVM_ROOT_DIR/$nvm_dst
chmod 0644 $NVM_ROOT_DIR/$nvm_dst
# copy RFPmaxReductionConfig.nvm file for CP
nvm_src="ttc/RFPmaxReductionConfig.nvm"
nvm_dst="RFPmaxReductionConfig.nvm"
copy_if_not_exist "/etc/tel/${nvm_src}" "${NVM_ROOT_DIR}/${nvm_dst}"
chown system system $NVM_ROOT_DIR/$nvm_dst
chmod 0644 $NVM_ROOT_DIR/$nvm_dst
# copy audio calibration files to NVM if not exist
audio_avc="audio_avc.nvm"
audio_config="audio_config.nvm"
audio_ctm="audio_ctm.nvm"
audio_diamond="audio_diamond.nvm"
audio_DualMic="audio_DualMic.nvm"
audio_ec="audio_ec.nvm"
audio_effect_config="audio_effect_config.xml"
audio_eq="audio_eq.nvm"
audio_gain_calibration="audio_gain_calibration.xml"
audio_gssp_config="audio_gssp_config.nvm"
audio_HLPF="audio_HLPF.nvm"
audio_misc="audio_misc.nvm"
audio_MSAmain="audio_MSAmain.nvm"
audio_ns="audio_ns.nvm"
audio_swvol_calibration="audio_swvol_calibration.xml"
copy_if_not_exist "/etc/tel/${audio_avc}" "${NVM_ROOT_DIR}/${audio_avc}"
copy_if_not_exist "/etc/tel/${audio_config}" "${NVM_ROOT_DIR}/${audio_config}"
copy_if_not_exist "/etc/tel/${audio_ctm}" "${NVM_ROOT_DIR}/${audio_ctm}"
copy_if_not_exist "/etc/tel/${audio_diamond}" "${NVM_ROOT_DIR}/${audio_diamond}"
copy_if_not_exist "/etc/tel/${audio_DualMic}" "${NVM_ROOT_DIR}/${audio_DualMic}"
copy_if_not_exist "/etc/tel/${audio_ec}" "${NVM_ROOT_DIR}/${audio_ec}"
copy_if_not_exist "/etc/tel/${audio_effect_config}" "${NVM_ROOT_DIR}/${audio_effect_config}"
copy_if_not_exist "/etc/tel/${audio_eq}" "${NVM_ROOT_DIR}/${audio_eq}"
copy_if_not_exist "/etc/tel/${audio_gain_calibration}" "${NVM_ROOT_DIR}/${audio_gain_calibration}"
copy_if_not_exist "/etc/tel/${audio_gssp_config}" "${NVM_ROOT_DIR}/${audio_gssp_config}"
copy_if_not_exist "/etc/tel/${audio_HLPF}" "${NVM_ROOT_DIR}/${audio_HLPF}"
copy_if_not_exist "/etc/tel/${audio_misc}" "${NVM_ROOT_DIR}/${audio_misc}"
copy_if_not_exist "/etc/tel/${audio_MSAmain}" "${NVM_ROOT_DIR}/${audio_MSAmain}"
copy_if_not_exist "/etc/tel/${audio_ns}" "${NVM_ROOT_DIR}/${audio_ns}"
copy_if_not_exist "/etc/tel/${audio_swvol_calibration}" "${NVM_ROOT_DIR}/${audio_swvol_calibration}"
# end copy audio calibration
copy_if_not_exist "/etc/tel/${rfcfg_src}" "${NVM_ROOT_DIR}/${rfcfg_dst}"
insmod $MODULE_DIR/citty.ko
insmod $MODULE_DIR/cci_datastub.ko
insmod $MODULE_DIR/ccinetdev.ko
insmod $MODULE_DIR/gs_modem.ko
insmod $MODULE_DIR/diag.ko
insmod $MODULE_DIR/gs_diag.ko
insmod $MODULE_DIR/cidatattydev.ko
insmod $MODULE_DIR/usimeventk.ko
sync
start eeh
start nvm
start diag
start atcmdsrv
start vcm
# for usimevent
sleep 1
chown radio:system /sys/devices/virtual/usim_event/usim0/enable
chown radio:system /sys/devices/virtual/usim_event/usim0/send_event
chmod 0660 /sys/devices/virtual/usim_event/usim0/enable
chmod 0660 /sys/devices/virtual/usim_event/usim0/send_event
exit 0
| true
|
771b5c8fda16fb105747227ad24a1640d29a2c21
|
Shell
|
tangzhenquan/estool
|
/build.sh
|
UTF-8
| 644
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
INSTALL_DIR=`pwd`"/build/bin"
# Build one component via its Makefile and install it.
# $1 - component directory (must contain a Makefile)
# $2 - install destination, passed to `make install` as DESTDIR
# Exits the script with status 1 when the Makefile is missing or make fails.
# Fix: quote "$1"/"$2"/"$(pwd)" so directories with spaces work.
function build()
{
    echo "Building $1 ..."
    if [ -f "$1/Makefile" ]; then
        build_cwd=$(pwd)
        cd "$1"
        if ! make install DESTDIR="$2"; then
            echo "$1 failed"
            exit 1
        else
            echo "$1 done"
        fi
        cd "$build_cwd"
    else
        echo "not found Makefile "
        exit 1
    fi
}
# Components built by default; "$1" selects a single one ("all" keeps the list).
build_items="cmd/esImport \
cmd/tool \
cmd/esQuery"
if [ ! -z "$1" ]; then
    if [ "$1" != "all" ]; then
        build_items=$1
    fi
fi
# Second argument overrides the install directory.
# Fix: this previously assigned $1 (the item selector), silently ignoring
# the directory the caller asked for.
if [ ! -z "$2" ]; then
    INSTALL_DIR=$2
fi
if [ ! -d "$INSTALL_DIR" ]; then
    mkdir -p "$INSTALL_DIR"
fi
# build_items is deliberately unquoted: it is a whitespace-separated list.
for item in $build_items
do
    build "$item" "$INSTALL_DIR"
done
| true
|
18a5c2ed8e8a10d6c92176f26e3723b1ff0afe31
|
Shell
|
Azure/batch-shipyard
|
/scripts/shipyard_cascade.sh
|
UTF-8
| 6,263
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# log LEVEL MESSAGE... : emit a timestamped diagnostic line on stdout,
# formatted as "<utc-iso8601-ns> - LEVEL - MESSAGE".
log() {
    local severity="$1"
    shift
    local stamp
    stamp=$(date -u -Ins)
    echo "$stamp - $severity - $*"
}
# globals
block=
cascadecontainer=0
cascade_docker_image=
cascade_singularity_image=
concurrent_source_downloads=10
envfile=
is_start_task=0
log_directory=
prefix=
singularity_basedir=
# process command line options
while getopts "h?b:c:de:i:j:l:p:s:t" opt; do
case "$opt" in
h|\?)
echo "shipyard_cascade.sh parameters"
echo ""
echo "-b [images] block on images"
echo "-c [concurrent source downloads] concurrent source downloads"
echo "-d use docker container for cascade"
echo "-e [envfile] environment file"
echo "-i [cascade docker image] cascade docker image"
echo "-j [cascade singularity image] cascade singularity image"
echo "-l [log directory] log directory"
echo "-p [prefix] storage container prefix"
echo "-s [singularity basedir] singularity base directory"
echo "-t run cascade as part of the start task"
echo ""
exit 1
;;
b)
block=$OPTARG
;;
c)
concurrent_source_downloads=$OPTARG
;;
d)
cascadecontainer=1
;;
e)
envfile=$OPTARG
;;
i)
cascade_docker_image=$OPTARG
;;
j)
cascade_singularity_image=$OPTARG
;;
l)
log_directory=$OPTARG
;;
p)
prefix=$OPTARG
;;
s)
singularity_basedir=$OPTARG
;;
t)
is_start_task=1
;;
esac
done
shift $((OPTIND-1))
[ "$1" = "--" ] && shift
if [ $cascadecontainer -eq 1 ] && [ -z "$envfile" ]; then
log ERROR "envfile not specified"
exit 1
fi
if [ $cascadecontainer -eq 1 ] && [ -z "$cascade_docker_image" ]; then
log ERROR "cascade docker image not specified"
exit 1
fi
if [ $cascadecontainer -eq 1 ] && [ -n "$singularity_basedir" ] && [ -z "$cascade_singularity_image" ]; then
log ERROR "cascade singularity image not specified"
exit 1
fi
if [ -z "$log_directory" ]; then
log ERROR "log directory not specified"
exit 1
fi
if [ -z "$prefix" ]; then
log ERROR "prefix not specified"
exit 1
fi
spawn_cascade_process() {
set +e
local cascade_docker_pid
local cascade_singularity_pid
local detached
if [ -z "$block" ]; then
detached="-d"
fi
if [ $cascadecontainer -eq 1 ]; then
tmp_envfile="$envfile.tmp"
cp "$envfile" "$tmp_envfile"
echo "log_directory=$log_directory" >> "$tmp_envfile"
# run cascade for docker
log DEBUG "Starting $cascade_docker_image"
# shellcheck disable=SC2086
docker run $detached --rm --runtime runc --env-file "$tmp_envfile" \
-e "cascade_mode=docker" \
-e "is_start_task=$is_start_task" \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /etc/passwd:/etc/passwd:ro \
-v /etc/group:/etc/group:ro \
-v "$AZ_BATCH_NODE_ROOT_DIR":"$AZ_BATCH_NODE_ROOT_DIR" \
-w "$AZ_BATCH_TASK_WORKING_DIR" \
"$cascade_docker_image" &
cascade_docker_pid=$!
# run cascade for singularity
if [ -n "$singularity_basedir" ]; then
log DEBUG "Starting $cascade_singularity_image"
local singularity_binds
# set singularity options
singularity_binds="\
-v $singularity_basedir:$singularity_basedir \
-v $singularity_basedir/mnt:/var/lib/singularity/mnt"
# shellcheck disable=SC2086
docker run $detached --rm --runtime runc --env-file $tmp_envfile \
-e "cascade_mode=singularity" \
-v /etc/passwd:/etc/passwd:ro \
-v /etc/group:/etc/group:ro \
${singularity_binds} \
-v "$AZ_BATCH_NODE_ROOT_DIR":"$AZ_BATCH_NODE_ROOT_DIR" \
-w "$AZ_BATCH_TASK_WORKING_DIR" \
"$cascade_singularity_image" &
cascade_singularity_pid=$!
fi
else
# add timings
if [[ -n ${SHIPYARD_TIMING+x} ]]; then
# mark start cascade
# shellcheck disable=SC2086
./perf.py cascade start --prefix "$prefix"
fi
log DEBUG "Starting Cascade Docker mode"
# shellcheck disable=SC2086
PYTHONASYNCIODEBUG=1 ./cascade.py --mode docker \
--concurrent "$concurrent_source_downloads" \
--prefix "$prefix" \
--log-directory "$log_directory" &
cascade_docker_pid=$!
# run cascade for singularity
if [ -n "$singularity_basedir" ]; then
log DEBUG "Starting Cascade Singularity mode"
# shellcheck disable=SC2086
PYTHONASYNCIODEBUG=1 ./cascade.py --mode singularity \
--concurrent "$concurrent_source_downloads" \
--prefix "$prefix" \
--log-directory "$log_directory" &
cascade_singularity_pid=$!
fi
fi
# wait for cascade exit
if [ -n "$block" ]; then
local rc
wait $cascade_docker_pid
rc=$?
if [ $rc -eq 0 ]; then
log DEBUG "Cascade Docker exited successfully"
else
log ERROR "Cascade Docker exited with non-zero exit code: $rc"
exit $rc
fi
if [ -n "$singularity_basedir" ]; then
wait $cascade_singularity_pid
rc=$?
if [ $rc -eq 0 ]; then
log DEBUG "Cascade Singularity exited successfully"
else
log ERROR "Cascade Singularity exited with non-zero exit code: $rc"
exit $rc
fi
fi
else
log INFO "Not waiting for cascade due to non-blocking option"
fi
set -e
}
block_for_container_images() {
# wait for images via cascade
"${AZ_BATCH_NODE_STARTUP_DIR}"/wd/wait_for_images.sh "$block"
}
spawn_cascade_process
# block for images if necessary
block_for_container_images
| true
|
3056705d465749ba89ffab489c1892ab56da1878
|
Shell
|
hiennc24/mstep
|
/sh/import_weather.sh
|
UTF-8
| 517
| 2.90625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run the import_weather.php appropriate for the current developer machine.
. ./function.sh

# Fix: quote $DEVELOP_MODE in the tests below — if isDevelop prints an
# empty or multi-word value, the unquoted `[` comparisons error out.
DEVELOP_MODE=$(isDevelop)

DEV_PHPDIR_DUC='D:/xampp/htdocs/SpcMstep-master/php/import_weather.php'
DEV_PHPDIR_HIEN='D:/xampp/htdocs/SpcMstep-master/php/import_weather.php'
DEV_PHPDIR_KIYOSAWA='/Applications/XAMPP/htdocs/Mstep/php/import_weather.php'

# Default to Duc's path; mode 4 selects Hien, mode 0 selects Kiyosawa.
PHP=$DEV_PHPDIR_DUC
if [ "$DEVELOP_MODE" == 4 ]; then
    PHP=$DEV_PHPDIR_HIEN
fi
if [ "$DEVELOP_MODE" == 0 ]; then
    PHP=$DEV_PHPDIR_KIYOSAWA
fi

# consoleWait returns 1 when the operator cancels the run.
consoleWait
rc=$?
if [ "$rc" == 1 ]; then
    echo "Cancel"
    exit 1
fi

php -f "${PHP}"
exit 0
| true
|
d688f7bfcbef30c0bfdffc08e7937a4e1f0231a4
|
Shell
|
simonebaracchi/tsh
|
/install.sh
|
UTF-8
| 2,087
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
printf "Downloading & Installing Packages...\n"
apt-get install -y python nmap dnsutils mtr python-pip && pip install telepot
printf "\n\n--------------------------------\n\n"
echo "Enter your Telegram BOT Token. "
echo "Telegram BOT Token can be asked to BotFather. "
read -r TG_BOT_TOKEN
echo "bot_token = '$TG_BOT_TOKEN'" > tempconfig.py
printf "\n\n--------------------------------\n\n"
echo "Trying to find out your Telegram sender-id..."
python get-sender-id.py | grep "'id'" | uniq -c | awk '{ print $3 }' | sed s'/,//'
rm tempconfig.py
echo "Enter your Telegram Sender ID. "
echo "Telegram Sender ID is your identifier. Only this user will be enabled to send commands to this bot. If automatic sender ID retrieval has failed, try sending a private message to your bot in Telegram, and try again. "
read -r SENDER_ID
cp config.example.py config.py
sed -i s"/MY-TG-BOT-TOKEN/$TG_BOT_TOKEN/" config.py
sed -i s"/MY-SENDER-ID-LIST/$SENDER_ID/" config.py
printf "\n\n--------------------------------\n\n"
echo " Select an option"
echo "[0] Exit installer"
echo "[1] Configure daemon with systemctl (for systemd-enabled distros)"
echo "[2] Disable daemon with systemctl"
echo "[3] Configure daemon with supervisor (for supervisor-enabled distros)"
read -r SUPERVISOR
case $SUPERVISOR in
0)
exit 0
;;
1)
cp systemctl/tsh.example.service /tmp/tsh.service
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
sed -i s"#MY-PATH#$DIR#" /tmp/tsh.service
mv /tmp/tsh.service /etc/systemd/system/multi-user.target.wants/tsh.service
systemctl daemon-reload
systemctl restart tsh
;;
2)
systemctl stop tsh
systemctl disable tsh
rm /etc/systemd/system/multi-user.target.wants/tsh.service
systemctl daemon-reload
systemctl reset-failed
;;
3)
apt-get install -y supervisor
echo "Configuring tsh as a service..."
scp supervisor/conf.d/tsh.conf /etc/supervisor/conf.d/tsh.conf
echo "Update supervisord..."
supervisorctl update
echo "Starting tsh service..."
supervisorctl start tsh
;;
*)
echo Unrecognized option $SUPERVISOR, exiting
exit 1
;;
esac
| true
|
0a35a8d95fdd8d7bf4a07900c1c34cdfffb42a6e
|
Shell
|
ldgabet/oracle_wp
|
/instance_web/docker/nginx/cloudflare-sync-ips.sh
|
UTF-8
| 899
| 3.375
| 3
|
[] |
no_license
|
#!/bin/sh
# Regenerate the nginx real-IP config from Cloudflare's published ranges,
# then reload nginx.
# Fix: fetch both lists BEFORE touching the config file, and abort on any
# curl failure — previously a network error silently produced a config
# with no set_real_ip_from entries and nginx was reloaded with it.
CLOUDFLARE_FILE_PATH=/etc/nginx/global/cloudflare.conf
CURRENTDATE=`date +"%Y-%m-%d %T"`

# -f: fail on HTTP errors; -sS: no progress bar but keep real errors.
IPS_V4=`curl -fsS https://www.cloudflare.com/ips-v4` || exit 1
IPS_V6=`curl -fsS https://www.cloudflare.com/ips-v6` || exit 1

echo "# File generated ${CURRENTDATE}" > $CLOUDFLARE_FILE_PATH;
echo "" >> $CLOUDFLARE_FILE_PATH;
echo "# Cloufdlare's IPs (https://www.cloudflare.com/fr-fr/ips/)" >> $CLOUDFLARE_FILE_PATH;
echo "" >> $CLOUDFLARE_FILE_PATH;
echo "# - IPv4" >> $CLOUDFLARE_FILE_PATH;
for i in $IPS_V4; do
echo "set_real_ip_from $i;" >> $CLOUDFLARE_FILE_PATH;
done
echo "" >> $CLOUDFLARE_FILE_PATH;
echo "# - IPv6" >> $CLOUDFLARE_FILE_PATH;
for i in $IPS_V6; do
echo "set_real_ip_from $i;" >> $CLOUDFLARE_FILE_PATH;
done
echo "" >> $CLOUDFLARE_FILE_PATH;
echo "# Header for real IP" >> $CLOUDFLARE_FILE_PATH;
echo "real_ip_header CF-Connecting-IP;" >> $CLOUDFLARE_FILE_PATH;
#test configuration and reload nginx
nginx -t && nginx -s reload
| true
|
6793f20548a9259d80b924179a0c4b5d2c5d6369
|
Shell
|
angular/material.angular.io
|
/tools/deploy.sh
|
UTF-8
| 1,791
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -eu -o pipefail
declare -A PROJECT_ID
# Firebase project ids
PROJECT_ID["stable", "dev"]="material2-docs-dev"
PROJECT_ID["stable", "prod"]="material-angular-io"
PROJECT_ID["stable", "beta"]="beta-angular-material-io"
PROJECT_ID["v5", "dev"]="material-docs-dev-v5"
PROJECT_ID["v5", "prod"]="v5-material-angular-io"
PROJECT_ID["v6", "dev"]="material-docs-dev-v6"
PROJECT_ID["v6", "prod"]="v6-material-angular-io"
PROJECT_ID["v7", "dev"]="material-docs-dev-v7"
PROJECT_ID["v7", "prod"]="v7-material-angular-io"
PROJECT_ID["v8", "prod"]="v8-material-angular-io"
PROJECT_ID["v9", "prod"]="v9-material-angular-io"
PROJECT_ID["v10", "prod"]="v10-material-angular-io"
PROJECT_ID["v11", "prod"]="v11-material-angular-io"
PROJECT_ID["v12", "prod"]="v12-material-angular-io"
PROJECT_ID["next", "prod"]="beta-angular-material-io"
version=${1:-stable}
mode=${2:-dev}
projectId=${PROJECT_ID[$version, $mode]}
# Prevent deployment if we have a pre-release version, using the cdk
# version as a proxy for all components repo package versions.
cdk_prerelease=$(cat package.json | grep cdk | egrep next\|rc || true)
if [[ "${cdk_prerelease}" ]]; then
if [[ "${version}" == "stable" && "${mode}" == "prod" ]]; then
echo "Cannot publish a prerelease version to stable prod"
exit 1
fi
fi
echo ""
echo "NOTE: Make sure to refresh the docs-content to match the new version of docs."
echo " You can pull the docs-content for the new version by updating the"
echo " \"@angular/components-examples\" in the 'package.json' file."
echo ""
read -rp "Press <ENTER> to continue.."
yarn prod-build
yarn firebase use $projectId
yarn firebase target:clear hosting mat-aio
yarn firebase target:apply hosting mat-aio $projectId
yarn firebase deploy --only hosting:mat-aio
| true
|
615fcb1fae295479cc2a80f1f8ff4018083d678d
|
Shell
|
frgomes/bash-scripts
|
/bin/trim
|
UTF-8
| 124
| 2.796875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash -eu
# Delete all blank characters (spaces and tabs) from stdin.
# Fix: quote the character class — unquoted, [:blank:] is a shell glob
# that can expand against files in the current directory.
# tr(1) only reads stdin; arguments are still forwarded ("$@", properly
# quoted instead of the word-splitting $*) to keep the original interface.
function __trim() {
  which tr >/dev/null 2>&1 || apt+ install coreutils
  tr -d '[:blank:]' "$@"
}
__trim $*
| true
|
06d7d33d808c73857272a6ec6765e41f40596244
|
Shell
|
zcash/lightwalletd
|
/testclient/stress.sh
|
UTF-8
| 1,092
| 3.796875
| 4
|
[
"MIT",
"AGPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
#
# Create a CSV file with various performance measurements
#
set -e
test $# -eq 0 && { echo "usage: $0 iterations op(getlighdinfo|getblock|getblockrange)";exit 1;}
iterations=$1
op=$2
export p=`pidof server`
test -z $p && { echo 'is the server running?';exit 1;}
set -- $p
test $# -ne 1 && { echo 'server pid is not unique';exit 1;}
echo "concurrency,iterations per thread,utime before (ticks),stime before (ticks),memory before (pages),time (sec),utime after (ticks),stime after (ticks),memory after (pages)"
for i in 1 200 400 600 800 1000
do
csv="$i,$iterations"
csv="$csv,`cat /proc/$p/stat|field 14`" # utime in 10ms ticks
csv="$csv,`cat /proc/$p/stat|field 15`" # stime in 10ms ticks
csv="$csv,`cat /proc/$p/statm|field 2`" # resident size in pages (8k)
csv="$csv,`/usr/bin/time -f '%e' testclient/main -concurrency $i -iterations $iterations -op $op 2>&1`"
csv="$csv,`cat /proc/$p/stat|field 14`" # utime in 10ms ticks
csv="$csv,`cat /proc/$p/stat|field 15`" # stime in 10ms ticks
csv="$csv,`cat /proc/$p/statm|field 2`"
echo $csv
done
| true
|
d4557ae42dd24a0c8f7da7a47e1791e5c1a6cea0
|
Shell
|
petronny/aur3-mirror
|
/trac-0.11-accountmanagerplugin/PKGBUILD
|
UTF-8
| 801
| 2.515625
| 3
|
[] |
no_license
|
# Contributor: Kevin van Andel <actreon at gmail dot com>
pkgname=trac-0.11-accountmanagerplugin
pkgver=7731
pkgrel=1
pkgdesc="User account management plugin for Trac 0.11"
arch=('i686' 'x86_64')
url="http://trac-hacks.org/wiki/AccountManagerPlugin"
license="THE BEER-WARE LICENSE"
makedepends=("python>=2.5" "setuptools")
depends=("python>=2.5" "trac>=0.11")
install=trac-0.11-accountmanagerplugin.install
source=()
md5sums=()
_svntrunk="http://trac-hacks.org/svn/accountmanagerplugin/0.11"
_svnmod="accountmanagerplugin"
# PKGBUILD build(): check out (or update) the plugin from upstream SVN
# at revision $pkgver, then build and install it into $pkgdir.
build() {
  cd ${srcdir}

  if [ -d $_svnmod/.svn ]; then
    # Existing checkout: just update to the pinned revision.
    (cd $_svnmod && svn up -r $pkgver)
  else
    # Consistency fix: use the $_svntrunk variable declared above instead
    # of repeating the URL inline.
    svn co ${_svntrunk} -r $pkgver $_svnmod
  fi

  cd ${srcdir}/${_svnmod}

  python setup.py build || return 1
  python setup.py install --root=${pkgdir} || return 1
}
| true
|
b8a8bb976b8609b57ea1dbf5aa527bceb8c46207
|
Shell
|
krush11/personal
|
/system errors/dpkg_error1.sh
|
UTF-8
| 1,166
| 3.484375
| 3
|
[] |
no_license
|
#! /bin/bash
# Error: Sub-process /usr/bin/dpkg returned an error code (1)
# Reason: Happens when a software installation failed and the installer(dpkg) becomes corrupted
# Possible repercussions: `sudo dpkg --configure -a` fails and dpkg warnings
# !IMPORTANT! : This package is not supposed to be run as a script even though its a .sh file
# SOLUTION
# 1. Remove the broken packages
sudo dpkg --configure -a
sudo apt install -f # This will give packages which are broken/affecting the installer(dpkg)
sudo apt remove --purge ${package_name} # Here, ${package_name} are all the list of packages which are listed in the output of above command
sudo apt autoremove
# 2. Deal with the broken package's files
sudo mv /var/lib/dpkg/info/*${package_name}* /tmp # Again, replace ${package_name} with the broken package. (Run the command multiple files for each package)
sudo apt update
sudo dpkg -i --force-overwrite /var/cache/apt/archives/${full_package_name} # Write the exact name of the package instead of ${full_package_name} and the the command multiple times
sudo apt update
# This should solve the problem. If error still persists feel free to contact me.
| true
|
dd208eeef74505b0feb3905dce59b5d911ef73cb
|
Shell
|
waman/java10-vagrant
|
/install_java10.sh
|
UTF-8
| 569
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install JDK 10 under /opt/java from a tarball in /vagrant/archive,
# downloading the default OpenJDK 10 build when none is present.

# Pick up a locally provided JDK tarball. When the glob matches nothing
# it stays as the literal pattern, which the comparison below detects.
for f in /vagrant/archive/*jdk*_linux-x64_bin*.tar.gz; do
    jdk=$f
done
if [[ $jdk == "/vagrant/archive/*jdk*_linux-x64_bin*.tar.gz" ]]; then
    defaultjdk=openjdk-10_linux-x64_bin.tar.gz
    wget -q -O /vagrant/archive/${defaultjdk} https://download.java.net/java/GA/jdk10/10/binaries/${defaultjdk}
    jdk=/vagrant/archive/${defaultjdk}
fi

# Fix: -p tolerates re-provisioning an existing box (bare mkdir failed),
# and the cd calls are checked so tar can never run in the wrong directory.
mkdir -p /opt/java
cd /opt/java || exit 1
tar zxvf "${jdk}"
cd /home/vagrant || exit 1

# Expose the JDK to the vagrant user's login shell.
javaHome=/opt/java/jdk-10
echo "export JAVA_HOME=${javaHome}" >> .bash_profile
echo 'export PATH=$PATH:$JAVA_HOME/bin' >> .bash_profile
| true
|
a3be917355fee5f2a05745455f043dea93b62054
|
Shell
|
cuongnb14/cookbook
|
/database/export.sh
|
UTF-8
| 764
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Requirements :mysqldump (sudo apt-get install mysql-client)
# Upzip file: gzip -d ${DIR_BACKUP}/${FILE_BACKUP}.sql.gz
#
# Add cronjob Every 1 hour
# 0 * * * * /path/to/export.sh
# -----------------------------------------------------------
# Connection settings for the database to dump.
# NOTE(review): the password is hardcoded and passed on the command line,
# so it is visible in `ps` output — consider a ~/.my.cnf credentials file.
MYSQL_HOST='127.0.0.1'
MYSQL_PORT='3306'
MYSQL_USER='root'
MYSQL_PASSWORD=123456
MYSQL_DATABASE=demo
# Backups land in DIR_BACKUP as <database>_<timestamp>.sql.gz.
DIR_BACKUP='/tmp'
DATE_BACKUP=`date +%Y-%m-%d"_"%H-%M-%S`
FILE_BACKUP=${MYSQL_DATABASE}_${DATE_BACKUP}
# How many of the newest files to keep when pruning below.
KEEP_NEWEST=3
echo "Start export..."
mysqldump -h ${MYSQL_HOST} -P ${MYSQL_PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} ${MYSQL_DATABASE} | gzip > ${DIR_BACKUP}/${FILE_BACKUP}.sql.gz
echo "Remove old file"
# Keep only the KEEP_NEWEST most recent entries: list oldest-first, drop
# the newest KEEP_NEWEST, delete the rest.
# NOTE(review): this prunes EVERY file in DIR_BACKUP, not just this
# database's backups — dangerous with DIR_BACKUP=/tmp; confirm intent.
cd ${DIR_BACKUP} && ls -1tr | head -n -${KEEP_NEWEST} | xargs -d '\n' rm -f --
echo "Done!"
| true
|
72846782371d7e948748b3e9d07b6128190501a8
|
Shell
|
dtolabs/yana2
|
/src/main/rerun/modules/yana/commands/typerelations/default.sh
|
UTF-8
| 1,870
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# NAME
#
# typerelations
#
# DESCRIPTION
#
# manage nodetype relations
#
# Read module function library
source $RERUN_MODULES/yana/lib/functions.sh || exit 1 ;
#
# Initialize the context
#
yana_initialize $CFG || rerun_die "Yana initialization failed"
# Parse the command options
[ -r $RERUN_MODULES/yana/commands/typerelations/options.sh ] && {
source $RERUN_MODULES/yana/commands/typerelations/options.sh || exit 2 ;
}
# ------------------------------
flags="" #-v --silent --fail --show-error"
cookie=/tmp/yana-typerelations-cookiejar.txt
response=/tmp/yana-typerelations-response.txt
[ -f $response ] && rm $response
#
# Login and create a session
#
yana_authenticate $YANA_URL $YANA_USER $YANA_PASSWORD ${cookie} || rerun_die "Yana authentication failed"
#
# Function to format the output
#
format() {
oIFS=$IFS
while read line
do
IFS=:
arr=( $line )
[ ${#arr[*]} -eq 4 ] || continue
IFS=$oIFS
yana_expand "$FORMAT" ID=${arr[0]} NAME=${arr[1]} \
PARENT=${arr[2]} CHILD=${arr[3]}
done
}
#
# Execute specified action
#
case $ACTION in
list)
source $RERUN_MODULES/yana/commands/typerelations/list.sh
;;
get)
[ -z "$ID" ] && { echo "missing required option: --id" ; exit 2 ; }
source $RERUN_MODULES/yana/commands/typerelations/get.sh
;;
delete)
[ -z "$ID" ] && { echo "missing required option: --id" ; exit 2 ; }
source $RERUN_MODULES/yana/commands/typerelations/delete.sh
;;
create)
[ -z "$CHILD" ] && { echo "missing required option: --child" ; exit 2 ; }
[ -z "$PARENT" ] && { echo "missing required option: --parent" ; exit 2 ; }
[ -z "$NAME" ] && { echo "missing required option: --name" ; exit 2 ; }
source $RERUN_MODULES/yana/commands/typerelations/create.sh
;;
*)
echo "Invalid action: \"$ACTION\""
exit 2
esac
# ------------------------------
exit $?
# Done
| true
|
b7d109dfc3a57f3a64def41562d00a90f87ba0c8
|
Shell
|
tatyana-ruzsics/uzh-corpuslab-pos-normalization
|
/src/Main-wus-soft-train-pos.sh
|
UTF-8
| 2,628
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Usage Main-wus-soft-train-pos.sh model_file_name data_folder data_prefix use_aux_loss_and/or_pos_feat
# ./Main-wus-soft-train-pos.sh norm_soft wus/phase2/btagger wus
# ./Main-wus-soft-train-pos.sh norm_soft_pos wus/phase2/btagger wus
# ./Main-wus-soft-train-pos.sh norm_soft_context wus/phase2/btagger wus
# ./Main-wus-soft-train-pos.sh norm_soft_context wus/phase2/btagger wus pos
# ./Main-wus-soft-train-pos.sh norm_soft_context wus/phase2/btagger wus aux
# ./Main-wus-soft-train-pos.sh norm_soft_context wus/phase2/btagger wus pos_aux
##########################################################################################
export TRAIN=$2/train_silverpos.txt
export DEV=$2/dev_autopos.txt
export TEST=$2/test_autopos.txt
export PR="$3_phase2"
export MODEL=$1
if [[ $4 == "aux" ]]; then
export PR="${PR}/$3_aux"
elif [[ $4 == "pos" ]]; then
export PR="${PR}/$3_pos"
elif [[ $4 == "pos_aux" ]]; then
export PR="${PR}/$3_pos_aux"
else
export PR=${PR}/$3
fi
echo "$PR"
########### train + eval of individual models
for (( k=1; k<=5; k++ ))
do
(
if [[ $4 == "aux" ]]; then
PYTHONIOENCODING=utf8 python ${MODEL}.py train --dynet-seed $k --train_path=$TRAIN --dev_path=$DEV ${PR}_${MODEL}_$k --epochs=40 --aux_pos_task
elif [[ $4 == "pos" ]]; then
PYTHONIOENCODING=utf8 python ${MODEL}.py train --dynet-seed $k --train_path=$TRAIN --dev_path=$DEV ${PR}_${MODEL}_$k --epochs=40 --pos_feature
elif [[ $4 == "pos_aux" ]]; then
PYTHONIOENCODING=utf8 python ${MODEL}.py train --dynet-seed $k --train_path=$TRAIN --dev_path=$DEV ${PR}_${MODEL}_$k --epochs=40 --pos_feature --aux_pos_task
elif [[ $1 == "norm_soft_pos" ]]; then
PYTHONIOENCODING=utf8 python ${MODEL}.py train --dynet-seed $k --train_path=$TRAIN --dev_path=$DEV ${PR}_${MODEL}_$k --epochs=40
else
PYTHONIOENCODING=utf8 python ${MODEL}.py train --dynet-seed $k --train_path=$TRAIN --dev_path=$DEV ${PR}_${MODEL}_$k --epochs=40
fi
wait
PYTHONIOENCODING=utf8 python ${MODEL}.py test ${PR}_${MODEL}_$k --test_path=$DEV --beam=3 --pred_path=best.dev.3 &
PYTHONIOENCODING=utf8 python ${MODEL}.py test ${PR}_${MODEL}_$k --test_path=$TEST --beam=3 --pred_path=best.test.3
) &
done
wait
########### Evaluate ensemble 5
PYTHONIOENCODING=utf8 python ${MODEL}.py ensemble_test ${PR}_${MODEL}_1,${PR}_${MODEL}_2,${PR}_${MODEL}_3,${PR}_${MODEL}_4,${PR}_${MODEL}_5 --test_path=$DEV --beam=3 --pred_path=best.dev.3 ${PR}_${MODEL}_ens5 &
PYTHONIOENCODING=utf8 python ${MODEL}.py ensemble_test ${PR}_${MODEL}_1,${PR}_${MODEL}_2,${PR}_${MODEL}_3,${PR}_${MODEL}_4,${PR}_${MODEL}_5 --test_path=$TEST --beam=3 --pred_path=best.test.3 ${PR}_${MODEL}_ens5
| true
|
140901f65fc42ae5c41c5c9035f1ea921fdb6f22
|
Shell
|
duckinator/dotfiles
|
/.bashrc
|
UTF-8
| 2,888
| 3.78125
| 4
|
[] |
no_license
|
# If not running interactively, don't do anything.
case $- in
*i*) ;;
*) return;;
esac
# Shell Options
# https://www.gnu.org/software/bash/manual/html_node/The-Shopt-Builtin.html
shopt -s autocd checkjobs checkwinsize globstar histverify
# Source global definitions
[ -f /etc/bashrc ] && . /etc/bashrc
[ -f "$HOME/.bash_aliases" ] && . $HOME/.bash_aliases
[ -f "$HOME/.bash_env" ] && . $HOME/.bash_env
# Creates a directory and then cd's to it.
# Create a directory and change into it. With -p, parent directories
# are created as needed (mkdir -p semantics).
mkcd() {
  case $# in
    1|2) ;;
    *)
      echo "Usage: mkcd [-p] dir"
      return 1
      ;;
  esac
  if [ "$1" = "-p" ]; then
    mkdir -p "$2" && cd "$2"
  else
    mkdir "$1" && cd "$1"
  fi
}
# ANSI SGR escape sequences keyed by color name ("br*" = bright variants),
# plus "bold" and "reset". Consumed by bash_prompt when assembling PS1.
declare -A FOREGROUND=(
["black"]="\033[30m"
["red"]="\033[31m"
["green"]="\033[32m"
["yellow"]="\033[33m"
["blue"]="\033[34m"
["magenta"]="\033[35m"
["cyan"]="\033[36m"
["white"]="\033[37m"
["brblack"]="\033[90m"
["brred"]="\033[91m"
["brgreen"]="\033[92m"
["bryellow"]="\033[93m"
["brblue"]="\033[94m"
["brmagenta"]="\033[95m"
["brcyan"]="\033[96m"
["brwhite"]="\033[97m"
["bold"]="\033[1m"
["reset"]="\033[0m"
)
# Assemble and export PS1. The prompt shows, in order: an optional
# "(venv) " prefix, an optional "user@host:" prefix (only over SSH or when
# PS1_SHOW_HOSTNAME is set), the working directory, the current git branch,
# and a prompt symbol that becomes a red "+" when the worktree is dirty.
# Color codes are wrapped in \[ \] so readline excludes them from the
# prompt-length calculation.
function bash_prompt() {
local fg_normal="\\[${FOREGROUND["reset"]}\\]"
local fg_operator="\\[${FOREGROUND["brmagenta"]}\\]"
local fg_user="\\[${FOREGROUND["blue"]}\\]"
local fg_host="$fg_user"
local fg_cwd="$fg_user"
local bold="\\[${FOREGROUND["bold"]}\\]"
local git_status
local git_branch
local prefix_hostname=""
local prefix_venv=""
local suffix_git=""
local prompt_symbol="$"
# Active Python virtualenv -> "(name) " prefix.
if test -n "$VIRTUAL_ENV"; then
prefix_venv="(`basename \"$VIRTUAL_ENV\"`) "
fi
# Only show user@host when it is ambiguous (remote session) or forced.
if test -n "$SSH_CONNECTION" || test -n "$PS1_SHOW_HOSTNAME"; then
prefix_hostname="${bold}${fg_user}\u${fg_operator}@${fg_host}\H${fg_operator}:"
fi
# STATIC_PROMPT disables the (comparatively slow) git lookups entirely.
if [ -z "$STATIC_PROMPT" ]; then
git_status="$(git status -s 2>/dev/null)"
if test $? -eq 0; then
# If `git status` returns 0, this is a git repo, so show git information.
git_branch="$(git rev-parse --abbrev-ref HEAD 2>/dev/null)"
if test -n "$git_branch"; then
suffix_git=" $git_branch"
fi
# Non-empty short status means uncommitted changes -> red "+".
if test -n "$git_status"; then
prompt_symbol="\\[${FOREGROUND["red"]}\\]+"
else
prompt_symbol="+"
fi
fi
fi
local prompt_main="${fg_cwd}\w${fg_operator}"
export PS1="$bold$prefix_venv$prefix_hostname$prompt_main$suffix_git$prompt_symbol$fg_normal "
}
# Print the previous command's exit status right-aligned on the current
# line (e.g. "                                [1]") when it was non-zero.
# Runs from PROMPT_COMMAND before bash_prompt, so $? still holds the
# status of the command the user just ran.
function post_cmd() {
  # Must capture $? first; any other statement here would clobber it.
  local LAST_STATUS=$?
  if [ "$LAST_STATUS" -gt 0 ]; then
    # Pad to (terminal width - status length - 2 brackets) columns, then
    # \r returns the cursor so the prompt overprints the padding.
    # Fixed: was the long-deprecated $[ ... ] arithmetic syntax.
    printf "%$(( $(tput cols) - ${#LAST_STATUS} - 2 ))s[$LAST_STATUS]\r"
  fi
}
export PROMPT_COMMAND="post_cmd; bash_prompt"
[ -f "$HOME/.bashrc.local" ] && source ~/.bashrc.local
| true
|
d0fc2891229970e7f2970d74cff8f7d108c82f7c
|
Shell
|
dst-umms/i2b2-quickstart
|
/scripts/postgres/load_data.sh
|
UTF-8
| 2,400
| 2.890625
| 3
|
[] |
no_license
|
# Load the i2b2 demo dataset into PostgreSQL.
#   $1 - install base dir containing unzipped_packages/i2b2-data-master
#   $2 - extra psql connection arguments (host/port/dbname etc.)
#   $3 - IP:port substituted into the PM access data in place of
#        localhost:9090
# NOTE(review): order matters — CRC, Hive, PM, Metadata, Workdata schemas
# are created before the final db_lookup schema fix-ups; assumes psql
# auth (e.g. .pgpass) is already configured for each role — confirm.
load_demo_data(){
#BASE="/home/ec2-user/i2b2-install"
BASE=$1
DATA_BASE="$BASE/unzipped_packages/i2b2-data-master"
PARG=$2
IP=$3
# --- CRC (data mart) schema and demo data, run as role i2b2demodata ---
cd "$DATA_BASE/edu.harvard.i2b2.data/Release_1-7/NewInstall/Crcdata/"
echo ">>>>>pwd:$PWD IP:$IP"
#local POPTS="dbname=i2b2 options=--search_path='i2b2demodata'"
cat scripts/crc_create_datamart_postgresql.sql|psql -U i2b2demodata $PARG
cat scripts/crc_create_query_postgresql.sql|psql -U i2b2demodata $PARG
cat scripts/crc_create_uploader_postgresql.sql|psql -U i2b2demodata $PARG
cat scripts/expression_concept_demo_insert_data.sql|psql -U i2b2demodata $PARG
cat scripts/expression_obs_demo_insert_data.sql|psql -U i2b2demodata $PARG
for x in $(ls scripts/postgresql/); do cat scripts/postgresql/$x|psql -U i2b2demodata $PARG;done;
# --- Hive tables and the db_lookup rows that point at the other cells ---
cd "$DATA_BASE/edu.harvard.i2b2.data/Release_1-7/NewInstall/Hivedata/"
mkdir ~/tmp
for x in "create_postgresql_i2b2hive_tables.sql" "work_db_lookup_postgresql_insert_data.sql" "ont_db_lookup_postgresql_insert_data.sql" "im_db_lookup_postgresql_insert_data.sql" "crc_db_lookup_postgresql_insert_data.sql"
do
cat scripts/$x|psql -U i2b2hive $PARG ;
done;
# --- Project management cell; rewrite the hard-coded endpoint to $IP ---
cd ../Pmdata/
for x in "create_postgresql_i2b2pm_tables.sql" "create_postgresql_triggers.sql"
do echo $x;cat scripts/$x|psql -U i2b2pm $PARG;done;
cat scripts/pm_access_insert_data.sql| sed "s/localhost:9090/$IP/" |psql -U i2b2pm $PARG;
# --- Ontology/metadata cell ---
cd "$DATA_BASE/edu.harvard.i2b2.data/Release_1-7/NewInstall/Metadata/"
for x in $(ls scripts/*postgresql*); do echo $x;cat $x|psql -U i2b2metadata $PARG ;done;
for x in $(ls demo/scripts/*.sql); do echo $x;cat $x|psql -U i2b2metadata $PARG ;done;
for x in $(ls demo/scripts/postgresql/*); do echo $x;cat $x|psql -U i2b2metadata $PARG ;done;
# --- Workplace cell ---
cd "$DATA_BASE/edu.harvard.i2b2.data/Release_1-7/NewInstall/Workdata/";
x="scripts/create_postgresql_i2b2workdata_tables.sql"; echo $x;cat $x|psql -U i2b2workdata $PARG;
x="scripts/workplace_access_demo_insert_data.sql"; echo $x;cat $x|psql -U i2b2workdata $PARG;
# Point the hive lookup tables at the schemas created above.
echo "update crc_db_lookup set c_db_fullschema = 'i2b2demodata';\
update work_db_lookup set c_db_fullschema = 'i2b2workdata';\
update ont_db_lookup set c_db_fullschema = 'i2b2metadata';
"|psql -U i2b2hive $PARG;
}
# (Re)create the i2b2 database schemas.
#   $1 - repository root containing scripts/postgres/create_schemas.sql
#   $2 - psql connection arguments
# WARNING: drops any existing "i2b2" database before recreating schemas.
create_db_schema(){
PARG=$2
echo "drop database i2b2;"|psql $PARG
cat $1/scripts/postgres/create_schemas.sql|psql $PARG
}
#create_db_schema $1;
#load_demo_data $1;
| true
|
e5942b5df72fe46a339a954668fb39b4cd970543
|
Shell
|
TomMannson/Finject
|
/tool/local_analize
|
UTF-8
| 1,965
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash --
# Copyright (c) 2016, Google Inc. Please see the AUTHORS file for details.
# All rights reserved. Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
set -e
directories="finject finject_generator finject_flutter"
parent_directory=$PWD
echo ""
echo "#####"
echo "##### CODE_FORMATTING"
echo "#####"
echo ""
for directory in $directories; do
echo "*** Formatting $directory..."
cd "$parent_directory/$directory"
dart format $(find bin lib test -name \*.dart 2>/dev/null)
done
echo ""
echo "#####"
echo "##### BUILD_RUNNER"
echo "#####"
echo ""
for directory in $directories; do
echo "*** Building $directory..."
cd "$parent_directory/$directory"
flutter pub get
flutter pub upgrade
# Clear any pre-existing build output so package:build doesn't get confused
# when we use built_value to build itself.
rm -rf .dart_tool/build/
grep -q build_runner pubspec.yaml && \
flutter pub run build_runner build \
--delete-conflicting-outputs \
--fail-on-severe
done
echo ""
echo "#####"
echo "##### DART ANALYZER"
echo "#####"
echo ""
for directory in $directories; do
echo "*** Analyzing $directory..."
cd "$parent_directory/$directory"
# --packages="$PWD/.packages" \
dart analyze \
--fatal-warnings \
--fatal-infos \
$(find bin lib test -name \*.dart 2>/dev/null)
done
echo ""
echo "#####"
echo "##### UNIT TESTS and COVERAGE"
echo "#####"
echo ""
dart_directories="finject finject_generator"
flutter_directories="finject_flutter"
for directory in $dart_directories; do
echo "*** Testing and coverage dart modules $directory..."
cd "$parent_directory/$directory"
dart test
# pub run test_coverage
done
for directory in $flutter_directories; do
echo "*** Testing and coverage flutter modules $directory..."
cd "$parent_directory/$directory"
# flutter test --coverage
flutter test
done
cd "$parent_directory"
| true
|
37c061828997ac4f8ca65427bd91da1019d5800b
|
Shell
|
rikuu/eval-error-correction
|
/scripts/master.sh
|
UTF-8
| 1,755
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Runs error correction tools and logs the disk, memory and swap statistics and
# the cpu time and elapsed time
#
if [[ "$#" -lt 2 ]]; then
echo "Usage: $0 <lorma|proovread|pbcr|lordec> [lorma parameters] <long reads> [short reads]" 1>&2
exit 1
fi
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}" )" && pwd)
SCRIPTS=$DIR
source $DIR/../configuration.sh
mkdir tmp
bash $SCRIPTS/monitor-disk.sh tmp &
DISK_PID=$!
bash $SCRIPTS/monitor-stats.sh 5 &
STATS_PID=$!
cd tmp
if [ "$1" = "lorma" ]; then
$TIME -v $SCRIPTS/lorma.sh "${@:2}" 2> ../stderr.log 1> ../stdout.log
mv final.fasta ../corrected.fasta
fi
if [ "$1" = "proovread" ]; then
# Splits long reads into 20M sized chunks
$SEQCHUNKER -s 20M -o pb-%03d.fq "$2"
# Corrects each chunk separately
for FILE in $(ls pb-*.fq); do
$PROOVREAD --threads 8 -l $FILE -s "$3" --pre "${FILE%.fq}"
done
# Parallelize proovread on process-level
# parallel $proovread' --threads 4 -l {} -s '$3' --pre {.}' ::: pb-*.fq -P 4
# Combines corrected chunks
cat pb-*/*.trimmed.fa > ../corrected-trimmed.fasta
cat pb-*/*.untrimmed.fq | awk 'NR%4==1{printf ">%s\n", substr($0,2)}NR%4==2{print}' > ../corrected.fasta
rm -r pb*
fi
if [ "$1" = "pbcr" ]; then
$TIME -v $PBCR -l pbcr -s $SCRIPTS/pbcr.spec -fastq "$2" "$3" 2> ../stderr.log 1> ../stdout.log
mv pbcr.fasta ../corrected.fasta
fi
if [ "$1" = "lordec" ]; then
$TIME -v $LORDEC -s 3 -k 19 -i "$2" -2 "$3" -o lordec.fasta 2> ../correct-stderr.log 1> ../correct-stdout.log
$TIME -v $TRIMSPLIT -i lordec.fasta -o lordec-trimmed.fasta 2> ../trim-stderr.log 1> ../trim-stdout.log
mv lordec.fasta ../corrected.fasta
mv lordec-trimmed.fasta ../corrected-trimmed.fasta
fi
kill $DISK_PID
kill $STATS_PID
cd ..
| true
|
89a9b717c6d4435870ab8323399ce2fbdb51bf00
|
Shell
|
carlhuth/cookbook-openvas
|
/script/jenkins
|
UTF-8
| 1,080
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
set +x
COOKBOOK=`echo $JOB_NAME | sed 's/_cookbook//'`
COOKBOOK_PATH="/var/lib/jenkins/workspace/$JOB_NAME"
CHEF_GEMS_PATH="/var/lib/jenkins/chefgems"
# Set the cookbook path
echo -n "Setting the cookbook path... "
if ! [ -d ".chef" ]
then
mkdir .chef
fi
cat /var/lib/jenkins/.chef/knife.rb |sed -e "/cookbook_path.*$/ccookbook_path ['$COOKBOOK_PATH']" > .chef/knife.rb
cat /var/lib/jenkins/.chef/hosted_chef-knife.rb |sed -e "/cookbook_path.*$/ccookbook_path ['$COOKBOOK_PATH']" > .chef/hosted_chef-knife.rb
echo "Done!"
# Submodules
echo -n 'Updating submodules... '
git submodule update --init 2>&1 > /dev/null
echo 'Done!'
# Bundler: install all dependencies
echo -n 'Bundling... '
bundle install --quiet --deployment --path $CHEF_GEMS_PATH
echo 'Done!'
# echo -n 'Installing berkshelf cookbooks... '
# bundle exec berks install --path vendor/cookbooks
# echo 'Done!'
echo 'Running foodcritic'
bundle exec foodcritic ./
echo 'Running knife cookbook test'
bundle exec knife cookbook test $JOB_NAME -o ../
| true
|
709f5c1ddcd72f14bccd446808ac6958c8561aa0
|
Shell
|
aperezca/CMS-global-pool-monitor
|
/make_html/make_html_global_pool_collector.sh
|
UTF-8
| 5,437
| 2.84375
| 3
|
[] |
no_license
|
#Interval to plot in hours
int=$1
let n_lines=6*$int
if [[ $int -gt "168" ]]; then #put plots longer than one week at another location
long="long"
else
long=""
fi
ratio=1
if [[ $int -gt "720" ]]; then ratio=2; fi # more than 1 month
if [[ $int -gt "1440" ]]; then ratio=3; fi # more than 2 months
if [[ $int -gt "2880" ]]; then ratio=4; fi # more than 4 months
if [[ $int -gt "4320" ]]; then ratio=6; fi # more than 6 months
WORKDIR="/home/aperez"
OUTDIR="/crabprod/CSstoragePath/aperez"
OUT=$OUTDIR"/HTML/"$long"global_pool_collector_"$int"h.html"
echo '<html>
<head>
<title>CMS Global pool collector metrics</title>
<!--Load the AJAX API-->
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">'>$OUT
echo "google.load('visualization', '1', {packages: ['corechart', 'line']});
google.setOnLoadCallback(drawChart);
function drawChart() {">>$OUT
# Collector attrs being recorded:
# -------------------------------
# ActiveQueryWorkers PendingQueries RecentDroppedQueries
# RecentDaemonCoreDutyCycle
# RecentForkQueriesFromNEGOTIATOR RecentForkQueriesFromTOOL
# RecentUpdatesTotal RecentUpdatesLost
# SubmitterAds
#-------------
tail -n $n_lines $OUTDIR/out/collector_global_pool|awk -v var="$ratio" 'NR % var == 0' |sort >$WORKDIR/status/input_global_collector$int
# -------------------------------
# CoreDutyCycle in collector
echo "var data_dutycycle = new google.visualization.DataTable();
data_dutycycle.addColumn('datetime', 'Date');
data_dutycycle.addColumn('number', 'RecentCoreDutyCycle');
data_dutycycle.addRows([">>$OUT
while read -r line; do
if [[ $(echo $line |wc -w ) -eq 1 ]]; then continue; fi
time=$(echo $line |awk '{print $1}')
let timemil=1000*$time
content=$(echo $line |awk '{print $5}')
echo "[new Date($timemil), $content], " >>$OUT
done <$WORKDIR/status/input_global_collector$int
echo " ]);
var options_dutycycle = {
title: 'Collector RecentCoreDutyCycle',
isStacked: 'true',
explorer: {},
'height':500,
colors: ['#0040FF'],
hAxis: {title: 'Time'},
vAxis: {title: 'dutycycle'}
};
var chart_dutycycle = new google.visualization.AreaChart(document.getElementById('chart_div_dutycycle'));
chart_dutycycle.draw(data_dutycycle, options_dutycycle);">>$OUT
# -------------------------------
# RecentUpdatesTotal RecentUpdatesLost
echo "var data_updates = new google.visualization.DataTable();
data_updates.addColumn('datetime', 'Date');
data_updates.addColumn('number', 'Total');
data_updates.addColumn('number', 'Lost');
data_updates.addRows([">>$OUT
while read -r line; do
if [[ $(echo $line |wc -w ) -eq 1 ]]; then continue; fi
time=$(echo $line |awk '{print $1}')
let timemil=1000*$time
content=$(echo $line |awk '{print $8", "$9}')
echo "[new Date($timemil), $content], " >>$OUT
done <$WORKDIR/status/input_global_collector$int
echo " ]);
var options_updates = {
title: 'Updates to the Collector',
isStacked: 'false',
explorer: {},
lineWidth: 6,
'height':500,
hAxis: {title: 'Time'},
vAxis: {title: 'Number of updates'}
};
var chart_updates = new google.visualization.LineChart(document.getElementById('chart_div_updates'));
chart_updates.draw(data_updates, options_updates);">>$OUT
# -------------------------------
# ActiveQueryWorkers PendingQueries RecentDroppedQueries
echo "var data_queries = new google.visualization.DataTable();
data_queries.addColumn('datetime', 'Date');
data_queries.addColumn('number', 'Active');
data_queries.addColumn('number', 'Pending');
data_queries.addColumn('number', 'Dropped');
data_queries.addRows([">>$OUT
while read -r line; do
if [[ $(echo $line |wc -w ) -eq 1 ]]; then continue; fi
time=$(echo $line |awk '{print $1}')
let timemil=1000*$time
content=$(echo $line |awk '{print $2", "$3", "$4}')
echo "[new Date($timemil), $content], " >>$OUT
done <$WORKDIR/status/input_global_collector$int
echo " ]);
var options_queries = {
title: 'Queries on the Collector',
isStacked: 'false',
explorer: {},
lineWidth: 6,
'height':500,
hAxis: {title: 'Time'},
vAxis: {title: 'Number of queries'}
};
var chart_queries = new google.visualization.LineChart(document.getElementById('chart_div_queries'));
chart_queries.draw(data_queries, options_queries);">>$OUT
#----------------------
echo '
}
</script>
<style>
p {text-align: center;
font-family: verdana;
}
</style>
</head>
<body>
<div id="header">
<h2>CMS Global pool collector metrics monitor for the last '$int' hours, updated at '$(date -u)'<br>
<a href="http://submit-3.t2.ucsd.edu/CSstoragePath/aperez/HTML/global_pool_collector_24h.html">24h</a>
<a href="http://submit-3.t2.ucsd.edu/CSstoragePath/aperez/HTML/global_pool_collector_168h.html">1week</a>
<a href="http://submit-3.t2.ucsd.edu/CSstoragePath/aperez/HTML/longglobal_pool_collector_720h.html">1month</a>
<br><br>
</h2>
</div>
<br>
<!--Div to hold the charts-->'>>$OUT
echo ' <div id="chart_div_dutycycle"></div></p><br><br>'>>$OUT
echo ' <div id="chart_div_updates"></div><br><br>'>>$OUT
echo ' <div id="chart_div_queries"></div><br><br>'>>$OUT
echo "
</body>
</html>" >>$OUT
| true
|
b40144ad7745178c85253454853f0ea1ae7f611b
|
Shell
|
OckhamConsulting/mlc-deployer
|
/app/args.sh
|
UTF-8
| 6,327
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
parse_json_config()
{
if [[ "$CONFIG_PROCESSED" == "Y" ]]; then
return 1
fi
if [[ -f "$CONFIG" ]]; then
KEYS=$(jq -r '. | keys[]' "$CONFIG")
for KEY in $(jq -r '. | keys[]' "$CONFIG"); do
case $KEY in
"nodeIp")
NODE_IP=$(jq -r '.nodeIp' "$CONFIG")
;;
"nodePort")
NODE_PORT=$(jq -r '.nodePort' "$CONFIG")
;;
"explorerIp")
EXPLORER_IP=$(jq -r '.explorerIp' "$CONFIG")
;;
"explorerPort")
EXPLORER_PORT=$(jq -r '.explorerPort' "$CONFIG")
;;
"chainName")
CHAIN_NAME=$(jq -r '.chainName' "$CONFIG")
;;
"token")
TOKEN=$(jq -r '.token' "$CONFIG")
;;
"database")
DATABASE_NAME=$(jq -r '.database' "$CONFIG")
;;
"symbol")
SYMBOL=$(jq -r '.symbol' "$CONFIG")
;;
"prefix")
PREFIX=$(jq -r '.prefix' "$CONFIG")
;;
"feeSend")
FEE_SEND=$(jq -r '.feeSend' "$CONFIG")
;;
"feeVote")
FEE_VOTE=$(jq -r '.feeVote' "$CONFIG")
;;
"feeSecondPassphrase")
FEE_SECOND_PASSPHRASE=$(jq -r '.feeSecondPassphrase' "$CONFIG")
;;
"feeDelegate")
FEE_DELEGATE=$(jq -r '.feeDelegate' "$CONFIG")
;;
"feeMultisig")
FEE_MULTISIG=$(jq -r '.feeMultisig' "$CONFIG")
;;
"forgers")
FORGERS=$(jq -r '.forgers' "$CONFIG")
;;
"maxVotes")
MAX_VOTES=$(jq -r '.maxVotes' "$CONFIG")
;;
"blockTime")
BLOCK_TIME=$(jq -r '.blockTime' "$CONFIG")
;;
"txsPerBlock")
TXS_PER_BLOCK=$(jq -r '.txsPerBlock' "$CONFIG")
;;
"totalPremine")
TOTAL_PREMINE=$(jq -r '.totalPremine' "$CONFIG")
;;
"updateEpoch")
local VALUE=$(jq -r '.updateEpoch' "$CONFIG")
if [[ "$VALUE" == "true" ]]; then
UPDATE_EPOCH="Y"
fi
;;
"rewardHeightStart")
REWARD_HEIGHT_START=$(jq -r '.rewardHeightStart' "$CONFIG")
;;
"rewardPerBlock")
REWARD_PER_BLOCK=$(jq -r '.rewardPerBlock' "$CONFIG")
;;
"blockchainPath")
BLOCKCHAIN_PATH=$(jq -r '.blockchainPath' "$CONFIG")
;;
"explorerPath")
EXPLORER_PATH=$(jq -r '.explorerPath' "$CONFIG")
;;
esac
done
fi
CONFIG_PROCESSED="Y"
}
parse_generic_args()
{
ARGS="$@"
while [[ $# -ne 0 ]] ; do
case $1 in
"--config")
CONFIG="$2"
parse_json_config
;;
esac
shift
done
set -- $ARGS
while [[ $# -ne 0 ]] ; do
case $1 in
"--name")
CHAIN_NAME="$2"
;;
"--node-ip")
NODE_IP="$2"
;;
"--node-port")
NODE_PORT="$2"
;;
"--explorer-ip")
EXPLORER_IP="$2"
;;
"--explorer-port")
EXPLORER_PORT="$2"
;;
"--token")
TOKEN="$2"
;;
"--forgers")
FORGERS="$2"
;;
"--autoinstall-deps")
INSTALL_DEPS="Y"
;;
"--skip-deps")
SKIP_DEPS="Y"
;;
"--non-interactive")
INTERACTIVE="N"
;;
esac
shift
done
ARGS_PROCESSED="Y"
}
parse_explorer_args()
{
if [[ "$ARGS_PROCESSED" == "Y" ]]; then
return 0
fi
parse_generic_args "$@"
while [[ $# -ne 0 ]] ; do
case $1 in
"--path")
EXPLORER_PATH="$2"
;;
esac
shift
done
}
parse_node_args()
{
if [[ "$ARGS_PROCESSED" == "Y" ]]; then
return 0
fi
parse_generic_args "$@"
while [[ $# -ne 0 ]] ; do
case "$1" in
"--path")
BLOCKCHAIN_PATH="$2"
;;
"--database")
DATABASE_NAME="$2"
;;
"--symbol")
SYMBOL="$2"
;;
"--prefix")
PREFIX="$2"
;;
"--fee-send")
FEE_SEND="$2"
;;
"--fee-vote")
FEE_VOTE="$2"
;;
"--fee-second-passphrase")
FEE_SECOND_PASSPHRASE="$2"
;;
"--fee-delegate")
FEE_DELEGATE="$2"
;;
"--fee-multisig")
FEE_MULTISIG="$2"
;;
"--max-votes")
MAX_VOTES="$2"
;;
"--blocktime")
BLOCK_TIME="$2"
;;
"--transactions-per-block")
TXS_PER_BLOCK="$2"
;;
"--reward-height-start")
REWARD_HEIGHT_START="$2"
;;
"--reward-per-block")
REWARD_PER_BLOCK="$2"
;;
"--total-premine")
TOTAL_PREMINE="$2"
;;
"--max-tokens-per-account")
MAX_TOKENS_PER_ACCOUNT="$2"
;;
"--no-autoforger")
AUTO_FORGER="N"
;;
"--update-epoch")
UPDATE_EPOCH="Y"
;;
esac
shift
done
if [[ "$TOTAL_PREMINE" > "$MAX_TOKENS_PER_ACCOUNT" ]]; then
MAX_TOKENS_PER_ACCOUNT="$TOTAL_PREMINE"
fi
}
| true
|
ef6effe8e548fcaa8df4bf825831e18a014a296a
|
Shell
|
dh-thesis/build
|
/init
|
UTF-8
| 705
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
echo "-------------------------"
echo "START BUILDING DH-THESIS"
echo "-------------------------"
echo ""
echo "CLONE REPOSITORIES"
echo ""
mkdir dh-thesis
cd dh-thesis
git clone https://github.com/dh-thesis/crawl.git
git clone https://github.com/dh-thesis/retrieve.git
echo ""
echo "PREPARE RETRIEVAL"
echo ""
cd retrieve/
virtualenv -p python3 --no-site-packages env
source env/bin/activate
pip install -r requirements.txt
deactivate
./main
echo ""
echo "RETRIEVAL DONE!"
echo ""
echo ""
echo "PREPARE CRAWL"
echo ""
cd ../crawl
virtualenv -p python3 --no-site-packages env
source env/bin/activate
pip install -r requirements.txt
deactivate
./main
echo ""
echo "CRAWL DONE!"
echo ""
| true
|
608ce46c953651ae13b7b014804c6e754c787587
|
Shell
|
ecogit-stage/terraform-vsphere-single-vm-disk
|
/files/volume.sh
|
UTF-8
| 211
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Provision a data volume on the block device given as $1:
# LVM PV -> VG "data" -> LV "data1" -> ext4, mounted persistently at /data.
# Abort on the first failure: running mkfs/mount on a half-built LVM stack
# can destroy data. (Previously there was no error handling and $1 was
# unquoted.)
set -euo pipefail

dev=${1:?usage: volume.sh /dev/<block-device>}

vgchange -ay
pvcreate "$dev"
vgcreate data "$dev"
lvcreate --name data1 -l 100%FREE data
mkfs.ext4 /dev/data/data1
mkdir -p /data
echo '/dev/data/data1 /data ext4 defaults 0 0' >> /etc/fstab
mount /data
| true
|
d489f7c62faf49358007f35f303a8a2f78971259
|
Shell
|
carp-lang/Carp
|
/scripts/release.sh
|
UTF-8
| 1,172
| 4.28125
| 4
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env sh
set -e;
name=$1
noprompt=$2
if [ "$name" = "" ]; then
echo "ERROR: Must pass a name of the release as the first argument to this script.";
exit 1;
fi
fullPath="$PWD/releases/$name"
echo "Creating release '$name'"
echo "Full path = '$fullPath'"
if [ "$noprompt" = "--noprompt" ]; then
echo "No prompt, will continue"
else
echo "Continue? (y/n)"
read answer
if [ "$answer" != "y" ]; then
echo "Bye!"
exit 1;
fi
fi
mkdir -p "$fullPath"
echo
echo "Building Haskell Stack project..."
stack build
./scripts/carp.sh ./docs/core/generate_core_docs.carp
mkdir "$fullPath/bin"
echo "Copying executable..."
cp "$(stack path --local-install-root)/bin/carp" $fullPath/bin/carp
echo "Copying core..."
cp -r "./core/" "$fullPath/core/"
echo "Copying docs..."
cp -r "./docs/" "$fullPath/docs/"
echo "Copying README.md..."
cp -r "./README.md" "$fullPath/README.md"
echo "Copying img..."
cp -r "./resources/" "$fullPath/resources/"
echo "Copying examples..."
cp -r "./examples/" "$fullPath/examples/"
echo
echo "Zipping..."
cd releases
zip -r "${name}.zip" "${name}"
echo
echo "Done. New release created successfully!"
| true
|
19b1113949f97258e16a7f8cc710178cd68cf990
|
Shell
|
carloscuesta/dotfiles
|
/osx/software/npm-packages.sh
|
UTF-8
| 380
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
source './osx/utils.sh'
# npm_packages : Asks to install these package through npm.
# npm_packages : Asks to install these package through npm.
# Relies on helpers sourced from ./osx/utils.sh (cmd_exists, execute,
# print_in_blue, npm_install, print_error).
npm_packages() {
if cmd_exists "npm"; then
print_in_blue "Updating npm ..."
# Update npm itself first so the installs below use the current client.
execute "npm update -g npm"
print_in_blue "npm packages"
npm_install "gitmoji-cli"
npm_install "@antfu/ni"
else
print_error 'npm not installed (required to install node packages).'
fi
}
npm_packages
| true
|
2a11b8fc89288ffd58915a568c2acbe8245eb3d2
|
Shell
|
mowenGithub/Clutch
|
/docs/publish-docs
|
UTF-8
| 400
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Build the Sphinx HTML docs and force-publish them to the
# clutchio.github.com GitHub Pages repository.
# Fixed: stop on any error (previously a failed clone/cd left later git
# commands running in the wrong directory before a --force push), use a
# private mktemp workdir instead of a predictable fixed /tmp name, and
# quote paths.
set -euo pipefail

CURRENT=$(pwd)

make clean && make html

# Private temp dir; removed on any exit path.
workdir=$(mktemp -d)
trap 'rm -rf -- "$workdir"' EXIT

cd "$workdir"
git clone git@github.com:clutchio/clutchio.github.com.git
cd clutchio.github.com
# Start an empty, history-free branch and replace its content wholesale.
git checkout --orphan master
git rm -rf .
cp -R "$CURRENT"/_build/html/* .
touch .nojekyll   # tell GitHub Pages not to run Jekyll on the output
git add .
git commit -a -m "Generate documentation site"
git push origin master --force
| true
|
4eff404f71c6048b450241ed7b1d98e59d6c7f1c
|
Shell
|
somasis/beginning-scripts
|
/xdg-runtime-dirs
|
UTF-8
| 1,476
| 3.828125
| 4
|
[
"ISC"
] |
permissive
|
#!/bin/bash
# Existence probe for the init framework: this service is always available.
xdg-runtime-dirs_exists() {
  return 0
}
# Create per-user XDG runtime directories under @@runstatedir@@/user
# (@@...@@ are build-time path placeholders). For each passwd entry,
# $u holds "name:uid:gid" (cut fields 1,3,4); a uid-named dir (mode 700,
# owned uid:gid) plus a username symlink to it are ensured.
xdg-runtime-dirs_start() {
[[ -d "@@runstatedir@@"/user ]] || mkdir "@@runstatedir@@"/user >/dev/null 2>&1
for u in $(cut -f1,3-4 -d ':' "@@sysconfdir@@"/passwd);do
# Split "name:uid:gid": prefix -> user, suffix -> gid, middle -> uid.
user=${u%%:*}
gid=${u##*:}
uid=${u%:*}; uid=${uid##*:}
# status() is provided by the init framework that sources this file.
status CUSTOM "xdg-runtime-dirs: ${user}"
if [[ ! -d "@@runstatedir@@"/user/"${uid}" ]];then
mkdir "@@runstatedir@@"/user/"${uid}"
# make sure directory is owned by user's primary group
chown "${uid}:${gid}" "@@runstatedir@@"/user/"${uid}"
chmod 700 "@@runstatedir@@"/user/"${uid}"
fi
if [[ ! -L "@@runstatedir@@"/user/"${user}" ]];then
# programs seem to default to /run/user/${USER} if XDG_RUNTIME_DIR unset
ln -sf "@@runstatedir@@"/user/"${uid}" "@@runstatedir@@"/user/"${user}"
fi
done
}
# Refuse to stop except during system shutdown (dirs may still be in use).
xdg-runtime-dirs_stop() {
# runtime dirs are cleared at startup, we should not stop because we can't be sure they aren't
# being used when we delete them
[[ "${BEGINNING_RUNNER}" == rc.shutdown ]] || return 1
return 0
}
# Report whether every user's runtime dir and username symlink exist
# under @@runstatedir@@/user. Returns 0 when all are present, 1 otherwise.
xdg-runtime-dirs_status() {
for u in $(cut -f1,3-4 -d ':' "@@sysconfdir@@"/passwd);do
user=${u%%:*}
gid=${u##*:}
# BUG FIX: was ${u%**}, which strips nothing (shortest match of '**' is
# empty), so the subsequent ##*: left the GID in $uid and the wrong
# directory was checked. ${u%:*} matches xdg-runtime-dirs_start.
uid=${u%:*}; uid=${uid##*:}
if [[ ! -d "@@runstatedir@@"/user/"${uid}" || ! -L "@@runstatedir@@"/user/"${user}" ]];then
return 1
fi
done
return 0
}
| true
|
894ab9b7d059233c6d3b35f75250190511b58a90
|
Shell
|
magicalcosmos/project-structure
|
/appctl.sh
|
UTF-8
| 729
| 4
| 4
|
[] |
no_license
|
#!/bin/sh
DIR=`pwd`
NODE=`which node`
# get action
ACTION=$1
# help
# Print usage on stdout and terminate the script with a failure status.
usage() {
  printf '%s\n' "Usage: ./appctl.sh {start|stop|restart}"
  exit 1
}
# Emit the recorded server PID (contents of ./run/app.pid) on stdout;
# prints nothing when no pid file exists.
get_pid() {
  local pidfile=./run/app.pid
  if [ -f "$pidfile" ]; then
    # intentionally unquoted: collapses any stray whitespace in the file
    echo $(cat "$pidfile")
  fi
}
# start app
# Launch the node app in the background unless a pid file says it is
# already running. Uses globals $NODE (node binary) and $DIR (app dir)
# plus the sibling get_pid helper.
# NOTE(review): $pid is unquoted in the test below — fine for a single
# numeric pid, but would break on whitespace; also nothing writes
# ./run/app.pid here — presumably app.js writes it itself; confirm.
start() {
pid=`get_pid`
if [ ! -z $pid ]; then
echo 'server is already running'
else
$NODE $DIR/app.js 2>&1 &
echo 'server is running'
fi
}
# stop app
# Stop the running server: look up its pid via get_pid and send SIGTERM
# (kill -15) for a graceful shutdown. Prints a notice when not running.
stop() {
pid=`get_pid`
if [ -z $pid ]; then
echo 'server not running'
else
echo "server is stopping ..."
kill -15 $pid
echo "server stopped !"
fi
}
##############################
##### how to use ########
#####./appctl.sh start########
#####./appctl.sh stop#########
#####./appctl.sh restart######
##############################
| true
|
79d433e6211153989cb8f6a966915d29b98e823e
|
Shell
|
mgup73/scripts
|
/fil.sh
|
UTF-8
| 156
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Rename files in the current directory from CamelCase to snake_case
# (e.g. ./MyFile.txt -> my_file.txt). Requires GNU sed (\L lowercasing).
# NOTE(review): the vim/ls lines below look like leftover scratch work —
# they edit/overwrite ab.txt but the result is never used; confirm intent.
vim ab.txt
ls -l | grep -v '^d' > ab.txt
# BUG FIX: the loop variable was written as "b.txt", which is not a valid
# shell identifier, so the loop aborted and $file was never set.
for file in ./* ; do
mv "$file" "$(echo "$file"|sed -e 's/\([A-Z]\)/_\L\1/g' -e 's/^.\/_//')"
done
| true
|
bfb1a60cccc4766e1c7fbc5cf8faf8af6810dc0e
|
Shell
|
krjoshi/tegu
|
/system/tegu_req.ksh
|
UTF-8
| 18,676
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env ksh
# vi: sw=4 ts=4:
#
# ---------------------------------------------------------------------------
# Copyright (c) 2013-2015 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------------
#
#
# Mnemonic: tegu_req.ksh
# Abstract: Request interface to tegu. This is a convenience and scripts can just use
# a direct curl if need be. This script uses rjprt to make the request and
# then to print the resulting json in a more human friendly manner. Using the
# -j optin causes the raw json returned by Tegu to be spilled to standard
# out.
#
# Looking at the code and wondering what `rjprt` is? Well it's short for
# request and json print. It acts (somewhat) like curl in as much as it
# makes a GET/PUT/POST/DELETE request supplying the body of data to be worked
# on. Then, unlike curl, formats the resulting json output before writing it
# to the tty device. The -j and -d options control the format of the output.
#
# NOTE: this script was dumbed down to work with bash; it may still not
# function correctly with bash and before complaining just install ksh
# and use that.
#
# Date: 01 Jan 2014
# Author: E. Scott Daniels
#
# Mods: 05 Jan 2014 - Added listres and cancel support.
# 08 Jan 2014 - Changed the order of parms on the reservation command so
# that they match the order on the HTTP interface. This was purely
# to avoid confusion. Added a bit better error checking to reserve
# and consistently routing error messages to stderr.
# 03 Mar 2014 - Added error checking based on host pair syntax.
# 31 Mar 2014 - Allows for queue dump/list command.
# 13 May 2014 - Added support to provide dscp value on a reservation req.
# 09 Jun 2014 - Added https proto support.
# 17 Jun 2014 - Added token authorisation support for privileged commands.
# 22 Jul 2014 - Added support for listulcap request.
# 09 Nov 2014 - change <<- here doc command to << -- seems some flavours of
# kshell don't handle this? Also removed support for keystone cli
# and substituted a curl command since it seems keystone isn't installed
# everyplace (sigh).
# 25 Feb 2015 - Added mirror commands
# 31 Mar 2015 - Added support for sending key-value pairs to listhosts and
# graph commands.
# 01 Apr 2015 - Corrected bug with token passed on steering request.
# 18 May 2015 - Dumbed down so that bash could run the script.
# 02 Jun 2015 - Added optional request name to *-mirror commands to make
# consistent with others (no dash).
# 04 Jun 2015 - Added token to -a call.
# 10 Jun 2015 - Added one way reservation support
# 19 Jun 2015 - Added support for v3 token generation.
# 30 Jun 2015 - Fixed a bunch of typos.
# 01 Jul 2015 - Correct bug in mirror timewindow parsing.
# 20 Jul 2015 - Corrected potential bug with v2/3 selection.
# ----------------------------------------------------------------------------------------
function usage {
cat <<endKat
usage: $argv0 [-d] [-h tegu-host[:port] [-j] [-K] [-k key=value] [-r rname] [-s] [-t token|-T] command parms
-d causes json output from tegu to be formatted in a dotted hierarch style
-f force prompting for user and password if -T is used even if a user name or password is
currrently set in the environment.
-h is needed when tegu is running on a different host than is being used to run tegu_req
and/or when tegu is listening on a port that isn't the default
-j causes raw json to be spilled to the standard out device
-k k=v Supplies a key/value pair that is necessary on some requests. Multiple -k options
can be supplied when needed.
-K Use keystone command line interface, rather than direct API, to generate a token
(ignored unless -T is used)
-r allows a 'root' name to be supplied for the json output when humanised
-s enables secure TLS (https://) protocol for requests to Tegu.
-t allows a keystone token to be supplied for privileged commands; -T causes a token to
be generated using the various OS_ environment variables. If a needed variable is
not in the environment, then a prompt will be issued. When either -t or -T is given
a %t can be used on the commandline in place of the token and the token will
substituted. For example: %t/cloudqos/daniels8 would substitute the generated
token into the host name specification.
commands and parms are one of the following:
$argv0 reserve [bandwidth_in,]bandwidth_out [start-]expiry token/project/host1,token/project/host2 cookie [dscp]
$argv0 owreserve bandwidth_out [start-]expiry token/project/host1,token/project/host2 cookie [dscp]
$argv0 cancel reservation-id [cookie]
$argv0 listconns {name[ name]... | <file}
$argv0 add-mirror [start-]end port1[,port2...] output [cookie] [vlan]
$argv0 del-mirror name [cookie]
$argv0 list-mirrors
$argv0 show-mirror name [cookie]
Privileged commands (admin token must be supplied)
$argv0 graph
$argv0 listhosts
$argv0 listulcap
$argv0 listres
$argv0 listqueue
$argv0 setdiscount value
$argv0 setulcap tenant percentage
$argv0 refresh hostname
$argv0 steer {[start-]end|+seconds} tenant src-host dest-host mbox-list cookie
$argv0 verbose level [subsystem]
If only bandwidth_out is supplied, then that amount of bandwidth is reserved
in each direction. Otherwise, the bandwidth out value is used to reserve
bandwidth from host1 (out) to host2 and the bandwidth in is used to reserve
bandwidth from host2 (in) to host1. Both values may be specified with trailing
G/M/K suffixes (e.g. 10M,20M).
The dscp value is the desired value that should be left tagging the data as it
reaches the egress point. This allows applications to have their data tagged
in cases when the application does not, or cannot, tag it's own data.
For the listconns command, "name" may be a VM name, VM ID, or IP address. If
a file is supplied on stdin, then it is assumed to consist of one name per
line.
For the cancel command the reservation ID is the ID returned when the reservation
was accepted. The cookie must be the same cookie used to create the reservation
or must be omitted if the reservation was not created with a cookie.
For verbose, this controls the amount of information that is written to the log
(stderr) by Tegu. Values may range from 0 to 9. Supplying the subsystem causes
the verbosity level to be applied just to the named subsystem. Subsystems are:
net, resmgr, fqmgr, http, agent, fqmgr, or tegu
Admin Token
The admin token can be supplied using the -t parameter and is required for all
privileged commands. The token can be generated by invoking the keystone token-get
command for the user that is defined as the admin in the Tegu configuration file.
The admin token is NOT the token that is defined in the Openstack configuration.
If the -T option is used, $argv0 will prompt for username and password and then
will generate the admin token to use. Tokens may be needed on host names
and those must be generated independently.
endKat
}
# generate the input json needed to request a token using openstack/keystone v3 interface
# Emit (on stdout) the JSON request body for a Keystone v3 token request.
# Values come from the OS_* environment variables; OS_DOMAIN_NAME,
# OS_USERNAME and OS_PASSWORD fall back to defaults when unset, while
# OS_TENANT_NAME is substituted verbatim (empty if unset).
# NOTE: the here-doc below IS the runtime output -- do not annotate inside it.
function gen_v3_token_json {
cat <<endKat
{
"auth": {
"identity": {
"methods": [ "password" ],
"password": {
"user": {
"domain": { "name": "${OS_DOMAIN_NAME:-default}" },
"name": "${OS_USERNAME:-missing}", "password": "${OS_PASSWORD:-missing}"
}
},
"scope": {
"project": {
"name": "$OS_TENANT_NAME"
}
}
}
}
}
endKat
}
# parse the output from keystone/openstack version2 token generation
# Extract the token id from a Keystone v2 token-generation response read
# on stdin.  The first two awk passes re-split the JSON so that each
# key/value pair lands on its own record; the third pass prints the first
# "id" value found after the "access" tag and stops.
function v2_suss_token {
awk '{ print $0 "},"} ' RS="," | awk '1' RS="{" | awk '
/"access":/ { snarf = 1; next } # we want the id that is a part of the access struct
/"id":/ && snarf == 1 { # so only chase id if we have seen access tag
gsub( "\"", "", $0 ); # drop annoying bits of json
gsub( "}", "", $0 );
gsub( ",", "", $0 );
print $NF
exit ( 0 ); # stop short; only need one
} ' # now bash compatible
}
# Run the v3 output for the returned token
# Bloody openstack puts the token from a v3 request in the HEADER and not in the body
# with the rest of the data; data does NOT belong in the transport header! Header fields
# are tagged by rjprt and are contained in square brackets which need to be stripped.
# Pull the v3 auth token from rjprt output on stdin.  Keystone v3 puts
# the token in the X-Subject-Token response header; rjprt tags header
# fields and wraps the value in square brackets, which are stripped
# before the bare token is printed.  Only the first match is used.
function v3_suss_token {
  awk '
    /header: X-Subject-Token/ {
      token = $NF
      gsub( /[][]/, "", token )
      print token
      exit( 0 )
    }'
}
# Convert a user supplied time specification into an absolute epoch
# timestamp, echoed on stdout.
#   +NNN -> now plus NNN seconds
#   NNN  -> used verbatim (already an absolute epoch time)
# Anything starting with '-' is rejected: prints an error, shows usage
# and exits 1.
function str2expiry
{
  typeset expiry

  if [[ $1 == "+"* ]]
  then
    expiry=$(( $(date +%s) $1 ))    # +sss: offset from now
  elif [[ $1 == -* ]]
  then
    # bug fix: report the offending value $1; the old message printed
    # the never-passed second parameter ($2), i.e. an empty string
    echo "start-end timestamp seems wrong: $1 [FAIL]" >&2
    usage >&2
    exit 1
  else
    expiry=$1                       # absolute timestamp; pass through
  fi

  echo $expiry
}
# given a raw token, or nothing, generate the proper rjprt option to set
# it in the header.
# CAUTION: error messages MUST go to &2
# Given a raw token (or nothing), emit the rjprt option string that puts
# the token into the request header.  Emits nothing when no token is
# supplied.  Warns on stderr when the installed rjprt is too old to
# understand the -a option.
# CAUTION: error messages MUST go to &2
function set_xauth
{
  if [[ -z $1 ]]
  then
    return
  fi

  if ! rjprt -?|grep -q -- -a
  then
    echo "" >&2
    echo "WARNING: the version of rjprt installed in $(which rjprt) is old, some information might not be sent to tegu" >&2
    echo " install a new version of rjprt, or remove the old one" >&2
    echo "" >&2
  fi

  echo " -a $1 "
}
# Interactively generate an OpenStack auth token and print it on stdout.
# Missing OS_USERNAME/OS_PASSWORD/OS_TENANT_NAME/OS_AUTH_URL values are
# prompted for on the controlling tty (password with echo disabled).
# With use_keystone set (-K) the keystone CLI is used; otherwise the
# keystone v2 or v3 HTTP API is called depending on the version in
# OS_AUTH_URL.  Returns 1 (with a message on stderr) when no token could
# be generated.
function gen_token
{
typeset token_value=""
typeset xOS_PASSWORD=""
typeset xOS_USERNAME=""
typeset xOS_TENANT_NAME=""
# restore terminal echo if interrupted while the password prompt is live
trap 'stty echo; exit 2' 1 2 3 15
if [[ -z $OS_USERNAME ]]
then
printf "Token generation:\n\tEnter user name: " >/dev/tty
read xOS_USERNAME
OS_USERNAME="${xOS_USERNAME:-nonegiven}"
fi
if [[ -z $OS_PASSWORD ]]
then
default="no-default"
printf "\tEnter password for $OS_USERNAME: " >/dev/tty
stty -echo
read xOS_PASSWORD
stty echo
printf "\n" >/dev/tty
OS_PASSWORD=${xOS_PASSWORD:-nonegiven999}
fi
trap - 1 2 3 15
if [[ -z $OS_TENANT_NAME ]]
then
printf "\tEnter tenant: " >/dev/tty
read OS_TENANT_NAME
fi
if [[ -z $OS_AUTH_URL ]]
then
printf "\tEnter keystone url: " >/dev/tty
read OS_AUTH_URL
fi
# exported so child processes (keystone/curl/rjprt) can see them
export OS_TENANT_NAME
export OS_PASSWORD
export OS_USERNAME
export OS_AUTH_URL
if (( use_keystone )) # -K used on the command line
then
token_value=$( keystone token-get | awk -F \| '{gsub( "[ \t]", "", $2 ) } $2 == "id" {print $3 }' ) # now bash compatible
else
content_type="Content-type: application/json"
case $OS_AUTH_URL in
*/v2.0*)
url="$OS_AUTH_URL/tokens"
token_value=$( curl -s -d "{\"auth\":{ \"tenantName\": \"$OS_TENANT_NAME\", \"passwordCredentials\":{\"username\": \"$OS_USERNAME\", \"password\": \"$OS_PASSWORD\"}}}" -H "$content_type" $url | v2_suss_token )
;;
*/v3*)
url="$OS_AUTH_URL/auth/tokens"
body="$( gen_v3_token_json )" # body for the url
# v3 returns the token in a response header, so rjprt -h is needed
token_value=$( rjprt -h -J -m POST -d -D "$body" -t $url | v3_suss_token )
;;
*) echo "version in OS_AUTH_URL ($OS_AUTH_URL) is not supported for -T" >&2
exit 1
;;
esac
fi
if [[ -z $token_value ]]
then
echo "unable to generate a token for $OS_USERNAME [FAIL]" >&2
return 1
fi
echo ${token_value%% *} # ensure any trailing junk is gone
return 0
}
# ------------------------------------------------------------------------------------------------------------
# ---- defaults and command line option parsing ------------------------------
argv0="${0##*/}"
port=29444
host=localhost:$port
opts=""
root=""
proto="http"
prompt4token=0
force=0
use_keystone=0
bandwidth="bandwidth" # http api collections
steering="api" # eventually this should become steering
default="api"
# consume leading -options; everything after is the action + its args
while [[ $1 == -* ]]
do
case $1 in
-d) opts+=" -d";;
-f) force=1;;
-F) bandwidth="api"; steering="api";; # force collection to old single set style
-h) host=$2; shift;;
-j) opts+=" -j";;
-k) kv_pairs+="$2 "; shift;;
-K) use_keystone=1;;
-r) root="$2"; shift;;
-s) proto="https";;
-t) raw_token="$2"; token=$"auth=$2"; shift;;
-T) prompt4token=1;;
-\?) usage
exit 1
;;
*) echo "ERROR: unrecognised option: $1"
usage
exit 1
;;
esac
shift
done
opts+=" -r ${root:-$1}"
if (( force > 0 )) # force username and password prompts; other OS vars default if set
then
OS_USERNAME=""
OS_PASSWORD=""
fi
# append the default port when the user gave a bare host name
if [[ $host != *":"* ]]
then
host+=":$port"
fi
if (( prompt4token )) # if -T given, prompt for information needed to generate a token
then
raw_token="$( gen_token )"
if [[ -z $raw_token ]]
then
exit 1
fi
token="auth=$raw_token"
fi
# add the header-auth option when a token is available
opts+=$( set_xauth $raw_token )
# ---- action dispatch: map the CLI verb onto a Tegu HTTP API request --------
# Each branch forwards the request via rjprt to the matching collection
# ($default, $bandwidth, $steering or mirrors/).
case $1 in
ping)
rjprt $opts -m POST -t "$proto://$host/tegu/$default" -D "$token ping"
;;
listq*|qdump|dumpqueue*)
rjprt $opts -m POST -t "$proto://$host/tegu/$bandwidth" -D "$token qdump"
;;
listr*)
rjprt $opts -m POST -t "$proto://$host/tegu/$default" -D "$token listres $kv_pairs"
;;
listh*) # list hosts
rjprt $opts -m POST -t "$proto://$host/tegu/$default" -D "$token listhosts $kv_pairs"
;;
listul*) # list user link caps
rjprt $opts -m POST -t "$proto://$host/tegu/$bandwidth" -D "$token listulcaps"
;;
listc*) # list connections
# names come from the command line, or one per line on stdin
if (( $# < 2 )) # assume it's on stdin
then
sed 's/^/listconns /' >/tmp/PID$$.data
else
shift
for x in "$@"
do
echo "listconns $x"
done >/tmp/PID$$.data
fi
rjprt $opts -m POST -t "$proto://$host/tegu/$default" </tmp/PID$$.data
rm -f /tmp/PID$$.data
;;
graph)
rjprt $opts -m POST -D "$token graph $kv_pairs" -t "$proto://$host/tegu/$default"
;;
cancel)
shift
case $# in
1|2) ;;
*) echo "bad number of positional parameters for cancel [FAIL]" >&2
usage >&2
exit 1
;;
esac
rjprt $opts -m DELETE -D "reservation $1 $2" -t "$proto://$host/tegu/$bandwidth"
;;
pause)
rjprt $opts -m POST -D "$token pause" -t "$proto://$host/tegu/$default"
;;
refresh)
rjprt $opts -m POST -D "$token refresh $2" -t "$proto://$host/tegu/$default"
;;
resume)
rjprt $opts -m POST -D "$token resume" -t "$proto://$host/tegu/$default"
;;
reserve)
shift
#teg command is: reserve <bandwidth>[K|M|G] [<start>-]<end> <host1-host2> [cookie [dscp]]
if (( $# < 4 ))
then
echo "bad number of positional parms for reserve [FAIL]" >&2
usage >&2
exit 1
fi
expiry=$( str2expiry $2 )
if [[ $3 != *"-"* ]] && [[ $3 != *","* ]]
then
echo "host pair must be specified as host1-host2 OR host1,host2 [FAIL]" >&2
exit 1
fi
if [[ $3 == *"-any" ]] || [[ $3 == *",any" ]]
then
echo "second host in the pair must NOT be 'any' [FAIL]" >&2
exit 1
fi
if [[ -n $5 ]]
then
if (( $5 < 0 || $5 > 64 ))
then
echo "dscp value ($5) must be between 0 and 64 [FAIL]" >&2
exit 1
fi
fi
# ${3//%t/$raw_token} replaces any %t in the host pair with the token
rjprt $opts -m POST -D "reserve $kv_pairs $1 $expiry ${3//%t/$raw_token} $4 $5" -t "$proto://$host/tegu/$bandwidth"
;;
owres*|ow_res*)
shift
#teg command is: owreserve <bandwidth>[K|M|G] [<start>-]<end> <host1-host2> [cookie [dscp]]
if (( $# < 4 ))
then
echo "bad number of positional parms for owreserve [FAIL]" >&2
usage >&2
exit 1
fi
expiry=$( str2expiry $2 )
rjprt $opts -m POST -D "ow_reserve $kv_pairs $1 $expiry ${3//%t/$raw_token} $4 $5" -t "$proto://$host/tegu/$bandwidth"
;;
setdiscount)
rjprt $opts -m POST -D "$token setdiscount $2" -t "$proto://$host/tegu/$bandwidth"
;;
setulcap)
rjprt $opts -m POST -D "$token setulcap $2 $3" -t "$proto://$host/tegu/$default"
;;
steer*)
expiry=$( str2expiry $2 )
rjprt $opts -m POST -D "steer $kv_pairs $expiry ${3//%t/$raw_token} $4 $5 $6 $7" -t "$proto://$host/tegu/$steering"
;;
verbose)
case $2 in
[0-9]*) rjprt $opts -m POST -D "$token verbose $2 $3" -t "$proto://$host/tegu/$default";; # assume tegu way: level subsystem
*) rjprt $opts -m POST -D "$token verbose $3 $2" -t "$proto://$host/tegu/$default";; # assume entered backwards: subsystem level
esac
;;
add-mirror|addmirror)
shift
if (( $# < 3 ))
then
echo "bad number of positional parms for add-mirror [FAIL]" >&2
usage >&2
exit 1
fi
# build the JSON body for the mirrors collection by hand
json="{"
case $1 in # handle [start-]end or +sss
*-*) # start-end
json="$json \"start_time\": \"${1%%-*}\", \"end_time\": \"${1##*-}\","
;;
+[0-9]*) # number of seconds after now
now=$( date +%s )
json="$json \"start_time\": \"${now}\", \"end_time\": \"$((now $1))\","
;;
[0-9]*) # just a hard end
now=$( date +%s )
if (( $1 < now ))
then
echo "end time ($1) is not in the future"
echo "invalid window: expected [start-]end or +sss [FAIL]"
usage
exit 1
fi
json="$json \"start_time\": \"${now}\", \"end_time\": \"$1\","
;;
*)
echo "invalid window: expected [start-]end or +sss [FAIL]"
usage
exit 1
;;
esac
json="$json \"output\": \"$3\", \"port\": [ "
sep=""
for p in $( echo $2 | tr , ' ' )
do
json="$json$sep\"$p\""
sep=", "
done
json="$json ]"
if (( $# >= 4 ))
then
json="$json, \"cookie\": \"$4\""
fi
if (( $# >= 5 ))
then
json="$json, \"vlan\": \"$5\""
fi
json="$json }"
rjprt $opts -m POST -D "$json" -t "$proto://$host/tegu/mirrors/"
;;
del-mirror|delmirror)
shift
case $# in
1)
rjprt $opts -m DELETE -t "$proto://$host/tegu/mirrors/$1/" </dev/null
;;
2)
rjprt $opts -m DELETE -t "$proto://$host/tegu/mirrors/$1/?cookie=$2" </dev/null
;;
*)
echo "bad number of positional parameters for del-mirror [FAIL]" >&2
usage >&2
exit 1
;;
esac
;;
list-mirrors|listmirror)
rjprt $opts -m GET -t "$proto://$host/tegu/mirrors/"
;;
show-mirror|showmirror)
shift
case $# in
1)
rjprt $opts -m GET -t "$proto://$host/tegu/mirrors/$1/"
;;
2)
rjprt $opts -m GET -t "$proto://$host/tegu/mirrors/$1/?cookie=$2"
;;
*)
echo "bad number of positional parameters for show-mirror [FAIL]" >&2
usage >&2
exit 1
;;
esac
;;
test)
shift
echo "test: raw_token=($raw_token)"
echo "test: options: ($opts)"
;;
*)
echo ""
echo "unrecognised action: $1 [FAIL]" >&2
echo ""
usage >&2
;;
esac
| true
|
52572a285d45a4d780076c6f21fb34680aef4953
|
Shell
|
hasufell/docker-gentoo-teamspeak
|
/start.sh
|
UTF-8
| 878
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
# Translate the supported ts3server environment variables into
# "name=value " command line fragments, one per line on stdout.
# Variables that are unset (or empty) are skipped.
print_cmd_args() {
  local option
  local supported_options=(
    default_voice_port
    voice_ip
    create_default_virtualserver
    machine_id
    filetransfer_port
    filetransfer_ip
    query_port
    query_ip
    clear_database
    logpath
    dbplugin
    dbpluginparameter
    dbsqlpath
    dbsqlcreatepath
    licensepath
    createinifile
    inifile
    query_ip_whitelist
    query_ip_blacklist
    query_skipbruteforcecheck
    dbclientkeepdays
    dblogkeepdays
    logquerycommands
    no_permission_update
    open_win_console
    no_password_dialog
    dbconnections
    logappend
  )

  for option in "${supported_options[@]}"; do
    # indirect expansion: only emit options that carry a value
    if [[ ${!option} ]]; then
      printf '%s=%s \n' "${option}" "${!option}"
    fi
  done
}
# Installation prefix of the teamspeak server binaries.
BASEDIR="/opt/teamspeak3-server"
# Run from the persistent data directory so the server writes there.
cd "/var/lib/teamspeak3-server"
# Replace this shell with the server, dropping privileges to the
# teamspeak3 user/group and prefixing the library search path so the
# bundled shared objects are found.  Arguments are produced by
# print_cmd_args (word-split intentionally).
exec sudo -u teamspeak3 -g teamspeak3 \
LD_LIBRARY_PATH="${BASEDIR}${LD_LIBRARY_PATH:+:}${LD_LIBRARY_PATH}" \
"${BASEDIR}/sbin/ts3server-bin" -- \
$(print_cmd_args)
| true
|
d0a97cec5a1aa1110993b2d3388450c7f2f40cb4
|
Shell
|
jeremyary/performance-addon-operators
|
/build/assets/scripts/pre-boot-tuning.sh
|
UTF-8
| 1,115
| 3.28125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Pre-boot CPU isolation tuning: ban the reserved CPUs from irqbalance
# and pin kernel rcuo* threads to the housekeeping CPUs.
# Assumes RESERVED_CPUS and RESERVED_CPU_MASK_INVERT are provided by the
# environment -- TODO confirm the caller always sets them (set -u will
# abort otherwise).
set -euo pipefail
SYSTEM_CONFIG_FILE="/etc/systemd/system.conf"
SYSTEM_CONFIG_CUSTOM_FILE="/etc/systemd/system.conf.d/setAffinity.conf"
# Already-applied check: the banned-CPU mask must be present in the
# irqbalance config and the custom systemd drop-in must exist.
if [ -f /etc/sysconfig/irqbalance ] && [ -f ${SYSTEM_CONFIG_CUSTOM_FILE} ] && grep -ls "IRQBALANCE_BANNED_CPUS=${RESERVED_CPU_MASK_INVERT}" /etc/sysconfig/irqbalance; then
echo "Pre boot tuning configuration already applied"
echo "Setting kernel rcuo* threads to the housekeeping cpus"
# || true: taskset may fail on threads that exit in the meantime
pgrep rcuo* | while read line; do taskset -pc ${RESERVED_CPUS} $line || true; done
else
#Set IRQ balance banned cpus
if [ ! -f /etc/sysconfig/irqbalance ]; then
touch /etc/sysconfig/irqbalance
fi
# replace an existing setting in place, otherwise append it
if grep -ls "IRQBALANCE_BANNED_CPUS=" /etc/sysconfig/irqbalance; then
sed -i "s/^.*IRQBALANCE_BANNED_CPUS=.*$/IRQBALANCE_BANNED_CPUS=${RESERVED_CPU_MASK_INVERT}/" /etc/sysconfig/irqbalance
else
echo "IRQBALANCE_BANNED_CPUS=${RESERVED_CPU_MASK_INVERT}" >>/etc/sysconfig/irqbalance
fi
# include the systemd config files in the initramfs so the affinity
# drop-in takes effect at early boot; flag that a reboot is needed
rpm-ostree initramfs --enable --arg=-I --arg="${SYSTEM_CONFIG_FILE} ${SYSTEM_CONFIG_CUSTOM_FILE}"
touch /var/reboot
fi
| true
|
22447a8734819c19eefcaaed9e94053697ac055c
|
Shell
|
RaffaeleCanale/scripts-rabbitmq
|
/src/rabbitmq
|
UTF-8
| 1,064
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Declare script constants via the callback passed in $1, invoked as:
# $1 NAME VALUE.  $1 is deliberately left unquoted so a multi-word
# declaration command can be supplied.  The __*__ placeholders are
# presumably substituted at install time -- confirm against the
# __STARTER__ framework.
function declareConstants() {
$1 HOST "__HOST__"
$1 AUTH "__AUTH__"
}
# Print the usage text on stdout.  NOTE(review): green/cyan/bold look
# like colouring helpers provided by the sourced __STARTER__ framework
# -- confirm before editing.  The here-doc body is runtime output; do
# not annotate inside it.
function printHelp() {
cat << EOF
Publish an RabbitMQ message to an exchange
Usage:
$PROGNAME `green "<key> <payload>"` `cyan "--exchange <exchange>"`
Options:
`cyan exchange`: Name of the exchange (default: `bold "__DEFAULT_EXCHANGE__"`)
EOF
}
# Register command line options with the framework's getOpt:
# (flag, target variable, arity, default value).
function loadOptions() {
# unusedBehaviour="IGNORE"
getOpt "--exchange" exchange 1 "__DEFAULT_EXCHANGE__"
}
# Build the publish request body from the data.json template by
# substituting the exchange, routing key and payload placeholders.
# sedeasy is presumably a literal-safe sed wrapper from the framework
# -- TODO confirm.
function getData() {
cat "__ROOT__/data.json" \
| sedeasy "__EXCHANGE__" "$exchange" \
| sedeasy "__KEY__" "$key" \
| sedeasy "__PAYLOAD__" "$payload"
}
# POST the message to the RabbitMQ HTTP API publish endpoint (%2F is the
# url-encoded "/" default vhost) and print the broker's "routed" flag.
function execute() {
curl "${HOST}/api/exchanges/%2F/${exchange}/publish" -s \
-H "authorization: Basic ${AUTH}" \
--data-binary "`getData`" | jq '.routed'
}
# Entry point invoked by the framework: read the positional arguments,
# publish the message and warn when the broker could not route it.
function run () {
getArg key,payload
info "Publish to [$exchange/$key]: $payload"
result=`execute`
if [ "$result" != 'true' ]; then
warn "Failed to route"
fi
}
source "__STARTER__@v2"
| true
|
76ab7150b90a9589e9aee3815b26dba23b2747fb
|
Shell
|
Edert/DCS_predictions_chr
|
/run_pd_homerpd.sh
|
UTF-8
| 2,632
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
# Run HOMER getDifferentialPeaksReplicates over every peak caller/mode
# combination for one data set, recording wall time and memory usage.
#   $1 :: sample name     $2 :: set name      $3 :: tool name
#   $4-$9 :: BAM/tag inputs (S11 S12 IN1 S21 S22 IN2) -- TODO confirm order
NAME=$1
SET=$2
TOOL=$3
mkdir -p results/$TOOL results/$TOOL/$SET
mkdir -p log/$TOOL
# skip the whole run when output .bed files already exist
files=$(ls results/$TOOL/$SET/*.bed 2> /dev/null | wc -l)
if [ "$files" = "0" ]; then
echo "running $TOOL with: $1 $2 $3" #$4 $5 $6 $7 $8 $9 $10 $11 $12 $13 $14 $15"
OUT_NAME=$(basename $NAME _sample1-rep1_mm)
LOG="../../../log/$TOOL/$SET.log"
cd results/$TOOL/$SET/
STARTTIME=`date +%s.%N`
#creat tags
/apps/homer_4.11/bin/makeTagDirectory S11 $4 2>> $LOG
/apps/homer_4.11/bin/makeTagDirectory S12 $5 2>> $LOG
/apps/homer_4.11/bin/makeTagDirectory S21 $7 2>> $LOG
/apps/homer_4.11/bin/makeTagDirectory S22 $8 2>> $LOG
/apps/homer_4.11/bin/makeTagDirectory IN1 $6 2>> $LOG
/apps/homer_4.11/bin/makeTagDirectory IN2 $9 2>> $LOG
PREPDONE=`date +%s.%N`
# awk -F"." '{print}' passes the bc result through unchanged
TIMEDIFFPREP=`echo "$PREPDONE - $STARTTIME" | bc | awk -F"." '{print}'`
echo "prep $TIMEDIFFPREP" > time.txt
for PCALLER in ../../../results_peaks/*; do
#for PCALLER in "../../../results_peaks/sicer"; do
for PMODE in $PCALLER/$SET/*; do
echo "using $(basename $PCALLER) mode: $(basename $PMODE)"
# merge the four replicate peak files into one sorted interval set
cat $PMODE/s11_peaks.bed $PMODE/s12_peaks.bed $PMODE/s21_peaks.bed $PMODE/s22_peaks.bed | sort -k1,1 -k2,2n > peaks.bed
bedtools merge -i peaks.bed | sort -k1,1 -k2,2n > m_peaks.bed
STARTTIME=`date +%s.%N`
#/apps/homer_4.11/bin/getDifferentialPeaksReplicates.pl -t S11/ S12/ -b S21/ S22/ -i IN1/ IN2/ -f 0.7 -q 1 -p m_peaks.bed > results.txt 2>> $LOG
# /usr/bin/time -f "%K %M" records average-total and peak resident memory (KB)
/usr/bin/time -o mem.txt -f "%K %M" /apps/homer_4.11/bin/getDifferentialPeaksReplicates.pl -t S11/ S12/ -b S21/ S22/ -i IN1/ IN2/ -f 0.7 -q 1 -p m_peaks.bed > results.txt 2>> $LOG
#save result
PSHORT=$(basename $PCALLER)
MSHORT=$(basename $PMODE)
OUT_NAME=$(basename $NAME _sample1-rep1_mm)"_"$PSHORT"_"$MSHORT".bed"
ENDTIME=`date +%s.%N`
TIMEDIFF=`echo "$ENDTIME - $STARTTIME" | bc | awk -F"." '{print}'`
echo $PSHORT"_"$MSHORT" $TIMEDIFF" >> time.txt
MEMUSAGE=$(sed '/non-zero status/d' mem.txt )
echo $PSHORT"_"$MSHORT" $MEMUSAGE" >> memory.txt
lines=$(wc -l results.txt | awk '{print $1}')
if [ $lines -ge 2 ]; then
#reformat for eval
cat results.txt | awk -F"\t" '{print $2"\t"$3"\t"$4"\t"$26"\t"$24}' | grep -v "^Chr" | sort -k1,1 -k2,2n > $OUT_NAME
else
#create empty file
touch $OUT_NAME
fi
#clean up
rm -rf m_peaks.bed peaks.bed mem.txt
done
done
rm -rf results.txt S11 S12 S21 S22 IN1 IN2
else
echo "results/$TOOL/$SET/bed already exists exiting..."
fi
| true
|
6c0589d0a7c522508ce404186840ea33a9dd9ebb
|
Shell
|
IPv6-mPvD/pvdd
|
/tests/pvdid-get-hflag.sh
|
UTF-8
| 193
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/sh
# Query the pvdd daemon (TCP port 10101 via $NC, provided by
# check-nc.sh) for the given PvD's attributes and print the hFlag value
# with any trailing comma stripped.
. `dirname $0`/check-nc.sh
if [ $# != 1 ]
then
echo "usage : $0 pvdname"
exit 1
fi
echo PVD_GET_ATTRIBUTES "$@" |
$NC 0.0.0.0 10101 |
awk '/hFlag/ { print $3 }' |
sed -e 's/,//'
| true
|
0823b4db15b803fb1ee8c3b853d9e3ce5792998d
|
Shell
|
LucasHell/Bachelor-project
|
/runTTV.sh
|
UTF-8
| 2,247
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
#Bash script to generate files in the correct format for TTVFast, run TTVFast and plot the results
#Remove input and output files from when program ran last time
rm -r ./TTVFast/c_version/input
rm -r ./code/input
rm ./TTVFast/c_version/numberPlanets.csv
rm ./code/timingErrors.csv
rm -r ./code/times/*
rm ./code/transAmpl.csv
rm ./code/numberPlanets.csv
rm ./code/ampError.txt
rm ./code/RA_dec_sys.csv
rm ./code/RA_dec_p.csv
rm ./code/rPlanet.csv
rm ./code/AmplPeriodDouble.csv
mkdir ./TTVFast/c_version/input
#Run code to generate data in supported format for TTVFast
cd code
python inputTTV.py
cd ..
#Move input data to TTVFast directory
cp ./code/input/*.in ./TTVFast/c_version/input/
cp ./code/numberPlanets.csv ./TTVFast/c_version
cp ./code/wtm-RA_dec_sys.csv ./TTVFast/c_version
cp ./code/eccenOmeg.csv ./TTVFast/c_version
cd TTVFast/c_version
#Run TTVFast by creating a setup file for every system and rename the output file to the number of the system
readarray -t numPlanet < <(cut -d, -f2 numberPlanets.csv)
awk -F "\"*,\"*" '{$3=$3*27; if(NR>1)print $3}' wtm-RA_dec_sys.csv > maxTime.csv
awk -F "\"*,\"*" '{$3=$3*27;if(NR>1)print $4}' wtm-RA_dec_sys.csv > minTime.csv
awk -F "\"*,\"*" '{$3=$3*27;if(NR>1)print $5}' wtm-RA_dec_sys.csv > avgTime.csv
readarray -t maxTime < <(cut -d, -f2 maxTime.csv)
readarray -t minTime < <(cut -d, -f2 minTime.csv)
readarray -t avgTime < <(cut -d, -f2 avgTime.csv)
number=0
for file1 in `ls input/*.in | sort --version-sort`;
do
readarray -t data < <(cut -f2 $file1)
period=$(echo ${data[3]} | awk '{$1=$1/20; print $1;}')
echo -e "$file1\n0\n$period\n${maxTime[$number]}\n${numPlanet[$number]}\n0" > setup_file.txt; #${maxTime[$number]}
./run_TTVFast setup_file.txt Times RV_file RV_out;
mv Times output;
mv ./output/Times output/$file1;
number=$((number+1));
done
#Move output data from TTVFast to code directory for plotting
cd ../..
mv TTVFast/c_version/output/input/*.in code/times/
#~ #Run python script to read data generated by TTVFast and plot it
cd code
count=0
for file2 in `ls times/*.in | sort --version-sort`;
do
echo $file2
python transits.py $file2 $count
count=$((count+1))
#~ read -p "Press key to continue.. " -n1 -s
done
python finalPlots.py
| true
|
a56a9f0c3021986812e897fcf8c22224ed5ff7a1
|
Shell
|
thundra-io/thundra-agent-lambda-dotnet-template
|
/release.sh
|
UTF-8
| 348
| 2.875
| 3
|
[] |
no_license
|
#!/bin/sh
# Release script: build the project, then pack and push the NuGet
# package whose version is read from ThundraTemplates.nuspec.
# Requires NUGET_API_KEY in the environment.
set -e

# bug fix: corrected the "the the" typo in the error message
if [ -z "$NUGET_API_KEY" ]; then echo "NUGET_API_KEY should be set to run the release script"; exit 1; fi

sh ./build.sh

# Extract the <Version> element content from the nuspec.
VERSION=$(awk -F'[<>]' '/<Version>/{print $3}' ThundraTemplates.nuspec)
if [ -z "$VERSION" ]; then echo "could not determine version from ThundraTemplates.nuspec"; exit 1; fi

nuget pack ThundraTemplates.nuspec
dotnet nuget push "Thundra.Templates.$VERSION.nupkg" -k "$NUGET_API_KEY" -s https://api.nuget.org/v3/index.json
| true
|
719b991ca8431fc142f77012e313cfa308df518a
|
Shell
|
Talits/getup-engine-installer
|
/run
|
UTF-8
| 2,809
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Launch the getup-engine installer container: decrypt/open the state
# directory, collect the AWS state-backend credentials and start the
# installer image with the state mounted and the env injected.
export INSTALLER_VERSION=$2
#declare -r INSTALLER_VERSION
source prepare-container
shift
set -eu
echo
echo "Image: $image_name"
echo "Container: $container_name"
echo "State dir: $state_dir"
echo
# refuse to continue when the encrypted state is newer than the plain
# one (a previous close-state likely did not run)
# NOTE(review): ".inc" in the message looks like a typo for ".enc" --
# left unchanged here since it is a runtime string
if [ -e $state_dir/getupengine.env -a -e $state_dir/getupengine.env.enc ] && \
[ $state_dir/getupengine.env.enc -nt $state_dir/getupengine.env ]; then
echo "Fail: Potentially unsaved data: $state_dir/getupengine.env.inc is newer than $state_dir/getupengine.env"
exit 1
fi
# Prepend cluster search domain
search=$(grep search /etc/resolv.conf | cut -f2- -d' ')
# setup permissions
if [ $(command -v chcon) ]; then
sudo chcon -R -t svirt_sandbox_file_t ${state_dir}
fi
if [ $(id -u 1000 2> /dev/null ) ]; then
sudo chown 1000 -R ${state_dir}
fi
sudo chgrp -R $(id -g) ${state_dir}
# -v: "is variable set" -- default the state key so set -u does not trip
if ! [ -v STATE_KEY ]; then
STATE_KEY=""
fi
bin/open-state ${state_dir}
if ! grep CLUSTER_ID -qw $state_dir/getupengine.env; then
echo "Invalid file: getupengine.env"
exit 1
fi
# export everything sourced from the state env file
set -a
source $state_dir/getupengine.env
set +a
state_backend_vars=(
STATE_BACKEND_AWS_ACCESS_KEY_ID
STATE_BACKEND_AWS_SECRET_ACCESS_KEY
STATE_BACKEND_AWS_DEFAULT_REGION
)
# prompt (silently) for each unset backend variable; an empty access
# key id aborts the prompting loop
for var in ${state_backend_vars[*]}; do
if ! [ -v $var ]; then
read -sp "$var (empty to ignore): " $var
echo
fi
[ "${#STATE_BACKEND_AWS_ACCESS_KEY_ID}" -eq 0 ] && break
done
# sanity-check credential lengths (20/40 chars) when both were supplied
if [ -v STATE_BACKEND_AWS_ACCESS_KEY_ID -a -v STATE_BACKEND_AWS_SECRET_ACCESS_KEY ]; then
if ! [ "${#STATE_BACKEND_AWS_ACCESS_KEY_ID}" -eq 0 -o "${#STATE_BACKEND_AWS_ACCESS_KEY_ID}" -eq 20 ]; then
echo Invalid STATE_BACKEND_AWS_ACCESS_KEY_ID length: ${#STATE_BACKEND_AWS_ACCESS_KEY_ID}
exit 1
fi
if ! [ "${#STATE_BACKEND_AWS_SECRET_ACCESS_KEY}" -eq 0 -o "${#STATE_BACKEND_AWS_SECRET_ACCESS_KEY}" -eq 40 ]; then
echo Invalid STATE_BACKEND_AWS_SECRET_ACCESS_KEY length: ${#STATE_BACKEND_AWS_SECRET_ACCESS_KEY}
exit 1
fi
else
STATE_BACKEND_AWS_ACCESS_KEY_ID=""
STATE_BACKEND_AWS_SECRET_ACCESS_KEY=""
STATE_BACKEND_AWS_DEFAULT_REGION=""
fi
echo "---> Starting container ${container_name} from image $image_name"
# re-encrypt the state whenever this script exits
trap "STATE_KEY=$STATE_KEY $PWD/bin/close-state ${state_dir}" EXIT
# run the installer; propagate extra args ($@), mount the state and
# pass the cluster zone plus the host's resolv.conf search domains
sudo docker run -it --rm $@ \
-v ${state_dir}/:/state \
--env-file ${state_dir}/getupengine.env \
-e "STATE_KEY=${STATE_KEY}" \
-e "STATE_BACKEND_AWS_ACCESS_KEY_ID=$STATE_BACKEND_AWS_ACCESS_KEY_ID" \
-e "STATE_BACKEND_AWS_SECRET_ACCESS_KEY=$STATE_BACKEND_AWS_SECRET_ACCESS_KEY" \
-e "STATE_BACKEND_AWS_DEFAULT_REGION=$STATE_BACKEND_AWS_DEFAULT_REGION" \
--name ${container_name} \
${CLUSTER_ZONE:+--dns-search $CLUSTER_ZONE} \
$(for dom in $search; do [ $CLUSTER_ZONE != $dom ] && echo --dns-search $dom; done) \
$image_name bash
| true
|
0f7cb680c209c5b8893ac5122b56bcc233a299af
|
Shell
|
escenic/ece-scripts
|
/usr/share/escenic/ece-scripts/common-os.sh
|
UTF-8
| 7,782
| 3.96875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#! /usr/bin/env bash
# Platform/OS specific methods.
#
# by torstein@escenic.com
# depends on common-bashing
common_bashing_is_loaded > /dev/null 2>&1 ||
source "${BASH_SOURCE[0]%/*}/common-bashing.sh"
## Only used if the Tomcat download mirror couldn't be determined
fallback_tomcat_url="http://apache.uib.no/tomcat/tomcat-7/v7.0.79/bin/apache-tomcat-7.0.79.tar.gz"
# Can be used like this:
# common_io_os_loaded 2>/dev/null || source common-os.sh
# Marker used by sourcing guards of the form:
#   common_os_is_loaded 2>/dev/null || source common-os.sh
# Prints 1 so callers know this library is already in the environment.
function common_os_is_loaded() {
  printf '%s\n' 1
}
### get_user_home_directory
## Method which will try its best to find the home diretory of
## existing users and probable home directories of new users.
##
## - On Darwin/Mac OS X, it will return /Users/<user>
## - On systems using /etc/passwd, it will search there to find the
## home dir of existing users.
## - For new users, it will check the configuration of adduser (if
## present).
## - If all of thes above fails, it will return /home/<user>
##
## $1 :: the user name, can either be an existing user or a non-yet
## created user.
# See the header comment above: best-effort home directory lookup for
# existing and not-yet-created users.
function get_user_home_directory() {
  if [ $(uname -s) = "Darwin" ]; then
    echo /Users/$1
  elif [ $(grep -c "^$1:" /etc/passwd) -gt 0 ]; then
    # bug fix: anchor on "user:" so e.g. "bob" does not match "bobby"
    grep "^$1:" /etc/passwd | cut -d':' -f6
  elif [ -r /etc/adduser.conf ]; then
    # new user on Debian-style systems: honour adduser's DHOME setting
    local dir=$(grep DHOME /etc/adduser.conf | grep -v ^# | cut -d'=' -f2)
    echo $dir/$1
  else
    echo "/home/$1"
  fi
}
# True (exit 0) when the platform detection flagged this host as Debian
# or a Debian derivative (on_debian_or_derivative == 1).
function is_on_debian_or_derivative() {
  (( ${on_debian_or_derivative:-0} == 1 ))
}
# True (exit 0) when the platform detection flagged this host as RedHat
# or a RedHat derivative (on_redhat_or_derivative == 1).
function is_on_redhat_or_derivative() {
  (( ${on_redhat_or_derivative:-0} == 1 ))
}
# True when the configured Java vendor is Oracle; defaults to openjdk
# when java_vendor is unset.
function is_java_vendor_oracle() {
  case "${java_vendor-openjdk}" in
    oracle) return 0 ;;
    *) return 1 ;;
  esac
}
# True when the configured Java vendor is OpenJDK; this is also the
# default when java_vendor is unset.
function is_java_vendor_openjdk() {
  case "${java_vendor-openjdk}" in
    openjdk) return 0 ;;
    *) return 1 ;;
  esac
}
### create_user_and_group_if_not_present
## Creates the UNIX user and group when they do not exist yet, using the
## platform's native tool (adduser/useradd) and logging next steps.
## $1 :: user name
## $2 :: group name (optional; defaults to the user name)
function create_user_and_group_if_not_present() {
  local user=$1
  # bug fix: honour the documented second argument; previously the group
  # was unconditionally set to $1 and $2 was silently ignored
  local group=${2:-$1}

  # bug fix: anchor the lookups on "name:" -- the old unanchored grep
  # matched the name anywhere in the line (e.g. in the GECOS field)
  if [ $(grep -c "^${user}:" /etc/passwd) -lt 1 ]; then
    print_and_log "Creating UNIX user $user ..."
    if [ $on_debian_or_derivative -eq 1 ]; then
      run adduser $user \
        --disabled-password \
        --gecos "Escenic-user,Room,Work,Home,Other"
    fi
    if [ $on_redhat_or_derivative -eq 1 ]; then
      run useradd $user \
        --comment "Escenic-user,Room,Work,Home,Other"
    fi
    add_next_step "I created a new UNIX user called $user"
    add_next_step "and you must set a password using: passwd $user"
  fi

  if [ $(grep -c "^${group}:" /etc/group) -lt 1 ]; then
    print_and_log "Creating UNIX group $group ..."
    run addgroup $group
  fi
}
### get_ip
## Will return the IP of the host name. If not found, the host name
## passed to the function will be returned.
##
## $1 :: the host name
function get_ip() {
  # a single ping reply line looks like "64 bytes from host (1.2.3.4): ..."
  local ip=$(ping -c 1 $1 2>/dev/null | \
    grep "bytes from" | \
    cut -d'(' -f2 | \
    cut -d ')' -f1)

  if [ -z "$ip" ]; then
    # bug fix: return after echoing the fallback; the function previously
    # fell through and emitted an extra empty line after the host name
    echo $1
    return
  fi

  echo $ip
}
### assert_commands_available
## Verifies that every argument resolves to an executable on the PATH.
## For each missing command a message is printed via print_and_log and,
## once all arguments were checked, the program exits 1 (removing the
## PID through the normal exit path).
##
## $@ :: a list of the binary/executable/program
function assert_commands_available() {
  local command_name
  errors_found=0

  for command_name in $@; do
    if [ $(which ${command_name} 2>/dev/null | wc -l) -lt 1 ]; then
      print_and_log "Please install $command_name and then run $(basename $0) again."
      errors_found=1
    fi
  done

  if [ $errors_found -eq 1 ]; then
    exit 1
  fi
}
# Determine the Tomcat download URL and echo it.  Precedence:
#   1) the tomcat_download override variable, if set
#   2) the first .tar.gz link scraped from the Apache download page
#      (fragile: depends on the page's HTML layout -- TODO confirm)
#   3) the hard-coded fallback_tomcat_url
function get_tomcat_download_url() {
local url=
if [ -n "${tomcat_download}" ]; then
url=$tomcat_download
else
url=$(
curl -s http://tomcat.apache.org/download-90.cgi | \
grep tar.gz | \
head -1 | \
cut -d'"' -f2
)
fi
if [ -z "${url}" ]; then
url=$fallback_tomcat_url
log "Failed to get Tomcat mirror URL, will use fallback URL $url"
fi
echo $url
}
### download_tomcat
## Downloads Tomcat from the regional mirror
##
## $1 :: target directory
function download_tomcat() {
# resolve the mirror (or fallback) URL, then hand off to the download
# helper; download_uri_target_to_dir is defined elsewhere in this library
local url=$(get_tomcat_download_url)
print_and_log "Downloading Tomcat from $url ..."
download_uri_target_to_dir $url $1
}
### get_free_memory_in_mega_bytes
## Prints the amount of free memory in megabytes, derived from the
## MemFree line of /proc/meminfo.  Linux only: prints nothing elsewhere.
function get_free_memory_in_mega_bytes() {
  if [ $(uname -s) == "Linux" ]; then
    # MemFree is reported in kB; integer-divide down to MB
    awk '/^MemFree:/ { print int( $2 / 1024 ) }' /proc/meminfo
  fi
}
### get_total_memory_in_mega_bytes
## Prints the total amount of memory in megabytes, derived from the
## MemTotal line of /proc/meminfo.  Linux only: prints nothing elsewhere.
function get_total_memory_in_mega_bytes() {
  if [ $(uname -s) == "Linux" ]; then
    # MemTotal is reported in kB; integer-divide down to MB
    awk '/^MemTotal:/ { print int( $2 / 1024 ) }' /proc/meminfo
  fi
}
### add_apt_source
##
## $@ :: the apt line to be added if it's not already present.
# Append the given APT source line to $escenic_sources if it is not
# already present anywhere under /etc/apt/sources.list*.  The repo base
# URL (2nd field of the line) is probed first and the line is skipped
# with a warning when the probe does not answer 200/301.
# NOTE(review): assumes $escenic_sources is set by the caller -- confirm.
function add_apt_source() {
# first, check that the base URL in the sources list returns 200,
# only allow 20 seconds for this test. If the URL doesn't return
# 200, the sources list is not added.
local url=$(echo $@ | cut -d' ' -f2)
local repo_ok=$(
curl \
--silent \
--head \
--connect-timeout 20 \
$url | \
egrep " 200 OK| 301 Moved Permanently" | \
wc -l
)
if [ $repo_ok -eq 0 ]; then
print_and_log "$(yellow WARNING)" \
"The APT repo $url is not OK, not adding it."
return
fi
# only append when no sources file already carries this exact line
if [ "$(grep -r "${@}" /etc/apt/sources.list* | wc -l)" -lt 1 ]; then
echo "# added by $(basename $0) @ $(date)" >> $escenic_sources
echo "$@" >> $escenic_sources
fi
}
### get_memory_usage_of_pid
## Prints the VmSize of the process with pid $1 (e.g. "12345 kB"),
## taken from /proc/<pid>/status.  Linux only: prints nothing when the
## status file does not exist.
function get_memory_usage_of_pid() {
  local status_file=/proc/$1/status

  if [ ! -e $status_file ]; then
    # TODO add support for non-Linux systems
    return
  fi

  # strip the "VmSize:" tag and the leading whitespace, keep the value
  sed -n 's/^VmSize:[ \t]*//p' $status_file
}
### get_memory_summary_of_pid
## Prints the VmSize of pid $1 together with its VmPeak, formatted as
## "<size> (peaked at: <peak>)", read from /proc/<pid>/status.
## Linux only: prints nothing when the status file does not exist.
function get_memory_summary_of_pid() {
  local status_file=/proc/$1/status

  if [ ! -e $status_file ]; then
    # TODO add support for non-Linux systems
    return
  fi

  local current peak
  current=$(sed -n 's/^VmSize:[ \t]*//p' $status_file)
  peak=$(sed -n 's/^VmPeak:[ \t]*//p' $status_file)

  echo "${current} (peaked at: $peak)"
}
### Returns 0 (ok) if the passed RPM file has been installed on the
### current machine, 1 if not.
##
## $1 :: RPM file reference
function is_rpm_already_installed() {
local file=$1
if [ ! -e "${file}" ]; then
return 1
fi
local package_name=
# read the package name out of the .rpm file itself (-p queries a file)
package_name=$(rpm -qp --queryformat "%{Name}\n" "${file}")
is_rpm_already_installed_by_name "${package_name}"
}
# True when a package with the given name is installed (rpm -q exits 0);
# all rpm output is suppressed.
function is_rpm_already_installed_by_name() {
local package_name=$1
rpm -q "${package_name}" &> /dev/null
}
## Returns the secondary interfaces, if any. The loopback device is
## ignored.
##
## If no secondary device could be found, nothing is returned from
## this method.
get_secondary_interfaces() {
  # every link numbered 2 or higher (lo is 1:) that is UP
  # bug fix: the old pattern '^[2-9]+:' missed any index containing a
  # 0 or 1 (e.g. "10:"); match index >= 2 properly
  local devices_except_loopback
  devices_except_loopback=$(ip link | grep -E '^([2-9]|[0-9]{2,}):' | grep -w UP)

  # device name is the second ":" field; drop docker bridges and skip
  # the first (primary) interface
  printf "%s\n" "${devices_except_loopback}" |
    cut -d: -f2 |
    grep -v docker |
    awk '{print $1}' |
    sed 1d
}
## Method which will wait for a certain amount of time to ensure
## there's no other APT process running. Systemd may be running an
## update process on boot of new machines or there may be a daemon
## like unattended-upgrades running which also grabs the APT lock
## file.
##
## Requires that lsof is installed.
##
## $1 :: optional path to the lock file to check, default is
## /var/lib/dpkg/lock if none is specified.
apt_wait_for_lock() {
  local file=${1-/var/lib/dpkg/lock}
  local i

  if [[ -x /usr/bin/lsof && -e "${file}" ]]; then
    for i in {0..20}; do
      # lsof succeeds while some process still holds the file open
      lsof "${file}" || break
      sleep "${i}"
    done
  fi

  # bug fix: always return success; previously a missing lock file (or
  # absent lsof) made the [[ ]] guard's non-zero status the function's
  # exit status, which aborts callers running under set -e
  return 0
}
| true
|
3be39c12cadb7e4b5b1766bef65463ee861382c4
|
Shell
|
dufferzafar/dotfiles
|
/bootstrap.sh
|
UTF-8
| 1,815
| 2.890625
| 3
|
[] |
no_license
|
# Symlink the dotfiles from this repository into $HOME and the relevant
# ~/.config locations.  Existing targets are force-replaced (ln -sf).
# Get script's folder
# http://stackoverflow.com/a/246128
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# Git
ln -sf "$DIR/.gitconfig" ~/.gitconfig
ln -sf "$DIR/.gitignore" ~/.gitignore
ln -sf "$DIR/.gitattributes" ~/.gitattributes
# Ranger
mkdir -p ~/.config/ranger
for file in $DIR/ranger/*; do
ln -sf "$file" ~/.config/ranger/"${file##*/}" # gives basename of file
done
# mpv
mkdir -p ~/.config/mpv
ln -sf "$DIR/mpv/mpv.conf" ~/.config/mpv/mpv.conf
ln -sf "$DIR/mpv/input.conf" ~/.config/mpv/input.conf
# cheat
ln -sf "$DIR/.cheatrc" ~/.cheatrc
# tmux
ln -sf "$DIR/.tmux.conf" ~/.tmux.conf
# Vim
ln -sf "$DIR/vim/.vimrc" ~/.vimrc
mkdir -p ~/.vim/colors
ln -sf "$DIR/vim/monokai.vim" ~/.vim/colors/monokai.vim
# jrnl
ln -sf "$DIR/.jrnl_config" ~/.jrnl_config
# Supercat
mkdir -p ~/.spcrc
for file in $DIR/spc/spcrc*; do
ln -sf "$file" ~/.spcrc/"${file##*/}"
done
# the silver searcher
ln -sf "$DIR/.agignore" ~/.agignore
# ptpython
mkdir -p ~/.ptpython
ln -sf "$DIR/.ptpython.py" ~/.ptpython/config.py
# KDE related stuff
KDE=~/.local/share/kservices5/
mkdir -p "$KDE/ServiceMenus"
for file in $DIR/kde/service-menus/*; do
ln -sf "$file" $KDE/ServiceMenus/"${file##*/}"
done
# NOTE: everything below this exit is intentionally disabled dead code
# (firefox/zsh/neobundle/fzf setup kept for reference).
exit
# firefox
FF=~/.mozilla/firefox/*.default
# mkdir -p "$FF/chrome"
# $(cd ff && git pull shadowfox)
ln -sf "$DIR/firefox/chrome/userChrome.css" $FF/chrome/
ln -sf "$DIR/firefox/chrome/userContent.css" $FF/chrome/
########################################### The End
# Zsh
# wget https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh -O - | sh
rm ~/.zshrc
ln -sf "$DIR/zsh/.zshrc" ~/.zshrc
# Neobundle
curl https://raw.githubusercontent.com/Shougo/neobundle.vim/master/bin/install.sh | sh
# fzf
git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf
~/.fzf/install
| true
|
72c26cec20fa7ae44140c8a8f4107ce5ab2920bf
|
Shell
|
pengeorge/GridEngineMonitor
|
/print-cpu-temp.sh
|
UTF-8
| 648
| 3
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Summarise the per-core CPU temperatures reported by lm-sensors:
# prints "<hostname> <max> <avg> <min> | <high> <crit>" on one line.
# The embedded perl scans each "Core N: +xx.x°C (high = ..., crit = ...)"
# line, tracking max/min/sum; high/crit keep the last core's values.
# NOTE: the perl program is runtime code -- do not edit it casually;
# it divides by $count, so output is undefined when no Core lines match.
temps=`sensors | perl -e '
$max = -1;
$min = 999;
$sum = 0;
$count = 0;
while(<>) {
chomp;
if (/Core/) {
if (/([\+\-].*)°C \(high = (.*), crit = (.*)\)/) {
$temp = $1;
if ($temp > $max) {
$max = $temp;
}
if ($temp < $min) {
$min = $temp;
}
$sum += $temp;
$count++;
$high = $2;
$crit = $3;
}
}
}
$avg = $sum / $count;
printf("%+8.1f°C%+8.1f°C%+8.1f°C |%9s%11s\n", $max, $avg, $min, $high, $crit);'`
echo " `hostname`$temps"
#echo '--------------------------------------------------------------'
| true
|
462dfed8c4e3db4703d93c688958b56adc29cae0
|
Shell
|
anthonyshort/dotfiles
|
/bootstrap.sh
|
UTF-8
| 1,350
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
# Update the ~/.dotfiles checkout, then (after confirmation below)
# mirror its files into the home directory.
cd "${HOME}/.dotfiles"
# Pull down the latest changes.
git pull origin master
# Check out submodules.
git submodule --quiet update --init
# return to wherever the user ran this from
cd "${OLDPWD}"
# Symlink a file from ~/.dotfiles into the home directory, replacing
# any existing target.
#   $1 :: path relative to ~/.dotfiles
#   $2 :: destination path relative to $HOME
function link() {
  local source_path="${HOME}/.dotfiles/${1}"
  local target_path="${HOME}/${2}"

  # -f replaces an existing file/link in place; -s makes a symlink
  ln -fs "${source_path}" "${target_path}"
}
function mirrorfiles() {
# Copy `.gitconfig`.
# Any global git commands in `~/.bash_profile.local` will be written to
# `.gitconfig`. This prevents them being committed to the repository.
rsync -avz --quiet ${HOME}/.dotfiles/git/gitconfig ${HOME}/.gitconfig
# Force remove the vim directory if it's already there.
if [ -e "${HOME}/.vim" ]; then
rm -rf "${HOME}/.vim"
fi
# Create the necessary symbolic links between the `.dotfiles` and `HOME`
# directory. The `bash_profile` sources other files directly from the
# `.dotfiles` repository.
link "bash/bashrc" ".bashrc"
link "bash/bash_profile" ".bash_profile"
link "bash/inputrc" ".inputrc"
link "git/gitattributes" ".gitattributes"
link "git/gitignore" ".gitignore"
link "osx" ".osx"
echo "Dotfiles update complete"
}
read -p "This will overwrite some existing files in your home directory. Are you sure? (y/n) " -n 1
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
mirrorfiles
source ~/.bash_profile
fi
| true
|
1750bdd90513efb3d2ca8668c4231c5b4a9eef8e
|
Shell
|
team-caduceus/RPi-Surveillance
|
/bugfree.sh
|
UTF-8
| 2,668
| 3.28125
| 3
|
[] |
no_license
|
case "$1" in
remove)
sudo killall raspimjpeg
sudo apt-get remove -y apache2 php5 libapache2-mod-php5 gpac motion
sudo apt-get autoremove -y
sudo rm -r /var/www/*
sudo rm /usr/local/bin/raspimjpeg
sudo rm /etc/raspimjpeg
sudo cp -r etc/rc_local_std/rc.local /etc/
sudo chmod 755 /etc/rc.local
echo "Removed everything"
;;
autostart_yes)
sudo cp -r etc/rc_local_run/rc.local /etc/
sudo chmod 755 /etc/rc.local
echo "Changed autostart"
;;
autostart_no)
sudo cp -r etc/rc_local_std/rc.local /etc/
sudo chmod 755 /etc/rc.local
echo "Changed autostart"
;;
install)
sudo killall raspimjpeg
git pull origin master
sudo apt-get install -y apache2 php5 libapache2-mod-php5 gpac motion
sudo cp -r www/* /var/www/
sudo mkdir -p /var/www/media
sudo chown -R www-data:www-data /var/www
sudo mknod /var/www/FIFO p
sudo chmod 666 /var/www/FIFO
sudo cp -r etc/apache2/sites-available/default /etc/apache2/sites-available/
sudo chmod 644 /etc/apache2/sites-available/default
sudo cp etc/apache2/conf.d/other-vhosts-access-log /etc/apache2/conf.d/other-vhosts-access-log
sudo chmod 644 /etc/apache2/conf.d/other-vhosts-access-log
sudo cp -r bin/raspimjpeg /opt/vc/bin/
sudo chmod 755 /opt/vc/bin/raspimjpeg
sudo ln -s /opt/vc/bin/raspimjpeg /usr/bin/raspimjpeg
sudo cp -r /etc/raspimjpeg /etc/raspimjpeg.bak
sudo cp -r etc/raspimjpeg/raspimjpeg /etc/
sudo chmod 644 /etc/raspimjpeg
sudo cp -r etc/rc_local_run/rc.local /etc/
sudo chmod 755 /etc/rc.local
sudo cp -r etc/motion/motion.conf /etc/motion/
sudo chmod 640 /etc/motion/motion.conf
echo "Installer finished"
;;
start)
shopt -s nullglob
video=-1
for f in /var/www/media/video_*.mp4; do
video=`echo $f | cut -d '_' -f2 | cut -d '.' -f1`
done
video=`echo $video | sed 's/^0*//'`
video=`expr $video + 1`
image=-1
for f in /var/www/media/image_*.jpg; do
image=`echo $f | cut -d '_' -f2 | cut -d '.' -f1`
done
image=`echo $image | sed 's/^0*//'`
image=`expr $image + 1`
shopt -u nullglob
sudo mkdir -p /dev/shm/mjpeg
sudo raspimjpeg -ic $image -vc $video > /dev/null &
echo "Started"
;;
stop)
sudo killall raspimjpeg
echo "Stopped"
;;
*)
echo "No option selected"
;;
esac
| true
|
dcee218cb39fab3743575ff547e1e62755316c6f
|
Shell
|
wlcx/dotfiles
|
/.zshenv
|
UTF-8
| 381
| 2.859375
| 3
|
[] |
no_license
|
#
# Defines environment variables for all sessions
#
export PATH=$HOME/.local/bin:$PATH
export EDITOR='vim'
export SUDO_EDITOR='vim'
export PAGER='less'
if [[ "$OSTYPE" == darwin* ]]; then
export BROWSER='open'
fi
# Source cargo
if [[ -s "$HOME/.cargo/env" ]]; then
source "$HOME/.cargo/env"
fi
if [[ -s "$HOME/.zshenv.local" ]]; then
source "$HOME/.zshenv.local"
fi
| true
|
444f3e81727f5daab99fd9ef4aaf95ac4b0ffef2
|
Shell
|
lisabronwyn/186-unix-bash
|
/command-line/4.3-exercises.sh
|
UTF-8
| 589
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#Q: How do the effects of cd and cd ~ differ (or do they)?
they are the same
#Q: Change to text_directory, then change to second_directory using the "one directory up" double dot operator.
cd text_directory cd ..
#Q: From wherever you are, create an empty file called nil in text_directory using whatever method you wish.
cd text_directory, touch nil
#Q: Remove nil from the previous exercises using a different path from the one you used before. (In other words, if you used the path ~/text_directory before, use something like ../text_directory or /Users//text_directory.)
../nil
| true
|
2ad076ca5d551141adc91d0971c5fbf578a000eb
|
Shell
|
end2end-network/end2end-install
|
/install.sh
|
UTF-8
| 3,706
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
function docker_install { #add docker repo, install docker
apt update
apt install -y apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable"
apt update
apt-cache policy docker-ce
apt install -y docker-ce
}
function systemctl_install { # download service file, start
cd /etc/systemd/system/
curl -fsSL "https://end2end.network/install/mqttproxy.service" | sed -e "s~\${FOLDER}~$FOLDER~" > mqttproxy.service
cd $FOLDER
systemctl daemon-reload
if [ "$AUTOSTART" = 1 ]; then
printf "Adding to autostart\n"
systemctl enable mqttproxy
elif [ ! "$QUIET" = 1 ]; then
read -p "Add to autostart? [yN]" -n 1 -r
if [[ $REPLY =~ ^[Yy]$ ]]; then
systemctl enable mqttproxy
printf "\nService is in /etc/systemctl/system/mqttproxy.service\n"
fi
fi
printf "Starting\n"
systemctl start mqttproxy
}
if [ ! "$EUID" = 0 ]; then
printf "Please run as root\n"
exit
fi
FOLDER=/opt/mqttproxy
INTERVAL=30
BAN_ATTEMPTS=5
BAN_TIME=12h
BAN_FIND_INTERVAL=10m
OPTS=$(getopt -o 'f:qhisa' -l 'interval:,ban_attempts:,ban_time:,ban_find_interval:' --name "$0" -- "$@")
if [ $? != 0 ] ; then echo "Failed to parse options...exiting."; exit 1 ; fi
eval set -- "$OPTS"
while true ; do
case "$1" in
-f)FOLDER="$2"
shift 2;;
-h)printf "
-f -- install folder\
\n-h -- this message\
\n-q -- quiet, doesn't install docker or systemctl service\
\n-i -- install docker, don't ask\
\n-s -- install systemctl service file\
\n-a -- add systemctl service to autostart\
\n# These options set container environment\
\n--ban_attempts -- Failet ssh login attempts before ban\
\n--ban_time -- For how long to ban, fail2ban format\
\n--ban_find_interval -- How colse one to another should be failed attempts, fail2ban format\n"
shift
exit;;
-q)QUIET=1
shift;;
-i)INSTALL=1
shift;;
-s)SYSTEMCTL=1
shift;;
-a)AUTOSTART=1
shift;;
--interval)
INTERVAL="$2"
shift;;
--ban_attempts)
BAN_ATTEMPTS="$2"
shift;;
--ban_time)
BAN_TIME="$2"
shift;;
--ban_find_interval)
BAN_FIND_INTERVAL="$2"
shift;;
--)shift
break;;
* )echo "Internal error!"
exit 1;;
esac
done
mkdir -p $FOLDER
cd $FOLDER
printf "Installing in $FOLDER\n"
if [ "$INSTALL" = 1 ]; then
docker_install
elif [ ! "$QUIET" = 1 ]; then
read -p "Install Docker? [yN]" -n 1 -r
echo
if [[ "$REPLY" =~ ^[Yy]$ ]]; then
docker_install
fi
fi
if ! command -v docker &> /dev/null
then
printf "docker could not be found\n"
exit
fi
DOCKER=$(command -v docker)
( set -o posix ; set ) | grep "INTERVAL\|BAN_ATTEMPTS\|BAN_TIME\|BAN_INTERVAL" > ./env
if command -v systemctl &> /dev/null
then
if [ "$SYSTEMCTL" = 1 ]; then
systemctl_install
elif [ ! "$QUIET" = 1 ]; then
read -p "Add systemctl service? [yN]" -n 1 -r
echo
if [[ "$REPLY" =~ ^[Yy]$ ]]; then
systemctl_install
fi
fi
else
printf "No systemctl\n"
fi
printf "docker stop mqttproxy;
$DOCKER pull niksaysit/mqttproxy;
$DOCKER run --rm \\
--name=mqttproxy \\
-p 2022:22 \\
-v $FOLDER/keys/:/opt/keys/ \\
-v $FOLDER/ag/work/:/opt/adguardhome/work/ \\
-v $FOLDER/ag/conf/:/opt/adguardhome/conf/ \\
--env-file ./env \\
--cap-add=NET_ADMIN \\
niksaysit/mqttproxy" > ./mqttproxy.sh
printf "Launcher is in $FOLDER/mqttproxy.sh"
printf "\n"
| true
|
8268a7b59e39ebbd1cb936eb7f25c015a9aa71c3
|
Shell
|
ccmbioinfo/dti-processing
|
/registration.sh
|
UTF-8
| 420
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
atlas=/hpf/projects/brudno/data/language/atlas-aal/ROI_MNI_V5.nii
mni=$FSLDIR/data/standard/MNI152_T1_2mm_brain
for D in */
do
cd $D
flirt -in corr_brain.nii.gz -ref $mni -out corr_mni -omat corr_mni.mat
convert_xfm -omat mni_to_corr.mat -inverse corr_mni.mat
flirt -in $atlas -applyxfm -init mni_to_corr.mat -ref corr_brain.nii.gz -out aal_to_corr.nii.gz -interp nearestneighbour
cd ..
done
| true
|
497b82f2340ad1f4b664483f7eb9f448c13793c7
|
Shell
|
christophschubert/cp-docker-images-reloaded
|
/base-image/include/etc/confluent/docker/bash-functions.sh
|
UTF-8
| 820
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#fail when a subprogram fails
set -e
# Usage:
function exit_if_not_set {
param=$1
if [[ -z ${!param} ]]
then
echo " Required environment variable $param not set"
exit 1
fi
}
function warn_jmx_rmi_port {
param=$1
RMI_PORT="com.sun.management.jmxremote.rmi.port"
if [[ -n ${!param} ]]; then
if [[ ! ${!param} == *"$RMI_PORT"* ]]; then
echo "${param} should contain '$RMI_PORT' property. It is required for accessing the JMX metrics externally."
fi
fi
}
# Usage
# kafka_ready numBrokers timeout pathToConfig
function kafka_ready {
if java $KAFKA_OPTS -cp "$CUB_CLASSPATH" "io.confluent.admin.kafka.health.KafkaReady" $1 $2 $3
then
echo "Kafka ready: found at least $1 broker(s)."
else
exit 1
fi
}
function log_status {
echo "===> ${1}..."
}
| true
|
4b662302d5cdbec0ff9ff41f5b1d598d172033c0
|
Shell
|
sankpalvrishabhz/linux-content
|
/assignments/Day04/specific_hours.sh
|
UTF-8
| 500
| 2.921875
| 3
|
[] |
no_license
|
echo -e "\n a) View access.log without opening it using editor\n"
cat ../access.log
echo -e "\n b) Print web responce code field which has given timestamp\n"
grep 20/Sep/2019 ../access.log | awk '{print $9}'
echo -e "\n c) Sort extracted responce code and count it\n"
cat ../access.log | awk '{print $9}' | sort
echo -e "\ncount\n"
cat ../access.log | awk '{print $9}' | grep -c '^'
echo -e "\n d) Print 4 unique responce code count\n"
cat ../access.log | awk '{print $9}' | sort | uniq | head -n 4
| true
|
c3724ddd243c242f7980fcaa8dfd97da18187005
|
Shell
|
Yasumoto/jimmeh
|
/bootstrap.sh
|
UTF-8
| 539
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
set -eux
VERSION="1.9.0"
HARDWARE="$(uname -m)"
if [ "$HARDWARE" = "x86_64" ]; then
HARDWARE="amd64"
fi
if [ -f /etc/lsb-release ]; then
OS="linux"
else
OS="darwin"
fi
FILENAME="https://github.com/bazelbuild/bazelisk/releases/download/v${VERSION}/bazelisk-${OS}-${HARDWARE}"
if [ -f "./${FILENAME}" ]; then
rm "./${FILENAME}"
fi
curl -L -o ./bazelisk "${FILENAME}"
chmod +x ./bazelisk
# https://gitlab.com/CalcProgrammer1/OpenRGB#linux
sudo apt install git build-essential libusb-1.0-0-dev libhidapi-dev
| true
|
9a4e1e8ab3ac3f5f9ff7ccca8901759e7c9b2d3d
|
Shell
|
alvelcom/.dots
|
/zshrc
|
UTF-8
| 3,103
| 2.90625
| 3
|
[] |
no_license
|
if [ -r /proc/loadavg ] && [[ `cut -f1 -d' ' /proc/loadavg` > 3 ]]
then
echo "zsh running in failsafe mode"
export FAIL_SAFE=1
else
export FAIL_SAFE=""
fi
# Set up the prompt
autoload -Uz promptinit
promptinit
autoload colors
colors
# Allow for functions in the prompt.
setopt PROMPT_SUBST
# VCS_INFO
if [ -z "$FAIL_SAFE" ]
then
autoload -Uz vcs_info
precmd () { vcs_info }
fi
# Set the prompt.
if ! ps -p $PPID | grep -q "mc"; then
PROMPT='%B[%F{red}%n%b%f@%B%F{cyan}%m%b%f%B]%b '
RPROMPT='${vcs_info_msg_0_} %F{green}%2~%f'
else
PROMPT="> "
fi
# Force emacs key bindings
bindkey -e
autoload -U edit-command-line
zle -N edit-command-line
bindkey '^xe' edit-command-line
bindkey '^x^e' edit-command-line
# Keep 30000 lines of history within the shell and save it to ~/.zsh_history:
HISTSIZE=30000
SAVEHIST=30000
HISTCONTROL=ignorespace:ignoredups
HISTFILE=~/.zsh_history
# Appends every command to the history file once it is executed
# setopt inc_append_history
# Reloads the history whenever you use it
# setopt share_history
# Remove dups from history
setopt HIST_SAVE_NO_DUPS
# Use modern completion system
autoload -Uz compinit
compinit
zstyle ':completion:*' auto-description 'specify: %d'
zstyle ':completion:*' completer _expand _complete _correct _approximate
zstyle ':completion:*' format 'Completing %d'
zstyle ':completion:*' group-name ''
zstyle ':completion:*' menu select=2
zstyle ':completion:*:default' list-colors ${(s.:.)LS_COLORS}
zstyle ':completion:*' list-colors ''
zstyle ':completion:*' list-prompt %SAt %p: Hit TAB for more, or the character to insert%s
zstyle ':completion:*' matcher-list '' 'm:{a-z}={A-Z}' 'm:{a-zA-Z}={A-Za-z}' 'r:|[._-]=* r:|=* l:|=*'
zstyle ':completion:*' menu select=long
zstyle ':completion:*' select-prompt %SScrolling active: current selection at %p%s
zstyle ':completion:*' use-compctl false
zstyle ':completion:*' verbose true
zstyle ':completion:*:*:kill:*:processes' list-colors '=(#b) #([0-9]#)*=0=01;31'
zstyle ':completion:*:kill:*' command 'ps -u $USER -o pid,%cpu,tty,cputime,cmd'
if [ -z $FAIL_SAFE ]
then
zstyle ':vcs_info:*' enable hg git svn
zstyle ':vcs_info:(hg*|git*):*' check-for-changes true
zstyle ':vcs_info:*' formats "%F{magenta} %c%u %b%f"
zstyle ':vcs_info:*' actionformats "%F{magenta} ♯%a%c%u %b%f"
zstyle ':vcs_info:*:*' unstagedstr "unst"
zstyle ':vcs_info:*:*' stagedstr "st"
fi
if which vim &> /dev/null; then
export EDITOR=vim
alias vi=vim
elif which vi &> /dev/null; then
export EDITOR=vi
fi
stat_bin=stat
if which gstat &> /dev/null; then
stat_bin=gstat
fi
alias ls="ls"
alias ll="ls -l"
alias grep="grep --color"
export LANG=en_US.UTF-8
export PATH=~/.cargo/bin:~/.cabal/bin:~/.rvm/bin:~/.bin:$PATH:/bin:/usr/bin:/usr/local/bin
export GOPATH=~/go
export GOBIN=~/go/bin
export PATH="$PATH:$GOBIN"
export PATH=$PATH:/usr/local/opt/go/libexec/bin
if type ag &> /dev/null; then
export FZF_DEFAULT_COMMAND='ag -p ~/.gitignore -g ""'
fi
export GPG_TTY="$(tty)"
[ -r ~/.dots/zshrc_local ] && source ~/.dots/zshrc_local
| true
|
37d8bd9e75340a61f0ca256f2a4d0dae9bf3dad9
|
Shell
|
and3rsonls/Linux-From-Scratch
|
/lfs-systemd/stage4/cpio
|
UTF-8
| 958
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
TMP=${TMP:-/tmp/work}
LOG=${LOG:-$HOME/logs/stage4}
SRC=${SRC:-/sources}
name=cpio
version=2.13
rm -rf $TMP
mkdir -p $TMP $LOG
tar xf $SRC/$name-$version.tar.bz2 -C $TMP
{ time \
{
cd $TMP/$name-$version
sed -i '/The name/,+2 d' src/global.c
./configure --prefix=/usr \
--bindir=/bin \
--enable-mt \
--with-rmt=/usr/libexec/rmt
make
makeinfo --html -o doc/html doc/cpio.texi
makeinfo --html --no-split -o doc/cpio.html doc/cpio.texi
makeinfo --plaintext -o doc/cpio.txt doc/cpio.texi
make install
install -v -m755 -d /usr/share/doc/$name-$version/html
install -v -m644 doc/html/* \
/usr/share/doc/$name-$version/html
install -v -m644 doc/cpio.{html,txt} \
/usr/share/doc/$name-$version
}
} 2>&1 | tee $name.log
[ $PIPESTATUS = 0 ] && mv $name.log $LOG || exit $PIPESTATUS
rm -fr $TMP
| true
|
e8bbfd4c07806c21c9e798feea31761631ca4c66
|
Shell
|
imatharv/Shell-Programming-Constructs
|
/array/second-largest-smallest-with-sort.sh
|
UTF-8
| 350
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
max=10
count=0
while [[ $count -lt $max ]]
do
number[$count]=$((RANDOM%999))
count=`expr $count + 1`;
done
echo "The 10 array values : ${number[@]}"
x=$(echo '%s\n' "${no[@]}" | sort -n | head -2 | tail -1)
echo "The 2nd small No : $x"
y=$(echo '%s\n' "${number[@]}" | sort -n | tail -2 | head -1)
echo "The 2nd large No : $y"
| true
|
efa479833c56704752ae3f9e794eb10f6182b448
|
Shell
|
DRSC-FG/biolitmine
|
/start_xml_1_batch_no_slurm.sh
|
UTF-8
| 2,668
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# This is the first step Extract the xml files using R
# Places them in the data_extracted directory
# Configuration files in rscripts/
# rscript/config_baseline.R and rscript/config_update.R
#
# Default Directories, create if needed
#
mkdir -p data_extracted/baseline
mkdir -p data_extracted/baseline/detail/
mkdir -p data_extracted/baseline/author
mkdir -p data_extracted/baseline/mesh
mkdir -p data_extracted/baseline/brief
mkdir -p data_extracted/updatefiles
mkdir -p data_extracted/updatefiles/detail
mkdir -p data_extracted/updatefiles/author
mkdir -p data_extracted/updatefiles/mesh
mkdir -p data_extracted/updatefiles/brief
#
# Run R script.
#
#
run_xml_extractions_1_no_slurm.sh
run_xml_extractions_1_no_slurm.sh detail rscripts/config_update.R 1 1
run_xml_extractions_1_no_slurm.sh mesh rscripts/config_update.R 1 1
run_xml_extractions_1_no_slurm.sh brief rscripts/config_update.R 1 1
run_xml_extractions_1_no_slurm.sh author rscripts/config_update.R 1 8
run_xml_extractions_1_no_slurm.sh author rscripts/config_update.R 2 8
run_xml_extractions_1_no_slurm.sh author rscripts/config_update.R 3 8
run_xml_extractions_1_no_slurm.sh author rscripts/config_update.R 4 8
run_xml_extractions_1_no_slurm.sh author rscripts/config_update.R 5 8
run_xml_extractions_1_no_slurm.sh author rscripts/config_update.R 6 8
run_xml_extractions_1_no_slurm.sh author rscripts/config_update.R 7 8
run_xml_extractions_1_no_slurm.sh author rscripts/config_update.R 8 8
run_xml_extractions_1_no_slurm.sh author rscripts/config_update.R 9 9
# Baseline (the number at the end are process #, total number processes)
run_xml_extractions_1_no_slurm.sh detail rscripts/config_baseline.R 1 2
run_xml_extractions_1_no_slurm.sh detail rscripts/config_baseline.R 2 2
run_xml_extractions_1_no_slurm.sh mesh rscripts/config_baseline.R 1 2
run_xml_extractions_1_no_slurm.sh mesh rscripts/config_baseline.R 2 2
run_xml_extractions_1_no_slurm.sh brief rscripts/config_baseline.R 1 2
run_xml_extractions_1_no_slurm.sh brief rscripts/config_baseline.R 2 2
run_xml_extractions_1_no_slurm.sh author rscripts/config_baseline.R 1 8
run_xml_extractions_1_no_slurm.sh author rscripts/config_baseline.R 2 8
run_xml_extractions_1_no_slurm.sh author rscripts/config_baseline.R 3 8
run_xml_extractions_1_no_slurm.sh author rscripts/config_baseline.R 4 8
run_xml_extractions_1_no_slurm.sh author rscripts/config_baseline.R 5 8
run_xml_extractions_1_no_slurm.sh author rscripts/config_baseline.R 6 8
run_xml_extractions_1_no_slurm.sh author rscripts/config_baseline.R 7 8
run_xml_extractions_1_no_slurm.sh author rscripts/config_baseline.R 8 8
| true
|
cd15d486f9325bb39e1ed9002b5d83a92734805a
|
Shell
|
ChipWolf/heph-mc-service
|
/home/minecraft/srvctrl.sh
|
UTF-8
| 5,615
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# version 0.2.0 2015-05-18
# VARS
USERNAME="minecraft"
SERVICE='minecraft_server.jar'
MCPATH="/home/$USERNAME/minecraft"
BACKUPPATH="/home/$USERNAME/backup"
CHECKSERVER="/home/$USERNAME/checksrv"
CRASHLOG_DB_PATH='/home/$USERNAME/crashdb'
JAVA_HOME="/usr/bin/java"
MEMORY_OPTS="-Xmx4G -Xms4G"
JAVA_OPTIONS=""
INVOCATION="${JAVA_HOME}/bin/java ${MEMORY_OPTS} ${JAVA_OPTIONS} -jar $SERVICE nogui"
BACKUPARCHIVEPATH=$BACKUPPATH/archive
BACKUPDIR=$(date +%H%M_b%Y_%N)
PORT=$(grep server-port $MCPATH/server.properties | cut -d '=' -f 2)
if [ -z "$PORT" ];then PORT=25565;fi
# END VARS
if [ $(whoami) != $USERNAME ];then su $USERNAME -l -c "$(readlink -f $0) $*";exit $?;fi
heph_startmonitor() { if [ -z $CHECKSERVER ];then echo "MONITOR: ACTIVE";/usr/bin/daemon --name=minecraft_checkserver -- $JAVA_HOME/bin/java -cp $CHECKSERVER chksrv localhost $PORT;fi;}
heph_stopmonitor() { if [ -z $CHECKSERVER ];then /usr/bin/daemon --name=minecraft_checkserver --stop;fi;}
heph_dumpcrash() { if is_running;then cp $MCPATH/crash-reports/* $CRASHLOG_DB_PATH;mv $MCPATH/crash-reports/* $MCPATH/crash-reports.archive/;fi;}
heph_exec() { if is_running;then screen -p 0 -S $(cat $MCPATH/screen.name) -X stuff "$@$(printf \\r)";else echo "NOCOMMAND: $SERVICE NORUN";fi;}
is_running() {
if [ ! -e $MCPATH/java.pid ];then return 1;fi
pid=$(cat $MCPATH/java.pid);if [ -z $pid ];then return 1;fi
ps -eo "%p" | grep "^\\s*$pid\\s*\$" > /dev/null
return $?
}
heph_start() {
if is_running; then
echo "FAILSTART: $SERVICE RUNNING"
else
echo "$SERVICE START"
cd $MCPATH
screen -dmS heph$PORT $INVOCATION &
for (( i=0; i < 10; i++ )); do
screenpid=$(ps -eo '%p %a' | grep -v grep | grep -i screen | grep heph$PORT | awk '{print $1}')
javapid=$(ps -eo '%P %p' | grep "^\\s*$screenpid " | awk '{print $2}')
if [[ -n "$screenpid" && -n "$javapid" ]];then break;fi;sleep 1
done
if [[ -n "$screenpid" && -n "$javapid" ]]; then
echo "$SERVICE RUNNING"
echo "$javapid" > $MCPATH/java.pid
echo "$screenpid.heph$PORT" > $MCPATH/screen.name
else
echo "FAILSTART: $SERVICE"
fi
fi
}
heph_saveoff() {
if is_running; then
echo "SUSPENDSAVE: $SERVICE RUNNING"
heph_exec "say §k§9ch §r§cHiding §cPorn §cStash §r§k§9ip"
heph_exec "say §a> §agoing §aread-only"
heph_exec "save-off"
heph_exec "save-all"
sync
sleep 10
else
echo "FAILSAVESUSPEND: $SERVICE NORUN"
fi
}
heph_saveon() {
if is_running; then
echo "ENABLEDSAVE: $SERVICE RUNNING"
heph_exec "save-on"
heph_exec "say §k§9ch §r§cMom's §cGone §r§k§9ip"
heph_exec "§a> §agoing §aread-write"
else
echo "FAILSAVERESUME: $SERVICE NORUN"
fi
}
heph_kill() {
pid=$(cat $MCPATH/java.pid)
echo "TERM PID:$pid"
kill $pid;for (( i=0;i < 10;i++ ));do is_running || break;sleep 1;done
if is_running;then echo "FAILTERM: KILLING $SERVICE";kill -SIGKILL $pid;echo "$SERVICE K.O.";else echo "$SERVICE TERM";fi
}
heph_stop() {
if is_running; then
echo "STOPPING: $SERVICE RUNNING"
heph_exec "say §k§9ch §cSelf-Destruct §cSequence §cStart §k§9ip"
heph_exec "say §a> §ashutdown §at-minus §a± §a300s"
sleep 240
heph_exec "say §a> §ashutdown §at-minus §a± §a60s"
sleep 30
heph_exec "say §a> §ashutdown §at-minus §a± §a30s"
heph_exec "save-all"
sleep 20
heph_exec "say §a> §ashutdown §at-minus §a± §a10s"
sleep 5
heph_exec "say §a> §ashutdown §at-minus §a± §a5s"
sleep 1
heph_exec "say §a> §ashutdown §at-minus §a± §a4s"
sleep 1
heph_exec "say §a> §ashutdown §at-minus §a± §a3s"
sleep 1
heph_exec "say §a> §ashutdown §at-minus §a± §a2s"
heph_exec "stop"
heph_exec "say §a> §ashutdown §at-minus §a± §a1s"
for (( i=0;i < 20;i++ ));do is_running || break;sleep 1;done
else
echo "$SERVICE NORUN"
fi
if is_running;then echo "NOCLEAN: $SERVICE RUNNING";heph_kill;else echo "$SERVICE DOWN";fi
rm $MCPATH/java.pid;rm $MCPATH/screen.name
}
heph_backup() {
echo "BACKUP COMMENCE"
[ -d "$BACKUPPATH/$BACKUPDIR" ] || mkdir -p "$BACKUPPATH/$BACKUPDIR"
rdiff-backup $MCPATH "$BACKUPPATH/$BACKUPDIR"
echo "BACKUP COMPLETE"
}
heph_thinoutbackup() {
archivedate=$(date --date="3 days ago")
echo "THINBACKUP since $archivedate"
archivedateunix=$(date --date="$archivedate" +%s)
archivesourcedir=$BACKUPPATH/$(date --date="$archivedate" +%b_%Y)
archivesource=$archivesourcedir/rdiff-backup-data/increments.$(date --date="$archivedate" +%Y-%m-%dT%H):0*.dir
archivesource=$(echo $archivesource)
archivedest=$BACKUPARCHIVEPATH/$(date --date="$archivedate" +%H%M_b%Y_%N)
if [[ ! -f $archivesource ]]; then
echo "NOPE"
else
tempdir=$(mktemp -d)
if [[ ! $tempdir =~ ^/tmp ]]; then
echo "INVALID DIR $tempdir"
else
rdiff-backup $archivesource $tempdir
rdiff-backup --current-time $archivedateunix $tempdir $archivedest
rm -R "$tempdir"
rdiff-backup --remove-older-than 3D --force $archivesourcedir
echo "DONE"
fi
fi
}
case "$1" in
start)
if heph_start;then heph_startmonitor;fi
;;
stop)
heph_stopmonitor
heph_stop
heph_dumpcrash
;;
restart)
heph_stopmonitor
heph_stop
heph_dumpcrash
if heph_start;then heph_startmonitor;fi
;;
backup)
heph_saveoff
heph_backup
heph_saveon
heph_thinoutbackup
;;
exec)
shift
heph_exec "$@"
;;
dumpcrashlogs)
heph_dumpcrash
;;
status)
if is_running;then echo "$SERVICE RUNNING";else echo "$SERVICE NORUN";fi
;;
*)
echo "Usage: $(readlink -f $0) {start|stop|restart|backup|exec|dumpcrashlogs|status}"
exit 1
;;
esac
exit 0
| true
|
33a4bc539f4612d56f05e3f327a91f94cb3532db
|
Shell
|
viniciusferrao/sysadmintoolkit
|
/enableSite.sh
|
UTF-8
| 1,609
| 3.515625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Exit in case of failure
set -e
FQDN=$1
WWW_PATH=/var/www/html
ACME_SH_PATH=/root/.acme.sh
echo Creating directory
mkdir -p $WWW_PATH/$FQDN
echo Generating SSL certificates using the default website
$ACME_SH_PATH/acme.sh --issue --stateless -k ec-384 -d $FQDN -d www.$FQDN
echo Installing certificates
$ACME_SH_PATH/acme.sh --install-cert -d $FQDN --ecc --key-file /etc/pki/tls/private/$FQDN.key --fullchain-file /etc/pki/tls/certs/$FQDN.cer --reloadcmd "systemctl reload nginx"
echo Generating nginx configuration
cat > /etc/nginx/sites.d/$FQDN.conf << EOF
server {
listen 80;
server_name $FQDN
www.$FQDN;
# Let's encrypt
# Stateless support is only needed here since curl reads over http
include conf.d/letsencrypt.conf;
location / {
return 301 https://$FQDN$request_uri;
}
}
server {
listen 443 ssl http2;
server_name www.$FQDN;
ssl_certificate /etc/pki/tls/certs/$FQDN.cer;
ssl_certificate_key /etc/pki/tls/private/$FQDN.key;
location / {
return 301 https://$FQDN$request_uri;
}
}
server {
listen 443 ssl http2;
server_name $FQDN;
# Root path of the site
root $WWW_PATH/$FQDN;
access_log /var/log/nginx/$FQDN.access.log;
ssl_certificate /etc/pki/tls/certs/$FQDN.cer;
ssl_certificate_key /etc/pki/tls/private/$FQDN.key;
ssl_stapling on;
ssl_trusted_certificate /etc/pki/tls/certs/lets-encrypt-ca.cer;
resolver 146.164.29.4 146.164.29.3;
# Include security SSL features
include conf.d/sslsec.conf;
# Include custom error pages
include conf.d/errors.conf;
}
EOF
echo Reloading nginx
systemctl restart nginx
echo Done
| true
|
1afac30e6d9ff3939298c798785c8e0edfa64e66
|
Shell
|
zhenkun/crete-dev
|
/misc/scripts/parse-result/compare_test.sh
|
UTF-8
| 605
| 2.859375
| 3
|
[
"BSD-2-Clause-Views"
] |
permissive
|
REF_1=$1
REF_2=$2
PROGRAMS="base64
basename
cat
cksum
comm
cut
date
df
dircolors
dirname
echo
env
expand
expr
factor
fmt
fold
head
hostid
id
join
logname
ls
nl
od
paste
pathchk
pinky
printenv
printf
pwd
readlink
seq
shuf
sleep
sort
stat
sum
sync
tac
tr
tsort
uname
unexpand
uptime
users
wc
whoami
who"
# PROGRAMS="base64
# basename
# cat
# cksum
# comm
# cut
# date
# df
# dircolors"
for prog in $PROGRAMS
do
diff -qr $REF_1/auto_$prog.xml/test-case/ $REF_2/auto_$prog.xml/test-case/ | grep diff | wc -l | \
{ read diff_count; test $diff_count -ne 0 && printf "$prog\t\t$diff_count\n"; }
done
| true
|
1362af7277628c8c6e9c905160a0ff0c73b65e7e
|
Shell
|
data-enthusiast-0/chicago-taxi
|
/statsProject/processData.sh
|
UTF-8
| 2,387
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Ashish Agarwal (ashish.agarwal.chicagobooth.edu)
#
# File to process sample data - Taxi_Data_Sample_*.csv
#
# Remove data where trip Fare=0; compute
# check present working directory
# echo $PWD
# input file
# input="/Users/ashish/Documents/Workspace/statsProject/sampleData.csv"
input="Taxi_Data_Sample_2013_1000.csv"
# input line header
# Trip ID, Trip Date, Trip Time, Trip Month, Trip Year, Trip Seconds, Trip Miles, Trip Fare, Trip Tip, Trip Extras, Trip Toll, Trip Total, Payment Type
# 8b072199064a641d2b1092308d8112076d6b3c3f,11/14/2013,08:45:00,11,2013,300,0,5.45,2.00,0.00,0.00,7.45,Credit Card
# output file
output="Taxi_Data_Reg2_2013.csv"
# input line header
# Trip ID, Trip Date, Trip Time, Trip Month, Trip Year, Trip Seconds, Trip Miles, Trip Fare, Trip Tip, Trip Extras, Trip Toll, Trip Total, Payment Type
# 8b072199064a641d2b1092308d8112076d6b3c3f,11/14/2013,08:45:00,11,2013,300,0,5.45,2.00,0.00,0.00,7.45,Credit Card
# $line,$TripTipPercent,$Payment
# Declare Variables
Payment=0
TripTipPercent=0
TripFareCheck=0
# Read File line-by-line
while read line; do
# reading each line
# echo "Line No. $n : $line"
n=$((n+1))
# check if line can be read
# echo -e $line
# Print the variables of interest
# echo -e "$TripID,$TripStartDate,$TripStartTime,$TripMonth,$TripYear,$TripSeconds,$TripMiles,$TripFare,$TripTips,$TripTolls,$TripExtras,$TripTotal,$PaymentType" >> $output
TripFare=`echo $line | awk 'BEGIN{FS=","; OFS=","} {print $8}'`
TripTips=`echo $line | awk 'BEGIN{FS=",";OFS=","} {print $9}'`
# if (( $TripFare == 0 ));
TripFareCheck=$(echo "$TripFare!=0" | bc)
TripTipCheck=$(echo "$TripTips!=0" | bc)
# echo $TripFareCheck
if [[ "$TripFareCheck" -eq "0" ]] || [[ "$TripTipCheck" -eq "0" ]]
then
echo -e 'Fare OR Tip is 0' ;
else
# Compute Tip Percent
TripTipPercent=$(echo "scale=4;$TripTips*100/$TripFare" | bc)
# Check Math
# echo -e "Percent=$TripTipPercent"
# Compute Payment
PaymentType=`echo $line | awk 'BEGIN{FS=","; OFS=","} {print $13}'`
if [[ "$PaymentType" == "Cash" ]];
then
Payment=0
else
Payment=1
fi
# echo -e "$PaymentType,$Payment"
# Save output to File
echo -e "$line,$TripTipPercent,$Payment" >> $output
fi
done < $input
| true
|
ca538c8ad0c691c2a91a70fe7596139bc48897af
|
Shell
|
mitchellurgero/BashScripts
|
/Backup Scripts/backup-www.sh
|
UTF-8
| 728
| 3.78125
| 4
|
[] |
no_license
|
#! /bin/bash
# Make the following DIR's:
# /temp
# /backups
## START CONFIG
TIMESTAMP=$(date +"%F")
BACKUP_DIR=/temp/My-Backup-$TIMESTAMP
MYSQL_USER="USERNAME"
MYSQL=/usr/bin/mysql
MYSQL_PASSWORD="PASSWORD"
MYSQLDUMP=/usr/bin/mysqldump
DATABASE=DB_TO_BACKUP
## END CONFIG
mkdir -p "$BACKUP_DIR/mysql"
$MYSQLDUMP --force --opt --user=$MYSQL_USER -p=$MYSQL_PASSWORD $DATABASE | gzip > "$BACKUP_DIR/mysql/$DATABASE.gz"
mkdir -p "$BACKUP_DIR/web_dir"
SRCDIR=/var/www/
DESTDIR=$BACKUP_DIR/web_dir/
FILENAME=My-WWW-Backup-$TIMESTAMP.tgz
tar --create --gzip --file=$DESTDIR$FILENAME $SRCDIR
tar --create --gzip --file=/backups/My-Backup-$TIMESTAMP.tgz $BACKUP_DIR
rm -rf /temp/*
wait
echo "Backup of DB and Web Directory Complete!"
| true
|
b627287e04e9a09151323475906f487ba027292c
|
Shell
|
lightster/pier-11
|
/bin/vagrant/install-rvm.sh
|
UTF-8
| 354
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
if ! which rvm ; then
apt-get update -qq -y
apt-get install -qq -y software-properties-common
apt-add-repository -y ppa:rael-gc/rvm 2>&1
apt-get update -qq -y
apt-get install -qq -y rvm
usermod -aG rvm ubuntu
source /etc/profile.d/rvm.sh
rvm install --quiet-curl 2.4.1
rvm use 2.4.1
gem install bundler -q
fi
| true
|
8674b996f686109508cf9705bc81647c6564fcda
|
Shell
|
anthonee619/dotfiles
|
/dotfiles_setup.sh
|
UTF-8
| 922
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Link dotfiles from ~/.dotfiles into their live locations (~/.config, ~/.bin,
# top-level ~/.<file> entries, and the lightdm system config).
#
# Desktop packages expected on the machine (informational):
#   neovim, docker, nodejs, i3, lightdm, udiskie, fonts, light (AUR), feh,
#   spotifyd; add the user to the video, audio and docker groups.
set -e

dotfolder="${HOME}/.dotfiles"

# Directories that must exist before we can link into them.
# mkdir -p so a rerun of the script does not fail on existing dirs.
directories=(".bin")
for direc in "${directories[@]}"; do
  mkdir -p "${HOME}/${direc}"
done

# lightdm — NOTE(review): the destination looks like a typo for
# /etc/lightdm/lightdm.conf; kept as-is pending confirmation.
sudo ln -si "${dotfolder}/lightdm/lightdm.conf" /etc/lightdm/lightdm

# Link every tracked config directory into ~/.config.
for configFolders in "$dotfolder"/config/*; do
  ln -si "${configFolders}" "${HOME}/.config"
done

# Personal scripts, then top-level dotfiles (home/foo -> ~/.foo).
ln -s "$dotfolder/bin" "${HOME}/.bin"
for file in "$dotfolder"/home/*; do
  ln -si "$file" "$HOME/.${file##*/}"
done
| true
|
3e4f057c23d05ae1ad0288805743a9b6b0fa3c07
|
Shell
|
Jamol/Build-scripts
|
/ffmpeg/build-ffmpeg-android.sh
|
UTF-8
| 1,591
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
# Cross-compile FFmpeg shared libraries for Android (armv7-a, API level 24)
# on a macOS host, linking against a prebuilt x264, then collect the
# resulting versioned .so files under ./lib/android.
#
# Requires: $ANDROID_NDK_HOME pointing at an NDK with the GCC 4.9 toolchain.
CURRENTPATH=`pwd`
CC_VER=4.9
NDK_ROOT=$ANDROID_NDK_HOME
# darwin-x86_64: this script assumes a macOS build host.
PREBUILT=$NDK_ROOT/toolchains/arm-linux-androideabi-$CC_VER/prebuilt/darwin-x86_64
PLATFORM=$NDK_ROOT/platforms/android-24/arch-arm
CPU=armv7-a
# NOTE(review): OPTIMIZE_CFLAGS and ADDITIONAL_CONFIGURE_FLAG are defined but
# never used below.
OPTIMIZE_CFLAGS="-mfloat-abi=softfp -mfpu=vfpv3-d16 -marm -march=$CPU "
PREFIX=$(pwd)/android/$CPU
ADDITIONAL_CONFIGURE_FLAG=
ADDI_CFLAGS="-DANDROID -DNDEBUG -marm -march=$CPU"
# Location of the prebuilt x264 headers/libraries (relative to this dir).
X264_DIR=../x264
# NOTE(review): the first --prefix=. is immediately overridden by
# --prefix=$PREFIX on the next line.
./configure --prefix=. \
--prefix=$PREFIX \
--enable-gpl \
--enable-libx264 \
--disable-decoder=vp9 \
--enable-shared \
--disable-static \
--enable-pthreads \
--disable-stripping \
--disable-doc \
--disable-programs \
--disable-ffmpeg \
--disable-ffplay \
--disable-ffprobe \
--disable-avdevice \
--disable-network \
--enable-thumb \
--enable-cross-compile \
--extra-libs="-lgcc" \
--cc=$PREBUILT/bin/arm-linux-androideabi-gcc \
--cross-prefix=$PREBUILT/bin/arm-linux-androideabi- \
--nm=$PREBUILT/bin/arm-linux-androideabi-nm \
--target-os=android \
--arch=arm \
--cpu=$CPU \
--sysroot=$PLATFORM \
--extra-cflags="-I$X264_DIR -Os -fPIC $ADDI_CFLAGS" \
--disable-asm \
--enable-neon \
--extra-ldflags="-Wl,-rpath-link=$PLATFORM/usr/lib \
-L$PLATFORM/usr/lib \
-L$X264_DIR/lib/android \
-nostdlib -lc -lm -ldl -llog"
make
# Copy the built shared libraries into a single output directory.
# NOTE(review): "OUTLIBPAHT" is a typo for OUTLIBPATH, but it is used
# consistently, so the script works.
OUTLIBPAHT="${CURRENTPATH}/lib/android"
mkdir -p ${OUTLIBPAHT}
MODULES="libavcodec libavformat libavutil libswresample libswscale"
for MODULE in ${MODULES}
do
cp ${CURRENTPATH}/${MODULE}/${MODULE}-*.so ${OUTLIBPAHT}
done
| true
|
43fe2665b3ce9ace2c38455a5989ac961d384b85
|
Shell
|
jiazemin/docker-hadoop-eco-sys
|
/hadoop/scripts/bin/update_cluster_xml_files.sh
|
UTF-8
| 2,392
| 3.578125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Apply per-nodetype configuration from a YAML file in the shared conf dir to
# the cluster's Hadoop XML (or Spark plain-text) configuration files.
# Helper functions (log, yaml_conf_*, add_hadoop_xml_properties) come from the
# sourced library below; $NODETYPE is expected in the environment.
source ${DOCKER_CLUSTER_TOOLS_BIN_DIR}/docker_cluster_tools_lib.sh
log "[UPDATE_CLUSTER_XML_FILES][BEGIN]"
file_config=""
# Expect at least one *.yaml at the top of the shared conf dir; when several
# exist, only the first one (in find order) is used.
if [ $(find "${DOCKER_CLUSTER_TOOLS_SHARED_CONF_DIR}" -maxdepth 1 -type f -name "*.yaml" | wc -l) -lt 1 ]
then
    log "[UPDATE_CLUSTER_XML_FILES][WARNING] No yaml configuration file found!"
else
    file_config=$(find "${DOCKER_CLUSTER_TOOLS_SHARED_CONF_DIR}" -maxdepth 1 -type f -name "*.yaml" | head -1)
    file_config_name=$(basename ${file_config})
    log "[UPDATE_CLUSTER_XML_FILES][INFO] file_config : [${file_config}]"
    # Nothing to do when the YAML has no section for this nodetype.
    if [ "$(yaml_conf_exist_xpath ${file_config_name} ${NODETYPE})" == "false" ]
    then
        log "[UPDATE_CLUSTER_XML_FILES][WARNING] No configuration found for nodetype [${NODETYPE}] on file [${file_config}]!"
    else
        log "[UPDATE_CLUSTER_XML_FILES][INFO] Configuration found for nodetype [${NODETYPE}] on file [${file_config}]."
        nb_files=$(yaml_conf_get_xml_node_nb_files ${file_config_name} ${NODETYPE})
        log "[UPDATE_CLUSTER_XML_FILES][INFO] ${NODETYPE} nb_files = ${nb_files}."
        # Outer loop: one iteration per configuration file declared for this nodetype.
        for (( index_file=0; index_file<${nb_files}; index_file++ ))
        do
            xml_file_name=$(yaml_conf_get_xml_node_file_name ${file_config_name} ${NODETYPE} ${index_file})
            log "[UPDATE_CLUSTER_XML_FILES][INFO] Managing file ${xml_file_name}"
            nb_propertie=$(yaml_conf_get_xml_node_nb_properties ${file_config_name} ${NODETYPE} ${index_file})
            log "[UPDATE_CLUSTER_XML_FILES][INFO] ${xml_file_name} nb_propertie = ${nb_propertie}."
            # Inner loop: one iteration per key/value property in that file.
            for (( index_prop=0; index_prop<${nb_propertie}; index_prop++ ))
            do
                propertie_key=$(yaml_conf_get_xml_node_properties_key ${file_config_name} ${NODETYPE} ${index_file} ${index_prop})
                propertie_val=$(yaml_conf_get_xml_node_properties_val ${file_config_name} ${NODETYPE} ${index_file} ${index_prop})
                case "${NODETYPE}" in
                    SPARK )
                        # Spark config is plain "key value" lines appended to the conf file.
                        log "[UPDATE_CLUSTER_XML_FILES][INFO] Spark propertie (${xml_file_name}) => [${propertie_key} : ${propertie_val}]"
                        echo -e "${propertie_key} ${propertie_val}" >> "${DOCKER_CLUSTER_TOOLS_SHARED_CONF_DIR}/spark/${xml_file_name}"
                        ;;
                    * )
                        # Hadoop config goes through the XML helper from the shared library.
                        log "[UPDATE_CLUSTER_XML_FILES][INFO] propertie => [${propertie_key} : ${propertie_val}]"
                        add_hadoop_xml_properties ${DOCKER_CLUSTER_TOOLS_SHARED_CONF_DIR}/hadoop/${xml_file_name} "${propertie_key}" "${propertie_val}"
                        ;;
                esac
            done
        done
    fi
fi
log "[UPDATE_CLUSTER_XML_FILES][END]"
| true
|
5f23f17da131ac3512a2761bd02325fa8b7fafe2
|
Shell
|
kisscool/shell-dancer
|
/dancer.sh
|
UTF-8
| 4,006
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# vim: set softtabstop=2 shiftwidth=2 expandtab :
# Original author : KissCool
# Created : 2013
# License : MIT license
#####################################################################
# Theory :
#####################################################################
# A route is the combination of an HTTP method (GET, POST, PUT, DELETE) and a path ('/path/to/resource').
# Each defined route (throught the use of high level functions get(), post(), put(), delete()) is
# associated with a dynamic function which contains the business logic of this particular route.
# Those route-handling functions are named route_$cksum(), where $cksum is a unique identifier.
# All the registered paths are stored in variables named $ROUTES_$METHOD, where $METHOD is the
# HTTP method of the associated route.
#####################################################################
# Private (internal) functions
#####################################################################
# This function will output a unique and reproductible checksum from a string,
# which in our case is a route. We use this unique checksum to name and retrieve
# the route-handling functions.
#
# arguments :
# - $* : strings
#
# example :
# dancer_cksum GET /hi
#
# Compute a unique, reproducible numeric identifier for a route by piping the
# space-joined arguments through cksum and keeping only the checksum field.
#
# arguments :
#  - $* : strings
dancer_cksum() {
  local route_key
  route_key="$*"
  printf '%s' "$route_key" | cksum | cut -d ' ' -f 1
}
# This function is used behind the scene inside route definitions.
# It defines a route_$method_$path() with $method equal to the HTTP
# method (GET, POST, PUT, DELETE) and $path equal to the pattern provided
# as an argument to the route definition.
# This new defined function contains the logic of the web application and
# output the answer to the request.
#
# arguments :
# - $1 : HTTP method (GET, POST, PUT or DELETE)
# - $2 : path
# - stdin : logic to execute when this route is used
#
# example :
# dancer_add_route GET /hi <<!
# echo "hi"
# !
#
dancer_add_route() {
# The handler body arrives on stdin (normally fed via a here-doc).
content=`cat /dev/stdin`;
local method=$1;
local path=$2;
# Unique, reproducible id for this (method, path) pair.
local cksum=`dancer_cksum ${method} ${path}`
# yet again the power of eval comes
# as our savior for some dynamic
# function definition
eval "
route_${cksum}() {
$content
}
"
# this is horrible.
# all it does it append $path to $ROUTES_${method} and export it.
eval "export ROUTES_${method}=\"\$ROUTES_${method} ${path}\""
}
#####################################################################
# Public functions
#####################################################################
# This function defines a new route for a GET HTTP request.
# It uses dancer_add_route() behind the scene.
#
# arguments :
# - $1 : path
# - stdin : logic to execute when this route is used
#
# example :
# get /hi <<!
# echo "hi"
# !
#
# Register a handler for a GET route; the handler body is read from stdin.
#
# arguments :
#  - $1 : path
#  - stdin : logic to execute when this route is used
get() {
  local route=$1;
  local handler
  handler=`cat /dev/stdin`
  dancer_add_route GET $route <<!
$handler
!
}
# This function defines a new route for a POST HTTP request.
# It uses dancer_add_route() behind the scene.
#
# arguments :
# - $1 : path
# - stdin : logic to execute when this route is used
#
# example :
# post /hi <<!
# create something
# !
#
# Register a handler for a POST route; the handler body is read from stdin.
#
# arguments :
#  - $1 : path
#  - stdin : logic to execute when this route is used
post() {
  local route=$1;
  local handler
  handler=`cat /dev/stdin`
  dancer_add_route POST $route <<!
$handler
!
}
# This function defines a new route for a PUT HTTP request.
# It uses dancer_add_route() behind the scene.
#
# arguments :
# - $1 : path
# - stdin : logic to execute when this route is used
#
# example :
# put /hi <<!
# update/replace something
# !
#
# Register a handler for a PUT route; the handler body is read from stdin.
#
# arguments :
#  - $1 : path
#  - stdin : logic to execute when this route is used
put() {
  local route=$1;
  local handler
  handler=`cat /dev/stdin`
  dancer_add_route PUT $route <<!
$handler
!
}
# This function defines a new route for a DELETE HTTP request.
# It uses dancer_add_route() behind the scene.
#
# arguments :
# - $1 : path
# - stdin : logic to execute when this route is used
#
# example :
# delete /hi <<!
# annihilate something
# !
#
# Register a handler for a DELETE route; the handler body is read from stdin.
#
# arguments :
#  - $1 : path
#  - stdin : logic to execute when this route is used
delete() {
  local route=$1;
  local handler
  handler=`cat /dev/stdin`
  dancer_add_route DELETE $route <<!
$handler
!
}
| true
|
4789f249c41ec587a0f52ab8f9fded27a33d009f
|
Shell
|
aceofall/StudyEx
|
/bash/bash_shell7/say.sh
|
UTF-8
| 338
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Prompt loop: keep asking for input until the user types q (or Q), then quit.
# All user-facing messages are in Korean and left untouched.
echo q를 입력하면 종료합니다.
go=start
while [[ -n "$go" ]] # note the double quotes around the variable.
do
echo -n 종료하려면 q를 입력하세요. :
read word
if [[ $word == [Qq] ]] # old style : [ "$word" = q -o "$word" = Q ]
then
echo "q를 입력하셨네요. 종료합니다. "
go=
fi
done
| true
|
58b47822ff7e0680cec0618961c7eb75eb953aaa
|
Shell
|
WhiteTshirtXI/Nek5000_examples
|
/KMM_Re-tau_180/stats/makenek
|
UTF-8
| 1,777
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Nek5000 build config file
# (c) 2008,2009,2010 UCHICAGO ARGONNE, LLC
#
# Usage: ./makenek <case|clean> [source_dir]
# Builds the Nek5000 case named by $1 (expects $1.f in this directory) plus
# the 2D-statistics / time-series user modules listed in USR below.
# source path
SOURCE_ROOT="/home/prabal/nek5_svn/trunk/nek"
# Fortran compiler
F77="mpif77"
# C compiler
CC="mpicc"
# pre-processor symbol list
# (set PPLIST=? to get a list of available symbols)
#PPLIST="?"
# plug-in list
PLUGIN_LIST=""
# OPTIONAL SETTINGS
# -----------------
# enable MPI (default true)
#IFMPI="false"
# auxilliary files to compile
# NOTE: source files have to located in the same directory as makenek
# a makefile_usr.inc has to be provided containing the build rules
USR="statistics_2D.o statistics_2DIO.o statistics_2D_usr.o statistics_2D_debug.o time_series.o time_seriesIO.o pts_redistribute.o"
#USR="foo.o"
# linking flags
#USR_LFLAGS="-L/usr/lib -lfoo"
# generic compiler flags
#G="-g"
# optimization flags
#OPT_FLAGS_STD=""
#OPT_FLAGS_MAG=""
# enable AMG coarse grid solver (default XXT)
#IFAMG="true"
#IFAMG_DUMP="true"
# CVODE path
#CVODE_DIR=$HOME/cvode/lib
# MOAB/iMESH path
#MOAB_DIR="$HOME/moab"
###############################################################################
# DONT'T TOUCH WHAT FOLLOWS !!!
###############################################################################
# assign version tag
mver=1
# overwrite source path with optional 2nd argument
if [ -d $2 ] && [ $# -eq 2 ]; then
SOURCE_ROOT="$2"
echo "change source code directory to: ", $SOURCE_ROOT
fi
# NOTE(review): $1 is unquoted here; the test errors out when the script is
# invoked without any argument.
if [ $1 = "clean" ]; then
echo Clean up ./obj and binary
else
# cp SIZE.f SIZE
# Stage the case source under the .usr name the Nek build system expects.
cp $1.f $1.usr
fi
# do some checks and create makefile
source $SOURCE_ROOT/makenek.inc
# compile
make -j4 -f makefile 2>&1 | tee compiler.out
# clean up
mkdir obj 2>/dev/null
\mv *.o ./obj 2>/dev/null
rm compiler.out
# rm makefile
# Restore the case source to its original name.
mv $1.usr $1.f
exit 0
| true
|
6036ba3993b5b5dd297efcb9598776e277838ec0
|
Shell
|
spetr/chromedock
|
/scripts/start-server.sh
|
UTF-8
| 2,562
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: start a TurboVNC server, Fluxbox and a noVNC websocket
# proxy, then run Google Chrome full-screen inside the VNC session.
#
# Environment: DATA_DIR, DATA_PERM, CUSTOM_RES_W/H, CUSTOM_DEPTH, RFB_PORT,
# NOVNC_PORT, TURBOVNC_PARAMS, EXTRA_PARAMETERS.
export DISPLAY=:99
export XAUTHORITY=${DATA_DIR}/.Xauthority

# Clamp the requested resolution to a 1024x768 minimum.
echo "---Resolution check---"
if [ -z "${CUSTOM_RES_W}" ]; then
    CUSTOM_RES_W=1024
fi
if [ -z "${CUSTOM_RES_H}" ]; then
    CUSTOM_RES_H=768
fi
if [ "${CUSTOM_RES_W}" -le 1023 ]; then
    echo "---Width to low must be a minimal of 1024 pixels, correcting to 1024...---"
    CUSTOM_RES_W=1024
fi
if [ "${CUSTOM_RES_H}" -le 767 ]; then
    echo "---Height to low must be a minimal of 768 pixels, correcting to 768...---"
    CUSTOM_RES_H=768
fi

echo "---Checking for old logfiles---"
find "$DATA_DIR" -name "XvfbLog.*" -exec rm -f {} \;
find "$DATA_DIR" -name "x11vncLog.*" -exec rm -f {} \;

# Stale X/VNC lock files and Chrome singleton locks from a previous run.
echo "---Checking for old display lock files---"
rm -rf /tmp/.X99*
rm -rf /tmp/.X11*
rm -rf "${DATA_DIR}"/.vnc/*.log "${DATA_DIR}"/.vnc/*.pid "${DATA_DIR}"/Singleton*
chmod -R "${DATA_PERM}" "${DATA_DIR}"
if [ -f "${DATA_DIR}/.vnc/passwd" ]; then
    chmod 600 "${DATA_DIR}/.vnc/passwd"
fi
# BUG FIX: the original ran "screen -wipe 2&>/dev/null", which passes a
# literal "2" argument to screen; redirect both streams properly instead.
screen -wipe >/dev/null 2>&1

echo "---Starting TurboVNC server---"
vncserver -geometry ${CUSTOM_RES_W}x${CUSTOM_RES_H} -depth ${CUSTOM_DEPTH} :99 -rfbport ${RFB_PORT} -noxstartup ${TURBOVNC_PARAMS} 2>/dev/null
sleep 2
echo "---Starting Fluxbox---"
screen -d -m env HOME=/etc /usr/bin/fluxbox
sleep 2
echo "---Starting noVNC server---"
websockify -D --web=/usr/share/novnc/ --cert=/etc/ssl/novnc.pem ${NOVNC_PORT} localhost:${RFB_PORT}
sleep 2
echo "---Starting Chrome---"
cd "${DATA_DIR}"
/usr/bin/google-chrome \
--window-position=0,0 \
--window-size=${CUSTOM_RES_W},${CUSTOM_RES_H} \
--show-fps-counter \
--frame-throttle-fps=15 \
--max-gum-fps=15 \
--user-data-dir=/tmp \
--disk-cache-dir=/tmp \
--disk-cache-size=4096 \
--media-cache-size=4096 \
--alsa-input-device=null \
--alsa-output-device=null \
--audio-output-channels=1 \
--block-new-web-contents \
--no-sandbox \
--no-first-run \
--no-pings \
--auto-ssl-client-auth \
--autoplay-policy=no-user-gesture-required \
--disable-background-networking \
--disable-client-side-phishing-detection \
--disable-default-apps \
--disable-dev-shm-usage \
--disable-hang-monitor \
--disable-infobars \
--disable-popup-blocking \
--disable-prompt-on-repost \
--disable-sync \
--disable-canvas-aa \
--disable-composited-antialiasing \
--disable-font-subpixel-positioning \
--disable-smooth-scrolling \
--disable-speech-api \
--disable-crash-reporter \
--ignore-certificate-errors \
--test-type \
--load-extension=/opt/iwc-rec-ext/ \
--whitelisted-extension-id=ifiomgafmdlhpckihjeimadkcalnamfe \
--dbus-stub \
--enable-logging=stderr \
${EXTRA_PARAMETERS}
| true
|
97491935cee0046d109398341ec31eafb9c84670
|
Shell
|
Researcher86/php-selenium
|
/run.sh
|
UTF-8
| 497
| 3.046875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Thin dispatcher around docker-compose for common development tasks.
case "$1" in
    build)         docker-compose build ;;
    up)            docker-compose up -d ;;
    down)          docker-compose down ;;
    php)           docker-compose run php-cli php app/$2 ;;
    test)          docker-compose run php-cli vendor/bin/phpunit ;;
    composer)      docker-compose run composer $2 ;;
    dump-autoload) docker-compose run composer dump-autoload -o ;;
esac
| true
|
c95f8ee768b57b96d8ed32e8de4cbc14e08d0bc0
|
Shell
|
ateek-ujjawal/DedS_a110
|
/build.sh
|
UTF-8
| 1,503
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the s9081 MediaTek Android kernel for one of three ROM flavours
# ($1 = 4.2, 4.2ics or 4.1), repack boot.img with the matching ramdisk and
# produce a flashable zip under ~/DedS_a110/out.
cd ~/DedS_a110/mediatek/config/s9081
# Pick the ProjectConfig and ramdisk matching the requested flavour.
if [ "4.2" == "$1" ]; then
echo "4.2 unified selected"
Ramdisk="4.2ramdisk"
cp 4.2uProjectConfig.mk ProjectConfig.mk
elif [ "4.2ics" == "$1" ]; then
echo "4.2 ics selected"
Ramdisk="4.2ramdisk"
cp 4.2icsProjectConfig.mk ProjectConfig.mk
elif [ "4.1" == "$1" ]; then
echo "4.1 selected"
Ramdisk="4.1ramdisk"
cp 4.1ProjectConfig.mk ProjectConfig.mk
else
echo "wrong option"
exit
fi
#setting build
# Cross-compilation environment for the arm-linux-androideabi 4.6 toolchain.
cd ~/DedS_a110/kernel
export ARCH=arm
export PATH=~/arm-linux-androideabi-4.6/bin:$PATH
export CROSS_COMPILE=arm-linux-androideabi-
#export PATH=~/arm-linux-androideabi-4.6/bin:$PATH
#export CROSS_COMPILE=arm-linux-androideabi-
# Build the kernel, then install stripped modules into a staging tree.
TARGET_PRODUCT=s9081 MTK_ROOT_CUSTOM=../mediatek/custom/ make -j5
make -j5 TARGET_PRODUCT=s9081 INSTALL_MOD_STRIP=1 INSTALL_MOD_PATH=~/DedS_a110/mtktools/temp/system android_modules_install -j5
# Copy just the modules shipped in the flashable zip template (BBFZ).
cp ~/DedS_a110/mtktools/temp/system/lib/modules/zram.ko ~/DedS_a110/mtktools/BBFZ/system/lib/modules/zram.ko
cp ~/DedS_a110/mtktools/temp/system/lib/modules/m4u.ko ~/DedS_a110/mtktools/BBFZ/system/lib/modules/m4u.ko
#adding 512kb header
cd ~/DedS_a110/mediatek/build/tools
./mkimage ~/DedS_a110/kernel/arch/arm/boot/zImage KERNEL > ~/DedS_a110/mtktools/zimage
#repacking boot
cd ~/DedS_a110/mtktools
./repack.pl -boot zimage $Ramdisk ~/DedS_a110/mtktools/BBFZ/boot.img
#creating flashable zip
cd ~/DedS_a110/mtktools/BBFZ
zip -r out .
mv ~/DedS_a110/mtktools/BBFZ/out.zip ~/DedS_a110/out/"$1"_DedS_kernel_001.zip
| true
|
a0d9a30516a1a6f2657952585054ae3918ad500d
|
Shell
|
hertzsprung/ninjaopenfoam
|
/ninjaopenfoam/scripts/gen-controlDict.sh
|
UTF-8
| 244
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Substitute <endTime>, <writeInterval> and <timestep> into a controlDict
# template read on stdin, writing the expanded result to stdout.
set -e

display_usage() {
  echo -e "Usage: gen-controlDict.sh <endTime> <writeInterval> <timestep>\n"
}

# All three positional arguments are mandatory.
if [ $# -lt 3 ]; then
  display_usage
  exit 1
fi

# envsubst reads the template from stdin and expands these exported names.
export endTime=$1 writeInterval=$2 timestep=$3
envsubst
| true
|
030297546f9d98e25b6125240a31f640d977573a
|
Shell
|
apiwatch/apiwatch
|
/assembly/src/bin/apiwatch
|
UTF-8
| 2,894
| 4.03125
| 4
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * #
# Copyright (c) 2012, Robin Jarry. All rights reserved.               #
#                                                                     #
# This file is part of APIWATCH and published under the BSD license.  #
#                                                                     #
# See the "LICENSE" file for more information.                        #
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * #
# ---------------------------------------------------------------------
# APIWATCH Start Up Bash script
# ---------------------------------------------------------------------
# Locates APIWATCH_HOME and a Java runtime, builds the classpath from the
# bundled jars, and executes the launcher class selected by the name this
# script was invoked as (apiwatch / apiscan / apidiff symlinks).
# OS specific support.  $var _must_ be set to either true or false.
cygwin=false;
darwin=false;
mingw=false
case "`uname`" in
CYGWIN*) cygwin=true ;;
MINGW*) mingw=true;;
Darwin*) darwin=true;;
esac
if [ -z "$APIWATCH_HOME" ] ; then
## resolve links - $0 may be a link to apiwatch's home
PRG="$0"
# need this for relative symlinks
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG="`dirname "$PRG"`/$link"
fi
done
saveddir=`pwd`
APIWATCH_HOME=`dirname "$PRG"`/..
# make it fully qualified
APIWATCH_HOME=`cd "$APIWATCH_HOME" && pwd`
cd "$saveddir"
# echo Using apiwatch at $APIWATCH_HOME
fi
# For Cygwin, ensure paths are in UNIX format before anything is touched
if $cygwin ; then
[ -n "$APIWATCH_HOME" ] &&
APIWATCH_HOME=`cygpath --unix "$APIWATCH_HOME"`
[ -n "$CLASSPATH" ] &&
CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
fi
# For Mingw, ensure paths are in UNIX format before anything is touched
if $mingw ; then
[ -n "$APIWATCH_HOME" ] &&
APIWATCH_HOME="`(cd "$APIWATCH_HOME"; pwd)`"
fi
# Build the classpath from every jar shipped under lib/.
# NOTE(review): CLASSPATH is reset here, so the Cygwin conversion above only
# touches an inherited value that is then discarded — confirm this is intended.
CLASSPATH='.'
for jar in "${APIWATCH_HOME}"/lib/*.jar;
do
CLASSPATH=$CLASSPATH:$jar
done
# Find a java executable: $JAVACMD, then $JAVA_HOME, then the PATH.
if [ -z "$JAVACMD" ] ; then
if [ -n "$JAVA_HOME" ]  ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
else
JAVACMD="`which java`"
fi
fi
if [ ! -x "$JAVACMD" ] ; then
echo "Error: JAVACMD is not defined correctly."
exit 1
fi
# For Cygwin, switch paths to Windows format before running java
if $cygwin; then
[ -n "$APIWATCH_HOME" ] &&
APIWATCH_HOME=`cygpath --path --windows "$APIWATCH_HOME"`
[ -n "$CLASSPATH" ] &&
CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
fi
# Select the CLI entry point from the invoked name; default is APIWatch.
LAUNCHER='org.apiwatch.cli.APIWatch'
case `basename "$0"` in
apiscan)
LAUNCHER='org.apiwatch.cli.APIScan'
;;
apidiff)
LAUNCHER='org.apiwatch.cli.APIDiff'
;;
apiwatch)
LAUNCHER='org.apiwatch.cli.APIWatch'
;;
esac
exec "$JAVACMD" -classpath "${CLASSPATH}" $LAUNCHER "$@"
| true
|
fccc1338a77dd3a262d53db83319652a51aea7ca
|
Shell
|
sadiksha/dailog
|
/dailog
|
UTF-8
| 2,153
| 4.375
| 4
|
[] |
no_license
|
#!/bin/bash
######################################################################################
# Created by Ilya Krasnov on 2016/06/19
# Captures today's tasks. Prints monthly summary
######################################################################################
# Paths and date stamps for the per-month markdown log and its PDF rendering.
# NOTE: 'date -v' is the BSD/macOS flag syntax; "-v -20d" approximates "last
# month" by stepping 20 days back from today.
DIR="/Users/`whoami`/dailog"
TODAY=`date +%F`
MONTH=`date +%Y-%m`
LAST_MONTH=`date -v -20d +%Y-%m`
FILE=${DIR}/${MONTH}.md
FILE_LAST_MONTH=${DIR}/${LAST_MONTH}.md
PDF=${DIR}/${MONTH}.pdf
PDF_LAST_MONTH=${DIR}/${LAST_MONTH}.pdf
MONTH_TITLE=`date +'%Y-%m'`
LAST_MONTH_TITLE=`date -v -20d +'%Y-%m'`
# Append one markdown bullet ("- <text>") to the current month's log ($FILE).
# printf is used instead of echo so entries that start with '-' or contain
# backslashes are written verbatim; $FILE is quoted against special chars.
function line_to_file {
  printf -- '- %s\n' "$*" >> "${FILE}"
}
# Append today's entries (read from stdin, one per line) to the monthly log,
# creating the file header and the day's section header when needed.
function write_log {
# Creates monthly file if needed
if [ ! -e ${FILE} ]
then
touch ${FILE}
echo "# ${MONTH_TITLE}" >> ${FILE}
fi
# Adds entries new days or appends to existing entry
if grep -q "${TODAY}" ${FILE}
then
while read line
do
# NOTE(review): this gsed line is broken — the single quotes keep ${TODAY}
# and $line from expanding, and the backticks then try to execute gsed's
# (empty) output. It looks like a dead leftover; line_to_file does the work.
`gsed -i '/${TODAY}/a $line' ${FILE}`
line_to_file $line
done
else
printf "\n## ${TODAY} \n" >> ${FILE}
while read line
do
line_to_file $line
done
fi
}
# Render a month's markdown log to PDF via pandoc and open it in the viewer.
#   $1 - month title (progress message only), $2 - source .md, $3 - target .pdf
function create_and_open_pdf {
printf "Generating pdf summary for $1\n"
pandoc $2 -s --variable geometry:margin=1in -o $3
open $3
}
# Interactively pick last (default) or current month and generate its PDF.
function summarize {
printf "1 - Generate summary for last month (default)\n"
printf "2 - Generate summary for current month\n"
read -p 'Please chose an option: ' MONTH
case $MONTH in
1|'')
create_and_open_pdf $LAST_MONTH_TITLE $FILE_LAST_MONTH $PDF_LAST_MONTH
;;
2)
create_and_open_pdf $MONTH_TITLE $FILE $PDF
;;
esac
}
# Dump the current month's log to stdout.
function preview_current_month {
cat ${FILE}
}
# Interactively open last (default) or current month's log in vim.
function edit_current_month {
printf "1 - Edit log for last month (default)\n"
printf "2 - Edit log for current month\n"
read -p 'Please chose an option: ' MONTH
case $MONTH in
1|'')
vim ${FILE_LAST_MONTH}
;;
2)
vim ${FILE}
;;
esac
}
# Create directory in root folder
[ -d ${DIR} ] || mkdir ${DIR}
# Main function
# No argument: capture today's entries from stdin; otherwise dispatch on the
# subcommand (preview | edit | summary).
case $1 in
'')
write_log
;;
preview)
preview_current_month
;;
edit)
edit_current_month
;;
summary)
summarize
;;
esac
| true
|
52e1d8afa546b5b51a194426b408295bafccf053
|
Shell
|
kmatheussen/common-ladspa-plugins
|
/build_kokkinizita.sh
|
UTF-8
| 1,681
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Cross-compile the Kokkinizita LADSPA plugin collections with the given
# C/C++ compilers and copy the resulting .so/.dll files into DEST.
#
# usage: build_kokkinizita.sh CC CXX BUILD_DIR DEST_DIR
# COMPILER_FLAGS="-msse2 -mfpmath=sse -O2" ./build_kokkinizita.sh x86_64-w64-mingw32-gcc x86_64-w64-mingw32-g++ build/build_mcp build/dlls
# NOTE(review): all four error messages below say "arg1"; only the first is
# actually about argument 1.
if [ -z "$1" ]; then
echo "arg1 (CC) is unset";
exit -1
fi
if [ -z "$2" ]; then
echo "arg1 (CCC) is unset";
exit -1
fi
if [ -z "$3" ]; then
echo "arg1 (BUILD) is unset";
exit -1
fi
if [ -z "$4" ]; then
echo "arg1 (DEST) is unset";
exit -1
fi
set -e
set -x
CC=$1
CCC=$2
BUILD=$3
DEST=$4
ROOT=`pwd`
mkdir -p $DEST
# Keep one previous build tree around as ${BUILD}_old.
rm -fr ${BUILD}_old
mkdir -p $BUILD
mv -f $BUILD ${BUILD}_old
mkdir -p $BUILD
cd $BUILD
# Copy plugin source dir $1 into the build tree, patch its Makefile for the
# target toolchain (renaming .so to .dll for non-darwin/non-linux targets),
# build it, and copy the artifacts into $DEST.
function build {
cp -a $1 .
cd $2
if [[ $BUILD == *darwin* ]]
then
cat Makefile | sed s/g++/$CCC/g | sed s/gcc/$CC/g >Makefile.cross
elif [[ $BUILD == *linux* ]]
then
cat Makefile | sed s:g++:$CCC:g | sed s:gcc:$CC:g >Makefile.cross
else
cat Makefile | sed s/g++/$CCC/g | sed s/gcc/$CC/g | sed 's/\.so/\.dll/g' >Makefile.cross
fi
CC="$CC" CXX="$CCC" CFLAGS="$COMPILER_FLAGS" CXXFLAGS="$COMPILER_FLAGS" make -f Makefile.cross
if [[ $BUILD == *darwin* ]]
then
cp *.so $ROOT/$DEST/
elif [[ $BUILD == *linux* ]]
then
cp *.so $ROOT/$DEST/
else
cp *.dll $ROOT/$DEST/
fi
cd ..
}
# Convenience wrapper: build one collection from the bundled kokkinizita tree.
function build_kokkinizita {
build $ROOT/kokkinizita/$1 $1
}
build_kokkinizita tap-plugins-git
build_kokkinizita MCP-plugins-0.4.0
build_kokkinizita AMB-plugins-0.8.1
build_kokkinizita STE-plugins-0.0.2
build_kokkinizita REV-plugins-0.7.1
build_kokkinizita VCO-plugins-0.3.0
build_kokkinizita FIL-plugins-0.3.0
build_kokkinizita WAH-plugins-0.1.0
build_kokkinizita g2reverb-0.7.1
| true
|
da33c503b70f95a8572ede62532a003e37514bb1
|
Shell
|
DamienRobert/dotfiles
|
/syst/config/old/new/00install
|
UTF-8
| 3,012
| 4.03125
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
#!/bin/zsh
# Driver for the install scripts in $RUNDIR: parses options, sources the
# optional vars/torun files and assembles the script queue in $TORUN.
RUNDIR=$(dirname $0)
TARGET_DIR="/"
TMPSCRIPT="/var/tmp/cur_script"
PAUSE=
#to run only one script, do
#./00install --runfile /dev/null script
while true;
do
case $1 in
-- ) break;;
--rundir ) shift; RUNDIR=$1; shift ;;
--runfile ) shift; RUNFILE=$1; shift ;;
--vars ) shift; VARFILE=$1; shift ;;
-d|--target ) shift; OTARGET_DIR=$1; shift ;;
-p|--pause ) shift; PAUSE=t ;;
*) break;;
esac
done
[[ -z $VARFILE ]] && VARFILE="$RUNDIR/vars"
[[ -z $RUNFILE ]] && RUNFILE="$RUNDIR/torun.sh"
[[ -f $VARFILE ]] && . $VARFILE
[[ -f $RUNFILE ]] && . $RUNFILE
#command line has precedence
# BUG FIX: the original assigned the literal string "OTARGET_DIR" (missing $),
# so the -d/--target option never took effect.
[[ -n $OTARGET_DIR ]] && TARGET_DIR=$OTARGET_DIR
TORUN=($@ $TORUN)
echo "!! Running scripts in $RUNDIR/: $TORUN"
export RUNDIR
# Remove the temporary run script, the one-shot systemd unit and the staged
# copy of $RUNDIR from the target tree.
cleanup() {
rm -f $TARGET_DIR/$TMPSCRIPT
rm -f $TARGET_DIR/etc/systemd/system/initboot.service
[[ -d $TARGET_DIR/$TMPDIR ]] && rm -rf $TARGET_DIR/$TMPDIR
}
# Report the failing status, clean up and abort the whole run.
fail() {
echo "! Failed: $?"
cleanup
exit 1
}
# Stage a copy of $RUNDIR at $TARGET_DIR/$TMPDIR, resolving the optional
# 'slash' symlink into a real directory copy.
tmpdir() {
[[ -z $TMPDIR ]] && { echo "TMPDIR option used but TMPDIR not set"; exit 1 }
[[ -d $TARGET_DIR/$TMPDIR ]] && return 0 #already exist
rm -rf $TARGET_DIR/$TMPDIR
cp -r $RUNDIR $TARGET_DIR/$TMPDIR
if [[ -h $RUNDIR/slash ]]; then
(
cd $RUNDIR
slashdir=$(readlink slash)
rm $TARGET_DIR/$TMPDIR/slash
cp -r $slashdir $TARGET_DIR/$TMPDIR/slash
)
fi
}
# Assemble the runnable script: zsh shebang + the vars file + the script body.
prepare() {
echo "#!/bin/zsh" > $TARGET_DIR/$TMPSCRIPT
cat $VARFILE $RUNDIR/$cur_script >> $TARGET_DIR/$TMPSCRIPT
chmod 755 $TARGET_DIR/$TMPSCRIPT
}
#scripts are of the form name:type:opt1,opt2
# Resolve a script's name, execution type and options, then run it inside the
# requested environment (chroot / nspawn variants, plain exec, or sourcing).
launch() {
cur_script=$1
script_type=$2
if [[ -z $script_type ]]; then
# Split "name:type:opts" from the single argument form.
split=(${(s/:/)cur_script})
cur_script=$split[1]
script_type=$split[2]
opts=$split[3]
opts=(${(s/,/)opts})
else
shift; shift
opts=($@)
fi
[[ -z $script_type ]] && script_type="auto"
# "auto": read type/options from the second line of the script itself.
if [[ $script_type == "auto" ]]; then
split=$(head -n2 $RUNDIR/$cur_script | tail -n1)
split=${split#\#}
split=(${(s/:/)split})
script_type=$split[1]
opts=$split[2]
opts=(${(s/,/)opts})
fi
echo "!! $cur_script: $script_type ($opts)"
[[ -n $PAUSE ]] && read
for opt in $opts; do
case $opt in
TMPDIR) tmpdir ;;
esac
done
case $script_type in
chroot )
prepare
chroot $TARGET_DIR $TMPSCRIPT
;;
arch-chroot )
prepare
arch-chroot $TARGET_DIR $TMPSCRIPT
;;
nspawn )
prepare
systemd-nspawn -D $TARGET_DIR $TMPSCRIPT
;;
arch-nspawn )
prepare
arch-nspawn $TARGET_DIR $TMPSCRIPT
;;
nspawn-b )
# Boot the container and run the script as a one-shot systemd unit that
# powers the container off when the script finishes.
prepare
cat >$TARGET_DIR/etc/systemd/system/initboot.service <<EOF
[Unit]
Description=Initial configuration script
[Service]
ExecStart=$TMPSCRIPT
ExecStartPost=/usr/bin/systemctl poweroff
Type=oneshot
EOF
systemd-nspawn -bD $TARGET_DIR systemd.unit=initboot.service
;;
exec )
$RUNDIR/$cur_script
;;
* ) #source
. $RUNDIR/$cur_script
;;
esac
}
# Run the queued scripts one by one; after each success, persist the remaining
# queue to $RUNFILE so an aborted run can be resumed where it stopped.
[[ -n $TORUN ]] && cleanup
while [[ $#TORUN > 0 ]]; do
runscript=$TORUN[1]
launch $runscript || fail
shift TORUN
cat > $RUNFILE <<EOS
TORUN=($(echo \'${^TORUN}\'))
EOS
done
| true
|
0b8b12ecec94fea3ad10c899fb26741e71ec1fdb
|
Shell
|
URseismology/ADAMA
|
/NCFsToPVel/run03code.sh
|
UTF-8
| 788
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
## loop through all existing text CCF outputs and do 03 code for all pairs
## F changes here
# For each NCF result file, derive the station pair from the filename and, if
# step 03 has not yet produced output for that pair, submit a slurm job.
for i in /scratch/tolugboj_lab/Prj5_HarnomicRFTraces/AkiEstimate/tutorial/ResultOf02/ResultOf02_02/*
do
file=$(basename "$i")
# Filenames look like <prefix>_<sta1>_<sta2>_...; fields 2 and 3 are stations.
sta1=$(echo $file | cut -d'_' -f 2)
sta2=$(echo $file | cut -d'_' -f 3)
pair="${sta1}_${sta2}"
out03="../ResultOf03/ResultOf03_02/Final_${pair}/opt.pred-love" ## F changes here
echo $pair
## skip the pair if the pair has already been processed
if [ ! -s $out03 ]
then
sbatch --export=pair=$pair run03code.slurm
## throttle: count our jobs on the urseismo partition (minus the squeue
## header line) and pause when 150 or more are queued
numJobs=`/software/slurm/current/bin/squeue -p urseismo -u sxue3| wc -l`
((numJobs=numJobs-1))
if (( numJobs >= 150 ))
then
sleep 2m
fi
fi
done
| true
|
b68826266ca07234ebd3f4d1d9a0aca8c0c37fbb
|
Shell
|
gjbex/training-material
|
/CPlusPlus/DesignPatterns/Circles/generate_overlap_data.sh
|
UTF-8
| 243
| 2.75
| 3
|
[
"CC-BY-4.0"
] |
permissive
|
#!/usr/bin/env bash
# Sweep the circle count from 50 to 10000 in steps of 50; for every count,
# run 50 independent random trials and pipe each sample through the overlap
# computation.
readonly radius=0.01
for (( n_circles = 50; n_circles <= 10000; n_circles += 50 )); do
  for trial in {1..50}; do
    seed=$RANDOM
    ./random_circles.exe "$n_circles" "$radius" "$seed" | ./circle_overlap.exe
  done
done
| true
|
f4825d8cd2e815f339a300c8be4d262396e50a7c
|
Shell
|
loveencounterflow/intershop
|
/bin/_restart-rpc-server
|
UTF-8
| 1,388
| 3.34375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Ask a running intershop RPC server (at $intershop_rpc_host:$intershop_rpc_port)
# to restart itself; when no server answers, launch a fresh primary server.
# http://redsymbol.net/articles/unofficial-bash-strict-mode/
# set -euo pipefail
set -eo pipefail
#-----------------------------------------------------------------------------------------------------------
cd "$(readlink -f "$0" | xargs dirname)"/..
script_path="$(realpath "${BASH_SOURCE[0]}")"
#-----------------------------------------------------------------------------------------------------------
# The three environment variables checked below are mandatory.
if [ -z ${intershop_rpc_port+x} ]; then
echo "$script_path"': need variable $intershop_rpc_port'
exit 1
fi
if [ -z ${intershop_rpc_host+x} ]; then
echo "$script_path"': need variable $intershop_rpc_host'
exit 1
fi
if [ -z ${intershop_guest_modules_path+x} ]; then
echo "$script_path"': need variable $intershop_guest_modules_path'
exit 1
fi
#-----------------------------------------------------------------------------------------------------------
# echo "trying to restart RPC server at $intershop_rpc_path"
echo "trying to restart RPC server at $intershop_rpc_host:$intershop_rpc_port"
# Send the JSON restart command; netcat's exit status tells us whether a
# server was listening ('set -e' is suspended so the status can be inspected).
set +e
echo '["restart"]' | netcat "$intershop_rpc_host" "$intershop_rpc_port" ; status=$?
set -e
echo "status: $status"
if [[ $status -eq 0 ]]; then
echo 'OK'
else
# No server answered: launch the primary RPC server in the background.
echo 'failed, launching RPC server...'
node "$intershop_guest_modules_path"/intershop-rpc-server-primary.js &
# primary_pid=$!
# echo "PID of RPC primary: $primary_pid"
fi
| true
|
0414c5ab139b09da54d40f34c10f65f11513c2c8
|
Shell
|
Uname-a/knife_scraper
|
/uninstall.sh
|
UTF-8
| 435
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# uninstall.sh - shell script to delete installed files from the sopel runtime
# directory (ideally pip or setup.py would manage this instead)
#
# Copyright (c) 2015 Casey Bartlett <caseytb@bu.edu>
#
# See LICENSE for terms of usage, modification and redistribution.

# BUG FIX: the assignment previously read "sopel_dir = ..." — with spaces
# around '=', bash tries to run a command named 'sopel_dir' instead of
# assigning the variable.
sopel_dir=~/.sopel/knifeclub_modules

# Remove the plugin modules only when the directory actually exists.
if [ -d "$sopel_dir" ]
then
    rm "$sopel_dir/extract_blade_info.py"
    rm "$sopel_dir/ddg.py"
    rm "$sopel_dir/bhq_query.py"
fi
| true
|
94750583100044958e6ec13c3874b0639db9d75d
|
Shell
|
loigu/termuxAndroidHelpers
|
/bin/postsignum-signature-req-gen.sh
|
UTF-8
| 796
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate a PostSignum certificate renewal request: create a fresh 4096-bit
# RSA key pair and derive a CSR that reuses the old certificate's subject.
#
# usage: postsignum-signature-req-gen.sh oldprefix newprefix

# BUG FIX: the original closed this block with "f" instead of "fi" — a syntax
# error that prevented the whole script from running. The deprecated
# "-o" test operators were also replaced with [[ ... || ... ]].
if [[ ! -f "$1.pem" || -z "$2" || "$1" == "-h" ]]; then
    echo "usage: $0 oldprefix newprefix"
    echo "example - $0 jirizouhar2020 jirizoubar2023"
    exit
fi

past="$1"
new="$2"

# Generate the new key pair: PKCS#8 private key plus the public key.
openssl genrsa -out tempkey 4096
openssl pkcs8 -topk8 -in tempkey -out "$new.key"
openssl rsa -pubout -in "tempkey" > "$new.pub"
rm tempkey

# Build a renewal request reusing the subject of the old certificate.
openssl x509 -x509toreq -in "$past.pem" -out "$new.req" -key "$new.key"

# Consistency checks: the modulus digests of request and key must match.
echo "req md5:"
openssl req -noout -modulus -in "$new".req | openssl md5
echo "key md5:"
openssl rsa -noout -modulus -in "$new".key | openssl md5
echo "key check:"
openssl rsa -in "$new.key" -check
echo "req check:"
openssl req -text -noout -verify -in "$new".req

# clear gen - fields from scratch
# openssl req -out CSR.csr -new -key jirizouhar2023.key
| true
|
a298a71a98d7c99ad0fdcd59b6d7a99584cdca13
|
Shell
|
CBIIT/ChIP_seq
|
/scripts/runBedCov4bam.SpikeIn.sh
|
UTF-8
| 1,451
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Per-region read coverage for four ChIP-seq BAMs over a BED file, scaled to
# RRPM (reads per million spike-in-mapped reads) using each sample's spike-in
# mapping totals.
#
# Required environment variables: out_file, bed_file, bam_1, bam_2, bam_3, bam_4.
module load bedtools

out_file_count=${out_file}.cov.raw.bed
out_file_spikescaled=${out_file}.cov.RRPM.bed

# Raw per-region counts: output columns 4-7 hold the counts for bam_1..bam_4.
bedtools multicov -bams $bam_1 $bam_2 $bam_3 $bam_4 -bed $bed_file > $out_file_count

bam_path1=`dirname $bam_1`
bam_path2=`dirname $bam_2`
bam_path3=`dirname $bam_3`
bam_path4=`dirname $bam_4`

# Spike-in-mapped read totals per sample.
# NOTE(review): 'grep -n 2' matches any line containing "2" (with line-number
# prefixes); presumably it targets line 2 of spike_map_summary — confirm the
# file's format before relying on this.
total_reads1=`grep -n 2 $bam_path1/SpikeIn/spike_map_summary | cut -f 3`
total_reads2=`grep -n 2 $bam_path2/SpikeIn/spike_map_summary | cut -f 3`
total_reads3=`grep -n 2 $bam_path3/SpikeIn/spike_map_summary | cut -f 3`
total_reads4=`grep -n 2 $bam_path4/SpikeIn/spike_map_summary | cut -f 3`

# Debug output. BUG FIX: the original ran 'echo total_reads1' (and friends),
# printing the literal variable names instead of their values, and duplicated
# each grep just to show the value.
echo "total_reads1=$total_reads1"
echo "total_reads2=$total_reads2"
echo "total_reads3=$total_reads3"
echo "total_reads4=$total_reads4"

# Scale each raw count to reads-per-million of its sample's spike-in total.
awk -F "\t" -v total_reads1="$total_reads1" -v total_reads2="$total_reads2" -v total_reads3="$total_reads3" -v total_reads4="$total_reads4" 'BEGIN{OFS="\t"}{print $1,$2,$3,$4*1000000/total_reads1,$5*1000000/total_reads2,$6*1000000/total_reads3,$7*1000000/total_reads4}' $out_file_count > $out_file_spikescaled

# Make inputs and outputs group-accessible to the lab group.
chgrp khanlab $bam_1
chgrp khanlab $bam_2
chgrp khanlab $bam_3
chgrp khanlab $bam_4
chgrp khanlab $bed_file
chgrp khanlab $out_file_count
chgrp khanlab $out_file_spikescaled
| true
|
c82178849ebd7d318d1a1e9719e751350f9d0b7f
|
Shell
|
nickv-nextcloud/travis-branch-only-update-script
|
/update.sh
|
UTF-8
| 787
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Checkout and update the branch on all repos

readonly WORK_BRANCH="update-travis-branches-only-setting"

# Print a section heading followed by its underline.
print_step() {
  echo "$1"
  echo "======================"
}

print_step "Checkout master"
git checkout master
echo ""

print_step "Delete existing branch"
git branch -D "$WORK_BRANCH"
echo ""

print_step "Checkout branch"
git checkout -b "$WORK_BRANCH"
echo ""

print_step "Update .travis.yml"
python "$(dirname "$0")"/update.py
echo ""

print_step "Add file to git status"
git add .travis.yml
echo ""

print_step "Commit branch"
git commit -m "Update the branches.only setting in .travis.yml"
echo ""

print_step "Push branch"
git push origin "$WORK_BRANCH"
| true
|
b5530ac533cbf6df8074974419084a7c71df206c
|
Shell
|
FireLemons/Abbreviation-Autocomplete
|
/.git-hooks/pre-commit
|
UTF-8
| 1,163
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Git pre-commit hook: ensure abbreviation-autocomplete.min.js is regenerated
# and staged whenever abbreviation-autocomplete.js is committed.

# NOTE(review): the availability check is for `minify`, but the minification
# below runs `npx minify` — confirm both resolve to the same tool.
MINIFY_COMMAND="minify"
if ! command -v $MINIFY_COMMAND &> /dev/null;then
echo "command $MINIFY_COMMAND could not be found"
exit 1
fi

# Modification times (epoch seconds) of the source and the minified bundle.
JSLastModified=$(date -r abbreviation-autocomplete.js +%s)
minifiedJSLastModified=$(date -r abbreviation-autocomplete.min.js +%s)

# Staged files
changed_files=($(git diff --name-only --cached))

# Record which of the two files are part of this commit.
for file in "${changed_files[@]}"; do
if [[ $file == abbreviation-autocomplete.js ]];then
JSChanged=1
elif [[ $file == abbreviation-autocomplete.min.js ]];then
minifiedJSChanged=1
fi
done

# The source is being committed while the minified file is stale or not
# staged: regenerate it when stale, then always abort the commit so the user
# stages the fresh .min.js and recommits.
# NOTE(review): the messages below contain typos ("minfied", "not stage") —
# fix when error strings may be changed.
if [[ $JSChanged && ($JSLastModified -gt $minifiedJSLastModified || ! $minifiedJSChanged) ]];then
if [[ $JSLastModified -gt $minifiedJSLastModified ]]; then
echo "ERROR: staged minified version is not up to date"
echo "INFO: Auto minifying"
npx minify abbreviation-autocomplete.js > abbreviation-autocomplete.min.js
fi
echo "ERROR: updated minfied file not stage for commit"
echo "INFO: Please add abbreviation-autocomplete.min.js and recommit"
exit 1
elif [[ $JSChanged ]];then
echo "INFO: minified js has been modified after original js file and is believed to be up to date"
fi
| true
|
304ad6d2b3e1e07fed33fd02c26cad2b8f4ae697
|
Shell
|
yradsmikham/openhack-devops
|
/build.sh
|
UTF-8
| 1,142
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build and push the POI API container image to the team's Azure Container
# Registry (ACR), tagged as <acr-login-server>/devopsoh/<imageTag>.
#
# Expects in the environment: buildFlavor, resourceGroupName, imageTag,
# relativeSaveLocation, dnsUrl, registryName.

# FIXME(security): a live username/password is hard-coded and committed to
# source control. Rotate these credentials and inject them via environment
# variables or a secret store (e.g. `az login --service-principal`).
az login -u 'hacker6@OTAPRD170ops.onmicrosoft.com' -p 'haAWU@8$HbU0'

echo "Build Flavor:" "$buildFlavor"
echo "Resource Group:" "$resourceGroupName"
echo "Image:" "$imageTag"
echo "Relative save location:" "$relativeSaveLocation"
echo "DNS Url:" "$dnsUrl"

# Get the ACR login server to tag the image with.
# Consistency fix: use $() and `jq -r` (raw output) like the credential query
# below, instead of backticks plus a sed to strip quotes.
ACR_ID=$(az acr list -g "$resourceGroupName" --query "[].{acrLoginServer:loginServer}" --output json | jq -r '.[].acrLoginServer')
echo "ACR ID: "$ACR_ID

# Get the ACR admin password and log in to the registry.
# Security fix: feed the password over stdin so it does not appear in `ps`.
acrPassword=$(az acr credential show -n "$registryName" -o json | jq -r '[.passwords[0].value] | .[]')
printf '%s' "$acrPassword" | docker login "$ACR_ID" -u "$registryName" --password-stdin
echo "Authenticated to ACR with username and password"

TAG="$ACR_ID/devopsoh/$imageTag"
echo "TAG: "$TAG

# Robustness fix: abort if the source directory is missing rather than
# building from the wrong working directory.
cd apis/poi/web || exit 1
docker build . -t "$TAG"
docker push "$TAG"
echo -e "\nSuccessfully pushed image: "$TAG
| true
|
7d4c6163e1d2a24868c07b448eb036153e73ee92
|
Shell
|
ramr/dotfiles
|
/scripts/backup-media-volumes
|
UTF-8
| 1,024
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Back up the Media volume and per-user directories to the Backups volume
# using bandwidth-limited rsync.

readonly SRC="/Volumes/Media"      # backup source volume
readonly DEST="/Volumes/Backups"   # backup destination volume
# rsync bandwidth cap (--bwlimit units); override via BANDWIDTH_LIMIT.
readonly BWLIMIT=${BANDWIDTH_LIMIT:-1000}
# Validate arguments, then rsync one directory tree into the destination.
# Exits the whole script with status 76 when either argument is invalid.
_backup_dir() {
  local source_path=$1
  local target_path=$2

  if [[ -z "${source_path}" || ! -d "${source_path}" ]]; then
    echo "ERROR: Invalid source directory ${source_path}"
    exit 76
  fi

  if [[ -z "${target_path}" ]]; then
    echo "ERROR: Invalid destination directory ${target_path}"
    exit 76
  fi

  mkdir -p "${target_path}"

  echo " - Backing up ${source_path} to ${target_path} ..."
  rsync -azvhi --bwlimit="${BWLIMIT}" --progress "${source_path}" "${target_path}"
}
# Mirror the top-level Media directory into the destination volume.
_backup_media() { _backup_dir "${SRC}/Media" "${DEST}/"; }
# Back up every real (non-symlink) directory under ${SRC}/Users into
# ${DEST}/Users/. Symlinks and plain files are skipped.
_backup_user_directories() {
  local entry
  for entry in "${SRC}"/Users/*; do
    # Skip symlinks and anything that is not a directory.
    [[ -L "${entry}" || ! -d "${entry}" ]] && continue
    _backup_dir "${entry}" "${DEST}/Users/"
  done
}
#
# main():
#
# Run both backup passes; each exits the script with 76 on an invalid path.
_backup_media
_backup_user_directories
| true
|
f5898228a8ce9743e0bd97aea94946295c1619bd
|
Shell
|
lishuna/mac-local-config
|
/技术类/run.sh-docker配置
|
UTF-8
| 1,143
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: start base services, sync nginx config and application
# code from version control, then launch nginx + php-fpm and the monitor loop.
#
# Expects in the environment: APPID, GITADDR, BRANCH; optionally DEPLOY_BRANCH.
set -x

# start ssh service
/etc/init.d/sshd start
/etc/init.d/rsyslog start

# 1 clone nginx files (pull if the checkout already exists)
if [ -d "/nginx-file" ]; then
    cd /nginx-file
    git pull
else
    git clone git@jdb-dev.com:docker/nginx-file.git
    if [ -n "$DEPLOY_BRANCH" ]; then
        cd /nginx-file
        git checkout "$DEPLOY_BRANCH"
    fi
fi

# 2 copy this application's nginx files into the live config directory
# (`yes |` answers any interactive overwrite prompts; glob stays unquoted)
yes|cp -rf /nginx-file/"$APPID"/conf/* /data/nginx/conf/

# 3 clone app codes — subversion or git, chosen from the repository URL
if [[ $GITADDR =~ "svn" ]];then
    if [ `ls /data/apps|wc -l` -eq 0 ]; then
        svn co "$GITADDR" /data/apps
    else
        cd /data/apps
        svn update
    fi
else
    if [ `ls /data/apps|wc -l` -eq 0 ]; then
        git clone "$GITADDR" -b "$BRANCH" /data/apps
    else
        cd /data/apps
        git pull
    fi
fi

# 4 deploy — run the app-specific deploy hook when present
if [ -f "/nginx-file/$APPID/deploy.sh" ]; then
    sh "/nginx-file/$APPID/deploy.sh"
fi

# 5 start services
# Bug fix: use mkdir -p so a restarted container (directory already present)
# does not fail here.
mkdir -p /data/logs/php7
killall nginx
killall php-fpm
/data/nginx/sbin/nginx -c /data/nginx/conf/nginx.conf
/data/php7/sbin/php-fpm
chown -R nginx:nginx /data/apps
chown -R nginx:nginx /data/logs
source /monitor.sh
| true
|
23246996cbdb70a95d150622d22438e48874ffdb
|
Shell
|
xaliq2299/UFAZ-BSc
|
/L1/S1/Linux/BASH_MAGAUD/ex1.sh
|
UTF-8
| 165
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Greet the current user, show today's date, and report root's login shell.

echo --- Hello $USER

# Prompt and date on a single line.
echo -n "Today is: "
date

# Field 7 of root's /etc/passwd entry is the login shell (-d: splits on ':').
root_shell=$(grep "^root" /etc/passwd | cut -d: -f7)
echo "root account has the following shell: " $root_shell
| true
|
0fba80c45729bb80b411b901dd5521eeef9cb8b5
|
Shell
|
wutao0914/DataX
|
/core/src/main/bin/stop.sh
|
UTF-8
| 167
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Stop the running DataX job(s): kill every PID recorded in datax.pid, then
# remove the pid file.

# Robustness fix: bail out gracefully when there is no pid file instead of
# failing on the input redirection below.
if [ ! -f datax.pid ]; then
    echo "==> no datax.pid file found, nothing to stop"
    exit 0
fi

while read -r line
do
    echo "==> kill datax app, PID: ${line}"
    # Force-kill; stale PIDs are tolerated by discarding the error.
    kill -9 "${line}" 2>/dev/null
done < datax.pid

# The pid file must be removed after the kill.
rm -f datax.pid
| true
|
75566c761d57016a3546d435e1ce4eb80303af37
|
Shell
|
augustobmoura/dotfiles
|
/bin/hibernate
|
UTF-8
| 249
| 3.34375
| 3
|
[] |
no_license
|
#! /bin/sh
# Dispatch on the name this script was invoked as (suspend/hibernate) and
# delegate to systemd when available. `is_executable` is an external helper.

invoked_as=$(basename "$0")

# Non-empty when systemctl is available.
SYSTEMD=$(is_executable systemctl && echo 1)

case "$invoked_as" in
  suspend)
    if [ "$SYSTEMD" ]; then
      exec systemctl suspend
    fi
    ;;
  hibernate)
    if [ "$SYSTEMD" ]; then
      exec systemctl hibernate
    fi
    ;;
esac
| true
|
c0b0d0167d4338fc1621f321658b6eb88b077178
|
Shell
|
Metnew/DIR-615-S-3.02
|
/listen-for-dump.sh
|
UTF-8
| 174
| 2.59375
| 3
|
[] |
no_license
|
# Repeatedly receive a tar archive over TCP port 8888 and unpack it into
# ./dump. Runs forever; one archive per connection.
mkdir ./dump
# One upload per iteration: nc exits when the sender closes the connection,
# then the received tar is extracted and deleted before listening again.
while true; do
nc -l 8888 >> ./dump/archive.tar
echo "Decompressing archive.tar"
tar -xf ./dump/archive.tar -C ./dump
rm ./dump/archive.tar
done
| true
|
23f5376878cafea1b0a3ed45af1357c706e47bdd
|
Shell
|
m4n0v31/stratis-dockerfile
|
/auto-stake.sh
|
UTF-8
| 342
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launch the Stratis daemon and, after a startup grace period, unlock the
# wallet for staking. Expects STRATIS_DATA_DIR and WALLET_PASSPHRASE in the
# environment.
set -e
echo "Starting Stratis daemon..."
# Daemon runs in the background so this script can continue.
stratisd -datadir=$STRATIS_DATA_DIR -rescan -detachdb &
echo "Waiting for daemon before unlocking wallet..."
# Fixed grace period for the daemon to come up.
# NOTE(review): a readiness poll would be more reliable than a blind sleep.
sleep 120
echo "Unlocking wallet for staking..."
# Unlock for 9999999 seconds; the final `true` presumably requests a
# staking-only unlock — confirm against stratisd's walletpassphrase docs.
# NOTE(review): passing the passphrase on argv leaks it via `ps`.
stratisd -datadir=$STRATIS_DATA_DIR walletpassphrase $WALLET_PASSPHRASE 9999999 true
echo "Wallet unlocked successfully."
# Keep the script in the foreground for the container supervisor.
wait
| true
|
fe0f1f81059028a69583f30880aa4a8dbe5e57b1
|
Shell
|
abislam/H4D_SimplifyingSystemsAdministration
|
/IndividualPolicies/v71939.sh
|
UTF-8
| 929
| 3.46875
| 3
|
[] |
no_license
|
###############################################V-71939#############################################
# STIG check V-71939: the SSH daemon must not permit empty passwords.
# Prints a banner, inspects sshd_config, and interactively walks the operator
# through remediation when the setting is missing or wrong.
cat << inV71939
--------------------In V-71939 Script-------------------
Configuring this setting for the SSH daemon provides additional assurance that remote logon via SSH will require a password, even in the event of misconfiguration elsewhere.
inV71939
# Current PermitEmptyPasswords line (case-insensitive), if any.
ssh="$(grep -i PermitEmptyPasswords /etc/ssh/sshd_config)"
if [[ $ssh == *"PermitEmptyPasswords no"* ]];
then
echo "Everything looks good"
else
# Prompt until the operator answers y (done) or n (skip); the actual config
# edit is performed manually by the operator, both answers just continue.
while true; do
echo "Add or correct the following line in '/etc/ssh/sshd_config':
PermitEmptyPasswords no"
read -p "Make your changes now and type y to continue or n to skip policy. y/n" yn
case $yn in
[Yy]* ) break;;
[Nn]* ) break;;
* ) echo "Please answer yes or no.";;
esac
done
fi
cat << outV71939
--------------------Out V-71939 Script-------------------
outV71939
| true
|
ffaae5077fab9e69383b37d18c360373c64370f4
|
Shell
|
dziq/configs
|
/bin/battery.sh
|
UTF-8
| 387
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/sh
# Show a battery warning (via showbatt) when running on battery power with
# charge at or below 90%.

# On AC power there is nothing to warn about.
if grep -q on-line /proc/acpi/ac_adapter/ACAD/state; then
    exit 0
fi

BAT_DIR=/proc/acpi/battery/BAT1
FULL_BAT=$(grep 'last full capacity' ${BAT_DIR}/info | awk '{ print $4 }')
CUR_BAT=$(grep 'remaining capacity' ${BAT_DIR}/state | awk '{ print $3 }')

# Robustness fix: bail out quietly when the ACPI readings are unavailable
# instead of feeding empty strings into the arithmetic below.
if [ -z "$FULL_BAT" ] || [ -z "$CUR_BAT" ]; then
    exit 0
fi

# Charge percentage; $(( )) replaces the deprecated external `expr`.
AVG=$(( CUR_BAT * 100 / FULL_BAT ))

if [ "$AVG" -le "90" ]; then
    /usr/bin/showbatt
fi
| true
|
49597030869cf6518f8690c82c26e7773cdc924e
|
Shell
|
pgporada/pats
|
/pats_engine.bash
|
UTF-8
| 2,463
| 4.34375
| 4
|
[] |
no_license
|
#!/bin/bash
# AUTHOR:
# Phil Porada - philporada@gmail.com
# LICENSE: GPLv3
# HOW:
# This file is sourced by each test_*.bash file
# You can check the return code of each test_*.bash file to automate
#
######################################

# Global state shared by the functions below; flags are set by the getopts
# loop at the bottom of this file.
USE_COLOR=0        # -c: colorize pass/fail markers
GRN=""             # green escape sequence (empty unless -c)
RED=""             # red escape sequence (empty unless -c)
RST=""             # reset-attributes escape sequence (empty unless -c)
YEL=""             # yellow escape sequence (empty unless -c)
DEBUG_OUTPUT=0     # -d: show the output of the commands under test
COMPACT_OUTPUT=0   # -t: compact ".F"-style output
ERRORS=0           # running count of failed checks (incremented by checker)
# Print the help text; the command substitution embeds this script's basename
# into the usage line. Exits are handled by the caller.
usage() {
echo -e "
USAGE:
./"$(basename ${0})" [-c] [-d] [-h] [-t]
-c | Color output, defaults to uncolored
-d | Show output from the commands you're testing, defaults to off
-h | Show this help menu
-t | Output in compact format. Example: ..F....FFF.
LICENSE:
GNU GPLv3
AUTHOR:
Phil Porada
"
}
# Translate an exit status into the global CHECKER marker — "ok"/"." on
# success, "not ok"/"F" on failure (long vs compact form) — and bump the
# global ERRORS counter on any failure.
checker() {
    local rc=${1}
    shift

    # Selector is "<status>:<compact-flag>"; first two arms are the success
    # markers, third is the long failure marker, default is the compact one.
    case "${rc}:${COMPACT_OUTPUT}" in
        0:0) CHECKER="${GRN}ok${RST}" ;;
        0:1) CHECKER="${GRN}.${RST}" ;;
        *:0) CHECKER="${RED}not ok${RST}" ;;
        *)   CHECKER="${RED}F${RST}" ;;
    esac

    if [ "${rc}" -ne 0 ]; then
        ERRORS=$((ERRORS + 1))
    fi
}
# Build the global OUTPUT label for the current test, prefixing a colored
# DEBUG marker when debug mode is on.
test_name() {
    local label="${@}"
    shift

    OUTPUT="${label}"
    if [ "${DEBUG_OUTPUT}" -eq 1 ]; then
        OUTPUT="${YEL}DEBUG${RST} - ${label}"
    fi
}
# Execute a test command (silencing its output unless debug mode is on),
# record its status via checker(), then print the result line built from
# the CHECKER marker and the OUTPUT label prepared by test_name().
test_runner() {
    local cmd="${@}"
    shift

    if [ "${DEBUG_OUTPUT}" -eq 0 ]; then
        eval "${cmd}" > /dev/null 2>&1
    else
        eval "${cmd}"
    fi
    checker $?

    # Compact formatting only applies while debug output is off; every other
    # flag combination prints the full "<marker> - <name>" line.
    if [ "${COMPACT_OUTPUT}" -eq 1 ] && [ "${DEBUG_OUTPUT}" -eq 0 ]; then
        echo -n "${CHECKER}"
    else
        echo "${CHECKER} - ${OUTPUT}"
    fi
}
# Parse command-line flags; see usage() for what each one means.
while getopts ":cdht" flag; do
    case "${flag}" in
        c)
            # Color mode: populate the escape-sequence globals via tput.
            USE_COLOR=1
            GRN="$(tput setaf 2)"
            RED="$(tput setaf 1)"
            YEL="$(tput setaf 3)"
            RST="$(tput sgr0)"
            ;;
        d)
            DEBUG_OUTPUT=1
            ;;
        t)
            COMPACT_OUTPUT=1
            ;;
        h)
            usage
            exit 0
            ;;
        \?)
            echo "Invalid option: -$OPTARG" >&2
            exit 1
            ;;
    esac
done
| true
|
94c10c1e8a1419ad3cfd4d7ae7c3405e2c138ffe
|
Shell
|
lmiori92/lorenz-onboard
|
/candump_to_binary.sh
|
UTF-8
| 936
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Convert a candump text log into a raw binary stream.
#
# Input lines look like:
#   (000.000000) can0 666 [4] 48 41 43 4B 'HACK'
#   (000.002008) can0 666 [8] 23 0C C5 0B C3 0B 01 01 '#.......'
# The payload of every 8-byte frame with CAN id 666 is appended verbatim to
# <logfile>.bin.
#
# Usage: candump_to_binary.sh <candump-logfile>

# Robustness fix: fail with a usage message when no log file is given
# (previously the redirection below errored with an empty filename).
if [ -z "$1" ]; then
    echo "usage: $0 <candump-logfile>" >&2
    exit 1
fi

filename=$1
outfilename="$filename.bin"
rm -f "$outfilename"

# Performance fix: let `read` split each line into its whitespace-separated
# fields directly instead of spawning ten gawk processes per input line.
while read -r _timestamp _iface ID DLC BYTE0 BYTE1 BYTE2 BYTE3 BYTE4 BYTE5 BYTE6 BYTE7 _rest; do
    if [ "$ID" == "666" ] && [ "$DLC" == "[8]" ]
    then
        # Emit the eight payload bytes as raw binary.
        echo -n -e "\\x$BYTE0\\x$BYTE1\\x$BYTE2\\x$BYTE3\\x$BYTE4\\x$BYTE5\\x$BYTE6\\x$BYTE7" >> "$outfilename"
    fi
    # Progress echo of the line just processed.
    echo "$_timestamp $_iface $ID $DLC $BYTE0 $BYTE1 $BYTE2 $BYTE3 $BYTE4 $BYTE5 $BYTE6 $BYTE7 $_rest"
done < "$filename"
| true
|
5123792a37e8a3df3fce4e0ad2f8277fcc3bfe88
|
Shell
|
yajamon/dotfiles
|
/install.sh
|
UTF-8
| 365
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap the dotfiles: clone the repository (with submodules), run its
# setup script, then run the OS-specific init script.

DOTFILES_REPO="https://github.com/yajamon/dotfiles.git"
DOT_PATH=$HOME/dotfiles

# Idiom fix: `type -a git` printed the resolved path(s) to stdout on success;
# `command -v` with discarded output is the conventional availability check.
if ! command -v git > /dev/null 2>&1; then
    echo "REQUIRED COMMAND: git" >&2
    exit 1
fi

git clone --recursive "$DOTFILES_REPO" "$DOT_PATH"
"$DOT_PATH/setup.sh"

# util.sh provides the is_osx helper used below.
source "$DOT_PATH/etc/lib/util.sh"
if is_osx; then
    bash "$DOT_PATH/etc/init/osx.sh"
else
    bash "$DOT_PATH/etc/init/linux.sh"
fi
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.