blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b959ebac685979ebc83ef02f1f1781f3733df864 | Shell | tailscale/tailscale | /release/dist/synology/files/scripts/start-stop-status | UTF-8 | 3,197 | 4.03125 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Synology DSM package control script for tailscaled (start-stop-status).
# DSM invokes this script with one of: start | stop | status | log.
SERVICE_NAME="tailscale"
# DSM 6 keeps package state under /var/packages; DSM 7 supplies SYNOPKG_PKGVAR.
if [ "${SYNOPKG_DSM_VERSION_MAJOR}" -eq "6" ]; then
PKGVAR="/var/packages/Tailscale/etc"
else
PKGVAR="${SYNOPKG_PKGVAR}"
fi
PID_FILE="${PKGVAR}/tailscaled.pid"
LOG_FILE="${PKGVAR}/tailscaled.stdout.log"
STATE_FILE="${PKGVAR}/tailscaled.state"
SOCKET_FILE="${PKGVAR}/tailscaled.sock"
PORT="41641"
SERVICE_COMMAND="${SYNOPKG_PKGDEST}/bin/tailscaled \
--state=${STATE_FILE} \
--socket=${SOCKET_FILE} \
--port=$PORT"
# On DSM 7 without a tun device, fall back to userspace networking.
if [ "${SYNOPKG_DSM_VERSION_MAJOR}" -eq "7" -a ! -e "/dev/net/tun" ]; then
# TODO(maisem/crawshaw): Disable the tun device in DSM7 for now.
SERVICE_COMMAND="${SERVICE_COMMAND} --tun=userspace-networking"
fi
# On DSM 6 the daemon runs as the "tailscale" user, which must own its state dir.
if [ "${SYNOPKG_DSM_VERSION_MAJOR}" -eq "6" ]; then
chown -R tailscale:tailscale "${PKGVAR}/"
fi
# Start tailscaled in the background. The log is truncated on each start and
# sed caps it at 200 lines of daemon output; the background job's process
# group leader PID is recorded in PID_FILE.
start_daemon() {
local ts=$(date --iso-8601=second)
echo "${ts} Starting ${SERVICE_NAME} with: ${SERVICE_COMMAND}" >${LOG_FILE}
STATE_DIRECTORY=${PKGVAR} ${SERVICE_COMMAND} 2>&1 | sed -u '1,200p;201s,.*,[further tailscaled logs suppressed],p;d' >>${LOG_FILE} &
# We pipe tailscaled's output to sed, so "$!" retrieves the PID of sed not tailscaled.
# Use jobs -p to retrieve the PID of the most recent process group leader.
jobs -p >"${PID_FILE}"
}
# Send SIGTERM to the recorded PID; if wait_for_status does not observe the
# daemon stopping within its ~20 s grace period, escalate to SIGKILL.
stop_daemon() {
if [ -r "${PID_FILE}" ]; then
local PID=$(cat "${PID_FILE}")
local ts=$(date --iso-8601=second)
echo "${ts} Stopping ${SERVICE_NAME} service PID=${PID}" >>${LOG_FILE}
kill -TERM $PID >>${LOG_FILE} 2>&1
wait_for_status 1 || kill -KILL $PID >>${LOG_FILE} 2>&1
rm -f "${PID_FILE}" >/dev/null
fi
}
# Returns 0 when the recorded PID is alive, 1 otherwise.
# A stale PID file (process gone) is removed as a side effect.
daemon_status() {
if [ -r "${PID_FILE}" ]; then
local PID=$(cat "${PID_FILE}")
if ps -o pid -p ${PID} > /dev/null; then
return
fi
rm -f "${PID_FILE}" >/dev/null
fi
return 1
}
# Poll daemon_status once per second until its exit code equals $1
# (0 = running, 1 = stopped); give up and return 1 after 20 tries.
wait_for_status() {
# 20 tries
# sleeps for 1 second after each try
local counter=20
while [ ${counter} -gt 0 ]; do
daemon_status
[ $? -eq $1 ] && return
counter=$((counter - 1))
sleep 1
done
return 1
}
# Create /dev/net/tun (char device major 10, minor 200) and load the tun
# kernel module if needed. Skipped on DSM 7 (userspace networking is used).
ensure_tun_created() {
if [ "${SYNOPKG_DSM_VERSION_MAJOR}" -eq "7" ]; then
# TODO(maisem/crawshaw): Disable the tun device in DSM7 for now.
return
fi
# Create the necessary file structure for /dev/net/tun
if ([ ! -c /dev/net/tun ]); then
if ([ ! -d /dev/net ]); then
mkdir -m 755 /dev/net
fi
mknod /dev/net/tun c 10 200
chmod 0755 /dev/net/tun
fi
# Load the tun module if not already loaded
if (!(lsmod | grep -q "^tun\s")); then
insmod /lib/modules/tun.ko
fi
}
# Entry point: dispatch on the action requested by DSM.
case $1 in
start)
if daemon_status; then
exit 0
else
ensure_tun_created
start_daemon
exit $?
fi
;;
stop)
if daemon_status; then
stop_daemon
exit $?
else
exit 0
fi
;;
status)
# DSM convention: exit 0 = running, exit 3 = stopped.
if daemon_status; then
echo "${SERVICE_NAME} is running"
exit 0
else
echo "${SERVICE_NAME} is not running"
exit 3
fi
;;
log)
exit 0
;;
*)
echo "command $1 is not implemented"
exit 0
;;
esac
| true |
3c548c9f0be83765a66c53f6152a4f64f0fe9475 | Shell | llatika/SALBP2 | /simulated-annealing/solve.sh | UTF-8 | 895 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/bin/bash -e
# Run the simulated-annealing SALBP-2 solver over a fixed set of instance /
# station-count configurations, once per random seed 0..9. Each run writes
# its solution to solutions/<label>-<seed>.sol.
mkdir -p solutions
steps=200000
for seed in 0 1 2 3 4 5 6 7 8 9; do
  # Columns: <instance file> <number of stations> <output label>
  while read -r instance stations label; do
    ./salbp2-sa "instances/$instance" "$stations" "$seed" "$steps" > "solutions/$label-$seed.sol"
  done <<'CONFIGS'
HAHN.IN2 3 hahn3
HAHN.IN2 8 hahn8
HAHN.IN2 10 hahn10
LUTZ3.IN2 3 lutz3
LUTZ3.IN2 20 lutz20
LUTZ3.IN2 5 lutz5
WEE-MAG.IN2 3 weemag3
WEE-MAG.IN2 10 weemag10
WEE-MAG.IN2 20 weemag20
WEE-MAG.IN2 30 weemag30
CONFIGS
done
| true |
b7ff8af1efca11fc500dcbf689355cedde0f9bf3 | Shell | liqun1981/Misc-Scripts | /HYCOMtemplate/getGrid.bash | UTF-8 | 1,957 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# Download a HYCOM GLBa0.08 subset around the bounding box in ./myLatLon.txt
# via the THREDDS NetCDF Subset Service, then rename dimensions/variables to
# ROMS-style names using NCO's ncrename.
if [ ! -d "data" ]; then
mkdir data
fi
# Read the bounding box from myLatLon.txt and pad every side by 0.08 degrees
# (one GLBa0.08 grid cell).
latMin=`grep latMin ./myLatLon.txt | cut -d "=" -f2`
latMin=`echo "$latMin - .08" |bc`
echo "latMin = " $latMin
lonMin=`grep lonMin ./myLatLon.txt | cut -d "=" -f2`
lonMin=`echo "$lonMin - .08" |bc`
echo "lonMin = " $lonMin
latMax=`grep latMax ./myLatLon.txt | cut -d "=" -f2`
latMax=`echo "$latMax + .08" |bc`
echo "latMax = " $latMax
lonMax=`grep lonMax ./myLatLon.txt | cut -d "=" -f2`
lonMax=`echo "$lonMax + .08" |bc`
echo "lonMax = " $lonMax
#part1='http://ncss.hycom.org/thredds/ncss/GLBu0.08/expt_91.2?var=surf_el&var=water_temp&north='$latMax'&west='$lonMin'&east='$lonMax'&south='$latMin'&disableProjSubset=on&horizStride=1&time_start='
#part2='T00%3A00%3A00Z&time_end='
#part3='T00%3A00%3A00Z&timeStride=1&vertCoord=&accept=netcdf'
# Assemble the NCSS request URL piece by piece (variables, bounding box,
# subset options, and the time window filled in below).
part1='http://ncss.hycom.org/thredds/ncss/GLBa0.08/expt_91.2/2016?var=ssh&var=salinity&var=temperature&var=u&var=v'
part2='&north='$latMax'&west='$lonMin'&east='$lonMax'&south='$latMin
part3='&disableProjSubset=on&horizStride=1&time_start='
part4='T00%3A00%3A00Z&time_end='
part5='T00%3A00%3A00Z&timeStride=1&vertStride=1&addLatLon=true&accept=netcdf'
echo $part1
echo $part2
echo $part3
echo $part4
echo $part5
# pick any file for the "grid" file
year=2016
month='04'
day='25'
echo $year
echo $month
echo $day
date="$year-$month-$day"
echo $date
outFile="HYCOM_GLBa0.08_PALAU_grid.nc"
echo $outFile
# Same date is used for time_start and time_end: a single time slice.
myURL=$part1$part2$part3$date$part4$date$part5
echo $myURL
wget -O $outFile $myURL
# Rename HYCOM dimensions/variables to the names expected downstream
# (-O overwrite, -h do not append to the history attribute).
ncrename -O -h -d MT,ocean_time $outFile
ncrename -O -h -d Depth,z $outFile
ncrename -O -h -v MT,ocean_time $outFile
ncrename -O -h -v Depth,z $outFile
ncrename -O -h -v temperature,temp $outFile
ncrename -O -h -v salinity,salt $outFile
ncrename -O -h -v Latitude,lat $outFile
ncrename -O -h -v Longitude,lon $outFile
| true |
d29c56fc73af48f989c9851aa62acedc99f6ce79 | Shell | AlexConnat/MPC-Aggreg | /setup.sh | UTF-8 | 488 | 3.203125 | 3 | [] | no_license | bold=$(tput bold)
normal=$(tput sgr0)

# Print a bold "[*]" banner announcing the next install step.
# ($bold is defined just above this block.)
banner() {
  echo "${bold}[*] $1${normal}"
}

banner "Updating package source lists..."
sudo apt update
echo ""

banner "Installing python3 and pip3"
sudo apt install python3 python3-pip -y
echo ""

banner "Installing libgmp, libmpfr, and libmpc for gmpy2"
sudo apt install libgmp-dev libmpfr-dev libmpc-dev -y
echo ""

banner "Installing numpy, gmpy2, and mpyc"
| true |
3989c5568d77388fec1903849f580f0205853c3d | Shell | pabagan/knowledgebase | /hashicorp-vault/course/m6/1-creating-tokens.sh | UTF-8 | 701 | 2.953125 | 3 | [
"MIT"
] | permissive | # First of all we are going to start Vault in development mode
# NOTE: `vault server -dev` runs in the foreground; run it in a separate
# terminal (or background it) before executing the commands that follow.
vault server -dev
# Now set your Vault address environment variable
export VAULT_ADDR=http://127.0.0.1:8200
# Set the root token variable (the dev server prints it at startup)
root_token=ROOT_TOKEN_VALUE
# And log into Vault using the root token
vault login $root_token
# First we are going to create a basic token (default policy, 60 minute TTL)
vault token create -policy=default -ttl=60m
# Now let's check out some info on the token (replace TOKEN_VALUE with the ID)
vault token lookup TOKEN_VALUE
# We can do the same using the accessor, but no ID
vault token lookup -accessor ACCESSOR_VALUE
# Now let's revoke our token
vault token revoke -accessor ACCESSOR_VALUE
# Cool, now let's enable an auth method and explore TTL a bit | true |
e88a84135ef431e42ea3b4d9558ca38dc7595cfb | Shell | rafaelcarv/mestrado | /Scripts/Script-KMeans-distributed.sh | UTF-8 | 1,211 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# Benchmark HPAT (Julia + MPI) against Spark K-Means, 30 repetitions per mode.
# Usage: Script-KMeans-distributed.sh <iterations> <centers> <cores> <spark-master-url> <hostfile>
HPAT_EXAMPLE=$HOME/.julia/v0.5/HPAT/examples
HPAT_GENERATE=$HOME/.julia/v0.5/HPAT/generate_data/
SPARK=$HOME/spark-2.0.1-bin-hadoop2.7
SPARK_SBIN=$SPARK/sbin/
SPARK_BIN=$SPARK/bin/
iterations=$1
centers=$2
cores=$3
master=$4
hostfile=$5
iteration_output_file=$HOME/iteration_output_file
single_execution_output_file=$HOME/single_execution_output_file
script_loop_output_file=$HOME/script_loop_output_file
# Interleaved runs: one HPAT execution followed by one Spark execution.
# Fixed: the original passed "—-hostfile" (Unicode em dash), which mpirun
# rejects, and used the literal word "hostfile" instead of the <hostfile>
# argument captured above.
for i in `seq 1 30` ;
do
mpirun -n ${cores} --hostfile ${hostfile} julia --depwarn=no ${HPAT_EXAMPLE}/kmeans.jl --file=$HOME/data.hdf5 --centers=$centers --iterations=$iterations >> $single_execution_output_file
${SPARK_BIN}spark-submit --master $master $HOME/KMeans.jar $HOME/train.txt $centers $iterations --total-executor-cores ${cores} >> $single_execution_output_file
done
# 30 back-to-back HPAT runs.
for i in `seq 1 30` ;
do
mpirun -n ${cores} --hostfile ${hostfile} julia --depwarn=no ${HPAT_EXAMPLE}/kmeans.jl --file=$HOME/data.hdf5 --centers=$centers --iterations=$iterations >> $script_loop_output_file
done
# 30 back-to-back Spark runs.
for i in `seq 1 30` ;
do
${SPARK_BIN}spark-submit --master $master $HOME/KMeans.jar $HOME/train.txt $centers $iterations --total-executor-cores ${cores} >> $script_loop_output_file
done
| true |
fbc412cf42a1d9c7b58f69c2722c9166da37302a | Shell | arpsabbir/megagoofil | /megagoofil.sh | UTF-8 | 973 | 3.53125 | 4 | [] | no_license | #!/bin/bash
# Wrapper around metagoofil: download documents found for a domain, then (for
# PDFs) extract text, collect author metadata and e-mail addresses.
# Options: -d domain, -l search limit, -n download count, -o output dir, -t file type.
while [[ $# -gt 0 ]]; do case "${1}" in
-d)
DOMAIN="${2}"
shift 2
;;
-l)
LIMIT="${2}"
shift 2
;;
-n)
DOWNLOADS="${2}"
shift 2
;;
-o)
OUTPUT="${2}"
shift 2
;;
-t)
FILETYPE="${2}"
shift 2
;;
*) # unknown option
shift # past argument
;;
esac
done
echo "$DOMAIN"
echo "$LIMIT"
echo "$DOWNLOADS"
echo "$OUTPUT"
echo "$FILETYPE"
python3 ~/metagoofil/metagoofil.py -d "$DOMAIN" -l $LIMIT -n $DOWNLOADS -o "$OUTPUT" -t "$FILETYPE"
# Abort if the output directory cannot be entered; otherwise the parsing
# below would run in the wrong directory.
cd "$OUTPUT" || exit 1
if [ "$FILETYPE" == 'pdf' ]; then
for f in *.pdf; do
echo "Parsing $f"
docsplit text --no-ocr "$f"
# Collect author metadata; dedupe once after the loop. (The original piped
# the output of a >> redirection into sort|uniq, which received no input.)
docsplit author "$f" >> authors.txt
done
sort authors.txt | uniq -i > authors.sorted && mv authors.sorted authors.txt
for t in *.txt; do
echo "Searching $t for E-Mail adresses."
grep -o '[[:alnum:]+\.\_\-]*@[[:alnum:]+\.\_\-]*' "$t" >> emails.txt
done
# Case-insensitive dedupe across all scanned files.
sort emails.txt | uniq -i > emails.sorted && mv emails.sorted emails.txt
fi
| true |
abd0124143a7f7c53c57f0177fae7fc9be034ac2 | Shell | AMReX-FHD/LowMachFHD | /staggered_grid/exec/reactDiff/test/misc/Front_1d/anim_gif_comp.sh | UTF-8 | 1,100 | 3.875 | 4 | [] | no_license | #!/bin/bash
# Compare two reactDiff runs (stochastic RUNNAME1 vs deterministic RUNNAME2):
# render one gnuplot frame per vstat data file into OUTDIR, then combine the
# frames into an animated GIF with ImageMagick and display it.
#RUNNAME1=Schlogl_Dirichlet
#RUNNAME2=Schlogl_Dirichlet_det
#OUTDIR=Schlogl_Dirichlet_comp
RUNNAME1=Schlogl_PBC
RUNNAME2=Schlogl_PBC_det
OUTDIR=Schlogl_PBC_comp
GNUPLOTSCRIPT="Schlogl_comp.plt"
DATAFILEPREFIX=vstat
GNUPLOTOUTPUTEXT="png"
GIFDELAY=50
GIFOUTPUT=Schlogl_comp.gif
# check output directory
if [ -d $RUNNAME1 ]
then
cd $RUNNAME1
# Data files are named <prefix> followed by exactly eight characters.
DATAFILES=`ls $DATAFILEPREFIX????????`
cd ..
else
echo "directory $RUNNAME1 does not exist..."
exit
fi
if [ ! -d $RUNNAME2 ]
then
echo "directory $RUNNAME2 does not exist..."
exit
fi
if [ -d $OUTDIR ]
then
# Warn (and pause briefly) before reusing an existing output directory.
echo "directory $OUTDIR already exists..."
sleep 3s
else
mkdir $OUTDIR
fi
cd $OUTDIR
# make plots: pass both runs' data files to the gnuplot comparison script.
for datafile in $DATAFILES
do
datafile1=../$RUNNAME1/$datafile
datafile2=../$RUNNAME2/$datafile
gnuplot -e "datafile='$datafile'" -e "datafile1='$datafile1'" -e "datafile2='$datafile2'" ../$GNUPLOTSCRIPT
echo "$datafile.$GNUPLOTOUTPUTEXT generated"
done
# generate an animated gif from the rendered frames
convert -delay $GIFDELAY $DATAFILEPREFIX*.$GNUPLOTOUTPUTEXT $GIFOUTPUT
echo "$GIFOUTPUT generated"
animate $GIFOUTPUT &
| true |
ab9875e7d2e9f9bed99b6bf79017da9a0c0817c6 | Shell | AbDaniel/dotfiles-1 | /bin/brew-dirty | UTF-8 | 265 | 2.71875 | 3 | [] | no_license | #!/bin/sh
# Stand-in for the removed `brew dirty` subcommand: list every formula that
# has more than one installed version.
if command -v brew >/dev/null; then
  exec brew list --multiple --versions
fi

echo "brew not found. You don't have a Homebrew installation here?"
exit 1
| true |
d8572fcd287e8c99ebb016b36bad0896c4107717 | Shell | awesomeamit1998/Shell-Scripting | /script2.sh | UTF-8 | 224 | 2.640625 | 3 | [] | no_license | echo "Total no of arguments is $#"
# Demonstrates bash positional-parameter variables.
echo "Script name is $0"
echo "First argument is $1"
echo "Second argument is $2"
echo "Third argument is $3"
# $@ expands to every argument as a separate word — it is not a count, so the
# original "Total no of arguments" label was misleading.
echo "All arguments are $@"
# $* joins all arguments into a single string separated by the first IFS char.
echo "All arguments joined together are $*"
| true |
e4728f224a380e216d19bbb4c34f91440cdabe63 | Shell | yochananmarqos/pkgbuilds | /librnnoise-git/PKGBUILD | UTF-8 | 727 | 2.796875 | 3 | [] | no_license | # https://aur.archlinux.org/packages/librnnoise
# Arch Linux PKGBUILD: build rnnoise from the latest upstream git revision.
pkgname=librnnoise-git
pkgver=r84.125d8a5
pkgrel=1
pkgdesc="A noise suppression library based on a recurrent neural network."
arch=('x86_64')
url="https://github.com/xiph/rnnoise"
license=('BSD')
makedepends=('git')
provides=("${pkgname%-git}")
conflicts=("${pkgname%-git}")
source=('git+https://github.com/xiph/rnnoise.git')
sha256sums=('SKIP')
# Derive the VCS version string "r<commit count>.<short hash>" from git history.
pkgver() {
cd "$srcdir/rnnoise"
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
# Standard autotools build.
build() {
cd "$srcdir/rnnoise"
./autogen.sh
./configure --prefix=/usr
make
}
package() {
cd "$srcdir/rnnoise"
make DESTDIR="$pkgdir" install
# The BSD license requires shipping the license text with the package.
install -Dm644 COPYING -t "$pkgdir/usr/share/licenses/$pkgname"
}
| true |
90399f33b75087128d9d3193cd673139562c969a | Shell | BattlefieldRedux/bf2-docker | /images/bf2hub-pb-mm-bf2cc/assets/runtime/run.sh | UTF-8 | 3,537 | 3.828125 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Container entrypoint for a Battlefield 2 server (BF2Hub + PunkBuster +
# ModManager + BF2CC). On first run, moves the server files into the
# persisted SRV directory, seeds configuration from ENV_* variables, and
# symlinks mutable state into VOLUME; then starts nginx and the BF2CC daemon.
set -e
TMP='/home/bf2/tmp'
SRV='/home/bf2/srv'
VOLUME='/volume'
# Emit a 10-character random password built from /dev/urandom.
generate_pw() {
echo "$(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c10)"
}
# replace_var <placeholder> <value> <file>: substitute every occurrence of the
# placeholder in the file, escaping sed-special characters in the value first.
replace_var() {
echo "$3: $1 => [$2]"
replaceEscaped=$(echo "$2" | sed 's/[&/\]/\\&/g')
sed -i --follow-symlinks -e "s/$1/$replaceEscaped/g" $3
}
# Check if target volume is empty
if [ "$(ls -A $SRV)" ]; then
echo "$SRV is not empty. Skipping..."
else
# Move server files to persisted folder (-n without overwriting)
echo "$SRV is empty. Moving server files..."
mv -n $TMP/srv/* $SRV/
# Set server settings from environment variables
# (random passwords are generated when the ENV_* variables are unset).
rcon_pw="${ENV_RCON_PASSWORD:-"$(generate_pw)"}"
bf2ccd_pw="${ENV_BF2CCD_PASSWORD:-"$(generate_pw)"}"
# BF2CC stores its password as an uppercase MD5 hex digest.
bf2ccd_pw_md5="$(echo -n $bf2ccd_pw | md5sum | tr a-z A-Z | tr -d - | xargs echo -n)"
replace_var '{{server_name}}' "${ENV_SERVER_NAME:-"bf2-docker"}" "$SRV/mods/bf2/settings/serversettings.con"
replace_var '{{max_players}}' "${ENV_MAX_PLAYERS:-"16"}" "$SRV/mods/bf2/settings/serversettings.con"
replace_var '{{server_port}}' "${ENV_SERVER_PORT:-"16567"}" "$SRV/mods/bf2/settings/serversettings.con"
replace_var '{{server_port}}' "${ENV_SERVER_PORT:-"16567"}" "$SRV/bf2ccd/default.profile"
replace_var '{{gamespy_port}}' "${ENV_GAMESPY_PORT:-"29900"}" "$SRV/mods/bf2/settings/serversettings.con"
replace_var '{{gamespy_port}}' "${ENV_GAMESPY_PORT:-"29900"}" "$SRV/bf2ccd/default.profile"
replace_var '{{demos_url}}' "${ENV_DEMOS_URL:-"http://example.com/demos/"}" "$SRV/mods/bf2/settings/serversettings.con"
replace_var '{{rcon_password}}' "$rcon_pw" "$SRV/mods/bf2/settings/modmanager.con"
replace_var '{{rcon_password}}' "$rcon_pw" "$SRV/bf2ccd/default.profile"
replace_var '{{bf2ccd_password}}' "$bf2ccd_pw_md5" "$SRV/bf2ccd/users.xml"
# Create volume directory for all persisted changes
echo 'Moving persisted data and creating symlinks...'
mkdir -m 777 -p $VOLUME
mkdir -m 777 -p $VOLUME/svlogs
mkdir -m 777 -p $VOLUME/svss
mkdir -m 777 -p $VOLUME/demos
mkdir -m 777 -p $VOLUME/demos/pending
mkdir -m 777 -p $VOLUME/demos/uploaded
mkdir -m 777 -p $VOLUME/www
# Pre-create writable (mode 777) empty log/state files inside the volume.
install -m 777 /dev/null $VOLUME/bf2.log
install -m 777 /dev/null $VOLUME/modmanager.log
install -m 777 /dev/null $VOLUME/pbalias.dat
install -m 777 /dev/null $VOLUME/sv_viol.log
# Move mutable state into the volume and symlink it back into SRV so the
# server writes land on persisted storage.
mv -n $SRV/mods/bf2/settings $VOLUME
chmod -R 777 $VOLUME/settings
rm -rf $SRV/mods/bf2/demos
rm -rf $SRV/pb_amd-64/svss
rm -rf $SRV/pb_amd-64/svlogs
ln -sf $VOLUME/settings $SRV/mods/bf2/settings
ln -sf $VOLUME/demos/pending $SRV/mods/bf2/demos
ln -sf $VOLUME/svss $SRV/pb_amd-64/svss
ln -sf $VOLUME/svlogs $SRV/pb_amd-64/svlogs
ln -sf $VOLUME/bf2.log $SRV/bf2.log
ln -sf $VOLUME/modmanager.log $SRV/modmanager.log
ln -sf $VOLUME/pbalias.dat $SRV/pbalias.dat
ln -sf $VOLUME/sv_viol.log $SRV/pb_amd-64/sv_viol.log
mv -n $SRV/bf2ccd $VOLUME
chmod -R 777 $VOLUME/bf2ccd
ln -sf $VOLUME/bf2ccd $SRV/bf2ccd
# Set execute permissions
echo 'Setting execute permissions...'
cd $SRV
chmod +x ./start_bf2hub.sh ./bin/amd-64/bf2 ./mono-1.1.12.1/bin/mono ./bf2ccd/bf2ccd.exe
chmod -R 777 ./pb_amd-64
chmod -R 777 ./bf2ccd
chmod -R 777 . # temp D:
fi
# Start nginx for demos
service nginx start
# Start BF2CC Daemon (runs as the unprivileged "bf2" user; it in turn
# launches the game server).
echo 'Starting BF2CC Daemon...'
export TERM=xterm
su -c "cd $SRV && ./mono-1.1.12.1/bin/mono ./bf2ccd/bf2ccd.exe -noquitprompts -autostart >/dev/null" - bf2
exit 0
| true |
fd3dab71cabc128eea1ba5e476b41f806790a981 | Shell | fronttang/hadoop_ha_config | /storm/bin/storm-all.sh | UTF-8 | 1,271 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# Cluster-wide Storm control: run nimbus + ui on this host and a supervisor on
# every host listed (one per line) in the supervisor host file. Actions:
# start | stop | status.
SUPERVISOR_HOST_CONF=$STORM_HOME/conf/$SUPERVISOR_HOST_CONF
case $1 in
start)
echo $HOSTNAME: starting nimbus, logging to $STORM_HOME/logs/nimbus.out
nohup $STORM_HOME/bin/storm nimbus >> $STORM_HOME/logs/nimbus.out 2>&1 &
echo $HOSTNAME: starting ui, logging to $STORM_HOME/logs/nimbus.out
nohup $STORM_HOME/bin/storm ui >> $STORM_HOME/logs/ui.out 2>&1 &
# Start one supervisor per remote host over ssh.
for supervisor in `cat $SUPERVISOR_HOST_CONF`
do
echo $supervisor: starting supervisor, logging to $STORM_HOME/logs/supervisor.out
ssh $supervisor nohup $STORM_HOME/bin/storm supervisor >> $STORM_HOME/logs/supervisor.out 2>&1 &
done
;;
stop)
# Kill the local nimbus/core JVMs by matching their jps process names.
echo $HOSTNAME: stop nimbus
kill -9 `jps | grep nimbus|awk '{print $1}' `
echo $HOSTNAME: stop core
kill -9 `jps | grep core|awk '{print $1}' `
#echo $HOSTNAME: stop ui
#kill -9 `jps | grep ui|awk '{print $1}' `
for supervisor in `cat $SUPERVISOR_HOST_CONF`
do
echo $supervisor: stop supervisor
ssh $supervisor $STORM_HOME/bin/stop-supervisor.sh
done
;;
status)
# Show the running JVMs locally and on each supervisor host.
echo $HOSTNAME: nimbus status
jps
for supervisor in `cat $SUPERVISOR_HOST_CONF`
do
echo $supervisor: supervisor status
ssh $supervisor jps
done
;;
*)
echo "Usage: {start|stop|status}"
;;
esac
| true |
034ad9328f237893b49385dd3502ff924b61c799 | Shell | i-sinister/dotconfig | /zsh/fzf/file_operations.zsh | UTF-8 | 2,707 | 3.921875 | 4 | [
"Apache-2.0"
] | permissive | # content from fzf wiki: https://github.com/junegunn/fzf/wiki/examples
# fe [FUZZY PATTERN] - Open the selected file with the default editor
# - Bypass fuzzy finder if there's only one match (--select-1)
# - Exit if there's no match (--exit-0)
fe() {
local files
IFS=$'\n' files=($(fzf-tmux --query="$1" --multi --select-1 --exit-0))
[[ -n "$files" ]] && ${EDITOR:-vim} "${files[@]}"
}
# Modified version where you can press
# - CTRL-O to open with `open` command,
# - CTRL-E or Enter key to open with the $EDITOR
fo() {
local out file key
IFS=$'\n' out=($(fzf-tmux --query="$1" --exit-0 --expect=ctrl-o,ctrl-e))
# With --expect, fzf prints the pressed key on line 1 and the selection on line 2.
key=$(head -1 <<< "$out")
file=$(head -2 <<< "$out" | tail -1)
if [ -n "$file" ]; then
[ "$key" = ctrl-o ] && exo-open "$file" || ${EDITOR:-vim} "$file"
fi
}
# vf - fuzzy open with vim from anywhere
# ex: vf word1 word2 ... (even part of a file name)
# zsh autoload function
vf() {
local files
# ${(f)...} is zsh's split-on-newlines flag; locate feeds NUL-separated
# candidates into fzf, which returns the picked paths.
files=(${(f)"$(locate -Ai -0 $@ | grep -z -vE '~$' | fzf --read0 -0 -1 -m)"})
if [[ -n $files ]] then
vim -- $files
print -l $files[1]
fi
}
# fuzzy grep open via ag; jumps vim to the matched line via "+<line>"
vg() {
local file
file="$(ag --nobreak --noheading $@ | fzf -0 -1 | awk -F: '{print $1 " +" $2}')"
if [[ -n $file ]]
then
vim $file
fi
}
# cdf - cd to selected directory
cdf() {
local dir
dir=$(find ${1:-.} -type d -print 2> /dev/null | fzf +m) &&
cd "$dir"
}
# Another fd - cd into the selected directory
# This one differs from the above, by only showing the sub directories and not
# showing the directories within those.
csf() {
DIR=`find * -maxdepth 0 -type d -print 2> /dev/null | fzf-tmux` \
&& cd "$DIR"
}
# fdr - cd to selected parent directory
cdpf() {
local declare dirs=()
# Recursively collect every ancestor directory of $1 into dirs, then print
# them once the root is reached.
get_parent_dirs() {
if [[ -d "${1}" ]]; then dirs+=("$1"); else return; fi
if [[ "${1}" == '/' ]]; then
for _dir in "${dirs[@]}"; do echo $_dir; done
else
get_parent_dirs $(dirname "$1")
fi
}
local DIR=$(get_parent_dirs $(realpath "${1:-$PWD}") | fzf-tmux --tac)
cd "$DIR"
}
# cf - fuzzy cd from anywhere
# ex: cf word1 word2 ... (even part of a file name)
# zsh autoload function
cf() {
local file
file="$(locate -Ai -0 $@ | grep -z -vE '~$' | fzf --read0 -0 -1)"
if [[ -n $file ]]
then
if [[ -d $file ]]
then
cd -- $file
else
# ${file:h} is zsh's "head" modifier: the file's parent directory.
cd -- ${file:h}
fi
fi
}
# cdf - cd into the directory of the selected file
cdfd() {
local file
local dir
file=$(fzf +m -q "$1") && dir=$(dirname "$file") && cd "$dir"
}
# fkill - kill process; $1 optionally overrides the signal (default 9)
fkill() {
local pid
pid=$(ps -ef | sed 1d | fzf -m | awk '{print $2}')
if [ "x$pid" != "x" ]
then
echo $pid | xargs kill -${1:-9}
fi
}
| true |
9422cdb0c06a621bbb9d6b4ac5751207b995d651 | Shell | twopoint718/heroku-buildpack-sqitch | /bin/compile | UTF-8 | 1,868 | 3.71875 | 4 | [] | no_license | #!/bin/bash
# bin/compile <build-dir> <cache-dir> <env-dir>
# Heroku buildpack compile step: install Sqitch (plus DBD::Pg) via cpanm,
# cache the Perl library tree between builds, and configure Sqitch to use
# the app's DATABASE_URL.
set -e # fail fast
set -o pipefail # do not ignore exit codes when piping output
# Heroku-provided params
BUILD_DIR="$1"
CACHE_DIR="$2"
ENV_DIR="$3"
# ENV_DIR contains files representing config vars. Each file's name is the key
# of the var, and the file contents are the value of the var. Load selected
# vars into the environment with this function
export_env_dir() {
env_dir=$1
whitelist_regex=${2:-'DATABASE_URL'}
blacklist_regex=${3:-'^(PATH|GIT_DIR|CPATH|CPPATH|LD_PRELOAD|LIBRARY_PATH)$'}
if [ -d "$env_dir" ]; then
for e in $(ls $env_dir); do
# The trailing ":" keeps a non-matching var from failing the loop under set -e.
echo "$e" | grep -E "$whitelist_regex" | grep -qvE "$blacklist_regex" &&
export "$e=$(cat $env_dir/$e)"
:
done
fi
}
# Run it
export_env_dir $ENV_DIR
PATH="$BUILD_DIR/local/bin:$PATH"
export PERL5LIB="$BUILD_DIR/local/lib/perl5"
export PERL_CPANM_OPT="--quiet --notest -l $BUILD_DIR/local"
# Restore the cached Perl tree from the previous build, if any.
rm -rf $BUILD_DIR/local
if [ -d $CACHE_DIR/local ]; then
cp -a $CACHE_DIR/local $BUILD_DIR/local
fi
cd $BUILD_DIR
if ! [ -e $BUILD_DIR/local/bin/cpanm ]; then
echo "-----> Bootstrapping cpanm"
curl -L --silent https://raw.github.com/miyagawa/cpanminus/master/cpanm | perl - App::cpanminus 2>&1
fi
echo "-----> Installing Sqitch"
cpanm App::Sqitch DBD::Pg 2>&1
# Refresh the build cache with the newly built tree.
if [ -d $BUILD_DIR/local ]; then
rm -rf $CACHE_DIR/local
mkdir -p $CACHE_DIR
cp -a $BUILD_DIR/local $CACHE_DIR/local
fi
echo "-----> Adding sqitch database target"
sqitch target add heroku_db "$DATABASE_URL"
echo "-----> Making this target the default"
sqitch config core.pg.target heroku_db
# .profile.d scripts run at dyno startup, where the app lives under /app —
# hence the hard-coded /app and escaped $HOME in the emitted lines.
echo "-----> Persisting settings"
PROFILE_PATH="$BUILD_DIR/.profile.d/sqitch.sh"
mkdir -p $(dirname $PROFILE_PATH)
echo "export PERL5LIB=/app/local/lib/perl5" >> $PROFILE_PATH
echo "export PATH=\$HOME/local/bin:\$PATH" >> $PROFILE_PATH
| true |
b2bced686f88bb0fcd0f3e1d175d8f73febc3f83 | Shell | dpca/dotfiles | /bin/chrome_canary | UTF-8 | 319 | 3.0625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Launch Google Chrome Canary from ~/Chrome using an isolated user profile.
readonly chrome_dir="${HOME}/Chrome"
readonly canary_app="${chrome_dir}/Google Chrome Canary.app"

if [ ! -d "$canary_app" ]; then
  echo "Chrome canary not installed at '${canary_app}'"
  exit 1
fi

"${canary_app}/Contents/MacOS/Google Chrome Canary" --user-data-dir="${chrome_dir}/chromecanaryprofile"
| true |
41c289f01a192b6d508c8f84390e1d4bf618a50a | Shell | kibihrchak/work-environment-provisioning | /scripts/update+upgrade.sh | UTF-8 | 317 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/bin/bash -eux
# Refresh the apt package lists and, when $UPGRADE is truthy, dist-upgrade
# everything (including the kernel) and reboot.
echo "==> Updating list of repositories"
sudo apt -y update
# "true", "1" or "yes" (as regex matches anywhere in $UPGRADE) opt in.
if [[ $UPGRADE =~ true || $UPGRADE =~ 1 || $UPGRADE =~ yes ]]; then
echo "==> Performing dist-upgrade (all packages and kernel)"
# NOTE(review): --force-yes is deprecated/removed in newer apt releases —
# confirm the target distribution still accepts it.
sudo DEBIAN_FRONTEND=noninteractive apt -y dist-upgrade --force-yes
sudo reboot
# Keep the provisioner's session alive until the reboot actually starts.
sleep 60
fi
| true |
2960717c4897951a45de80c5f4f1ed8cf8a60be5 | Shell | mtyiu/memec | /scripts/hpc-multi/experiments/control/thread_count.sh | UTF-8 | 957 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Run the MemEC thread-count experiment for each configured client thread
# count: start the system through the "manage" screen session, launch YCSB
# clients on hpc7..hpc14, then collect each node's result file. $1 is
# forwarded to start.sh.
BASE_PATH=${HOME}/mtyiu
MEMEC_PATH=${BASE_PATH}/memec
threads='2 4 6 8 10 12 16 24 32 48 64 96 128 192 256 400 512 1000'
for t in $threads; do
echo "Running experiment with thread count = $t..."
# "stuff" types the command into the screen session; the \r presses enter.
screen -S manage -p 0 -X stuff "${BASE_PATH}/scripts-multi/util/start.sh $1$(printf '\r')"
sleep 150
mkdir -p ${BASE_PATH}/results-multi/thread_count/$t
# Kick off the client experiment on every node in parallel.
for n in {7..14}; do
ssh hpc$n "screen -S ycsb -p 0 -X stuff \"${BASE_PATH}/scripts-multi/experiments/client/thread_count.sh $t$(printf '\r')\"" &
done
# Manual barrier: the operator presses enter once per node as it finishes.
pending=0
for n in {7..14}; do
read -p "Pending: ${pending} / 8"
pending=$(expr $pending + 1)
done
echo "Finished experiment with thread count = $t..."
screen -S manage -p 0 -X stuff "$(printf '\r\r')"
sleep 10
# Gather each node's result file and remove its temporary directory.
for n in {7..14}; do
cp ${BASE_PATH}/results-multi/thread_count/tmp/hpc$n/$t.txt ${BASE_PATH}/results-multi/thread_count/$t/hpc$n.txt
rm -rf ${BASE_PATH}/results-multi/thread_count/tmp/hpc$n
done
done
| true |
cb042c789bfe0e03ddd89a4fa41672b7dab03468 | Shell | agenteo/pact_concourse_pipeline | /update-consumer-contract.sh | UTF-8 | 602 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Compare freshly generated pact files against the committed contract repo;
# commit an update to a working clone when they differ.
echo "Updating contract..."
git clone consumer-driven-api-contracts updated-consumer-driven-api-contracts
cp target/pacts/* updated-consumer-driven-api-contracts
# Non-recursive directory diff: exit 0 = identical, 1 = different, 2 = trouble.
diff consumer-driven-api-contracts updated-consumer-driven-api-contracts
CONTRACT_CHANGED=$?
# NOTE(review): "-gt 0" also treats diff errors (exit 2) as "changed" —
# confirm that is the intended behavior.
if [ $CONTRACT_CHANGED -gt 0 ]
then
echo "API contract changed."
cd updated-consumer-driven-api-contracts
git config --global user.email "cosumer@example.com"
git config --global user.name "Consumer updated contract"
git add .
# Commit locally; presumably the surrounding CI job pushes this repo — verify.
git commit -m "Updated contract"
else
echo "API contract did not change."
exit 0
fi | true |
ab672492c4efef1babde40ab25113d294e6b1337 | Shell | AndersenLab/Transposons2 | /scripts/run_OD_TransposonCaller.sh | UTF-8 | 572 | 3.28125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# this script runs the TransposonCaller script on each sample specified in the input_file
# USE: run_TransposonCaller.sh in the data dir
input_file=/lscr2/andersenlab/kml436/git_repos2/Transposons2/files/OD_sample_list.txt
TransposonCaller=/lscr2/andersenlab/kml436/git_repos2/Transposons2/scripts/OD_TransposonCaller.sh
### avoid blank lines and comments (rewrites the sample list in place)
sed -e '/^#/d' -e '/^$/d' $input_file > tmp && mv tmp $input_file
# Submit one SLURM job per sample line.
while read line; do
echo $line
echo "Running OD_TransposonCaller on $line"
sbatch $TransposonCaller $line
done <$input_file
| true |
5de032bd8a1d5502db9b99fd9fbcd7a03ded3001 | Shell | brontosaurusrex/singularity | /bin/toFlacCD | UTF-8 | 1,535 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# functions
# Transcode $file to 16-bit/44.1 kHz stereo-capable FLAC (CD format), applying
# the $r128 loudness gain; ffmpeg's own log goes to $log via FFREPORT.
# Fixed: the sample rate was "-ar 41000", contradicting the script's stated
# CD target of 44100 Hz (see the help text below).
encode() { # put your encoding settings here
when=$(date +%Y%m%d%H%M%S)
FFREPORT=file="$log/$baseext.$when.log":level=32 \
ffmpeg -hide_banner -loglevel quiet -i "$file" -af volume="$r128"dB -vn -y -c:a flac -ar 44100 -sample_fmt s16 "$tmpdir/$base.flac"
}
# help
if [ $# -eq 0 ]; then echo "anything to flac for CD at 44100 Hz"; exit 1; fi
# source config file and timer (config defines $out, $tmpdir, $log, $EBUR128
# and helpers such as hasAudio and r128corectionPlus6)
config="$HOME/bin/singularity.cfg"
if [ -e "$config" ]; then
source "$config"
else
echo "$config does not exist"; exit 1
fi
source "$HOME/bin/timer_func"
# main: encode every file given on the command line, one at a time
while [ $# -gt 0 ]; do
file=$(readlink -f "$1")
# echo ".............................."
echo "$file"
baseext=$(basename "${1}") # file.ext
base="${baseext%.*}" # file
echo "$out/$base.flac"
hasAudio "$file" || { >&2 echo "No Audio in $file" ; shift; continue; } # "continue" should skip to next file in this while loop.
#if $(isMXF "$file"); then
#echo "MXF detected"
#MXFdownmix "$file" "$tmpdir"
#file="$tmpdir/$base.Stereo.wav" # this is the new target for encode function now
#fi
# IS r128 = true, then encode with that in mind ...
if [ "$EBUR128" == "true" ]; then # this is set in config
r128=$(r128corectionPlus6 "$file")
echo "$r128"
else
r128="0"
fi
# action: encode into $tmpdir, then move the result to the output directory
encode # function
mv "$tmpdir/$base.flac" "$out/$base.flac" # will replace files with same name
shift
done
printf 'done in %s\n' $(timer $tmr)
| true |
4de99c8c7e0e9e967f9ade6b1bd0f0e57133d9d4 | Shell | bonzofenix/workstation | /bin/trigger-pipeline | UTF-8 | 1,125 | 3.625 | 4 | [] | no_license | #!/usr/bin/env bash
# common.sh presumably defines $ENV (fly target) and error() — confirm.
source ~/workstation/bin/common.sh
# Present the given arguments as a numbered menu (plus "Quit"); print the
# chosen entry on stdout. Selecting "Quit" exits the whole script.
function select-option(){
PS3=$'\e[0;32mSelect: \e[0m'
opts=($@)
select opt in "${opts[@]}" "Quit" ; do
if (( REPLY == 1 + ${#opts[@]} )) ; then
exit
elif (( REPLY > 0 && REPLY <= ${#opts[@]} )) ; then
echo $opt
break
else
error "Invalid option. Try another one."
fi
done
}
# Interactively pick a Concourse pipeline, group and job via fly + jq, then
# trigger the job and watch its build. $1 may pre-select the pipeline name.
function trigger-pipeline(){
payload=$(fly -t $ENV pipelines --json)
pipeline=$1
jobs=()
if [[ -z $pipeline ]]; then
pipelines=$(echo "$payload" | jq ".[] |.name" -r | sort)
pipeline=$(select-option $pipelines)
fi
# Pipelines without groups report "null"; list their jobs directly.
groups=$(echo "$payload" | jq ".[] | select(.name==\"$pipeline\") | .groups" -r)
if [ "$groups" == "null" ]; then
payload=$(fly -t $ENV jobs -p "$pipeline" --json)
jobs=$(echo "$payload" | jq ".[] | .name" -r)
else
group_names=$(echo $groups | jq -r ".[] | .name")
group=$(select-option $group_names)
jobs=$(echo "$groups" | jq ".[] | select(.name==\"$group\") | .jobs[]" -r)
fi
job=$(select-option $jobs)
# tj = trigger-job; -w watches the resulting build until it finishes.
fly -t $ENV tj -j $pipeline/$job -w
}
trigger-pipeline "${@:-}"
| true |
e18da02ef58bb0beae6da03d1f15bd96485a4f26 | Shell | Ormuzd/Shell | /copydatabase.sh | UTF-8 | 684 | 3.484375 | 3 | [] | no_license | #!/bin/bash
BACKUP_HOST="localhost"
BACKUP_USER="root"
BACKUP_PASS="root"
BACKUP_DIR=/backup
BACKUP_DBNAME="teaching"
COPIES=5
MYSQLDUMP="mysqldump"
TIMEPOINT=$(date -u +%Y-%m-%d.%H:%M:%S)
MYSQLDUMP_OPTS="-h $BACKUP_HOST -u$BACKUP_USER -p$BACKUP_PASS"
umask 0077
test ! -d "$BACKUP_DIR" && mkdir -p "$BACKUP_DIR"
test ! -w $BACKUP_DIR && echo "Error: $BACKUP_DIR is un-writeable." && exit 0
for dbname in $BACKUP_DBNAME
do
test ! -d "$BACKUP_DIR/$dbname" && mkdir -p "$BACKUP_DIR/$dbname"
$MYSQLDUMP $MYSQLDUMP_OPTS $dbname | gzip > $BACKUP_DIR/$dbname/$dbname.$TIMEPOINT.sql.gz
done
find $BACKUP_DIR -type f -mtime +$COPIES -delete
| true |
648cafc48b578a649d0e4a41628b85d8b30f3c6e | Shell | UNoorul/scripts | /ifscript1.sh | UTF-8 | 306 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# This script is created by Ravi
# This is used for backup
echo $1
filename=$1
echo " Welcome to my software... "
echo
if [ -f "$filename" ]
then
echo "File $filename already found "
ls -l $filename
else
echo "File not found.. Let me create the file"
touch $filename
ls -l $filename
fi
| true |
a47de2b9c1808e38c7d3fb503d644bf6f36a44d6 | Shell | muffinkilla/dotmatrix | /scripts/24hcp.sh | UTF-8 | 317 | 3.4375 | 3 | [] | no_license | #!/usr/bin/env bash
# This script copies all contents of a directory that have been modified within
# 24 hours
# Unofficial Bash Strict Mode
# See http://redsymbol.net/articles/unofficial-bash-strict-mode/
set -euo pipefail
IFS=$'\n\t'
SOURCE=$1
TARGET=$2
find $SOURCE -mtime 0 -exec cp --parents {} $TARGET \;
| true |
720003bdc8b13827437876e1d67c2a623a4d51e3 | Shell | openEHR/specifications | /scripts/report_cr_pr.sh | UTF-8 | 2,427 | 3.65625 | 4 | [] | no_license | #!/bin/bash
#
# $Source: C:/project/openehr/spec-dev/scripts/SCCS/s.report_cr_pr.sh $
# $Revision: 1.7 $ $Date: 04/02/13 01:13:43+10:00 $
# simple script to run invoke Perl to generate HTML summary file for
# CRs and PRs
#
PATH=$OPENEHR/$OPENEHR_SPEC/scripts:$PATH
# ------------- functions -------------
bk_edit_pub_files () {
echo "-----checking out PR publishing area files in $OPENEHR/$OPENEHR_SPEC/publishing/CM/PRs --------"
cd $OPENEHR/$OPENEHR_SPEC/publishing/CM/PRs
bk edit -q
echo "-----checking out CR publishing area files in $OPENEHR/$OPENEHR_SPEC/publishing/CM/CRs --------"
cd $OPENEHR/$OPENEHR_SPEC/publishing/CM/CRs
bk edit -q
}
report_by_status() {
pr_summary_file=$1
cr_summary_file=$2
cd $OPENEHR/$OPENEHR_SPEC/CM/PRs
bk edit -q $pr_summary_file
echo "-----Building $OPENEHR/$OPENEHR_SPEC/CM/PRs/$pr_summary_file --------"
perl -S makeindex.pl --tags id,date_raised,title,status --separate status --output $pr_summary_file --title "openEHR PRs by Status" PR*.txt
echo "-----Building $OPENEHR/$OPENEHR_SPEC/CM/CRs/$cr_summary_file --------"
cd $OPENEHR/$OPENEHR_SPEC/CM/CRs
bk edit -q $cr_summary_file
perl -S makeindex.pl --tags id,date_raised,title,status,target_release,changed_components --separate status --output $cr_summary_file --title "openEHR CRs by Status" CR*.txt
}
report_by_release() {
cr_summary_file=$1
echo "-----Building $OPENEHR/$OPENEHR_SPEC/CM/CRs/$cr_summary_file --------"
cd $OPENEHR/$OPENEHR_SPEC/CM/CRs
bk edit -q $cr_summary_file
perl -S makeindex.pl --tags id,title,status,target_release,changed_components --separate target_release --output $cr_summary_file --title "openEHR CRs by Release" CR*.txt
}
copy_files() {
cd $OPENEHR/$OPENEHR_SPEC/CM/PRs
cp *.txt *.html $OPENEHR/$OPENEHR_SPEC/publishing/CM/PRs
cd $OPENEHR/$OPENEHR_SPEC/CM/CRs
cp *.txt *.html $OPENEHR/$OPENEHR_SPEC/publishing/CM/CRs
}
# ---------- deal with cmd line -------
if [ -z $1 ]
then
echo "Usage: `basename $0` by_status | by_release"
exit 2
fi
case $1 in
"by_status" ) echo "Generating CR & PR report grouped by status"
bk_edit_pub_files
report_by_status "PR_by_status.html" "CR_by_status.html"
copy_files
;;
"by_release" ) echo "Generating CR & PR report grouped by release"
bk_edit_pub_files
report_by_release "CR_by_release.html"
copy_files
;;
* ) echo "Unimplemented option chosen."
exit 2
;;
esac
| true |
b20c5304889106396fef73b6ef8a9ac4eeaefd06 | Shell | rohbotics/stm32-toolchain | /install_gcc.sh | UTF-8 | 396 | 2.9375 | 3 | [] | no_license | #!/usr/bin/env bash
TOOL_CHAIN_ARCHIVE=gcc-arm-none-eabi-5_3-2016q1-20160330-linux.tar.bz2
cd /usr/local
wget https://launchpadlibrarian.net/251687888/$TOOL_CHAIN_ARCHIVE
tar xjf $TOOL_CHAIN_ARCHIVE
rm $TOOL_CHAIN_ARCHIVE
echo 'Adding the arm-none-eabi tools to your PATH is recommended'
echo 'run this command to do so:'
echo 'export PATH=/usr/local/gcc-arm-none-eabi-5_3-2016q1/bin/:$PATH'
| true |
675a7386012b8896ac514c52405f4ff8abff0570 | Shell | Rishabh0712/NICK | /git/inf_media/inf_media.sh | UTF-8 | 2,635 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# This file is part of N.I.C.K.
# N.I.C.K is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# N.I.C.K is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with N.I.C.K. If not, see <http://www.gnu.org/licenses/>.
BOLD='\e[32;1m'
PLN='\e[0m'
RED='\033[32;31m'
GRN='\033[0;32m'
NC='\033[0m'
BLU='\033[1;34m'
printf "\n"
ser_fun()
{
server_ip=$(sed -n '1p' $PWD/server/server_info.txt)
server_port=4445
nc $server_ip $server_port ;
}
listener()
{
echo -e "\n${BOLD}${RED}Starting the listner... Please Wait..${NC}${PLN}"
service postgresql start;
xterm -title Listner -e msfconsole -r metaconfig
}
dev_fun()
{
echo -e "[+] Press 1 to generate payload for ${GRN}android${NC}"
echo -e "[+] Press 2 to generate payload for ${GRN}windows${NC}\n"
read opt;
dev_ip=$(hostname -I)
printf "\n[+] Enter the ${GRN}port${NC} you want to listen to :- "
read dev_port;
printf "[+] Enter the ${GRN}path${NC} to store backdoor :- "
read dev_path;
printf "[+] Enter a ${GRN}name${NC} for the backdoor :- "
read file_name;
if [ $opt -eq 1 ]
then
apk_backdoor;
elif [ $opt -eq 2 ]
then
exe_backdoor;
else
echo -e "${RED}Wrong Input${NC}"
./inf_media.sh;
fi
echo -e "\n[+] Do you want to start a ${GRN}listener${NC} now !!! ${RED}(yes/no)${NC}"
read lis_ans
if [ $lis_ans == "yes" ]
then
listener
fi
}
apk_backdoor()
{
msfvenom -p android/meterpreter/reverse_tcp LHOST=$dev_ip LPORT=$dev_port R> $dev_path/$file_name
echo -e "use multi/handler\nset payload android/meterpreter/reverse_tcp\nset LHOST $dev_ip\nset LPORT $dev_port\nset ExitOnSession false\nexploit -j" > metaconfig;
}
exe_backdoor()
{
msfvenom -a x86 --platform windows -k -p windows/meterpreter/reverse_tcp LHOST=$dev_ip LPORT=$dev_port-e x86/shikata_ga_nai -i 3 -b "\x00" > $dev_path/$file_name
echo -e "use multi/handler\nset payload windows/meterpreter/reverse_tcp\nset LHOST $dev_ip\nset LPORT $dev_port\nset ExitOnSession false\nexploit -j" > metaconfig;
}
ser_sta=$(sed -n '5p' $PWD/server/server_status.txt)
check="Success"
if [ "$ser_sta" == "$check" ];
then
ser_fun;
else
dev_fun;
fi
| true |
93f08561c9ea10a776992e8de8aec243fe989427 | Shell | adeelahmad/gluster_vm_test | /attach_int.sh | UTF-8 | 514 | 3.40625 | 3 | [] | no_license |
for host in $1; do
disk=$(( 97 + $2 ))
for vm in `seq -s ' ' $2 $3` ; do
diskletter=`perl -e 'printf "%c\n", '$disk';'`
diskstr="sd${diskletter}"
vmname="${host}_${vm}"
ssh $host virsh attach-disk $vmname /dev/$diskstr --persistent --target vdb
IP_ETH1=`./get_ip_eth1.sh $vmname`
IP_ETH0=`./get_ip.sh $vmname`
# sed -i 's/IPADDR=.*/IPADDR='$IP_ETH1'/g' ifcfg-eth1
# scp ifcfg-eth1 $IP_ETH0:/etc/sysconfig/network-scripts
./console_set_ip.exp $vmname $IP_ETH1
disk=$(( $disk + 1 ))
done
done
| true |
e658439f3a85387e0b90ee5f608835b3525d49ef | Shell | danielasay/fMRI-Pipeline | /step_6_3dDeconvolve.sh | UTF-8 | 3,458 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# 3dDeconvolve script that creates GLM. This was written to analyze block design data. 29.9 seconds is the length of each block
# The baseline comparison (arrow task in this case) is implicit in the model since it is not specified.
workdir=~/fmri_processing/afni
sub=$( tail -n 1 $workdir/subjlist.txt )
func_dir=~/fmri_processing/afni/${sub}/func
stim_dir=~/fmri_processing/afni/${sub}/stimuli
cd $func_dir
#3dDeconvolve script to create GLM for FN data
3dDeconvolve -input FN_r?_scale.nii \
-mask FN_full_mask.nii \
-polort A \
-num_stimts 9 \
-stim_times 1 ${stim_dir}/FN/FN_novel.1D 'BLOCK(29.9, 1)' \
-stim_label 1 Novel \
-stim_times 2 ${stim_dir}/FN/FN_repeated.1D 'BLOCK(29.9, 1)' \
-stim_label 2 Repeated \
-stim_times 3 ${stim_dir}/FN/FN_arrow.1D 'BLOCK(13, 1)' \
-stim_label 3 Baseline \
-stim_file 4 FN_motion.txt'[0]' -stim_base 4 -stim_label 4 transverse_x \
-stim_file 5 FN_motion.txt'[1]' -stim_base 5 -stim_label 5 transverse_y \
-stim_file 6 FN_motion.txt'[2]' -stim_base 6 -stim_label 6 transverse_z \
-stim_file 7 FN_motion.txt'[3]' -stim_base 7 -stim_label 7 rot_x \
-stim_file 8 FN_motion.txt'[4]' -stim_base 8 -stim_label 8 rot_y \
-stim_file 9 FN_motion.txt'[5]' -stim_base 9 -stim_label 9 rot_z \
-jobs 3 \
-num_glt 6 \
-gltsym 'SYM: +Novel +Repeated' \
-glt_label 1 Everything \
-gltsym 'SYM: +Novel -Repeated' \
-glt_label 2 Novel-Repeated \
-gltsym 'SYM: +Novel' \
-glt_label 3 Novel \
-gltsym 'SYM: +Repeated' \
-glt_label 4 Repeated \
-gltsym 'SYM: +Novel -Baseline' \
-glt_label 5 Novel-Baseline \
-gltsym 'SYM: +Repeated -Baseline' \
-glt_label 6 Repeated-Baseline \
-fout -tout -x1D X.FN.xmat.1D -xjpeg X.FN.${sub}-native.jpg \
-x1D_uncensored X.nocensor.xmat.${sub}-native.1D \
-fitts fitts.FN.${sub}-native \
-errts errts.FN.${sub}-native \
-bucket stats.FN.${sub}-native
echo "3dDeconvolve is finished!"
sleep 2
echo "Copying output to native_afni_out..."
sleep 2
cp -v *.FN.${sub}* ../native_afni_out
echo "Done copying!"
echo "Copying T1 from anat to native_afni_out..."
cp ~/fmri_processing/afni/${sub}/anat/${sub}_desc-preproc_T1w.nii.gz ../native_afni_out
echo "Done!"
sleep 1
echo "Take a look at the output."
echo "Make sure the stats file and T1 are present"
ls $workdir/$sub/native_afni_out
sleep 5
echo "The pipeline is now finished. Bring the data from this folder"
echo "down to your local computer and look at brain activation using the afni GUI."
cd $native_afni_out
| true |
d2c976358e1dddc6039428d07d7a9c54c3fda330 | Shell | phonixor/GeneticAlgorithm | /random_experiment.sh | UTF-8 | 1,111 | 3.15625 | 3 | [] | no_license | # shell for the job:
#PBS -S /bin/bash
# job requires at most 10 hours, 0 minutes
# and 0 seconds wallclock time and uses one 8-core node:
#PBS -lwalltime=2:00:00 -lnodes=1
#
# load the required modules
module load python/2.7.2
# cd to the directory where the program is to be called:
cd $HOME/stage/python/
# get the nr of cpu cores
ncores=`cat /proc/cpuinfo | grep bogomips | wc -l`
# run the program
echo starting working on problem $PBS_ARRAYID
(( nprocs = ncores - 1 ))
echo $nprocs
for (( i=1; i<=nprocs; i++ )) ; do
# very important the & at the end, so that they all run in parallel instead of sequential...
echo nr o :$i
python random_experiment.py -n $PBS_ARRAYID-$i $HOME/stage/python/models/random_test $HOME/stage/python/models/4geneWithVarProduction.model &
done
wait
echo ended working on problem $PBS_ARRAYID
#
# notes:
# call this with qsub -t 1-100 random_experiment.sh
# an advantage of using the '-t' flag is that you can kill all the jobs in one command.
# The jobnumbers will be for example 34445-1 34445-2 ... 34445-100.
# The command 'qdel 34445' will remove all these jobs.
| true |
3e7ba5c088e752bb1b49f1eb2aa5cd2d40b965e1 | Shell | robertocosta/uep | /run_many_markov.sh | UTF-8 | 2,285 | 3.421875 | 3 | [] | no_license | #!/bin/bash
set -eu -o pipefail
jobName=(mar_2 mar_3 mar_4)
nblocks=(64000)
function writeJob {
pis=$4
rfs=$1
efs=$2
nblocks=$3
enb=$5
jn=$6
jobN=$jn.$enb.job
echo -e "#!/bin/bash" > ./$jobN
echo -e "#$ -cwd -m ea" >> ./$jobN
echo -e "set -eu -o pipefail" >> ./$jobN
printf "Delete this file to end the job\n" > ./$jobN.check
printf "file=%s.check\n" $jobN >> $jobN
printf "while [ -f \"\$file\" ]; do\n" >> $jobN
for pib in ${pis[*]}
do
for rf in ${rfs[*]}
do
for ef in ${efs[*]}
do
printf " python3 run_uep_markov.py " >> ./$jobN
if [ $pib == 100 ]; then
k_min=600
k_max=2300
printf " --kmin=%s --kmax=%s --overhead=0.3 " $k_min $k_max >> ./$jobN
#python3 run_uep_iid.py $rf $ef $nblocks-
fi
if [ $pib == 10 ]; then
k_min=6000
k_max=8000
printf " --kmin=%s --kmax=%s --overhead=0.3 " $k_min $k_max >> ./$jobN
fi
printf "%s %s %s %s %s\n" $rf $ef $nblocks $pib $enb >> ./$jobN
done
done
done
printf "done\n" >> $jobN
qsub $jobN
}
for nbl in ${nblocks[*]}
do
for jn in ${jobName[*]}
do
if [ $jn == mar_1 ]; then
echo -e $jn
rfs=(1 3 5)
efs=(1)
pib=(inf)
enb=(1)
writeJob $rfs $efs $nbl $pib $enb $jn
else
if [ $jn == mar_2 ]; then
rfs=(1 5)
efs=(1)
pib=(100 10)
enb=(5)
writeJob $rfs $efs $nbl $pib $enb $jn
else
if [ $jn == mar_3 ]; then
rfs=(1 5)
efs=(1)
pib=(100 10)
enb=(10)
writeJob $rfs $efs $nbl $pib $enb $jn
else
rfs=(1 5)
efs=(1)
pib=(100 10)
enb=(50)
writeJob $rfs $efs $nbl $pib $enb $jn
fi
fi
fi
done
done | true |
e95157fd3ad96916cf2aad9493c418823ab8fd0a | Shell | FabioLolix/PKGBUILD-AUR_fix | /t/torrential/PKGBUILD | UTF-8 | 790 | 2.671875 | 3 | [
"WTFPL"
] | permissive | # Maintainer: Fabio 'Lolix' Loli <fabio.loli@disroot.org> -> https://github.com/FabioLolix
# Contributor: Helder Bertoldo <helder.bertoldo@gmail.com>
pkgname=torrential
pkgver=2.0.1
pkgrel=1
pkgdesc="Download torrents in style with this speedy, minimalist torrent client designed for Pantheon Shell"
arch=(i686 x86_64)
url="https://github.com/davidmhewitt/torrential"
license=(GPL2)
depends=(libgranite.so libb64 libevent libnatpmp miniupnpc)
makedepends=(git meson ninja vala dht libutp libtransmission)
source=("git+https://github.com/davidmhewitt/torrential.git#tag=${pkgver}")
sha256sums=('SKIP')
build() {
cd "${pkgname}"
meson build --prefix=/usr
ninja -C build
}
package() {
cd "${pkgname}"
DESTDIR="${pkgdir}" ninja -C build install
ln -s /usr/bin/com.github.davidmhewitt.torrential "$pkgdir/usr/bin/torrential"
}
| true |
575ccc6d9fca6e4ab1b227a57e2da38322c0a700 | Shell | tremor-rs/tremor-runtime | /.github/checks/deps.sh | UTF-8 | 618 | 3.546875 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/sh
if [ -d "$1" ]
then
path="$1"
else
path="."
fi
cnt=0
for d in $( remarshal -i $path/Cargo.toml -of json | jq -r '.dependencies | keys []' | if [ -f $path/.depignore ]; then grep -v -f $path/.depignore; else cat; fi )
do
dep=$(echo $d | sed -e 's/-/_/g')
if ! rg "use $dep(::|;| )" $path -trust > /dev/null
then
if ! rg "extern crate $dep;" $path -trust > /dev/null
then
if ! rg "[^a-z]$dep::" $path -trust > /dev/null
then
cnt=$((cnt + 1))
echo "Not used: $d";
fi
fi
fi
done
exit $cnt
| true |
7d32ce5f67c8dc2c6a9e6a68856f93df0341e76b | Shell | UTK-geog415-s15/admin | /create_student_directories.sh | UTF-8 | 875 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# Initialize the github repository for geog 415
declare class_home="/Users/nicholasnagle/Dropbox/git_root/UTK-geog415-s15"
declare -a uid=(sbleakne vbrown23 aingra14 smarkley imorris1 enield dpaulse1 proychow sselecm2 jwatki22 swoolfol kyoumans jfornade klandolt tsonnich kswagger kthiel ntromble)
declare class_org="UTK-geog415-s15"
declare token="3a5c2a2a9293dd2379a718c3263a531b6cee93bf"
mkdir $class_home
cd $class_home
#for i in "${uid[@]}"
#do
# rm -rf $class_home/${i}
#done
for i in "${uid[@]}"
do
echo "$i"
curl -i -u nnnagle:$token https://api.github.com/orgs/$class_org/repos -d '{"name": "'$i'"}'
done
for i in "${uid[@]}"
do
mkdir $class_home/${i}
cd $class_home/${i}
touch README.md
git init
git add README.md
git commit -m "first commit"
git remote add origin git@github.com:"$class_org"/"$i".git
git push -u origin master
done
| true |
58b6c021f48fe7608a7676f6f9e0de920818f76e | Shell | TUDSSL/Botoks | /install_toolchain.sh | UTF-8 | 1,380 | 3.640625 | 4 | [] | no_license | #!/bin/bash
MSP430_GCC_VERSION=${MSP430_GCC_VERSION:-8.3.0.16}
MSP430_GCC_OS=${MSP430_GCC_OS:-linux64} # can be 'linux32' or 'macos' instead
INSTALL_PREFIX=${INSTALL_PREFIX:-~/ti}
cd ${INSTALL_PREFIX}
# Download toolchain and support files
echo "Downloading toolchain and support files..."
wget -q --show-progress \
"http://software-dl.ti.com/msp430/msp430_public_sw/mcu/msp430/MSPGCC/latest/exports/msp430-gcc-${MSP430_GCC_VERSION}_${MSP430_GCC_OS}.tar.bz2"
wget -q --show-progress \
"http://software-dl.ti.com/msp430/msp430_public_sw/mcu/msp430/MSPGCC/latest/exports/msp430-gcc-support-files-1.208.zip"
# Extract toolchain and support files
echo "Extracting toolchain and support files..."
tar xf msp430-gcc-${MSP430_GCC_VERSION}_${MSP430_GCC_OS}.tar.bz2
unzip -qq msp430-gcc-support-files-1.208.zip
# Rename toolchain to ${INSTALL_PREFIX}/msp430-gcc
mv msp430-gcc-${MSP430_GCC_VERSION}_${MSP430_GCC_OS} msp430-gcc
# Move support files into toolchain directory
mv msp430-gcc-support-files/include/ msp430-gcc/include/
# Remove compressed archives and unneeded support files
echo "Cleaning up..."
rm msp430-gcc-${MSP430_GCC_VERSION}_${MSP430_GCC_OS}.tar.bz2
rm msp430-gcc-support-files-1.208.zip
rm -rf msp430-gcc-support-files
cd -
echo "MSP430 toolchain installed!"
echo ""
echo "Now you can use '-DMSP430_TOOLCHAIN_DIR=${INSTALL_PREFIX}' when calling cmake"
| true |
5de65f6e6684f48db155c552808384380212b574 | Shell | softeu/docker.nagios3-server | /start-nagios.sh | UTF-8 | 420 | 2.59375 | 3 | [] | no_license | #!/bin/sh
#if [ ! -f ${NAGIOS_HOME}/etc/htpasswd.users ] ; then
# htpasswd -c -b -s ${NAGIOS_HOME}/etc/htpasswd.users ${NAGIOSADMIN_USER} ${NAGIOSADMIN_PASS}
# chown -R nagios.nagios ${NAGIOS_HOME}/etc/htpasswd.users
#fi
echo "host $MAIL_PORT_25_TCP_ADDR" >> /etc/msmtprc
echo "set smtp=$MAIL_PORT_25_TCP_ADDR:$MAIL_PORT_25_TCP_PORT " >> /etc/nail.rc
exec ${NAGIOS_HOME}/bin/nagios ${NAGIOS_HOME}/etc/nagios.cfg
| true |
3ba852268ea8fb8fb3869e895ab620bcf32ce6dd | Shell | iladin/dotfiles | /bin/makeZSH.sh | UTF-8 | 921 | 2.515625 | 3 | [
"MIT"
] | permissive |
function nosudo(){
git clone git://git.code.sf.net/p/zsh/code zsh
cd zsh
autoconf
# Options from Ubuntu Zsh package rules file (http://launchpad.net/ubuntu/+source/zsh)
./configure --prefix=$HOME \
--mandir=$HOME/man \
--bindir=$HOME/bin \
--infodir=$HOME/usr/share/info \
--enable-maildir-support \
--enable-etcdir=$HOME/etc/zsh \
--enable-function-subdirs \
--enable-site-fndir=$HOME/usr/local/share/zsh/site-functions \
--enable-fndir=$HOME/usr/share/zsh/functions \
--with-tcsetpgrp \
--with-term-lib="ncursesw" \
--enable-cap \
--enable-pcre \
--enable-readnullcmd=pager \
LDFLAGS="-Wl,--as-needed -g"
make
make check
make install
}
function wsudo(){
sudo apt-get update && sudo apt-get install zsh
}
sudo -l apt-get && wsudo || nosudo
| true |
eb9459d0436b010c3b94b277433bad0ad672a599 | Shell | ikkz/ppt | /java/start.sh | UTF-8 | 282 | 3.015625 | 3 | [] | no_license | #!/bin/sh
javac Main.java
startTime=`date +%s%3N`
echo ---- $PPT_LANG Test start at $startTime
java Main
endTime=`date +%s%3N`
totalTime=$(($endTime-$startTime))
size=`stat -c %s Main.class | tr -d '\n'`
echo -e "\033[41;37m $PPT_LANG Test time: $totalTime ms, size: $size \033[0m" | true |
aee10cca1b5d939d771de7a4b0710c684c9adecc | Shell | AtomGraph/LinkedDataHub | /scripts/create-container.sh | UTF-8 | 3,920 | 3.78125 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
print_usage()
{
printf "Creates a container document.\n"
printf "\n"
printf "Usage: %s options\n" "$0"
printf "\n"
printf "Options:\n"
printf " -f, --cert-pem-file CERT_FILE .pem file with the WebID certificate of the agent\n"
printf " -p, --cert-password CERT_PASSWORD Password of the WebID certificate\n"
printf " -b, --base BASE_URI Base URI of the application\n"
printf " --proxy PROXY_URL The host this request will be proxied through (optional)\n"
printf "\n"
printf " --title TITLE Title of the container\n"
printf " --description DESCRIPTION Description of the container (optional)\n"
printf " --slug STRING String that will be used as URI path segment (optional)\n"
printf "\n"
printf " --parent PARENT_URI URI of the parent container\n"
printf " --content CONTENT_URI URI of the content list (optional)\n"
}
hash turtle 2>/dev/null || { echo >&2 "turtle not on \$PATH. Need to set \$JENA_HOME. Aborting."; exit 1; }
args=()
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-f|--cert-pem-file)
cert_pem_file="$2"
shift # past argument
shift # past value
;;
-p|--cert-password)
cert_password="$2"
shift # past argument
shift # past value
;;
-b|--base)
base="$2"
shift # past argument
shift # past value
;;
--title)
title="$2"
shift # past argument
shift # past value
;;
--description)
description="$2"
shift # past argument
shift # past value
;;
--parent)
parent="$2"
shift # past argument
shift # past value
;;
--content)
content="$2"
shift # past argument
shift # past value
;;
--mode)
mode="$2"
shift # past argument
shift # past value
;;
--slug)
slug="$2"
shift # past argument
shift # past value
;;
*) # unknown arguments
args+=("$1") # save it in an array for later
shift # past argument
;;
esac
done
set -- "${args[@]}" # restore args parameters
if [ -z "$cert_pem_file" ] ; then
print_usage
exit 1
fi
if [ -z "$cert_password" ] ; then
print_usage
exit 1
fi
if [ -z "$base" ] ; then
print_usage
exit 1
fi
if [ -z "$title" ] ; then
print_usage
exit 1
fi
if [ -z "$parent" ] ; then
print_usage
exit 1
fi
args+=("-f")
args+=("$cert_pem_file")
args+=("-p")
args+=("$cert_password")
args+=("-t")
args+=("text/turtle")
args+=("${base}service")
turtle+="@prefix dh: <https://www.w3.org/ns/ldt/document-hierarchy#> .\n"
turtle+="@prefix ac: <https://w3id.org/atomgraph/client#> .\n"
turtle+="@prefix ldh: <https://w3id.org/atomgraph/linkeddatahub#> .\n"
turtle+="@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\n"
turtle+="@prefix dct: <http://purl.org/dc/terms/> .\n"
turtle+="@prefix sioc: <http://rdfs.org/sioc/ns#> .\n"
turtle+="_:container a dh:Container .\n"
turtle+="_:container dct:title \"${title}\" .\n"
turtle+="_:container sioc:has_parent <${parent}> .\n"
if [ -n "$content" ] ; then
turtle+="_:container rdf:_1 <${content}> .\n"
else
content_triples="a ldh:Content; rdf:value ldh:SelectChildren"
if [ -n "$mode" ] ; then
content_triples+="; ac:mode <${mode}> "
fi
turtle+="_:container rdf:_1 [ ${content_triples} ] .\n"
fi
if [ -n "$description" ] ; then
turtle+="_:container dct:description \"${description}\" .\n"
fi
if [ -n "$slug" ] ; then
turtle+="_:container dh:slug \"${slug}\" .\n"
fi
echo -e "$turtle" | turtle --base="$base" | ./create-document.sh "${args[@]}" | true |
5e7d1d7907e149d8629cc453f65f1d342cd36700 | Shell | shoracek/kernel-tests | /ipmi/stress/driver/runtest.sh | UTF-8 | 2,264 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# vim: dict=/usr/share/beakerlib/dictionary.vim cpt=.,w,b,u,t,i,k
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# runtest.sh of /kernel/ipmi/stress/driver
# Description: IPMI driver installation loop
# Author: Rachel Sibley <rasibley@redhat.com>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Copyright (c) 2015 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing
# to use, modify, copy, or redistribute it subject to the terms
# and conditions of the GNU General Public License version 2.
#
# This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Include Beaker environment
. ../../../cki_lib/libcki.sh || exit 1
. /usr/share/beakerlib/beakerlib.sh || exit 1
# Set the full test name
TEST="/kernel/ipmi/stress/driver"
rlJournalStart
rlPhaseStartSetup
# Exit if not ipmi compatible
if [[ $(uname -m) != "ppc64le" ]]; then
rlRun -l "dmidecode --type 38 > /tmp/dmidecode.log"
if grep -i ipmi /tmp/dmidecode.log ; then
rlPass "Moving on, host is ipmi compatible"
else
rlLog "Exiting, host is not ipmi compatible"
rstrnt-report-result $TEST SKIP
exit
fi
fi
rlPhaseEnd
rlPhaseStartTest
# Load and unload ipmi drivers in a loop
modules="ipmi_ssif ipmi_devintf ipmi_poweroff ipmi_watchdog ipmi_si"
rlRun -l "modprobe -r $modules"
for i in $(seq 0 10); do
for i in $modules; do
rlRun -l "modprobe $i" 0,1
rlRun -l "lsmod | grep ipmi" 0,1
done
rlRun -l "modprobe -r $modules" 0,1
rlLogInfo "Loop $i Complete"
done
rlPhaseEnd
rlJournalEnd
# Print the test report
rlJournalPrintText
| true |
c7b353a13ccd8bf85eefde1dfdff572988aa3198 | Shell | costing/Monalisa | /MonaLisa/Clients/WS/Java-Axis/classpath.sh | UTF-8 | 224 | 2.765625 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/sh
cd `dirname $0`
AXIS_LIB="../../lib"
LIST=""
for jar in axis commons-logging commons-discovery wsdl4j jaxrpc saaj; do
LIST=${LIST}:${1}${AXIS_LIB}/${jar}.jar
done
CLASSPATH=$LIST:$CLASSPATH
echo $CLASSPATH
| true |
83205f71b4d72224df0d09715049582c4529e63c | Shell | rasalt/hdl-demo | /scripts/provisioning/jupyterconnect.sh | UTF-8 | 358 | 2.921875 | 3 | [] | no_license |
if [[ $# -eq 0 ]] ; then
echo 'Usage: ./jupyterconnect.sh {path to your env file}'
exit 1
fi
source $1
export MASTERNODE=${clustername}"-m"
export PORT=1082
"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome" \
--proxy-server="socks5://localhost:${PORT}" \
--user-data-dir=/tmp/${MASTERNODE} \
--incognito http://$MASTERNODE:8080
| true |
bbe80a33a7b0dbf3c46c646a7e7ddcb52a4db97d | Shell | miasteinberg/REDItools2 | /create_html.sh | UTF-8 | 458 | 3.546875 | 4 | [] | no_license | #!/bin/bash
if [ $# -eq 0 ]; then
echo "[ERROR] Please, remember to provide the temporary directory of interest."
echo -e "Usage:\n\t$0 TEMPORARY_DIR"
exit 1
fi
TEMPDIR=$1
#cat template.html | sed "s@EVENTS_DATA@$(cat "$1"/times.txt)@g" | sed "s@GROUPS_DATA@$(cat "$1"/groups.txt)@g" > reditools.html
cat template.html | sed "/EVENTS_DATA/{s/EVENTS_DATA//g
r "$1"/times.txt
}" | sed "/GROUPS_DATA/{s/GROUPS_DATA//g
r "$1"/groups.txt
}" > reditools.html
| true |
323905d9c452be644954abf144af008b6a93ed61 | Shell | hkjersem/dotfiles | /zsh/zlogin | UTF-8 | 1,010 | 3.1875 | 3 | [] | no_license | # Executes commands at login post-zshrc.
# Execute code that does not affect the current session in the background.
{
# Compile the completion dump to increase startup speed.
dump_file="$HOME/.zcompdump"
if [[ "$dump_file" -nt "${dump_file}.zwc" || ! -s "${dump_file}.zwc" ]]; then
zcompile "$dump_file"
fi
# Set environment variables for launchd processes.
if [[ "$OSTYPE" == darwin* ]]; then
if [[ -z "$TMUX" ]]; then
for env_var in PATH MANPATH; do
launchctl setenv "$env_var" "${(P)env_var}"
done
fi
fi
} &!
# Speed up zsh compinit by only checking cache once a day.
autoload -Uz compinit
if [ $(date +'%j') != $(stat -f '%Sm' -t '%j' ~/.zcompdump) ]; then
compinit
compdump
else
compinit -C
fi
# Set zsh-autosuggestions color
ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE='fg=214'
# Bind zsh-history-substring-search keys
bindkey '^[[A' history-substring-search-up
bindkey '^[[B' history-substring-search-down
| true |
411e49cedee5688f64ae45f689a9d50b530300bb | Shell | fenildf/anki-crawler | /upload-ankicards | UTF-8 | 1,216 | 4.25 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e
set -u
set -o pipefail
function usage {
echo "Usage: $0 filename username password readysuffix ankicrawler-path"
echo
printf "filename filename to read anki cards from\n"
printf "username username to use to login to AnkiWeb\n"
printf "password password for AnkiWeb\n"
printf "readysuffix the suffix string to append to the archived file\n"
printf "ankicrawler-path path to ankicrawler\n"
exit 1
}
while getopts "h" opt; do
case $opt in
h)
usage
break;;
esac
done
[ "$#" -ne 5 ] && usage
filename=$1
username=$2
password=$3
readysuffix=$4
ankicrawler_path=$5
datetime=$(date +%Y-%m-%d)
ankifolder=$(dirname $filename)
iteration=1
if [ ! -f "$filename" ]; then
echo "File does not exist"
exit 0
fi
resulting_path="$ankifolder/$datetime-$readysuffix-$iteration"
while [ -f "$resulting_path" ]; do
iteration=$(( $iteration + 1 ))
resulting_path="$ankifolder/$datetime-$readysuffix-$iteration"
done
cd "$ankicrawler_path"
PS1=lol
source venv/bin/activate
python anki-crawler.py -f "$filename" -u "$username" -p "$password"
mv "$filename" "$resulting_path"
| true |
af715f20bdc0bfab884d77b0aae97d2a410a3319 | Shell | inuyashafff/Http_cache_proxy | /docker_deploy/web_proxy/tests/run_all.sh | UTF-8 | 2,303 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# This script runs all test cases using netcat (nc).
# Test instructions:
# Start a fresh proxy server which listens on port 12345.
# Ensure that port 23456 is not in use.
# Then, run this script, you should see the output of netcat
# (i.e., the received request and response).
SERVER_PORT=12345
CLIENT_PORT=23456
nc -z localhost $SERVER_PORT
if [ $? -ne 0 ]; then
echo "You don't have your proxy server running!"
exit
fi
nc localhost $CLIENT_PORT > /dev/null # remove any pending data
testid=0
testcase()
{
testid=`expr $testid + 1`
echo "----------Test $testid-----------"
if [ "$2" != "" ]; then
nc -l -p $CLIENT_PORT < "$2" &
fi
sleep .2
nc localhost $SERVER_PORT < "$1" &
wait
}
result()
{
echo "------Test $testid finishes------"
echo "You should see $1"
echo
}
testcase request1.txt response1.txt
result "a normal GET request/response."
testcase request1.txt response2.txt
result "the response has a Content-Length field. \
so the body ends before EOF."
testcase request1.txt response3.txt
result "the response uses chunked encoding. \
The proxy is able to determine the end of the body \
according to the encoding."
testcase request1.txt response4.txt
result "the response is 404. \
The proxy should say it is not cachable."
testcase request2.txt response1.txt
result "the request is POST and has a body, \
whose length is determined by Content-Length \
in request's header."
testcase request1.txt crap.txt
result "the response is illegal. \
The proxy should respond an HTTP 502."
testcase crap.txt
result "the request is illegal. \
The proxy should respond an HTTP 400."
testcase request1.txt response5.txt
result "the response is cached."
testcase request1.txt #response5.txt
# need not run server because the reponse is cached
result "the stored response is used. \
(This is the same request as the previous one.)"
testcase request3.txt response5.txt
result "the stored response is not used \
because the request has no-cache in its \
Cache-Control."
testcase request4.txt response6.txt
result "the response is not stored \
because no-store is present in its \
Cache-Control."
testcase request4.txt response6.txt
result "the server is contacted again
because the previous response was not stored.
(This is the same request as the previous one.)"
| true |
60737bbcc75df45a058317207b4515d195d05155 | Shell | simplesurance/utils | /nomad-logs/nomad-logs.sh | UTF-8 | 179 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env bash
alloc_id="$1"
script_dir="$(cd "$(dirname "$0")" ; pwd -P)"
cat /var/nomad/alloc/${alloc_id}*/alloc/logs/*.stdout.0 | $script_dir/jsonlogs2txt.py | less -RI
| true |
b62e2b1b92ca4fe1c10d958cac23381bc0f00eb3 | Shell | SuperNG6/bilibili-helper-runer | /download.sh | UTF-8 | 566 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
version=$(wget -qO- -t1 -T2 "https://api.github.com/repos/JunzhouLiu/BILIBILI-HELPER/releases/latest" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g;s/V//g')
wget -O "BILIBILI-HELPER.zip" "https://github.com/JunzhouLiu/BILIBILI-HELPER/releases/download/V${version}/BILIBILI-HELPER-v${version}.zip"
mkdir "jar"
unzip -o "BILIBILI-HELPER.zip" -d "./jar/"
mv "./jar/BILIBILI-HELPER-v${version}.jar" "./jar/BILIBILI-HELPER.jar" -f
mv "./config.json" "./jar/config.json" -f
rm "BILIBILI-HELPER.zip"
echo "下载完成"
| true |
46fa6e06cd1145d64d29ccb4a6f37d4e46b0934c | Shell | JetChars/stacker | /swift/swift-conf.sh | UTF-8 | 1,734 | 3 | 3 | [] | no_license | #!/usr/bin/env bash
# Sample ``local.sh`` for user-configurable tasks to run automatically
# at the successful conclusion of ``stack.sh``.
# NOTE: Copy this file to the root ``devstack`` directory for it to
# work properly.
# This is a collection of some of the things we have found to be useful to run
# after ``stack.sh`` to tweak the OpenStack configuration that DevStack produces.
# These should be considered as samples and are unsupported DevStack code.
# --- Basic environment: sane umask and the sbin directories on PATH ----------
umask 022
PATH=$PATH:/usr/local/sbin:/usr/sbin:/sbin
# Standard DevStack locations; DEST may be overridden from the environment.
FILES=/opt/stack/devstack/files
DEST=${DEST:-/opt/stack}
TOP_DIR=$(cd $(dirname "$0") && pwd)
# Fixed hex value consumed by lib/swift below (presumably the cluster hash
# path suffix/prefix written into swift.conf — confirm against lib/swift).
SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5
# Pull in the DevStack function library and the per-service plugin scripts.
# The ordering mirrors stack.sh: functions/config/stackrc first, then the
# service libs that depend on them.
source $TOP_DIR/functions
source $TOP_DIR/lib/config
source $TOP_DIR/stackrc
source $TOP_DIR/lib/database
source $TOP_DIR/lib/rpc_backend
source $TOP_DIR/lib/apache
source $TOP_DIR/lib/tls
source $TOP_DIR/lib/infra
source $TOP_DIR/lib/oslo
source $TOP_DIR/lib/stackforge
source $TOP_DIR/lib/horizon
source $TOP_DIR/lib/keystone
source $TOP_DIR/lib/glance
source $TOP_DIR/lib/nova
source $TOP_DIR/lib/cinder
source $TOP_DIR/lib/swift
source $TOP_DIR/lib/ceilometer
source $TOP_DIR/lib/heat
source $TOP_DIR/lib/neutron
source $TOP_DIR/lib/ldap
source $TOP_DIR/lib/dstat
# Load admin credentials so the swift helpers below can talk to Keystone.
source $TOP_DIR/openrc admin admin
# Re-provision Swift from scratch: wipe, reinstall, reconfigure, recreate the
# loopback storage disk (all four helpers come from lib/swift).
cleanup_swift
install_swift
configure_swift
create_swift_disk
#swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
# Swift replication relies on rsyncd; restart it to pick up the new config.
sudo /etc/init.d/rsync restart
#echo ${SWIFT_DATA_DIR} $SWIFT_DIR ${SWIFT_CONF_DIR}
#local type
#for type in object container account; do
#swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
#run_process s-${type} "$SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
#done
# Bring up every Swift server (proxy, account, container, object).
sudo swift-init all start
| true |
0c5e0f365bc63cec766be5aa3eda5d064e634be3 | Shell | mostalecki/recruitment-task | /app/wait-for-db.sh | UTF-8 | 135 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env bash
# Block until PostgreSQL accepts TCP connections on $POSTGRES_HOST:$POSTGRES_PORT.
# Both variables are expected in the environment (e.g. injected by docker-compose).
# Quoting the expansions keeps nc's argument list intact if either value is
# empty or contains unexpected whitespace.
until nc -z "$POSTGRES_HOST" "$POSTGRES_PORT"
do
    echo "Waiting for db..."
    sleep 1
done
echo Connected with database | true |
de08207283fc4f8a632f4677573cf1e52a6cac32 | Shell | iantestplant/ImageDiff | /make_OSX.sh | UTF-8 | 1,223 | 2.9375 | 3 | [] | no_license | #!/bin/bash
#
#
# QtHelp includes a dependancy on libQtCLucene.4.dylib, and this has a dependancy on QtCore.framework/Versions/4/QtCore which is not found in the app created by the setup.
# (User "otool -l" to show dependencies)
# This is really a problem with Qt that it installs libQtCLucene in /usr/lib and these are assumed to exist on every system so are not included by setup.
# Solution is to modify libQtCLucene.4.dylib to change the path to QtCore and include libQtCLucene.4.dylib in files. It is then copied by setup into the app
# Also with the image plugin libraries they also require amending
# Copy the dylib locally and rewrite its QtCore install name so it resolves
# relative to the bundled app's Frameworks directory instead of /usr/lib.
cp /usr/lib/libQtCLucene.4.dylib .
install_name_tool -change QtCore.framework/Versions/4/QtCore @executable_path/../Frameworks/QtCore.framework/Versions/4/QtCore libQtCLucene.4.dylib
# Start from a clean slate before repackaging.
rm -rf build dist
# Exported so child processes (e.g. editPlist.py below) can read them from the
# environment — presumably used for the bundle's version metadata; confirm in
# editPlist.py.
export target=ImageDiff
export version=1.0
export release=100
#After stuggling for hours with py2app, cx_freeze etc founf that the only packager that supports qt plugins (for the tiff handling) is pyinstaller
pyinstaller -w -i ImageDiff.icns --onefile ImageDiff.py
#echo "Now edit dist\imageDiff.app\Contents\info.plist for version etc"
python editPlist.py
#zip -y -r dist/imageDiff.zip . -i disp/imageDiff.app
| true |
4b4bee7376f291016f0f1c1e2e068da270ceefdc | Shell | widygui93/belajar-bash-scripting | /test1.sh | UTF-8 | 92 | 2.640625 | 3 | [] | no_license | #!/bin/sh
# Grab the second-to-last field of `date` output — the timezone abbreviation
# (e.g. "WIB") in the default date format.
status=$(date | awk '{print $(NF-1)}')
# POSIX test(1) uses `=` for string equality (`==` is a bashism and this file
# runs under #!/bin/sh); the expansion is quoted in case it is empty or
# contains whitespace.
if [ "$status" = "WIB" ]
then
	ls -lh
fi | true |
549b4f07db7709318bfc1675f532293d3fad68b1 | Shell | chie7tain/bashEdits | /.bashrc | UTF-8 | 549 | 2.921875 | 3 | [] | no_license |
# Load the nodist (Node.js version manager) shell integration, converting the
# Windows-style NODIST_PREFIX path (backslashes) to forward slashes first.
NODIST_BIN_DIR__=$(echo "$NODIST_PREFIX" | sed -e 's,\\,/,g')/bin; if [ -f "$NODIST_BIN_DIR__/nodist.sh" ]; then . "$NODIST_BIN_DIR__/nodist.sh"; fi; unset NODIST_BIN_DIR__;
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
# Fixed: the original line was missing the opening quote before $NVM_DIR,
# leaving an unbalanced double quote that broke parsing of this line.
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
# NOTE(review): duplicate of the nodist block above (likely appended twice by
# an installer); kept as-is since re-sourcing nodist.sh is presumably harmless
# — confirm before removing.
NODIST_BIN_DIR__=$(echo "$NODIST_PREFIX" | sed -e 's,\\,/,g')/bin; if [ -f "$NODIST_BIN_DIR__/nodist.sh" ]; then . "$NODIST_BIN_DIR__/nodist.sh"; fi; unset NODIST_BIN_DIR__;
| true |
5b78d6551806e2f8dacc0dc5429951f3f2fcb98d | Shell | scholtalbers/galaxy-dataset-exporter | /resolve_username.sh | UTF-8 | 394 | 3.375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# $1 - Galaxy username, $2 - the user's email address.
username=$1
email=$2
# If your galaxy usernames do not match the unix usernames, then fix it here with your custom code like below
# Default behaviour: the Galaxy username IS the unix username — echo it and stop.
echo $username
exit 0
# ---------------------------------------------------------------------------
# Everything below is intentionally dead code (unreachable after `exit 0`),
# kept as a site-customization example: look the unix uid up in LDAP by email
# and fall back to the local part of the email address when LDAP has no match.
# Example:
username="$(ldapsearch -H ldaps://ldap.embl.de -b cn=Users,dc=embl,dc=org -x mail=$email uid | sed -n 's/uid: //p')"
if [[ ! -z "${username// }" ]]; then
  echo "$username"
else
  echo "${email%@*}"
fi
| true |
79c38df29a0dda6962535ccc066bf6f717327316 | Shell | CyanogenMod/android_vendor_cm | /prebuilt/common/bin/compcache | UTF-8 | 1,102 | 3.515625 | 4 | [
"Apache-2.0"
] | permissive | #!/system/bin/sh
#
# Compcache manager
# shade@chemlab.org (cyanogen)
#
# Detect which compressed-swap implementation this device provides:
# the legacy ramzswap module/device, or the newer zram block device.
isramzswap="$(find /system/lib/modules/ -name ramzswap.ko 2>/dev/null)"
isramzswapbuiltin="$(ls -l /dev/block/ramzswap0 2>/dev/null)"
if [ -n "$isramzswap$isramzswapbuiltin" ] ; then
    MODULE=ramzswap
    DEV=/dev/block/ramzswap0
else
    DEV=/dev/block/zram0
    MODULE=zram
    SYSFS_PATH=/sys/block/zram0
fi
case "$1" in
    start)
        # Drop caches and bias the VM toward keeping pages resident before
        # enabling the compressed swap device. $2 is the size in KiB.
        echo 3 > /proc/sys/vm/drop_caches
        echo 30 > /proc/sys/vm/swappiness
        modprobe $MODULE
        if [ $MODULE = ramzswap ]; then
            rzscontrol $DEV --disksize_kb=$2 --init
        else
            # zram's sysfs disksize is in bytes, hence the *1024 conversion.
            echo "$(($2 * 1024))" > $SYSFS_PATH/disksize
            mkswap $DEV >/dev/null
        fi
        swapon $DEV
        ;;
    stop)
        # Errors ignored on purpose: stopping when not started is a no-op.
        swapoff $DEV >/dev/null 2>&1
        rmmod $MODULE >/dev/null 2>&1
        ;;
    stats)
        if [ $MODULE = ramzswap ]; then
            rzscontrol $DEV --stats
        else
            # zram exposes its counters as one sysfs file per statistic.
            cd $SYSFS_PATH && for i in * ; do
                echo -n "$i:"
                cat $i
            done
        fi
        ;;
    *)
        echo "Usage: $0 {start <size>|stop|stats}"
        exit 1
esac
exit 0
| true |
9f8526ea9901c5dcd2e5241c53df17517809a659 | Shell | permikomnaskaltara/research-2 | /ddos/1991/COPS/cops_104/checkacct/ca.src | UTF-8 | 13,825 | 3.5 | 4 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/sh
#
# paths to some important programs
#
# NOTE: this is a template, not runnable shell. Tokens such as perlpath(),
# installpath(), gurudude(), catpath() and the %eval^ / %shift^ markers are
# presumably placeholders expanded by the COPS build/reconfig step — confirm
# against the COPS installation scripts before editing them.
#
# This is the language used to parse the .rhosts file. If you rewrite the
# parser in a different language, you should change this. If it doesn't
# exist, that's "ok". chkacct will notice that.
#
PERL=perlpath()
#
# This is the program used to parse the .rhosts file. If you rewrite it, you
# should change PERL1 to be the location of your new program. You should also
# send me a copy because I'm a packrat for stuff like this.
#
PERL1=installpath()lib/chkacct/rhosts.pl
#
# This directory contains the info and effect files that chkacct(1L) references.
#
DOCPATH=installpath()lib/chkacct
# default variable values prevent nasty surprises
# (its good style, too)
#
# The title of the guru to send customers to (maybe your name if you work at a
# small company..)
GURU=gurudude()
#
# The name of the security article
#
ARTICLE=Article
#
# The name of the pager you want to use to display info files. This is
# probably "more" or "less" at most sites, but never cat, because things
# scroll off the screen too quickly.
#
PAGER=pagerpath()
#
# miscellaneous stuff
#
CAT=catpath()
THISSHELL=$$
EXITCOND=0
UNIQUE=1
# On exit/hangup/interrupt/quit: announce, then run $stop_dots (defined later)
# to remove the /tmp/makedots<pid> flag file that drives the progress dots.
trap 'echo "Exiting."; %eval^ $stop_dots; exit;' 0;
trap 'echo "Exiting."; %eval^ $stop_dots; exit;' 1;
trap 'echo "Exiting."; %eval^ $stop_dots; exit;' 2;
trap 'echo "Exiting."; %eval^ $stop_dots; exit;' 3;
# an example shell command line parser that conforms to the getopt (ksb)
# standard, Kevin Braunsdorf, ksb@cc.purdue.edu
# our program name and usage message
progname=`basename $0`
usage="$progname: usage [-ehinqrv] [-f <startdir>] [-m <homedir>] [-s <username]"
# how to slide a single letter option off the beginning of a bundle
# -barf -> -arf
# $slide and $param are shell fragments kept in strings and run via %eval^:
# $slide peels one letter off a bundled option; $param asserts that an
# option requiring a value actually has a following argument.
slide='P=$1; %shift^; set _ -`expr "$P" : '\''-.\(.*\)'\''` ${1+"$@"}; %shift^'
param='if [ $# -lt 2 ]; then echo "$progname: missing value for $1" 1>&2 ; exit 1; fi'
# default values for all the flags, or leave unset for a ${flag-value) form
# verbose by default
VERBOSE=1
# interactive by default
INTERACTIVE=1
# cavalier by default
HARMLESS=0
# check .rhosts file by default
RHOSTS=1
# read an environment variable as well as the command line options:
# protect this script from leading -x's with a bogus underbar, then remove it
set _ $ZZZ ${1+"$@"}
%shift^
# get the options from the command line (+ any variables)
# Each flag has two arms: "-x" (the flag stands alone) and "-x*" (the flag is
# at the head of a bundle such as -xyz, peeled apart with $slide).
while [ $# -gt 0 ]
do
case "$1" in
-e)
INTERACTIVE=0
%shift^
;;
-e*)
INTERACTIVE=0
%eval^ "$slide"
;;
-f)
%eval^ "$param"
START_DIR=$2
%shift^ ; %shift^
;;
-f*)
START_DIR=`expr "$1" : '-.\(.*\)'`
%shift^
;;
-i)
INTERACTIVE=1
%shift^
;;
-i*)
INTERACTIVE=1
%eval^ "$slide"
;;
-m)
%eval^ "$param"
HOME=$2
%shift^ ; %shift^
;;
-m*)
HOME=`expr "$1" : '-.\(.*\)'`
%shift^
;;
-n)
HARMLESS=1
%shift^
;;
-n*)
HARMLESS=1
%eval^ "$slide"
;;
-q)
VERBOSE=0
%shift^
;;
-q*)
VERBOSE=0
%eval^ "$slide"
;;
-r)
RHOSTS=0
%shift^
;;
-r*)
RHOSTS=0
%eval^ "$slide"
;;
-s)
%eval^ "$param"
ME=$2
HOME=`echo "echo ~${ME}" | cshpath() `;
%shift^ ; %shift^
;;
-s*)
ME=`expr "$1" : '-.\(.*\)'`
HOME=`echo "echo ~${ME}" | cshpath() `;
%shift^
;;
-v)
VERBOSE=1
%shift^
;;
-v*)
VERBOSE=1
%eval^ "$slide"
;;
--)
%shift^
break
;;
-h|-h*)
cat <<HERE
$usage
e expert (non-interactive) do not ask the user any questions
f <startdir> specify the directory in which to begin the general file check
(\${HOME} is the default)
h print this help message
i interactive mode - ask the user about every file (default)
m <home dir> specify a home directory (\${HOME} is the default)
n do not actually alter any file permissions or files
q perform actions quietly
r do not check of \${HOME}/.rhosts file
s <username> run chkacct as if my userid were <username> (also sets \${HOME} to ~username)
v perform actions verbosely (this is the default)
HERE
exit 0
;;
-*)
echo "$usage" 1>&2
exit 1
;;
*)
# process and continue for intermixed options & args
break
;;
esac
done
# set my identity if it hasn't been done yet
# (ME may already have been set by the -s option above)
if [ -z "${ME}" ]; then
ME=`whoami`;
if [ \( -z "${ME}" \) -o \( $? -ne 0 \) ]; then
echo "Cannot determine your identity - exiting, nothing checked.";
EXITCOND=1;
exit ${EXITCOND};
fi;
fi;
# set my home directory if it hasn't been set yet
# (csh is used to expand ~username portably)
if [ -z "${HOME}" ]; then
HOME=`echo "echo ~${ME}" | cshpath() `;
if [ -z "${HOME}" ]; then
echo "Cannot determine your home directory - exiting, nothing checked.";
EXITCOND=1;
exit ${EXITCOND};
fi;
fi;
# search only in the home dir by default
if [ -z "${START_DIR}" ]; then
START_DIR=${HOME};
fi
#
# For debugging, silly.
#
# echo "Performing account check with username = ${ME}, home dir = ${HOME}, and";
# echo "starting directory ${START_DIR}";
#
# Ok, this is actually checkacct.
#
#
# Define a routine which will display files. If sites have their own favorite
# pager or display method, it can be specified here. If you just wanted
# to use a simple pager, you would define PAGER to be equal to it, and then
# you would change the line below that display it to be:
# ${PAGER} ${DOCPATH}/${DISPLAY};
#
# REMEMBER! Before you call this routine, you must set DISPLAYFILE to be
# the file you want displayed
#
# These "routines" are shell fragments stored in strings and invoked with
# %eval^, since Bourne-shell functions were not portable when this was written.
display_file='
if [ -f ${DOCPATH}/${DISPLAYFILE} ]; then
${PAGER} ${DOCPATH}/${DISPLAYFILE};
fi;'
#
# Its crucial that we don't leave shell variables like $* set
# when we're not expecting it. For that reason, here's a small routine
# to clear the contents of $* by shift'ing. For some reason, each set
# successively lengthens $*.
#
clear_args='
for i
do
%shift^;
done;'
#
# Before each situation where the user might be queried as to the action,
# one needs to remember to set the following shell variables:
#
# FIX - the shell command to fix it with \$TARGET to be the file to
# be operated upon
# MANPAGES - a list of man pages it will tell you to look at
# INFO - The name of the info file in which more info is to be found (if any)
# EFFECT - The name of the file which describes the effect of the fix
# PROBLEM - This is the problem string -- it may be printed several times.
#
# define the prompt/decision routine which will make the fix if necessary, print
# out specific info, refer someone to a manual page.
prompt='
FIXED=0;
while [ ${FIXED} -eq 0 ]; do
echo "";
echo "${PROBLEM}";
echo "The output of the command \"ls -lsopt()ld ${PROBLEMFILE}\" is:";
/bin/ls -lsopt()ld ${PROBLEMFILE};
echo "";
echo "The suggested fix for this is to execute the command:";
echo " ${FIX}";
if [ ${VERBOSE} -eq 1 ]; then
if [ \( -f ${DOCPATH}/${EFFECT} \) -a \( ! -d ${DOCPATH}/${EFFECT} \) ]; then
${CAT} ${DOCPATH}/${EFFECT};
fi;
fi;
if [ ${INTERACTIVE} -eq 1 ]; then
echo "";
echo "Press a letter (a) to enter automatic mode (no more questions), (f)ix problem,";
echo "(h)elp me out with this menu, (i)gnore problem, (m)ore info";
echownl(%Press RETURN/NEWLINE to fix the problem and go on> ^);
read input;
else
input="f";
fi;
case $input in
a*)
echo "";
echo "This will put you into automatic mode. No more questions will be asked,";
echo "and all problems will be automatically fixed unless you specified the";
echo "\"harmless(-n)\" option on startup.";
echo "";
echownl(%Press \"yes\" to enter automatic mode> ^);
read confirm;
if [ \( ! -z "$confirm" \) -a \( "$confirm" = "yes" \) ]; then
echo "Beginning automatic mode.";
INTERACTIVE=0;
echo "";
fi;
;;
h*)
DISPLAYFILE="prompt.help";
%eval^ $display_file;
;;
m*)
DISPLAYFILE=${INFO};
%eval^ $display_file;
if [ -n "$MANPAGES" -a ${VERBOSE} -eq 1 ]; then
echo "";
echo "For additional information, read the manual page for the following";
echo "program(s): ${MANPAGES}";
echo "The command man <name of program> will show you the manual page.";
echo "";
fi;
;;
i*)
echo "Ignoring problem -- taking no action.";
FIXED=1;
;;
*|f*)
if [ ${HARMLESS} -eq 0 ]; then
echownl(%Fixing problem...^);
%eval^ ${FIX};
echo "Done.";
else
echo "In \"harmless\" (-n) mode, ignoring problem.";
fi;
FIXED=1;
;;
esac;
done;'
#
# define the waiting routine that prints those neat dots
#
# make_dots starts a background loop that prints a dot per second for as long
# as the /tmp/makedots<pid> flag file exists; stop_dots removes the flag file
# (after a 1s grace) which terminates that loop.
make_dots='
if [ ${VERBOSE} -eq 1 ]; then
(touch /tmp/makedots${THISSHELL};while [ -f /tmp/makedots${THISSHELL} ]; do echownl(%.^); sleep 1; done)& 2>&1 >/dev/null;
fi;'
stop_dots='sleep 1; /bin/rm -rf /tmp/makedots${THISSHELL};'
# Show the introduction page, then wait for the user to start (interactive mode).
if [ 1 -eq $VERBOSE ]; then
DISPLAYFILE="Intro";
%eval^ $display_file;
fi
if [ ${INTERACTIVE} -eq 1 ]; then
echownl(%Press RETURN/NEWLINE to begin> ^); read input;
fi;
# Dot files that must be owned by the user and must not be group/world
# writable (step one), and dot files that must not be readable (NO_READ).
NO_WRITE="rhosts profile login logout cshrc bashrc bash_profile inputrc";
NO_WRITE="$NO_WRITE screenrc kshrc tcshrc netrc forward dbxinit distfile";
NO_WRITE="$NO_WRITE exrc emacsrc remote mh_profile xinitrc xsession Xdefaults";
NO_WRITE="$NO_WRITE Xresources rninit mwmrc twmrc emacs rhosts";
NO_READ="badpass netrc"
#
# First, are any of the dot files writable & does the user own every dot file?
#
PERMLINE="FindPermWrite()";
if [ ${VERBOSE} -eq 1 ]; then
echo ""
echo "Step one (three total) - Evaluating your account's dot files."
fi
%eval^ $make_dots;
for i in ${NO_WRITE}
do
TARGET=${HOME}/.$i;
if [ -f ${TARGET} -o -d ${TARGET} ]; then
# Pick an unused dangerous.<name>.<n> backup name for the suggested fix.
while [ -f ${HOME}/dangerous.${i}.${UNIQUE} ];
do
UNIQUE=`echo "${UNIQUE} + 1" | bc -l`;
done;
FIX="/bin/mv -i ${TARGET} ${HOME}/dangerous.${i}.${UNIQUE}";
MANPAGES="chmod"
EFFECT="effect.owners"
INFO="owners"
# Ownership check: field 3 of `ls -ld` is the owner.
RESULT=`/bin/ls -ld ${TARGET}`;
%eval^ $clear_args;
set $*=${RESULT};
if [ $3 != ${ME} ]; then
PROBLEM="File '${TARGET}' is owned by user $3.";
PROBLEMFILE=${TARGET};
EXITCOND=1;
%eval^ $stop_dots;
%eval^ $prompt;
%eval^ $make_dots;
continue;
fi
TEMP="`find ${TARGET} ! -type l \( ${PERMLINE} \) -print`"
EFFECT="dotwrite";
INFO="effect.dotwrit";
FIX="/bin/chmod ChmodPermSymbol()-w ${TARGET};"
if [ -n "${TEMP}" ]; then
PROBLEM="File '${TARGET}' is world or group writable.";
PROBLEMFILE=${TARGET};
EXITCOND=1;
%eval^ $stop_dots;
%eval^ $prompt;
%eval^ $make_dots;
fi
fi
done
# Readability check on the NO_READ list (password/credential files).
PERMLINE="FindPermRead()";
EFFECT="effect.read";
INFO="readable";
for i in ${NO_READ}
do
TARGET=${HOME}/.${i};
if [ -f ${TARGET} ]; then
FIX="/bin/chmod ChmodPermSymbol()-r ${TARGET};"
if [ -n "`find ${TARGET} \( ${PERMLINE} \) -exec /bin/ls {} \;`" ]; then
PROBLEM="File '${TARGET}' is world or group readable.";
PROBLEMFILE=${TARGET};
EXITCOND=1;
%eval^ $stop_dots;
%eval^ $prompt;
%eval^ $make_dots;
fi
fi
done
%eval^ $stop_dots;
if [ ${VERBOSE} -eq 1 ]; then
echo "Step one complete."
echo ""
echo "Step two (three total) - Evaluating the file permissions in your account."
fi
#
# Second, do we have any writable files or directories?
#
%eval^ $make_dots
PERMLINE="FindPermWrite()";
RESULT=`(cd ${HOME}; find . -user ${ME} ! -type l \( ${PERMLINE} \) -print)`;
EFFECT="effect.write";
INFO="write";
%eval^ $stop_dots
for i in ${RESULT}
do
FIX="/bin/chmod ChmodPermSymbol()-w ${i};"
if [ -d $i ]; then
PROBLEM="Your directory $i is world or group writable.";
PROBLEMFILE=$i;
EXITCOND=1;
%eval^ $prompt;
else
PROBLEM="Your file $i is world or group writable.";
PROBLEMFILE=$i;
EXITCOND=1;
%eval^ $prompt;
fi
done
# Setuid/setgid scan over the user's own non-directory files.
%eval^ $make_dots
PERMLINE="FindPermSuid()";
RESULT=`(cd ${HOME} ; find . -user ${ME} ! \( -type l -o -type d \) \( ${PERMLINE} \) -print)`;
EFFECT="effect.setuid";
INFO="setuid";
for i in ${RESULT}
do
FIX="/bin/chmod ChmodPermSuidSymbol()-s ${i};"
PROBLEM="Your file $i is user or group setuid.";
PROBLEMFILE=$i;
EXITCOND=1;
%eval^ $stop_dots
%eval^ $prompt;
%eval^ $make_dots
done
sleep 1
%eval^ $stop_dots
if [ ${VERBOSE} -eq 1 ]; then
echo "Step two complete."
echo ""
echo "Step three (three total) - Checking the contents of your rhosts file."
fi
FIX="/bin/mv -i ${HOME}/.rhosts ${HOME}/rhosts.$$;"
EFFECT="effect.rhosts";
INFO="rhosts";
MANPAGES="hosts.equiv rlogin";
#
# Third, does our rhost file contain any glaring dangers?
# see "man hosts.equiv"
#
if [ ${RHOSTS} -eq 0 ]; then
echo "The file ${HOME}/.rhosts will not be checked (as requested).";
elif [ -f ${HOME}/.rhosts ]; then
if [ ! -x ${PERL} ]; then
echo "${PERL} does not exist on your system -- skipping .rhosts check.";
echo "If you are unfamiliar with the uses of a .rhosts file, you should";
echo "definately have a ${GURU} take a look at it.";
else
# The perl helper exits non-zero when the .rhosts contents look unsafe.
${PERL1} ${HOME}/.rhosts;
if [ $? -ne 0 ]; then
PROBLEM="Your .rhosts file is unsafe.";
PROBLEMFILE=${HOME}/.rhosts;
EXITCOND=1;
%eval^ $prompt;
else
if [ ${VERBOSE} -eq 1 ]; then
echo "Your .rhosts file doesn't appear to be a security hole.";
fi;
fi;
fi;
else
if [ ${VERBOSE} -eq 1 ]; then
echo "Congratulations! You don't have a .rhosts file!";
echo "(If I had a cookie, I would give you one.)";
fi;
fi;
%eval^ $stop_dots
if [ ${VERBOSE} -eq 1 ]; then
echo "Step 3 complete."
echo "";
echo "Checkacct is complete. If you still have questions about this program,";
echo "please see a ${GURU}." ;
echo "";
if [ ${INTERACTIVE} -eq 1 ]; then
echo "If you are interested in reading an article on Unix";
echo "security, type \"yes\" and hit RETURN/NEWLINE now.";
echownl(%If not, simply hit RETURN/NEWLINE and checkacct will exit.> ^);
read input;
if [ \( ! -z "$input" \) -a \( "$input" = "yes" \) ]; then
DISPLAYFILE=${ARTICLE};
%eval^ $display_file;
fi;
fi;
fi;
if [ \( ${EXITCOND} -eq 0 \) -a \( ${VERBOSE} -eq 1 \) ]; then
echo "There were no obvious problems with your Unix account.";
echo "(I owe you a cookie.)";
fi;
# Exit status: 0 = clean account, 1 = at least one problem was found.
exit ${EXITCOND};
| true |
d4e5a7be09b8de1baec0a7faa8edbd7a6027612f | Shell | hmasmoudi/SyphaxOS | /Default/0003-SyphaxOSGnome3/001_BuildPackagesScripts/0074-docbook-xsl/PKGBUILD | UTF-8 | 1,312 | 2.765625 | 3 | [] | no_license | # Maintainer: Hatem Masmoudi <hatem.masmoudi@gmail.com>
pkgname=docbook-xsl
pkgver=1.79.1
pkgrel=4
pkgdesc="The DocBook XSL Stylesheets package contains XSL stylesheets. These are useful for performing transformations on XML DocBook files."
arch=('x86_64')
url="http://downloads.sourceforge.net/docbook"
install=${pkgname}.install
license=('GPL')
groups=('desktop')
source=("$url/${pkgname}-${pkgver}.tar.bz2")
md5sums=('b48cbf929a2ad85e6672f710777ca7bc')
depends=('rootfs')
# No build() step: the tarball ships ready-made stylesheets; package() just
# copies the stylesheet trees into the versioned share directory.
package() {
	cd "$srcdir/${pkgname}-${pkgver}"
	install -v -m755 -d ${pkgdir}/usr/share/xml/docbook/xsl-stylesheets-${pkgver}
	cp -v -R VERSION assembly common eclipse epub epub3 extensions fo \
	highlighting html htmlhelp images javahelp lib manpages params \
	profiling roundtrip slides template tests tools webhelp website \
	xhtml xhtml-1_1 xhtml5 \
	${pkgdir}/usr/share/xml/docbook/xsl-stylesheets-${pkgver}
	# Some consumers expect VERSION.xsl; point it at the shipped VERSION file.
	ln -s VERSION ${pkgdir}/usr/share/xml/docbook/xsl-stylesheets-${pkgver}/VERSION.xsl
	install -v -m644 -D README \
	${pkgdir}/usr/share/doc/${pkgname}-${pkgver}/README.txt
	install -v -m644 RELEASE-NOTES* NEWS* \
	${pkgdir}/usr/share/doc/${pkgname}-${pkgver}
}
| true |
9c3421772a53fb1d775680d3ccc0e9ba352f4876 | Shell | nicholastay/dotfiles | /.scripts/tools/launchpersonal | UTF-8 | 199 | 2.75 | 3 | [] | no_license | #!/bin/sh
# Offer the regular files in ~/.scripts/personal via a dmenu prompt and run the
# chosen one. A cancelled dmenu yields an empty $s, which the -f test below
# rejects (the path then names the directory itself, not a file).
s="$(find "$HOME/.scripts/personal" -mindepth 1 -maxdepth 1 -type f -exec basename {} \; | dmenu -i -p "exec personal?")"
# Quoted on execution too, so a $HOME (or script name) containing spaces works.
[ -f "$HOME/.scripts/personal/$s" ] && "$HOME/.scripts/personal/$s"
| true |
67a28c5992470b63793e6c7e172fa471c703cfb7 | Shell | AutomateCompliance/complianceascode-content | /build/rhel7/fixes/bash/package_dracut-fips_installed.sh | UTF-8 | 319 | 2.984375 | 3 | [
"BSD-3-Clause"
] | permissive | # platform = Oracle Linux 7,Red Hat Enterprise Linux 7
# Remediation is applicable only in certain platforms
# ComplianceAsCode remediation: ensure the dracut-fips RPM is installed.
# Container detection: /.dockerenv (Docker) or /run/.containerenv (podman)
# means initramfs/FIPS tooling is irrelevant, so the fix is skipped.
if [ ! -f /.dockerenv ] && [ ! -f /run/.containerenv ]; then
# Only install when the package is not already present (rpm -q is the probe).
if ! rpm -q --quiet "dracut-fips" ; then
    yum install -y "dracut-fips"
fi
else
    >&2 echo 'Remediation is not applicable, nothing was done'
fi | true |
8d886cb9fb5b4b82e5c5b677d9edc525b9811359 | Shell | cyberkingsam/AggregatorVirtualization | /run1.sh | UTF-8 | 437 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# Launch (or verify) a single ffmpeg restream of $source to $output.
#   $1 - input stream URL
#   $2 - flv/RTMP output destination
#   $3..$5 - bitrate / framerate / resolution: accepted but currently UNUSED;
#            the encode settings below are hard-coded (2500k, 15 fps).
#            TODO(review): wire these into the command line if callers rely
#            on them — see the commented-out bitrate line below.
source=$1
output=$2
bitrate=$3
framerate=$4
resolution=$5
#bitrate=" -b:v 512k "
# The grep pattern mirrors the exact ffmpeg command line below, so a match
# means "this restream is already running"; `grep -v grep` drops grep itself.
if ps ax | grep -v grep | grep "ffmpeg -i $source -vcodec libx264 -preset slow -r 15 -b:v 2500k -vprofile baseline -g 1 -ar 44100 -ac 1 -f flv " > /dev/null
then
echo "ffmpeg running"
else
# Detach in the background; both stdout and stderr go to the shared log file.
# $source/$output quoted so URLs with shell metacharacters survive intact.
ffmpeg -i "$source" -vcodec libx264 -preset slow -r 15 -b:v 2500k -vprofile baseline -g 1 -ar 44100 -ac 1 -f flv "$output" &>/home/Aggregator/ffmpegLog &
fi
| true |
c8c4d817fe6c2c543a22f0416fd63e9fa46cd3a2 | Shell | gisle/d7-project | /bin/setup | UTF-8 | 1,135 | 3.75 | 4 | [
"MIT"
] | permissive | #!/bin/sh
DRUPAL_DIR=${1:-drupal}
if [ -d $DRUPAL_DIR ]; then
echo "Drupal appears to already be set up, remove $DRUPAL_DIR/ to start fresh"
exit 1
fi
set -x
# curl -sS https://getcomposer.org/installer | php -- --install-dir=bin --filename=composer
composer install || exit 2
if vendor/bin/drush make drupal.make $DRUPAL_DIR; then
: ok
else
echo "Failed building the $DRUPAL_DIR directory, aborting";
if [ -d $DRUPAL_DIR ]; then
rm -rf $DRUPAL_DIR
fi
exit 3
fi
git init $DRUPAL_DIR
(cd $DRUPAL_DIR && git add . && git commit -m "Drupal baseline")
(cd $DRUPAL_DIR && git mv sites/default sites/.default.orig && git commit -m "Move sites/default out of the way")
ln -s ../../site $DRUPAL_DIR/sites/default
ln -s ../../../../modules $DRUPAL_DIR/sites/all/modules/custom
ln -s ../../../../themes $DRUPAL_DIR/sites/all/themes/custom
(cd $DRUPAL_DIR && git add . && git commit -m "Enable symlinks")
if [ ! -d site ]; then
cp -a $DRUPAL_DIR/sites/.default.orig site
cp site/default.settings.php site/settings.php
chmod +w site/settings.php
echo "http://localhost:8010" >site/base_url
fi
| true |
59ba1cf3caecbe34b6afcedc5f5f53062021015f | Shell | mantoujiaozi/Learning | /tool/Script/manager.sh | UTF-8 | 1,865 | 3.390625 | 3 | [] | no_license | #!/bin/sh
# Service manager for anti_auto_service.sh (gray-user control job).
# Usage: manager.sh [start|stop|restart|status]
# NOTE(review): this script uses bash features and should run under bash; the
# original #!/bin/sh shebang only works where /bin/sh is bash — confirm on
# the target hosts.
cd "$(dirname "$0")"
command=$1
usage="请使用: manager.sh [start|stop|restart|status]"

# Print the service's ps lines (to stdout, as the original did) and return 0
# iff at least one anti_auto_service.sh process is running.
is_running() {
    ps axu | grep -w "anti_auto_service.sh" | grep -v "grep"
}

# SIGKILL every running instance of the service.
kill_service() {
    ps axu | grep -w "anti_auto_service.sh" | grep -v "grep" | gawk '{print $2}' | xargs -n1 kill -9
}

# Launch the service detached, appending its output to ../data/auto.log.
start_service() {
    source ~/.bashrc && cd /data10/app/script/gray_user/control/ && nohup sh /data10/app/script/gray_user/control/anti_auto_service.sh /data10/app/script/gray_user/control/anti_exec_strategy.sh 2>&1 >> ../data/auto.log &
}

if [ -z "$command" ]; then
    echo "$usage"
    exit
fi

case "$command" in
start)
    # Historical behaviour preserved: "start" while already running only kills
    # the existing instance (it does NOT relaunch) — use "restart" for that.
    if is_running; then
        kill_service
    else
        start_service
        echo "service started!"
    fi
    ;;
stop)
    if is_running; then
        kill_service
    else
        echo "service not start!"
    fi
    ;;
restart)
    if is_running; then
        kill_service
        echo "service stop!"
        start_service
    else
        start_service
        echo "service started!"
    fi
    ;;
status)
    if is_running; then
        echo "service is running ..."
    else
        echo "service stoped!"
    fi
    ;;
*)
    echo "$usage"
    ;;
esac
| true |
98091e38ff52c853ff50f5a925e95caaf0b3b2ea | Shell | robilemos89/tools-ubuntu | /apache_scripts/fix-dev.sh | UTF-8 | 215 | 2.671875 | 3 | [] | no_license | #!/bin/sh
# dnsmasq drop-in that resolves the .dev pseudo-TLD to localhost.
FILE_TLD=/etc/NetworkManager/dnsmasq.d/dev-tld
# Only create the file once; restart NetworkManager so dnsmasq reloads it.
# $FILE_TLD is quoted at every use (good hygiene even for a constant path).
if [ ! -f "$FILE_TLD" ]; then
    echo address=/dev/127.0.0.1 | sudo tee "$FILE_TLD" > /dev/null
    sudo systemctl restart NetworkManager > /dev/null
fi
| true |
42cd60fcd1f713a8a68b8526ea9e9b65501202d7 | Shell | sdk2/ClickHouse | /tests/queries/0_stateless/02100_multiple_hosts_command_line_set.sh | UTF-8 | 2,633 | 3.09375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
# NOTE: this is a ClickHouse stateless test — everything written to stdout
# (query results and the `echo "$?"` comparison results) is diffed against
# the matching .reference file, so output must not be changed casually.
# default values test
${CLICKHOUSE_CLIENT} --query "SELECT 1"
# backward compatibility test
${CLICKHOUSE_CLIENT} --host "${CLICKHOUSE_HOST}" --port "${CLICKHOUSE_PORT_TCP}" --query "SELECT 1";
# An unresolvable extra host: the client must report the DNS failure verbatim.
not_resolvable_host="notlocalhost"
exception_msg="Cannot resolve host (${not_resolvable_host}), error 0: ${not_resolvable_host}.
Code: 198. DB::Exception: Not found address of host: ${not_resolvable_host}. (DNS_ERROR)
"
error="$(${CLICKHOUSE_CLIENT} --host "${CLICKHOUSE_HOST}" "${not_resolvable_host}" --query "SELECT 1" 2>&1 > /dev/null)";
[ "${error}" == "${exception_msg}" ]; echo "$?"
# A non-numeric port inside host:port must be rejected during argument parsing.
not_number_port="abc"
exception_msg="Bad arguments: the argument ('${CLICKHOUSE_HOST}:${not_number_port}') for option '--host' is invalid."
error="$(${CLICKHOUSE_CLIENT} --host "${CLICKHOUSE_HOST}:${not_number_port}" --query "SELECT 1" 2>&1 > /dev/null)";
[ "${error}" == "${exception_msg}" ]; echo "$?"
# With one dead host and one good host the client must fail over silently.
not_alive_host="10.100.0.0"
${CLICKHOUSE_CLIENT} --host "${not_alive_host}" "${CLICKHOUSE_HOST}" --query "SELECT 1";
not_alive_port="1"
exception_msg="Code: 210. DB::NetException: Connection refused (${CLICKHOUSE_HOST}:${not_alive_port}). (NETWORK_ERROR)
"
error="$(${CLICKHOUSE_CLIENT} --host "${CLICKHOUSE_HOST}" --port "${not_alive_port}" --query "SELECT 1" 2>&1 > /dev/null)"
[ "${error}" == "${exception_msg}" ]; echo "$?"
# host:port pairs must override / combine with --port as documented.
${CLICKHOUSE_CLIENT} --host "${CLICKHOUSE_HOST}:${not_alive_port}" "${CLICKHOUSE_HOST}" --query "SELECT 1";
${CLICKHOUSE_CLIENT} --host "${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_TCP}" --port "${not_alive_port}" --query "SELECT 1";
# IPv6 literals: without brackets the whole string is the host ...
ipv6_host_without_brackets="2001:3984:3989::1:1000"
exception_msg="Code: 210. DB::NetException: Connection refused (${ipv6_host_without_brackets}). (NETWORK_ERROR)
"
error="$(${CLICKHOUSE_CLIENT} --host "${ipv6_host_without_brackets}" --query "SELECT 1" 2>&1 > /dev/null)"
[ "${error}" == "${exception_msg}" ]; echo "$?"
# ... with brackets it may additionally carry a :port suffix.
ipv6_host_with_brackets="[2001:3984:3989::1:1000]"
exception_msg="Code: 210. DB::NetException: Connection refused (${ipv6_host_with_brackets}). (NETWORK_ERROR)
"
error="$(${CLICKHOUSE_CLIENT} --host "${ipv6_host_with_brackets}" --query "SELECT 1" 2>&1 > /dev/null)"
[ "${error}" == "${exception_msg}" ]; echo "$?"
exception_msg="Code: 210. DB::NetException: Connection refused (${ipv6_host_with_brackets}:${not_alive_port}). (NETWORK_ERROR)
"
error="$(${CLICKHOUSE_CLIENT} --host "${ipv6_host_with_brackets}:${not_alive_port}" --query "SELECT 1" 2>&1 > /dev/null)"
[ "${error}" == "${exception_msg}" ]; echo "$?"
| true |
27cdbb722c5152052270e6e1d2cabb8e485ee3d5 | Shell | aarnt/apps | /kwebkitpart/PKGBUILD | UTF-8 | 723 | 2.671875 | 3 | [] | no_license |
pkgname=kwebkitpart
pkgver=1.3.3
pkgrel=1
pkgdesc="A web browser component for KDE"
url="http://opendesktop.org/content/show.php?content=127960"
arch=('x86_64')
license=('GPL')
depends=('kde-runtime')
makedepends=('cmake' 'automoc4')
install=${pkgname}.install
source=("http://sourceforge.net/projects/kdeos/files/Sources/${pkgname}/${pkgname}-${pkgver}.tar.bz2")
md5sums=('0f0e7b8d7ad85ded3b95a247057f4550')
build() {
  cd ${srcdir}
  # Drop the kdelauncher test tool from the build before configuring.
  sed -i '/add_subdirectory(kdelauncher)/d' ${pkgname}-${pkgver}/CMakeLists.txt
  # Out-of-source CMake build.
  mkdir build
  cd build
  cmake ../${pkgname}-${pkgver} \
    -DCMAKE_INSTALL_PREFIX=/usr \
    -DCMAKE_BUILD_TYPE=Release
  make
}
package() {
  cd ${srcdir}/build
  make DESTDIR=${pkgdir} install
} | true |
6b6149d3635380f2683c1c07cb62f9ed7a57d5b0 | Shell | dmccloskey/sequencing_utilities | /scripts/run_reseq.sh | UTF-8 | 573 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/bin/bash -l
#PBS -q serial
#PBS -N reseq_$SAMPLENAME
#PBS -l nodes=1:ppn=1,walltime=07:02:00
#PBS -m abe
# USAGE: qsub reseq.sh -vSAMPLENAME="DIRECTORY_NAME_OF_SAMPLE"
# Under PBS, start in the submission directory (PBS sets PBS_O_WORKDIR).
# -n makes the intent explicit; the original bare `[ $VAR ]` relied on the
# one-argument form of test.
if [ -n "$PBS_O_WORKDIR" ]
then
	cd "$PBS_O_WORKDIR"
fi
# Allow plain command-line use (./run_reseq.sh SAMPLE) outside of qsub.
if [ -z "$SAMPLENAME" ]
then
	SAMPLENAME=$1
fi
echo "$SAMPLENAME"
cd "$SAMPLENAME"
# Reference genome location from the sequtil settings module.
INDEXES_DIR=$(python -c "import sequtil; print sequtil.seq_settings.indexes_dir")
gzip -d *.fastq.gz
# The glob expands directly — no need to shell out to `ls` (and the backtick
# form broke on filenames with whitespace).
breseq -r "$INDEXES_DIR"/NC_000913.gbk *.fastq
cd output
ln -s ../data
cd ..
# Package the output directory (via a symlink named after the sample, followed
# with -h so tar stores the real files), then remove the symlink only.
ln -s output "$SAMPLENAME"
tar cf ../"$SAMPLENAME".tar -h "$SAMPLENAME"
rm "$SAMPLENAME"
| true |
a413c322c9284b111c9940989dea4f386a571a5e | Shell | mattfield/dotfiles-stow | /bin/bin/functions/web/weechat | UTF-8 | 1,465 | 3.125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/dash
# sdothum - 2016 (c) wtfpl
# Web
# ▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂
# ..................................................................... IRC chat
# depends on how many servers auto-connected
IRC=$HOME/.irc
timeout=300
rm -f $HOME/.weechat/*.log
# re-initialize disappearing(?) autoload links
cd $HOME/.weechat/perl/autoload
for i in ../*pl
do
ln -s $i . 2>/dev/null
done
cd $HOME/.weechat/python/autoload
for i in ../*py
do
ln -s $i . 2>/dev/null
done
# wait for znc to establish connections
# connections = irc servers X (times) 2, e.g. freenode + rizon = 4
if server ;then
if pstat znc ;then
ditto 'launching weechat'
ditto 'waiting for ZNC server connections to complete'
count=0
[ -e $IRC ] && ircservers=$(cat $IRC) || ircservers=1
while :
do
[ $(sudo ss -apt | grep znc | grep ESTAB | wc -l) -ge $ircservers ] && break
sleep 1
count=$(( $count + 1 ))
if [ $count -ge $timeout ] ;then
notify critical "Weechat" "$timeout second ZNC server IRC timeout"
exit
fi
done
notify low "Weechat" "$count seconds to ZNC server IRC connection"
/usr/bin/weechat
else
ditto 'znc server not running'
fi
else
/usr/bin/weechat
fi
# vim: set ft=sh: #
| true |
23d5f464ac1c0a4a6687900d68723843ea56768d | Shell | alexzhou707/snavquad_interface | /scripts/capture/onboard_camera_only_record.sh | UTF-8 | 1,352 | 3.671875 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
# Record camera topics for one MAV into split rosbags on the SD card.
#   $1 - integer vehicle id (e.g. 3 -> dragonfly3)
if [ $# -eq 0 ]; then
  echo "Input vehicle number as first argument"
  exit 1
fi

MAV_ID=$1
# Accept only (optionally signed) integers for the vehicle id.
if echo "$MAV_ID" | grep -Eq '^[+-]?[0-9]+$'
then
  echo "Recording bag file for MAV $MAV_ID"
else
  echo "Input mav number(integer) as first argument"
  exit 1
fi

MAV_NAME=dragonfly${MAV_ID}
echo "MAV Name $MAV_NAME"

bag_folder="/media/sdcard"
if [ ! -d "$bag_folder" ]; then
  echo "*** WARNING *** SD card not present, recording locally"
else
  echo 'Bag files stored at' "$bag_folder"
  cd "$bag_folder"
  #Print out %Used SD card
  USED_PERCENT=$(df --output=pcent "$bag_folder" | awk '/[0-9]%/{print $(NF)}' | awk '{print substr($1, 1, length($1)-1)}')
  echo 'SD card' ${USED_PERCENT} '% Full'
fi

TOPICS="
/tf
"
DOWN_CAM_TOPIC=" /$MAV_NAME/MVSampleVISLAMEagle/snap_cam_output_image/compressed"
STEREO_TOPIC="
/$MAV_NAME/stereo/left/image_raw/compressed
/$MAV_NAME/stereo/left/camera_info
/$MAV_NAME/stereo/right/image_raw/compressed
/$MAV_NAME/stereo/right/camera_info
"
ALL_TOPICS=$TOPICS$DOWN_CAM_TOPIC$STEREO_TOPIC

BAG_STAMP=$(date +%F-%H-%M-%S-%Z)
CURR_TIMEZONE=$(date +%Z)
# NOTE(review): BAG_NAME is computed but never used (rosbag -o works from the
# prefix below); kept for reference.
BAG_NAME=$BAG_STAMP-V${MAV_ID}.bag
BAG_PREFIX=V${MAV_ID}-${CURR_TIMEZONE}

#Move old back files in backup folder
backup_folder=old_bags_$BAG_STAMP
mkdir "$backup_folder"
mv *.bag "$backup_folder"

# $ALL_TOPICS is intentionally unquoted so each topic becomes its own argument;
# the `eval` in the original added nothing and was dropped.
rosbag record --split --duration 5s $ALL_TOPICS -o "$BAG_PREFIX"
| true |
b44d38079131d841462e6523afbb193f7df11cbf | Shell | w-A-L-L-e/bash_renamer | /renamer.sh | UTF-8 | 768 | 4.46875 | 4 | [] | no_license | #!/bin/bash
# author: Walter Schreppers
# description: recursive bash script to rename all files and directories with spaces
# Recursively rename files/directories under $1, replacing spaces with '_'.
# Uses a glob instead of parsing `ls` (which breaks on unusual filenames)
# and avoids the subshell created by piping `ls` into `while`.
function traverse_dir() {
  local entry renamed_path
  for entry in "$1"/*; do
    [[ -e "$entry" ]] || continue       # unmatched glob leaves the literal pattern
    renamed_path=${entry// /_}          # replace _ with - or . or whatever you want as space replacement here
    if [[ "$entry" != "$renamed_path" ]]; then
      echo -n "found spaces in ${entry} renaming to ${renamed_path}... "
      mv "${entry}" "${renamed_path}"
      echo "done"
    fi
    # Recurse into directories (using the possibly-renamed path).
    if [[ -d "${renamed_path}" ]]; then
      traverse_dir "${renamed_path}"
    fi
  done
}
# Entry point: require exactly one argument (the directory to process).
if [[ $# != 1 ]]; then
  echo "USAGE: $0 <directory> : renames all files and directories with spaces to _"
  exit 1;
else
  # Quote the argument so a starting directory containing spaces works too.
  traverse_dir "$1"
fi
| true |
1675456977139744934e11bd62b9c7f939651e95 | Shell | ilventu/aur-mirror | /seahorse-plugins-git/PKGBUILD | UTF-8 | 1,745 | 2.984375 | 3 | [] | no_license | # Maintainer: György Balló <ballogy@freestart.hu>
# Package metadata for the seahorse-plugins VCS (-git) package.  pkgver is
# just a snapshot date; the build always compiles the latest git HEAD.
_pkgname=seahorse-plugins
pkgname=$_pkgname-git
pkgver=20120101
pkgrel=1
pkgdesc="Plugins and extensions to third party software to provide encryption capability"
arch=('i686' 'x86_64')
url="http://live.gnome.org/Seahorse/Plugins"
license=('GPL')
depends=('gconf' 'libcryptui' 'desktop-file-utils' 'hicolor-icon-theme' 'xdg-utils')
makedepends=('git' 'gnome-common' 'intltool' 'gnome-doc-utils')
options=('!emptydirs' '!libtool')
install=$_pkgname.install

# Upstream repository and the name of the local checkout under $srcdir.
_gitroot="git://git.gnome.org/seahorse-plugins"
_gitname="seahorse-plugins"
# Fetch (or update) the upstream git checkout, then configure and compile
# in a fresh local clone so repeated builds never reuse stale artifacts.
build() {
  cd "$srcdir"
  msg "Connecting to GIT server...."

  # Reuse an existing clone when present, otherwise clone from scratch.
  if [ -d $_gitname ] ; then
    cd $_gitname && git pull origin
    msg "The local files are updated."
  else
    git clone $_gitroot $_gitname
  fi

  msg "GIT checkout done or server timeout"
  msg "Starting make..."

  # Build in a disposable copy of the checkout.
  rm -rf "$srcdir/$_gitname-build"
  git clone "$srcdir/$_gitname" "$srcdir/$_gitname-build"
  cd "$srcdir/$_gitname-build"

  #
  # BUILD HERE
  #
  ./autogen.sh --prefix=/usr --sysconfdir=/etc --localstatedir=/var \
    --disable-static --disable-schemas-install --disable-scrollkeeper \
    --disable-update-mime-database --disable-applet --disable-nautilus \
    --with-gconf-schema-file-dir=/usr/share/gconf/schemas --with-gtk=3.0
  make
}
# Install into the staging directory and strip files that are now
# shipped by other packages to avoid file conflicts.
package() {
  cd "$srcdir/$_gitname-build"
  make DESTDIR="$pkgdir/" install

  # Remove seahorse-tool (now it's in seahorse-nautilus package)
  rm "$pkgdir"/usr/bin/seahorse-tool \
    "$pkgdir"/usr/share/applications/seahorse-pgp-{encrypted,keys,signature}.desktop \
    "$pkgdir"/usr/share/man/man1/seahorse-tool.1 \
    "$pkgdir"/usr/share/seahorse-plugins/ui/seahorse-{multi-encrypt,notify,progress}.xml
}
| true |
414435d70c21296db523e8145827bb1024950907 | Shell | kartishr/scripts | /build_entrypoint.sh | UTF-8 | 2,540 | 4.03125 | 4 | [] | no_license | #!/bin/bash
# Capture the invocation directory and map the positional arguments onto
# the environment variables consumed by the layer-build sub-scripts.
current=`pwd`
echo $current
DIR=$(dirname "$0")

export LAYER_NAMES=$1      # comma-separated list of layer names
export WORKING_DIR=$current
export PYTHON_VERSION=$2
export BUCKET_NAME=$3      # S3 bucket receiving the packaged layers
export REGION=$4
# Check variables, applying defaults for any that were not supplied.
# BUG FIX: the original tested literal strings (e.g. `[ WORKING_DIR != "" ]`,
# note the missing `$`), which is always true, so none of the defaults for
# WORKING_DIR/PYTHON_VERSION/BUCKET_NAME/REGION could ever be applied.
if [ -n "$LAYER_NAMES" ]; then
    echo 'LAYER_NAMES found = ' $LAYER_NAMES
else
    export LAYER_NAMES="zero"
    echo "LAYER_NAMES set = " $LAYER_NAMES
fi

if [ -n "$WORKING_DIR" ]; then
    echo 'WORKING_DIR found = ' $WORKING_DIR
else
    export WORKING_DIR=$(pwd)
    echo "WORKING_DIR set = " $(pwd)
fi

if [ -n "$PYTHON_VERSION" ]; then
    echo 'PYTHON_VERSION found = ' $PYTHON_VERSION
else
    export PYTHON_VERSION="3.7"
    echo "PYTHON_VERSION set = " $PYTHON_VERSION
fi

if [ -n "$BUCKET_NAME" ]; then
    echo 'BUCKET_NAME found = ' $BUCKET_NAME
else
    export BUCKET_NAME="baby-yodas-bucket"
    echo "BUCKET_NAME set = " $BUCKET_NAME
fi

if [ -n "$REGION" ]; then
    echo 'REGION found = ' $REGION
else
    export REGION="zero"
    echo "REGION set = " $REGION
fi
# Build dependencies environment for Python Lambda Layers
# This script performs setup of a build environment for Python Lambda Layers
$WORKING_DIR/build_dependencies.sh $LAYER_NAMES $WORKING_DIR $PYTHON_VERSION $BUCKET_NAME $REGION

# On Alpine the requested Python must come from pyenv rather than the
# system package manager.
if cat /etc/*release | grep ^NAME | grep Alpine ; then
    echo "==============================================="
    echo "Setting pyenv VARs on Alpine"
    echo "==============================================="
    # Set Python version
    export PYENV_VERSION="${PYTHON_VERSION}-dev" # 3.7-dev
    # Set pyenv home
    export PYENV_HOME=/root/.pyenv
    # Update PATH
    export PATH=$PYENV_HOME/shims:$PYENV_HOME/bin:$PATH
    # Install Python
    pyenv global $PYENV_VERSION
    # Upgrade pip
    pyenv rehash
    echo "==============================================="
    echo "Python version: $(python --version)"
    echo "==============================================="
else
    echo "==============================================="
    echo "Alpine NOT DETECTED, continuing..."
    echo "==============================================="
fi

# Init Packages Directory
mkdir -p layer_packages/

# Build list of layer names from comma separated string
IFS=',' read -ra LAYER_NAMES_LIST <<< "$LAYER_NAMES"

# Building layers: each layer ships its own build_layer.sh under
# aws_layer_<name>/ and receives the same five arguments.
for i in "${LAYER_NAMES_LIST[@]}"
do
    echo "==============================================="
    echo "Building lambda Layer $i"
    echo "==============================================="
    $WORKING_DIR/aws_layer_$i/build_layer.sh $i $WORKING_DIR $PYTHON_VERSION $BUCKET_NAME $REGION
done
| true |
ddd67e7035c6f6b96eb4e0b52ce78d38e4f16b95 | Shell | eerimoq/punchboot | /src/tests/test_boot_pbi2.sh | UTF-8 | 413 | 2.546875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Build a punchboot image around a random payload, flash it to partition 0
# and verify that the system boots from slot A.  All failures are reported
# through the harness helpers from tests/common.sh.
source tests/common.sh

wait_for_qemu_start

# Random payload consumed by the image build.
dd if=/dev/urandom of=/tmp/random_data bs=64k count=1

if ! $PBI tests/image1.pbc; then
    test_end_error
fi

if ! $PB part -w -n 0 -f /tmp/img.pbi; then
    test_end_error
fi

sync

if ! $PB boot -b -s A; then
    test_end_error
fi

test_end_ok
| true |
77281ce9df151254fa144e8cfe22ae842d9c71a8 | Shell | ahesford/zfsbootmenu | /initcpio/install/zfsbootmenu | UTF-8 | 6,509 | 4 | 4 | [
"MIT"
] | permissive | #!/bin/bash
add_terminfo() {
  # Copy a minimal set of terminfo entries into the image so the menu can
  # drive the console correctly (approach borrowed from dracut).
  local base db entry
  db=""
  # Use the first directory tree that actually provides the linux entry.
  for base in /etc /usr/share /lib; do
    if [[ -r "${base}/terminfo/l/linux" ]]; then
      db="${base}/terminfo"
      break
    fi
  done

  [[ -d "${db}" ]] || return

  # l/linux is known to exist here; the vt entries are best-effort.
  for entry in l/linux v/vt100 v/vt102 v/vt220; do
    [[ -r "${db}/${entry}" ]] || continue
    add_file "${db}/${entry}" "/usr/share/terminfo/${entry}"
  done
}
add_optional_binary() {
  # Install a binary only when it exists on the build host; a missing
  # optional tool yields a warning instead of a build failure.  The test
  # is kept inside `if` so a failing `command -v` cannot trip mkinitcpio's
  # error/RETURN trap handling.
  if command -v "${1}" >/dev/null 2>&1; then
    add_binary "${1}"
  else
    warning "optional component '${1}' not found, will omit"
    return 0
  fi
}
add_optional_module() {
  # Add a kernel module to the initcpio image only if the module
  # actually exists as a loadable file. Otherwise, ignore the module.
  # $1 - module name; any ".ko*" extension and '-' spelling are accepted.

  # Without a version, no module is added
  [[ $KERNELVERSION == none ]] && return 0

  # Strip any extension, normalize name
  local target="${1%.ko*}"
  target="${target//-/_}"

  # Try to determine path to module, if there is one
  local kfile
  kfile="$( modinfo -k "${KERNELVERSION}" -n "${target}" 2>/dev/null )" || return 0

  # If module has a valid path, try to add it properly; modinfo can also
  # report pseudo-paths (e.g. for built-ins), which are silently skipped.
  case "${kfile}" in
    /*) add_module "${target}" ;;
    *) return 0 ;;
  esac
}
# Install the userland binaries ZFSBootMenu needs.  In "miser" mode only
# the binaries that busybox does not already provide are copied from the
# host; otherwise full system versions are always used.
add_zbm_binaries() {
  local mustcopy maycopy

  # Hard requirements
  # shellcheck disable=SC2154
  case "${zfsbootmenu_miser}" in
    1|[Yy]|[Yy][Ee][Ss]|[Oo][Nn])
      # Try to figure out what busybox provides
      ;;
    *)
      # Don't be a miser, use system versions
      map add_binary "${zfsbootmenu_essential_binaries[@]}"
      map add_optional_binary "${zfsbootmenu_optional_binaries[@]}"
      return
      ;;
  esac

  # Figure out which binaries busybox does *not* provide
  # (comm -23 keeps names present in our list but absent from busybox's)
  # shellcheck disable=SC2154
  readarray -t mustcopy < <(comm -23 \
    <(printf "%s\n" "${zfsbootmenu_essential_binaries[@]}" | sort) \
    <(/usr/lib/initcpio/busybox --list | sort))

  # Copy the missing required binaries
  map add_binary "${mustcopy[@]}"

  # Do the same for optional binaries
  # shellcheck disable=SC2154
  readarray -t maycopy < <(comm -23 \
    <(printf "%s\n" "${zfsbootmenu_optional_binaries[@]}" | sort) \
    <(/usr/lib/initcpio/busybox --list | sort))

  map add_optional_binary "${maycopy[@]}"
}
create_zbm_entrypoint() {
  # Create an entrypoint to initialize the ZBM environment: a small script
  # that sources the parse-commandline and preinit hooks in order, dropping
  # to a rescue shell if either is unreadable.  The 'EOF' delimiter is
  # quoted so nothing inside the here-doc expands at build time.
  mkdir -p "${BUILDROOT}/libexec"

  cat > "${BUILDROOT}/libexec/zfsbootmenu-initcpio" <<-'EOF'
	#!/bin/bash

	hooks=(
	  /lib/zfsbootmenu-parse-commandline.sh
	  /lib/zfsbootmenu-preinit.sh
	)

	for hook in "${hooks[@]}"; do
	  [ -r "${hook}" ] && source "${hook}" && continue
	  echo "ERROR: failed to load hook "${hook}"; good luck..."
	  exec /bin/bash
	done
	EOF

  # Entry point must be executable inside the image.
  chmod 755 "${BUILDROOT}/libexec/zfsbootmenu-initcpio"
}
# mkinitcpio hook entry point: assemble the complete ZFSBootMenu image
# (kernel modules, udev rules, binaries, libgcc_s, docs, core scripts,
# user setup/teardown hooks, ZFS compatibility data and host config).
build() {
  local hooks relative _file

  : "${zfsbootmenu_module_root:=/usr/share/zfsbootmenu}"

  # shellcheck disable=SC1091
  source "${zfsbootmenu_module_root}/install-helpers.sh" || exit 1

  # shellcheck disable=SC2034
  BUILDSTYLE="initcpio"

  # Modules (required and optional) used by ZBM
  # shellcheck disable=SC2154
  map add_module "${zfsbootmenu_essential_modules[@]}"
  # shellcheck disable=SC2154
  map add_optional_module "${zfsbootmenu_optional_modules[@]}"

  # Necessary udev rules (also finds required binaries)
  # shellcheck disable=SC2154
  map add_udev_rule "${zfsbootmenu_udev_rules[@]}"

  # Binaries required for ZBM operation
  add_zbm_binaries

  # Add libgcc_s as appropriate (needed by pthread cancellation at runtime)
  local _libgcc_s
  if ! _libgcc_s="$( find_libgcc_s )"; then
    error "unable to locate libgcc_s.so"
    exit 1
  fi

  local _lib
  while read -r _lib ; do
    [ -n "${_lib}" ] || continue
    if ! add_binary "${_lib}"; then
      error "Failed to install '${_lib}'"
      exit 1
    fi
  done <<< "${_libgcc_s}"

  # On-line documentation (paths are rewritten relative to the module root)
  while read -r doc; do
    relative="${doc#"${zfsbootmenu_module_root}/"}"
    [ "${relative}" = "${doc}" ] && continue
    add_file "${doc}" "/usr/share/docs/${relative}"
  done <<< "$( find "${zfsbootmenu_module_root}/help-files" -type f )"

  # Install core ZBM functionality
  for _file in "${zfsbootmenu_module_root}"/lib/*; do
    add_file "${_file}" "/lib/${_file##*/}"
  done

  for _file in "${zfsbootmenu_module_root}"/libexec/*; do
    add_file "${_file}" "/libexec/${_file##*/}"
  done

  for _file in "${zfsbootmenu_module_root}"/bin/*; do
    add_file "${_file}" "/bin/${_file##*/}"
  done

  hooks=( zfsbootmenu-{parse-commandline,preinit}.sh )

  for _file in "${hooks[@]}"; do
    add_file "${zfsbootmenu_module_root}/hook/${_file}" "/lib/${_file}"
  done

  # allow mount(8) to "autodetect" ZFS
  echo 'zfs' >>"${BUILDROOT}/etc/filesystems"

  # User-provided hook scripts, copied only when executable.
  # shellcheck disable=SC2154
  for _file in "${zfsbootmenu_early_setup[@]}"; do
    [ -x "${_file}" ] || continue
    add_file "${_file}" "/libexec/early-setup.d/${_file##*/}"
  done

  # shellcheck disable=SC2154
  for _file in "${zfsbootmenu_setup[@]}"; do
    [ -x "${_file}" ] || continue
    add_file "${_file}" "/libexec/setup.d/${_file##*/}"
  done

  # shellcheck disable=SC2154
  for _file in "${zfsbootmenu_teardown[@]}"; do
    [ -x "${_file}" ] || continue
    add_file "${_file}" "/libexec/teardown.d/${_file##*/}"
  done

  compat_dirs=( "/etc/zfs/compatibility.d" "/usr/share/zfs/compatibility.d/" )
  for compat_dir in "${compat_dirs[@]}"; do
    [ -d "${compat_dir}" ] && add_full_dir "${compat_dir}"
  done

  # Copy host-specific ZFS configs
  [[ -f /etc/hostid ]] && add_file "/etc/hostid"
  [[ -f /etc/zfs/vdev_id.conf ]] && add_file "/etc/zfs/vdev_id.conf"
  [[ -f /etc/modprobe.d/zfs.conf ]] && add_file "/etc/modprobe.d/zfs.conf"

  add_terminfo
  create_zbm_conf
  create_zbm_profiles
  create_zbm_traceconf
  create_zbm_entrypoint

  add_runscript
}
help() {
  # One-line description shown by `mkinitcpio -H zfsbootmenu`.
  printf '%s\n' "This hook turns the initramfs into a ZFSBootMenu image"
}
| true |
a5a332dd1970ea0c01c121b82d58ae1414fe5db5 | Shell | hubzhangxj/AutoTest_D06 | /roles/i2c_regvaltest/files/pcie_test.sh | UTF-8 | 1,810 | 2.828125 | 3 | [] | no_license | #!/bin/bash
# wu.wu@hisilicon.com
# 2015.4.3
source ./d05_scripts/server_brdnum_list
# Lookup table: D2B[n] is the 8-bit binary string for n in 0..255,
# generated by brace-expanding every combination of eight {0,1} digits.
D2B=({0..1}{0..1}{0..1}{0..1}{0..1}{0..1}{0..1}{0..1})
# Check PCIe link state by reading two registers via busybox devmem.
# $1 - register whose bits [5:0] encode the link-up state (expect 010001)
# $2 - register whose bits [24:20] encode lane count and [19:16] link speed
# Each hex nibble/byte is converted to a binary string through the D2B
# table and the relevant bit fields are compared against expected values.
# NOTE(review): only the link-speed mismatch returns failure; the link-up
# and lane checks merely warn (their `return -1` is commented out).
test_pcie()
{
	output=$(./bin/busybox devmem $1)
	#echo $output
	if [ "$output"x = ""x ]; then
		echo "can not get output from reading $1"
		return -1
	fi

	# for [5:0] bit test: take the last two hex digits, convert to binary,
	# keep the low six bits
	low_output=${output: -2}
	((low_output=16#"$low_output"))
	low_6bit=${D2B[$low_output]}
	low_6bit=${low_6bit: -6}
	if [ $low_6bit != '010001' ]; then
		echo "the pcie of $1 has not linked up"
		echo "the value of low 5 bit are $low_6bit"
		#return -1
	fi

	output=$(./bin/busybox devmem $2)
	# for [24:20] bit test: hex digits 3-4 of the 32-bit value
	high_output=${output:3:2}
	((high_output=16#"$high_output"))
	high_value=${D2B[$high_output]}
	#echo $high_value
	high_5bit=${high_value:3:5}
	if [ "$high_5bit"x != '01000'x ]; then
		echo "the link lane of $2 has been wrong"
		echo "the value of high 5 bit are $high_5bit"
		#return -1
	fi

	# for [19:16] bit test: fifth hex digit, low four bits (0010 = Gen2 —
	# presumably; confirm against the controller register manual)
	middle_output=${output:5:1}
	((middle_output=16#"$middle_output"))
	middle_value=${D2B[$middle_output]}
	middle_value=${middle_value: -4}
	if [ "$middle_value"x != "0010"x ]; then
		echo "the link speed of $2 has been wrong"
		echo "the value of middle 4 bit are $middle_value"
		return -1
	fi
	echo ""
	return 0
}
# Check the pcie1 link; on success also run the XGE speed test on the
# selected PCIe network card.  pcie2 checks are currently disabled.
test_pcie 0xa00a0728 0xa00a0080
if [ $? -ne 0 ]; then
	echo 'the pcie1 link status is fail'
	exit -1
else
	echo 'the pcie1 link status is succeed'
	. ./d05_scripts/XGE_speed.sh selected_pcie_netcard 3
fi

#test_pcie 0xb0006a18 0xb0090080
#if [ $? -ne 0 ]; then
#	echo 'the pcie2 link status is fail'
#	exit -1
#else
#	echo 'the pcie2 link status is succeed'
#fi

echo "pcie Test Succeed"
exit 0
| true |
80b949bc618957e2cfbd6669b28fe04f62e9b68a | Shell | pjhu/medicine-backend | /docker/master.sh | UTF-8 | 440 | 2.765625 | 3 | [] | no_license | #!/bin/bash
# Configure the PostgreSQL node for streaming replication.  Runs only when
# the container is started with PG_ROLE=master; replicas skip this block.
set -e

# BUG FIX: quote and default PG_ROLE — the original unquoted test
# ([ $PG_ROLE = "master" ]) aborted the script under `set -e` whenever
# PG_ROLE was unset, so replicas crashed instead of skipping.
if [ "${PG_ROLE:-}" = "master" ]; then
    # Enable WAL shipping so standbys can stream from this node.
    cat >> "${PGDATA}/postgresql.conf" <<EOT
wal_level = replica
max_wal_senders = 10
wal_keep_segments = 256
archive_mode = on
EOT

    # Create the replication role used by the standbys.
    psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
    CREATE USER $PG_REP_USER REPLICATION LOGIN CONNECTION LIMIT 100 ENCRYPTED PASSWORD '$PG_REP_PASSWORD';
EOSQL

    # Allow password-authenticated replication connections from any host.
    echo "host replication all 0.0.0.0/0 md5" >> "$PGDATA/pg_hba.conf"
fi
| true |
86175df9b7aa7817f9888e16d15fce98eb2e5891 | Shell | lizhichao1988/iPhone-Libraries | /cross compile/iPhoneDevProjects 2/3rdparty/pkg/unzip.sh | UTF-8 | 412 | 2.75 | 3 | [] | no_license | #!/bin/bash
# Unpack the OpenSSL and libssh2 tarballs into the simulator and device
# build trees under $PROJECT_ROOT, then drop in the patched ui_openssl.c.
if [ -z "$PROJECT_ROOT" ]; then
	echo "PROJECT_ROOT is nil"
	# Exit non-zero so callers can detect the misconfiguration
	# (the original bare `exit` reported success).
	exit 1
fi

pwd=$PWD
tar -xzf openssl-0.9.8o.tar.gz -C "$PROJECT_ROOT/build-sim"
tar -xzf libssh2-1.2.7.tar.gz -C "$PROJECT_ROOT/build-sim"
cd "$pwd"
tar -xzf openssl-0.9.8o.tar.gz -C "$PROJECT_ROOT/build-os"
tar -xzf libssh2-1.2.7.tar.gz -C "$PROJECT_ROOT/build-os"
cp patch/ui_openssl.c "${PROJECT_ROOT}/build-os/openssl-0.9.8o/crypto/ui/"
| true |
3b549a963e6e4eb14a58ac1fa2f666e2e099a007 | Shell | fpob/dotfiles | /.config/polybar/launch | UTF-8 | 443 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env bash
# Expose the first wireless interface for the bar's network module.
export WLAN_INTERFACE=$(ip -br link show | awk '/^wl/ { print $1 ; nextfile }')

# Terminate already running bar instances.
pkill -U "$UID" polybar

# Start polybar on all monitors; the primary monitor runs the "primary"
# bar, every other one the "secondary" bar.  `read -r` plus quoting keeps
# monitor descriptions intact (no word splitting / globbing).
polybar --list-monitors | while IFS= read -r line ; do
    export MONITOR=$(cut -d":" -f1 <<< "$line")
    export BAR=$(grep -q primary <<< "$line" && echo primary || echo secondary)
    systemd-cat -t "polybar/$MONITOR" polybar --reload "$BAR" &
done
0706a832b70f5eebe4977b18c8c99e88f9068a3a | Shell | thaophung/COSC4550 | /test_set_generate_txt.sh | UTF-8 | 218 | 3.03125 | 3 | [] | no_license | #!/bin/bash
# Emit "<image-path> <label>" lines (shuffled per class) into test.txt,
# where the label is the class directory name under test/.
for d in test/*; do
    echo "${d}"
    #Directory name is also label
    label=$(basename "$d")
    # Shuffle the image list and read it line-by-line instead of
    # word-splitting an `ls` pipeline, so paths survive intact.
    while IFS= read -r f; do
        echo "${f} ${label}" >> test.txt
    done < <(printf '%s\n' "train_10_image/${label}"/*.JPEG | gshuf)
done
| true |
93024d5ee9ab97680688fa915da0b01811bcc8e1 | Shell | xliu93/Trading-on-Social-Sentiment-Analytics | /DataIngestion_Twitter_Data/BATCH_RUN | UTF-8 | 373 | 3.3125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Export tweets for each username listed (one per line) in the file given
# as $1, archive each result CSV, then e-mail a completion notice.
NOW=$(date +"%Y-%m-%d-%H:%M:%S")
LOGFILE=logs/batch-$NOW.log

# -r keeps backslashes literal; quoting protects unusual usernames.
while read -r USER; do
	python Exporter.py --username "$USER" --since 2010-01-01 --until 2018-04-01 > "logs/$USER.log"
	mv output_got.csv "output/output_$USER.csv"
	echo "output_$USER.csv moved to output directory." >> "$LOGFILE"
done < "$1"

ls -l ./output/ | mail -s "Batch Job Done! Check $LOGFILE" xialiang.liu@nyu.edu
| true |
30d5a3dc983ab865359a90097a478d3ddcdf1529 | Shell | potyosh/BasicSortComparison | /runShellSort.sh | UTF-8 | 309 | 3.109375 | 3 | [] | no_license | #!/bin/sh
# Benchmark shell sort over growing input sizes (500, 1000, ..., 50000),
# appending the results to result_shell.txt.
g++ -o genRandomData genRandomData.cpp
g++ -o shellSort shellSort.cpp
#echo 100 | ./genRandomData
#./shellSort >> result_shell.txt
# -f: don't fail on the very first run, when no previous results exist.
rm -f result_shell.txt
# POSIX arithmetic/while replaces the external `seq` and `expr` calls
# (this script runs under /bin/sh, so bash-only {1..100} is avoided).
i=1
while [ "$i" -le 100 ]
do
    dataNum=$((500 * i))
    echo "$dataNum" | ./genRandomData
    ./shellSort >> result_shell.txt
    echo "$dataNum"
    i=$((i + 1))
done
| true |
1cd88eac2604d544779182be27dd5b1434fd6428 | Shell | thomasem/misc-tools | /focus.sh | UTF-8 | 323 | 3.328125 | 3 | [] | no_license | #!/usr/bin/env bash
# Simple helper for wmctrl focus for varying window titles but persistent executable names in `ps`
# Use: `focus <executable name>`
# e.g. `focus spotify` or `focus gnome-terminal`

# pgrep avoids the classic `ps | grep` pitfall of matching the grep
# process itself; -o picks the oldest (first-started) matching PID.
PID=$(pgrep -o "$1")
# Bail out when nothing matched: with an empty PID the grep below would
# match every window and focus an arbitrary one.
[ -n "$PID" ] || exit 1
WID="$(wmctrl -lp | grep "$PID" | awk '{print $1}')"
wmctrl -ia "$WID"
| true |
160f6bbf52be3d8e1102b1348635b2e81822df65 | Shell | cuttlefishh/papers | /vibrio-fischeri-transcriptomics/code/bash/PECombiner.sh | UTF-8 | 935 | 3.203125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Combine paired-end FASTQ reads with fastqcombinepairedend.py.
# Edit the placeholders below before running:
#replace "SEQHEADER" with the first 4 characters of your sequence name including the @
#replace "DELIMITER" with the specific delimiter from your fastq file,
#see the two examples below:
#@HWI-ST141_0363:2:1101:1175:2080#ATCACG/1
#@HWI-ST1117:83:D0TKCACXX:6:1101:1274:2051 2:N:0:TAGCTT
#SEQHEADER would be "@HWI" for both examples, DELIMITER would be a "/" in the first case and
#a " " (space) in the second. Both SEQHEADER and DELIMITER need to be in "", while the filenames
#do not.
#Then copy the following line as many times as the number of paired end lanes that you have, then
#replace YOURFILE#1_1_trimmed_clipped.fastq with the name of your forward direction .fastq file
#replace YOURFILE#1_2_trimmed_clipped.fastq with the name of your reverse direction .fastq file
#for that same sample.
fastqcombinepairedend.py "SEQHEADER" "DELIMITER" YOURFILE#1_1_trimmed_clipped.fastq YOURFILE#1_2_trimmed_clipped.fastq
| true |
c3f375e5b1499b6f03e639275a85e70e590b0490 | Shell | bmwiedemann/openSUSE | /packages/n/nrpe/nrpe.init | UTF-8 | 3,315 | 3.59375 | 4 | [] | no_license | #!/bin/bash
#
# Copyright (c) 2010 SUSE Linux Products GmbH
# Authors: Lars Vogdt (2010-2012)
#
# /etc/init.d/nrpe
# and its symbolic link
# /usr/sbin/rcnrpe
#
### BEGIN INIT INFO
# Provides: nagios-nrpe
# Required-Start: $remote_fs $syslog $network
# Should-Start: cron
# Required-Stop: $remote_fs $syslog
# Should-Stop: cron
# Default-Start: 3 5
# Default-Stop: 0 1 2 6
# Short-Description: NRPE Nagios Remote Plugin Executor
# Description: Start NRPE to allow remote execution of
# Nagios plugins.
### END INIT INFO
# Paths to the NRPE daemon, its config, and the fallback PID file used
# when the daemon runs unprivileged.
NRPE_BIN="/usr/sbin/nrpe"
NRPE_CONFIG="/etc/nrpe.cfg"
DEFAULT_PIDFILE="/var/run/nrpe/nrpe.pid"

# LSB semantics: "stop" of a missing service is success; otherwise exit 5
# (program not installed) / 6 (program not configured).
test -x $NRPE_BIN || { echo "$NRPE_BIN not installed";
	if [ "$1" = "stop" ]; then exit 0;
	else exit 5; fi; }

test -r $NRPE_CONFIG || { echo "$NRPE_CONFIG not existing";
	if [ "$1" = "stop" ]; then exit 0;
	else exit 6; fi; }
# Look up a "key=value" setting and print the value.
# $1 - key to search for; $2 - optional config file (defaults to
# $NRPE_CONFIG).  The first '=' becomes a space, control characters are
# stripped, and the leading key token is discarded before printing.
function get_value() {
    local cfg
    cfg="${2:-$NRPE_CONFIG}"
    set -- $(grep "^$1" "$cfg" | sed 's@=@ @' | tr -d '[:cntrl:]')
    shift # drop the key itself, keeping only the value words
    echo $*
}
# Shell functions sourced from /etc/rc.status:
. /etc/rc.status
# Reset status of this service
rc_reset
case "$1" in
start)
echo -n "Starting Nagios NRPE "
pid_file="$(get_value pid_file)"
nrpe_group="$(get_value nrpe_group)"
nrpe_user="$(get_value nrpe_user)"
: ${pid_file=:=$DEFAULT_PIDFILE}
: ${nrpe_group:=nagios}
: ${nrpe_user:=nagios}
if [ -z "$pid_file" ]; then
PIDDIR=$(dirname $pid_file)
else
PIDDIR=$(dirname $DEFAULT_PIDFILE)
fi
case "$PIDDIR" in
/var/run)
if [ x"$nrpe_user" != x"root" ]; then
DATESTRING=$(date +"%Y%m%d")
mv -f "$NRPE_CONFIG" "$NRPE_CONFIG-$DATESTRING"
sed -e "s|^pid_file.*|pid_file=$DEFAULT_PIDFILE|g" "$NRPE_CONFIG-$DATESTRING" > "$NRPE_CONFIG"
/bin/logger -t rcnrpe "Configured $pid_file in $NRPE_CONFIG moved to $DEFAULT_PIDFILE. Backup is $NRPE_CONFIG-$DATESTRING"
echo
echo "Configured $pid_file in $NRPE_CONFIG moved to $DEFAULT_PIDFILE. Backup is $NRPE_CONFIG-$DATESTRING"
test -f "$pid_file" && rm "$pid_file"
install -d -m755 -o$nrpe_user -g$nrpe_group $(dirname "$DEFAULT_PIDFILE")
else
test -d "$PIDDIR" || mkdir -p "$PIDDIR"
fi
;;
*)
test -d $(dirname "$DEFAULT_PIDFILE") || install -d -m755 -o$nrpe_user -g$nrpe_group $(dirname "$DEFAULT_PIDFILE")
;;
esac
/sbin/startproc $NRPE_BIN -c $NRPE_CONFIG -d
rc_status -v
;;
stop)
echo -n "Shutting down Nagios NRPE "
/sbin/killproc -TERM $NRPE_BIN
rc_status -v
;;
try-restart|condrestart)
if test "$1" = "condrestart"; then
echo "${attn} Use try-restart ${done}(LSB)${attn} rather than condrestart ${warn}(RH)${norm}"
fi
$0 status
if test $? = 0; then
$0 restart
else
rc_reset # Not running is not a failure.
fi
rc_status
;;
restart)
$0 stop
$0 start
rc_status
;;
reload|force-reload)
echo -n "Reload service Nagios NRPE "
/sbin/killproc -HUP $NRPE_BIN
rc_status -v
;;
status)
echo -n "Checking for service Nagios NRPE "
/sbin/checkproc $NRPE_BIN
rc_status -v
;;
*)
echo "Usage: $0 {start|stop|status|try-restart|restart|force-reload|reload}"
exit 1
;;
esac
rc_exit
| true |
7aaf0c17bda2cafccf72600889accd78e91b228e | Shell | jotabr/D288-Dumps | /classroom.example.com/content/courses/do288/ocp4.5/grading-scripts/lab-nexus-service | UTF-8 | 2,360 | 3.84375 | 4 | [] | no_license | #!/usr/bin/bash
#
# Copyright 2019 Red Hat, Inc
#
# NAME
# lab-nexus-service - grading script for DO288 ch09s04 ge
#
# SYNOPSIS
# lab-nexus-service {start|finish}
#
# start - prepare the system for starting the lab
# finish - perform post-lab cleanup
#
# DESCRIPTION
# This script, based on singular argument, either does setup, grading,
# cleanup, or solve for the particular lab of awesomeness.
#
# CHANGELOG
# * Tue Nov 29 2017 Douglas Silva <dsilva@redhat.com>
# - original code
PATH=/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin

# Initialize and set some variables
run_as_root='true'

# Required by the do288 shlib: exercise tag and title used in output.
this='nexus-service'
title='GE: Containerizing Nexus as a Service'

# This defines which subcommands are supported (setup, grade, etc.).
# Corresponding lab_COMMAND functions must be defined.
declare -a valid_commands=(start finish)
# Verify exercise prerequisites, copy the lab files to the workstation and
# substitute the student's quay.io user name into both template files.
function lab_start {

  ocp4_print_prereq_header

  ocp4_verify_local_clone_exist
  ocp4_is_cluster_up
  ocp4_fail_if_project_exists "${RHT_OCP4_DEV_USER}-${this}"
  ocp4_verify_prereq_git_projects 'nexus3'

  ocp4_print_setup_header
  ocp4_grab_lab_files

  # Inject the student's quay.io account into the starter template.
  local template="${labs}/${this}/nexus-template.yaml"
  ocp4_pad "Inserting developer user name and wildcard domain on starter template file"
  if sed -i "s/REPLACE_QUAY_USER/${RHT_OCP4_QUAY_USER}/" "${template}"
  then
    print_SUCCESS
  else
    print_FAIL
  fi

  # Same substitution for the solution template.
  local soltemplate="${solutions}/${this}/nexus-template.yaml"
  ocp4_pad "Inserting developer user name and wildcard domain on solution template file"
  if sed -i "s/REPLACE_QUAY_USER/${RHT_OCP4_QUAY_USER}/" "${soltemplate}"
  then
    print_SUCCESS
  else
    print_FAIL
  fi

  ocp4_print_setup_footer

}
# Clean up after the exercise: delete the OpenShift project, the locally
# built image and the lab files, then remind the student about quay.io.
function lab_finish {

  ocp4_print_cleanup_header

  ocp4_login_as_developer

  ocp4_delete_project "${RHT_OCP4_DEV_USER}-${this}"

  image=nexus
  ocp4_delete_local_image "localhost/${RHT_OCP4_QUAY_USER}/${image}"

  ocp4_cleanup_lab_files

  print_line
  print_line 'If you want to perform this exercise again, you need to access quay.io'
  print_line "and delete your 'nexus' repository."
  print_line

  ocp4_print_cleanup_footer
}
############### Don't EVER change anything below this line ###############

# Source library of functions (shared helpers and the per-platform lib),
# then dispatch to lab_start/lab_finish based on the subcommand argument.
source /usr/local/lib/${function_lib}
source /usr/local/lib/${platform_lib}

grading_main_program "$@"
| true |
1b6bdb4f77e52e236e01d5eea2d87934df87adfb | Shell | xian123/LISAv2 | /Testscripts/Linux/docker_dotnet_app.sh | UTF-8 | 4,562 | 4.09375 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | #!/bin/bash
########################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache License.
#
# Description:
# Script to compile and run the dotnet hello-world app in docker.
#
########################################################################
# Generated Dockerfile name, test container/image names, the marker string
# the hello-world app must print, and the default .NET Core SDK version
# (lowered later for distros that only ship an older SDK).
DOCKER_FILENAME="Dockerfile"
CONTAINER_NAME="dotnetapp"
CONTAINER_IMAGE_NAME="dotnetimage"
STRING_IDENTIFIER="Hello World"
SDK_VERSION="3.1"
# Probe a package URL with an HTTP HEAD request to decide whether a
# Microsoft package feed exists for this distro/version combination.
# Returns 0 when reachable, 1 when the argument is empty or the probe fails.
CheckDotnetSDKSupport() {
    local package_name=$1
    [[ -n $package_name ]] || return 1
    if ! curl --head --silent --fail $package_name; then
        echo "$package_name not available"
        return 1
    fi
    return 0
}
# Function to install Dotnet SDK.
# Adds the Microsoft package repository for the detected distro/version
# and installs the SDK.  Returns 0 on success, 2 when the distro (or the
# Microsoft feed for it) is unsupported, otherwise the installer's status.
InstallDotnetSDK() {
	local ret=0
	# Distro id and version parsed from /etc/os-release (quotes stripped).
	distro=$(cat /etc/os-release | grep -w "ID" | cut -d= -f2 | tr -d \")
	id=$(cat /etc/os-release | grep -w "VERSION_ID=" | cut -d= -f2 | tr -d \")
	package_name="packages-microsoft-prod"
	case $DISTRO in
		ubuntu*|debian*)
			package_name=${package_name}.deb
			package=https://packages.microsoft.com/config/${distro}/${id}/${package_name}
			if CheckDotnetSDKSupport $package;then
				wget ${package} -O ${package_name}
				dpkg -i ${package_name}
				add-apt-repository -y universe
				apt-get update
				apt-get install apt-transport-https
				apt-get update
				# The latest dotnet-sdk version is 2.2 in Ubuntu 14.04
				if [[ $os_RELEASE = '14.04' ]]; then
					SDK_VERSION="2.2"
				fi
				apt-get install -y dotnet-sdk-${SDK_VERSION}
				ret=$?
			else
				ret=2
			fi
			;;
		centos*|redhat*|almalinux*|rockylinux*)
			package_name=${package_name}.rpm
			package=https://packages.microsoft.com/config/${distro}/${id}/${package_name}
			if CheckDotnetSDKSupport $package;then
				rpm -ivh ${package}
				# NOTE(review): "3.1" is hard-coded here instead of
				# ${SDK_VERSION}; confirm whether that is intentional.
				yum install -y dotnet-sdk-3.1
				ret=$?
			else
				ret=2
			fi
			;;
		*)
			LogErr "$DISTRO not supported"
			ret=2
	esac
	return $ret
}
# Function to compile dotnet application.
# Scaffolds a fresh "helloworld" console app under ./app, runs it once,
# and publishes a Release build for packaging into the container image.
CompileDotnetApplication() {
	APP_FOLDER="app"
	# Start from a clean tree so re-runs don't mix artifacts.
	[[ -d ${APP_FOLDER} ]] && rm -rf ${APP_FOLDER}
	dotnet new console -o app -n helloworld
	if [[ $? -ne 0 ]];then
		echo "Failed to compile dotnet application"
		return 1
	fi
	pushd ${APP_FOLDER}
	dotnet run
	dotnet publish -c Release
	popd
}
# Function to generate docker file.
# Writes a Dockerfile that layers the published Release build onto the
# matching .NET Core SDK base image; the unquoted EOF lets ${SDK_VERSION}
# expand into the generated file.  Returns 1 if the file was not created.
GenerateDockerFile() {
	cat << EOF > $DOCKER_FILENAME
FROM mcr.microsoft.com/dotnet/core/sdk:${SDK_VERSION}
COPY app/bin/Release/netcoreapp${SDK_VERSION}/publish/ app/
WORKDIR /app
ENTRYPOINT ["dotnet", "helloworld.dll"]
EOF
	[[ ! -f $DOCKER_FILENAME ]] && return 1
	return 0
}
# Function to evaluate the test results.
# Succeeds (returns 0) when the captured `docker run` output file contains
# the expected marker string as a whole word; returns 1 otherwise.
EvaluateTestResult() {
    # Quote the log path so the check also works when it contains spaces.
    grep -qw "${STRING_IDENTIFIER}" "$DOCKER_RUN_OUTPUT" && return 0
    return 1
}
#######################################################################
#
# Main script body
#
#######################################################################
# Source containers_utils.sh; abort the test (state "TestAborted") if the
# shared container helpers are unavailable.
. containers_utils.sh || {
    echo "ERROR: unable to source containers_utils.sh"
    echo "TestAborted" > state.txt
    exit 0
}

UtilsInit
GetDistro
update_repos

# Install Docker; failure counts as a test failure.
InstallDockerEngine; ret=$?
[[ $ret -ne 0 ]] && HandleFailure "ERROR: InstallDockerEngine failed" "$ret"

# Install the SDK; status 2 means "unsupported here" and skips the test.
InstallDotnetSDK; ret=$?
[[ $ret -eq 2 ]] && HandleSkip "WARN: InstallDotnetSDK failed" "$ret"
[[ $ret -ne 0 ]] && HandleAbort "ERROR: InstallDotnetSDK failed" "$ret"

CompileDotnetApplication; ret=$?
[[ $ret -ne 0 ]] && HandleAbort "ERROR: CompileDotnetApplication failed" "$ret"

GenerateDockerFile; ret=$?
[[ $ret -ne 0 ]] && HandleAbort "ERROR: GenerateDockerFile failed" "$ret"

# Remove leftovers from previous runs, then build and run the container.
RemoveDockerContainer $CONTAINER_NAME
RemoveDockerImage $CONTAINER_IMAGE_NAME

BuildDockerImage $CONTAINER_IMAGE_NAME; ret=$?
[[ $ret -ne 0 ]] && HandleFailure "ERROR: BuildDockerImage failed" "$ret"

RunDockerContainer $CONTAINER_IMAGE_NAME $CONTAINER_NAME; ret=$?
[[ $ret -ne 0 ]] && HandleFailure "ERROR: RunDockerContainer failed" "$ret"

# Verify the app printed the expected marker string.
EvaluateTestResult; ret=$?
[[ $ret -ne 0 ]] && HandleFailure "ERROR: EvaluateTestResult failed" "$ret"

SetTestStateCompleted
exit 0
| true |
8cd8230085b6ce276d99f770b2510e1695922a64 | Shell | lukeconibear/papers_wrfotron | /WRFotron0.0_Conibear_2018_Geohealth/WRFotron0.0_paper_india_scenarios_tra_minus_10/config.bash | UTF-8 | 2,626 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# ------------------------------------------------------------------------------
# WRFOTRON v 1.0
# Christoph Knote (LMU Munich)
# 02/2016
# christoph.knote@lmu.de
# ------------------------------------------------------------------------------
# WRFotron run configuration: all paths and knobs consumed by the
# pre/main/post chain scripts.  Edit paths for your site before running.

# path to where these scripts are
chainDir=/nobackup/pmlac/code/WRFotron_paper_india_air_quality_emission_scenarios_tra_minus_10
. $chainDir/profile.bash

version=0.1

projectTag=wrfchem_india

withChemistry=true

# WPS installation directory
WPSdir=/nobackup/pmlac/code/WPS
# WRF installation directory
WRFdir=/nobackup/pmlac/code/WRFV3
# WRF meteo-only installation directory
WRFmeteodir=/nobackup/pmlac/code/WRFV3_meteo
# megan_bio_emiss installation directory
WRFMEGANdir=/nobackup/pmlac/code/megan
# mozbc installation directory
WRFMOZARTdir=/nobackup/pmlac/code/mozbc
# wesley/exocoldens installation directory
WRFmztoolsdir=/nobackup/pmlac/code/wes-coldens
# anthro_emiss installation directory
WRFanthrodir=/nobackup/pmlac/code/anthro_emis
# fire_emiss installation directory
WRFfiredir=/nobackup/pmlac/code/finn/src

# path to GFS input data
metDir=/nobackup/pmlac/initial_boundary_meteo
# interval between meteorological input files (hours) — presumably;
# confirm against the preprocessing script that consumes it
metInc=3

# path to geogrid input data
geogDir=/nobackup/wrfchem/wps_geog

# path to MEGAN input data
MEGANdir=/nobackup/pmlac/emissions/MEGAN

# raw emission input - the files you read in with anthro_emiss
emissDir=/nobackup/pmlac/emissions/EDGAR-HTAP2/MOZART
# emission conversion script for anthro_emis - must match emissions in emissDir
emissInpFile=emis_edgarhtap2_mozmos.inp
# year the emissions are valid for (for offset calculation)
emissYear=2010

# path to FINN fire emissions (requires a / at the end)
fireDir=/nobackup/pmlac/emissions/FINN/
# FINN fire emissions input file
fireInpFile=fire_emis.mozm.inp

# MOZART boundary condition files
MOZARTdir=/nobackup/pmlac/initial_boundary_chem

# where the WRF will run - /glade/scratch/something...
workDir=/nobackup/pmlac/paper_india_air_quality_emission_scenarios_2014_tra_minus_10/run
# where the WRF output will be stored - also maybe /glade/scratch/something...
archiveRootDir=/nobackup/pmlac/paper_india_air_quality_emission_scenarios_2014_tra_minus_10/output
# where the WRF restart files will be stored - also maybe /glade/scratch/something...
restartRootDir=/nobackup/pmlac/paper_india_air_quality_emission_scenarios_2014_tra_minus_10/restart
# remove run directory after run is finished?
removeRunDir=false

# NCL post-processing script applied to model output
nclPpScript=${chainDir}/pp.ncl

#number of cores to run with for each stage
nprocPre=1
nprocMain=32
nprocPost=1

#mpirun for real.exe and wrf.exe
mpiCommandPre=mpirun
mpiCommandMain=mpirun

# job submission command for the site scheduler
submitCommand=qsub
usequeue=true
| true |
8000e5f225ce6d1320b1e538f6bfdff9155715ae | Shell | adeutscher/core-tools | /scripts/test/coverage.sh | UTF-8 | 685 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Run the unit-test suite under coverage and emit an HTML + console report.
# Abort if we cannot reach the script's own directory.
cd "$(dirname "${0}")" || exit 1

# Per-step timeout in seconds; the test run itself gets a 5x multiple.
limit=5

if ! timeout "$limit" coverage erase; then
  echo "Failed to clear coverage data."
  exit 1
fi

if ! timeout $((limit * 5)) coverage run --omit="/usr*" ./run.py; then
  echo "Failed to run unit tests.."
  echo "If run.py passes on its own but coverage fails, then we may need to increase the coverage script's timeout limit."
  exit 1
fi

if ! timeout "$limit" coverage html --omit="/usr*"; then
  # Fixed: this step previously reused the console-report error message.
  echo "Failed to generate HTML coverage report."
  exit 1
fi

if ! timeout "$limit" coverage report; then
  echo "Failed to print coverage report."
  exit 1
fi

dirPath="htmlcov/index.html"
printf "Coverage report generated at %s/%s\n" "$(pwd)" "${dirPath}"
| true |
3850457f1dc5fa2358cfffb45e661a7c9a613c10 | Shell | bobRafat/terraform-aws-gitlab-runner | /examples/shunit2/test_simple.sh | UTF-8 | 1,398 | 3.5 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# shunit2 suite: provisions the 'simple' terraform example and verifies the
# GitLab runner registers itself. Helper functions (validateVars, err, ...)
# come from test_helper.sh.
. shunit2/test_helper.sh

# Environment variables that must be present before the suite runs;
# validateVars (from test_helper.sh) presumably aborts when any are
# missing — confirm against the helper.
vars=(
  AWS_ACCESS_KEY_ID
  AWS_SECRET_ACCESS_KEY
  TF_VAR_registration_token
  TF_VAR_enable_ssh_access
  TF_VAR_vpc_id
  TF_VAR_subnet_ids
  TF_VAR_key_name
)
validateVars

# The key pair's private key must exist locally for the SSH probes below.
[ ! -f ~/.ssh/"$TF_VAR_key_name".pem ] && err "EC2 private key not found"
# Run a command on the EC2 instance at the given IP as ec2-user,
# authenticating with the suite's key pair and skipping host-key checks.
#   $1 - public IP address of the instance
#   $2 - command line to execute remotely
execBySsh() {
  local target_ip=$1
  local remote_cmd=$2
  ssh -o 'StrictHostKeyChecking no' -i "$HOME/.ssh/$TF_VAR_key_name.pem" \
    ec2-user@"$target_ip" "$remote_cmd"
}
# Apply the terraform stack, discover the runner instance's public IP
# (stored in the global ip_address for the following tests), and poll
# until cloud-init finishes on the instance.
testSimple() {
  cd simple
  if ! terraform apply -auto-approve ; then
    fail "terraform did not apply"
    # shunit2: skip remaining assertions/tests since the stack is absent.
    startSkipping
  fi
  cd ..
  # Give AWS a moment before querying instance metadata.
  sleep 20
  # Public IP of the instance launched by the runner's autoscaling group.
  ip_address=$(aws ec2 describe-instances --query \
    'Reservations[].Instances[?Tags[?Key==`aws:autoscaling:groupName`&&Value==`gitlab-runner-autoscaling-group`]].PublicIpAddress' \
    --output text)
  # Poll every 20 s (max 10 retries) for cloud-init completion in the log.
  c=0
  while ! execBySsh "$ip_address" "grep Cloud-init.*finished /var/log/cloud-init-output.log" ; do
    sleep 20
    ((++c))
    if [ $c -gt 10 ] ; then
      fail "Cloud-init did not finish after 10 retries"
      startSkipping
    fi
  done
}
# Assert that the instance's cloud-init log records a successful runner
# registration. Relies on the global $ip_address set by testSimple.
testRunnerRegistered() {
  local probe="grep 'Runner registered successfully' /var/log/cloud-init-output.log"
  execBySsh "$ip_address" "$probe"
  assertTrue "Runner registered successfully not seen in log" "$?"
}
# shunit2 hook: destroy the terraform stack after the suite, unless the
# caller opted out with NO_TEARDOWN=true.
oneTimeTearDown() {
  if [ "$NO_TEARDOWN" = "true" ] ; then
    return 0
  fi
  cd simple
  terraform destroy -auto-approve
  cd ..
}
. shunit2
| true |
ebaa71bc3098ac671340f7313c2dbba61d2fa82c | Shell | AbbTek/terraform-aws-foundation | /scripts/build-docs.sh | UTF-8 | 295 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Build the project docs: pull docs from fpco-salt-formula into this
# repo's tree, then run mkdocs. -e added so any failed step (cd, rsync,
# mkdocs) aborts instead of continuing in the wrong directory.
set -xue

SALT_DOC_DIR=$WORKSPACE/fpco-salt-formula
TF_DOC_DIR=$WORKSPACE/fpco-terraform-aws

cd "$TF_DOC_DIR"

# grab docs from fpco-salt-formula (paths quoted against spaces)
rsync -avz "$SALT_DOC_DIR/docs/fpco-salt-formula" "$TF_DOC_DIR/docs/saltstack/"

# use mkdocs to build the project
mkdocs build --clean
| true |
f90514d14e3cfb2b530b10f678b0487268dae8d0 | Shell | koqizhao/ubuntu-tools | /java_to_cs.sh | UTF-8 | 257 | 3.375 | 3 | [] | no_license | #!/bin/bash
java_to_cs()
{
    # Recursively rename every *.java file beneath the current directory
    # to the same basename with a .cs extension.
    local entry

    # Glob instead of parsing `ls | grep .java`: the old pipeline broke on
    # names containing spaces and, because the '.' was an unescaped regex
    # metacharacter, also matched names like "xjava".
    for entry in *.java
    do
        [ -e "$entry" ] || continue   # unmatched glob stays literal; skip it
        mv -- "$entry" "${entry%.java}.cs"
    done

    # Recurse into each subdirectory.
    for entry in */
    do
        if [ -d "$entry" ]
        then
            cd "$entry" || continue
            java_to_cs
            cd ..
        fi
    done
}
| true |
67cee0c94d36800cf9fee5e3675ee5e857ccee18 | Shell | grabadabar/snmp | /snmp | UTF-8 | 244 | 2.671875 | 3 | [] | no_license | #!/bin/bash
# Probe the local SNMP agent; if the walk times out, (re)start snmpd.
logfile=/home/joro/log.txt
snmpwalk -v2c -c redhat localhost system >"$logfile" 2>&1

# A timed-out walk leaves this line in the log. Match it anywhere in the
# output instead of requiring the entire file to equal it exactly, which
# broke whenever snmpwalk printed anything alongside the timeout line.
if grep -q "Timeout: No Response from localhost" "$logfile"
then
    service snmpd start
else
    echo "Snmpd is running"
fi
| true |
# Activate the named vert environment by sourcing its activate script.
#   $1 - environment name (a directory under ~/.verts)
# Returns 0 on success, 1 on a missing argument or missing activate script.
verton () {
    local env_name="$1"
    local activate
    if [[ ! -n "$env_name" ]]
    then
        # Diagnostics go to stderr (the second error below already did;
        # this one previously went to stdout).
        echo "ERROR: Vert name required" >&2
        return 1
    fi
    activate="$HOME/.verts/$env_name/bin/activate"
    if [[ ! -f "$activate" ]]
    then
        echo "ERROR: Environment '$HOME/.verts/$env_name' does not contain an activate script." >&2
        return 1
    fi
    source "$activate"
    return 0
}
| true |
ce69b8c83ba3875587b34831c1ff7c78bfa2a2f3 | Shell | derwinmcgeary/RoadRules | /separatesections.sh | UTF-8 | 430 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# Extract every <article> element from the Highway Code HTML and split
# each one into its own file.
#
# Pipeline: hxextract pulls the <article> elements out of the page; sed
# inserts a newline after each closing tag so every article ends a line;
# csplit then cuts one file per article (-z drops empty pieces, '+1'
# splits on the line after each match, '{*}' repeats for all matches),
# producing files named xx00, xx01, ...
cat hwcode/hwcode.html | hxextract article - | sed -e 's/\/article>/\/article>\n/g' | csplit -z - /"article>"/+1 {*}
mv xx* hwcode/
# Rename each piece after the id="" attribute found on its first line,
# producing <id>.html.
# NOTE(review): assumes each article carries an id on its first line and
# that ids are safe, space-free filenames — confirm for the source HTML.
cd hwcode
for file in xx*
do
mv $file `head -q -n 1 $file | sed -e 's/^.*id="//g' -e 's/".*/.html/g'`
done
cd ..
| true |
9c81a1eec6a41d2e3ae1acef28423a312ea43c5c | Shell | chris-misa/contools | /PING/tests/slimify | UTF-8 | 1,237 | 3.5625 | 4 | [] | no_license | #!/bin/bash
#
# Work-out script for the ping container (docker-slim probe run):
# exercises each supported flag/option family at least once.
#
# 2018, Chris Misa
#

TEST_HOST=google.com
TEST_HOST_V4=216.58.193.78
TEST_HOST_V6=2607:f8b0:400a:809::200e
BROADCAST_V4=172.17.0.255

BORDER="----------------"

# Print a framed section header so the phases stand out in the output.
banner() {
  echo "${BORDER}${1}${BORDER}"
}

# basics for v4 and v6
banner "Running basics"
ping -c 1 -4 $TEST_HOST
ping -c 1 -4n $TEST_HOST_V4
ping -c 1 -6 $TEST_HOST
ping -c 1 -6n $TEST_HOST_V6

# Adaptive mode and time info test
banner "Running adaptive mode"
ping -c 5 -AD $TEST_HOST

# broadcast ping
banner "Pinging a broadcast address"
ping -c 1 -b $BROADCAST_V4

# socket debug option
banner "Running with SO_DEBUG"
ping -c 1 -d -4 $TEST_HOST_V4
ping -c 1 -d -6 $TEST_HOST_V6

# packet marking
banner "Running with marked packets"
ping -c 2 -m 4 -4 $TEST_HOST_V4
ping -c 2 -m 4 -6 $TEST_HOST_V6

# MTU Discovery
banner "Running with MTU options"
ping -M do -c 1 -4 $TEST_HOST_V4
ping -M want -c 1 -4 $TEST_HOST_V4
ping -M dont -c 1 -4 $TEST_HOST_V4
ping -M do -c 1 -6 $TEST_HOST_V6
ping -M want -c 1 -6 $TEST_HOST_V6
ping -M dont -c 1 -6 $TEST_HOST_V6

banner "Done with work out"
| true |
# Add a directory to PATH unless it is already present.
#   $1 - directory to add; an empty argument is a no-op
#   $2 - "after" appends to the end of PATH; anything else prepends
# (Function header restored: the original opening line was corrupted
# during extraction.)
add_to_path() {
    if [[ "$1" == "" ]]; then
        return
    fi
    # ":$PATH:" framing makes the membership test exact, not a substring hit.
    if [[ ":$PATH:" != *":$1:"* ]] ; then
        if [[ "$2" = "after" ]] ; then
            PATH=$PATH:$1
        else
            PATH=$1:$PATH
        fi
    fi
    export PATH;
}
# Read ~/.path line by line and add each non-blank entry to PATH.
load_path_file_to_path() {
    if [ -f ~/.path ]; then
        while read p; do
            # Skip blank lines.
            [[ $p = "" ]] && continue
            # Expand variables embedded in the entry (e.g. $HOME/bin).
            # NOTE(review): eval executes arbitrary code from ~/.path —
            # only acceptable if the file is trusted/user-owned; confirm.
            # Also note: 'read' without -r interprets backslashes.
            p=$(eval echo \"$p\")
            add_to_path $p
        done < ~/.path
        unset p
    fi
}
| true |
506bce409de664ade88ec0dbd908af698e764c77 | Shell | finos/TimeBase-CE | /java/timebase/commons/src/main/java/com/epam/deltix/util/os/redhat-linux-service-launcher | UTF-8 | 2,079 | 3.71875 | 4 | [
"Apache-2.0",
"CC0-1.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown"
] | permissive | #!/bin/sh
#
# chkconfig: ${chkconfig.level.start} ${SS} ${KK}
# description: ${process.description}
# processname: ${process}
#
### BEGIN INIT INFO
# Provides: ${process}
# Default-Start: ${level.start}
# Default-Stop: ${level.stop}
# Required-Start: ${required.start}
# Required-Stop: ${required.stop}
# Short-Description: ${process.description}
# Description: ${process.description.long}
### END INIT INFO
# Source the distro's init-script function library (its location differs
# between RHEL releases).
if [ -f /etc/init.d/functions ] ; then
    . /etc/init.d/functions
elif [ -f /etc/rc.d/init.d/functions ] ; then
    . /etc/rc.d/init.d/functions
else
    # Fixed "int.d" typo; diagnostic goes to stderr with a newline.
    echo "Cannot find init.d functions!" >&2
    exit 1
fi
# Send SIGTERM to every descendant of pid $1; the pid itself is spared.
kill_process_tree() {
    __kill_processes_deep 1 $1
}
# Depth-first SIGTERM of the process subtree rooted at $2.
#   $1 - 1 when $2 is the tree root (the root itself is spared), 0 otherwise
#   $2 - pid whose descendants are terminated
# The variables must be local: the recursive call otherwise clobbers
# is_topmost/cur_pid, so the root-level check below saw the deepest
# child's values instead of its own. ('local' is not strictly POSIX but
# is supported by bash and dash, the shells used on target systems.)
__kill_processes_deep() {
    local is_topmost cur_pid child_pids child_pid
    is_topmost=$1
    cur_pid=$2
    child_pids=`ps -o pid --no-headers --ppid ${cur_pid}`
    for child_pid in $child_pids
    do
        __kill_processes_deep 0 $child_pid
    done
    if [ $is_topmost -eq 0 ]; then
        kill -s TERM $cur_pid 2> /dev/null
    fi
}
# Start the daemon: drop a stale pid file, then launch through the
# 'daemon' helper and record the new pid. (${tokens.with.dots} are
# build-time template placeholders, not shell variables.)
start() {
    echo -n "Starting ${process.name}..."
    # Remove the pid file if the recorded process is no longer alive,
    # otherwise 'daemon --check' would refuse to start a fresh instance.
    if [ -f ${process.pid.file} ] ; then
        if ! ps -p $(cat ${process.pid.file}) > /dev/null 2>&1
        then
            rm -f ${process.pid.file}
        fi
    fi
    export DELTIX_HOME=${deltix.home}
    # NOTE(review): the trailing '&' backgrounds the command, so this 'if'
    # observes the immediate job-creation status rather than whether the
    # daemon actually started — confirm that is intended.
    if daemon --user=${process.user} --check=${process.name} ${java.cmd} ${process.java.options} &>/dev/null &
    then
        echo $! > ${process.pid.file}
        chown ${process.user} ${process.pid.file}
    fi
}
# Stop the daemon: TERM all descendants of the recorded pid, then remove
# the pid file. (${tokens.with.dots} are build-time template placeholders.)
stop() {
    echo -n "Shutting down ${process.name}..."
    kill_process_tree $(cat ${process.pid.file})
    rm -f ${process.pid.file}
}
# Restart = stop followed by start, reusing the handlers above.
restart() {
    echo -n "Restarting ${process.name}:"
    stop
    start
}
# Report service status by delegating to the Java status tool; its exit
# code becomes this function's (and ultimately the script's) status.
status() {
    export DELTIX_HOME=${deltix.home}
    ${java.cmd} ${process.java.status.options}
}
# Dispatch on the init-system action verb.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart)
        restart
        ;;
    status)
        status
        ;;
    *)
        echo "Usage: $0 {start|stop|restart|status}"
        exit 1
esac
# Propagate the selected handler's exit status to the caller.
exit $?
7d60e0112a7a08b61d3d525c03d2f0f1356e73cc | Shell | osuchanglab/crisprhit | /benchmark/run_blast.sh | UTF-8 | 731 | 2.875 | 3 | [] | no_license | #!/bin/bash
crisprhit="../crisprhit.py"
bout="spacer_v_protospacer.tab"
chout="benchmark.crisprhit"
opt=$1
if [[ ! -s "protospacers_rev.fas.nin" ]]; then
makeblastdb -dbtype nucl -in protospacers_rev.fas
fi
if [[ ! -s "$bout" || $opt == "blast" ]]; then
blast="blastn -db protospacers_rev.fas -query spacer.fna -outfmt 7 -max_target_seqs 50000 -evalue 1 -gapopen 10 -gapextend 2 -reward 1 -penalty -1 -word_size 5 -out $bout"
echo Running BLASTN command: $blast
$blast
echo Finished running BLASTN
fi
if [[ ! -s "$chout" || $opt == "crisprhit" ]]; then
ch="$crisprhit --verbose --PAM CTT protospacers_rev.fas spacer.fna $bout"
echo "Running crisprhit command: $ch > $chout"
$ch > $chout
echo Finished running crisprhit
fi
| true |
bbf137aeafaf2430f7a5343c14a9818f088bc8cd | Shell | Mullen/vagrant | /scripts/php7-fpm.sh | UTF-8 | 1,113 | 3.0625 | 3 | [] | no_license | #!/bin/bash
# https://raw.githubusercontent.com/Mullen/vagrant/master/scripts/php7-fpm.sh
# Provision php-fpm behind nginx on a CentOS/RHEL Vagrant box.

# Prerequisite checks. Fixed: the bare 'exit' previously returned the
# echo's status (0), hiding the failure from the provisioner.
hash nginx 2>/dev/null || {
    echo "Nginx is required" >&2
    exit 1
}

hash php 2>/dev/null || {
    echo "PHP 7 is required" >&2
    exit 1
}

yum install -y php70w-fpm > /dev/null 2>&1
systemctl start php-fpm > /dev/null 2>&1
systemctl enable php-fpm > /dev/null 2>&1

# php-fpm hardening: never guess a script path for a missing file.
sed -i 's/;cgi.fix_pathinfo=1/cgi.fix_pathinfo=0/g' /etc/php.ini > /dev/null 2>&1

chown -R nginx:nginx /var/www/html

# nginx site config: static files from /var/www/html, .php handed to
# php-fpm on 127.0.0.1:9000. \$ escapes keep nginx variables literal.
cat > /etc/nginx/conf.d/default.conf <<EOF
server {
    listen 80;
    server_name localhost;

    location / {
        root /var/www/html;
        index index.html index.htm index.php;
    }

    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }

    location ~ \.php\$ {
        root /var/www/html;
        try_files \$uri =404;
        fastcgi_pass 127.0.0.1:9000;
        fastcgi_index index.php;
        fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
        include fastcgi_params;
    }
}
EOF

# -f: re-provisioning must not fail because the link already exists.
ln -sf /var/www/html/ /home/vagrant/app

systemctl restart nginx > /dev/null 2>&1
| true |
5291ffb742dfb313e28068bb9e19fd9f884f6445 | Shell | xaverh/bootstrap-scripts | /stage3/libwildebeest.sh | UTF-8 | 560 | 3.140625 | 3 | [
"MIT"
] | permissive | #!/bin/true
set -e

# Quote the whole chain: an unquoted $0 (or a path with spaces) would
# otherwise word-split before reaching '.'.
. "$(dirname "$(realpath -s "$0")")/common.sh"

# libwildebeest only exists to paper over musl; skip on any other libc.
if [[ "${SERPENT_LIBC}" != "musl" ]]; then
    printInfo "Skipping libwildebeest with non-musl libc"
    exit 0
fi

serpentChrootCd libwildebeest
git clone https://dev.serpentos.com/source/libwildebeest.git

printInfo "Configuring libwildebeest"
serpentChroot meson --prefix=/usr --buildtype=plain build

printInfo "Building libwildebeest"
serpentChroot ninja -j "${SERPENT_BUILD_JOBS}" -C build

printInfo "Installing libwildebeest"
serpentChroot ninja install -j "${SERPENT_BUILD_JOBS}" -C build
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.