blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
be6ddfae46db7fd796e52240f5cd0b3198f944c0
|
Shell
|
huaminglin/docker-demo
|
/tomcat-demo/keytool/update.sh
|
UTF-8
| 993
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Rebuild the 'demotomcat' container from tomcat:8.5, create a PKCS12 keystore
# with two self-signed key pairs (aliases tomcata/tomcatb), export both
# certificates as PEM, and copy keystore + certificates back to the host.
#
# FIX: the original 'cd $(dirname $0)' was unquoted (breaks on paths with
# spaces) and unchecked (on failure, all docker commands would run from the
# caller's directory). Quote the expansions and abort if cd fails.
cd "$(dirname "$0")" || exit 1
docker stop demotomcat
docker rm demotomcat
docker create --name demotomcat tomcat:8.5
docker start demotomcat
docker exec -it demotomcat keytool -genkey -alias tomcata -keyalg RSA -keystore /root/tomcat.jks -storetype pkcs12 -storepass changeit -dname "CN=127.0.0.1, OU=myunit, O=myorg, L=mycity, ST=mystate, C=cn"
docker exec -it demotomcat keytool -genkey -alias tomcatb -keyalg RSA -keystore /root/tomcat.jks -storetype pkcs12 -storepass changeit -dname "CN=myfirst2 mylast2, OU=myunit2, O=myorg2, L=mycity2, ST=mystate2, C=cn"
docker exec demotomcat keytool -exportcert -alias tomcata -storepass changeit -keystore /root/tomcat.jks -rfc -file /root/tomcata.pem
docker exec demotomcat keytool -exportcert -alias tomcatb -storepass changeit -keystore /root/tomcat.jks -rfc -file /root/tomcatb.pem
docker cp demotomcat:/root/tomcat.jks tomcat.jks
docker cp demotomcat:/root/tomcata.pem tomcata.pem
docker cp demotomcat:/root/tomcatb.pem tomcatb.pem
| true
|
0f71c30d55f229d7c2f968f24a20f422aa298917
|
Shell
|
akivajp/wmt2017
|
/scripts/tune.sh
|
UTF-8
| 3,927
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Tune MT decoder weights with MERT for several back-ends (Moses PBMT and
# several Travatar input formats).
# Usage: tune.sh mt_method corpus1 corpus2 inifile task_dir [tune_name [alter_lm]]
# Relies on common.sh for ARGS / opt_* option parsing and for the helpers
# show_exec and abspath — TODO confirm their exact semantics in common.sh.
dir="$(cd "$(dirname "${BASH_SOURCE:-${(%):-%N}}")"; pwd)"
source "${dir}/common.sh"
# Defaults; --eval and --max_iters override EVAL / MAX_ITERS below.
TUNE_NBEST=200
EVAL=bleu
MAX_ITERS=20
usage()
{
echo "usage: $0 mt_method corpus1 corpus2 inifile task_dir [tune_name [alter_lm]]"
echo ""
echo "options:"
echo " --threads={integer}"
echo " --eval={string}"
echo " --max_iters={integer}"
}
# ARGS holds the non-option arguments collected by common.sh.
if [ ${#ARGS[@]} -lt 5 ]
then
usage
exit 1
fi
mt_method=${ARGS[0]}
src1=$(abspath ${ARGS[1]})
src2=$(abspath ${ARGS[2]})
inifile=$(abspath ${ARGS[3]})
task=${ARGS[4]}
tune_name=${ARGS[5]}
alter_lm=${ARGS[6]}
if [ ${opt_threads} ]; then
THREADS=${opt_threads}
fi
if [ ${opt_max_iters} ]; then
MAX_ITERS=${opt_max_iters}
fi
# The working directory name encodes the tune name and evaluation metric so
# several tuning runs can coexist under the same task directory.
mert_out="mert-work"
if [ "${tune_name}" ]; then
mert_out="${mert_out}-${tune_name}"
fi
if [ "${opt_eval}" ]; then
EVAL=${opt_eval}
#mert_out="mert-work-${opt_eval}"
mert_out="${mert_out}-${EVAL}"
fi
workdir="${task}/working"
show_exec mkdir -p ${workdir}
case "${mt_method}" in
pbmt)
task=$(abspath $task)
show_exec pushd ${workdir}
show_exec $MOSES/scripts/training/mert-moses.pl ${src1} ${src2} ${BIN}/moses ${inifile} --mertdir $MOSES/bin --threads ${THREADS} 2\> mert.out \| tee mert.log
show_exec popd
;;
hiero|scfg)
options=""
# trg_factors is read from the ini file; when there are multiple target
# factors the evaluation metric is restricted to factor 0 and, unless the
# user set --max_iters, the iteration cap is raised to 30.
trg_factors=$(grep -1 trg_factors $inifile | tail -n 1)
if [ "${trg_factors}" ]; then
options="-trg-factors ${trg_factors}"
if [ ${trg_factors} -gt 1 ]; then
EVAL="bleu:factor=0"
if [ "${opt_eval}" ]; then
EVAL="${opt_eval}:factor=0"
fi
if [ ! "${opt_max_iters}" ]; then
MAX_ITERS=30
fi
fi
fi
# Optionally splice an alternative language model path into a copy of the
# ini file (sed replaces the line following "[lm_file]").
if [ "${alter_lm}" ]; then
abs_lm=$(abspath ${alter_lm})
show_exec cat ${inifile} \| sed -e "'/\\[lm_file\\]/ { n; s#.*#${abs_lm}# }'" \> ${workdir}/travatar-${tune_name}.ini
inifile=${workdir}/travatar-${tune_name}.ini
fi
#show_exec $TRAVATAR/script/mert/mert-travatar.pl -travatar-config ${inifile} -nbest ${TUNE_NBEST} -src ${src1} -ref ${src2} -travatar-dir ${TRAVATAR} -working-dir ${workdir}/${mert_out} -in-format word -threads ${THREADS} -eval ${EVAL} ${options} -resume
show_exec $TRAVATAR/script/mert/mert-travatar.pl -travatar-config ${inifile} -nbest ${TUNE_NBEST} -src ${src1} -ref ${src2} -travatar-dir ${TRAVATAR} -working-dir ${workdir}/${mert_out} -in-format word -threads ${THREADS} -eval ${EVAL} -max-iters ${MAX_ITERS} ${options} -resume
;;
t2s)
show_exec $TRAVATAR/script/mert/mert-travatar.pl -travatar-config ${inifile} -nbest ${TUNE_NBEST} -src ${src1} -ref ${src2} -travatar-dir ${TRAVATAR} -working-dir ${workdir}/${mert_out} -in-format penn -threads ${THREADS} -eval ${EVAL} -resume
;;
f2s)
show_exec $TRAVATAR/script/mert/mert-travatar.pl -travatar-config ${inifile} -nbest ${TUNE_NBEST} -src ${src1} -ref ${src2} -travatar-dir ${TRAVATAR} -working-dir ${workdir}/${mert_out} -in-format egret -threads ${THREADS} -eval ${EVAL} -resume
;;
s2s)
show_exec $TRAVATAR/script/mert/mert-travatar.pl -travatar-config ${inifile} -nbest ${TUNE_NBEST} -src ${src1} -ref ${src2} -travatar-dir ${TRAVATAR} -working-dir ${workdir}/${mert_out} -in-format word -threads ${THREADS} -eval ${EVAL} -resume
;;
x2x)
show_exec $TRAVATAR/script/mert/mert-travatar.pl -travatar-config ${inifile} -nbest ${TUNE_NBEST} -src ${src1} -ref ${src2} -travatar-dir ${TRAVATAR} -working-dir ${workdir}/${mert_out} -in-format word -threads ${THREADS} -eval ${EVAL} -resume
;;
esac
# Remove large intermediate artifacts left behind by the tuning run.
show_exec rm -rf ${workdir}/${mert_out}/filtered
if [[ "${mt_method}" == pbmt ]]; then
show_exec rm ${workdir}/${mert_out}/*.gz
else
show_exec rm ${workdir}/${mert_out}/*.nbest
show_exec rm ${workdir}/${mert_out}/*.stats
show_exec rm ${workdir}/${mert_out}/*.uniq
fi
# Publish the tuned configuration under the task's tune-name directory.
if [ "${tune_name}" ]; then
show_exec mkdir -p ${task}/${tune_name}
cp ${workdir}/${mert_out}/travatar.ini ${task}/${tune_name}/
fi
| true
|
add43d23624532cd9e91d93fdc19868f229730e3
|
Shell
|
mimaun/rc
|
/bash/bashrc
|
UTF-8
| 978
| 2.609375
| 3
|
[] |
no_license
|
# Shell environment: PATH setup, Java, history behaviour and convenience
# aliases for an interactive macOS shell.
export PATH=$PATH:/usr/local/bin
export PATH=/opt/local/bin:/opt/local/sbin:$PATH
export JAVA_HOME=/usr/local/java/j2sdk
export PATH=$PATH:$JAVA_HOME/bin
export PATH=/usr/bin:$PATH
# export PYTHONPATH=$HOME/lib/python2.7/site-packages/:$PATH
# Do not record consecutive duplicate commands (ignoredups)
# Do not record commands starting with a space (ignorespace)
export HISTCONTROL=ignoreboth
#history=500 (.bashrc)
export HISTSIZE=500
#history=500 (.bash_profile)
export HISTFILESIZE=500
#ls --color
alias ls='ls -FGh'
#gvim
alias gvim='/Applications/MacVim.app/Contents/MacOS/mvim'
#sl
alias sl="sl -F"
#neko
alias neko='~/joke/cat.sh'
#python
# alias python='/usr/local/bin/python3.3'
alias localserver='cd /Applications/XAMPP/xamppfiles/htdocs'
alias fuck='eval $(thefuck $(fc -ln -1))'
alias FUCK='fuck'
# ll
alias ll='ls -l'
# Look up a word in the GENE95 English-Japanese dictionary; -A 1 also shows
# the line after the match (presumably the translation — verify file layout).
function dict() {
# grep $1 /path/to/gene.txt -A 1 -wi --color
grep "$1" /Users/misato/gene95/gene-utf8.txt -E -A 1 -wi --color
}
| true
|
73813f5ddee6b6e0bef3e8789b32003eb9463e07
|
Shell
|
0x7An/dockerized
|
/build.sh
|
UTF-8
| 528
| 3.90625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Purpose: Build image with specified Dockerfile name under `Dockerfiles/`.
# Usage:
#
#   ./build.sh          # Build with `Dockerfiles/Dockerfile`
#   ./build.sh <name>   # name of file under `Dockerfiles/`.
f="$1"
shift
if [[ X"${f}" == X"" ]]; then
  df="Dockerfiles/Dockerfile"
  label="iredmail/mariadb"
else
  df="Dockerfiles/${f}"
  label="iredmail/${f}"
fi
# BUG FIX: the original used '(echo ... && exit 255)'. The parentheses spawn a
# subshell, so 'exit 255' only terminated the subshell and the script went on
# to run 'docker build' with a missing Dockerfile. Braces run in the current
# shell, so the script really aborts here.
[[ -f "${df}" ]] || { echo "Docker file ${df} doesnt exist."; exit 255; }
# Quote the expansions so names containing spaces or globs are safe.
docker build \
  --tag "${label}:nightly" \
  -f "${df}" .
| true
|
705aaa6fc402dbb4c7f8c4ca2aba16f45eea57d5
|
Shell
|
andrewharvey/dotfiles
|
/etc/rc.local
|
UTF-8
| 487
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh -e
#
# rc.local
#
# This script is executed at the end of each multiuser runlevel.
# Make sure that the script will "exit 0" on success or any other
# value on error.
#
# In order to enable or disable this script just change the execution
# bits.
#
# By default this script does nothing.
# Find the hidraw device node of the most recently attached Apple keyboard by
# scraping dmesg: the sed turns "... hidraw3 ..." into "/dev/hidraw3".
# Empty if no matching dmesg line exists.
HIDDEVICE=$(dmesg | grep Apple | grep Keyboard | grep input0 | tail -1 | sed -e 's/.*hidraw\([[:digit:]]\+\).*/\/dev\/hidraw\1/')
# Apply the caps-lock delay workaround to that device (site-local tool).
/usr/local/sbin/disable-capslock-delay $HIDDEVICE
exit 0
| true
|
cd5da2367e5507c94385a04035d0d76e312dfe32
|
Shell
|
ThomasTJdev/flask_system_information
|
/bash/ping.sh
|
UTF-8
| 787
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# http://askubuntu.com/questions/413367/ping-multiple-ips-using-bash
# Ping every host listed in ../config/ping_hosts (one per line) and emit a
# JSON array of {"host": ..., "ping": ...} objects on stdout. "ping" is the
# average RTT taken from ping's "rtt min/avg/max/mdev" summary line.
# get absolute path to config file
SCRIPTPATH=`dirname $(readlink -f $0)`
CONFIG_PATH=$SCRIPTPATH"/../config/ping_hosts"
catCmd=`which cat`
pingCmd=`which ping`
awkCmd=`which awk`
sedCmd=`which sed`
# Count of config lines; counted down so the loop knows which entry is last.
numOfLinesInConfig=`$sedCmd -n '$=' $CONFIG_PATH`
result='['
# NOTE: the while loop runs in a pipeline subshell, so 'result' only exists
# inside the loop — that is intentional: the completed array is echoed from
# within the loop once the countdown reaches zero.
$catCmd $CONFIG_PATH \
| while read output
do
# awk builds one JSON object per host. Splitting the rtt line on '/' makes
# $5 the average RTT; the shell variables are spliced directly into the awk
# program text. The last object gets "}" instead of "}," as terminator.
singlePing=$($pingCmd -qc 2 $output \
| $awkCmd -F/ 'BEGIN { endLine="}," } /^rtt/ { if ('$numOfLinesInConfig'==1){endLine="}"} print "{" "\"host\": \"'$output'\", \"ping\": " $5 " " endLine }' \
)
numOfLinesInConfig=$(($numOfLinesInConfig-1))
result=$result$singlePing
if [ $numOfLinesInConfig -eq 0 ]
then
echo $result"]"
fi
done \
| $sedCmd 's/\},]/}]/g'
| true
|
b7df47ba85b59f4ab8c9731df982ad1607898ac6
|
Shell
|
vzctl/duck
|
/files/lib/installer.d/10-autodb
|
UTF-8
| 164
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Installer hook: apply the autodb kernel command-line settings.
# Uses info/error/a_cmdline provided by the sourced installer library.
set -e
. /lib/libinstaller.sh
info "Setting autodb variables"
# Guard-clause form: abort with a diagnostic when a_cmdline fails.
a_cmdline || {
error "Failed to set kernel arguments"
exit 1
}
exit 0
| true
|
05a6a8ebbe3ace00728522f6a457c53fd500e20e
|
Shell
|
ntrel/scripts
|
/git-pick-fetch.sh
|
UTF-8
| 193
| 2.75
| 3
|
[] |
no_license
|
#untested
# Cherry-pick onto HEAD every commit that is reachable from FETCH_HEAD but
# not from HEAD (i.e. replay the fetched branch's new commits).
OB=FETCH_HEAD
NB=HEAD
# BUG FIX: the message was unquoted, so the ';' terminated the echo command
# and the shell then tried to execute 'press' as a command ("press: command
# not found"). Quoting the whole message prints it as intended.
echo "Picking from $OB onto $NB; press a key to start"
read -n1
#~ NB=origin/master
#~ git fetch
#~ git checkout $NB
# $(...) replaces the legacy backticks; quote the merge-base result.
git cherry-pick "$(git merge-base "$NB" "$OB")..$OB"
| true
|
dbcf27171b6c00c8e176eefb3c43f5627fc260de
|
Shell
|
Kinoko-Huang/project
|
/PATH-SCRIPT-201608/wrf/wrf_run
|
UTF-8
| 6,682
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh
# Driver for one WRF forecast run. Given an initial time (YYYYMMDDhh) it
# fetches input data (nudging obs, FNL/GFS, typhoon messages, GTS) according
# to FC_MODE, runs WRF via $WRF_CSH under MPI, verifies and moves the output.
# All site configuration comes from wrf_run.config (sourced below); helper
# binaries live under $PATH_SYSDIR/bin — semantics assumed from names,
# confirm against that toolkit.
MYDIR=`/usr/bin/dirname $0`
cd $MYDIR || exit 1
MYDIR=`/bin/pwd`
MYNAME=`/bin/basename $0`
ECHO="echo LOG:${MYNAME}:"
if [ ! -f $MYDIR/wrf_run.config ]; then
$ECHO "$MYDIR/wrf_run.config does not exist"
exit 1
fi
source $MYDIR/wrf_run.config
if [ $# -eq 1 ]; then
INIT_TIME=$1
else
$ECHO "Usage: $0 Initial_time"
$ECHO " e.g.: $0 2007010100"
exit 1
fi
OutputDir=$DATA_WRF_OUTPUT
NoOfDays=$MET_SIM_N_DAYS
# Validate INIT_TIME: must be 10 chars and parseable as YYYYMMDDhh.
if [ ${#INIT_TIME} -ne 10 ]; then
$ECHO "Invalid date input ${INIT_TIME}, must be in YYYYMMDDhh"
exit 1
fi
/bin/date -d "${INIT_TIME:0:8} ${INIT_TIME:8:2}" +%Y%m%d%H > /dev/null 2>&1
if [ $? -ne 0 ]; then
$ECHO "Invalid date input ${INIT_TIME}, must be in YYYYMMDDhh"
exit 1
fi
END_TIME=`/bin/date -d "${INIT_TIME:0:8} ${INIT_TIME:8:2} ${NoOfDays} days" +%Y%m%d%H`
if [ ! -e $OutputDir ]; then
$ECHO "Output directory $OutputDir does not exist"
exit 1
fi
$ECHO WRF Initial Time: $INIT_TIME
$ECHO WRF Ending Time: $END_TIME
$ECHO WRF Output Directory: $OutputDir
$ECHO WRF Current Directory: $MYDIR
if [ -z $FC_MODE ]; then
$ECHO "Error: FC_MODE does not set"
exit 1
fi
echo "$INIT_TIME $END_TIME"
# FC_MODE selects the input data source: 0 = reanalysis-style run (obs
# nudging + FNL/GFS archive), 1 = forecast run (GFS only). Both branches
# also stage typhoon bogus messages.
if [ $FC_MODE -eq 0 ]; then
# New OBS files
OBSdomain=default
/bin/rm -r -f $MYDIR/obsfiles
/bin/mkdir $MYDIR/obsfiles || exit 1
$ECHO $PATH_SYSDIR/bin/get_envf_nudging_data_v2 ${INIT_TIME} ${END_TIME} $MYDIR/obsfiles $OBSdomain
$PATH_SYSDIR/bin/get_envf_nudging_data_v2 ${INIT_TIME} ${END_TIME} $MYDIR/obsfiles $OBSdomain
# FNL/GFS files
/bin/rm -r -f $MYDIR/FNL $MYDIR/tmp_fnl
/bin/mkdir $MYDIR/FNL $MYDIR/tmp_fnl || exit 1
if [ "$GFSData" == "fnl_grib1" ]; then
$ECHO $PATH_SYSDIR/bin/get_envf_fnl ${INIT_TIME} ${END_TIME} $MYDIR/tmp_fnl
$PATH_SYSDIR/bin/get_envf_fnl ${INIT_TIME} ${END_TIME} $MYDIR/tmp_fnl
elif [ "$GFSData" == "fnl_grib2" ]; then
$ECHO $PATH_SYSDIR/bin/get_envf_fnl_grib2 ${INIT_TIME} ${END_TIME} $MYDIR/tmp_fnl
$PATH_SYSDIR/bin/get_envf_fnl_grib2 ${INIT_TIME} ${END_TIME} $MYDIR/tmp_fnl
elif [ "$GFSData" == "gfs0p25" ]; then
$ECHO $PATH_SYSDIR/bin/get_envf_gfs_0.25deg.archive ${INIT_TIME} ${END_TIME} $MYDIR/tmp_fnl
$PATH_SYSDIR/bin/get_envf_gfs_0.25deg.archive ${INIT_TIME} ${END_TIME} $MYDIR/tmp_fnl || exit 1
else
$ECHO "Invalid GFSData value = ${GFSData} defined in wrf_run.config, must be either fnl_grib1, fnl_grib2 or gfs0p25"
exit 1
fi
# Stage-then-move so FNL/ only ever holds complete downloads.
/usr/bin/find $MYDIR/tmp_fnl -type f -exec /bin/mv -v {} $MYDIR/FNL/ \;
/bin/rm -r -f $MYDIR/tmp_fnl
# for TC Bogus
./find_wrf_initfile ${INIT_TIME}
BTime=${INIT_TIME:0:8}
BOGUS_ROOT=$MYDIR/TC_BOGUS
/bin/rm -r -f $BOGUS_ROOT/typhoon_mesg $BOGUS_ROOT/tmp_typhoon
/bin/mkdir $BOGUS_ROOT/typhoon_mesg $BOGUS_ROOT/tmp_typhoon || exit 1
$ECHO $PATH_SYSDIR/bin/get_envf_typhoon_mesg $BTime $MET_SIM_N_DAYS $BOGUS_ROOT/tmp_typhoon
$PATH_SYSDIR/bin/get_envf_typhoon_mesg $BTime $MET_SIM_N_DAYS $BOGUS_ROOT/tmp_typhoon
/usr/bin/find $BOGUS_ROOT/tmp_typhoon -type f -exec /bin/mv -v {} $BOGUS_ROOT/typhoon_mesg/ \;
/bin/rm -r -f $BOGUS_ROOT/tmp_typhoon
elif [ $FC_MODE -eq 1 ]; then
/bin/rm -r -f $MYDIR/GFS $MYDIR/tmp_gfs
/bin/mkdir $MYDIR/GFS $MYDIR/tmp_gfs || exit 1
if [ "$GFSData" == "gfs1p00" ]; then
$ECHO $PATH_SYSDIR/bin/get_envf_gfs ${INIT_TIME} ${END_TIME} $MYDIR/tmp_gfs
$PATH_SYSDIR/bin/get_envf_gfs2 ${INIT_TIME} ${END_TIME} $MYDIR/tmp_gfs
elif [ "$GFSData" == "gfs0p25" ]; then
$ECHO $PATH_SYSDIR/bin/get_envf_gfs_0.25deg ${INIT_TIME} ${END_TIME} $MYDIR/tmp_gfs
$PATH_SYSDIR/bin/get_envf_gfs_0.25deg ${INIT_TIME} ${END_TIME} $MYDIR/tmp_gfs
else
$ECHO "Invalid GFSData value = ${GFSData} defined in wrf_run.config, must be either gfs1p00 or gfs0p25"
exit 1
fi
/usr/bin/find $MYDIR/tmp_gfs -type f -exec /bin/mv -v {} $MYDIR/GFS/ \;
/bin/rm -r -f $MYDIR/tmp_gfs
# for TC Bogus
./find_wrf_initfile ${INIT_TIME}
#BTime=`/bin/date -d "${INIT_TIME:0:8} ${INIT_TIME:8:2}:00:00 1 day ago" +%Y%m%d%H`
BTime=${INIT_TIME:0:8}
BOGUS_ROOT=$MYDIR/TC_BOGUS
/bin/rm -r -f $BOGUS_ROOT/typhoon_mesg $BOGUS_ROOT/tmp_typhoon
/bin/mkdir $BOGUS_ROOT/typhoon_mesg $BOGUS_ROOT/tmp_typhoon || exit 1
$ECHO $PATH_SYSDIR/bin/get_envf_typhoon_mesg $BTime $MET_SIM_N_DAYS $BOGUS_ROOT/tmp_typhoon
$PATH_SYSDIR/bin/get_envf_typhoon_mesg $BTime $MET_SIM_N_DAYS $BOGUS_ROOT/tmp_typhoon
/usr/bin/find $BOGUS_ROOT/tmp_typhoon -type f -exec /bin/mv -v {} $BOGUS_ROOT/typhoon_mesg/ \;
/bin/rm -r -f $BOGUS_ROOT/tmp_typhoon
else
$ECHO "FC_MODE must be 0 or 1"
exit 1
fi
# GTS observations: fetch from one day before INIT_TIME, covering the whole
# simulation window plus slack (N_DAYS + 2).
GTS_TEMP_ROOT=$MYDIR/gts_decoder/gts_temp
/bin/rm -r -f $GTS_TEMP_ROOT
/bin/mkdir $GTS_TEMP_ROOT
INIT_TIMEx=`/bin/date -d "${INIT_TIME:0:8} ${INIT_TIME:8:2} 1 days ago" +%Y%m%d%H`
let "NO_DAYS = $MET_SIM_N_DAYS + 2"
$ECHO $PATH_SYSDIR/bin/get_envf_gts ${INIT_TIMEx:0:8} $NO_DAYS $GTS_TEMP_ROOT
$PATH_SYSDIR/bin/get_envf_gts ${INIT_TIMEx:0:8} $NO_DAYS $GTS_TEMP_ROOT
# Final output lives under OutputDir/YYYY/YYYYMM/YYYYMMDDhh; an existing run
# for the same initial time is discarded.
OutputDirx=${OutputDir}/${INIT_TIME:0:4}/${INIT_TIME:0:6}/${INIT_TIME}
if [ -d $OutputDirx ]; then
$ECHO "Output directory $OutputDirx exists, remove it"
/bin/rm -r -f $OutputDirx
fi
/bin/rm -r -f $MYDIR/outputs
/bin/mkdir $MYDIR/outputs || {
$ECHO "Failed to create $MYDIR/outputs directory"
exit 1
}
Ncpu=`$PATH_SYSDIR/bin/mpi_total_cpus $MPIHostFile` || {
$ECHO "Failed to run $PATH_SYSDIR/bin/mpi_total_cpus $MPIHostFile"
exit 1
}
export Ncpu
$PATH_SYSDIR/bin/start_mpd || {
$ECHO "Failed to run $PATH_SYSDIR/bin/start_mpd"
exit 1
}
$ECHO Cleanup files in EPDdata directory
/usr/bin/find EPDdata -mindepth 1 -maxdepth 1 -type f -exec /bin/rm -v {} \;
$ECHO Clenaup files in met_WRFV3/test/em_real directory
/usr/bin/find met_WRFV3/test/em_real -mindepth 1 -maxdepth 1 -name 'wrfout_d0*' -exec /bin/rm -v {} \;
# Optional realtime copy: a background copier watches for $WRF_Status to be
# touched once the model finishes.
if [ $MET_REALTIME_COPY_OUTPUT -eq 1 ]; then
WRF_Status=$MYDIR/WRF_DONE
if [ -f $MYDIR/WRF_DONE ]; then
/bin/rm $WRF_Status
fi
CopyMetDataLog=/tmp/copy_met_data.log-${INIT_TIME}
PID=$$
$ECHO $PATH_SYSDIR/bin/copy_met_data wrf $INIT_TIME $WRF_Status $PID
$PATH_SYSDIR/bin/copy_met_data wrf $INIT_TIME $WRF_Status $PID > $CopyMetDataLog 2>&1 &
$ECHO copy_met_data log = $CopyMetDataLog
fi
if [ -z "$WRF_CSH" ]; then
WRF_CSH=./wrf.csh
fi
$ECHO "WRF_CSH = $WRF_CSH"
# Run the model; on failure stop the MPI daemons before exiting.
$WRF_CSH $INIT_TIME $END_TIME || {
$ECHO "Failed to run ./wrf.csh $INIT_TIME $END_TIME"
$PATH_SYSDIR/bin/stop_mpd
exit 1
}
if [ $MET_REALTIME_COPY_OUTPUT -eq 1 ]; then
/bin/touch $WRF_Status
wait
fi
$PATH_SYSDIR/bin/stop_mpd
./check_output $INIT_TIME || {
$ECHO "Failed to run ./check_output $INIT_TIME"
exit 1
}
./move_data $INIT_TIME $OutputDir
if [ $? -ne 0 ]; then
$ECHO "Failed to run ./move_data $INIT_TIME $OutputDir"
exit 1
fi
exit 0
| true
|
13bb9d85809d13fec58bc95edabdefa686bfbc69
|
Shell
|
andrewchambers/p2pkgs
|
/bin/build-packages-ifchange
|
UTF-8
| 687
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/sh
# Create a temporary do script so we can use the redo -j option
# in conjunction with redo-ifchange (which takes no options).
set -eu
usage() {
echo "Usage: $0 [-j parallelism] pkgs..." 1>&2
exit 1
}
parallelism="1"
while getopts "j:" o
do
case $o in
j)
parallelism="${OPTARG}"
;;
*)
usage
;;
esac
done
shift $((OPTIND-1))
# Resolve the project root relative to this script's own location.
dir="$(cd "$(dirname "$0")"; pwd)"
projectroot="$(realpath "$dir/..")"
doscript="$(mktemp "$projectroot/.build-packages.XXXXXX.do")"
trap "rm -rf $doscript" EXIT
# The generated do script just asks redo-ifchange for each package tarball.
# NOTE(review): 'printf %q' is a bash/ksh extension, not POSIX printf — this
# relies on /bin/sh being bash (or similar); confirm on target systems.
cat <<EOF > "$doscript"
#!/bin/sh
exec redo-ifchange $(printf '%q/.pkg.tar.gz ' $@)
EOF
# Drop -e so redo's exit status can be captured and propagated verbatim.
set +e
redo -j "$parallelism" "${doscript%.do}"
exit "$?"
| true
|
9c74a34028c0d277d7d9d4bd2086a33fa799094f
|
Shell
|
scy/qb
|
/contrib/tsbackup/tsrestore.sh
|
UTF-8
| 264
| 3.21875
| 3
|
[
"WTFPL",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/sh
# Restore file modification times recorded by tsbackup.sh.
# Run from your qb base directory and feed tsbackup.sh's output on stdin;
# each input line is "<unix-timestamp> <path>" (the path may contain spaces,
# since 'read' assigns everything after the first field to it).
while read -r stamp path
do
	touch -d "@$stamp" "$path"
done
| true
|
0d8bfbde38e354688b9f360e304283835311f0a8
|
Shell
|
mlminion/sweet
|
/benchmarks_sphere/h=g=r=1_f=0/compute_max_norm_all.sh
|
UTF-8
| 249
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# For every linear_gaussian_* run directory, compare its output CSV against
# the fixed stable reference run via compute_max_norm.py; report directories
# that lack the expected file.
FILE="prog_h_t00000000000.00111111.csv"
for run in linear_gaussian_*; do
	# Guard clause: skip straight to the "not found" report when missing.
	if [ ! -e "$run/$FILE" ]; then
		echo "$run/$FILE [not found]"
		continue
	fi
	./compute_max_norm.py "linear_gaussian_dam_rk4_robert_t0.0001_stable/$FILE" "$run/$FILE"
done
| true
|
14aaadb7846c5a1237421739b812426a57f6070a
|
Shell
|
vangorra/python_withings_api
|
/scripts/common.sh
|
UTF-8
| 923
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
# Shared settings for the repo's helper scripts.
# VENV_DIR: virtualenv location (relative to where the scripts are run).
VENV_DIR=".venv"
# PYTHON_BIN: interpreter used to create and probe the venv.
PYTHON_BIN="python3"
# LINT_PATHS: space-separated targets handed to the lint tooling.
LINT_PATHS="./withings_api ./tests/ ./scripts/"
# Ensure the configured Python interpreter exists on PATH; exit otherwise.
# Reads global PYTHON_BIN.
function assertPython() {
  # 'command -v' is the portable shell builtin for "is this command
  # available?" — the original shelled out to the external 'which'.
  if ! command -v "$PYTHON_BIN" >/dev/null 2>&1; then
    echo "Error: '$PYTHON_BIN' is not in your path."
    exit 1
  fi
}
# Create the virtualenv if needed and activate it in the current shell.
# Reads globals PYTHON_BIN and VENV_DIR; mutates the caller's environment by
# sourcing the venv's activate script. Exits if the venv module is missing.
function enterVenv() {
# Not sure why I couldn't use "if ! [[ `"$PYTHON_BIN" -c 'import venv'` ]]" below. It just never worked when venv was
# present.
# Detect a missing venv module by grepping the import error text; '|| true'
# keeps the command substitution from failing when grep finds nothing.
VENV_NOT_INSTALLED=$("$PYTHON_BIN" -c 'import venv' 2>&1 | grep -ic ' No module named' || true)
if [[ "$VENV_NOT_INSTALLED" -gt "0" ]]; then
echo "Error: The $PYTHON_BIN 'venv' module is not installed."
exit 1
fi
if ! [[ -e "$VENV_DIR" ]]; then
echo "Creating venv."
"$PYTHON_BIN" -m venv "$VENV_DIR"
else
echo Using existing venv.
fi
# VIRTUAL_ENV in the environment means a venv is already active.
if ! [[ $(env | grep VIRTUAL_ENV) ]]; then
echo "Entering venv."
# Temporarily relax -u/-f: activate scripts may reference unset vars.
set +uf
source "$VENV_DIR/bin/activate"
set -uf
else
echo Already in venv.
fi
}
| true
|
405902453e68c7c6117b6fc85b03d3d57c082a5a
|
Shell
|
DaniilD/BeatsPro
|
/scripts/migrate.sh
|
UTF-8
| 383
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
# Apply pending database migrations using the connection settings in ../.env.
# Exits 3 when the environment file is missing.
here=$(cd -- "$(dirname -- "$0")" && pwd -P)
root="${here}/.."
# Refuse to run without the environment file.
if [ ! -f "${root}/.env" ]; then
	printf "\033[31m.env file not found\033[0m\n"
	exit 3
fi
# Pull the DB_* connection variables into this shell.
. "${root}/.env"
migrate -path migrations-dev -database "${DB_DRIVER}://${DB_USER}:${DB_PASSWORD}@tcp(${DB_HOST}:${DB_PORT})/${DB_NAME}" -verbose up
| true
|
d5cfaa8a5c7abcded576f42e45c72c63f79171c3
|
Shell
|
rshwet14/Real-Life-Script
|
/DiskSpaceCheck1
|
UTF-8
| 286
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Author: Shwet Raj
# Date: 25/10/20
# Description: This script will check the disk space greater than 60%.
# Modified: 25/10/20
# NOTE(review): the header says 60% but the filter below uses >= 10 — confirm
# which threshold is intended before changing either.
echo
echo Following is the disk space status
echo
# Print use% (column 5) and mount point (column 6) for filesystems at or
# above the threshold. '0+$5' coerces strings like "42%" to a number.
# A single awk replaces the original two-process pipeline
# (awk '0+$5 >= 10 {print}' | awk '{print $5, $6}') with identical output.
df -h | awk '0+$5 >= 10 {print $5, $6}'
| true
|
e24c8b3331fb3b012e3dc3bc2a6fbc8f4ac16b45
|
Shell
|
jraebrown/icloud-dotfiles
|
/.profile
|
UTF-8
| 1,470
| 3.09375
| 3
|
[] |
no_license
|
# Login-shell profile: homebrew, language runtime managers, PATH, and
# per-shell (zsh vs bash) settings. Sourced by both shells.
# Initialize homebrew
eval $(/opt/homebrew/bin/brew shellenv)
# Initialize my "xenv" language runtime managers if installed
if command -v rbenv &>/dev/null; then
eval "$(rbenv init -)"
fi
if command -v nodenv &>/dev/null; then
eval "$(nodenv init -)"
fi
if command -v pyenv &>/dev/null; then
eval "$(pyenv init --path)"
fi
if command -v gel &>/dev/null; then
eval "$(gel shell-setup)"
fi
# Additional PATH configuration
## My own scripts
export PATH="$HOME/bin:$PATH"
# Shell-specific settings
if [[ "$SHELL" == *zsh ]]; then
# Nothing to see here
true
elif [[ "$SHELL" == *bash ]]; then
## Bash settings
### stickier .bash_history
export HISTCONTROL=ignoredups:erasedups
export HISTSIZE=10000
export HISTFILESIZE=10000
shopt -s histappend
### Set up homebrew
if [ -f $(brew --prefix)/etc/bash_completion ]; then
source $(brew --prefix)/etc/bash_completion
fi
fi
# Other Customization
## Editor registration for git, etc
export EDITOR="vim"
export LC_CTYPE="en_US.UTF-8"
## Reference the location of iCloud Drive
export ICLOUD_DRIVE="$HOME/icloud-drive"
## Source ENV variables
source "$ICLOUD_DRIVE/dotfiles/.env"
## Set fzf to use rg like so for ctrl-t in shell:
export FZF_DEFAULT_COMMAND='rg --files --ignore --hidden --follow --glob "!.git/*"'
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
## Increase limit of open file descriptors because watch processes
ulimit -n 10000
## load custom PS1 prompt
source $HOME/bin/ps1
| true
|
52ebddce025c4d6686c6e4c8efb0b09d984a8659
|
Shell
|
DemonBloody/twc
|
/cave.sh
|
UTF-8
| 2,004
| 2.515625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# /cave
# Automate the in-game "cave" activity: repeatedly fetch the cave page with
# w3m, derive the current page state from its links, and follow the matching
# action link, up to a budget of 100 requests. Relies on names defined
# elsewhere: _clanid, $ENC, $URL and the .ua user-agent list — presumably a
# game-scraping toolkit; verify against the rest of the repo.
function _cave () {
_clanid
# if [[ -n $CLD ]]; then
# w3m -debug $ENC "$URL/clan/$CLD/quest/take/5" -o user_agent="$(shuf -n1 .ua)" | head -n15
# w3m -debug $ENC "$URL/clan/$CLD/quest/help/5" -o user_agent="$(shuf -n1 .ua)" | head -n15
# fi
# Fetch the cave page and extract candidate links from its href attributes:
# ACCESS1/ACCESS2 are the first/second /cave/ links, DOWN the descend link,
# ACTION the concatenated letters of all /cave/ link paths (used as a state
# fingerprint), MEGA a marker scraped from the silver icon row.
_condition () {
SRC=$(w3m -debug -dump_source $ENC "$URL/cave/" -o user_agent="$(shuf -n1 .ua)")
ACCESS1=$(echo $SRC | sed 's/href=/\n/g' | grep '/cave/' | head -n1 | cut -d\' -f2)
DOWN=$(echo $SRC | sed 's/href=/\n/g' | grep '/cave/down' | cut -d\' -f2)
ACCESS2=$(echo $SRC | sed 's/href=/\n/g' | grep '/cave/' | head -n2 | tail -n1 | cut -d\' -f2)
ACTION=$(echo $SRC | sed 's/href=/\n/g' | grep '/cave/' | cut -d\' -f2 | tr -cd "[[:alpha:]]")
MEGA=$(echo $SRC | sed 's/src=/\n/g' | grep '/images/icon/silver.png' | grep "'s'" | tail -n1 | grep -o 'M')
}
_condition
num=100
until [[ $num -eq 0 ]]; do
_condition
# Each case arm matches one ACTION fingerprint and follows the appropriate
# link; an unrecognized state stops the loop by zeroing the budget.
case $ACTION in
(cavechancercavegatherrcavedownr)
SRC=$(w3m -debug -dump_source $ENC "$URL$ACCESS2" -o user_agent="$(shuf -n1 .ua)") ;
num=$[$num-1] ;
echo $num ;;
(cavespeedUpr)
SRC=$(w3m -debug -dump_source $ENC "$URL$ACCESS2" -o user_agent="$(shuf -n1 .ua)") ;
num=$[$num-1] ;
echo $num ;;
(cavedownr|cavedownrclanbuiltprivateUpgradetruerrefcave)
num=$[$num-1] ;
SRC=$(w3m -debug -dump_source $ENC "$URL$DOWN" -o user_agent="$(shuf -n1 .ua)") ;
echo $num ;;
(caveattackrcaverunawayr)
num=$[$num-1] ;
SRC=$(w3m -debug -dump_source $ENC "$URL$ACCESS1" -o user_agent="$(shuf -n1 .ua)") ;
SRC=$(w3m -debug -dump_source $ENC "$URL/cave/runaway" -o user_agent="$(shuf -n1 .ua)") ;
echo $num ;;
(*) num=0 ;;
esac
echo $SRC | sed 's/href=/\n/g' | grep '/cave/' | head -n2 | tail -n1 | cut -d\' -f2
done
# if [[ -n $CLD ]]; then
# w3m -debug $ENC "$URL/clan/$CLD/quest/end/5" -o user_agent="$(shuf -n1 .ua)" | head -n15
# w3m -debug $ENC "$URL/clan/$CLD/quest/deleteHelp/5" -o user_agent="$(shuf -n1 .ua)" | head -n15
# fi
echo -e "cave (✔)\n"
}
| true
|
6cac1036f9c4bb026fd0baa5c6cc37d6eaabe9d3
|
Shell
|
estis75/CompetitivePrograming
|
/Exec.sh
|
UTF-8
| 1,253
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Competitive-programming helper:
#   Exec.sh init     — reset testCase/ to 10 empty input files (01.txt..10.txt)
#   Exec.sh clear    — remove generated *_out.cpp bundles
#   Exec.sh FILE.cpp — compile FILE.cpp against the local template/ac-library
#                      headers, run it on every non-empty test case, then
#                      bundle templates + source into FILE_out.cpp.
if [ $# -ne 1 ]; then
echo "designate a file name"
exit 1;
fi
if [ "$1" == "init" ]; then
rm testCase/* 2> /dev/null
for i in {1..10}; do
touch testCase/`printf %02d $i`.txt
done
exit 0;
fi
if [ "$1" == "clear" ]; then
if [ ! `find -name "*_out.cpp"` ]; then
exit 0
fi
rm *_out.cpp
exit 0;
fi
if [ ! -e $1 ]; then
echo "designate a file name"
exit 1;
fi
# Name of the single-file bundle produced at the end.
output=${1/.cpp/}_out.cpp
if [ -e $output ]; then
rm $output
fi
g++ -O2 $1 -I ../default/ac-library -I ../default/Templates
if [ $? -ne 0 ]; then
exit 1;
fi
# Run the compiled a.out on each non-empty test case, showing input and
# output side by side for manual comparison.
for i in `ls testCase/*.txt`; do
if [ "`cat $i`" == "" ];then
continue;
fi
echo "input:"
cat $i
echo ""
echo -e "output:\n"
./a.out < $i
echo -e "\n"
done
# Build the submission bundle: the first two lines of libraries.h, a single
# 'using namespace std;', each locally-included template header (with its
# includes and 'using namespace std;' lines stripped), libraries.h itself
# similarly stripped, then the solution minus its first three lines
# (presumably its own include/using prologue — confirm template layout).
cp $1 temp.cpp
touch $output
cat ../default/Templates/libraries.h | head -2 >> $output
echo "using namespace std;" >> $output
for i in `cat ../default/Templates/libraries.h | grep "include\".*\""`; do
i=`echo ${i/\#include/} | sed -e 's/\"//g' `
cat ../default/Templates/$i | sed -e '/using namespace std;/d' | sed -e '/include/d' >> $output
done
cat ../default/Templates/libraries.h | sed -e '/using namespace std;/d' | sed -e '/include/d' >> $output
sed '1,3d' temp.cpp >> $output
rm temp.cpp a.out
| true
|
8a6cb9b4b0bb672297d8c05ab775e606421cddd3
|
Shell
|
koushikkirugulige/cricket-commentry
|
/cricket.sh
|
UTF-8
| 1,067
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Poll a cricbuzz live-score page every 10 seconds, print the chosen team's
# score lines, and read out the ball-by-ball commentary for over x, ball y
# through espeak. x/y advance automatically as balls are found.
echo "which team"
read team
echo "Enter the over"
read x
y=1
while :
do
# -o dum.txt is wget's log file; -O file.html is the fetched page.
wget -o dum.txt -O file.html http://www.cricbuzz.com/live-cricket-scores/18461/sl-vs-ind-3rd-test-india-tour-of-sri-lanka-2017
# Strip all HTML tags in place so the greps see plain text.
sed -i -e 's/<[^>]*>//g' file.html
grep -o $team\ [0-9][0-9]/[0-9]\ \([0-9]\.[0-9]\ Ovs\) file.html #score double digit overs<10
grep -o $team\ [0-9][0-9][0-9]/[0-9]\ \([0-9][0-9]\.[0-9]\ Ovs\) file.html #score>99 overs>9.6
grep -o $team\ [0-9][0-9]/[0-9]\ \([0-9][0-9]\.[0-9]\ Ovs\) file.html #score >10<99 overs>=10
grep -o $team\ [0-9]/[0-9]\ \([0-9]\.[0-9]\ Ovs\) file.html #score <10 overs <10
grep -o $team\ [0-9]/[0-9]\ \([0-9][0-9]\.[0-9]\ Ovs\) file.html #score <10 overs>10
grep -o $x\\.$y'[ a-zA-Z0-9,!]\+[ ]' file.html|espeak #speaks the commentry
grep -o $x\\.$y'[ a-zA-Z0-9,!]\+[ ]' file.html #echos the commentry on stdout
#just trying git command line
# Advance to the next ball when the commentary grep above succeeded; after
# ball 6, roll over to the next over.
if [ "$?" -eq 0 ]
then
y=$(($y+1))
fi
if [ "$y" -eq 7 ]
then
x=$(($x+1))
y=1
fi
grep --color -o "India [a-z]\+ [0-9]\+ runs" file.html
sleep 10s
rm file.html
clear
done
| true
|
58ddbe6a1414f00fd9a8b8520250e33df031ca30
|
Shell
|
flaf/miscellaneous
|
/puppet/modules/pxeserver/files/jessie-elea-moosql/partman_early_command
|
UTF-8
| 5,167
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/sh
### This file is managed by Puppet, don't edit it ###
# Debian-installer partman early command for a 2xSSD + 2xSATA host:
# classify the four disks by size, wipe any previous RAID metadata, lay out
# GPT partitions, assemble four RAID1 volumes (system, swap, HD-LVM,
# SSD-LVM), create the LVM volume groups/volumes, and make the filesystems.
# DESTRUCTIVE by design — runs only inside the installer.
exec >/tmp/partition.log 2>&1
set -x
parted='parted --script --align=opt'
# Classify each of sda..sdd by reported size: exactly 200GB means SSD,
# anything else SATA. The letters are stored in l_ssd1/2 and l_sata1/2.
c_ssd=1
c_sata=1
for i in a b c d
do
size=$(parted -m /dev/sd$i unit GB print | grep "^/dev/sd$i" | cut -d':' -f2)
if [ "$size" = '200GB' ]
then
eval l_ssd$c_ssd=$i
c_ssd=$((c_ssd + 1))
else
eval l_sata$c_sata=$i
c_sata=$((c_sata + 1))
fi
done
# Now, we have:
#
# - $l_ssd1 and $l_ssd2 the letters of SSD.
# - $l_sata1 and $l_sata2 the letters of SATA.
### Remove the RAID volumes if already created. ###
i='3' # on /dev/sd${l_sata1}3 and /dev/sd${l_sata2}3
raid1system='md0'
[ -e /dev/$raid1system ] && mdadm --stop /dev/$raid1system
[ -e /dev/sd${l_sata1}$i ] && mdadm --zero-superblock /dev/sd${l_sata1}$i
[ -e /dev/sd${l_sata2}$i ] && mdadm --zero-superblock /dev/sd${l_sata2}$i
i='4' # on /dev/sd${l_sata1}4 and /dev/sd${l_sata2}4
raid1swap='md1'
[ -e /dev/$raid1swap ] && mdadm --stop /dev/$raid1swap
[ -e /dev/sd${l_sata1}$i ] && mdadm --zero-superblock /dev/sd${l_sata1}$i
[ -e /dev/sd${l_sata2}$i ] && mdadm --zero-superblock /dev/sd${l_sata2}$i
i='5' # on /dev/sd${l_sata1}5 and /dev/sd${l_sata2}5
raid1hd='md2'
[ -e /dev/$raid1hd ] && mdadm --stop /dev/$raid1hd
[ -e /dev/sd${l_sata1}$i ] && mdadm --zero-superblock /dev/sd${l_sata1}$i
[ -e /dev/sd${l_sata2}$i ] && mdadm --zero-superblock /dev/sd${l_sata2}$i
i='1' # on /dev/sd${l_ssd1}1 and /dev/sd${l_ssd2}1
raid1ssd='md3'
[ -e /dev/$raid1ssd ] && mdadm --stop /dev/$raid1ssd
[ -e /dev/sd${l_ssd1}$i ] && mdadm --zero-superblock /dev/sd${l_ssd1}$i
[ -e /dev/sd${l_ssd2}$i ] && mdadm --zero-superblock /dev/sd${l_ssd2}$i
### Create GPT partition on each disk. ###
for i in a b c d
do
$parted /dev/sd${i} mktable gpt
done
### Partitioning on the non-SSD drives. ###
# Layout per SATA disk: 1=UEFI (unused), 2=biosgrub, 3=root (RAID),
# 4=swap (RAID), 5=rest as LVM (RAID). a/b track partition start/end in MiB.
n=0
for i in ${l_sata1} ${l_sata2}
do
# a => n = 1, b => n = 2.
n=$((n + 1))
part_num=1
# The unused UEFI partitions if one day we decide
# to enable the Bios-UEFI.
a=1
b=$((250 + a)) # Size == 250MiB
$parted /dev/sd${i} -- unit MiB mkpart uefi${n}unused $a $b
part_num=$((part_num + 1))
# The biosgrub partitions.
a=$b
b=$((1 + a)) # Size == 1MiB
$parted /dev/sd${i} -- unit MiB mkpart biosgrub${n} $a $b
$parted /dev/sd${i} set $part_num bios_grub on
part_num=$((part_num + 1))
# The root partitions (will be a RAID1 volume).
a=$b
b=$((30 * 1024 + a)) # Size == 30GiB
$parted /dev/sd${i} -- unit MiB mkpart system${n} $a $b
$parted /dev/sd${i} set $part_num raid on
part_num=$((part_num + 1))
# The swap (will be a RAID1 volume).
a=$b
b=$((8 * 1024 + a)) # Size == 8GiB
$parted /dev/sd${i} -- unit MiB mkpart swap${n} linux-swap $a $b
$parted /dev/sd${i} set $part_num raid on
part_num=$((part_num + 1))
# The remaining of the disk is a LVM partition in a RAID1 volume.
a=$b
b='-1cyl' # The last cylinder
$parted /dev/sd${i} -- unit MiB mkpart lvm-hd${n} $a $b
$parted /dev/sd${i} set $part_num raid on
part_num=$((part_num + 1))
done
### Partitioning on the SSD drives. ###
n=0
for i in ${l_ssd1} ${l_ssd2}
do
# c => n = 1, d => n = 2.
n=$((n + 1))
# The LVM partition on the RAID1 volume in the two SSD.
a=1
b='-1cyl' # The last cylinder
$parted /dev/sd${i} -- unit MiB mkpart lvm-ssd${n} $a $b
$parted /dev/sd${i} set 1 raid on
done
### Creation of the RAID volumes. ###
# The system (/) partition.
mdadm --create /dev/$raid1system --level=1 --raid-devices=2 /dev/sd${l_sata1}3 /dev/sd${l_sata2}3 --force --run
# The swap partition.
mdadm --create /dev/$raid1swap --level=1 --raid-devices=2 /dev/sd${l_sata1}4 /dev/sd${l_sata2}4 --force --run
# The LVM partition in the harddrive.
mdadm --create /dev/$raid1hd --level=1 --raid-devices=2 /dev/sd${l_sata1}5 /dev/sd${l_sata2}5 --force --run
# The SSD RAID1 volume.
mdadm --create /dev/$raid1ssd --level=1 --raid-devices=2 /dev/sd${l_ssd1}1 /dev/sd${l_ssd2}1 --force --run
### Creation of the volume group LVM on the HD RAID1 volume etc. ###
pvcreate -ff --yes /dev/$raid1hd
vgcreate --force --yes vg1 /dev/$raid1hd
lvcreate --name varlogmysql --size 200g vg1
lvcreate --name backups --size 500g vg1
### Creation of the volume group LVM on the SSD RAID1 volume etc. ###
pvcreate -ff --yes /dev/$raid1ssd
vgcreate --force --yes vg2 /dev/$raid1ssd
lvcreate --name tmp --size 30g vg2
lvcreate --name varlibmysql --size 120g vg2
### Creation of the file systems. ###
mkfs.ext4 -F -E lazy_itable_init=0 -L system /dev/$raid1system
mkswap -L swap /dev/$raid1swap
mkfs.xfs -f -L varlogmysql /dev/mapper/vg1-varlogmysql
mkfs.xfs -f -L backups /dev/mapper/vg1-backups
mkfs.ext4 -F -E lazy_itable_init=0 -L tmp /dev/mapper/vg2-tmp
mkfs.ext4 -F -E lazy_itable_init=0 -L varlibmysql /dev/mapper/vg2-varlibmysql
# mkfs.vfat doesn't exist during Debian installation.
# Lowercase labels trigger a warning.
mkfs.fat -F 32 -n 'UEFI1' /dev/sd${l_sata1}1 # partition unused but hey...
mkfs.fat -F 32 -n 'UEFI2' /dev/sd${l_sata2}1 # partition unused but hey...
exit 0
| true
|
0737b3d7caebdfad9a0b0edb209f6ce33ea97043
|
Shell
|
fledge-iot/fledge
|
/scripts/extras/fledge_update
|
UTF-8
| 9,480
| 3.96875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
##
# This script is copied into Fledge 'bin' directory by the Fledge install process
#
# This script let the user to update Fledge either manually or in auto mode via a schedule
# enable/disable/remove and also to set the time interval of the schedule for apt package update.
#
##
#
# Note:
# current implementation only supports the scheduling interval setting:
# it's not possible to specify to run at particular time not a specific week day
#
__author__="Massimiliano Pinto, Amarendra K Sinha"
__copyright__="Copyright (c) 2018 OSIsoft, LLC"
__license__="Apache 2.0"
__version__="1.0"
FLEDGE_AUTO_UPDATER_VER=${__version__}
REST_API_SCHEME="http://"
SCHEDULE_PROCESS_NAME="FledgeUpdater"
SCHEDULE_NAME="Fledge updater"
# Set FLEDGE_ROOT to default location if not set
if [ "${FLEDGE_ROOT}" = "" ]; then
FLEDGE_ROOT=/usr/local/fledge
fi
# Check FLEDGE_ROOT is a directory
if [ ! -d "${FLEDGE_ROOT}" ]; then
echo "Fledge home directory missing or incorrectly set environment"
exit 1
fi
# Add FLEDGE_ROOT/python to PYTHONPATH
export PYTHONPATH="${PYTHONPATH}:${FLEDGE_ROOT}/scripts/common"
# Print usage and credits
usage()
{
echo "Fledge auto update enable/disable v${FLEDGE_AUTO_UPDATER_VER} Copyright (c) 2018 OSIsoft, LLC"
echo
echo "usage: $(basename $0) --manual --auto --enable --disable --remove-update [--host --port --use-https --update-interval=seconds]"
echo
echo mandatory options:
echo " --manual Run update script manually."
echo " --auto Create update task schedule for auto update."
echo " --enable Enables the auto update or creates the enabled schedule if not set."
echo " --disable Disables the auto update if set"
echo " --remove-update Removes the auto update"
echo
echo "optional parameters:"
echo " --host Sets the Fledge REST API host, default is 127.0.0.1"
echo " --port Sets the Fledge REST API port, default is 8081"
echo " --use-https Sets HTTPS for Fledge REST API, default is http"
echo " --update-interval Sets the auto update interval in seconds, default is 10800 (3 hours)"
exit 0
}
# Handle '--use-https' option
echo "$@" | grep -q -- --use-https && REST_API_SCHEME="https://"
# Handle input parameters
while [ "$1" != "" ]; do
PARAM=`echo $1 | awk -F= '{print $1}'`
VALUE=`echo $1 | awk -F= '{print $2}'`
case $PARAM in
--port)
API_PORT=$VALUE
;;
--host)
API_ADDRESS=$VALUE
;;
--manual)
MANUAL_UPDATE="Y"
;;
--auto)
AUTO_UPDATE="Y"
;;
--enable)
ENABLE_UPDATE="Y"
;;
--disable)
DISABLE_UPDATE="Y"
;;
--remove-update)
REMOVE_UPDATE="Y"
;;
--update-interval)
UPDATE_INTERVAL=$VALUE
;;
-h | --help)
usage
;;
*)
usage
;;
esac
shift
done
# Check for mandatoruy options first
if [ ! "${MANUAL_UPDATE}" ] && [ ! "${AUTO_UPDATE}" ] && [ ! "${ENABLE_UPDATE}" ] && [ ! "${DISABLE_UPDATE}" ] && [ ! "${REMOVE_UPDATE}" ]; then
usage
exit 1
fi
# Set API default port
if [ ! "${API_PORT}" ]; then
API_PORT=8081
fi
# Set 'localhost' if API_ADDRESS is not set
if [ ! "${API_ADDRESS}" ]; then
API_ADDRESS="localhost"
fi
# Set API URL
REST_API_URL="${REST_API_SCHEME}${API_ADDRESS}:${API_PORT}"
# Check Fledge API is running at API_ADDRESS, API_PORT via 'ping'
CHECK_SERVICE=`curl -s -k --max-time 30 "${REST_API_URL}/fledge/ping" | grep -i uptime`
if [ ! "${CHECK_SERVICE}" ]; then
if [ ! "${CHECK_SERVICE}" ]; then
echo "$(basename $0): Error: cannot connect to Fledge API at [${REST_API_URL}]"
exit 1
fi
fi
# Check whether SCHEDULE_NAME exists
# Abort on JSON erros
CMD_SCHEDULE_EXISTS="curl -s -k --max-time 30 '${REST_API_URL}/fledge/schedule' | python3 -m json_parse get_schedule_id '${SCHEDULE_NAME}'"
SCHEDULE_EXISTS=`eval ${CMD_SCHEDULE_EXISTS}`
ret_code=$?
if [ "${ret_code}" -ne 0 ]; then
echo "$(basename $0): Error: checking schedule ${SCHEDULE_NAME}, [${SCHEDULE_EXISTS}]. Check Fledge configuration."
exit 3
fi
# Check SCHEDULE_NAME details from JSON data
# Abort if more than one scheduler is found
# Note:
# If the schedule doesn't exist it will be created with --enable
if [ "${SCHEDULE_EXISTS}" ]; then
NUM_SCHEDULES=`echo ${SCHEDULE_EXISTS} | tr ' ' '\\n' | wc -l`
if [ "${NUM_SCHEDULES}" -gt 1 ]; then
echo "$(basename $0): Error: found more than one 'schedule_id' for schedule ${SCHEDULE_PROCESS_NAME}. Check Fledge configuration."
exit 3
fi
# Set the schedule id
SCHEDULE_ID=${SCHEDULE_EXISTS}
fi
# Set default interval
if [ ! "${UPDATE_INTERVAL}" ]; then
UPDATE_INTERVAL=10800
fi
# Prepare JSON paylod for the new schedule creation
#
# - task type is INTERVAL
# - repeat set to default or specified value
# - enabled set to true
#
SCHEDULE_SET_PAYLOAD="{\"type\": 3, \
\"name\": \"${SCHEDULE_NAME}\",
\"process_name\": \"${SCHEDULE_PROCESS_NAME}\",
\"repeat\": ${UPDATE_INTERVAL},
\"enabled\": \"t\",
\"exclusive\": \"t\"}"
###
# Commands handling
###
# If manual mode has been choosen, then simply run the update task script and exit
if [ "${MANUAL_UPDATE}" = "Y" ]; then
# CREATE API call
MANUAL_OUTPUT=`curl -s -k --max-time 30 -X PUT "${REST_API_URL}/fledge/update"`
# Check 'deleted' in JSON output
CHECK_MANUAL=`echo ${MANUAL_OUTPUT} | grep Running`
if [ ! "${CHECK_MANUAL}" ]; then
echo "$(basename $0): error: failed to run manual update: ${MANUAL_OUTPUT}"
exit 3
else
echo "The Fledge update process has been successfully scheduled."
exit 0
fi
fi
# Create Update schedule vide create task
if [ "${AUTO_UPDATE}" = "Y" ]; then
if [ "${SCHEDULE_ID}" ]; then
echo "$(basename $0): warning: the schedule '${SCHEDULE_NAME}' with id ${SCHEDULE_ID} already exists."
exit 2
fi
# CREATE API call
AUTO_OUTPUT=`curl -s -k --max-time 30 -X POST -d "${SCHEDULE_SET_PAYLOAD}" "${REST_API_URL}/fledge/schedule"`
# Check 'deleted' in JSON output
AUTO_CREATE=`echo ${AUTO_OUTPUT} | grep ${SCHEDULE_PROCESS_NAME}`
if [ ! "${AUTO_CREATE}" ]; then
echo "$(basename $0): error: failed to create schedule: ${AUTO_OUTPUT}"
exit 3
else
echo "The Fledge update has been successfully auto scheduled."
exit 0
fi
fi
#
# --remove-update
# Remove the schedule from Fledge
#
if [ "${REMOVE_UPDATE}" = "Y" ]; then
if [ ! "${SCHEDULE_ID}" ]; then
echo "$(basename $0): warning: the schedule '${SCHEDULE_NAME}' is not active."
exit 2
fi
# DELETE API call
REMOVE_OUTPUT=`curl -s -k --max-time 30 -X DELETE "${REST_API_URL}/fledge/schedule/${SCHEDULE_ID}"`
# Check 'deleted' in JSON output
CHECK_REMOVE=`echo ${REMOVE_OUTPUT} | grep -i message | grep -i deleted`
if [ ! "${CHECK_REMOVE}" ]; then
echo "$(basename $0): error: failed to remove schedule: ${REMOVE_OUTPUT}"
exit 3
else
echo "The schedule '${SCHEDULE_NAME}', ID [${SCHEDULE_ID}] has been removed."
exit 0
fi
fi
#
# --enable
# Enable the update schedule or activating it if not set
#
if [ "${ENABLE_UPDATE}" = "Y" ]; then
if [ ! "${SCHEDULE_ID}" ]; then
echo "The schedule '${SCHEDULE_NAME}' is not active. Activating and enabling it"
# Create the schedule
# POST API call for 'enable' and 'update interval'
SCHEDULE_SET=`curl -s -k --max-time 30 -X POST -d "${SCHEDULE_SET_PAYLOAD}" "${REST_API_URL}/fledge/schedule"`
# Check "id" in JSON output
CMD_NEW_SCHEDULE_EXISTS="echo '${SCHEDULE_SET}' | python3 -m json_parse get_current_schedule_id '${SCHEDULE_NAME}'"
SCHEDULE_ID=`eval ${CMD_NEW_SCHEDULE_EXISTS}`
if [ ! "${SCHEDULE_ID}" ]; then
echo "$(basename $0): error: cannot get 'schedule_id' for new created schedule '${SCHEDULE_NAME}': [${SCHEDULE_SET}]"
exit 3
fi
echo "Schedule '${SCHEDULE_NAME}' successfully added, ID [${SCHEDULE_ID}], interval ${UPDATE_INTERVAL} seconds"
exit 0
else
# Update the schedule, using SCHEDULE_ID
# PUT API call for 'enable'and 'update interval'
ENABLE_OUTPUT=`curl -s -k --max-time 30 -X PUT -d "{\"repeat\": ${UPDATE_INTERVAL}, \"enabled\": true}" "${REST_API_URL}/fledge/schedule/${SCHEDULE_ID}"`
# Check "id":"..." in JSON output
CMD_NEW_SCHEDULE_EXISTS="echo '${ENABLE_OUTPUT}' | python3 -m json_parse get_current_schedule_id '${SCHEDULE_NAME}'"
SCHEDULE_ID=`eval ${CMD_NEW_SCHEDULE_EXISTS}`
if [ ! "${SCHEDULE_ID}" ]; then
echo "$(basename $0): error: failed to enable schedule: ${ENABLE_OUTPUT}"
exit 3
else
echo "The schedule '${SCHEDULE_NAME}', ID [${SCHEDULE_ID}] has been enabled, interval ${UPDATE_INTERVAL} seconds"
fi
exit 0
fi
fi
#
# --disable
# Disable the update schedule (just set 'false')
#
if [ "${DISABLE_UPDATE}" = "Y" ]; then
if [ ! "${SCHEDULE_ID}" ]; then
echo "$(basename $0): info: the schedule '${SCHEDULE_NAME}' is not active. Try with --enable to install/active it"
exit 2
fi
# PUT API call for 'disable' only using SCHEDULE_ID
DISABLE_OUTPUT=`curl -s -k --max-time 30 -X PUT "${REST_API_URL}/fledge/schedule/${SCHEDULE_ID}/disable"`
# Check "scheduleId":"..." in JSON output
CHECK_DISABLE=`echo ${DISABLE_OUTPUT} | grep -i '\"scheduleId\"'`
if [ ! "${CHECK_DISABLE}" ]; then
echo "$(basename $0): error: failed to disable schedule: ${DISABLE_OUTPUT}"
exit 3
else
echo "The schedule '${SCHEDULE_NAME}', ID [${SCHEDULE_ID}] has been disabled."
exit 0
fi
fi
| true
|
30ec1990ffed2abcfdc5999217faafbb0e0f71da
|
Shell
|
silverlovesl/xorm-entity-gen
|
/gen-electricity.sh
|
UTF-8
| 488
| 3.171875
| 3
|
[] |
no_license
|
#! /bin/bash
# Generate a Go entity file for a MySQL table and format it with goimports.
# Prompts for the table name, runs generate.py against the local database,
# and writes <entities>/<tablename-lowercased>.go.
echo -e "Input table name"
read TABLE_NAME
# Destination package directory inside GOPATH.
OUTPUT=$GOPATH/src/bitbucket.org/beecomb-grid/renewable-energy-institute-api/entities
# Output file name (table name lower-cased)
OUTPUT_FILE_PATH=${OUTPUT}/$(tr '[A-Z]' '[a-z]' <<< ${TABLE_NAME})
# Generate go entity
# NOTE(review): DB credentials are hardcoded here — presumably a local dev
# database; consider reading them from the environment.
python generate.py \
--user=rei_user \
--host=127.0.0.1 \
--port=3306 \
--passwd=rei_pass \
--database=renewable_energy_institute \
--table_name=${TABLE_NAME} > ${OUTPUT_FILE_PATH}.go
# Format file (fixes imports and gofmt-formats the whole package dir)
goimports -w $OUTPUT
| true
|
219aa7ed3921549a5d45112b3bcf5ffd7fbe5f55
|
Shell
|
jgwest/turtl-docker
|
/postgres/artifacts/pg-dump-to-temp-file.sh
|
UTF-8
| 649
| 3.390625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
export SCRIPT_LOCT=$( cd $( dirname $0 ); pwd )
cd $SCRIPT_LOCT
. ./pg-container-includes.sh
# -------------------------
create_scoped_temp_dir "pg-dump-dir"
# GENERATED_TMP_DIR
TEMP_DIR=$GENERATED_TMP_DIR
pg_dump --format=directory --file=$TEMP_DIR --compress=0 -d postgres -U postgres -p 5432
if [[ $? != 0 ]]; then
echo "Error: Unable to dump postgres contents to file."
exit 1
fi
create_scoped_temp_dir "pg-dump-archive"
# GENERATED_TMP_DIR
TEMP_FILE="$GENERATED_TMP_DIR/pg-dump-archive.tar.gz"
set -e
# TEMP_FILE=`mktemp --suffix=.tar.gz`
cd $TEMP_DIR
tar czf $TEMP_FILE *
rm -rf $TEMP_DIR
echo $TEMP_FILE
exit 0
| true
|
8ac204ad88ddb06f28f6dea93400051ba1994aa1
|
Shell
|
carlosmccosta/ros_development_tools
|
/eclipse/eclipse_cmake.sh
|
UTF-8
| 410
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
build_dir=${1:-"$HOME/catkin_ws/build"}
source_dir=${2:-"$HOME/catkin_ws/src"}
build_type=${3:-'Release'} # Debug | Release | MinSizeRel | RelWithDebInfo
eclipse_version=${4:-'4.6'}
make_args=${5:-'-j8'}
cd "${build_dir}"
cmake -G"Eclipse CDT4 - Unix Makefiles" -DCMAKE_BUILD_TYPE=${build_type} -DCMAKE_ECLIPSE_MAKE_ARGUMENTS=${make_args} -DCMAKE_ECLIPSE_VERSION=${eclipse_version} "${source_dir}"
| true
|
01b2446e02a8a7ce0c0d1a2416fb385316420d09
|
Shell
|
ministryofjustice/hmpps-mis-terraform-repo
|
/ec2-ndl-dfi/scripts/create_data-sync-task.sh
|
UTF-8
| 1,479
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
REGION=$1
SOURCE_LOCATION_ARN=$2
CLOUDWATCH_LOG_ARN=$3
NAME=$4
FSX_SG_ARN=$5
USER_PARAM=$6
PASS_PARAM=$7
FSX_DOMAIN=$8
OPTIONS=VerifyMode="NONE",OverwriteMode="ALWAYS",Atime="BEST_EFFORT",Mtime="PRESERVE",Uid="NONE",Gid="NONE",PreserveDeletedFiles="REMOVE",PreserveDevices="NONE",PosixPermissions="NONE",TaskQueueing="ENABLED",LogLevel="TRANSFER"
#Create Destination Location #Unable to consume FSX outputs as it is written in TF13
FSX_USER=$(aws ssm get-parameters --names $USER_PARAM --region $REGION --query "Parameters[0]"."Value" | sed 's:^.\(.*\).$:\1:') || exit $?
FSX_PASS=$(aws ssm get-parameters --with-decryption --names $PASS_PARAM --region $REGION --query "Parameters[0]"."Value" | sed 's:^.\(.*\).$:\1:') || exit $?
FSX_ARN=$(aws fsx describe-file-systems | jq -r .FileSystems[0].ResourceARN) || exit $?
DESTINATION_LOCATION_ARN=$(aws datasync create-location-fsx-windows --fsx-filesystem-arn "${FSX_ARN}" --security-group-arns "${FSX_SG_ARN}" --user "${FSX_USER}" --password "${FSX_PASS}" --subdirectory "dfinterventions/dfi" --domain "${FSX_DOMAIN}" --tags "Key=Name,Value=$NAME" --region "${REGION}" | jq -r .LocationArn) || exit $?
#Create datasync Task
aws datasync create-task --source-location-arn ${SOURCE_LOCATION_ARN} --destination-location-arn ${DESTINATION_LOCATION_ARN} --cloud-watch-log-group-arn ${CLOUDWATCH_LOG_ARN} --name ${NAME} --options ${OPTIONS} --region ${REGION} && echo Success || exit $?
| true
|
030e5d1595c0de513752a4de649b2816cdbe3ee2
|
Shell
|
bruceSz/learnToexcellent
|
/shell/emulate_dir.sh
|
UTF-8
| 516
| 3.75
| 4
|
[] |
no_license
|
#!/bin/sh
# DIR pretends we're the DOS DIR command: translates DOS-style /X flags
# into ls options and lists the given directories.

# Print usage on stderr and abort with status 1.
usage()
{
	# BUG FIX: was 'cat < EOF >&2', which tries to read a *file* named EOF
	# (and then executes the usage text as commands); a here-doc is intended.
	cat <<EOF >&2
Usage: $0 [DOS flags] directory or directories
where:
   /D   sort by columns
   /H   show help for this shell script
   /N   show long listing format with filenames on right
EOF
	exit 1
}

postcmd=""
flags=""

# Translate DOS flags into ls options; everything else is passed through.
while [ $# -gt 0 ]
do
	case "$1" in
	/D ) flags="$flags -x" ;;
	/H ) usage ;;
	/[NOW] ) flags="$flags -l" ;;
	esac
	shift
done

echo "$@"
# BUG FIX: the original tested the literal string "postcmd" (always
# non-empty), so ls output was unconditionally piped into the *empty*
# $postcmd and silently discarded.
if [ -n "$postcmd" ] ; then
	# $flags intentionally unquoted: it word-splits into separate ls options.
	ls $flags "$@" | $postcmd
else
	ls $flags "$@"
fi
| true
|
7a4cefd4cc34c2ea59c731e4d74085585030c5e6
|
Shell
|
tungtran3012/Linux2018
|
/GKD1-L2/bai3.sh
|
UTF-8
| 646
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
bai3()
{
if [ $1 -eq 0 ]
then
if [ $2 -eq 0 ]
then
if [ $3 -eq 0 ]
then
echo "PT vo so nghiem"
else
echo "PT vo nghiem"
fi
else
result1=$(echo "scale=2;(-$3)/$2"|bc)
echo "PT co so nghiem la: $result1"
fi
else
delta=$(($2*$2-4*$1*$3))
if [ $delta -lt 0 ]
then
echo "PT vo nghiem"
else
if [ $delta -eq 0 ]
then
result2=$(echo "scale=2;(-$2)/(2*$1)"|bc)
echo "PT co nghiem $result2"
else
result3=$(echo "scale=2;(-$2-sqrt($delta))/(2*$1)"|bc)
echo "PT co nghiem $result3"
result4=$(echo "scale=2;($2-sqrt($delta))/(2*$1)"|bc)
echo "PT co nghiem $result4"
fi
fi
fi
}
bai3 0 -2 4
| true
|
c7a786a5abc73c457c2ca297a9a5f821c62802c3
|
Shell
|
qpfiffer/DotFiles
|
/.vim/deploy/scripts/provision_ci.sh
|
UTF-8
| 191
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
. base.sh
CI_DETAILS=$1
host=$(cleaned_cluster_detail $CI_DETAILS "ci_ip")
cat {base,self_provision_ci}.sh | \
ssh -o StrictHostKeyChecking=no \
-At ubuntu@"$host"
| true
|
31b57a7c37cd9c527295d8b02a65e10f40ac334e
|
Shell
|
diku-kmc/kleenexlang
|
/bench/re2j/getant.sh
|
UTF-8
| 737
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
echo "Checking if 'ant' is installed on system..."
if hash ant 2>/dev/null; then
echo "'ant' is already installed."
echo "Making ./ant point to system ant"
rm -f ant
echo "#!/bin/sh" >> ant
echo "ant \$@" >> ant
chmod +x ant
exit 0
fi
echo "'ant' is not installed; installing it locally."
antball="apache-ant-1.9.4-bin.tar.gz"
anturl="http://mirrors.dotsrc.org/apache//ant/binaries/apache-ant-1.9.4-bin.tar.gz"
antdir="apache-ant-1.9.4"
if [ ! -f $antball ]; then
echo "Downloading ant..."
wget -O $antball $anturl
fi
if [ ! -d $antdir ]; then
echo "Unpacking ant..."
tar xfz $antball
echo "Making ./ant point to newly installed ant"
ln -s $antdir/bin/ant ant
fi
| true
|
bded5cfd9d32f5b4fd552f5c988a0ff147fc3e4f
|
Shell
|
tnakaicode/jburkardt
|
/doomsday/doomsday_prb.sh
|
UTF-8
| 500
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
#
g++ -c -I/$HOME/include doomsday_prb.cpp
if [ $? -ne 0 ]; then
echo "Errors compiling doomsday_prb.cpp"
exit
fi
#
g++ doomsday_prb.o /$HOME/libcpp/$ARCH/doomsday.o -lm
if [ $? -ne 0 ]; then
echo "Errors linking and loading doomsday_prb.o."
exit
fi
#
rm doomsday_prb.o
#
mv a.out doomsday_prb
./doomsday_prb > doomsday_prb_output.txt
if [ $? -ne 0 ]; then
echo "Errors running doomsday_prb."
exit
fi
rm doomsday_prb
#
echo "Program output written to doomsday_prb_output.txt"
| true
|
a4f2bb55b0f4b330d30f9b45815aa23225719805
|
Shell
|
prboyer/linux-stuff
|
/zshsetup.sh
|
UTF-8
| 2,521
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Set up zsh with oh-my-zsh, autosuggestion/syntax-highlighting plugins,
# powerlevel10k, a Nerd Font, and a custom .zshrc.
# Flags: -w/--WSL (append WSL display export), -t/--theme NAME, -u/--username NAME.

# Initialize variables that control script behavior.
# BUG FIX: 'local' is only valid inside a function; at the top level bash
# errors with "local: can only be used in a function".
wsl_mode=false

# Handle command line switches
while [[ "$1" =~ ^- && ! "$1" == "--" ]];
do case $1 in
	-w | --WSL )
		wsl_mode=true;
		;;
	-t | --theme )
		shift; theme=$1;
		;;
	-u | --username )
		shift; username=$1
		;;
esac; shift; done
if [[ "$1" == '--' ]]; then
	shift;
fi

echo Installing Dependencies
sudo apt install zsh git -y
echo Dependencies installed.

echo Configuring ZSH
# Install oh-my-zsh, then clone the plugin/theme repos into its custom dir.
sh -c "$(wget -O- https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting
git clone https://github.com/romkatv/powerlevel10k.git ~/.oh-my-zsh/custom/themes/powerlevel10k
git clone https://github.com/zsh-users/zsh-history-substring-search ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-history-substring-search

# copy over custom .zshrc
echo "Copy over Custom .zshrc file"
yes | /bin/cp -f .zshrc ~/.zshrc

echo Update zshrc With Username
sed -i "s/DEFAULT_USER/DEFAULT_USER=$username/g" ~/.zshrc

echo Install a Nerd Font
# you can modify this if you want a different font.
mkdir -p ~/.local/share/fonts
chmod 755 ~/.local/share/fonts
curl https://raw.githubusercontent.com/romkatv/powerlevel10k-media/master/MesloLGS%20NF%20Regular.ttf -o ~/.local/share/fonts/MesloLGS\ NF\ Regular.ttf &
# BUG FIX: the curl above is backgrounded, so chmod/fc-cache raced against
# the download; wait for it to finish before touching the font files.
wait
chmod 644 ~/.local/share/fonts/*
fc-cache -vf ~/.local/share/fonts/

# FONT IN WSL
# You need to install the .ttf font in Windows manually. Then in the windows terminal JSON settings, set 'fontFace':"MesloLGS NF"; This will enable glyphs

# copy themes to local folder
echo copying themes
cp ./themes/* ~/.oh-my-zsh/custom/themes
chmod a-x ~/.oh-my-zsh/custom/themes/*.zsh-theme

# set the theme
sed -i "s/ZSH_THEME=/ZSH_THEME=\"$theme\"/gi" ~/.zshrc

if [[ $wsl_mode == true ]]; then
echo "
# ONLY NEEDED IN WSL
#Setting GUI Display for WSL
export DISPLAY=:0
" >> ~/.zshrc
fi

# change the shell to ZSH
echo Changing Shell
chsh -s /usr/bin/zsh $USER;
echo "You must change the font of your terminal to the installed nerd font in order for glyphs to work. Or comment out the "nerd-font" line in your ~/.zshrc file"
echo "Restart your terminal for changes to take effect."
# restart the shell
source ~/.zshrc &>/dev/null
| true
|
d9247949700fb01cacad9232f740d574d63b62cc
|
Shell
|
pmarcell10/OpRendszerek
|
/createFiles.sh
|
UTF-8
| 1,169
| 4.25
| 4
|
[] |
no_license
|
#!/bin/bash
# Create 49 files under a destination directory: copies of a non-empty
# input file in dest/content (even indices) and copies of an empty input
# file in dest/empty (odd indices).

# Signal handling: remove the generated config file when the script exits.
trap exit_handler EXIT
exit_handler() {
	if [ -f config.txt ] ; then
		rm config.txt
		echo "Config törölve."
	fi
}

echo "$(basename $0) started running with process ID: $$"

# Parameter check: exactly one destination directory is required.
if [ $# -ne 1 ] ; then
	echo "No parameter directory given. Usage: $(basename $0) dest_dir"
	exit 1
fi

# Persist the destination directory via a config file, then source it back.
touch config.txt
echo DEST_DIR="$1" >config.txt
. config.txt

if [ ! -d "$DEST_DIR" ] ; then
	echo "Directory doesn't exist. Create? Y/N"
	read A
	# BUG FIX: '|' piped the two tests into each other; '||' (logical OR)
	# is what is intended here.
	if [ "$A" = Y ] || [ "$A" = y ] ; then
		mkdir "$DEST_DIR"
	else
		echo "Failed to create directory. Exiting."
		exit 2
	fi
fi

# Read the two input file names.
echo -n "Filename with content: "
read FILE
echo -n "Empty filename: "
read E_FILE

# Exit with status 3 if either input file does not exist.
# BUG FIX: '|' replaced with '||' here as well.
if [ ! -f "$FILE" ] || [ ! -f "$E_FILE" ] ; then
	echo "Input file error."
	exit 3
fi

# Main loop: create 49 numbered copies, alternating between the two dirs.
I=1
mkdir ./"$DEST_DIR"/content
mkdir ./"$DEST_DIR"/empty
while [ $I -lt 50 ] ; do
	if [ $(( I % 2 )) -eq 0 ] ; then
		cp "$FILE" ./"$DEST_DIR"/content/FILE_$I
	else
		cp "$E_FILE" ./"$DEST_DIR"/empty/E_FILE_$I
	fi
	I=$(( I + 1 ))
done

echo Done creating files.
| true
|
a5f6980012a055a0e10d7a55c510ae3e8c8a9b2a
|
Shell
|
jethrosun/NetBricks
|
/test/app-xcdr_t/profile.sh
|
UTF-8
| 546
| 2.6875
| 3
|
[
"ISC"
] |
permissive
|
#!/bin/bash
# This script generates perf results that we can use to get a flamegraph.
#set -x
set -euo pipefail
# clean the states of transmission
sudo rm -rf downloads/*
sudo rm -rf config/*
mkdir -p config downloads
sudo rm -rf /data/downloads/*
sudo rm -rf /data/config/*
sudo mkdir -p /data/config /data/downloads
NF_NAME=pvn-p2p-nat
M_CORE=1
PORT_ONE="0000:01:00.0"
PORT_TWO="0000:01:00.1"
../../build.sh profile $NF_NAME -n "\n=========== Running RDR Proxy ============\n" -m $M_CORE \
-c 4 -c 5 \
-p $PORT_ONE -p $PORT_TWO
| true
|
7b9b3215fa7a8c8ef0a0b7a2d0f3a85513f3743f
|
Shell
|
UWrc/UWrc.github.io
|
/static/files/hyak101/python/start-jupyter-forwarding.sh
|
UTF-8
| 904
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Fetch the Jupyter port/token recorded on the klone login node and open a
# local SSH tunnel: localhost:8888 -> localhost:<port> on klone-node.

# First, get the connection information off of the klone login node:
JUPYTER_INFO=$(ssh klone-node 'cat ~/.jupyter-port-and-token' 2>/dev/null)

# If we didn't receive anything, print an error message and exit:
if [[ -z $JUPYTER_INFO ]]; then
    echo "Error: Couldn't retreive Jupyter server port/token"
    exit 1
fi

# The file contains "PORT TOKEN"; split on the space:
JUPYTER_PORT=${JUPYTER_INFO% *}
JUPYTER_TOKEN=${JUPYTER_INFO#* }

# Start the port-forwarding and save the SSH process ID:
ssh -NL 8888:localhost:$JUPYTER_PORT klone-node &
SSH_PID=$!

# BUG FIX: checking '$?' right after 'SSH_PID=$!' tests the status of the
# *assignment* (always 0), so tunnel failures were never detected.  Give
# ssh a moment to start, then verify the background process is still alive.
sleep 1
if ! kill -0 "$SSH_PID" 2>/dev/null; then
    echo "Error: Port forwarding failed."
    exit 1
fi

# Finally, print out the connection information:
echo
echo "Connect to:"
echo "    http://localhost:8888/?token=$JUPYTER_TOKEN"
echo "Close tunnel with: "
echo "    kill $SSH_PID"
echo
| true
|
2389bd4fe704cb24c8019e06d8b89d0429d3b932
|
Shell
|
andergcp/holberton-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/7-clock
|
UTF-8
| 236
| 3.515625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# displays the time for 12 hours and 59 minutes
# For each hour 0..12, print "Hour: N" followed by the minutes 1..59.
for (( hour = 0; hour <= 12; hour++ )); do
    echo "Hour: $hour"
    for (( minute = 1; minute <= 59; minute++ )); do
        echo "$minute"
    done
done
| true
|
3493d80f0a07dd442eb454d5099d37006b164bc1
|
Shell
|
heubi95/pyscaffold
|
/tests/system_test.sh
|
UTF-8
| 906
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
set -e -x
PROJECT="my_project"
# Delete old project if necessary
if [ -d $PROJECT ]; then
rm -rf $PROJECT
fi
# Setup a test project
putup $PROJECT
# Run some common tasks
cd $PROJECT
python setup.py test
python setup.py doctest
python setup.py docs
python setup.py version
python setup.py sdist
python setup.py bdist
# Try updating
cd ..
putup --update $PROJECT
cd $PROJECT
git_diff=`git diff`
test ! -n "$git_diff"
# Try changing the description
cd ..
DESCRIPTION="new_description"
putup --update $PROJECT -d $DESCRIPTION
cd $PROJECT
test "`python setup.py --description`" = $DESCRIPTION
cd ..
putup --force --update $PROJECT -d $DESCRIPTION
cd $PROJECT
test "`python setup.py --description`" = $DESCRIPTION
# Try forcing overwrite
putup --force $PROJECT
# Try running Tox
if [[ "$DISTRIB" == "ubuntu" ]]; then
cd $PROJECT
tox -e py27
cd ..
fi
echo "System test successful!"
| true
|
f88ac35d6b4a4af8db3eb2119595ef29f8b7dc3a
|
Shell
|
Samironius/Python-Scripts
|
/script
|
UTF-8
| 187
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the homework script hw2.py against the JSON fixtures.
# $1 must be exactly "hw2.py"; anything else prints a usage hint.
testVariable=$1

# for tests
if [[ "$testVariable" == "hw2.py" ]]; then
	python3 "$testVariable" data_json.json result.json
	ls
else
	# BUG FIX: the message said "patch"; the script asks for the *path* to hw2.py.
	echo "enter please path to hw2.py"
fi
| true
|
ffba4b9dedbe5367c6a4351206785e744d8f1b84
|
Shell
|
mhoh3963/dk4cub-script
|
/util.sh
|
UTF-8
| 1,165
| 3.90625
| 4
|
[] |
no_license
|
#! /bin/bash
#set -x
function trim()
{
	# Strip leading and trailing spaces from $1 and print the result on stdout.
	local __STR=$1

	# BUG FIX: the guard tested "$STR" (an unrelated, normally-unset variable)
	# instead of "$__STR", so the sed trim never ran; trimming only happened
	# accidentally via word-splitting in the unquoted echo below.
	if [ -n "$__STR" ]
	then
		__STR=$(echo $__STR | sed -e 's/^ *//g' -e 's/ *$//g')
	fi
	echo $__STR
}
function cut_trim()
{
	# Extract field $2 from string $1 using delimiter $3 (cut's default TAB
	# delimiter when $3 is omitted), then print the trimmed field.
	local input=$1
	local field=$2
	local delim=$3

	if [ -n "$input" ] && [ -n "$field" ]
	then
		if [ -z "$delim" ]
		then
			input=$(echo $input | cut -f $field)
		else
			input=$(echo $input | cut -f $field -d $delim)
		fi
	fi
	echo $(trim "$input")
}
function is_exist_image()
{
	# Check whether a local docker image exists.
	# $1 is "name:tag"; prints "1" if an image whose name matches has that
	# exact tag, "0" otherwise.
	local __NAME=$(cut_trim $1 1 ":")
	local __TAG=$(cut_trim $1 2 ":")
	# Second whitespace-separated column of `docker images` is the TAG.
	# NOTE(review): grep matches the name as a substring anywhere in the
	# line — an image whose tag or repo merely contains $__NAME could match.
	local __VALUE=`docker images | grep $__NAME | xargs | cut -d" " -f 2`
	if [ "$__VALUE" == "$__TAG" ]
	then
		echo "1"
	else
		echo "0"
	fi
}
function is_exist_container()
{
	# Check whether a docker container (running or stopped) exists.
	# $1 is the container name; prints "1" if found, "0" otherwise.
	local __HOSTNAME=$1
	# Last column of `docker ps -a` is the container NAME; rev/cut/rev
	# extracts the last whitespace-separated field.
	local __VALUE=`docker ps -a | grep $__HOSTNAME | rev | xargs | cut -d" " -f 1 | rev`
	if [ "$__VALUE" == "$__HOSTNAME" ]
	then
		echo "1"
	else
		echo "0"
	fi
}
function build_docker()
{
	# Build a docker image and report success by checking the image exists.
	# $1 image name:tag, $2 Dockerfile path, $3 file to capture build output.
	# Prints "1" on success, "0" on failure.
	local __IMAGE=$1
	local __DOCKER_FILE=$2
	local __TEMP_FILE=$3

	# --force-rm/--no-cache: always rebuild from scratch and discard
	# intermediate containers; build log is redirected into $__TEMP_FILE.
	docker build --force-rm=true --no-cache=true -t $__IMAGE -f $__DOCKER_FILE . > $__TEMP_FILE
	if [ $(is_exist_image $__IMAGE) -eq 1 ]
	then
		echo "1"
	else
		echo "0"
	fi
}
| true
|
8df29501eceb7974a8cd812900b689845e4fc7a7
|
Shell
|
arthurdehgan/cocobrainchannel
|
/python_files/cocostart.sh
|
UTF-8
| 1,328
| 3.171875
| 3
|
[] |
no_license
|
#! /bin/bash
# On recupere les arguments du script
subject=$1
length=$3
#path=$2
sport=$2
source "$(pwd)"/muse/bin/activate
clear
printf "Calibration utility\n"
#read -p "Press Any key to start calibration." -n1 -s
printf "\n"
read -p "Press Any key to start the first recording." -n1 -s
# le dernier argument est la duree de l'enregistrement en secondes
# l'avant dernier argument est la lettre utilisee pour identifier la condition
# cette lettre doit correspondre aux lettres dans calibration.py
printf "\n"
python save_gamma2mat.py $subject $sport n $length
printf "\n"
printf "done.\n"
printf "\n"
read -p "Press Any key to start the second recording." -n1 -s
printf "\n"
python save_gamma2mat.py $subject $sport s $length
printf "\n"
printf "done.\n"
printf "\n"
printf "training the classifier...\n"
# Dernier argument permet de specifier quelles lettres sont dans condition 1 et dans cond 2
# separees par un _ la condition a gauche du _ sera un 1 et l'autre un 0
python calibration.py $subject s_n
printf "\n"
wait
# combine raw mats to create the mat file to send to karim
python combine_mat.py $subject
read -p "Press Any key to start sending predictions" -n1 -s
# nohup python save_gamma2mat.py $subject $sport pred &>/dev/null
# blob=$!
printf "\n"
python prediction.py $subject $sport
# wait
# kill -9 $blob
| true
|
759837181d29c4bd67454b8560e553de4787c23f
|
Shell
|
ken3/LFS
|
/8.2/scripts/503_backup-lfs.sh
|
EUC-JP
| 547
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# LFSե륷ƥΥХååפμ褹
BASEDIR=`pwd`
. buildenv
[ `whoami` == root ] || exit 2
[ $LFS != "" ] || exit 2
cd $LFS && tar zcvf ${BASEDIR}/../lfs-stage4.tar.gz \
bin \
boot \
dev \
etc \
home \
lib \
lib64 \
media \
mnt \
opt \
proc \
root \
run \
sbin \
scripts \
sources \
srv \
sys \
tools \
tmp \
usr \
var
| true
|
0c695776203cc4f093c8573fecd2da88c40880e2
|
Shell
|
DeadDork/manuals
|
/BASH/bourne_shell/www.grymoire.com/Unix/Scripts/ShCmdArgs.sh
|
UTF-8
| 394
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh
usage() {
echo `basename $0`: ERROR: $* 1>&2
echo usage: `basename $0` '[-[abc]] [-o file]' '[file ...]' 1>&2
exit 1
}
set -- `getopt "abco:" "$@"` || usage
a= b= c= o=
while :
do
case "$1" in
-a) a=1;;
-b) b=1;;
-c) c=1;;
-o) shift; o="$1";;
--) break;;
esac
shift
done
shift # get rid of --
# rest of script...
| true
|
9a8e89d8f699f34a9c7e93c55a80fa70c075404b
|
Shell
|
rklubenspies/easypost-ui
|
/run
|
UTF-8
| 817
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Shortcut to start up container in production or development using docker-compose
# Usage:
# Start as development: ./run dev
# Start as production: ./run prod
# Build or rebuild images: ./run build
case "$1" in
d | dev | development)
docker-compose -f docker-compose.yml -f docker-compose.dev.yml up
;;
p | prod | production)
docker-compose -f docker-compose.yml -f docker-compose.prod.yml up
;;
b | build | rebuild)
docker-compose build
;;
*)
echo " "
echo "Usage: $0 {dev|prod|build}"
echo " "
echo "Shortcut to start up container in production or development using docker-compose"
echo "Usage:"
echo " Start as development: ./run dev"
echo " Start as production: ./run prod"
echo " Build or rebuild images: ./run build"
echo " "
exit 1
esac
| true
|
6841e80dc15d1e7ca1ee3b334240cd88d54fd150
|
Shell
|
unDeadHerbs/init
|
/home/.zshrc.d/05-reconnect.base
|
UTF-8
| 271
| 2.890625
| 3
|
[] |
no_license
|
#! /usr/bin/env zsh
# On SSH sessions with a real terminal, reattach to an existing detached
# screen session; otherwise, if there is exactly one unattached tmux
# session, attach to it.  A successful attach exits this shell afterwards.
if [ -n "$SSH_CLIENT" ] || [ -n "$SSH_TTY" ] || [ -n "$SSH_CONNECTION" ]; then
	if [[ "$TERM" != "dumb" ]]; then
		# screen -dr: detach the session elsewhere and reattach here;
		# exit the login shell once the screen session ends.
		screen -dr >/dev/null && exit
		# Only auto-attach tmux when exactly one unattached session exists,
		# so we never guess between multiple candidates.
		if [[ "$(tmux ls|grep -v attached|wc -l)" == "1" ]]; then
			tmux attach && exit
		fi
	fi
fi
| true
|
ac7f2b42c3dffc480d88af5d66e1489de7c8c9d8
|
Shell
|
gderber/smgl-cauldron
|
/cauldron/lib/libcauldron
|
UTF-8
| 11,095
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
#-------------------------------------------------------------------------------
##
##=head1 SYNOPSIS
##
## libcauldron is a set of functions used internally by the cauldron script
##
##=head1 COPYRIGHT
##
## Copyright 2009 by the Cauldron Team
##
##=head1 FUNCTIONS
##
##=over 4
##
#-------------------------------------------------------------------------------
# set LC_COLLATE to C so we don't get affected by the user's locale
# when grepping, sorting, etc.
export LC_COLLATE="C"
# shorthand and non-hardcoded /dev/null for output dumping
CAULDRON_NULL="${CAULDRON_NULL:-/dev/null}"
#-------------------------------------------------------------------------------
##
## Used only when checking initialization of the library and a fatal error
## occurs. Afterwards, functions from liberror are used instead.
##
#-------------------------------------------------------------------------------
function cauldron_fatal_error() {
  # Report the given message and terminate the script with exit status 1.
  # Used only before liberror is loaded; afterwards liberror functions
  # take over error reporting.
  local message="$1"
  printf '%s\n' "$message"
  exit 1
}
#-------------------------------------------------------------------------------
##
## error handling library includes - load these first!
##
#-------------------------------------------------------------------------------
. "$CAULDRON_COMMON/liberror" 2>"$CAULDRON_NULL" ||
cauldron_fatal_error "error: cannot load base library liberror"
# load cauldron error code and message defines
. "$CAULDRON_LIBS/errorcodes" 2>"$CAULDRON_NULL" ||
cauldron_fatal_error "error: cannot load cauldron error codes"
. "$CAULDRON_COMMON/libcolor" 2>"$CAULDRON_NULL" ||
cauldron_fatal_error "error: cannot load base library libcolor"
# test/set whether color output should be enabled
[[ $CAULDRON_COLOR != "yes" ]] && LIBCOLOR_NOCOLOR="yes"
# drop cauldron_fatal_error if liberror was successfully loaded
unset cauldron_fatal_error
#-------------------------------------------------------------------------------
## @param library name to load (no path, just basename)
##
## Loads the library given as the first argument. liberror_check_fatal is
## called to check if there was a problem loading the library, and if there was
## it will print a corresponding error message and then exit with the error
## $ERR_LOAD_LIBRARY.
##
#-------------------------------------------------------------------------------
function cauldron_load_library() {
  # Source the named library from $CAULDRON_LIBS, discarding stderr.
  # On failure, liberror_check_fatal (from liberror) prints an error and
  # exits with $ERR_LOAD_LIBRARY.
  # @param $1  library file name (basename only, no path)
  local lib="$1"
  . "$CAULDRON_LIBS/$lib" 2>$CAULDRON_NULL
  liberror_check_fatal "cannot load library $lib"
}
#-------------------------------------------------------------------------------
# cauldron includes
#-------------------------------------------------------------------------------
cauldron_load_library "lib.sorcerous"
cauldron_load_library "lib.chroot"
cauldron_load_library "lib.init"
cauldron_load_library "lib.toolchain"
#-------------------------------------------------------------------------------
## @param color state [yes or no] (optional)
##
## Wrapper function that handles setting the color state in libcolor. Possible
## values for the color state parameter are 'yes' or 'no' (without the quotes).
## If the parameter isn't supplied, then it defaults to whatever CAULDRON_COLOR
## is set to.
##
#-------------------------------------------------------------------------------
function cauldron_color() {
  # Set the libcolor color state.
  # @param $1  "yes" or "no" (optional; defaults to $CAULDRON_COLOR)
  local color="${1:-$CAULDRON_COLOR}"
  # BUG FIX: the original tested "$1" directly, so the documented
  # $CAULDRON_COLOR fallback in $color was computed but never used.
  [ "$color" = "no" ] && LIBCOLOR_NOCOLOR="yes"
}
#-------------------------------------------------------------------------------
## @param cleanfile
##
## Cleans out unneeded files that were used to generate the ISO, but should
## not be present on the final ISO system. cleanfile is a file that lists paths
## to remove (absolute paths, relative to a chroot of the iso), one file/path
## per line.
##
#-------------------------------------------------------------------------------
function cauldron_clean_iso_system() {
  # Remove build-only files from the ISO system.
  # @param $1  cleanfile: one absolute path (relative to the ISO chroot)
  #            per line.  Returns an ERR_* code on any failure.
  local cleanfile="$1"

  [[ -z "$cleanfile" ]] && return $ERR_CLEAN_FILE

  cauldron_chroot_current -q || return $ERR_CHROOT_CURRENT

  # Reverse sort so a directory's contents are removed before the
  # directory itself.
  for i in $(sort -r $cleanfile)
  do
    if [[ -d "$i" ]]
    then
      # BUG FIX: both output calls were written as "$CAULDRON_CMD_OUTPUT[@]}"
      # (missing the opening brace of the array expansion), which would run
      # a bogus command instead of the configured output command.
      "${CAULDRON_CMD_OUTPUT[@]}" "Attempting to remove directory $i..."
      cauldron_chroot rmdir "$i" || return $ERR_CLEAN_DIR
    else
      "${CAULDRON_CMD_OUTPUT[@]}" "Deleting $i"
      cauldron_chroot rm "$i" || return $ERR_CLEAN_FILE
    fi
  done

  return $ERR_OK
}
#-------------------------------------------------------------------------------
##
## Packages the sys directory into a compressed tarball to be placed in the iso
## filesystem as system.tar.bz2. Relies on the fact that
## CAULDRON_CMD_ARCHIVE_SYS will add a file suffix to the CAULDRON_SYS_NAME
## base filename.
##
#-------------------------------------------------------------------------------
function cauldron_package_sys() {
  # Archive and compress the sys directory into the ISO filesystem.
  # Relies on CAULDRON_CMD_ARCHIVE_SYS adding a suffix to the
  # $CAULDRON_SYS_NAME base filename (hence the ".*" glob below).
  # Returns $ERR_SYS_PACKAGE / $ERR_SYS_COMPRESS on failure, $ERR_OK otherwise.
  "${CAULDRON_CMD_ARCHIVE_SYS[@]}" || return $ERR_SYS_PACKAGE
  "${CAULDRON_CMD_COMPRESS[@]}" "$CAULDRON_BUILD/$CAULDRON_SYS_NAME".* ||
  return $ERR_SYS_COMPRESS
  return $ERR_OK
}
#-------------------------------------------------------------------------------
##
## Builds the ISO filesystem using the generated ISO data and the filename
## set in the cauldron config file.
##
#-------------------------------------------------------------------------------
function cauldron_package_iso() {
  # Build the ISO filesystem image from the generated ISO data.
  if ! "${CAULDRON_CMD_MKISO[@]}"
  then
    return $ERR_ISO_PACKAGE
  fi
  # Compress the finished image named by the cauldron config.
  if ! "${CAULDRON_CMD_COMPRESS[@]}" "$CAULDRON_ISO_NAME"
  then
    return $ERR_ISO_COMPRESS
  fi
  return $ERR_OK
}
#-------------------------------------------------------------------------------
##
## Sources the enchantment config files in the build chroot so that we can
## obtain (and modify if the user so wishes) the paths where we will store the
## enchantment files (libraries, installers, install modules, etc.) on the ISO.
##
#-------------------------------------------------------------------------------
function cauldron_source_enchantment() {
  # Paths: the host's pristine enchantment config, plus the config and local
  # override inside the ISO build tree.
  local host_enchant_config="$CAULDRON_HOST/etc/cauldron/enchantment/config"
  local enchant_config="$CAULDRON_BUILD/etc/enchantment/config"
  local enchant_lconfig="$CAULDRON_BUILD/etc/enchantment/local/config"
  # make sure the ISO has the enchantment configs first
  # if not, we need to copy them (they are required for the ISO)
  if [[ ! -f "$enchant_config" ]]
  then
    "${CAULDRON_CMD_CP_FORCE[@]}" "$host_enchant_config" "$enchant_config" ||
      liberror_die $ERR_ENCHANT_CONFIG
  fi
  # set ENCHANT_ISO_PATH before sourcing the enchantment configs so all the
  # enchantment variables used by cauldron will be relative to the cauldron
  # build chroot
  ENCHANT_ISO_PATH="$CAULDRON_BUILD"
  # source the enchantment config to make sure we get the paths correct
  # (stderr silenced; liberror_die exits the whole script on failure)
  . "$enchant_config" 2>$CAULDRON_NULL ||
    liberror_die $ERR_ENCHANT_CONFIG
  # try to source the local config, but don't die if it doesn't exist
  . "$enchant_lconfig" 2>$CAULDRON_NULL
}
#-------------------------------------------------------------------------------
##
## Adds the given installer(s) listed in $CAULDRON_INSTALLERS to the ISO system.
## The user can then choose the installer either at boot-time or run-time.
##
## The installer(s) can currently be shell or menu (future: X?). In theory it
## should support any installer, as long as it's packaged in its own directory
## and the name of that dir is included in CAULDRON_INSTALLERS, and the binary
## to execute for that installer is named enchantment* (i.e., enchantment
## (shell) or enchantment-menu). This behavior may change in the future though.
##
#-------------------------------------------------------------------------------
function cauldron_add_installers() {
  local installer=""
  # no need to return error if cauldron_source_enchantment fails, because it
  # runs liberror_die itself on error, which causes an exit
  [[ -z $ENCHANT_DATA ]] && cauldron_source_enchantment
  # make sure CAULDRON_BUILD/ENCHANT_DATA exists
  "${CAULDRON_CMD_MKDIR[@]}" "$ENCHANT_DATA" || return $ERR_INSTALLER_DIR
  for installer in $CAULDRON_INSTALLERS
  do
    # copy this installer's directory tree onto the ISO
    "${CAULDRON_CMD_CP_RECURSE[@]}" \
      "$CAULDRON_LIBS/enchantment/$installer" \
      "$ENCHANT_DATA/" ||
        return $ERR_COPY_INSTALLER
    # BUGFIX: was "$CAULDRON_CMD_SYMLINK_FORCE[@]}" (missing opening brace),
    # so the configured symlink command never actually ran
    "${CAULDRON_CMD_SYMLINK_FORCE[@]}" \
      "$ENCHANT_DATA/$installer"/bin/enchantment* \
      "$CAULDRON_BUILD/usr/sbin/" ||
        return $ERR_LINK_INSTALLER
  done
  return $ERR_OK
}
#-------------------------------------------------------------------------------
##
## This creates the CAULDRON_BUILD directory, and copies sorcery and
## cauldron files into it. It also configures the CAULDRON_BUILD
## sorcery as necessary (i.e. ensures that CLEAN_SOURCE="off") for
## sanity purposes
##
#-------------------------------------------------------------------------------
function cauldron_prepare() {
  # ensure that the build dir exists
  # NOTE(review): both mkdir results are unchecked — confirm failure here is
  # acceptable to defer to the later cp/chroot error paths
  "${CAULDRON_CMD_MKDIR[@]}" "$CAULDRON_BUILD"
  # ensure CAULDRON_BUILD/etc exists
  "${CAULDRON_CMD_MKDIR[@]}" "$CAULDRON_BUILD/etc"
  # copy the host's resolv.conf in order to dl sources from the net
  "${CAULDRON_CMD_CP[@]}" \
    "$CAULDRON_RESOLV" \
    "$CAULDRON_BUILD/etc/resolv.conf" ||
      return $ERR_RESOLV
  # prepare chroots to $CAULDRON_BUILD
  cauldron_chroot_init || return $?
  # install sorcery into the build dir
  # (lazy-load lib.sorcerous if its install function isn't defined yet)
  if ! type -t cauldron_sorcerous_install > "$CAULDRON_NULL"
  then
    cauldron_load_library lib.sorcerous
  fi
  cauldron_sorcerous_install "$CAULDRON_BUILD" || return $ERR_SORCEROUS_INSTALL
  # cauldron is a spell
  # there is a spoon
  cauldron_chroot "${CAULDRON_CMD_CAST[@]}" cauldron || return $ERR_CHROOT_CAST
  # clean up the chroot
  cauldron_chroot_done || return $?
  return $ERR_OK
}
#-------------------------------------------------------------------------------
##
## Cleans up the tmp directories and such forth
##
#-------------------------------------------------------------------------------
function cauldron_cleanup() {
  # A full clean (CAULDRON_CLEAN=yes) removes the build tree as well.
  if [[ "$CAULDRON_CLEAN" == "yes" ]]
  then
    "${CAULDRON_CMD_RM_RECURSE_FORCE[@]}" "$CAULDRON_BUILD" ||
      return $ERR_CLEANUP_BUILD
  fi
  # The temp directory is removed unconditionally.
  if ! "${CAULDRON_CMD_RM_RECURSE_FORCE[@]}" "$CAULDRON_TMP"
  then
    return $ERR_CLEANUP_TMP
  fi
  return $ERR_OK
}
#-------------------------------------------------------------------------------
##
## This software is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This software is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this software; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
#-------------------------------------------------------------------------------
# vim:ai:tw=80:tabstop=2:softtabstop=2:shiftwidth=2:expandtab
| true
|
544984dbed0c9dbf9d1c1ebc0bc9082896417913
|
Shell
|
dolthub/dolt
|
/integration-tests/bats/query-catalog.bats
|
UTF-8
| 5,398
| 3
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bats
load $BATS_TEST_DIRNAME/helper/common.bash
setup() {
  setup_common
  # Fixture: table with a single-column primary key.
  dolt sql <<SQL
CREATE TABLE one_pk (
  pk BIGINT NOT NULL,
  c1 BIGINT,
  c2 BIGINT,
  c3 BIGINT,
  c4 BIGINT,
  c5 BIGINT,
  PRIMARY KEY (pk)
);
SQL
  # Fixture: table with a composite (two-column) primary key.
  dolt sql <<SQL
CREATE TABLE two_pk (
  pk1 BIGINT NOT NULL,
  pk2 BIGINT NOT NULL,
  c1 BIGINT,
  c2 BIGINT,
  c3 BIGINT,
  c4 BIGINT,
  c5 BIGINT,
  PRIMARY KEY (pk1,pk2)
);
SQL
  # Seed four rows each; c1 values line up so joins on c1 match one-to-one.
  dolt sql -q "insert into one_pk (pk,c1,c2,c3,c4,c5) values (0,0,0,0,0,0),(1,10,10,10,10,10),(2,20,20,20,20,20),(3,30,30,30,30,30)"
  dolt sql -q "insert into two_pk (pk1,pk2,c1,c2,c3,c4,c5) values (0,0,0,0,0,0,0),(0,1,10,10,10,10,10),(1,0,20,20,20,20,20),(1,1,30,30,30,30,30)"
}
teardown() {
  # Guard against unexpected feature-version bumps, then clean the repo up.
  assert_feature_version
  teardown_common
}
@test "query-catalog: save query" {
  # The catalog table does not exist until a query is saved with -s.
  run dolt sql -q "desc dolt_query_catalog"
  [ "$status" -eq 1 ]
  # Saving a query (-s name, -m message) executes it and records it.
  run dolt sql -q "select pk,pk1,pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1" -s "my name" -m "my message"
  [ "$status" -eq 0 ]
  [ "${#lines[@]}" -eq 8 ]
  run dolt sql -q "desc dolt_query_catalog"
  [ "$status" -eq 0 ]
  # The catalog row carries the name, description and exact query text.
  run dolt sql -q "select * from dolt_query_catalog" -r csv
  [ "$status" -eq 0 ]
  [ "${#lines[@]}" -eq 2 ]
  [[ "$output" =~ "id,display_order,name,query,description" ]] || false
  [[ "$output" =~ "my message" ]] || false
  [[ "$output" =~ "my name" ]] || false
  [[ "$output" =~ "select pk,pk1,pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1" ]] || false
  # The catalog behaves like a normal table: shows in status, can be
  # added and committed, then disappears from status.
  run dolt status
  [ "$status" -eq 0 ]
  [[ "$output" =~ "dolt_query_catalog" ]] || false
  run dolt add dolt_query_catalog
  [ "$status" -eq 0 ]
  run dolt commit -m "Added query catalog"
  [ "$status" -eq 0 ]
  run dolt status
  [ "$status" -eq 0 ]
  ! [[ "$output" =~ "dolt_query_catalog" ]] || false
  # The saved entry survives the commit.
  run dolt sql -q "select * from dolt_query_catalog" -r csv
  [ "$status" -eq 0 ]
  [ "${#lines[@]}" -eq 2 ]
}
@test "query-catalog: empty directory" {
  # Outside a dolt database, each catalog flag must fail with a clear
  # message — and must never panic.
  mkdir empty && cd empty
  run dolt sql -q "show databases" --save name
  [ "$status" -ne 0 ]
  [[ ! "$output" =~ panic ]] || false
  [[ "$output" =~ "--save must be used in a dolt database directory" ]] || false
  run dolt sql --list-saved
  [ "$status" -ne 0 ]
  [[ ! "$output" =~ panic ]] || false
  [[ "$output" =~ "--list-saved must be used in a dolt database directory" ]] || false
  run dolt sql --execute name
  [ "$status" -ne 0 ]
  [[ ! "$output" =~ panic ]] || false
  [[ "$output" =~ "--execute must be used in a dolt database directory" ]] || false
}
@test "query-catalog: conflict" {
  # Save one catalog entry and commit it on main.
  dolt sql -q "select pk,pk1,pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1" -s "name1" -m "my message"
  dolt add .
  dolt commit -m 'Added a test query'
  # Rename the entry differently on two branches.
  dolt checkout -b edit_a
  dolt sql -q "update dolt_query_catalog set name='name_a'"
  dolt add .
  dolt commit -m 'Changed name to edit_a'
  dolt checkout main
  dolt checkout -b edit_b
  dolt sql -q "update dolt_query_catalog set name='name_b'"
  dolt add .
  dolt commit -m 'Changed name to edit_b'
  dolt checkout main
  # Merging both branches must surface a conflict on the catalog table,
  # with both competing names visible in the conflict output.
  dolt merge edit_a -m "merge edit_a"
  run dolt merge edit_b -m "merge edit_b"
  [ "$status" -eq 0 ]
  [[ "$output" =~ "Merge conflict in dolt_query_catalog" ]] || false
  run dolt conflicts cat .
  [ "$status" -eq 0 ]
  [[ "$output" =~ "name_a" ]] || false
  [[ "$output" =~ "name_b" ]] || false
}
@test "query-catalog: executed saved" {
  # Save two named queries, then re-run them via -x and check their output.
  Q1="select pk, pk1, pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1 order by 1"
  Q2="select pk from one_pk order by pk"
  dolt sql -q "$Q1" -s name1
  dolt sql -q "$Q2" -s name2
  # save Q1 and verify output
  EXPECTED=$(cat <<'EOF'
pk,pk1,pk2
0,0,0
1,0,1
2,1,0
3,1,1
EOF
)
  run dolt sql -r csv -x name1
  [ "$status" -eq 0 ]
  [[ "$output" =~ "$EXPECTED" ]] || false
  # save Q2 and verify output
  EXPECTED=$(cat <<'EOF'
pk
0
1
2
3
EOF
)
  run dolt sql -r csv -x name2
  [ "$status" -eq 0 ]
  [[ "$output" =~ "$EXPECTED" ]] || false
  # execute list-saved and verify output. I have no idea why the
  # query on the second line isn't quoted, assuming it's a bash
  # interpretation thing. Has quotes when run by hand.
  EXPECTED=$(cat <<'EOF'
id,display_order,name,query,description
name1,1,name1,"select pk, pk1, pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1 order by 1",""
name2,2,name2,select pk from one_pk order by pk,""
EOF
)
  run dolt sql --list-saved -r csv
  [ "$status" -eq 0 ]
  [[ "$output" =~ "$EXPECTED" ]] || false
  # update an existing query, and verify query catalog is updated
  # (re-saving under the same name replaces the stored query text)
  Q1_UPDATED="select pk, pk1, pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1 and pk < 3 order by 1 desc"
  dolt sql -q "$Q1_UPDATED" -s name1
  # execute list-saved and verify output
  EXPECTED=$(cat <<'EOF'
id,display_order,name,query,description
name1,1,name1,"select pk, pk1, pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1 and pk < 3 order by 1 desc",""
name2,2,name2,select pk from one_pk order by pk,""
EOF
)
  run dolt sql --list-saved -r csv
  [ "$status" -eq 0 ]
  [[ "$output" =~ "$EXPECTED" ]] || false
  EXPECTED=$(cat <<'EOF'
pk,pk1,pk2
2,1,0
1,0,1
0,0,0
EOF
)
  # Execute updated saved query and verify once output
  run dolt sql -r csv -x name1
  [ "$status" -eq 0 ]
  [[ "$output" =~ "$EXPECTED" ]] || false
}
| true
|
fb9eeaf9febfe9e03263ebf93fdb7d143fd6eda3
|
Shell
|
pcjoshi9/habitat-plans
|
/nagios-core/plan.sh
|
UTF-8
| 904
| 2.953125
| 3
|
[] |
no_license
|
# Habitat plan for building Nagios Core 4.2.0 from the upstream tarball.
pkg_origin=myorigin
pkg_name=nagios
pkg_version=4.2.0
pkg_maintainer="Some User <someuser@example.com>"
pkg_license=('Nagios Open Software License')
pkg_source=http://prdownloads.sourceforge.net/sourceforge/${pkg_name}/${pkg_name}-${pkg_version}.tar.gz
pkg_shasum=93be769854d7e64c526da29b79c92fb500a9795a82547a85ca0a9180a8f6725c
pkg_deps=(core/perl core/glibc)
# core/shadow is pulled in for useradd/groupadd, though those calls are
# currently commented out below.
pkg_build_deps=(core/gcc core/make core/shadow)
pkg_svc_user="root"
pkg_svc_group="root"
do_build() {
  # shadow pkg useradd and groupadd are not working or permission denied
  # building without useradd will configure and make but gives error saying nagios is not a user
  # building with useradd builds for a second and then exits on unseen error
  # I believe it is not a error due to privelages because this is being run as root user
  # useradd nagios
  # groupadd nagcmd
  # usermod -a -G nagios nagcmd
  ./configure --with-command-group=nagcmd
  make all
}
| true
|
72fd10a9ddec6e9357311ce7984999c06f6dcb5b
|
Shell
|
nnur/recipes
|
/scripts/before_install.sh
|
UTF-8
| 167
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# CodeDeploy BeforeInstall hook: wipe any previous deployment of the
# recipes app, then recreate its directory tree.
if [ -d "/usr/apps/recipes" ]
then
  sudo rm -rf /usr/apps/recipes
else
  echo "nothing to see here..."
fi
sudo mkdir -p /usr/apps/recipes/recipes
|
f8a2c42777ea41c13d6f1d781f9789e8906ef668
|
Shell
|
sgnn7/dotfiles
|
/bin/mem_watch
|
UTF-8
| 231
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e
# Run a command in the background and live-monitor its virtual memory
# size (VmSize) every 2 seconds via /proc; kill the command when the
# watch session is closed.
if [ -z "${1}" ]
then
  echo "Need a command to run!"
  exit 1
fi

# BUGFIX: was `${@} 2&>1 /tmp/mem_watch_output.log &`, which passed "2" and
# the log path as extra arguments and redirected output to a file literally
# named "1". Also quote "$@" so arguments containing spaces survive.
"${@}" > /tmp/mem_watch_output.log 2>&1 &
pid=$!

watch --differences -n 2 "cat /proc/${pid}/status | grep VmSize | awk '{print \$2 \$3}'"

# The watched process may already have exited; don't let -e abort on that.
kill "$pid" 2>/dev/null || true
| true
|
f1cdbec471d8618aaaadaf97408731f4ce8040f4
|
Shell
|
dargmuesli/jonas-thelemann_stack
|
/src/development/certificates/mkcert.sh
|
UTF-8
| 1,177
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/sh
# Directory this script lives in; all cert/key files are written next to it.
THIS=$(dirname "$(readlink -f "$0")")

# create NAME [DOMAIN...]
# Generate NAME.crt/NAME.key via mkcert for the given domains, then append
# the local CA root to the cert so it forms a full chain. With no domains
# (e.g. "root") only the CA root is written into NAME.crt.
create() {
  NAME="$1"
  shift
  CONTENT=$*
  path="$THIS/$NAME"
  certfile="$path.crt"
  keyfile="$path.key"

  if [ "$CONTENT" != "" ]; then
    # $CONTENT is intentionally unquoted so each domain becomes a separate
    # mkcert argument (hence the shellcheck suppression).
    # shellcheck disable=SC2086
    mkcert \
      -cert-file "$certfile" \
      -ecdsa \
      -key-file "$keyfile" $CONTENT
  fi

  cat "$(mkcert -CAROOT)/rootCA.pem" >> "$certfile"
}
# Start from a clean slate.
# NOTE(review): rm without -f errors when no certs exist yet (first run) —
# confirm whether that's acceptable or -f was intended.
rm "$THIS"/*.key "$THIS"/*.crt

# "root" gets only the CA root appended (no mkcert run — see create()).
create "root"
# One multi-SAN cert for every service fronted by traefik; the backtick
# substitutions are inline comments that expand to nothing.
create "traefik" \
    `# 1generator` "1generator.localhost" \
    `# adminer` "adminer.localhost" \
    `# creal` "creal.localhost" \
    `# creal/postgraphile` "creal-postgraphile.localhost" \
    `# creal/strapi` "creal-strapi.localhost" \
    `# hedgedoc` "hedgedoc.localhost" \
    `# jonas-thelemann` "localhost" "www.localhost" "127.0.0.1" "0.0.0.0" \
    `# minio` "minio.localhost" "s3.localhost" "creal-audio.s3.localhost" \
    `# nextcloud` "nextcloud.localhost" \
    `# portainer` "portainer.localhost" \
    `# thelounge` "thelounge.localhost" \
    `# traefik` "traefik.localhost" \
    `# trapparty` "trapparty.localhost" \
    `# trapparty/postgraphile` "trapparty-postgraphile.localhost"
| true
|
09745a394a0584fc458eaac34db91bf4ad2b9cec
|
Shell
|
omkargundlur/shell
|
/miscellaneous.sh
|
UTF-8
| 1,014
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Scratchpad of shell one-liners with explanatory notes.
# BUGFIX: the shebang said '#!/bin/expect' but every line below is bash.

#add password in run time execution
useradd demo2
echo 'unjG123@' | sudo passwd --stdin demo2

# A no-op heredoc used as a multi-line comment showing sample output.
<<'output'
[root@centos shell]# echo 'unjG123@' | sudo passwd --stdin demo1
Changing password for user demo1.
passwd: all authentication tokens updated successfully.
[root@centos shell]#
output

#Taking Input from user
#List files in long format and
ls -ld /boot /boot1 1>/tmp/out 2>/tmp/err
#This means 1 is passed so will go "out" file wheres boot1 will give an error so will go to /tmp/err, where 1 is pass an2 error code
ls -ld /boot /boot1 >/tmp/out 2>&1 #this will output both pass and error in single file
ls -ld /boot /boot1 &>/tmp/out #this will append all data to single file
ls -ld /boot /boot1 &>>/tmp/out #single > will re-write everything whereas >> will append all data
ls -ld /boot /boot1 &>/dev/null #This will execute the command but not write any output whether pass or fail
# BUGFIX: a bare `$?` executed the previous exit status as a command;
# echo it instead, matching the note's intent.
echo $? #This will store the exit code status (number) of previous run command , for eg 126 is Permission denied
| true
|
92c0d3364fd2d1f986ae36e0e1cb416dc27582a4
|
Shell
|
LoganBarnett/dotfiles
|
/darwin-keyboard-install.sh
|
UTF-8
| 1,283
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Keyboard settings script found in: https://stackoverflow.com/a/58907582
# Quit System Preferences so it doesn't muck with your settings.
osascript -e 'tell application "System Preferences" to quit'
# Key codes, sans the 7000000xx prefix, can be found here:
# https://gist.github.com/MightyPork/6da26e382a7ad91b5496ee55fdc73db2
# Or here:
# https://developer.apple.com/library/archive/technotes/tn2450/_index.html#//apple_ref/doc/uid/DTS40017618-CH1-KEY_TABLE_USAGES
# I found the latter more useful.
#
# 0x700000039 - caps-lock.
# 0x7000000E4 - right control.
# 0x7000000E0 - left control.
# Remap caps-lock to right control (src 0x39 -> dst 0xE4 below; the old
# "to escape" comment was stale — the launch agent is named
# CapslockToControl, which matches this mapping).
hidutil property --set '{
    "UserKeyMapping":[{
        "HIDKeyboardModifierMappingSrc":0x700000039,
        "HIDKeyboardModifierMappingDst":0x7000000E4
    }]
}'
# We need a launch agent that sets this again at boot time.
# TODO: Write this out with nix.
sudo mkdir -p ~/Library/LaunchAgents
sudo ln -snf \
  $PWD/mac/launch-agents/com.lwds.CapslockToControl.plist \
  ~/Library/LaunchAgents/com.lwds.CapslockToControl.plist
sudo chown -R $USER ~/Library/LaunchAgents
# TODO: Document this keyboard keycode usage somewhere on disk.
# TODO: Make this work for external keyboards too. This appears to be global or
# whatever the "main" keyboard is.
| true
|
97c350a5edacdb89779a7072ba533dda08ba0136
|
Shell
|
octocraft/terraform-external-command
|
/tests/test_02_touch/verify.sh
|
UTF-8
| 112
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Verify step for the touch test: exit non-zero if test.dat is in a bad
# state, then clean the fixture up.
# BUGFIX: removed a stray '$' that followed 'exit 1;'.
# NOTE(review): with '&&', the content check is only reached when the file is
# missing (where $(< test.dat) errors out and yields ""); '||' may have been
# intended — confirm against the test's expectations before changing logic.
if [ ! -f "test.dat" ] && [ ! "hello" = "$(< test.dat)" ] ; then
    exit 1
fi

rm -rf "test.dat"
| true
|
f9d0a851c9da729c868cba8f5c5beb4cb2764a0f
|
Shell
|
uvdl/debian-var
|
/postinstall.sh
|
UTF-8
| 775
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Post-install setup for a Variscite i.MX6 Debian image: hostname from fuse
# serial, root password, service trimming, and wifi firmware configuration.
SERVICES="ModemManager lightdm hostapd variscite-bluetooth apt-daily.timer apt-daily apt-daily-upgrade.timer apt-daily-upgrade NetworkManager-wait-online"
TWOG=${1:-1}	# number of 2.4 GHz antennas (default: 1)
FIVG=${2:-0}	# number of 5.8 GHz antennas (default: 0)
# setup hostname to match serial number read from fuses
sn1=`cat /sys/fsl_otp/HW_OCOTP_CFG0`
sn2=`cat /sys/fsl_otp/HW_OCOTP_CFG1`
printf 'IMX6%0.8x%0.8x\n' $sn1 $sn2 > /etc/hostname
cat /etc/hostname
# change root password (interactive prompt)
passwd
# disable services that slow down boot and interfere with operations
for s in $SERVICES ; do
	systemctl disable --now ${s}
done
# configure wifi chip and load initial firmware
# The heredoc feeds answers to configure-device.sh's interactive prompts;
# NOTE(review): '1837' is presumably the TI WiLink chip type — confirm
# against wlconf's prompt order before editing these lines.
( cd /usr/sbin/wlconf && ./configure-device.sh ) <<EOF
y
1837
n
${TWOG}
${FIVG}
y
EOF
| true
|
4beebb15df055af0161bde9852079669b013d84b
|
Shell
|
Matheswaaran/bash
|
/Dropbox/scripts/file.png.ch/if.and.or/if_and_or_chk.sh
|
UTF-8
| 203
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/sh
# Demo: echo the names of files in the cwd that match a whitelist; for
# everything else, print (but do not run) an 'rm -rf' command.
# BUGFIX: the original chained bash-only [[ ]] tests under a #!/bin/sh
# shebang; a case statement expresses the same whitelist portably.
sp="a.txt"
for i in *
do
  case "$i" in
    "$sp" | b.txt | "1 sp" | 2 | *.m | net*)
      echo "$i"
      ;;
    *)
      echo rm -rf "$i"
      ;;
  esac
done
| true
|
0f151469f584e71f51bfe354f68f49a0edfe898f
|
Shell
|
XervoIO/docker-run-php
|
/install_php.sh
|
UTF-8
| 1,440
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build and install PHP $PHP_VER via phpbrew, then add the mongo extension.
# NOTE(review): PHP_VER is expected from the environment (Docker build arg?)
# — confirm it is always set before this runs.
set -e
set -x

# Variables
HOME=/home/mop
PHP_BREW_DIR=$HOME/.phpbrew
PHP_INSTALL_DIR=/opt/modulus/php
TEMP_DIR=$HOME/tmp
TMP_DIR=$TEMP_DIR
TMPDIR=$TEMP_DIR
PHP_BREW_FLAGS="+default +mysql +pgsql +fpm +soap +gmp +gd +opcache -- \
--with-libdir=lib/x86_64-linux-gnu --with-gd=shared --enable-gd-natf \
--with-jpeg-dir=/usr --with-png-dir=/usr --with-gd --enable-opcache"

# Allows compiling with all cpus
export MAKEFLAGS="-j $(grep -c ^processor /proc/cpuinfo)"

# phpbrew must be initialized to work. Will create the folder
# ~/.phpbrew
phpbrew init
source ~/.phpbrew/bashrc

# Install PHP; on failure, dump the tail of the build log for diagnostics.
if ! phpbrew install php-$PHP_VER $PHP_BREW_FLAGS ; then
  if [ -f $PHP_BREW_DIR/build/php-$PHP_VER/build.log ]; then
    tail -200 $PHP_BREW_DIR/build/php-$PHP_VER/build.log
    exit 1
  else
    echo "Build failed, no log file created."
    exit 1
  fi
fi

# Install MONGO support
# NOTE: We run this out of the other loop because if we run this in the other
# loop, the installation of PHP 5.4 will almost always fail because reasons.
# (I really don't know why it fails, phpbrew just fails to install it)
phpbrew switch php-$PHP_VER
if ! phpbrew ext install mongo ; then
  echo "Installing mongo failed for $PHP_VER"
  if [ -f $PHP_BREW_DIR/build/php-$PHP_VER/ext/mongo/build.log ]; then
    tail -200 $PHP_BREW_DIR/build/php-$PHP_VER/ext/mongo/build.log
    exit 1
  else
    echo "Mongo build log missing"
    exit 1
  fi
fi
| true
|
5a465c37850eb174a1d13a40b93ea7d6bdb57869
|
Shell
|
eventhorizonpl/sic
|
/modules/basic_tools.sh
|
UTF-8
| 2,579
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
source ./lib
# Install the base tool set for the detected distro ($OS/$VERSION from the
# sourced lib), pulling in EPEL/REMI on RHEL first, then (re)enable ntpdate
# where it was installed.
function install_package()
{
  if [[ "$OS" == "rhel" ]]
  then
    if [[ "$VERSION" == "7" ]]
    then
      show_message "Installing EPEL release package..."
      rpm -ihv https://dl.fedoraproject.org/pub/epel/7/x86_64/Packages/e/epel-release-7-11.noarch.rpm >> /tmp/install.log 2>&1
      show_result $?
      show_message "Installing REMI release package..."
      rpm -ihv http://rpms.remirepo.net/enterprise/remi-release-7.rpm >> /tmp/install.log 2>&1
      show_result $?
    elif [[ "$VERSION" == "8" ]]
    then
      show_message "Installing EPEL release package..."
      rpm -ihv https://dl.fedoraproject.org/pub/epel/8/Everything/x86_64/Packages/e/epel-release-8-5.el8.noarch.rpm >> /tmp/install.log 2>&1
      show_result $?
      show_message "Installing REMI release package..."
      rpm -ihv https://rpms.remirepo.net/enterprise/remi-release-8.rpm >> /tmp/install.log 2>&1
      show_result $?
    fi
  fi

  show_message "Installing basic tools..."
  if [[ "$OS" == "fedora" ]]
  then
    if [[ "$ONLY_ESSENTIAL" == "yes" ]]
    then
      dnf install --assumeyes bzip2 git-core mc ntpdate policycoreutils-python-utils vim >> /tmp/install.log 2>&1
    else
      dnf install --assumeyes acl bzip2 git mc net-tools ntpdate patch \
        policycoreutils-python-utils screen tar unzip vim wget >> /tmp/install.log 2>&1
    fi
    show_result $?
  elif [[ "$OS" == "rhel" ]]
  then
    if [[ "$VERSION" == "7" ]]
    then
      yum install --assumeyes acl bzip2 git mc net-tools ntpdate patch \
        policycoreutils-python screen tar unzip vim wget >> /tmp/install.log 2>&1
      yum install --assumeyes centos-release-scl
      yum install --assumeyes rh-git218
    elif [[ "$VERSION" == "8" ]]
    then
      dnf install --assumeyes acl bzip2 git-core mc patch \
        policycoreutils-python-utils tar vim wget >> /tmp/install.log 2>&1
    fi
    show_result $?
  fi

  # BUGFIX: was `[ $OS != "rhel" ] && [ $VERSION != "8" ]`, which skipped the
  # ntpdate restart on rhel 7 even though ntpdate was installed there.
  # The intent is "everything except rhel 8" (rhel 8 installs no ntpdate
  # above), i.e. NOT (rhel AND 8).
  if [[ "$OS" != "rhel" || "$VERSION" != "8" ]]
  then
    show_message "\tRestarting ntpdate..."
    systemctl restart ntpdate.service >> /tmp/install.log 2>&1
    show_result $?
    show_message "\tEnabling ntpdate..."
    systemctl enable ntpdate.service >> /tmp/install.log 2>&1
    show_result $?
  fi
}
# Dispatch: each CLI argument is either the literal "install" (run the
# package installation) or a path to an extra config file to source.
while [ $# -ne 0 ]
do
  if [ $1 == "install" ]
  then
    install_package
  elif [ -f $1 ]
  then
    source $1
  fi
  shift
done
| true
|
f57a78c9aa9f9df796cf213ef8c66f5c74d60724
|
Shell
|
nikolasbasler/BEARCAVE
|
/scripts/trim_merge_SS_PE_CL72_var_min_length.sh
|
UTF-8
| 2,999
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# November 2018
softdir=../software/miniconda3/envs/py35/bin
# script for trimming adapter sequences from SS library PE data, removing short reads of user defined length, and merging overlapping PE reads.
# this script should be run from /BEARCAVE/scripts
# example command line: ./trim_merge_SS_PE_CL72_var_min_length.sh $1 $2 $3 $4
# $1 = SAMPLE*
# $2 = PREFIX*
# $3 = SEQ_RUN*
# $4 = min length threshold to be used
# * these can be found in /REPOSITORY/rawdata/metadata.txt
# set threads variable for number of cpus used by flash
threads=10
# make processing directory in trimdata directory
mkdir -p ../trimdata/$1_$4bp_processing
chmod u+w ../trimdata/$1_$4bp_processing/$2+$1* 2> /dev/null
# concatenate raw fastq files
zcat ../rawdata/$1/$2+$3*R1*.fastq.gz > ../trimdata/$1_$4bp_processing/$2+$1_$4bp_R1.fastq
zcat ../rawdata/$1/$2+$3*R2*.fastq.gz > ../trimdata/$1_$4bp_processing/$2+$1_$4bp_R2.fastq
echo "Script: trim_merge_SS_PE_CL72_var_min_length.sh - Minimum read length: $4 bp" > ../trimdata/$1_$4bp_processing/$2+$1_$4bp_trim_report.log
echo "Script: trim_merge_SS_PE_CL72_var_min_length.sh - Minimum read length: $4 bp" > ../trimdata/$1_$4bp_processing/$2+$1_$4bp_merge_report.log
# trim adaptor seqs and short seqs from R1 and R2
$softdir/cutadapt -a AGATCGGAAGAGCACACGTC -A GGAAGAGCGTCGTGTAGGGA -O 1 -m $4 -o ../trimdata/$1_$4bp_processing/$2+$1_$4bp_trim_R1.fastq -p ../trimdata/$1_$4bp_processing/$2+$1_$4bp_trim_R2.fastq ../trimdata/$1_$4bp_processing/$2+$1_$4bp_R1.fastq ../trimdata/$1_$4bp_processing/$2+$1_$4bp_R2.fastq >> ../trimdata/$1_$4bp_processing/$2+$1_$4bp_trim_report.log
# merge R1 and R2, set max overlap (-M) to 75bp as most ancient frags should be mergeable
$softdir/flash -M 75 -t $threads -d ../trimdata/$1_$4bp_processing -o $2+$1_$4bp ../trimdata/$1_$4bp_processing/$2+$1_$4bp_trim_R1.fastq ../trimdata/$1_$4bp_processing/$2+$1_$4bp_trim_R2.fastq >> ../trimdata/$1_$4bp_processing/$2+$1_$4bp_merge_report.log
# clean up unnecessary files
# here I am assuming ancient DNA data and discarding the non-overlapping PE reads
mkdir ../trimdata/$1_$4bp_processing/$2+$1_$4bp_save
mv ../trimdata/$1_$4bp_processing/$2+$1_$4bp_trim_report.log ../trimdata/$1_$4bp_processing/$2+$1_$4bp_merge_report.log ../trimdata/$1_$4bp_processing/$2+$1_$4bp.extendedFrags.fastq ../trimdata/$1_$4bp_processing/$2+$1_$4bp_save
rm ../trimdata/$1_$4bp_processing/$2+$1_$4bp* 2> /dev/null
mv ../trimdata/$1_$4bp_processing/$2+$1_$4bp_save/$2+$1_$4bp* ../trimdata/$1_$4bp_processing/
rmdir ../trimdata/$1_$4bp_processing/$2+$1_$4bp_save
# rename merged reads file
mv ../trimdata/$1_$4bp_processing/$2+$1_$4bp.extendedFrags.fastq ../trimdata/$1_$4bp_processing/$2+$1_$4bp_mappable.fastq
# and zip it
gzip ../trimdata/$1_$4bp_processing/$2+$1_$4bp_mappable.fastq
# and assign permissions
chmod 440 ../trimdata/$1_$4bp_processing/$2+$1*
echo "trim of $2+$1 min length $4 bp complete"
echo "The output can be found here: ../trimdata/$1_$4bp_processing/."
echo ''
| true
|
8fb570906fb93994963425e8af04ed21c9412f76
|
Shell
|
uesugitatsuya/iFeral
|
/bdupload/bdupload
|
UTF-8
| 8,929
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# --------------------------------------------------------------------------------
# Script root; bundled tools (bdinfocli.exe under mono) live next to it.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
bdinfocli_path="$DIR/tools/bdinfocli.exe"
export PATH=$DIR:$DIR/tools:$PATH
export MONO_PATH=$DIR/mono.net-4.0
# --------------------------------------------------------------------------------
# Terminal color/attribute shortcuts via tput. The pinyin names combine
# foreground+background (e.g. baihongse = white on red).
black=$(tput setaf 0); red=$(tput setaf 1); green=$(tput setaf 2); yellow=$(tput setaf 3);
blue=$(tput setaf 4); magenta=$(tput setaf 5); cyan=$(tput setaf 6); white=$(tput setaf 7);
on_red=$(tput setab 1); on_green=$(tput setab 2); on_yellow=$(tput setab 3); on_blue=$(tput setab 4);
on_magenta=$(tput setab 5); on_cyan=$(tput setab 6); on_white=$(tput setab 7); bold=$(tput bold);
dim=$(tput dim); underline=$(tput smul); reset_underline=$(tput rmul); standout=$(tput smso);
reset_standout=$(tput rmso); normal=$(tput sgr0); alert=${white}${on_red}; title=${standout};
baihuangse=${white}${on_yellow}; bailanse=${white}${on_blue}; bailvse=${white}${on_green};
baiqingse=${white}${on_cyan}; baihongse=${white}${on_red}; baizise=${white}${on_magenta};
heibaise=${black}${on_white};
shanshuo=$(tput blink); wuguangbiao=$(tput civis); guangbiao=$(tput cnorm)
# --------------------------------------------------------------------------------
# Intro & checks: clear the screen, show the project banner, print the title.
function _intro() {
  clear
  wget -t1 --timeout=5 --no-check-certificate -qO- https://github.com/Aniverse/inexistence/raw/master/03.Files/bluray.logo.1
  echo -e "${bold}Automated Blu-ray Upload Toolkit${normal}"
}
# Ask the user for the path to the Blu-ray material (sets $pathtostuff).
function _askpath() {
  echo; echo -e "Note that ${blue}BDISO${white} is not supported yet"
  echo -ne "${yellow}${bold}Input path to your stuff: ${normal}"; read -e pathtostuff
  echo
}
# Classify the input (directory = BDMV; otherwise treated as BDISO and
# rejected) and derive all the globals later steps use: paths, a cleaned
# title, the main (largest) m2ts, its duration, and the output directory.
function _stufftype() {
  if [[ -d "${pathtostuff}" ]]; then
    stufftype=BDMV
    echo -e "${magenta}BDMV${white} detected ..."
  else
    stufftype=BDISO
    echo -e "${magenta}BDISO${white} detected ..."
    echo -e "${baihongse}WARNING${white} BDISO is not supported, exiting ..."
    exit 1
  fi
  bdmvpath="$pathtostuff"
  bdpath="$pathtostuff"
  file_title=`basename "$bdmvpath"`
  # Title cleanup: whitespace (incl. the trailing newline) -> dots, strip a
  # trailing dot, drop parentheses.
  file_title_clean="$(echo "$file_title" | tr '[:space:]' '.')"
  file_title_clean="$(echo "$file_title_clean" | sed s'/[.]$//')"
  file_title_clean="$(echo "$file_title_clean" | tr -d '(')"
  file_title_clean="$(echo "$file_title_clean" | tr -d ')')"
  # Largest file under the BD tree is assumed to be the main feature m2ts.
  tempvar=$(find "$bdpath" -type f -print0 | xargs -0 ls -1S)
  main_m2ts_path=$(echo "$tempvar" | head -n 1)
  # Parse HH:MM:SS from ffmpeg's probe output, then convert to seconds.
  duration1=$(ffmpeg -i "$main_m2ts_path" /dev/null 2>&1 | egrep '(Duration:)' | cut -d ' ' -f4 | cut -c1-8)
  duration2=`date -u -d "1970-01-01 $duration1" +%s`
  mkdir -p "$DIR/0utput/$file_title_clean"
  outputpath="$DIR/0utput/$file_title_clean"
  echo
}
# Ask whether/how to run the BDinfo scan (sets $bdscan: auto/manual/no).
function _askscan() {
  echo -e "01) ${cyan}Auto scan the first longest playlist${white}"
  echo -e "02) ${cyan}Manually select which playlist to scan${white}"
  echo -e "03) ${cyan}Do not scan BDinfo${white}"
  echo -ne "${yellow}Whould you like to scan BDinfo?${white} (default: ${cyan}01${white}) "; read response
  case $response in
    01 | 1 | "") bdscan=auto ;;
    02 | 2) bdscan=manual ;;
    03 | 3) bdscan=no ;;
    *) bdscan=auto ;;
  esac
  if [ "${bdscan}" == "auto" ]; then
    echo "The script will scan the first longest playlist automaticly"
  elif [ "${bdscan}" == "manual" ]; then
    echo "Auto scan disabled, you need to select the mpls manually"
  else
    echo "BDinfo will not be scanned"
  fi
  echo
}
# Ask for the screenshot resolution (sets $resolution, and $fenbianlv when
# the user types a custom size).
function _askresolution() {
  echo -e "01) ${cyan}1920x1080${white}"
  echo -e "02) ${cyan}auto detect${white}"
  echo -e "03) ${cyan}Input a specific resolution${white}"
  echo -e "04) ${cyan}Do not take screenshots${white}"
  echo -e "Since some BD's resolution are 1440x1080 with a 16:9 AR, I recommand specify 1920x1080"
  echo -ne "${yellow}Which resolution of the screenshots you want?${white} (default ${cyan}01${white})"; read response
  case $response in
    01 | 1 | "") resolution=1080p ;;
    02 | 2) resolution=auto ;;
    03 | 3) resolution=input ;;
    04 | 4) resolution=no ;;
    *) resolution=1080p ;;
  esac
  if [[ "${resolution}" == "1080p" ]]; then
    echo -e "The script will take 10 screenshots in 1920×1080"
  elif [[ "${resolution}" == "auto" ]]; then
    echo -e "The script will take 10 screenshots in origin resolution"
  elif [[ "${resolution}" == "input" ]]; then
    echo
    read -e -p "Input the screenshost' resolution you want: ${green}" -i 1280x720 fenbianlv
    echo -e "${normal}The script will take 10 screenshots in ${green}$fenbianlv${normal}"
  fi
  echo
}
# Ask whether to create a torrent (sets $newtorrent). The default answer
# flips with the stuff type: Yes for BDISO, No for BDMV.
function _askmktorrent() {
  echo -ne "${yellow}Would you like to create a new torrent file?${white} "
  if [[ "${stufftype}" == "BDISO" ]]; then
    echo -ne "[${cyan}Y${white}]es or [N]o "
    read responce
    case $responce in
      [yY] | [yY][Ee][Ss] | "" ) newtorrent=Yes ;;
      [nN] | [nN][Oo] ) newtorrent=No ;;
      *) newtorrent=Yes ;;
    esac
  else
    echo -ne "[Y]es or [${cyan}N${white}]o "
    read responce
    case $responce in
      [yY] | [yY][Ee][Ss] ) newtorrent=Yes ;;
      [nN] | [nN][Oo] | "" ) newtorrent=No ;;
      *) newtorrent=No ;;
    esac
  fi
  if [[ "${newtorrent}" == "Yes" ]]; then
    echo -e "The script will create a new torrent"
  elif [[ "${newtorrent}" == "No" ]]; then
    echo -e "The script will not create a new torrent"
  fi
  echo
}
# Final confirmation before the long-running work begins; records the start
# time used later by _end's elapsed-time report.
function _preparation() {
  echo "${bold}If you want to stop, Press ${on_red}Ctrl+C${normal} ${bold}; or Press ${on_green}ENTER${normal} ${bold}to start${normal}" ;read input
  clear
  starttime=$(date +%s)
  echo -e "Work start!"
  echo
}
# Run the BDinfo scan (auto answers '1' to pick the first/longest playlist;
# manual lets the user choose) and split the report into quick/main
# summaries plus the full bdinfo.txt.
function _getinfo() {
  if [[ "${bdscan}" == "auto" ]]; then
    echo -ne '1\n' | mono "${bdinfocli_path}" "${bdpath}" "${outputpath}"
  elif [[ "${bdscan}" == "manual" ]]; then
    mono "${bdinfocli_path}" "${bdpath}" "${outputpath}"
  fi
  echo;echo
  if [[ ! "${bdscan}" == "no" ]]; then
    # Extract from "QUICK SUMMARY" to EOF, dropping the last line.
    sed -n '/QUICK SUMMARY/,//p' "${outputpath}/BDINFO.${file_title}.txt" > temptext
    count=`wc -l temptext | awk '{print $1-1}' `
    head -n $count temptext > "${outputpath}/bdinfo.quick.summary.txt"
    rm temptext
    # Extract the "DISC INFO" .. "FILES" section, dropping the last two lines.
    sed -n '/DISC INFO/,/FILES/p' "${outputpath}/BDINFO.${file_title}.txt" > temptext
    count=`wc -l temptext | awk '{print $1-2}' `
    head -n $count temptext > "${outputpath}/bdinfo.main.summary.txt"
    rm temptext
  fi
  mv "${outputpath}/BDINFO.${file_title}.txt" "${outputpath}/bdinfo.txt"
}
# Take 10 screenshots from the main m2ts, spaced by an interval chosen from
# the title's duration ($duration2, seconds).
function _takescreenshots() {
  # Pick the timestamp step so 10 shots roughly span the title.
  if [[ "${duration2}" -ge 3600 ]]; then
    timestampsetting=306
  elif [[ "${duration2}" -ge 1500 && "${duration2}" -lt 3600 ]]; then
    timestampsetting=123
  elif [[ "${duration2}" -ge 600 && "${duration2}" -lt 1500 ]]; then
    timestampsetting=66
  elif [[ "${duration2}" -lt 600 ]]; then
    timestampsetting=25
  fi
  # BUGFIX: $i was never initialized; the old code only worked because GNU
  # expr happens to evaluate `expr + N` as the string N. Start from 0.
  i=0
  if [[ "${resolution}" == "1080p" ]] || [[ "${resolution}" == "input" ]]; then
    if [[ "${resolution}" == "1080p" ]]; then
      fenbianlv=1920x1080
    fi
    for c in {01..10}
    do
      i=`expr $i + $timestampsetting`
      timestamp=`date -u -d @$i +%H:%M:%S`
      ffmpeg -y -ss $timestamp -i "$main_m2ts_path" -vframes 1 -s $fenbianlv "${outputpath}/screenshot${c}.png" >> /dev/null 2>&1
      echo Writing screenshot$c.png from timestamp $timestamp
    done
  elif [[ "${resolution}" == "auto" ]]; then
    for c in {01..10}
    do
      i=`expr $i + $timestampsetting`
      timestamp=`date -u -d @$i +%H:%M:%S`
      ffmpeg -y -ss $timestamp -i "$main_m2ts_path" -vframes 1 "${outputpath}/screenshot${c}.png" >> /dev/null 2>&1
      echo -e Writing screenshot$c.png from timestamp $timestamp
    done
  fi
}
# Build the .torrent (private, 16 MiB pieces) only when the user opted in
# during _askmktorrent.
function _mktorrent() {
  if [[ "${newtorrent}" != "Yes" ]]; then
    return 0
  fi
  mktorrent -v -p -l 24 -a "" -o "$DIR/0utput/${file_title_clean}/${file_title_clean}.torrent" "$bdpath"
}
# Print the end-of-run summary: output location, elapsed time and (unless
# scanning was skipped) the BDInfo quick summary.
# Globals read: starttime, outputpath, bdscan, bold, yellow, normal
function _end() {
    endtime=$(date +%s)
    timeused=$(( endtime - starttime ))
    clear
    echo -e "${bold}Done. Files created in ${yellow}\"${outputpath}\"${normal}"
    if [[ $timeused -gt 60 && $timeused -lt 3600 ]]; then
        timeusedmin=$(( timeused / 60 ))
        timeusedsec=$(( timeused % 60 ))
        echo -e "${bold}Time used: ${timeusedmin} min ${timeusedsec} sec${normal}"
    elif [[ $timeused -ge 3600 ]]; then
        timeusedhour=$(( timeused / 3600 ))
        timeusedmin=$(( (timeused % 3600) / 60 ))
        timeusedsec=$(( timeused % 60 ))
        # Fixed: a stray "}" had replaced the ${bold} prefix on this line.
        echo -e "${bold}Time used: ${timeusedhour} hour ${timeusedmin} min ${timeusedsec} sec${normal}"
    else
        # Fixed: doubled "}" after ${bold}.
        echo -e "${bold}Time used: ${timeused} sec${normal}"
    fi
    # Fixed: "[[ ... ]" was missing the closing "]]", a bash parse error.
    [[ ! "${bdscan}" == "no" ]] && echo && cat "${outputpath}/bdinfo.quick.summary.txt"
    echo
}
# Main flow: run each interactive/processing stage in order.
_intro
_askpath
_stufftype
_askscan
_askresolution
_askmktorrent
_preparation
_getinfo
_takescreenshots
_mktorrent
_end
| true
|
947f52413998618bfc1c72befe0c35897e764333
|
Shell
|
vaslabs/CameraPi
|
/usr/local/bin/taketimelapse
|
UTF-8
| 10,340
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
#===============================================================================
#
# FILE: taketimelapse
#
# USAGE: ./taketimelapse --aperture <value> --startshutterspeed <value>
# --endshutterspeed <value> --videoduration <seconds>
# --timelapseduration <minutes> --processvideo
# --videopath <path> --jpgpath <path> --fps <video fps>
# --scale <video scale>
#
# DESCRIPTION: Script to take timelapse photo sequence and optionally convert
# to AVI video. Timelapse sequence can vary shutterspeed to match
# increasing or decreasing light conditions.
#
# OPTIONS: --aperture <value> --startshutterspeed <value>
# --endshutterspeed <value> --videoduration <seconds>
# --timelapseduration <minutes> --processvideo
# --videopath <path> --jpgpath <path> --fps <video fps>
# --scale <video scale>
#
# REQUIREMENTS:
# BUGS: ---
# NOTES: ---
# AUTHOR: Andre Serfontein
# COMPANY:
# VERSION: 1.0.0
# CREATED: 07/11/2012
# REVISION:
#===============================================================================
#===============================================================================
# Set defaults
#===============================================================================
usage="Script to take timelapse photo sequence and optionally convert
to AVI video. Timelapse sequence can vary shutterspeed to match
increasing or decreasing light conditions.
usage: $0 --aperture <value> --startshutterspeed <value>
--endshutterspeed <value> --videoduration <seconds>
--timelapseduration <minutes> --processvideo
--videopath <path> --jpgpath <path> --fps <video fps>
--scale <video scale>
where:
aperture = Mandatory, AV value to set camera to.
startshutterspeed = Mandatory, shutterspeed to initiate photo sequence with. Use
actual camera values ie. "1/30" etc
endshutterspeed = Mandatory, shutterspeed to end photo sequence with. Can be
same as start shutterspeed. If different, then the shutter-
speed will be gradually increased or decreased as the photo
sequence progresses. Use actual camera values ie. "1/30" etc
videoduration = Mandatory, duration in SECONDS of required video.
timelapseduration = Mandatory, duration in MINUTES of required time lapse sequence.
processvideo = Optional, converts timelapse shots to single AVI video at 24fps.
videopath = Optional, path to save AVI file to. Recommended to use external
USB HDD as this will generate large IO on SD card.
jpgpath = Optional, path to save timelapse JPG files to. Recommended to
use external USB HDD as this will generate large IO on SD card.
fps = Optional, if the time lapse video must be compiled, specify the
desired frames per second the video should be compiled with. 24fps
gives best result but will require 24 photos per second of video.
Default 24.
scale = Optional, video scale. The Pi may not have the capacity to generate
high resolution videos. For high definition use 1920:1080.
Default 640:400.
"
# 1 if a Canon EOS body is visible on the USB bus, 0 otherwise.
HASEOS=`lsusb | grep "Canon" | wc -l | awk '{print $1}'`
# Defaults; all of these can be overridden by the command-line options below.
APERTURE=8
STARTSHUTTERSPEED="1/200"
ENDSHUTTERSPEED="1/200"
VIDEODURATION=30
TIMELAPSEDURATION=120
PROCESSVIDEO=0
VIDEOPATH=""
JPGPATH=""
FPS=24
SCALE="640:400"
# Seconds each capture is assumed to take; used to compute the sleep time.
CAPTUREDELAY=5
#===============================================================================
# Parse arguments
#===============================================================================
# Sync the clock first so timestamps on the captured images are sane.
/usr/local/bin/syncdatetime
# No arguments at all: print usage and bail out.
[ $# -gt 0 ] || { echo >&2 "$usage"; exit 1; }
while [ $# -gt 0 ]; do
  case "$1" in
    --aperture)          APERTURE="$2"; shift ;;
    --startshutterspeed) STARTSHUTTERSPEED="$2"; shift ;;
    --endshutterspeed)   ENDSHUTTERSPEED="$2"; shift ;;
    --videoduration)     VIDEODURATION="$2"; shift ;;
    --timelapseduration) TIMELAPSEDURATION="$2"; shift ;;
    --processvideo)      PROCESSVIDEO=1 ;;
    --videopath)         VIDEOPATH="$2"; shift ;;
    --jpgpath)           JPGPATH="$2"; shift ;;
    --fps)               FPS="$2"; shift ;;
    --scale)             SCALE="$2"; shift ;;
    -*)                  echo >&2 "$usage"; exit 1 ;;
    *)                   break ;;  # first non-option terminates parsing
  esac
  shift
done
# Report the effective settings.
printf '%s\n' \
  "Capturing JPG images for $TIMELAPSEDURATION minutes:" \
  "APERTURE: $APERTURE" \
  "STARTSHUTTERSPEED: $STARTSHUTTERSPEED" \
  "ENDSHUTTERSPEED: $ENDSHUTTERSPEED" \
  "VIDEODURATION: $VIDEODURATION" \
  "TIMELAPSEDURATION: $TIMELAPSEDURATION" \
  "PROCESSVIDEO: $PROCESSVIDEO" \
  "VIDEOPATH: $VIDEOPATH" \
  "JPGPATH: $JPGPATH"
#===============================================================================
# Validate environment
#===============================================================================
if [ -z "$HASEOS" ] || [ $HASEOS -ne 1 ] ; then
echo "EOS camera not detected, exiting"
exit 1
fi
#===============================================================================
# Capture time lapse shots
#===============================================================================
# Clear out any JPGs left over from a previous run.
if [ ! -z "$JPGPATH" ] ; then
rm $JPGPATH/*
fi
# Build a "usbreset /dev/bus/usb/BUS/DEV" command for the camera; gphoto2
# tends to wedge the EOS USB connection, so this is re-run before each call.
RESETEOS=`lsusb | grep "Canon" | sed 's/://g' | awk '{print "usbreset /dev/bus/usb/" $2 "/" $4}'`
eval "$RESETEOS"
CURRMODE=`gphoto2 --get-config /main/capturesettings/autoexposuremode | grep Current | awk '{print $2}'`
if [ "$CURRMODE" != "Manual" ] ; then
echo "Please set camera to Manual mode, exiting"
exit 1
fi
# Map the requested shutter speeds (e.g. "1/30") to the camera's choice
# indices as reported by gphoto2.
eval "$RESETEOS"
STARTSSINDEX=`gphoto2 --get-config /main/capturesettings/shutterspeed | grep "Choice" | sed 's/Choice: //g' | awk -v shutter="$STARTSHUTTERSPEED" '{if ($2 == shutter) {print $1}}'`
eval "$RESETEOS"
ENDSSINDEX=`gphoto2 --get-config /main/capturesettings/shutterspeed | grep "Choice" | sed 's/Choice: //g' | awk -v shutter="$ENDSHUTTERSPEED" '{if ($2 == shutter) {print $1}}'`
eval "$RESETEOS"
SSFIRSTINDEX=1
SSLASTINDEX=`gphoto2 --get-config /main/capturesettings/shutterspeed | grep "Choice" | sed 's/Choice: //g' | tail -1 | awk '{print $1}'`
if [ -z "$STARTSSINDEX" ] ; then
echo "Cannot locate start shutterspeed in valid supported shutterspeed options."
exit 1
fi
if [ -z "$ENDSSINDEX" ] ; then
echo "Cannot locate end shutterspeed in valid supported shutterspeed options."
exit 1
fi
# Calculate number of shutterspeed increments to make during timelapse photography
# If starting and ending shutterspeed values are the same, steps will be 1
if [ $STARTSSINDEX -ne $ENDSSINDEX ] ; then
if [ $STARTSSINDEX -gt $ENDSSINDEX ] ; then
SSSTEPS=$(( STARTSSINDEX - ENDSSINDEX + 1 ))
else
SSSTEPS=$(( ENDSSINDEX - STARTSSINDEX + 1 ))
fi
else
SSSTEPS=1
fi
# Calculate number of frames required to generate video
# NOTE(review): frames are computed at 30/s here although FPS defaults to
# 24 — confirm whether 30 is intentional headroom.
TOTFRAMES=$(( VIDEODURATION * 30 ))
# Calculate tripping point to move to next shutter speed
TRIPTMP=`echo "$TOTFRAMES / $SSSTEPS" | bc -l`
TRIPPOINT=`echo "scale=0; ($TRIPTMP + 0.5) / 1" | bc`
# Calculate number of images to take per minute. Arithmetic too complex
# for bash, so using bc so not to loose decimal place
IMGPERMINUTE=`echo "scale=1; 60 / (($TIMELAPSEDURATION * 60) / $TOTFRAMES)" | bc -l`
# Calculate sleep duration between shots taken into account 5s delay in
# taking each shot.
SLEEPDURATION=`echo "scale=0; (60 - ($CAPTUREDELAY * $IMGPERMINUTE)) / $IMGPERMINUTE" | bc -l`
echo "Calculated timelapse settings are:"
echo "STARTSSINDEX: $STARTSSINDEX"
echo "ENDSSINDEX: $ENDSSINDEX"
echo "SSSTEPS: $SSSTEPS"
echo "TOTFRAMES: $TOTFRAMES"
echo "TRIPPOINT: $TRIPPOINT"
echo "IMGPERMINUTE: $IMGPERMINUTE"
echo "SLEEPDURATION: $SLEEPDURATION"
# Set capture to camera SD and exposure mode to Manual and file format to Medium Fine JPG.
# CR2 capture is too slow and images way too large for video use.
eval "$RESETEOS"
gphoto2 --set-config /main/settings/capturetarget=1 \
--set-config-index /main/imgsettings/imageformat=2 \
--set-config-value /main/capturesettings/autoexposuremode=Manual \
--set-config-value /main/capturesettings/aperture=$APERTURE \
--set-config-index /main/capturesettings/shutterspeed=$STARTSSINDEX
# Capture loop: one frame per iteration; every TRIPPOINT frames, step the
# shutter speed one index toward ENDSSINDEX.
CURRSSINDEX=$STARTSSINDEX
for (( i=1; i <= $TOTFRAMES; i++ ))
do
echo "Capturing"
eval "$RESETEOS"
gphoto2 --capture-image
ISTRIPPOINT=$(( i % TRIPPOINT ))
if [ $STARTSSINDEX -ne $ENDSSINDEX ] && [ $ISTRIPPOINT -eq 0 ] ; then
eval "$RESETEOS"
if [ $STARTSSINDEX -gt $ENDSSINDEX ] ; then
CURRSSINDEX=$(( CURRSSINDEX - 1 ))
echo "Decreasing shutter speed to $CURRSSINDEX"
gphoto2 --set-config-index /main/capturesettings/shutterspeed=$CURRSSINDEX
else
CURRSSINDEX=$(( CURRSSINDEX + 1 ))
echo "Increasing shutter speed to $CURRSSINDEX"
gphoto2 --set-config-index /main/capturesettings/shutterspeed=$CURRSSINDEX
fi
fi
echo "Sleeping for $SLEEPDURATION"
sleep $SLEEPDURATION
done
#===============================================================================
# Compose video
#===============================================================================
if [ $PROCESSVIDEO -eq 1 ] ; then
# Determine the first and last JPG (index and filename) on the camera card.
eval "$RESETEOS"
FIRSTJPGTMP=`gphoto2 --list-files | grep "JPG" | head -1 | sed 's/#//g'`
FIRSTJPGNUM=`echo "$FIRSTJPGTMP" | awk '{print $1}'`
FIRSTJPGFILE=`echo "$FIRSTJPGTMP" | awk '{print $2}'`
eval "$RESETEOS"
LASTJPGTMP=`gphoto2 --list-files | grep "JPG" | tail -1 | sed 's/#//g'`
LASTJPGNUM=`echo "$LASTJPGTMP" | awk '{print $1}'`
LASTJPGFILE=`echo "$LASTJPGTMP" | awk '{print $2}'`
# Download the whole range of captured images, then stitch them to AVI and
# transcode a small FLV preview.
cd $JPGPATH
eval "$RESETEOS"
gphoto2 --get-file ${FIRSTJPGNUM}-${LASTJPGNUM}
# Fixed: removed a stray "enfuse" invocation that referenced variables never
# defined in this script ($HDRPATH, $FIRSTFILE, $LASTFILE, $OUTTYPE,
# $OUTPATH, $ENFUSEFIL) — a leftover from another script that could only fail.
mencoder $JPGPATH/*JPG -mf fps=$FPS:type=jpg -ovc lavc -lavcopts vcodec=mpeg4:mbd=2:trell:vbitrate=7000 -vf scale=$SCALE -oac copy -o $VIDEOPATH/movie.avi
ffmpeg -i $VIDEOPATH/movie.avi -s qvga $VIDEOPATH/movie.flv
fi
exit 0
| true
|
664849a181cf28c38e8471e8095d629c7f01fa35
|
Shell
|
maandr/infra
|
/host-machine/renew-ssl-certificates.sh
|
UTF-8
| 1,443
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Renew Let's Encrypt certificates via certbot in Docker, with nginx's SSL
# vhosts temporarily disabled (renamed to dot-files) during the webroot
# challenge, then re-enabled.
script_dir=$(dirname $(readlink -f "$0"))
project_dir="$(dirname ${script_dir})"
# Terminal colors (blue/green/red are currently unused; kept for future use).
blue=$(tput setaf 4)
yellow=$(tput setaf 3)
green=$(tput setaf 2)
red=$(tput setaf 1)
normal=$(tput sgr0)
printf "\n\n"
printf "${yellow}disable nginx ssl.conf..${normal}\n"
# Renaming to a leading dot hides the conf from nginx's include glob.
mv ${script_dir}/config/nginx/ssl/weidelandschaften.de.conf ${script_dir}/config/nginx/ssl/.weidelandschaften.de.conf
mv ${script_dir}/config/nginx/ssl/etiketten.weidelandschaften.de.conf ${script_dir}/config/nginx/ssl/.etiketten.weidelandschaften.de.conf
printf "\n\n"
printf "${yellow}starting infrastructure..${normal}\n"
./restart.sh
printf "\n\n"
printf "${yellow}fechting ssl-certificates with letsencrypt..${normal}\n"
# Webroot challenge against the static dir served by the reverse proxy;
# on success, HUP the proxy so it reloads the renewed certificates.
docker run -it --rm \
	--name certbot \
	-v /docker-volumes/certs:/etc/letsencrypt \
	-v /docker-volumes/var/lib/letsencrypt:/var/lib/letsencrypt \
	-v /docker-volumes/var/log/letsencrypt:/var/log/letsencrypt \
	-v ${script_dir}/static:/static \
	certbot/certbot \
	renew \
	--webroot \
	--webroot-path=/static \
	--quiet \
	&& docker kill --signal=HUP reverse-proxy
printf "\n\n"
printf "${yellow}enable nginx ssl.conf..${normal}\n"
mv ${script_dir}/config/nginx/ssl/.weidelandschaften.de.conf ${script_dir}/config/nginx/ssl/weidelandschaften.de.conf
mv ${script_dir}/config/nginx/ssl/.etiketten.weidelandschaften.de.conf ${script_dir}/config/nginx/ssl/etiketten.weidelandschaften.de.conf
./restart.sh
| true
|
eb287958e9b9cdc248da456ecb014fe3d5604789
|
Shell
|
kanaWEB/core
|
/graphics/askUser
|
UTF-8
| 143
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Prompt the user with the message in $1 and export the reply as USERANSWER.
# Exits 1 (after a colecho error) when the reply is empty.
# NOTE(review): the export only reaches the caller if this script is
# *sourced*; when executed it dies with the subshell — confirm intended usage.
# -r added so backslashes in the answer are taken literally.
read -r -p "$1:" answer
# Fixed: $answer was unquoted, so multi-word input broke the -z test.
if [ -z "$answer" ]; then
	colecho "No answer provided" $ERR
	exit 1
fi
export USERANSWER=$answer
| true
|
b18a4d19f8430b531e4bb0315ff051a7eec1bb18
|
Shell
|
ykhemani/vault-local
|
/bin/vault.sh
|
UTF-8
| 5,100
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a single-node local Vault (raft storage): wipe old state, write
# config, start, initialize with a single unseal key, unseal, enable audit
# devices and optionally install a license.
# Every setting below honors a pre-set environment variable, else a default.
export VAULT_TLS_DISABLE=${VAULT_TLS_DISABLE:-true}
export TLS_PRIVATE_KEY=${TLS_PRIVATE_KEY:-/etc/ssl/private/privkey.pem}
export TLS_CERTIFICATE=${TLS_CERTIFICATE:-/etc/ssl/certs/fullchain.pem}
export VAULT_FQDN=${VAULT_FQDN:-localhost}
# API scheme follows the TLS setting.
if [ "${VAULT_TLS_DISABLE}" == "true" ]
then
  export VAULT_ADDR="http://${VAULT_FQDN}:8200"
else
  export VAULT_ADDR="https://${VAULT_FQDN}:8200"
fi
echo "Set VAULT_ADDR to ${VAULT_ADDR}."
export CLUSTER_ADDR="https://${VAULT_FQDN}:8201"
export DEFAULT_LEASE_TTL=${DEFAULT_LEASE_TTL:-1h}
export MAX_LEASE_TTL=${MAX_LEASE_TTL:-24h}
export VAULT=${VAULT:-/usr/local/bin/vault}
# Directory layout under VAULT_TOP.
export VAULT_TOP=${VAULT_TOP:-/opt/vault}
export VAULT_DATA_DIR=${VAULT_DATA_DIR:-raft}
export VAULT_LOG_DIR=${VAULT_LOG_DIR:-log}
export VAULT_AUDIT_DIR=${VAULT_AUDIT_DIR:-audit}
export VAULT_AUDIT_RAW_DIR=${VAULT_AUDIT_RAW_DIR:-audit-raw}
export VAULT_CONFIG_DIR=${VAULT_CONFIG_DIR:-conf}
export VAULT_PLUGIN_DIR=${VAULT_PLUGIN_DIR:-plugins}
export VAULT_LOG=${VAULT_TOP}/${VAULT_LOG_DIR}/vault.log
export VAULT_AUDIT=${VAULT_TOP}/${VAULT_AUDIT_DIR}/vault_audit.log
export VAULT_AUDIT_RAW=${VAULT_TOP}/${VAULT_AUDIT_RAW_DIR}/vault_audit_raw.log
export VAULTRC=${VAULT_TOP}/vaultrc
export VAULT_INIT_KEYS=${VAULT_TOP}/init_keys
export VAULT_CONFIG=${VAULT_TOP}/${VAULT_CONFIG_DIR}/vault.hcl
echo "Stopping vault if it is running."
pkill vault
echo "Erasing any existing Vault config, data, logs, etc."
rm -rfv \
${VAULT_TOP}/${VAULT_DATA_DIR}/* \
${VAULT_LOG} \
${VAULT_AUDIT} \
${VAULT_AUDIT_RAW} \
${VAULTRC} \
${VAULT_INIT_KEYS}
echo "Creating required directories."
mkdir -p \
${VAULT_TOP}/${VAULT_LOG_DIR} \
${VAULT_TOP}/${VAULT_AUDIT_DIR} \
${VAULT_TOP}/${VAULT_AUDIT_RAW_DIR} \
${VAULT_TOP}/${VAULT_DATA_DIR} \
${VAULT_TOP}/${VAULT_CONFIG_DIR}
echo "Writing Vault config."
cat << EOF > ${VAULT_CONFIG}
storage "raft" {
path = "${VAULT_TOP}/${VAULT_DATA_DIR}"
node_id = "vault"
}
listener "tcp" {
address = "0.0.0.0:8200"
cluster_address = "0.0.0.0:8201"
tls_disable = "${VAULT_TLS_DISABLE}"
tls_key_file = "${TLS_PRIVATE_KEY}"
tls_cert_file = "${TLS_CERTIFICATE}"
tls_min_version = "tls12"
}
api_addr = "${VAULT_ADDR}"
cluster_addr = "${CLUSTER_ADDR}"
disable_mlock = "true"
ui = "true"
max_lease_ttl = "${MAX_LEASE_TTL}"
default_lease_ttl = "${DEFAULT_LEASE_TTL}"
cluster_name = "vault"
insecure_tls = "false"
plugin_directory = "${VAULT_TOP}/${VAULT_PLUGIN_DIR}"
EOF
echo "Starting Vault"
${VAULT} server -config ${VAULT_CONFIG} > ${VAULT_LOG} 2>&1 &
unset VAULT_TOKEN
echo "Waiting for $VAULT_ADDR/v1/sys/health to return 501 (not initialized)."
vault_http_return_code=0
while [ "$vault_http_return_code" != "501" ]
do
vault_http_return_code=$(curl --insecure -s -o /dev/null -w "%{http_code}" $VAULT_ADDR/v1/sys/health)
sleep 1
done
echo "Initializing Vault"
curl \
--insecure \
-s \
--header "X-Vault-Request: true" \
--request PUT \
--data '{"secret_shares":1,"secret_threshold":1}' $VAULT_ADDR/v1/sys/init \
> ${VAULT_INIT_KEYS}
export VAULT_TOKEN=$(cat ${VAULT_INIT_KEYS} | jq -r '.root_token')
export UNSEAL_KEY=$(cat ${VAULT_INIT_KEYS} | jq -r .keys[])
# ${VAULT} operator init \
# -format=json \
# -key-shares 1 \
# -key-threshold 1 \
# > ${VAULT_INIT_KEYS}
# export VAULT_TOKEN=$(cat ${VAULT_INIT_KEYS} | jq -r '.root_token')
# export UNSEAL_KEY=$(cat ${VAULT_INIT_KEYS} | jq -r .unseal_keys_hex[])
echo "Writing vaultrc ${VAULTRC}."
cat << EOF > ${VAULTRC}
#!/bin/bash
export VAULT_TOKEN=${VAULT_TOKEN}
export VAULT_ADDR=${VAULT_ADDR}
export VAULT_SKIP_VERIFY=true
export UNSEAL_KEY=${UNSEAL_KEY}
EOF
# unseal
echo "Waiting for $VAULT_ADDR/v1/sys/health to return 503 (sealed)."
vault_http_return_code=0
while [ "$vault_http_return_code" != "503" ]
do
vault_http_return_code=$(curl --insecure -s -o /dev/null -w "%{http_code}" $VAULT_ADDR/v1/sys/health)
sleep 1
done
echo "Unsealing Vault"
curl \
-s \
--request PUT \
--data "{\"key\": \"${UNSEAL_KEY}\"}" \
$VAULT_ADDR/v1/sys/unseal | jq -r .
echo "Waiting for $VAULT_ADDR/v1/sys/health to return 200 (initialized, unsealed, active)."
vault_http_return_code=0
while [ "$vault_http_return_code" != "200" ]
do
vault_http_return_code=$(curl --insecure -s -o /dev/null -w "%{http_code}" $VAULT_ADDR/v1/sys/health)
sleep 1
done
# Enable audit log
echo "Enable audit device ${VAULT_AUDIT}."
${VAULT} audit enable \
  file \
  file_path=${VAULT_AUDIT}
# Second device at path "raw" with HMAC disabled (plaintext values in log).
echo "Enable raw audit device ${VAULT_AUDIT_RAW}."
${VAULT} audit enable \
  -path=raw file \
  file_path=${VAULT_AUDIT_RAW} \
  log_raw=true
# Optionally install an Enterprise license if $vault_license was exported.
if [ "${vault_license}" != "" ]
then
  echo "Installing Vault license."
  curl \
    --insecure \
    --header "X-Vault-Token: $VAULT_TOKEN" \
    --request PUT \
    --data "{\"text\": \"${vault_license}\"}" \
    $VAULT_ADDR/v1/sys/license
else
  echo "No Vault license specified."
fi
echo "Vault is ready for use."
echo "Please source vaultrc file ${VAULTRC} to configure your environment:"
echo ". ${VAULTRC}"
| true
|
6fdbab868467a525f02a5d1ebb245dde2368304a
|
Shell
|
dbhaig/4642HD_ir_tx
|
/scripts/change_channel.sh
|
UTF-8
| 716
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# File Name: change_channel.sh
#
# Copyright 2015-2016 Don Haig (time4tux at gmail dot com)
# MIT Licence (See LICENSE.txt for details)
#
# Creation Date: 21-11-2015
LAST_MODIFIED="Tue 20 Dec 2016 04:42:44 PM EST -0500 "
VERSION=0.1
#
# Purpose: A script for MythTV to call to change the channel on
# 4642HD using an arduino base IR Tx
#
LOG_FILE=/home/mythtv/change_channel.log
date=$(date +%Y-%m-%d-%H:%M:%S)
me=$(whoami)
# Fixed: the original wrapped the commands in `if $( ... >> log 2>&1 )`.
# Because the output was redirected, the substitution always expanded to an
# empty command and the `if` succeeded unconditionally — the exit status of
# the channel changer was never actually tested.
if /home/mythtv/talk2irtx.py "CHANNEL $1" >> "$LOG_FILE" 2>&1; then
    # echo $result >> $LOG_FILE
    echo "$date 4642HD changed to channel'$1' $me" >> "$LOG_FILE"
    # Start the PVR2 recording in the background. (The original backgrounded
    # form could never report failure either, so logging unconditionally
    # preserves the observable behavior.)
    /home/mythtv/pvr2_record.py "$1" >> "$LOG_FILE" 2>&1 &
    echo "$date PVR2 record started" >> "$LOG_FILE"
fi
| true
|
9d75558e7727b01244337b12362dfdcd9f32724d
|
Shell
|
codeprof/rpi-access-point-gen
|
/export-image/04-finalise/01-run.sh
|
UTF-8
| 6,594
| 2.796875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash -e
# pi-gen export stage: scrub the rootfs of build artefacts, secrets and
# per-machine state, install the custom boot config, then (part 2) package
# the image.
IMG_FILE="${STAGE_WORK_DIR}/${IMG_DATE}-${IMG_NAME}${IMG_SUFFIX}.img"
INFO_FILE="${STAGE_WORK_DIR}/${IMG_DATE}-${IMG_NAME}${IMG_SUFFIX}.info"
# Stop the fake hardware clock and hardlink duplicate docs inside the image.
on_chroot << EOF
/etc/init.d/fake-hwclock stop
hardlink -t /usr/share/doc
EOF
if [ -d "${ROOTFS_DIR}/home/master/.config" ]; then
	chmod 700 "${ROOTFS_DIR}/home/master/.config"
fi
# Remove apt caches, qemu helper, editor backups and shadow backup files.
rm -f "${ROOTFS_DIR}/etc/apt/apt.conf.d/51cache"
rm -f "${ROOTFS_DIR}/usr/bin/qemu-arm-static"
rm -f "${ROOTFS_DIR}/etc/apt/sources.list~"
rm -f "${ROOTFS_DIR}/etc/apt/trusted.gpg~"
rm -f "${ROOTFS_DIR}/etc/passwd-"
rm -f "${ROOTFS_DIR}/etc/group-"
rm -f "${ROOTFS_DIR}/etc/shadow-"
rm -f "${ROOTFS_DIR}/etc/gshadow-"
rm -f "${ROOTFS_DIR}/etc/subuid-"
rm -f "${ROOTFS_DIR}/etc/subgid-"
rm -f "${ROOTFS_DIR}"/var/cache/debconf/*-old
rm -f "${ROOTFS_DIR}"/var/lib/dpkg/*-old
rm -f "${ROOTFS_DIR}"/usr/share/icons/*/icon-theme.cache
# machine-id must be empty so each flashed device generates its own.
rm -f "${ROOTFS_DIR}/var/lib/dbus/machine-id"
true > "${ROOTFS_DIR}/etc/machine-id"
ln -nsf /proc/mounts "${ROOTFS_DIR}/etc/mtab"
# Truncate (not delete) every log file.
find "${ROOTFS_DIR}/var/log/" -type f -exec cp /dev/null {} \;
rm -f "${ROOTFS_DIR}/root/.vnc/private.key"
rm -f "${ROOTFS_DIR}/etc/vnc/updateid"
update_issue "$(basename "${EXPORT_DIR}")"
install -m 640 "${ROOTFS_DIR}/etc/rpi-issue" "${ROOTFS_DIR}/boot/issue.txt"
install files/LICENSE.oracle "${ROOTFS_DIR}/boot/"
# Replace the stock boot config and strip firmware for unsupported boards.
rm -f "${ROOTFS_DIR}/boot/config.txt"
install files/config.txt "${ROOTFS_DIR}/boot/"
rm -f "${ROOTFS_DIR}/boot/bcm2710-rpi-cm3.dtb"
rm -f "${ROOTFS_DIR}/boot/bcm2710-rpi-3-b.dtb"
rm -f "${ROOTFS_DIR}/boot/bcm2709-rpi-2-b.dtb"
rm -f "${ROOTFS_DIR}/boot/bcm2708-rpi-cm.dtb"
rm -f "${ROOTFS_DIR}/boot/bcm2708-rpi-b-plus.dtb"
rm -f "${ROOTFS_DIR}/boot/bcm2708-rpi-b.dtb"
rm -f "${ROOTFS_DIR}/boot/bcm2708-rpi-0-w.dtb"
rm -f "${ROOTFS_DIR}/boot/kernel.img"
# Fixed: the glob must be outside the quotes — quoted, rm looked for files
# literally named "*" / "*nopasswd" and deleted nothing.
rm -f "${ROOTFS_DIR}/boot/overlays/"*
rm -f "${ROOTFS_DIR}/etc/sudoers.d/"*nopasswd
mkdir -p "${ROOTFS_DIR}/etc/hostapd"
mkdir -p "${ROOTFS_DIR}/etc/default"
mkdir -p "${ROOTFS_DIR}/etc/ssh"
mkdir -p "${ROOTFS_DIR}/etc/network"
mkdir -p "${ROOTFS_DIR}/etc/wpa_supplicant"
mkdir -p "${ROOTFS_DIR}/etc/modprobe.d"
mkdir -p "${ROOTFS_DIR}/etc/systemd"
# NOTE(review): these chmod/chown calls target the HOST's /etc files, not
# "${ROOTFS_DIR}/etc/..." like everything else in this script — confirm
# whether that is intentional or a missing ${ROOTFS_DIR} prefix.
chmod 644 /etc/passwd
chown root:root /etc/passwd
chmod 644 /etc/group
chown root:root /etc/group
chmod 600 /etc/shadow
chown root:root /etc/shadow
chmod 600 /etc/gshadow
chown root:root /etc/gshadow
# Install the access-point configuration files into the image.
install -m 600 files/reboot "${ROOTFS_DIR}/etc/cron.d/reboot"
install -m 600 files/sudoers "${ROOTFS_DIR}/etc/sudoers"
install -m 640 files/hostapd.conf "${ROOTFS_DIR}/etc/hostapd/hostapd.conf"
install -m 640 files/dhcpcd.conf "${ROOTFS_DIR}/etc/dhcpcd.conf"
install -m 600 files/wpa_supplicant.conf "${ROOTFS_DIR}/etc/wpa_supplicant/wpa_supplicant.conf"
install -m 640 files/iptables.ipv4.nat "${ROOTFS_DIR}/etc/iptables.ipv4.nat"
install -m 640 files/rc.local "${ROOTFS_DIR}/etc/rc.local"
install -m 640 files/interfaces "${ROOTFS_DIR}/etc/network/interfaces"
install -m 640 files/dnsmasq.conf "${ROOTFS_DIR}/etc/dnsmasq.conf"
install -m 600 files/sshd_config "${ROOTFS_DIR}/etc/ssh/sshd_config"
install -m 640 files/ntp.conf "${ROOTFS_DIR}/etc/ntp.conf"
install -m 640 files/blacklist.conf "${ROOTFS_DIR}/etc/modprobe.d/blacklist.conf"
install -m 640 files/journald.conf "${ROOTFS_DIR}/etc/systemd/journald.conf"
# Lock down cron ownership/permissions inside the image (CIS-style hardening).
chown root:root "${ROOTFS_DIR}/etc/ssh/sshd_config"
chown root:root "${ROOTFS_DIR}/etc/crontab"
chmod og-rwx "${ROOTFS_DIR}/etc/crontab"
chown root:root "${ROOTFS_DIR}/etc/cron.hourly"
chmod og-rwx "${ROOTFS_DIR}/etc/cron.hourly"
chown root:root "${ROOTFS_DIR}/etc/cron.daily"
chmod og-rwx "${ROOTFS_DIR}/etc/cron.daily"
chown root:root "${ROOTFS_DIR}/etc/cron.weekly"
chmod og-rwx "${ROOTFS_DIR}/etc/cron.weekly"
chown root:root "${ROOTFS_DIR}/etc/cron.monthly"
chmod og-rwx "${ROOTFS_DIR}/etc/cron.monthly"
chown root:root "${ROOTFS_DIR}/etc/cron.d"
chmod og-rwx "${ROOTFS_DIR}/etc/cron.d"
chown root:root "${ROOTFS_DIR}/etc/cron.d/reboot"
chmod og-rwx "${ROOTFS_DIR}/etc/cron.d/reboot"
# Kernel hardening + IPv4 forwarding (needed for the NAT access point).
echo "net.ipv4.ip_forward=1" >> "${ROOTFS_DIR}/etc/sysctl.conf"
echo "kernel.sysrq=0" >> "${ROOTFS_DIR}/etc/sysctl.conf"
echo "fs.protected_hardlinks=1" >> "${ROOTFS_DIR}/etc/sysctl.conf"
echo "fs.protected_symlinks=1" >> "${ROOTFS_DIR}/etc/sysctl.conf"
echo "net.ipv6.conf.all.disable_ipv6 = 1" >> "${ROOTFS_DIR}/etc/sysctl.conf"
echo "net.ipv6.conf.default.disable_ipv6 = 1" >> "${ROOTFS_DIR}/etc/sysctl.conf"
echo "net.ipv4.icmp_ignore_bogus_error_responses = 1" >> "${ROOTFS_DIR}/etc/sysctl.conf"
echo "kernel.randomize_va_space=2" >> "${ROOTFS_DIR}/etc/sysctl.conf"
echo "kernel.exec-shield=1" >> "${ROOTFS_DIR}/etc/sysctl.conf"
echo "fs.suid_dumpable=0" >> "${ROOTFS_DIR}/etc/sysctl.conf"
echo "net.ipv4.conf.all.accept_redirects=0" >> "${ROOTFS_DIR}/etc/sysctl.conf"
echo "net.ipv4.conf.default.accept_redirects=0" >> "${ROOTFS_DIR}/etc/sysctl.conf"
echo "net.ipv4.conf.all.send_redirects=0" >> "${ROOTFS_DIR}/etc/sysctl.conf"
echo "net.ipv4.conf.default.send_redirects=0" >> "${ROOTFS_DIR}/etc/sysctl.conf"
echo "* hard core 0" >> "${ROOTFS_DIR}/etc/security/limits.conf"
echo 'DAEMON_CONF="/etc/hostapd/hostapd.conf"' >> "${ROOTFS_DIR}/etc/default/hostapd"
# Strip world access from binaries/config, then restore the few that login
# and basic shell use require.
find -L "${ROOTFS_DIR}/bin" -type f -exec chmod o-rwx {} +
find -L "${ROOTFS_DIR}/sbin" -type f -exec chmod o-rwx {} +
find -L "${ROOTFS_DIR}/usr" -type f -exec chmod o-rwx {} +
find -L "${ROOTFS_DIR}/etc" -type f -exec chmod o-rwx {} +
chmod o+x "${ROOTFS_DIR}/bin/bash"
chmod o+x "${ROOTFS_DIR}/bin/su"
chmod o+x "${ROOTFS_DIR}/bin/ls"
#setfacl -m u:master:x "${ROOTFS_DIR}/bin/su"
# Build the .info sidecar: issue text, firmware/kernel provenance, package list.
cp "$ROOTFS_DIR/etc/rpi-issue" "$INFO_FILE"
{
	firmware=$(zgrep "firmware as of" \
	"$ROOTFS_DIR/usr/share/doc/raspberrypi-kernel/changelog.Debian.gz" | \
	head -n1 | sed -n 's|.* \([^ ]*\)$|\1|p')
	printf "\nFirmware: https://github.com/raspberrypi/firmware/tree/%s\n" "$firmware"
	kernel="$(curl -s -L "https://github.com/raspberrypi/firmware/raw/$firmware/extra/git_hash")"
	printf "Kernel: https://github.com/raspberrypi/linux/tree/%s\n" "$kernel"
	uname="$(curl -s -L "https://github.com/raspberrypi/firmware/raw/$firmware/extra/uname_string7")"
	printf "Uname string: %s\n" "$uname"
	printf "\nPackages:\n"
	dpkg -l --root "$ROOTFS_DIR"
} >> "$INFO_FILE"
# Unmount, reclaim free blocks, then zip the image into the deploy dir.
ROOT_DEV="$(mount | grep "${ROOTFS_DIR} " | cut -f1 -d' ')"
unmount "${ROOTFS_DIR}"
zerofree -v "${ROOT_DEV}"
unmount_image "${IMG_FILE}"
mkdir -p "${DEPLOY_DIR}"
rm -f "${DEPLOY_DIR}/image_${IMG_DATE}-${IMG_NAME}${IMG_SUFFIX}.zip"
pushd "${STAGE_WORK_DIR}" > /dev/null
zip "${DEPLOY_DIR}/image_${IMG_DATE}-${IMG_NAME}${IMG_SUFFIX}.zip" \
"$(basename "${IMG_FILE}")"
popd > /dev/null
cp "$INFO_FILE" "$DEPLOY_DIR"
| true
|
d5974f2cc7294ccf949d00985ac511b2d94a0c9f
|
Shell
|
mdeora/druid_config
|
/update.sh
|
UTF-8
| 2,281
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print a bold red "error: <message>" to stderr and terminate the script
# with exit status 1.
function abort()
{
    local red=$'\e[1;31m' reset=$'\e[0m'
    echo "${red}error:" $1 "${reset}" >&2
    exit 1
}
# All five druid host addresses must be exported by the caller (whirr setup).
[[ -n "$druid_coordinator" && -n "$druid_broker" && -n "$druid_historical_1" && -n "$druid_overlord" && -n "$druid_middleManager" ]] || abort "exports not set up"
# Pull each node's runtime.properties into the current directory.
scp -i $WHIRR_PEM -o "UserKnownHostsFile /dev/null" -o StrictHostKeyChecking=no $druid_coordinator:/usr/local/druid-services/config/coordinator/runtime.properties coordinator.properties
scp -i $WHIRR_PEM -o "UserKnownHostsFile /dev/null" -o StrictHostKeyChecking=no $druid_broker:/usr/local/druid-services/config/broker/runtime.properties broker.properties
scp -i $WHIRR_PEM -o "UserKnownHostsFile /dev/null" -o StrictHostKeyChecking=no $druid_historical_1:/usr/local/druid-services/config/historical/runtime.properties historical.properties
scp -i $WHIRR_PEM -o "UserKnownHostsFile /dev/null" -o StrictHostKeyChecking=no $druid_overlord:/usr/local/druid-services/config/overlord/runtime.properties overlord.properties
scp -i $WHIRR_PEM -o "UserKnownHostsFile /dev/null" -o StrictHostKeyChecking=no $druid_middleManager:/usr/local/druid-services/config/middleManager/runtime.properties middleManager.properties
# Redact credentials and bucket names in the local copies before sharing.
for I in *.properties; do
  perl -pi -e 's/druid.s3.accessKey=.*/druid.s3.accessKey=XXXXXXXXXXXX/g' $I
  perl -pi -e 's/druid.s3.secretKey=.*/druid.s3.secretKey=xxxxxxxxxxxxxxxxxxxx/g' $I
  perl -pi -e 's/druid.storage.bucket=.*/druid.storage.bucket=s3-bucket/g' $I
  perl -pi -e 's/druid.indexer.logs.s3Bucket=.*/druid.indexer.logs.s3Bucket=s3-bucket/g' $I
done
# Dump each node's running JVM command line (heap flags etc.) for review.
echo "JVM settings:"
ssh -i $WHIRR_PEM -o "UserKnownHostsFile /dev/null" -o StrictHostKeyChecking=no $druid_coordinator "ps -efw | grep java | grep -v grep" 2>/dev/null
ssh -i $WHIRR_PEM -o "UserKnownHostsFile /dev/null" -o StrictHostKeyChecking=no $druid_broker "ps -efw | grep java | grep -v grep" 2>/dev/null
ssh -i $WHIRR_PEM -o "UserKnownHostsFile /dev/null" -o StrictHostKeyChecking=no $druid_historical_1 "ps -efw | grep java | grep -v grep" 2>/dev/null
ssh -i $WHIRR_PEM -o "UserKnownHostsFile /dev/null" -o StrictHostKeyChecking=no $druid_overlord "ps -efw | grep java | grep -v grep" 2>/dev/null
ssh -i $WHIRR_PEM -o "UserKnownHostsFile /dev/null" -o StrictHostKeyChecking=no $druid_middleManager "ps -efw | grep java | grep -v grep" 2>/dev/null
| true
|
eb875c425d7af34efdac829fe3236a90c2fb5b32
|
Shell
|
tedmiston/qcbrunch
|
/.github/actions/healthchecks/entrypoint.sh
|
UTF-8
| 665
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/sh
# GitHub Action entrypoint: ping a healthchecks.io-style URL for the job's
# success/failure route based on the workflow-supplied INPUT_* variables.
# NOTE(review): "pipefail" is not supported by every /bin/sh (e.g. older
# dash); busybox ash and bash accept it — confirm the container base image.
set -euo pipefail
# if [ $# -eq 0 ]; then
#   echo 'Usage: <job status> <healthchecks url>'
#   exit 1
# fi
# Normalize the reported job status to lower case.
job_status=$(echo "${INPUT_STATUS}" | tr '[:upper:]' '[:lower:]')
success_url="${INPUT_URL}${INPUT_SUCCESS_ROUTE}"
failure_url="${INPUT_URL}${INPUT_FAILURE_ROUTE}"
# Fixed: POSIX test(1) compares strings with "=", not "==" — the original
# bashism fails under dash-like /bin/sh implementations.
if [ "${job_status}" = 'success' ];
then
    curl --silent --show-error --output /dev/null --retry 3 "${success_url}"
elif [ "${job_status}" = 'failure' ];
then
    curl --silent --show-error --output /dev/null --retry 3 "${failure_url}"
elif [ "${job_status}" = 'cancelled' ];
then
    # Cancelled runs are logged but not reported to the healthcheck.
    echo 'job cancelled'
else
    echo 'invalid job status'
    exit 1
fi
echo "logged ${job_status}"
| true
|
505d2bdeb36619475d9ce3cc1f0f55150a40dd73
|
Shell
|
bridgecrew-perf7/mini-deployer
|
/install.sh
|
UTF-8
| 2,052
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# Map the hardware name reported by `uname -i` to a Go-style release
# architecture label (amd64, 386, arm, arm64, PowerPC). Prints nothing
# when the architecture cannot be determined.
function getArch() {
    local machine
    machine=$(uname -i)
    case "$machine" in
        x86_64 | amd64)
            echo amd64
            ;;
        i?86)
            echo "386"
            ;;
        arm*)
            echo arm
            ;;
        powerpc | ppc64)
            echo PowerPC
            ;;
        aarch64)
            echo arm64
            ;;
        unknown)
            # Some kernels report "unknown"; fall back to scanning `uname -a`.
            if [ "$(uname -a | grep -c armv)" = 1 ]; then
                echo arm
            fi
            if [ "$(uname -a | grep -c aarch64)" = 1 ]; then
                echo arm64
            fi
            ;;
    esac
}
# Print a bug-report hint with the local uname details so users can file
# an informative issue when their platform is not supported.
function issueShow() {
echo
echo "If you think that is an error, please report an issue with your uname parameters:"
echo "https://github.com/ramzes642/mini-deployer/issues"
echo "uname -a: " $(uname -a)
echo "uname -i: " $(uname -i)
echo "uname -o: " $(uname -o)
}
# Only systemd-managed systems are supported (we install a unit file below).
if [ $(ps -p 1 | grep -c systemd) != 1 ]; then
echo "ERROR: Only systemd linux systems are supported, sorry"
issueShow
exit
fi
if [ "$(uname -o)" = "GNU/Linux" ]; then
# NOTE(review): fixed, predictable directory in /tmp — mktemp -d would be
# safer on shared machines.
mkdir /tmp/install-deployer
cd /tmp/install-deployer || exit
# Resolve the latest release asset for this architecture from the GitHub
# API and download it with wget (URLs arrive on stdin via -i -).
curl -s https://api.github.com/repos/ramzes642/mini-deployer/releases/latest |
grep "mini-deployer.*$(getArch).tar.gz" |
cut -d : -f 2,3 |
tr -d \" |
wget -qi -
tar -xzf mini-deployer*.tar.gz
# shellcheck disable=SC2181
# tar failing here means no matching asset was downloaded for this arch.
if [ $? != 0 ]; then
echo "ERROR: Download failed, it seems that your arch '$(getArch)' is not supported"
issueShow
rm -rf /tmp/install-deployer
exit
fi
# Keep an existing config; only seed the sample on first install.
if [ ! -e /etc/mini-deployer.json ]; then
mv /tmp/install-deployer/config.sample.json /etc/mini-deployer.json
fi
mv /tmp/install-deployer/deployer.service /etc/systemd/system/mini-deployer.service
# Stop a running instance before replacing its binary.
if [ -e /etc/systemd/system/mini-deployer.service ]; then
systemctl stop mini-deployer.service
fi
mv /tmp/install-deployer/mini-deployer /usr/bin/mini-deployer
systemctl enable mini-deployer.service
systemctl start mini-deployer.service
rm -rf /tmp/install-deployer
echo Mini-deployer succecefully installed
echo Run \# journalctl -fu mini-deployer to inspect logs
echo Edit /etc/mini-deployer.json to modify deployment hooks
else
echo Unsupported $(uname -o)
issueShow
fi
| true
|
2599614593483168e423dc74def8f337aae7d031
|
Shell
|
virtualspyder/MyRestaurant
|
/Makefile.sh
|
UTF-8
| 445
| 3.125
| 3
|
[] |
no_license
|
# Resolve the directory this script lives in (follows the sourced path).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Source files and output path, pre-wrapped in single quotes so the printed
# command can be pasted into a shell even when $DIR contains spaces.
GENERAL="'$DIR/General.cpp'"
RESTAURANT="'$DIR/Restaurant.cpp'"
MAIN="'$DIR/main.cpp'"
EXECUTABLE="restaurant.out"
OUTPUT_FILE="'$DIR/$EXECUTABLE'"
# Assemble and display the g++ invocation for the user to copy.
COMMAND="g++ $GENERAL $RESTAURANT $MAIN -lncurses -o $OUTPUT_FILE"
printf '%s\n' "$COMMAND"
echo Note : Copy the generated command line above to compile this restaurant-project
echo To run the compiled executable, run cd "'$DIR'" and "'./$EXECUTABLE'"
| true
|
3a44a9cb550152e169895b4f718975d8b5933887
|
Shell
|
yunshengb/lucida
|
/lucida/questionanswering/lucida/compile-qa.sh
|
UTF-8
| 894
| 2.765625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# NOTE ABOUT CLASSPATHS:
# Classpaths contain jar files and paths to the TOP of package hierarchies.
# For example, say a program imports the class info.ephyra.OpenEphyra
# Now javac knows to look for the class info.ephyra.OpenEphyra in the directory info/ephyra/
# However, javac still needs the classpath to the package.
# Compile server
# 'source' command: Rather than forking a subshell, execute all commands
# in the current shell.
# Build OpenEphyra first, then pull in JAVA_CLASS_PATH/LUCIDAROOT from the
# shared config (sourced so the variables land in this shell).
cd ../common
./compile-openephyra.sh
. ./qa-compile-config.inc
cd -
# Add command center to class path
export JAVA_CLASS_PATH=$JAVA_CLASS_PATH:$LUCIDAROOT/commandcenter/gen-java
# Use cp flag to avoid cluttering up the CLASSPATH environment variable
echo -e "javac -cp $JAVA_CLASS_PATH QADaemon.java QAServiceHandler.java gen-java/qastubs/QAService.java\n\n"
javac -cp $JAVA_CLASS_PATH QADaemon.java QAServiceHandler.java gen-java/qastubs/QAService.java
| true
|
99a000e9046145c84d1cb3325a3bbc074828367f
|
Shell
|
freesideatlanta/WaiverServ
|
/Install/install.sh
|
UTF-8
| 1,422
| 2.734375
| 3
|
[] |
no_license
|
# Provision a Raspberry Pi kiosk for the Freeside waiver-signing station.
# updates and stuff
sudo apt-get update
sudo apt-get -y upgrade
#keep screen on and remove cursor if no movment
sudo apt-get install -y unclutter
#enable ssh
sudo touch /boot/ssh
#move our stuff to a better spot
sudo mkdir -p /usr/local/bin/FSWaiver
sudo cp ./run_waiver.sh /usr/local/bin/FSWaiver/run_waiver.sh
sudo cp ../WaiverServ/bin/ARM/Release/WaiverServ.out /usr/local/bin/FSWaiver/WaiverServ
sudo cp ../WaiverServ/FS_Waiver_Apr_2019.png /usr/local/bin/FSWaiver/FS_Waiver_Apr_2019.png
#Install udev rules for tablet
# Fixed: a stray space split the destination ("/etc/udev/rules.d/ 50-topaz.rules"),
# which made cp treat the rules directory as a second source and fail.
sudo cp ./50-topaz.rules /etc/udev/rules.d/50-topaz.rules
#Execute/Read Permissions
sudo chmod a+r /usr/local/bin/FSWaiver/
sudo chmod a+rx /usr/local/bin/FSWaiver/WaiverServ
sudo chmod a+rx /usr/local/bin/FSWaiver/run_waiver.sh
sudo chmod a+r /usr/local/bin/FSWaiver/FS_Waiver_Apr_2019.png
#Make Folder To store Waivers
sudo mkdir /FSWaiver
sudo chmod a+rw /FSWaiver
#remove annoying thing asking us to change password
# NOTE(review): the trailing "&" backgrounds these rm calls — presumably to
# keep going if the file is absent; "|| true" would express that more clearly.
sudo rm /etc/xdg/lxsession/LXDE-pi/sshpwd.sh &
sudo rm /etc/xdg/lxsession/LXDE/sshpwd.sh &
#attempt to change the keyboard layout
sudo cp tools/keyboard /etc/default/keyboard
invoke-rc.d keyboard-setup start
#Do a number of things
#including trying again to keep the screen from blanking
sudo cp tools/.bashrc /home/pi/.bashrc
#Rotate the screen
# Fixed: "echo ... > /boot/config.txt" truncated the ENTIRE boot config, and
# the redirection ran without root anyway (sudo only applied to echo).
# Append the rotation setting with tee -a instead.
echo "display_rotate=1" | sudo tee -a /boot/config.txt
#Install autostart
sudo sh tools/install_autostart.sh autostart_waiver
| true
|
ef00acfb73630eb346594da16ea93ae539a9ad28
|
Shell
|
yujinqiu/user-guide
|
/build.sh
|
UTF-8
| 3,622
| 3.75
| 4
|
[] |
no_license
|
#!/bin/sh
# Dependencies are DocBook DSSSL stylesheets and Jade (provided
# by the Debian packages docbook-dsssl and jade, respectively).
# NOTE(review): the shebang declares POSIX sh but the functions below use
# 'local', which is not POSIX (dash/bash accept it) — confirm target shell.
set -e
# DocBook DSSSL stylesheet used by every jade invocation below.
DSL="/usr/share/sgml/docbook/stylesheet/dsssl/modular/html/docbook.dsl"
# Resolve ROOT to the absolute path of this script's directory.
ROOT="$(dirname "$0")"
cd "$ROOT"
ROOT=$(pwd)
# Print the names of all directive files whose "Context" block mentions
# the given context string (e.g. '<Global>'), one name per line.
directives_for_context()
{
	local wanted=$1
	# grep prefixes each context line with "<path>-"; keep only the lines
	# mentioning the requested context, then strip everything back to the
	# bare directive file name.
	grep -A3 Context "$ROOT/directives/"* \
		| fgrep "$wanted" \
		| cut -d '-' -f 1 \
		| sed 's,.*/,,'
}
# Build output/by_context_source.sgml: for every configuration context,
# append that context's definition followed by closing SGML tags, and
# write a <link> list of its directives to a per-context file.
build_by_context()
{
	local context directive
	# One membership list per configuration context.
	directives_for_context 'server config' >"$ROOT/output/context-serverconfig"
	directives_for_context '<Global>' >"$ROOT/output/context-Global"
	directives_for_context '<VirtualHost>' >"$ROOT/output/context-VirtualHost"
	directives_for_context '<Anonymous>' >"$ROOT/output/context-Anonymous"
	directives_for_context '<Limit>' >"$ROOT/output/context-Limit"
	directives_for_context '.ftpaccess' >"$ROOT/output/context-ftpaccess"
	rm -f "$ROOT/output/by_context_source.sgml"
	for context in serverconfig Global VirtualHost Anonymous Limit ftpaccess; do
		# Each definition file presumably ends inside an open <para>;
		# the closing tags are appended below — confirm in definitions/context/*.
		cat "$ROOT/definitions/context/$context" \
			>>"$ROOT/output/by_context_source.sgml"
		# NOTE(review): the <link> entries go to a separate
		# context-$context.sgml file, NOT to by_context_source.sgml —
		# confirm the main document includes that file elsewhere.
		for directive in $(cat "$ROOT/output/context-$context"); do
			echo "<link linkend=\"$directive\">$directive</link>" \
				>>"$ROOT/output/context-$context.sgml"
		done
		echo "</para>" >>"$ROOT/output/by_context_source.sgml"
		echo "</refsect1>" >>"$ROOT/output/by_context_source.sgml"
		echo "</refentry>" >>"$ROOT/output/by_context_source.sgml"
		echo "" >>"$ROOT/output/by_context_source.sgml"
		echo "" >>"$ROOT/output/by_context_source.sgml"
	done
}
# Emit one "module:directive" pair per directive file, where the module
# name is scraped from the file's <keyword> block.
module_and_directive_list()
{
	local f mod name
	for f in $(find "$ROOT/directives" -type f); do
		name=${f##*/}
		# Look at the two lines following <keyword> and extract the
		# first mod_* token found there.
		mod=$(grep -A 2 "<keyword>" "$f" \
			| sed -n '/mod_/s/.*\(mod_[a-z]*\).*/\1/p')
		echo "$mod:$name"
	done
}
# Build output/by_module_source.sgml: directives grouped by module, in
# case-insensitive sort order, each module introduced by its definition
# file and closed with </para></refsect1></refentry>.
build_by_module()
{
	local module_and_directive module directive
	local last_module
	rm -f "$ROOT/output/by_module_source.sgml"
	# Module name must always be first so the list is sorted
	# by module in our output.
	for module_and_directive in $(module_and_directive_list | sort -f); do
		module=${module_and_directive%%:*}
		directive=${module_and_directive##*:}
		if [ "$module" != "$last_module" ]; then
			if [ ! -e "$ROOT/definitions/module/$module" ]; then
				echo "$module doesn't have a module definition in $ROOT/definitions/module, skipping..." 1>&2
				continue
			fi
			# Close the previous module's entry ('<<-' strips leading tabs).
			# NOTE(review): this also runs for the FIRST module, so the file
			# starts with closing tags — confirm that is intended.
			cat <<-EOS >>"$ROOT/output/by_module_source.sgml"
				</para>
				</refsect1>
				</refentry>
			EOS
			cat "$ROOT/definitions/module/$module" \
				>>"$ROOT/output/by_module_source.sgml"
			last_module="$module"
		fi
		echo "<link linkend=\"$directive\">$directive</link>" \
			>>"$ROOT/output/by_module_source.sgml"
	done
}
# Concatenate every directive file (alphabetically, via the glob) into
# the "by name" source document.
build_by_name()
{
	local target="$ROOT/output/by_name_source.sgml"
	cat "$ROOT/directives/"* >>"$target"
}
# Regenerate the output tree from scratch, then render the three source
# documents (by context, by module, by name) with jade.
rm -rf "$ROOT/output"
mkdir -p "$ROOT/output/userguide/linked"
mkdir -p "$ROOT/output/config/linked"
build_by_context
build_by_module
build_by_name
# These jade(1) invocations exit with status 1, probably because of our
# crappy SGML input. I hate blindly ignoring their exit status, but it's
# easier than fixing the SGML. :-/
jade -t sgml -V nochunks -E 100000 -d "$DSL" \
	"$ROOT/configuration.sgml" \
	>"$ROOT/output/config/configuration_full.html" || true
# Chunked (multi-file, linked) rendering writes into the current directory.
cd "$ROOT/output/config/linked"
jade -t sgml -E 10000 -d "$DSL" "$ROOT/configuration.sgml" || true
jade -t sgml -V nochunks -d "$DSL" "$ROOT/userguide.sgml" \
	>"$ROOT/output/userguide/userguide_full.html" || true
cd "$ROOT/output/userguide/linked"
jade -t sgml -E 20 -d "$DSL" "$ROOT/userguide.sgml" || true
| true
|
173c9261119834a6903a8d7fa65d4085c3a1cfe8
|
Shell
|
szovaty/scripts
|
/yrsync_xos
|
UTF-8
| 3,230
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# Based on template.src version 1.1 last modified 2006.08.07 by y
# sync a filesystem with another one
#
# All y_* helpers (y_start, y_exit, y_help, y_run_cmd, y_progress, ...)
# come from the sourced /bin/y_functions.sh library; their exact behavior
# is not visible here.
trap y_exit TERM HUP INT
# Uncomment this to allow colors on the console
USE_COLOR="yes"
# This will enable positioning of a text to the end (-8 char)
# of a line independent of window size
USE_POS="yes"
# Source the function library
FUNCTIONS="/bin/y_functions.sh"
# NOTE(review): sourcing is skipped silently when the library is not
# readable; the y_* calls below would then fail — confirm intended.
test -r ${FUNCTIONS} && source ${FUNCTIONS}
# define the following to match the program current state
PROGRAM_NAME="yrsync_xos"
VERSION="0.7"
LAST_MOD="2022.05.12"
# Define log file here
LOG="/var/log/${0##*/}.log"
# Uncomment this if you need debug enabled
#Y_DEBUG="true"
# define backup system mount point
XOS_DIR="/xos"
# define filesystems to sync
DIR_LIST="/usr /var /opt /boot"
# define rsync command
#RSYNC_OPT="-arxK --delete"
RSYNC_OPT="-arx --delete"
# other definitions
FSTAB="/etc/fstab.xos"
DEV="yes"
MOUNT="yes"
# use these to set cleanup on exit
#MOUNTED=""		# y_exit will unmount them
#DELETE_ON_EXIT=""	# y_exit will delete them
#LAST_CMD=""		# y_exit will run it just before exit
y_start
# ===================================================================================
# there is a bug in y_functions around y_help, it can not display RSYNC_OPT well
# read command line parameters
# (the trailing "#;" comments are presumably parsed by y_help for the usage text)
while [ -n "$1" ] ; do
	case $1 in
	--help|-h)	y_help "[Options]"; y_exit;;	#; Help
	--xos|-x)	XOS_DIR="$2"; shift ;;		#; Define xos_dir [XOS_DIR]
	--dirs|-d)	DIR_LIST="$2"; shift ;;		#; Define dirs to copy [DIR_LIST]
	--rsync|-r)	RSYNC_OPT="$2"; shift ;;	#; Define rsync options [RSYNC_OPT]
	--nocolor|-C)	USE_COLOR="no";;		#; Color display [USE_COLOR]
	--nomount|-m)	MOUNT="no";;			#; Mount target dir [MOUNT]
	--fstab|-f)	FSTAB="$2"; shift ;;		#; Define new fstab [FSTAB]
	--nofstab|-F)	FSTAB="";;			#; Disable fstab modification
	--nodev|-D)	DEV="no";;			#; Create core devices under /dev [DEV]
	esac
	shift
done
RSYNC="rsync $RSYNC_OPT"
y_debug "$RSYNC_OPT"
test -n "$FSTAB" && {
	test -r "$FSTAB" || y_fatal_error "Invalid fstab defined [$FSTAB]"
}
# Mount the backup filesystem unless already mounted; remember it in
# MOUNTED so y_exit can unmount it again.
test "$MOUNT" = "yes" && {
	mount | grep -w "$XOS_DIR" >/dev/null 2>&1
	test "$?" = "0" || {
		y_run_cmd "mount $XOS_DIR"
		MOUNTED=$XOS_DIR
	}
}
# sync rootfs
# Build --exclude flags so the rootfs pass skips DIR_LIST; those trees
# are synced individually afterwards.
for i in $DIR_LIST ; do
	EXCLUDE="$EXCLUDE --exclude=$i"
done
s=`printf "%-12s" "/"`
y_progress "Sync $s -> ${XOS_DIR} ..."
y_run_cmd "$RSYNC $EXCLUDE / $XOS_DIR"
y_progress_end 0
# sync the rest of the filesystems
for i in $DIR_LIST ; do
	s=`printf "%-12s" $i`
	y_progress "Sync $s -> ${XOS_DIR}${i} ..."
	y_run_cmd "$RSYNC $i $XOS_DIR"
	y_progress_end 0
done
# adjust backup system
y_progress "Adjust filesystem..."
test -n "$FSTAB" && y_run_cmd "cp $XOS_DIR/$FSTAB $XOS_DIR/etc/fstab"
rm -f $XOS_DIR/etc/runlevels/boot/dmcrypt
# Recreate the minimal device nodes the backup system needs to boot.
test "$DEV" = "yes" && {
	test -e "$XOS_DIR/dev/console" || y_run_cmd "mknod $XOS_DIR/dev/console c 5 1"
	test -e "$XOS_DIR/dev/null" || y_run_cmd "mknod $XOS_DIR/dev/null c 1 3"
	test -e "$XOS_DIR/dev/tty1" || y_run_cmd "mknod $XOS_DIR/dev/tty1 c 4 1"
}
y_progress_end 0
# ===================================================================================
# Leave this as the last line
y_exit
| true
|
d75233e7ea0b0bbb7f3ff252c50aaa4c2d175eb9
|
Shell
|
debjitpaul/Pre-processing-and-NLP-Tagger
|
/exec_brill.sh
|
UTF-8
| 404
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Brill Tagger:
# input:
#	word1
#	word2
#	word3
#	:
#	:
#	wordn
# output:
#	word1/tag1
#	word2/tag2
#	word3/tag3
#	word4/tag4
#	:
#	:
#	wordn/tagn
for file in ./word/*;
do
	# FIX: "BASENAME= basename $file" set BASENAME to the empty string and
	# ran basename as a separate command; capture its output instead.
	BASENAME=$(basename "$file")
	echo "$BASENAME"
	# NOTE(review): the output path mirrors the original behaviour
	# ("../raw_tag/./word/<name>"); it was probably meant to be
	# "../raw_tag/$BASENAME" — confirm before changing it.
	/BRILL_TAGGER_NEWLISP_V1.14/Bin_and_Data/tagger LEXICON "$file" BIGRAMS LEXICALRULEFILE CONTEXTUALRULEFILE > "../raw_tag/$file"
done
| true
|
a2346f532f6e70b2d129ce7d97fd4f28ab0d135f
|
Shell
|
huobazi/init
|
/install_golang.sh
|
UTF-8
| 210
| 2.53125
| 3
|
[] |
no_license
|
# Download Go 1.7.3 and install it under /usr/local/go.
set -e  # FIX: abort on failure — previously a failed download still ran tar/mv
GOREL=go1.7.3.linux-amd64.tar.gz
wget "https://storage.googleapis.com/golang/$GOREL"
tar xfz "$GOREL"
mv go /usr/local/go
rm -f "$GOREL"
# FIX: export so child processes of this script can also see the toolchain
# (a bare assignment only affected the current shell).
export PATH=$PATH:/usr/local/go/bin
# Persist the PATH addition for future login shells.
echo 'PATH=$PATH:/usr/local/go/bin' >> ~/.bashrc
| true
|
f92c2418247486e38018fcdbe78b3e7b275702e9
|
Shell
|
jimmo/dotfiles
|
/common/home/bin/sp
|
UTF-8
| 622
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
#set -x
set -e
# Open (creating on first use) the Sublime Text project for the current
# directory, or for the explicit project name given as $1.
if [ -z "$1" ]; then
	# Fallback to just using the parent directory name only (to match old behaviour)
	NAME=${PWD##*/}
	if [ ! -e "${HOME}/src/sublime-projects/${NAME}.sublime-project" ]; then
		# But if that doesn't exist, use grantparent/parent for new ones.
		NAME=`pwd | grep -E -o '[^/]*/[^/]*$' | tr / _`
	fi
else
	NAME=$1
fi

PROJ="${HOME}/src/sublime-projects/${NAME}.sublime-project"

# FIX: quote $1 and $PROJ — names containing spaces previously broke the
# tests and the redirection below.
if [ ! -e "$PROJ" ]; then
	echo "Creating new project: ${NAME} --> $(pwd)"
	cat > "$PROJ" <<EOF
{
	"folders":
	[
		{
			"path": "$(pwd)"
		}
	]
}
EOF
fi
s -p "$PROJ"
| true
|
77180ffdf18cc77e7f723835a0f3d90df6d087d6
|
Shell
|
shoryec/terraform-jenkins-slaves
|
/scripts/join-cluster.sh.tpl
|
UTF-8
| 1,192
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# EC2 user-data template (Terraform templatefile): provision a Jenkins
# build slave and register it with the master via the script console.
# ${lowercase} placeholders are substituted by Terraform; $UPPERCASE
# variables are ordinary shell variables at runtime.
echo "Install Java JDK 8"
sudo yum update -y
sudo yum remove -y java
sudo yum install -y java-1.8.0-openjdk
echo "Install Docker engine"
sudo yum update -y
sudo yum install docker -y
sudo usermod -aG docker ec2-user
sudo service docker start
echo "Install git"
sudo yum install -y git
echo "Connecting to Master"
export JENKINS_URL="${jenkins_url}"
export JENKINS_USERNAME="${jenkins_username}"
export JENKINS_PASSWORD="${jenkins_password}"
# 169.254.169.254 is the EC2 instance-metadata service.
export INSTANCE_NAME=$(curl -s 169.254.169.254/latest/meta-data/local-hostname)
export INSTANCE_IP=$(curl -s 169.254.169.254/latest/meta-data/local-ipv4)
export JENKINS_CREDENTIALS_ID="${jenkins_credentials_id}"
# Give the Jenkins master time to come up before registering.
sleep 60
echo "Running curl command"
# POST a Groovy snippet to the master's /script console that adds this
# instance as a DumbSlave reachable over SSH. The single-quoted body is
# Groovy, not shell — do not edit it as shell code.
curl -v -u $JENKINS_USERNAME:$JENKINS_PASSWORD -d 'script=
import hudson.model.Node.Mode
import hudson.slaves.*
import jenkins.model.Jenkins
import hudson.plugins.sshslaves.SSHLauncher
DumbSlave dumb = new DumbSlave("'$INSTANCE_NAME'",
"'$INSTANCE_NAME'",
"/home/ec2-user",
"3",
Mode.NORMAL,
"slaves",
new SSHLauncher("'$INSTANCE_IP'", 22, "'$JENKINS_CREDENTIALS_ID'", "", null, null, "", null, 60, 3, null),
RetentionStrategy.INSTANCE)
Jenkins.instance.addNode(dumb)
' $JENKINS_URL/script
| true
|
193981d47ab81eadbfede1c80f5b60e6cc4ad8f8
|
Shell
|
PennLINC/xcpEngine
|
/utils/tfilter
|
UTF-8
| 17,854
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
###################################################################
# ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ #
###################################################################
###################################################################
# Generalised function for temporally filtering 4D BOLD timeseries
# data
###################################################################
###################################################################
# Constants
###################################################################
# XCPEDIR must point at the xcpEngine installation; these files supply
# the helpers used throughout (exec_fsl, exec_afni, exec_xcp, exec_sys,
# subroutine, is_image, is_1D, is+numeric, is+integer, arithmetic, ...).
source ${XCPEDIR}/core/constants
source ${XCPEDIR}/core/functions/library.sh
###################################################################
# Usage function
###################################################################
# Print the help text for tfilter to stdout.
Usage(){
cat << endstream
___________________________________________________________________
Usage: `basename $0` -i <input> -o <output> <options>
Compulsory arguments:
 -i : Input timeseries
      The 4D timeseries to which the temporal filter is to
      be applied.
 -o : Output timeseries
      The path to the file where the filtered timeseries
      will be written.
Optional arguments:
 -f : Temporal filter mode [default fft]
      Input can be any of 'gaussian', 'fft', 'butterworth',
      'chebyshev1', 'chebyshev2', or 'elliptic'.
      * FFT-based filters [default] use a fast Fourier
        transform to attenuate frequencies. An FFT-based
        filter may not be suitable for use in designs that
        incorporate iterative motion censoring, since
        it will include interpolated frequencies in its
        calculations.
      * A Gaussian filter uses Gaussian-weighted least-
        squares
      * Chebyshev and elliptic filters more ideally
        discriminate accepted and attenuated frequencies
        than do Butterworth filters, but they introduce
        ripples in either the passband (chebyshev1),
        stopband (chebyshev2), or both (elliptic) that
        result in some signal distortion.
 -h : Highpass frequency [default 0.01]
      The cutoff frequency below which all signal will be
      removed from the timeseries, in Hz. To create a
      bandpass filter, specify a lowpass frequency that is
      higher than the highpass frequency.
 -l : Lowpass frequency [default nyquist]
      The cutoff frequency above which all signal will be
      removed from the timeseries, in Hz.
 -m : Mask
      Filtering is computed only within the specified
      binary mask.
 -n : Temporal mask with interpolation
      Filtering is computed only within the specified
      temporal mask. Outside of the temporal mask, the values
      of the timeseries are interpolated so as to eliminate
      the influence of masked-out time points on the processed
      timeseries (following Power et al., 2014).
 -a : TR
      image time series fro interpolation.
 -r : Filter order [default 1]
      The filter order indicates the number of input samples
      taken under consideration when generating an output
      signal. In general, using a higher-order filter will
      result in a sharper cutoff between accepted and
      attenuated frequencies. For a gentler filter, use a
      lower order.
      (Affects only Butterworth, Chebyshev, and elliptic
      filters.)
 -d : Filter direction [default 2]
      The filter direction indicates whether the input signal
      should be processed in the forward direction only [-d 1]
      or in both forward and reverse directions [-d 2].
      (Affects only Butterworth, Chebyshev, and elliptic
      filters.)
 -p : Pass band ripple [default 1]
      Chebyshev I and elliptic filters allow for sharper
      discrimination between accepted and attenuated
      frequencies at the cost of a 'ripple' in the pass band.
      This ripple results in somewhat uneven retention of
      pass-band frequencies.
      (Affects only Chebyshev I and elliptic filters)
 -s : Stop band ripple [default 1]
      Chebyshev II and elliptic filters allow for sharper
      discrimination between accepted and attenuated
      frequencies at the cost of a 'ripple' in the stop band.
      This ripple results in somewhat uneven removal of
      stop-band frequencies.
      (Affects only Chebyshev II and elliptic filters)
 -1 : 1D timeseries
      A comma separated list of names and paths to files containing
      one-dimensional timeseries, formatted name1:path1,name2:path2
      Each column in the file should represent a separate
      timeseries, and each row should represent a separate time
      point; the number of rows must equal the number of volumes
      present in the primary BOLD timeseries. The same filtering
      regime that was applied to the primary BOLD timeseries will
      be applied to each column of each 1D file
 -t : Trace
      If this flag is set, then any commands called by the
      tfilter routine will be explicitly printed to the
      console or log.
endstream
}
###################################################################
# Define defaults
###################################################################
filter=fft          # temporal filter class (-f)
hipass=0.01         # highpass cutoff in Hz (-h)
lopass=nyquist      # lowpass cutoff in Hz (-l); 'nyquist' = no lowpass
order=1             # filter order (-r); Butterworth/Chebyshev/elliptic only
pass=2              # filter direction (-d): 1 forward, 2 forward+reverse
rpass=1             # passband ripple (-p); Chebyshev I / elliptic only
rstop=1             # stopband ripple (-s); Chebyshev II / elliptic only
tmask=ones          # temporal mask (-n); 'ones' = no temporal masking
unset tslist        # accumulates -1 name:path 1D timeseries arguments
###################################################################
# Parse arguments
###################################################################
# NOTE(review): the optstring declares k: and v: but no case handles
# them, so -k/-v fall through to *) and abort — confirm whether these
# options were removed intentionally.
while getopts "i:a:o:f:h:l:m:n:k:r:d:p:s:v:1:t" OPTION
   do
   case $OPTION in
   i)
      image=${OPTARG}
      ! is_image ${image} && Usage && exit
      ;;
   o)
      out=${OPTARG}
      # out_root is the extensionless stem used for all derived files.
      out_root=$(exec_fsl remove_ext ${out})
      ;;
   f)
      filter=${OPTARG}
      # Reject anything other than the six supported filter classes.
      if [[ ${filter} != gaussian ]] \
      && [[ ${filter} != fft ]] \
      && [[ ${filter} != butterworth ]] \
      && [[ ${filter} != chebyshev1 ]] \
      && [[ ${filter} != chebyshev2 ]] \
      && [[ ${filter} != elliptic ]]
         then
         echo "Unrecognised filter mode: ${filter}."
         Usage
         exit
      fi
      ;;
   h)
      hipass=${OPTARG}
      ! is+numeric ${hipass} && Usage && exit
      ;;
   l)
      lopass=${OPTARG}
      # 'nyquist' is the one accepted non-numeric value.
      ! is+numeric ${lopass} \
      && [[ ${lopass} != nyquist ]] \
      && Usage \
      && exit
      ;;
   m)
      mask=${OPTARG}
      ! is_image ${mask} && Usage && exit
      ;;
   n)
      tmask=${OPTARG}
      ! is_1D ${tmask} && Usage && exit
      ;;
   r)
      order=${OPTARG}
      ! is+integer ${order} && Usage && exit
      ;;
   d)
      pass=${OPTARG}
      (( ${pass} != 1 )) \
      && (( ${pass} != 2 )) \
      && Usage \
      && exit
      ;;
   p)
      rpass=${OPTARG}
      ! is+numeric ${rpass} && Usage && exit
      ;;
   a)
      tr=${OPTARG}
      ! is+numeric ${tr} && Usage && exit
      ;;
   s)
      rstop=${OPTARG}
      ! is+numeric ${rstop} && Usage && exit
      ;;
   1)
      # May be given multiple times; entries are space-accumulated here
      # and split on commas later.
      tslist="${tslist} ${OPTARG}"
      ;;
   t)
      set -x
      ;;
   *)
      echo "Option not recognised: ${OPTARG}"
      Usage
      exit
   esac
done
###################################################################
# Ensure that all compulsory arguments have been defined
###################################################################
[[ -z ${image} ]]    && Usage && exit
[[ -z ${out} ]]      && Usage && exit
[[ -z ${filter} ]]   && Usage && exit
###################################################################
# Ensure that all 1D timeseries (including the temporal mask) have
# the same number of volumes as the primary BOLD timeseries
###################################################################
nvol=$(exec_fsl fslnvols ${image})
if [[ ${tmask} != ones ]]
   then
   # One temporal-mask row per volume.
   cvol=$(wc -l < ${tmask})
else
   cvol=${nvol}
fi
if (( ${nvol} != ${cvol} ))
   then
   echo "Volume mismatch: temporal mask"
   exit
fi
# -1 entries are comma-separated name:path pairs; split on commas and
# register each in the ts1d associative array (name -> path).
[[ -n ${tslist} ]] && tslist=${tslist//,/ }
declare -A ts1d
for ts in ${tslist}
   do
   ts_name=$(strslice ${ts} 1 :)
   ts_path=$(strslice ${ts} 2 :)
   ts1d[$ts_name]=${ts_path}
   cvol=$(wc -l < ${ts_path})
   if (( ${nvol} != ${cvol} ))
      then
      echo "Volume mismatch: ${ts}"
      exit
   fi
done
###################################################################
# Compute the repetition time. This is required for all filters.
###################################################################
t_rep=$(exec_fsl fslval ${image} pixdim4)
###################################################################
# 1. Interpolate over masked-out epochs, if a temporal mask has
#    been provided.
###################################################################
if [[ ${tmask} != ones ]]
   then
   subroutine @u.1a Interpolating over masked-out epochs...
   subroutine @u.1b This will be slow
   # Lomb-Scargle-style interpolation (Power et al., 2014) so censored
   # frames do not leak into the filtered signal.
   # NOTE(review): -m ${mask} and -a $tr are only valid when -m/-a were
   # supplied on the command line — confirm callers always provide them
   # together with -n.
   exec_xcp \
      interpolate.py \
      -i ${image} \
      -a $tr \
      -m ${mask} \
      -t ${tmask} \
      -o ${out_root}_interpol.nii.gz
   ################################################################
   # Update pointers to indicate the post-interpolation image
   ################################################################
   image=${out_root}_interpol.nii.gz
fi
###################################################################
# Branch to the subroutine appropriate for the filter specified by
# user input
###################################################################
case ${filter} in
gaussian)
   subroutine  @u.2
   genfilt=0
   is_image ${mask} && maskcall="-mas ${mask}"
   ################################################################
   # 2a. Filter the primary 4D timeseries.
   #---------------------------------------------------------------
   # * First, convert the cutoff frequencies from Hz (cycles per
   #   second) to cycles per repetition.
   # * Convert from frequency cutoff (in Hz) to cycle cutoff
   #   (in s).
   # * Then, determine how many cycles of the cutoff per
   #   repetition.
   ################################################################
   hpw=$(arithmetic 1/${hipass})
   hpf=$(arithmetic ${hpw}/${t_rep}/2)
   ################################################################
   # * Repeat for the lowpass frequency with the following
   #   exception: Set lowpass frequency to -1 (no attenuation)
   #   if the lowpass frequency is set to Nyquist.
   ################################################################
   if [[ ${lopass} == nyquist ]]
      then
      subroutine @u.2.1
      lpf=-1
   else
      subroutine @u.2.2
      lpw=$(arithmetic 1/${lopass})
      lpf=$(arithmetic ${lpw}/${t_rep}/2)
   fi
   ################################################################
   # * With that, it is possible to apply the filter through
   #   fslmaths.
   ################################################################
   exec_fsl \
      fslmaths ${image} \
      -bptf ${hpf} ${lpf} \
      ${maskcall} \
      ${out}
   ################################################################
   # 3a. Filter any 1D timeseries specified by the user.
   #---------------------------------------------------------------
   # * This is much more complicated than it sounds because
   #   FSL's filter engine will only process NIfTIs.
   # * So, the information in each 1D timeseries must be written
   #   into a (hopefully compact) NIfTI.
   ################################################################
   for tsname in "${!ts1d[@]}"
      do
      subroutine @u.2.6
      ts=${ts1d[$tsname]}
      #############################################################
      # * Determine the number of timeseries in the 1D timeseries
      #   file by dividing the total number of words in the file
      #   by the number of time points (volumes).
      #############################################################
      nfld=$(exec_sys wc -w < ${ts})
      nfld=$(( ${nfld}/${nvol} ))
      #############################################################
      # * Through trial and error, we have found that the desired
      #   result is obtained only if the timeseries is transposed
      #   before conversion to NIfTI.
      #############################################################
      exec_afni 1dtranspose ${ts} ${out_root}_${tsname}_tp.1D
      exec_fsl \
         fslascii2img ${out_root}_${tsname}_tp.1D \
         ${nfld} 1 1 \
         ${nvol} 1 1 1 \
         ${t_rep} \
         ${out_root}_${tsname}.nii.gz
      exec_fsl \
         fslmaths ${out_root}_${tsname}.nii.gz \
         -bptf ${hpf} ${lpf} \
         ${out_root}_${tsname}_filt.nii.gz
      #############################################################
      # * Convert back to a .1D file using fslmeants with the
      #   showall flag to return all timeseries instead of the
      #   mean timeseries.
      #############################################################
      exec_fsl \
         fslmeants -i ${out_root}_${tsname}_filt.nii.gz \
         --showall \
         -o ${out_root}_${tsname}.1D
      # Clean up the intermediate transposed/NIfTI scratch files.
      rm -f ${out_root}_${tsname}_tp.1D
      rm -f ${out_root}_${tsname}.nii.gz
      rm -f ${out_root}_${tsname}_filt.nii.gz
   done
   ;;
fft)
   subroutine  @u.3
   genfilt=0
   is_image ${mask} && maskcall="-mask ${mask}"
   ################################################################
   # 2b. Filter the primary 4D timeseries.
   #---------------------------------------------------------------
   # * Define the highpass and lowpass inputs to AFNI's
   #   3dBandpass, which performs the FFT decomposition.
   # * According to the 3dBandpass documentation, setting the
   #   lowpass cutoff to 99999 will remove only frequencies above
   #   the Nyquist limit.
   # * So, if the user has requested all frequencies retained up
   #   to the Nyquist limit, pass 99999 as the lowpass cutoff.
   ################################################################
   if [[ ${lopass} == nyquist ]]
      then
      subroutine @u.3.1
      lopass=99999
   fi
   ################################################################
   # * Apply the FFT filter.
   # * If the output path is already occupied by an image, then
   #   AFNI will not by default overwrite. Clear the output path.
   ################################################################
   rm -rf ${out}
   exec_afni \
      3dBandpass \
      -prefix ${out} \
      -nodetrend -quiet \
      ${maskcall} \
      ${hipass} ${lopass} ${image} \
      2>/dev/null
   ################################################################
   # 3b. Filter any 1D timeseries specified by the user.
   #---------------------------------------------------------------
   # * For this purpose, the script uses 1dBandpass.
   # * Because 1D files do not include a header with timing
   #   information, the repetition time must be provided so that
   #   the program can convert cutoff frequencies to units of
   #   sampling intervals.
   ################################################################
   for tsname in "${!ts1d[@]}"
      do
      ts=${ts1d[$tsname]}
      exec_sys rm -rf ${out_root}_${tsname}.1D
      exec_afni \
         1dBandpass \
         -nodetrend \
         -dt ${t_rep} \
         ${hipass} ${lopass} ${ts} \
         >> ${out_root}_${tsname}.1D
   done
   ;;
# The remaining filter classes share one generic implementation below
# (genfilt=1); each branch only assembles its ripple/type arguments.
butterworth)
   subroutine  @u.4
   genfilt=1
   rpcall=""
   rscall=""
   fcall="-f butterworth"
   ;;
chebyshev1)
   subroutine  @u.5
   genfilt=1
   rpcall="-p ${rpass}"
   rscall=""
   fcall="-f chebyshev1"
   ;;
chebyshev2)
   subroutine  @u.6
   genfilt=1
   rpcall=""
   rscall="-s ${rstop}"
   fcall="-f chebyshev2"
   ;;
elliptic)
   subroutine  @u.7
   genfilt=1
   rpcall="-p ${rpass}"
   rscall="-s ${rstop}"
   fcall="-f elliptic"
   ;;
esac
###################################################################
# genfilter is a utility R script that applies Butterworth,
# Chebyshev I and II, and elliptic filters to an image.
###################################################################
if (( ${genfilt} == 1 ))
   then
   subroutine @u.8
   ################################################################
   # Initialise parameters.
   ################################################################
   is_image ${mask} && maskcall="-m ${mask}"
   ocall="-r ${order}"
   dcall="-d ${pass}"
   ################################################################
   # Filter the primary 4D timeseries.
   ################################################################
   exec_sys rm -rf ${out}
   exec_xcp \
      genfilter.R \
      -i ${image} \
      -o ${out} \
      ${fcall} \
      ${maskcall} \
      -c ${hipass} \
      -l ${lopass} \
      ${ocall} \
      ${dcall} \
      ${rpcall} \
      ${rscall}
   ################################################################
   # Filter any 1D timeseries specified by the user.
   #---------------------------------------------------------------
   # * For this purpose, the script uses 1dGenfilter.
   # * Because 1D files do not include a header with timing
   #   information, the repetition time must be provided so that
   #   the program can convert cutoff frequencies to units of
   #   sampling intervals.
   ################################################################
   for tsname in "${!ts1d[@]}"
      do
      subroutine @u.8.3
      ts=${ts1d[$tsname]}
      exec_sys rm -rf ${out_root}_${tsname}.1D
      exec_xcp \
         1dGenfilter.R \
         -i ${ts} \
         -o ${out_root}_${tsname}.1D \
         ${fcall} \
         -t ${t_rep} \
         -c ${hipass} \
         -l ${lopass} \
         ${ocall} \
         ${dcall} \
         ${rpcall} \
         ${rscall}
   done
fi
| true
|
e802574f4029f17cde08de09719209c30cbe75ab
|
Shell
|
rluan/xapi-autobuilder
|
/fromcache.sh
|
UTF-8
| 341
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Retrieve build artefact $1 from the HTTP cache (preferred) or the
# local rsync-able cache directory named in ./cachelocation.
set -x
set -e
echo "From cache: attempting to retrieve $1"
# An HTTP cache takes precedence; a failed fetch is deliberately non-fatal.
if [ -n "${HTTPCACHE}" ]; then
	wget "${HTTPCACHE}/latest/${DIST}/${ARCH}/$1" || true
	exit 0
fi
if [ -z "${cache}" ]; then
	cache=$(cat cachelocation)
fi
latest=${cache}/latest
# FIX: expansions are now quoted so artefact names containing spaces or
# glob characters do not break the copy / checksum steps.
if [ -e "${latest}/$1" ]; then
	rsync -a "${latest}/$1" .
	md5sum "$1" > "$1.md5"
fi
| true
|
3f263933d30ee2630805e7ebe2840d6d84448c45
|
Shell
|
senolerd/openstack-docker
|
/cloud.sh
|
UTF-8
| 3,260
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# Manage an OpenStack-on-Docker-Swarm deployment
# (subcommands: deploy | test | cert | purge | status).
# $base anchors every relative path to the invocation directory.
export base=$(pwd)
# Export every KEY=VALUE pair from ./openstack.env (lines beginning with
# an upper-case letter), then detect the swarm manager address.
set_env(){
	local line key value
	# FIX: the original re-ran grep|head|tail once per line (O(n^2)) and
	# split with awk -F=, which truncated any value containing '='.
	while IFS= read -r line; do
		key=${line%%=*}
		# Keep everything after the FIRST '=' so values that themselves
		# contain '=' are preserved intact.
		value=${line#*=}
		export "$key"="$value"
	done < <(grep '^[A-Z]' openstack.env)
	export DOCKER_HOST_ADDR=$(docker info|grep "Manager Addresses" -A 1|tail -1|awk -F: {'print $1'}|awk -F" " '{print $1}')
}
# Generate the TLS/PKI material via create_cert.sh, gated on the
# INSECURE flag from openstack.env.
make_pki(){
	# Hostname of the current swarm leader (manager) node.
	target_host=$(docker node ls --format "{{.Hostname}}:{{.ManagerStatus}}"|grep Leader|awk -F: {'print $1'})
	export target_host=$target_host
	# Normalise the flag to lower case for the comparison below.
	INSECURE=$(echo "$INSECURE" | tr '[:upper:]' '[:lower:]')
	case $INSECURE in
	false)
		# NOTE(review): the naming looks inverted — INSECURE=false skips
		# TLS while INSECURE=true generates certificates. Confirm the
		# intended semantics of this flag before relying on it.
		echo "!! INSECURE is false in the env file, No TLS for ya";exit
	;;
	true)
		echo "TLS is being done"
		$base/branches/$OS_VERSION/etc/create_cert.sh
	;;
	esac
}
# Create the shared overlay network, then deploy one OS_<name> stack per
# compose file (*yml*) found under branches/$OS_VERSION.
deploy(){
	stack_list=($(ls -lX $base/branches/$OS_VERSION|grep yml|awk -F' ' '{print $9}'))
	docker network create -d overlay --attachable $OVERLAY_NET_NAME
	for stack in ${stack_list[@]}; do
		# Stack name = second underscore-separated field of the filename.
		stack_name=$(echo $stack|awk -F_ '{print $2}')
		echo "-----------"
		echo stack: $stack
		echo stack_name: $stack_name
		docker -D stack deploy -c $base/branches/$OS_VERSION/$stack OS_$stack_name
		echo "-----------"
	done
}
# Remove every deployed OS_<name> stack, the overlay network, and the
# generated certificate authority material.
purge(){
	stack_list=($(ls -lX $base/branches/$OS_VERSION|grep yml|awk -F' ' '{print $9}'))
	for stack in ${stack_list[@]}; do
		stack_name=$(echo $stack|awk -F_ '{print $2}')
		echo "-----------"
		#echo stack: $stack
		echo stack_name: $stack_name
		docker -D stack rm OS_$stack_name
		echo "-----------"
		# NOTE(review): the network removal and CA cleanup run on EVERY
		# loop iteration — presumably they were meant to run once after
		# the loop; confirm before moving them.
		docker network remove $OVERLAY_NET_NAME 2> /dev/null
		rm -rf $base/branches/$OS_VERSION/etc/ca
	done
}
# Print the currently deployed Docker stacks and services.
status(){
	printf '\nStack\n'
	docker stack ls
	printf '\nServices\n'
	docker service ls
}
# Smoke-test the deployment by listing Keystone users as admin.
# NOTE(review): this deliberately shadows the shell builtin `test` for
# the remainder of the script — the dispatcher below calls it by name.
test(){
	# Normalise the TLS flag and pick the matching URL scheme.
	KEYSTONE_PUBLIC_ENDPOINT_TLS=$(echo "$KEYSTONE_PUBLIC_ENDPOINT_TLS" | tr '[:upper:]' '[:lower:]')
	if [ "$KEYSTONE_PUBLIC_ENDPOINT_TLS" == "true" ];then
		PROTO="https"
	else
		PROTO="http"
	fi
	# Run only if the python-openstackclient CLI is installed.
	if hash openstack 2>/dev/null; then
		openstack user list --os-username admin \
		--os-password $ADMIN_PASS \
		--os-user-domain-name default \
		--os-project-name admin \
		--os-project-domain-name default \
		--os-auth-url $PROTO://$DOCKER_HOST_ADDR:$KEYSTONE_PUBLIC_ENDPOINT_PORT/v3 \
		--os-identity-api-version 3 \
		--os-cacert $base/branches/$OS_VERSION/etc/ca/intermediate/certs/ca-chain.cert.pem
	else
		echo -e "\nError: Openstack client isn't found.\n\"python-openstackclient\" would be nice.\n"
	fi
}
# Display the CA certificate chain and drop a copy next to this script.
cacert(){
	local chain=$base/branches/$OS_VERSION/etc/ca/intermediate/certs/ca-chain.cert.pem
	clear
	cat $chain
	cp $chain $base/
}
# Subcommand dispatcher. Every subcommand except `status` loads the
# environment first via set_env.
case $1 in
	deploy) set_env;make_pki;deploy;;
	test)   set_env;test;;
	cert)   set_env;cacert;;
	purge)  set_env;purge;;
	status) status;;
	*) echo -e "Usage: deploy | test | cert | purge | status\n"
esac
| true
|
f3f8332e2ea76f4ac810722b6e80f5bd9f7bada5
|
Shell
|
lacti/dotutil
|
/git-branch-clean
|
UTF-8
| 475
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Delete local bugfix/feature/hotfix branches that are already merged,
# after pruning stale remote-tracking references.
git fetch --all
# FIX: `git remote` already prints each remote name exactly once, so the
# old `git remote -v | awk | sort -u` pipeline was redundant.
for REMOTE in $(git remote); do
	git remote prune "${REMOTE}"
done
# FIX: egrep is deprecated; grep -E is the supported spelling.
for BRANCH in $(git branch --merged | awk '{print $1}' | grep -E '(bugfix|feature|hotfix)'); do
	echo "-------------------------"
	echo "- Delete: ${BRANCH}"
	# Check the branch out, sync it best-effort, then delete it locally
	# and (best-effort) on the remote.
	# NOTE(review): "${USER}" is used as the REMOTE name — confirm a
	# remote named after the login user actually exists in this setup.
	git checkout "${BRANCH}" && \
		(git pull || true) && \
		git checkout - && \
		git branch -d "${BRANCH}" && \
		(git push --delete "${USER}" "${BRANCH}" || true)
done
git gc
| true
|
2c1def37e4b676c81e3b214074efa754805063fe
|
Shell
|
shtoneyan/tfprofile
|
/bw_to_tfr.sh
|
UTF-8
| 2,442
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Preprocess BED/bigWig inputs into Basenji TFRecords: pick the best
# peaks, build an "avoid" mask (non-peaks + unmappable), then run
# basenji_data.py over the remaining regions.
# ARGUMENTS
basenji_samplefile=$1
basset_samplefile=$2 #/home/shush/profile/basenji/data/w32_l1024/sample_files/basset_sample_beds_top25.txt
profile_subdir=$3
output_dir=$4 #/home/shush/profile/tfprofile/datasets/top25
prefix=$5
o_prefix=$output_dir/$prefix
mkdir -p "$output_dir"
d=1 #take all without downsampling
input_size=1024
pool_window=32
genome_size=/home/shush/genomes/GRCh38_EBV.chrom.sizes.tsv
genome=/home/shush/genomes/hg38.fa
unmap=datasets/GRCh38_unmap.bed
# select best bed from basset preprocessing
/home/shush/codebase/src/preprocess_features.py -y -m 200 -s $input_size \
	-o "$o_prefix" -c "$genome_size" \
	"$basset_samplefile"
# FIX: the original assigned bedfile/sorted_bedfile/sorted_genome twice,
# producing doubled extensions ("sorted_samplefile.bed.bed"); each name is
# now set exactly once.
bedfile="$o_prefix.bed"
sorted_bedfile="sorted_samplefile.bed"
sorted_genome="sorted_genome.bed"
sort -k1,1 -k2,2n "$bedfile" > "$sorted_bedfile"     # sort best bed
sort -k1,1 -k2,2n "$genome_size" > "$sorted_genome"  # sort genome
# get the complement of the sorted bed and the genome to get which parts to avoid
bedtools complement -i "$sorted_bedfile" -g "$sorted_genome" > nonpeaks.bed
# complete the avoid regions by adding unmappable
cat nonpeaks.bed "$unmap" > avoid_regions.bed
sort -k1,1 -k2,2n avoid_regions.bed > sorted_avoid_regions.bed
bedtools merge -i sorted_avoid_regions.bed > merged_avoid_regions.bed
# remove intermediates
rm nonpeaks.bed
rm avoid_regions.bed
rm sorted_avoid_regions.bed
rm "$sorted_genome"
rm "$sorted_bedfile"
# preprocess data using GRCh38, and using the bed file to select regions
bin/basenji_data.py "$genome" \
	"$basenji_samplefile" \
	-g merged_avoid_regions.bed \
	-l $input_size -o "$output_dir/$profile_subdir" -t chr8 -v chr9 \
	-w $pool_window --local -d $d
# FIX: this is a local copy, so plain cp replaces the old scp call.
cp merged_avoid_regions.bed "$output_dir/$profile_subdir/"
rm merged_avoid_regions.bed
#top 25 input args
| true
|
45e32560f6cb049054f535f8e71d99f3d528aa91
|
Shell
|
jwslab/ML100days
|
/tag.sh
|
UTF-8
| 489
| 3.75
| 4
|
[] |
no_license
|
# Auto-increment the patch component of the most recent git tag, record an
# empty marker commit, then create and push the new tag.

# Most recent reachable tag (e.g. "v1.2.3" or "release-1.2.3").
TAG_VER="$(git describe --abbrev=0 --tags)"
echo 'The Latest Tag :'"$TAG_VER" $'\n'
echo '##### Generating New Tag #####'
# Strip everything up to the last '-' (any tag prefix), then split the
# remaining MAJOR.MINOR.PATCH on dots.  BUG FIX: the original
# `IFS=. VER=(${TAG_VER##*-})` is two plain assignments, so IFS stayed set
# to "." for the remainder of the script; `IFS=. read` keeps the override
# local to this one command.
IFS=. read -r FIRST SECOND THIRD <<< "${TAG_VER##*-}"
TAG_AUTO=$FIRST.$SECOND.$((THIRD + 1))
echo 'The New Tag :'"$TAG_AUTO" $'\n'
echo '##### Adding tag commit #####'
git commit --allow-empty -m "$TAG_AUTO"
git push
echo ''
echo '##### Taging #####'
git tag "$TAG_AUTO"
echo ''
echo '##### Pushing tag #####'
git push origin "$TAG_AUTO"
| true
|
9717ad155f70240fae482e0a8465029967cd5078
|
Shell
|
mkock/BiTE
|
/Vagrantbootstrap.sh
|
UTF-8
| 1,655
| 3.546875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Vagrant provisioning script: installs MongoDB, Redis, Node.js and Bower
# under /bite and prepares MongoDB's data directory.

# Update Debian packages
sudo apt-get -y update
sudo apt-get -y install curl
cd /bite
# Ensure that .bash_profile contains correct locale setting for MongoDB
if [ ! -f /home/vagrant/.bash_profile ]; then
  touch /home/vagrant/.bash_profile
fi
sudo chown vagrant:vagrant /home/vagrant/.bash_profile
if ! grep -q "LC_ALL" /home/vagrant/.bash_profile; then
  # BUG FIX: append with '>>' -- the original used '>' which truncated
  # .bash_profile, destroying any existing content.
  echo "export LC_ALL=\"en_US.UTF-8\"" >> /home/vagrant/.bash_profile
fi
# Install MongoDB (skipped when the 'mongo' client is already on PATH)
if ! hash mongo 2>/dev/null; then
  curl -O https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-2.6.7.tgz
  tar -zvxf mongodb-linux-x86_64-2.6.7.tgz
  ln -fs /bite/mongodb-linux-x86_64-2.6.7 /bite/mongodb
  ln -fs /bite/mongodb/bin/mongo /usr/bin/
  ln -fs /bite/mongodb/bin/mongod /usr/bin/
  rm *.tgz
fi
# Install redis (built from source)
if ! hash redis-server 2>/dev/null; then
  curl -O http://download.redis.io/releases/redis-2.8.19.tar.gz
  tar -zvxf redis-2.8.19.tar.gz
  cd redis-2.8.19
  make
  ln -fs /bite/redis-2.8.19/src/redis-server /usr/bin/
  ln -fs /bite/redis-2.8.19/src/redis-cli /usr/bin/
  cd ..
  rm *.tar.gz
fi
# Install nodejs (prebuilt binary tarball)
if ! hash node 2>/dev/null; then
  curl -O http://nodejs.org/dist/v0.10.36/node-v0.10.36-linux-x64.tar.gz
  tar -zvxf node-v0.10.36-linux-x64.tar.gz
  ln -fs /bite/node-v0.10.36-linux-x64 /bite/node
  ln -fs /bite/node/bin/node /usr/bin/
  ln -fs /bite/node/bin/npm /usr/bin/
  rm *.tar.gz
fi
# Install bower globally
if ! hash bower 2>/dev/null; then
  sudo /usr/bin/npm install -g bower
fi
# Create db data directory for MongoDB
sudo mkdir -p /home/vagrant/data/db
sudo chown -R vagrant:vagrant /home/vagrant/data/db
sudo chmod -R 0755 /home/vagrant/data
| true
|
9a3bbaca4e997ce91a4eccfbba6031b5ffeb784d
|
Shell
|
dinnozap/McServerMaker
|
/ServerMaker.sh
|
UTF-8
| 1,165
| 2.53125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# McServerMaker: downloads a Spigot 1.7.10 server jar plus a pre-accepted
# eula.txt, prints the host's public IP, and launches the Minecraft server
# inside a detached screen session.  All user-facing messages are French.
#_______________________________________________________________________
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||McServerMaker||||||||||||||||||||||||||||||||||||||
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#________________________________________________________________________
# ANSI colour escape codes ("Couleurs"); only BLEU/NORMAL are used below.
VERT="\\033[1;32m"
NORMAL="\\033[0;39m"
ROUGE="\\033[1;31m"
ROSE="\\033[1;35m"
BLEU="\\033[1;34m"
BLANC="\\033[0;02m"
BLANCLAIR="\\033[1;08m"
JAUNE="\\033[1;33m"
CYAN="\\033[1;36m"
clear
echo -e $BLEU Téléchargement de spigot 1.7.10 ...
mkdir Spigot
cd Spigot
# Fetch the Spigot jar and a ready-made eula.txt (the second URL serves
# the eula file, which arrives under the name "download").
wget -q http://getspigot.org/spigot/spigot-1.7.10-R0.1-SNAPSHOTBuild1544.jar
wget -q http://www.freevip.ovh/owncloud/index.php/s/iPdLvLPlVfm0uMr/download
clear
mv download eula.txt
mv spigot-1.7.10-R0.1-SNAPSHOTBuild1544.jar spigot.jar
# Show the server's public IP ("The IP of your server is...")
echo -e "L'IP de votre serveur est... $BLEU"
curl ifconfig.me
echo -e $BLEU screen -r pour acceder a la console $NORMAL
echo -e $NORMAL Lancement du serveur... $NORMAL
# Start the server detached in a screen session named "minecraft",
# with 1 GiB of heap.
screen -dmS minecraft java -Xms1024M -Xmx1024M -jar spigot.jar nogui
echo -e $BLEU Terminé ! $NORMAL
| true
|
9403ce5c1317db9da76cec540166bee519adc68e
|
Shell
|
xf20110925/uranus-parent
|
/uranus-manager/bin/start.sh
|
UTF-8
| 357
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Launcher for UranusManagerApplication: builds a classpath from the jar(s)
# in the application root plus every jar under libs/, then starts the JVM
# in the background, appending its output to logs/run.log.
source /etc/profile
# Resolve the application root (parent of this script's directory).
# NOTE: this intentionally overrides the user's $HOME for this script.
HOME=$(cd "$(dirname "$0")/../"; pwd)
cd "$HOME"
# Jar(s) sitting directly in the app root (expected: the main jar).
jars=$(ls "$HOME"/ | grep '\.jar')
# Colon-joined list "libs/a.jar:libs/b.jar:" of all dependency jars.
libjar=$(ls libs | grep '\.jar' | awk '{jar=jar"'"libs/"'"$1":"} END {print jar}')
echo $jars
# BUG FIX: print the classpath in the same order it is actually passed to
# java below (config:libjar:jars); it previously printed
# config:jars:libjar, which was misleading when debugging startup issues.
echo $HOME/config:$libjar:$jars
# "$@" (instead of $*) preserves argument boundaries for the application.
nohup java -cp $HOME/config:$libjar:$jars com.ptb.uranus.manager.UranusManagerApplication "$@" >> logs/run.log 2>&1 &
| true
|
cac6283001be524f909fee45eb35a99611b67688
|
Shell
|
Undomyr/aryalinux
|
/applications/sddm.sh
|
UTF-8
| 4,804
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Auto-generated ALPS/BLFS build script for SDDM 0.14.0 (Qt/QML display
# manager): downloads the tarball, builds with cmake, installs, writes
# /etc/sddm.conf and PAM configuration, and enables the systemd unit.
# Privileged steps are executed by repeatedly writing a temporary
# rootscript.sh and running it with sudo.
set -e
set +h
. /etc/alps/alps.conf
. /var/lib/alps/functions
SOURCE_ONLY=n
# "br3ak" tokens are artifacts of the BLFS-book extraction that generated
# this script; they are only cosmetic inside DESCRIPTION.
DESCRIPTION="br3ak The SDDM package contains abr3ak lightweight display manager based upon Qt and QML.br3ak"
SECTION="x"
VERSION=0.14.0
NAME="sddm"
#REQ:cmake
#REQ:extra-cmake-modules
#REQ:qt5
#REC:linux-pam
#REC:upower
# Download and unpack the source archive into $SOURCE_DIR (mirrors are
# tried in order; -nc skips files already present).
cd $SOURCE_DIR
URL=https://github.com/sddm/sddm/releases/download/v0.14.0/sddm-0.14.0.tar.xz
if [ ! -z $URL ]
then
wget -nc http://ftp.lfs-matrix.net/pub/blfs/conglomeration/sddm/sddm-0.14.0.tar.xz || wget -nc https://github.com/sddm/sddm/releases/download/v0.14.0/sddm-0.14.0.tar.xz || wget -nc ftp://ftp.lfs-matrix.net/pub/blfs/conglomeration/sddm/sddm-0.14.0.tar.xz || wget -nc ftp://ftp.osuosl.org/pub/blfs/conglomeration/sddm/sddm-0.14.0.tar.xz || wget -nc http://mirrors-ru.go-parts.com/blfs/conglomeration/sddm/sddm-0.14.0.tar.xz || wget -nc http://ftp.osuosl.org/pub/blfs/conglomeration/sddm/sddm-0.14.0.tar.xz || wget -nc http://mirrors-usa.go-parts.com/blfs/conglomeration/sddm/sddm-0.14.0.tar.xz
# Derive the tarball name from the URL and the top-level directory from
# the archive listing; unzip_dirname/unzip_file come from the sourced
# ALPS functions.
TARBALL=`echo $URL | rev | cut -d/ -f1 | rev`
if [ -z $(echo $TARBALL | grep ".zip$") ]; then
DIRECTORY=`tar tf $TARBALL | cut -d/ -f1 | uniq | grep -v "^\.$"`
tar --no-overwrite-dir -xf $TARBALL
else
DIRECTORY=$(unzip_dirname $TARBALL $NAME)
unzip_file $TARBALL $NAME
fi
cd $DIRECTORY
fi
whoami > /tmp/currentuser
# Create the dedicated sddm user/group (uid/gid 64) as root.
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
groupadd -g 64 sddm &&
useradd -c "SDDM Daemon" \
-d /var/lib/sddm \
-u 64 -g sddm \
-s /bin/false sddm
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh
# Out-of-tree cmake build; falls back to a serial make if the parallel
# build fails.
mkdir build &&
cd build &&
cmake -DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_BUILD_TYPE=Release \
-Wno-dev .. &&
make "-j`nproc`" || make
# Install and create sddm's state directory (as root).
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
make install &&
install -v -dm755 -o sddm -g sddm /var/lib/sddm
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh
# Generate a default config and install it as /etc/sddm.conf.
sddm --example-config > sddm.example.conf
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
cp -v sddm.example.conf /etc/sddm.conf
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh
# Point ServerPath at the Xorg installed under /opt/xorg.
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
sed -e '/ServerPath/ s|usr|opt/xorg|' \
-i.orig /etc/sddm.conf
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh
# Allow the X server to listen on TCP.
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
sed -e 's/-nolisten tcp//'\
-i /etc/sddm.conf
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh
# Turn the Num Lock setting from "none" to "on".
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
sed -e 's/\"none\"/\"on\"/' \
-i /etc/sddm.conf
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh
# Enable the display manager at boot.
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
systemctl enable sddm
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh
# Install PAM stacks for sddm, sddm-autologin and sddm-greeter.
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
cat > /etc/pam.d/sddm << "EOF" &&
# Begin /etc/pam.d/sddm
auth     requisite      pam_nologin.so
auth     required       pam_env.so
auth     required       pam_succeed_if.so uid >=1000 quiet
auth     include        system-auth
account  include        system-account
password include        system-password
session  required       pam_limits.so
session  include        system-session
# End /etc/pam.d/sddm
EOF
cat > /etc/pam.d/sddm-autologin << "EOF" &&
# Begin /etc/pam.d/sddm-autologin
auth     requisite      pam_nologin.so
auth     required       pam_env.so
auth     required       pam_succeed_if.so uid >=1000 quiet
auth     required       pam_permit.so
account  include        system-account
password required       pam_deny.so
session  required       pam_limits.so
session  include        system-session
# End /etc/pam.d/sddm-autologin
EOF
cat > /etc/pam.d/sddm-greeter << "EOF"
# Begin /etc/pam.d/sddm-greeter
auth     required       pam_env.so
auth     required       pam_permit.so
account  required       pam_permit.so
password required       pam_deny.so
session  required       pam_unix.so
-session optional       pam_systemd.so
# End /etc/pam.d/sddm-greeter
EOF
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh
# NOTE(review): the next line is a broken leftover of the BLFS book's HTML
# ("<em class=...>" placeholders were never substituted); as written the
# "<em" is parsed as an input redirection and the command cannot work.
# The same applies to the setxkbmap placeholder below.  TODO: substitute a
# real theme path / keyboard layout or remove these steps.
sddm-greeter --test-mode --theme <em class="replaceable"><code><theme path></em>
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
echo 'setxkbmap <em class="replaceable"><code>"<your keyboard comma separated list>"</em>' >> \
/usr/share/sddm/scripts/Xsetup
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh
sudo tee rootscript.sh << "ENDOFROOTSCRIPT"
echo "source /etc/profile.d/dircolors.sh" >> /etc/bashrc
ENDOFROOTSCRIPT
sudo chmod 755 rootscript.sh
sudo bash -e ./rootscript.sh
sudo rm rootscript.sh
# Clean up the source tree and record the installed package.
if [ ! -z $URL ]; then cd $SOURCE_DIR && cleanup "$NAME" "$DIRECTORY"; fi
register_installed "$NAME" "$VERSION" "$INSTALLED_LIST"
| true
|
7236895140b1f61d83f536d211968487d562789d
|
Shell
|
open-estuary/appbenchmark
|
/toolset/setup/basic_cmd.sh
|
UTF-8
| 3,804
| 4.5
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Define some basic functions to download and install packages
#
#
#############################################################
# Purpose:
#   To download the file from the specified url address to
#   stdout, printing a keep-alive dot every 30 seconds so CI
#   (travis-ci) does not kill the job after 10min of silence.
#
# Usage:
#   tool_download <url_address> [extra curl options...]
#
############################################################
tool_download () {
	# Start a background process to print a dot every
	# 30 seconds (avoids travis-ci 10min timeout)
	while :;do sleep 30; echo -n .;done &
	# BUG FIX: install the trap *before* the long-running download so the
	# dot-printer is reaped if we receive SIGTERM mid-transfer.  The
	# original registered the trap only after already killing the job,
	# which made it a no-op.
	trap 'kill $! 2>/dev/null' SIGTERM
	curl -sL "$@"
	# Stop the background dot-printer once the download finishes; always
	# return 0 (matching the original, whose last command never failed).
	kill $! 2>/dev/null || true
}
#############################################################
# Purpose:
#   Extract the given tar archive(s) into the current
#   directory, then delete the archive file(s).
#
# Usage:
#   tool_untar <file.tar> [more archives...]
#
############################################################
tool_untar() {
	local archives=("$@")
	echo "Running 'tar xf ${archives[@]}'...please wait"
	tar xf "${archives[@]}"
	echo "Removing compressed tar file"
	# -f keeps rm quiet if extra non-file arguments were passed
	rm -f "${archives[@]}"
}
#############################################################
# Purpose:
#   Extract the given zip archive(s) into the current
#   directory (quietly, overwriting existing files), then
#   delete the archive file(s).
#
# Usage:
#   tool_unzip <file.zip> [more archives...]
#
############################################################
tool_unzip() {
	local archives=("$@")
	echo "Running 'unzip ${archives[@]}'...please wait"
	unzip -o -q "${archives[@]}"
	echo "Removing compressed zip file"
	# -f keeps rm quiet if extra non-file arguments were passed
	rm -f "${archives[@]}"
}
#################################################################
# Purpose:
#   Print 0 if the argument is an existing regular file or
#   directory; otherwise print 1.
# Usage:
#   tool_check_exists <file|directory name>
#################################################################
tool_check_exists() {
	local present=1
	if [ -f $1 ] || [ -d $1 ]; then
		present=0
	fi
	echo $present
}
#################################################################
# Purpose:
#   To get the first (alphabetical) directory name under the
#   specified directory, hidden entries excluded.  Prints an
#   empty line when there are no subdirectories.  Exits the
#   whole script with status 1 when $1 is not a directory.
#
# Usage:
#   tool_get_first_dirname <cur_dir>
#################################################################
tool_get_first_dirname() {
	if [ ! -d "$1" ] ; then
		echo "$1 directory does not exist"
		exit 1
	fi
	local entry
	# Glob instead of parsing `ls`: safe with spaces in names, and the
	# glob never yields "." or "..", so those checks were unnecessary.
	for entry in "$1"/*/ ; do
		# When nothing matches, the literal pattern remains and -d fails.
		if [ -d "${entry}" ]; then
			# Strip the trailing slash, then the parent path.
			entry=${entry%/}
			echo "${entry##*/}"
			return
		fi
	done
	echo ""
}
################################################################################
# Purpose:
#   To get a build directory name.
#   With an argument, the name is derived from the file name prefix
#   ("builddir_" + everything before the first dot); otherwise a random
#   "builddir_<num>" is generated that does not collide with any entry in
#   the current directory (up to 10000 attempts).
#
# Usage:
#   tool_get_build_dir [filename]
###############################################################################
tool_get_build_dir() {
	if [ $# -ge 1 ] ; then
		echo "builddir_""${1%%.*}"
		return
	fi
	local attempt=0
	local candidate="builddir_""${RANDOM}"
	while [ ${attempt} -lt 10000 ] ; do
		local clash=0
		local entry
		# Compare against every visible entry in the current directory.
		for entry in $(ls ./)
		do
			if [ ${entry} == ${candidate} ] ; then
				clash=1
				break
			fi
		done
		if [ ${clash} -eq 0 ] ; then
			break
		fi
		candidate="builddir_""${RANDOM}"
		attempt=$((attempt + 1))
	done
	echo ${candidate}
}
################################################################################
# Purpose:
#   Print "sudo" for non-root users and nothing for root, so callers can
#   prefix privileged commands with $(tool_add_sudo).
#
# Usage:
#   tool_add_sudo
################################################################################
tool_add_sudo() {
	case "$(whoami)" in
		root) echo "" ;;
		*)    echo "sudo" ;;
	esac
}
| true
|
dc28a1bcfc0e9ae56726e7f56386e6eb04891e85
|
Shell
|
albertfiati/perflabs
|
/BashScripts/task4.1.3-a/task4.1.3-a.sh
|
UTF-8
| 1,463
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Benchmark sweep: for every combination of N stressor workers and matrix
# size M, run stress-ng's matrix-product stressor 5 times while sampling
# memory usage with sar.  All progress banners are mirrored into both
# stress.log and sar.log via `tee -a`.
rm *.log
# N = number of concurrent matrix stressor workers.
for n in 2 4 8 12 16;
do
echo "Value of N is " $n | tee -a stress.log sar.log
echo "**************** N START ******************" | tee -a stress.log sar.log
echo " " | tee -a stress.log sar.log
# M = square matrix dimension.
for m in 128 256 512 1024 2048
do
echo "Value of M is " $m | tee -a stress.log sar.log
echo "+++++++++++++++++++ M START +++++++++++++++++++" | tee -a stress.log sar.log
echo " " | tee -a stress.log sar.log
# 5 repetitions per (N, M) combination.
for i in `seq 1 5`;
do
echo " " | tee -a stress.log sar.log
echo "Test run " $i | tee -a stress.log sar.log
echo "<<<<<<<<<<<<<<<<<<< iteration start <<<<<<<<<<<<<<<<<<<<<<<" | tee -a stress.log sar.log
echo " " | tee -a stress.log sar.log
# Run the 10s stress test and the 10x1s memory sampler concurrently;
# sleep 15 gives both background pipelines time to finish before the
# next iteration starts.
stress-ng -v --matrix $n --matrix-method prod --matrix-size $m --metrics-brief --perf -t 10s | tee -a stress.log &
sar -r 1 10 | tee -a sar.log &
sleep 15
echo " " | tee -a stress.log sar.log
echo ">>>>>>>>>>>>>>>>>>> iteration end >>>>>>>>>>>>>>>>>>>>>" | tee -a stress.log sar.log
echo " " | tee -a stress.log sar.log
done
echo " " | tee -a stress.log sar.log
echo "+++++++++++++++++++ M END +++++++++++++++++++" | tee -a stress.log sar.log
echo " " | tee -a stress.log sar.log
echo " " | tee -a stress.log sar.log
done
echo " " | tee -a stress.log sar.log
echo "***************** N END *****************" | tee -a stress.log sar.log
echo " " | tee -a stress.log sar.log
echo " " | tee -a stress.log sar.log
done
| true
|
6081f16251b4532d70e415d7da67f5602f50258f
|
Shell
|
HarrisonJM/CodeExamples
|
/sonarTest/removeOldTars.sh
|
UTF-8
| 1,774
| 4.09375
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# removeOldTars
# Author: Harrison James Marcks
source /home/marcks/scripts/commonCoverageFunctions.sh
# Just deletes old tars in the /home/tester.nds/coverage directory
# Directory whose stale coverage tarballs get cleaned up.
TARORIGINDIR="/mnt/fs01.brs/home/tester.nds/coverage/"
# Script start timestamp (epoch seconds).  NOTE(review): not referenced
# anywhere in this file -- presumably consumed by the sourced common
# functions; confirm before removing.
STARTTIME=$(date +%s)
##############################################################################
# helpFunction - Displays help text to the user; documenting the switches
# Parameters: $1= the code to exit with
##############################################################################
function helpFunction
{
	local exitCode="$1"
	# BUG FIX: the previous usage text described postJobMerge.py and its
	# -t/-w/-v/-f switches -- copy-pasted from another script.  This
	# script only understands -v and -h (see the getopts loop below).
	printf "usage: removeOldTars.sh [-v CINUMBER] [-h]\n\n"
	printf "Coverage tar remover\n\n"
	printf "optional arguments:\n"
	printf "  -v CINUMBER           The CI Number as understood in RTEST\n"
	exit $exitCode
}
# Fast path: honour --help / -h even before getopts runs.
if [[ "$1" == "--help" ]] || [[ $1 == "-h" ]]; then
helpFunction 0
fi
# Parse switches.  NOTE(review): the optstring accepts t/w/f with
# arguments, but only -v and -h have case arms, so t/w/f are silently
# swallowed -- presumably another copy-paste remnant; confirm.
while getopts "t:w:v:f:h" OPTIONS; do
case "$OPTIONS" in
v)
# OPTARGCheck comes from the sourced commonCoverageFunctions.sh.
OPTARGCheck "$OPTARG" "$OPTIONS"
CINUMBER="$OPTARG"
;;
h)
helpFunction 0
;;
\?)
echo "INVALID Option"
echo "Type incrementalGeneration.sh -h for help!"
exit 1
;;
:)
echo "No argument supplied!"
echo "Type incrementalGeneration.sh -h pr --help for help!"
exit 1
;;
esac
done
#######################################
# Strip the CI number out of a full build identifier, e.g.
# "One-NDS-SONAR-19.0.0-CI253" -> "CI253" (the 5th '-'-separated field).
# Globals: CINUMBER (read, then overwritten in place)
#######################################
function prepare
{
	local fifth
	fifth="$(echo $CINUMBER | cut -d'-' -f5)"
	CINUMBER="$fifth"
}
#######################################
# Delete every regular file under $TARORIGINDIR whose path does NOT match
# the current CI number, i.e. keep only the current CI's tarballs.
# Globals: TARORIGINDIR (read), CINUMBER (read, used as an unanchored regex)
#######################################
function gatherTars
{
	# BUG FIX: collect victims in an array so paths containing spaces
	# survive; the original joined them into one space-separated string
	# and the final unquoted `rm` word-split it.
	local -a tarsToRemove=()
	local tars
	for tars in "$TARORIGINDIR"/*; do
		if [[ ! "$tars" =~ $CINUMBER ]] && [[ -f "$tars" ]] ; then
			tarsToRemove+=("$tars")
		fi
	done
	# '--' guards against file names beginning with '-'.
	if [ "${#tarsToRemove[@]}" -gt 0 ]; then
		rm -f -- "${tarsToRemove[@]}"
	fi
}
gatherTars
| true
|
217bcf354fab8e3f4563ae81f43d4ca959a82e9a
|
Shell
|
leusic38/dotfiles
|
/bin/.local/bin/randomImg
|
UTF-8
| 327
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Pick random wallpapers: copy two random horizontal images to
# horizontal_1.jpg / horizontal_2.jpg and one random vertical image to
# vertical.jpg.
#
#replace path with your local paths
i=1
path=/hdd/Images/wallpapers
# BUG FIX: group the -name alternatives -- without the parentheses,
# "-type f" bound only to the *.jpeg branch, so directories (or anything
# else) ending in .jpg could be selected.  Also use read -r and a POSIX
# increment (the original $((i++)) is not valid under #!/bin/sh).
find "$path/horizontal" \( -name "*.jpg" -o -name "*.jpeg" \) -type f | shuf -n 2 | while read -r file; do
	cp "$file" "$path/horizontal_${i}.jpg"
	i=$((i + 1))
done
cp "$(find "$path/vertical" \( -name "*.jpg" -o -name "*.jpeg" \) -type f | shuf -n 1)" "$path/vertical.jpg"
| true
|
0a227a1b8b4315af8d0bbfd3a6d7195ee852329e
|
Shell
|
Cour-de-cassation/label
|
/scripts/version.sh
|
UTF-8
| 960
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Print a "<package>:<version>-<content hash>" line per package.  The hash
# is the first 6 hex chars of a sha256 over every tracked file listed in
# tagfiles.<package>.version, so it changes whenever package content does.
# Classify the host distro as DEB (Debian/Ubuntu) or RPM (Fedora/CentOS)
# from /etc/os-release.
export OS_TYPE=$(cat /etc/os-release | grep -E '^NAME=' | sed 's/^.*debian.*$/DEB/I;s/^.*ubuntu.*$/DEB/I;s/^.*fedora.*$/RPM/I;s/.*centos.*$/RPM/I;')
# Install jq on demand with the distro's package manager.
if ! (which jq > /dev/null 2>&1); then
if [ "${OS_TYPE}" = "DEB" ]; then
sudo apt-get install -yqq jq;
fi;
if [ "${OS_TYPE}" = "RPM" ]; then
sudo yum install -y jq;
fi;
fi
# APP_ID restricts the run to a single package; default is both.
if [ -z "${APP_ID}" ]; then
packages="label-client label-backend";
else
packages=${APP_ID};
fi;
# For each package: optional "<name>:" prefix (only in multi-package
# mode), then the version (package.json via jq, else setup.py's
# __version__, else the latest git tag), then "-<6-char sha256>" computed
# over the sorted contents of all files referenced by
# tagfiles.<package>.version (tar.gz artifacts excluded; LC_ALL=C makes
# the sort order reproducible).
for package in $packages; do
echo $([ -z "${APP_ID}" ] && echo $package:)$( ( [ -f "package.json" ] && (cat package.json | jq -r '.version') ) || ( [ -f "setup.py" ] && ( grep -r __version__ */__init__.py | sed 's/.*=//;s/"//g;s/\s//g' ) ) || (git tag | tail -1) )-$(export LC_COLLATE=C;export LC_ALL=C;cat tagfiles.${package}.version | xargs -I '{}' find {} -type f | egrep -v '(.tar.gz)$' | sort | xargs cat | sha256sum - | sed 's/\(......\).*/\1/')
done;
| true
|
01b3ca59366df1597607b4bb3ea01c186193d4d4
|
Shell
|
sosnus/scripts
|
/misc-legacy/ttn-old/ttn/install-ttnctl-linux-amd64.sh
|
UTF-8
| 247
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch the ttnctl release tarball, unpack it into the current directory,
# and install the binary as /bin/ttnctl.
echo "download, unpack and install ttnctl on platform linux-amd64"
release_url="https://ttnreleases.blob.core.windows.net/release/master/ttnctl-linux-amd64.tar.gz"
target="/bin/ttnctl"
wget -qO- "$release_url" | tar xvz -C ./
mv ./ttnctl-linux-amd64 "$target"
chmod +x "$target"
| true
|
154b941be30a527112f04a402bdd92699bc19443
|
Shell
|
ronakpatel70/wedding-plan
|
/bin/build
|
UTF-8
| 510
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# CI build for the Rails app: install gems, run the test suite against a
# fresh PostgreSQL database, precompile + gzip assets, then package the
# workspace into ../build/<short-sha>.tar.gz for deployment.
set -e -x
# Dump the environment for build-log debugging.
env
gem env
BUNDLE_IGNORE_MESSAGES=true bin/bundle install --deployment --jobs 2 --retry 3 --without development
bin/bundle package
# Stand up PostgreSQL and create the test database.
service postgresql start
su postgres -c "createuser -s root"
createdb -T template0 wedding_expo_test
bin/rails test
RAILS_ENV=production bin/rails assets:precompile
# Pre-compress every asset with zopfli (serves as .gz).
pushd public/assets
zopfli *.*
popd
# Short commit SHA of master names the release artifact.
rel=$(cat .git/refs/heads/master | cut -c 1-7)
mv bin/deploy ../build
rm -rf tmp log/*
tar -czf ../build/$rel.tar.gz *
echo "$rel" > ../build/REL
| true
|
77a401dc4002b18c4cfc8df7b59da169bdf4d62d
|
Shell
|
RDCEP/galaxyTools-quadui
|
/1.3.1/quadui_classic.sh
|
UTF-8
| 1,854
| 3.234375
| 3
|
[] |
no_license
|
#! /bin/bash
# Galaxy wrapper around QuadUI 1.3.1 (AgMIP translation tool): converts
# survey data plus DOME files into DSSAT and APSIM model-input archives,
# then copies the zipped results to the two Galaxy output paths.
inputType=$1
domeType=$2
surveyData=$3
fieldDome=$4
# Argument positions shift by one only when BOTH inputType=="zip" AND
# domeType=="seasonal", because a separate seasonal strategy DOME file
# is then passed as $5.
if [ "$domeType" == "seasonal" ]
then
if [ "$inputType" == "zip" ]
then
seasonalDome=$5
linkage=$6
outputDssat=$7
outputApsim=$8
else
# NOTE(review): for non-zip seasonal input, the seasonal DOME is taken
# from $4 -- the same argument as fieldDome -- and the java invocation
# below passes "1.dome" twice.  Presumably the single .dome file holds
# both overlay and strategy data; confirm against the QuadUI CLI docs.
seasonalDome=$4
linkage=$5
outputDssat=$6
outputApsim=$7
fi
else
seasonalDome=""
linkage=$5
outputDssat=$6
outputApsim=$7
fi
# Echo the resolved parameters into the Galaxy job log.
echo input_type: $inputType
echo dome_type: $domeType
echo survey_data: $surveyData
echo field_overlay_dome: $fieldDome
echo seasonal_strategy_dome: $seasonalDome
echo linkage: $linkage
echo output DSSAT: $outputDssat
echo output APSIM: $outputApsim
#INSTALL_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
INSTALL_DIR=/mnt/galaxyTools/quadui/1.3.1
quadui=quadui-1.3.1.jar
# Symlink the jar into the working directory so java can find it by name.
ln -sf $INSTALL_DIR/$quadui
# Stage the inputs under fixed names and run QuadUI:
#   -s = seasonal strategy mode, -f = field overlay mode, -DA = generate
#   both DSSAT and APSIM outputs into the given directory.
if [ "$inputType" == "zip" ]
then
if [ "$domeType" == "seasonal" ]
then
cp -f $surveyData $PWD/survey.zip
cp -f $fieldDome $PWD/overlay.zip
cp -f $seasonalDome $PWD/strategy.zip
cp -f $linkage $PWD/linkage.alnk
java -jar $quadui -cli -clean -s -DA "survey.zip" "linkage.alnk" "overlay.zip" "strategy.zip" "./"
else
cp -f $surveyData $PWD/survey.zip
cp -f $fieldDome $PWD/overlay.zip
cp -f $linkage $PWD/linkage.alnk
java -jar $quadui -cli -clean -f -DA "survey.zip" "linkage.alnk" "overlay.zip" "./"
fi
else
if [ "$domeType" == "seasonal" ]
then
cp -f $surveyData $PWD/1.aceb
cp -f $fieldDome $PWD/1.dome
cp -f $linkage $PWD/1.alnk
java -jar $quadui -cli -clean -s -DA "1.aceb" "1.alnk" "1.dome" "1.dome" $PWD
else
cp -f $surveyData $PWD/1.aceb
cp -f $fieldDome $PWD/1.dome
cp -f $linkage $PWD/1.alnk
java -jar $quadui -cli -clean -f -DA "1.aceb" "1.alnk" "1.dome" $PWD
fi
fi
rm -f $quadui
# Zip each generated model directory and copy it to its Galaxy output.
cd DSSAT
zip -r -q ../retD.zip *
cd ..
cp retD.zip $outputDssat
cd APSIM
zip -r -q ../retA.zip *
cd ..
cp retA.zip $outputApsim
| true
|
9d44125695202d971533dc896d8e1899ce1b2899
|
Shell
|
deltacodinguk/server-tools
|
/removesite.sh
|
UTF-8
| 1,095
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
# Remove an Apache virtual host: disable the site, delete its config,
# strip its /etc/hosts entry, and delete or retire its document root.
#
# Usage: ./removesite <domain> [-f]
#   -f   delete the site files instead of renaming the folder to *-retired

if [ $# == 0 ]; then
	echo "Usage: ./removesite <domain> [-f]"
	exit 1;
fi

if [ "$(whoami)" != 'root' ]; then
	echo "Sorry, you must be root to remove a virtual host."
	exit 1;
fi

# Remove the site from apache
a2dissite "$1"
service apache2 reload

# Remove the site config.  ROBUSTNESS FIX: "$1" is quoted throughout so a
# domain containing shell metacharacters cannot expand inside these
# destructive rm/mv commands.
rm -rf "/etc/apache2/sites-available/$1.conf"

# Remove the line from hosts.  NOTE(review): the dots in the domain act
# as regex wildcards here; escape them if exact matching is required.
sed -i "/www.$1/ d" /etc/hosts

# Delete the site files (or rename the folder)
# Look for the delete option
if [ "$2" != "-f" ]; then
	echo "Retaining website files"
	if [ ! -d "/var/www/vhosts/$1-retired" ]; then
		# Rename the virtual host folder to indicate it's retired
		mv "/var/www/vhosts/$1" "/var/www/vhosts/$1-retired"
		echo "The file folder has been renamed to end with -retired."
	else
		echo "Could not rename the folder - /var/www/vhosts/$1-retired already exists"
	fi
else
	rm -rf "/var/www/vhosts/$1"
	echo "The file folder has been deleted."
fi

echo "$1 has been removed from Apache's enabled sites"
echo "$1 has been removed from the hosts file"
echo "Don't forget to remove the database and site files."
| true
|
1ccac9b11116d3613513bfb1ac7e8d3a7cc47033
|
Shell
|
DF104/EasyUbuntu
|
/jdkenv1.8.sh
|
UTF-8
| 932
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install Oracle JDK 8u171 under /usr/lib/jvm/jdk1.8.0 and export
# JAVA_HOME/CLASSPATH/PATH/JRE_HOME via /etc/profile.
apt-get update && apt-get install curl -y && apt-get clean
# Cache-busting timestamp appended to the Oracle cookie's s_nr field.
TIME=$(date +%s)
TIME="$TIME"123
FILENAME="jdk8.tgz"
URL="http://download.oracle.com/otn-pub/java/jdk/8u171-b11/512cd62ec5174c3487ac17c61aaa89e8/jdk-8u171-linux-x64.tar.gz"
# The oraclelicense cookie accepts Oracle's licence so the download is
# not redirected to the licence page.  NOTE(review): this legacy OTN URL
# may no longer be served by Oracle -- verify before relying on it.
curl -o ${FILENAME} -H "Cookie: s_cc=true; oraclelicense=accept-securebackup-cookie; s_nr=$TIME; gpw_e24=http%3A%2F%2Fwww.oracle.com%2Ftechnetwork%2Fjava%2Fjavase%2Fdownloads%2Fjdk8-downloads-2133151.html; s_sq=%5B%5BB%5D%5D" -H "User-Agent: Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -O -L ${URL}
tar zxf $FILENAME
mkdir /usr/lib/jvm
mv jdk1.8.0* /usr/lib/jvm/jdk1.8.0
# Append the environment exports to /etc/profile (escaped \$ so the
# variables are expanded at login, not now).
cat >> /etc/profile << EOF
export JAVA_HOME=/usr/lib/jvm/jdk1.8.0
export CLASSPATH=.:\$JAVA_HOME/lib:\$JAVA_HOME/jre/lib:\$CLASSPATH
export PATH=\$JAVA_HOME/bin:\$JAVA_HOME/jre/bin:\$PATH
export JRE_HOME=\$JAVA_HOME/jre
EOF
source /etc/profile
| true
|
2573376cc1d617b74fb74f1cf2e1e98525c50c3e
|
Shell
|
kjseefried/cbsd-scenes
|
/xrescuebsdkms/bin/xconfig
|
UTF-8
| 3,118
| 2.875
| 3
|
[] |
no_license
|
#!/bin/sh
#v9.0.0
# CBSD "xconfig" scenario: interactive X.org configuration helper for a
# jail.  Expects the CBSD environment (workdir, subr, inventory, err,
# init) to be provided by the caller.
globalconf="${workdir}/cbsd.conf";
MYOPTARG="verbose"
MYDESC="X Configuration tools"
[ -f ${globalconf} ] || err 1 "no such conf file";
. ${globalconf}
. ${subr}
. ${inventory}
# CBSD argument parsing; populates jname/jailsysdir/path/data etc.
init $*
# cut "Files" section for inserting own list of dirs
# $1 - source xorg.conf
# $2 - dst xorg.conf
# Copies the per-jail xorg.conf-patch (which supplies a replacement
# "Files" section) into $2, then appends $1 with its original
# Section "Files" ... EndSection block stripped.  Pauses for a keypress
# and returns 1 when the patch file is missing.
patch_fonts()
{
[ -f "$1" ] || return 1
PARTCONF="$jailsysdir/$jname/conf/xorg.conf-patch"
[ -f "${PARTCONF}" ] || {
echo "No such patch ${PARTCONF}"
read p
return 1
}
cp ${PARTCONF} $2
echo >> $2
# NOTE(review): the lowercase 'begin' is NOT awk's BEGIN block -- it is
# an (always-false) uninitialized-variable pattern, so that action never
# runs; the script works only because awk variables default to 0 anyway.
cat $1 | awk 'begin {flag = 0 }; $0 ~ /^Section \"Files\"/ {flag =1 }; $0 ~ /EndSection$/ { if (flag==1) {flag=0;next;} else flag =0; }; {if (flag == 0) {print $0}} ' >> $2
}
# cut "Serverflags"" section for inserting own
# $1 - source xorg.conf
# $2 - dst xorg.conf
# Same pattern as patch_fonts(), but replaces the Section "ServerFlags"
# block using the xorg.conf-patch2 fragment.
patch_flags()
{
[ -f "$1" ] || return 1
PARTCONF="$jailsysdir/$jname/conf/xorg.conf-patch2"
[ -f "${PARTCONF}" ] || {
echo "No such patch ${PARTCONF}"
read p
return 1
}
cp ${PARTCONF} $2
echo >> $2
# NOTE(review): lowercase 'begin' is a no-op pattern, not awk's BEGIN
# block (flag just defaults to 0) -- see patch_fonts().
cat $1 | awk 'begin {flag = 0 }; $0 ~ /^Section \"ServerFlags\"/ {flag =1 }; $0 ~ /EndSection$/ { if (flag==1) {flag=0;next;} else flag =0; }; {if (flag == 0) {print $0}} ' >> $2
}
# Generate a fresh xorg.conf for the jail: mount the jail filesystems if
# needed, run `X -configure` inside a chroot, patch the generated config's
# Files and ServerFlags sections, install it as etc/X11/xorg.conf (backing
# up any previous one), then unmount whatever this function mounted.
# Returns 1 (after a keypress) when X -configure fails.
new_conf()
{
. ${system}
# amount=1 means the jail path was already mounted by someone else, so
# we must not unmount it on the way out.
local amount=0
# Mount fstab if not
[ $baserw -eq 1 ] && path=$data
[ ! -d "${path}" ] && mkdir -p ${path}
if is_mounted "${path}"; then
amount=1
fi
if [ $amount -eq 0 ]; then
mountbase
mountfstab
mount -t devfs devfs ${path}/dev
fi
# Run the X auto-configuration inside the jail; capture its output and
# exit status (the `echo $?` inside the substitution).
TMPLOG=`mktemp ${ftmpdir}/xconf.XXX`
A=`chroot ${path} X -configure >${TMPLOG} 2>&1; echo $?`
if [ "${A}" != "0" ]; then
echo "Error: `cat ${TMPLOG}`"
echo "Please any key"
read p
# Undo our mounts before bailing out.
[ $amount -eq 0 ] && {
umount ${path}/dev
cbsd unmountfstab jroot=${path} fstab=${mount_fstab}
unmountbase
}
return 1;
fi
# Keep a backup of any pre-existing config.
if [ -f "${path}/etc/X11/xorg.conf" -a -f "${path}/root/xorg.conf.new" ]; then
mv ${path}/etc/X11/xorg.conf ${path}/etc/X11/xorg.conf-bak
fi
# Apply the Files and ServerFlags patches, landing the final config in
# etc/X11/xorg.conf.
if [ -f "${path}/root/xorg.conf.new" ]; then
patch_fonts ${path}/root/xorg.conf.new ${path}/root/xorg.conf.new1
patch_flags ${path}/root/xorg.conf.new1 ${path}/etc/X11/xorg.conf
fi
#Unmount
if [ $amount -eq 0 ]; then
umount ${path}/dev
cbsd unmountfstab fstab=${mount_fstab} jroot=${path}
unmountbase
fi
}
# Open the jail's xorg.conf in vi, or pause with a notice when the file
# has not been generated yet.
edit_conf()
{
	local conf="${data}/etc/X11/xorg.conf"
	if [ ! -f "${conf}" ]; then
		echo "No such xorg.conf. Please create them"
		read p
		return
	fi
	vi ${conf}
}
# Main interactive menu loop: present Auto/Edit/Quit choices via dialog(1)
# and dispatch to new_conf / edit_conf until the user quits or cancels.
myconf()
{
DIALOG=${DIALOG=/usr/bin/dialog}
# dialog writes the selected item to stderr, redirected into $tempfile;
# the trap removes it on exit and on HUP/INT/QUIT.
tempfile=`mktemp ${ftmpdir}/xconf.XXXX`
trap "rm -f ${tempfile}" 0 1 2 3
# NOTE(review): `[ $cul8r ]` tests for a non-empty string, so with
# cul8r=0 this is effectively an infinite loop; the only exits are the
# explicit `exit` calls below.
cul8r=0
while [ $cul8r ]; do
$DIALOG --clear --title "XConf" \
--menu "Choose an option\n" 10 55 4 \
"Auto" "Create or regenerate new xorg.conf file" \
"Edit" "Edit xorg.conf" \
"Quit" "Quit" 2> $tempfile
retval=$?
choice=`cat ${tempfile}`
rm -f ${tempfile}
# dialog exit codes: 0 = OK, 1 = Cancel, 255 = ESC.
case $retval in
0)
[ $choice = "Auto" ] && new_conf
[ $choice = "Edit" ] && edit_conf
[ $choice = "Quit" ] && exit
;;
1)
echo "Cancel pressed."
exit;;
255)
[ -z "$choice" ] || echo $choice ;
echo "ESC pressed."
exit;;
esac
done
}
| true
|
7380335c84054d6efba0401fccad0665c2845e8c
|
Shell
|
shinemacro/origin-tools
|
/oc_project_search.sh
|
UTF-8
| 548
| 3.71875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Searches for an object by name in all the OpenShift cluster projects
OC="${HOME}/Go/src/github.com/openshift/origin/_output/local/bin/linux/amd64/oc"

if [[ $1 != "" ]]; then
	SEARCH_STRING=$1
else
	echo "Usage: $0 <search string>"
	# BUG FIX: signal the usage error to the caller (a bare `exit`
	# previously returned success).
	exit 1
fi

#readarray -t projects < <($OC projects | grep -v 'You have access to the following')
readarray -t projects < <($OC projects)
shift
# "${projects[@]:2}" skips the two header lines `oc projects` prints
# before the actual project list.
for i in "${projects[@]:2}"; do
	echo "Checking for ${SEARCH_STRING} in project ${i}"
	# Quote the project and the pattern, and use `--` so a search string
	# beginning with '-' is not parsed as a grep option.
	$OC get all -n "$i" | grep -i -- "$SEARCH_STRING"
done
| true
|
1e8a499eb255ab94b730476eab6751d89de8dd58
|
Shell
|
Gerrad1011/guessing_game
|
/guessinggame.sh
|
UTF-8
| 482
| 3.859375
| 4
|
[] |
no_license
|
#! /bin/bash
# Interactive guessing game: the player must guess how many entries
# (including hidden files) the current directory contains.
# Reads guesses from stdin, prints prompts/feedback on stdout, and loops
# until the guess is correct or input is exhausted.
function run(){
	local num_files guess
	num_files=$(ls -A -1 | wc -l)
	while true
	do
		echo "Enter the number of files in the directory"
		# ROBUSTNESS FIX: stop cleanly on EOF instead of spinning
		# forever re-reading an exhausted stdin; -r keeps backslashes.
		read -r guess || break
		# Accept only non-empty all-digit input.  BUG FIX: the original
		# regex ('[^0-9]+') let the empty string through, which was then
		# silently treated as 0 by the arithmetic comparisons.
		if ! [[ $guess =~ ^[0-9]+$ ]]
		then
			echo "Only numbers allowed!! Try Again"
			continue
		fi
		if [[ $guess -eq $num_files ]]
		then
			echo "Congratulations you guessed it right its $guess"
			break
		elif [[ $guess -lt $num_files ]]
		then
			echo "The Guess Was Too Low Guess Again"
		else
			echo "The Guess Was Too High Guess Again"
		fi
	done
}
run
| true
|
36f5a8c679edf8462236f95bc422b7d9072df753
|
Shell
|
fasterthanlime/coke-output-example
|
/utils/apt-status.sh
|
UTF-8
| 425
| 3.34375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
## apt is a front-end to dpkg, which installs .deb packages
## on Debian-based distributions (Debian, Ubuntu, ...)
##
## apt-get is standard (as opposed to aptitude), and packages
## can be installed via `sudo apt-get install PKG`.
##
## Prints INSTALLED, NOT_INSTALLED or ERROR for the package named in $1,
## based on dpkg -s exiting 0, 1, or anything else respectively.

# ROBUSTNESS FIX: quote "$1" so package names are passed intact and an
# empty/missing argument is surfaced by dpkg instead of silently dropped.
dpkg -s "$1" &> /dev/null; INSTALLED=$?
if [[ $INSTALLED -eq 0 ]]; then
	echo INSTALLED
elif [[ $INSTALLED -eq 1 ]]; then
	echo NOT_INSTALLED
else
	echo ERROR
fi
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.