blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4cd6040af8477c726cbc7d65bc275f7f2b5b50ca | Shell | glensk/dotfiles | /thermodynamics/getGibbsEnergyOfFormation.sh | UTF-8 | 13,355 | 3.296875 | 3 | [] | no_license | #!/bin/bash
#-----set parameters and paths---------------------------------
out=no #yes #(print additional info for debugging when yes)
nPar=4 # number of needed coefficients in each fit
fits="EVinet EMurn EBirch ECubic"
#--------------------------------------------------------------
# Derive this script's directory and file name from BASH_SOURCE; the second
# sed falls back to "." when the BASH_SOURCE= line carries no usable path.
path=`set | grep BASH_SOURCE | sed 's|.*\"\(.*\)/[^/]*\".*|\1|' | sed '/BASH_SOURCE=/s/.*/\./'`;[ "$out" = "yes" ] && echo path: $path
script=`set | grep BASH_SOURCE | sed 's|.*\".*/\([^/]*\)\".*|\1|' | sed '/BASH_SOURCE=/s/.*/\./'`;[ "$out" = "yes" ] && echo script: $script
options=$*; . $path/utilities/functions.include; checkOptions "-h -help -v";[ "$out" = "yes" ] && echo options: $options
# macOS sed needs `sed -i '' -e`; GNU sed takes a bare `-i`. $add is spliced
# into the `sed -i $add` call further below.
lom=`getlinuxormac`;[ "$lom" = "Linux" ] && add="";[ "$lom" = "mac" ] && add="'' -e"
# -h: short usage text (usage/printOptions come from functions.include)
h=`getOption -h`
if [ $h = True ]; then
usage $script
printOptions
exit
fi
# detailed help
help=`getOption -help`
if [ $help = True ]; then
details $script
echo2 " the script calculates the Gibbs energy of defect formation from a bulk and defect free energy surface"
echo2 " the output consists of a Gibbs energy surface as a function of pressure and temperature that can be" \
" directly used for determination of thermodynamic properties employing the getThermodynamics.sh script" \
" additionally Gibbs energy files at given pressure, volume, and temperature are written for plotting" \
" and checking purposes; the default values for these files are P=0,V=Veq,T=0K and T=Tmelt"
echo2 " an optional \"param\" file can be used to set different P,V,T values" \
" in \"param\" a line starts with the letter P or V or T standing for pressure, volume, temperature " \
" the letter is followed by one/more values separated by blanks at which FForm should be calculated (GPa,Ang^3,K)" \
" the volume refers to the number of atoms in the T=0K energy fit" \
" to calculate at V=Veq put -1"
echo2 " an optional \"volume_range\" or \"volume_range_atom\" file can exist with \"Vmin Vmax nV\" in the first line" \
" Vmin and Vmax in Ang^3 and refering to the # of atoms in T=0K energy fit (volume_range) or per atom (volume_range_atom)" \
" they give the range and mesh (nV) at which FForm at const T is calculated" \
" and also where it is fitted (for further processing in getThermodynamics.sh)" \
" if the file \"volume_range\" does not exist, the defaults are: Vmin=0.97*Veq, Vmax=1.12*Veq, nV=100"
echo2 " if only T=0K fits are supplied an additional \"temperature_range\" file must exists" \
" it needs to contain \"Tmin Tmax Tstep\" (all in K) in the first line" \
" they give the temperature range at which FForm is printed"
echo2 " input files containing the (free) energies have the following naming convention:" \
" defect cell: \033[1mE\033[0mxxx\033[1m_d_\033[0mNNN \033[1mF\033[0myyy\033[1m_d_\033[0mnn1 \033[1mF\033[0myyy\033[1m_d_\033[0mnn2 ..." \
" bulk cell: \033[1mE\033[0mxxx\033[1m_b_\033[0mMMM \033[1mF\033[0myyy\033[1m_b_\033[0mmm1 \033[1mF\033[0myyy\033[1m_b_\033[0mmm2 ..." \
" where xxx stands for one of the T=0K parametrizations: Vinet, Murn, Birch" \
" yyy are arbitrary names (e.g., qh, el, ah, mag)" \
" NNN is the number of atoms in the T=0K parametrization of the defect cell" \
" MMM is the number of atoms in the T=0K parametrization of the bulk cell" \
" nni is the number of atoms in the i'th free energy parametrization of the defect cell" \
" mmi is the number of atoms in the i'th free energy parametrization of the bulk cell" \
" it must hold that NNN+-1=MMM and nni+-1=mmi (+ for vacancy, - for interstitial)" \
" further NNN>=nni and MMM>=mmi"
echo2 " Example (3x3x3/2x2x2 vacancy fcc cell): EVinet_d_107 Fqh_d_31 Fel_d_107 Fah_d_31 Fmag_d_31" \
" EVinet_b_108 Fqh_b_32 Fel_b_108 Fah_b_32 Fmag_b_32"
echo2 " the format of the T=0K fits is: E0 Veq BM BMder (EVinet,EMurn,EBirch)" \
" E0 Veq BM 3rdOrdCoef (ECubic) "
echo2 " E0 and V0 in first and second column are important, since this is assumed later in the fortran program"
exit
fi
echo; echo -n " checking input ... "
rm -f _bul_F* _bul_T _bul_input
rm -f _def_F* _def_T _def_input
rm -fr output/
string=""
# check if input files are existing
nd=`ls -1 *_d_* 2> /dev/null | wc -l | sed 's|[ ]*||'`
nb=`ls -1 *_b_* 2> /dev/null | wc -l | sed 's|[ ]*||'`
if [ "$nd" == 0 -o "$nd" != "$nb" ]; then error "number of input files wrong"; fi
if [ $nd == 1 ]; then
if [ ! -e temperature_range ]; then error "no free energy contributions and no temperature_range file"; fi
string="$string temperature_range"
cat temperature_range | awk '{for (i=$1;i<=$2;i=i+$3) print i}' > _def_T; cp _def_T _bul_T
cat temperature_range | awk '{for (i=$1;i<=$2;i=i+$3) print 0}' > _def_F__1
cat temperature_range | awk '{for (i=$1;i<=$2;i=i+$3) print 0}' > _bul_F__1
fi
# check if every defect contribution has a corresponding bulk contribution
def=`ls -1 *_d_*`
for i in $def; do
bul=`echo $i | sed 's|\(.*\)_d_.*|\1|'`
nAtv=` echo $i | sed 's|.*_d_\(.*\)|\1|' | awk '{print $1+1}'`
nAtdv=`echo $i | sed 's|.*_d_\(.*\)|\1|' | awk '{print $1+2}'`
nAti=` echo $i | sed 's|.*_d_\(.*\)|\1|' | awk '{print $1-1}'`
if [ ! -e $bul\_b_$nAtv -a ! -e $bul\_b_$nAtdv -a ! -e $bul\_b_$nAti ]; then error "no matching bulk contribution to $i"; fi
done
# we do not support yet magnetization files
M=`ls -1 M*_{b,d}_* 2> /dev/null | wc -l | sed 's|[ ]*||'`
if [ $M != 0 ]; then error "magnetization files (M*_{b,d}_*) are currently not supported"; fi
# check T=0K parametrization
dfits=`echo $fits | xargs -n1 | awk '{print $1"_d_*"}'`
nE=`ls -1 $dfits 2> /dev/null | wc -l | sed 's|[ ]*||'`
if [ "$nE" == 0 ]; then error "no supported Efit existing (supported: $fits)"; fi
if [ "$nE" != 1 ]; then error "too many Efits exisiting (`ls $fits | xargs`)"; fi
# check number of coefficients in defect T=0K parametrization
Edfit=`ls -1 $dfits 2> /dev/null`
EdefAtoms=`echo $Edfit | sed 's|E.*_d_\(.*\)|\1|'`
nP=`head -n1 $Edfit | xargs -n1 | wc -l | sed 's|[ ]*||'`
if [ "$nP" != "$nPar" ]; then error "wrong # of coefficients in $Edfit"; fi
# check number of coefficients in bulk T=0K parametrization
bfits=`echo $fits | xargs -n1 | awk '{print $1"_b_*"}'`
Ebfit=`ls -1 $bfits 2> /dev/null`
EbulkAtoms=`echo $Ebfit | sed 's|E.*_b_\(.*\)|\1|'`
nP=`head -n1 $Ebfit | xargs -n1 | wc -l | sed 's|[ ]*||'`
if [ "$nP" != "$nPar" ]; then error "wrong # of coefficients in $Ebfit"; fi
# check if temperatures from various free energy contributions match and if temperature step is ok
if [ $nd != 1 ]; then
Ffit=`ls F*_d_* F*_b_*`
for f in $Ffit; do wc -l $f | awk '{print $1}' > _$f; awk '{print $1}' $f >> _$f; done
ok=`paste _F* | awk 'BEGIN{ok="true";step="ok"};
NR==2{t=$1}
{for (i=2;i<=NF;i++) if ($i!=$(i-1)) ok="false"}
NR>2{if ($1-t>2||$1-t<=0) step="wrong"; t=$1}
END{if (ok=="false") print ok; else print step}'`
if [ "$ok" = false ]; then error "mismatch between the Ffits (number of T points or Tstep)"; fi
if [ "$ok" = wrong ]; then error "error in Tstep (larger than 2 K, zero or negative)"; fi
mv _$f _def_T; sed -i $add '1d' _def_T; rm _F*; cp _def_T _bul_T
fi
nT=`wc -l _def_T | awk '{print $1}'`;
# check and prepare parameters file
# Defaults: P in GPa (0 and ~atmospheric), V=-1 means "use Veq",
# T defaults to first and last temperature of the grid.
P="0 0.0001"; V=-1; T="`head -n1 _def_T` `tail -n1 _def_T`"
if [ -e param ]; then
string="$string param"
P=`awk 'BEGIN{p=0};$1=="P"{p=1;for (i=2;i<=NF;i++) printf("%f ",$i)};END{if (p==0) print '"$P"'}' param`
V=`awk 'BEGIN{p=0};$1=="V"{p=1;for (i=2;i<=NF;i++) printf("%f ",$i)};END{if (p==0) print '"$V"'}' param`
T=`awk 'BEGIN{p=0};$1=="T"{p=1;for (i=2;i<=NF;i++) printf("%f ",$i)};END{if (p==0) print '"$T"'}' param`
fi
nnP=`echo $P | xargs -n1 | wc -l | sed 's|[ ]*||'`
nnV=`echo $V | xargs -n1 | wc -l | sed 's|[ ]*||'`
nnT=`echo $T | xargs -n1 | wc -l | sed 's|[ ]*||'`
echo $nnP $nnV $nnT > _additional_input;
echo $P >> _additional_input; echo $V >> _additional_input; echo $T >> _additional_input;
# first part of _def_input file
if [ $nd == 1 ]; then nF=1; else nF=`ls -1 F*_d_* 2> /dev/null | wc -l | sed 's|[ ]*||'`; fi
Etype=`echo $Edfit | sed 's|\(.*\)_d_.*|\1|'`
#
# second last 0 is for number of defect Gibbs energy files which only applies to perfect bulk calculation
# last 0 is is for nr of magnetization files (currently not supported)
echo defect 0 $nT $Etype $nPar $nF 0 0 > _def_input
cp _def_input _bul_input # first line the same
cat $Edfit >> _def_input
cat $Ebfit >> _bul_input
if [ $nd != 1 ]; then
# check for consistency in free energy contributions (number of columns, i.e., coefficients)
# copy them to _def_F_* files without temperatures for fortran program input
# add the number of columns, i.e., of coefficients, to _def_input
Fdfit=`ls -1 F*_d_*`; c=1
rm -f defAtoms
for f in $Fdfit; do
awk 'NR==1{n=NF;error=0};
{for (i=2;i<=NF;i++) printf("%s ",$i); printf("\n"); if (NF!=n) error=1;};
END{if (error==1) print "ERROR"}' $f > _def_F__$c;
if [ "`tail -n1 _def_F__$c`" == "ERROR" ]; then error "inconsistency in columns of $f"; fi
nV=`head -n1 _def_F__$c | awk '{print NF}'`
echo $nV >> _def_input
nAt=`echo $f | sed 's|F.*_d_\(.*\)|\1|'`
echo $nAt >> defAtoms
c=`expr $c + 1`;
done
# the same as above but for bulk
rm -f bulkAtoms
Fbfit=`ls -1 F*_b_*`; c=1
for f in $Fbfit; do
awk 'NR==1{n=NF;error=0};
{for (i=2;i<=NF;i++) printf("%s ",$i); printf("\n"); if (NF!=n) error=1;};
END{if (error==1) print "ERROR"}' $f > _bul_F__$c;
if [ "`tail -n1 _bul_F__$c`" == "ERROR" ]; then error "inconsistency in columns of $f"; fi
nV=`head -n1 _bul_F__$c | awk '{print NF}'`
echo $nV >> _bul_input
nAt=`echo $f | sed 's|F.*_b_\(.*\)|\1|'`
echo $nAt >> bulkAtoms
c=`expr $c + 1`;
done
else
echo 1 >> _bul_input
echo 1 >> _def_input
fi
if [ -e volume_range -a -e volume_range_atom ]; then error "delete one of volume_range or volume_range_atom"; fi
# volume_range is padded with -1 up to 4 fields for the fortran reader;
# volume_range_atom values are first scaled by the bulk atom count.
if [ -e volume_range ]; then
string="$string volume_range"
awk '{for (i=1;i<=NF;i++) printf("%s ",$i); for (i=NF+1;i<=4;i++) printf("-1 ")}; END{printf("\n")}' \
volume_range >> _additional_input
else
if [ -e volume_range_atom ]; then
string="$string volume_range_atom"
awk '{for (i=1;i<=NF;i++) if(i<3) printf("%s ",'$EbulkAtoms'*$i); else printf("%s ",$i); for (i=NF+1;i<=4;i++) printf("-1 ")};END{printf("\n")}' \
volume_range_atom >> _additional_input
else
echo -1 -1 -1 -1 >> _additional_input
fi
fi
echo $EbulkAtoms $EdefAtoms >> _additional_input
if [ $nd != 1 ]; then
# check if EbulkAtoms > bulkAtoms and EdefAtoms > defAtoms
ok=`awk 'BEGIN{s="ok"};$1>'$EbulkAtoms'{s="error"};END{print s}' bulkAtoms`
if [ $ok == error ]; then error "atoms of some free energy bulk paremtrization are larger than for T=0K fit"; fi
ok=`awk 'BEGIN{s="ok"};$1>'$EdefAtoms'{s="error"};END{print s}' defAtoms`
if [ $ok == error ]; then error "atoms of some free energy defect paremtrization are larger than for T=0K fit"; fi
cat bulkAtoms | xargs >> _additional_input
cat defAtoms | xargs >> _additional_input
rm bulkAtoms defAtoms
else
echo $EbulkAtoms >> _additional_input
echo $EdefAtoms >> _additional_input
fi
echo "input ok"; echo
if [ -n "$string" ]; then echo -e " \033[1m$string\033[0m file(s) read in"; echo; fi
echo " contributions read in:"
echo " defect: `ls E*_d_* F*_d_* 2> /dev/null | xargs`"
echo " bulk: `ls E*_b_* F*_b_* 2> /dev/null | xargs`"
echo
mkdir output
# All _def_*/_bul_*/_additional_input files are consumed by the fortran binary.
$path/fortran/getGibbsEnergyOfFormation.x
| true |
71ec7b6fcc1d4dc7cccf208ca8f99a1c4875ea63 | Shell | mafenet/Blatand-Lyd | /bluetooth.sh | UTF-8 | 876 | 4 | 4 | [] | no_license | #!/bin/bash
#
# Connect to Bluetooth audio device and change sink accordingly
# variables: list of sinks and MAC address of device
#
# Toggle behaviour: if only the builtin sink exists we are not connected yet,
# so connect and switch the default sink; otherwise disconnect the device.
IFS=$'\n'
# Word-splitting on newlines is intentional here: one array entry per sink line.
sinks=( $(pactl list sinks short) )
# MAC address of the (first) paired device reported by bluetoothctl.
device=$(echo "devices" | bluetoothctl | grep ^Device | cut -f2 -d" ")
# PulseAudio sink names use '_' where the MAC has ':'.
device_underscore=${device//:/_}

# if not yet connected
if [[ ${#sinks[@]} -eq 1 ]]; then
    # power on bluetooth and connect to device. Executed twice because one
    # invocation alone does not always take effect.
    printf 'power on\nconnect %s\n' "$device" | bluetoothctl
    sleep 10
    printf 'power on\nconnect %s\n' "$device" | bluetoothctl
    sleep 30
    # set default sink and save for later use
    sink=$(pactl list sinks short | grep "$device_underscore" | awk '{print $1}')
    if [[ -n "$sink" ]]; then
        pactl set-default-sink "$sink"
        echo "sink=$sink" > audio_sink
    else
        # Guard: without this, an empty $sink made set-default-sink fail obscurely.
        echo "Bluetooth sink for $device did not appear" >&2
        exit 1
    fi
else
    # disconnect device when script is run again
    printf 'disconnect %s\npower off\n' "$device" | bluetoothctl
fi
| true |
40113e1bf728a9db805ae0050a1fafeb9eb454dc | Shell | ivoarch/.centfiles | /.zsh/rc/directory.zsh | UTF-8 | 421 | 2.78125 | 3 | [] | no_license | # Most of these options are used for quick navigation.
# zsh directory-navigation options (sourced from an interactive rc file).
setopt autocd # change to dirs without cd
setopt pushd_to_home # Push to home directory when no argument is given.
setopt auto_pushd # Push the old directory onto the stack on cd.
setopt auto_name_dirs # Auto add variable-stored paths to ~ list.
setopt pushd_ignore_dups # Do not store duplicates in the stack.
| true |
b0704d1e96d2d4f13815eb73df9f929fc1eabc06 | Shell | XDestination/kafkanetes | /kafka-start.sh | UTF-8 | 289 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -e
# Render the Kafka server config from its template, then start the broker.
# Usage: kafka-start.sh ADVERTISED_HOST_NAME ZOOKEEPER_CONNECT
# ${n:?} aborts with a usage hint instead of silently substituting nothing.
ADVERTISED_HOST_NAME=${1:?usage: $0 ADVERTISED_HOST_NAME ZOOKEEPER_CONNECT}
ZOOKEEPER_CONNECT=${2:?usage: $0 ADVERTISED_HOST_NAME ZOOKEEPER_CONNECT}

# Single sed pass (no useless cat); '|' as the s-command delimiter so values
# containing '/' (e.g. zookeeper chroot paths like host:2181/kafka) still work.
sed -e "s|ADVERTISED_HOST_NAME|$ADVERTISED_HOST_NAME|g" \
    -e "s|ZOOKEEPER_CONNECT|$ZOOKEEPER_CONNECT|g" \
    config/kafka.server.config.template > config/server.properties

bin/kafka-server-start.sh config/server.properties
| true |
279cb5c41279394522f2a9197b7ce6d14425c578 | Shell | MomoKewe/Test_Technique | /script.sh | UTF-8 | 2,919 | 3.65625 | 4 | [] | no_license | ################################ FULL BACKUP################################
#!/bin/bash
################################################################
##
## MySQL Database Backup Script
## Written By: BA Mouhamadou Moustapha
##
################################################################
export PATH=/bin:/usr/bin:/usr/local/bin
# Daily backup directory name, e.g. 07Mar2021.
TODAY=`date +"%d%b%Y"`
################################################################
################## Update below values ########################
DB_BACKUP_PATH='/backup/dbbackup'
MYSQL_HOST='localhost'
MYSQL_PORT='3306'
MYSQL_USER='root'
# NOTE(review): the password is passed on the mysqldump command line below,
# which is visible to other users via ps; consider a ~/.my.cnf instead.
MYSQL_PASSWORD='mysecret'
DATABASE_NAME='mydb'
BACKUP_RETAIN_DAYS=30 ## Number of days to keep local backup copy
#################################################################
mkdir -p ${DB_BACKUP_PATH}/${TODAY}
echo "Backup started for database - ${DATABASE_NAME}"
# Dump and compress in one pipeline; output name is <db>-<date>.sql.gz.
mysqldump -h ${MYSQL_HOST} \
-P ${MYSQL_PORT} \
-u ${MYSQL_USER} \
-p${MYSQL_PASSWORD} \
${DATABASE_NAME} | gzip > ${DB_BACKUP_PATH}/${TODAY}/${DATABASE_NAME}-${TODAY}.sql.gz
# NOTE(review): $? here is gzip's status, not mysqldump's; a failed dump with
# a working gzip still reports success (would need pipefail or PIPESTATUS).
if [ $? -eq 0 ]; then
echo "Database backup successfully completed"
else
echo "Error found during backup"
exit 1
fi
##### Remove backups older than {BACKUP_RETAIN_DAYS} days #####
DBDELDATE=`date +"%d%b%Y" --date="${BACKUP_RETAIN_DAYS} days ago"`
if [ ! -z ${DB_BACKUP_PATH} ]; then
cd ${DB_BACKUP_PATH}
if [ ! -z ${DBDELDATE} ] && [ -d ${DBDELDATE} ]; then
rm -rf ${DBDELDATE}
fi
fi
#################Teste de ping sur une machine étrangère########################
# Retry the remote host up to 50 times; on the first successful ping copy the
# backup over and stop. Only after every attempt failed do we mail an alert.
# BUGFIX: the original `if[$pingReturn -eq 0]` was a syntax error, and both
# branches called exit so the 50-iteration loop never retried.
# NOTE(review): HOST/TIMES/WAITFOR were never defined in the original —
# confirm the real target host and ping cadence.
HOST=${HOST:-remote-host}
TIMES=${TIMES:-1}
WAITFOR=${WAITFOR:-1}

i=0
while [ "$i" -ne 50 ]; do
    if ping "$HOST" -c "$TIMES" -i "$WAITFOR" &> /dev/null; then
        # Si la machine est joignable
        echo "La machine est joignable avec succes!!!"
        # BUGFIX: copy the file the backup step actually wrote
        # (${DB_BACKUP_PATH}/${TODAY}/${DATABASE_NAME}-${TODAY}.sql.gz),
        # not a non-existent ${TODAY}.sql.gz in the current directory.
        scp "${DB_BACKUP_PATH}/${TODAY}/${DATABASE_NAME}-${TODAY}.sql.gz" root@IP-Address:/home/root
        exit 0
    fi
    i=$((i + 1))
done

# Si la machine est injoignable
echo "La machine est injoignable" | mail -s "Machine Down" myadress@xxxx.com
exit 1
###################################ANSIBLE#########################################
# Install nginx, point its docroot at /usr/share/nginx/html, drop a test page,
# verify with curl, and create a 'wordly' user with a greeting file.
sudo apt-get update
sudo apt-get -y install nginx
sudo cp /etc/nginx/sites-available/default nginx.conf
# Strip comment-only and blank lines from the copied site config.
sed -i -e '/^\s*#.*$/d' -e '/^\s*$/d' nginx.conf
# Rewrite the docroot from /var/www/html to /usr/share/nginx/html.
sudo sed -i 's/root \/var\/www\/html\;/root \/usr\/share\/nginx\/html\;/g' nginx.conf
sudo cp nginx.conf /etc/nginx/sites-available/default
cat << EOF > index.html
<html><head><title>Debian Ubuntu Nginx Installation</title></head>
<body><h1>Nginx Installed</h1><p>If you can see this, nginx is successfully installed.</p></body></html>
EOF
sudo cp index.html /usr/share/nginx/html/index.html
sudo chmod 644 /usr/share/nginx/html/index.html
sudo systemctl restart nginx
sudo apt -y install curl
# Smoke test: fetch the page we just installed.
curl http://localhost
sudo useradd wordly
sudo mkhomedir_helper wordly
cd /home/wordly
echo 'Hello, world.' > wordly.txt
### End of script ####
| true |
01a98942cfc4cc39838407283dc9f5aa334524f2 | Shell | tulioalberton/BFT-SMaRt_TLS | /docker/startCluster.sh | UTF-8 | 4,714 | 3.484375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Positional arguments: benchmark type, fault tolerance level, network
# interface, and whether to (re)initialize the docker swarm.
TYPE=$1
FAULTS=$2
# Replica container IPs are allocated sequentially starting at 10.1.1.100.
END_OF_IP=100
ETHX=$3
# NOTE(review): MASTER is assigned but the code below tests "$4" directly.
MASTER=$4
if [ $# != 4 ]
then
echo "Usage: $0 <MicroBenchmark | YCSB> <n tolerated faults: 1 | 3 | 10> < interface: eth0|em1> <swarm init: true|false>"
echo "Example: bash startCluster.sh MicroBenchmark 3 eth0 true // if it is to init the swarm, first time execution."
echo "Example: bash startCluster.sh MicroBenchmark 3 eth0 false // swarm already started."
echo "Shal exist one master swarm which will initialize and link other workers together."
echo "Currently workers: core2"
echo ""
sleep 1
exit 1
fi
# Swarm (re)initialization: leave any previous swarm, create a fresh one,
# save the join token, create the overlay network, and join the worker(s).
if [ $4 == true ]
then
echo "Leaving swarm."
docker swarm leave --force
echo "Initializing a new swarm and saving its token at bft-network.token."
# NOTE(review): MY_IP is only used by the commented-out grep below.
MY_IP=`/sbin/ifconfig $ETHX | grep 'inet addr' | cut -d: -f2 | awk '{print $1}'`
#docker swarm init --advertise-addr $ETHX |grep 'docker \|--token \|'$MY_IP > bft-network.token
docker swarm init --advertise-addr $ETHX |grep "docker swarm join --token" > bft-network.token
cat ./bft-network.token
#exit 1
echo "Creating network overlay: name:bft-network, subnet:10.1.1.0/24, gateway:10.1.1.1."
docker network create -d overlay --attachable bft-network --driver overlay --subnet=10.1.1.0/24 --gateway=10.1.1.1 > /dev/null
# Replay the saved join command on each worker (currently hard-coded to core2).
joinCmd=`cat ./bft-network.token`
worker="core2"
#for worker in {"core2","null"}; do
echo "Leaving swarm on "$worker"."
ssh $worker docker swarm leave --force
echo "Joining swarm on "$worker"."
ssh $worker $joinCmd
sleep 1
#done
fi
echo "Removing all containers."
bash removeAllContainers.sh
case "$TYPE" in
MicroBenchmark)
echo ""
echo "Starting MicroBenchmark Server with f=$FAULTS."
case "$FAULTS" in
1)
VRS=v6_f1
for replica in {0..1};
do
echo "Starting: R"$replica", Version: bft-smart:"$VRS," IP:10.1.1."$END_OF_IP
docker run -t --rm --expose=12000 --net bft-network --ip 10.1.1.$END_OF_IP --name=bft.relica.$replica tulioribeiro/bft-smart:v6_f1_RSA bftsmart.demo.microbenchmarks.ThroughputLatencyServer $replica 100000 0 0 false & > /dev/null
((END_OF_IP++))
sleep 1
done
;;
*)
echo "Usage: $0 <MicroBenchmark | YCSB> <n tolerated faults: 1 | 3 | 10> < interface: eth0|em1> <swarm init: true|false>"
echo "Example: bash startCluster.sh CounterServer 3 eth0 false"
exit 1
esac
echo ""
echo "Now run the client."
echo "docker run -t --rm --expose=12000 --net bft-network --ip 10.1.1.220 --name=bft.cli.1001 tulioribeiro/bft-smart:v6_f1_RSA bftsmart.demo.microbenchmarks.ThroughputLatencyClient <clientID (initial)> <Num clients> <n ops> <payload req> <payload reply> false false"
echo "docker run -t --rm --expose=12000 --net bft-network --ip 10.1.1.220 --name=bft.cli.1001 tulioribeiro/bft-smart:v6_f1_RSA bftsmart.demo.microbenchmarks.ThroughputLatencyClient 1001 1 1000 0 0 false false"
echo "docker run --net bft-network tulioribeiro/bft-smart:$VRS java -cp /opt/BFT-SMaRt.jar:/opt/lib/* bftsmart.demo.counter.CounterClient 1001 1 200"
echo ""
;;
YCSB)
echo ""
echo "Starting YCSB with f=$FAULTS."
case "$FAULTS" in
1)
VRS=v4_f1
for replica in {0..3};
do
echo "Starting: R"$replica", Version: bft-smart:"$VRS," IP:10.1.1."$END_OF_IP
docker run --net bft-network --ip 10.1.1.$END_OF_IP tulioribeiro/bft-smart:$VRS java -cp /opt/BFT-SMaRt.jar:/opt/lib/* bftsmart.demo.ycsb.YCSBServer $replica & >> /dev/null
((END_OF_IP++))
sleep 1
done
;;
*)
echo "Usage: $0 <CounterServer | YCSB> <n tolerated faults: 1 | 3 | 10> < interface: eth0|em1> <swarm init: true|false>"
echo "Example: bash startCluster.sh CounterServer 3 eth0"
exit 1
esac
echo ""
echo "Now run the client: ID_Client range [1000 - 1500]. You can create more keys."
echo "docker run --net bft-network tulioribeiro/bft-smart:$VRS java -cp /opt/BFT-SMaRt.jar:/opt/lib/* com.yahoo.ycsb.Client -threads 4 -P config/workloads/workloada -p measurementtype=timeseries -p timeseries.granularity=1000 -db bftsmart.demo.ycsb.YCSBClient -s > output_YCSB.txt"
echo ""
;;
*)
echo "Usage: $0 <MicroBenchmark | YCSB> <n tolerated faults: 1 | 3 | 10> < interface: eth0|em1> <swarm init: true|false>"
echo "Example: bash startCluster.sh CounterServer 3 eth0 false"
exit 1
esac
echo ""
echo " INITIALIZED"
| true |
eb049f15d021b846e4b26b5a1472edeb629bed93 | Shell | petronny/aur3-mirror | /pctel/PKGBUILD | UTF-8 | 811 | 2.6875 | 3 | [] | no_license | # Contributor: Jens Adam (byte/jra) <j_adam@web.de>
# Arch Linux PKGBUILD for the PCTel winmodem kernel modules.
# _kernver must match the target kernel's module directory.
_kernver=2.6.23-ARCH
_realver=0.9.7-9-rht-8
pkgname=pctel
pkgver=0.9.7.9_rht8
pkgrel=1
pkgdesc='Drivers for PCTel winmodems'
url='http://linmodems.technion.ac.il/pctel-linux/welcome.html'
license=('GPL' 'proprietary') # FIXME
arch=('i686')
depends=('kernel26>=2.6.23')
install=$pkgname.install
source=("http://linmodems.technion.ac.il/pctel-linux/$pkgname-$_realver.tar.gz")
md5sums=('a8fa09d0754e36673740a5ba236b3379')
build() {
cd $startdir/src/$pkgname-$_realver/src/
# pick one of the two types supported for 2.6 kernels, or use ./configure -auto
echo pct789 | ./configure -manual
# echo cm8738 | ./configure -manual
make || return 1
# Install the built kernel modules under the packaged module tree.
install -d $startdir/pkg/lib/modules/$_kernver/misc/
install -m 644 *.ko $startdir/pkg/lib/modules/$_kernver/misc/
}
| true |
c34947503d8646653f12c334a0b3edccfc569cab | Shell | badrelmers/qemucomm | /qemucomm | UTF-8 | 7,461 | 3.84375 | 4 | [] | no_license | #!/bin/bash
# Strict mode: abort on errors, unset variables, and pipeline failures.
set -eu
set -o pipefail
# Globals populated by option parsing in main():
QEMU_COMMAND=   # qemu_qmp or qemu_ga, chosen by -q / -g
QEMU_SOCKET=    # path to the UNIX socket
COMMAND_ARGS=   # remaining CLI args, forwarded to the cmd_* handler
# Parse global options, pick the transport (QMP via -q, guest agent via -g),
# then run the requested subcommand through that transport.
main() {
while getopts ":hg:q:" opt; do
case $opt in
q)
QEMU_COMMAND=qemu_qmp
QEMU_SOCKET=$OPTARG
;;
g)
QEMU_COMMAND=qemu_ga
QEMU_SOCKET=$OPTARG
;;
h)
usage
exit
;;
\?)
echo "Invalid option -$OPTARG" >&2
usage >&2
exit 1
;;
:)
echo "Option -$OPTARG requires an argument" >&2
usage >&2
exit 1
;;
esac
done
shift $((OPTIND-1))
if [[ $# -lt 1 ]]; then
usage >&2
exit 1
fi
local COMMAND="$1"
shift
# Remaining args are consumed later by proxy_cmd inside the transport.
COMMAND_ARGS=("$@")
if [[ -z "$QEMU_COMMAND" ]]; then
echo "Must specify either -q or -g" >&2
usage >&2
exit 1
fi
# The transport opens the socket, then evals "proxy_cmd cmd_<COMMAND>".
"$QEMU_COMMAND" "$QEMU_SOCKET" "proxy_cmd cmd_$COMMAND"
}
proxy_cmd() {
    # Dispatch helper: invoke the named cmd_* handler, forwarding the
    # argument list that main() captured from the command line.
    local handler=$1
    "$handler" "${COMMAND_ARGS[@]}"
}
# Print the guest agent version (also serves as an "is the agent up?" probe).
cmd_info() {
qemu_execute guest-info '{}'
jq -er .version <<< "$GA_RETURN"
#jq -e .supported_commands <<< "$GA_RETURN"
}
# Run a command inside the guest via guest-exec.
#   -e K=V  environment variable for the child (repeatable)
#   -w      wait for the process and propagate its exit code
#   -i      forward this script's stdin to the guest process (base64-encoded)
#   -o      capture and print the process' stdout/stderr (implies waiting)
# Without -w/-o, prints the guest-side PID and returns immediately.
cmd_exec() {
local OPT_WAIT=false
local OPT_INPUT=false
local OPT_OUTPUT=false
local OPT_ENV=()
# getopts keeps state in OPTIND; reset it because main() already parsed options.
OPTIND=
while getopts ":e:wio" opt; do
case $opt in
e)
OPT_ENV+=("$OPTARG")
;;
w)
OPT_WAIT=true
;;
i)
OPT_INPUT=true
;;
o)
OPT_OUTPUT=true
;;
\?)
echo "Invalid option -$OPTARG" >&2
usage >&2
exit 1
;;
:)
echo "Option -$OPTARG requires an argument" >&2
usage >&2
exit 1
;;
esac
done
shift $((OPTIND-1))
if [[ $# -lt 1 ]]; then
usage >&2
exit 1
fi
local CMD="$1"
shift
local OPT_ARG=("$@")
local JSON_ENV JSON_ARG PID STATUS EXIT_CODE
# ${arr[@]+...} expands to nothing for an empty array, keeping set -u happy.
JSON_ENV=$(json_array ${OPT_ENV[@]+"${OPT_ENV[@]}"})
JSON_ARG=$(json_array ${OPT_ARG[@]+"${OPT_ARG[@]}"})
qemu_execute guest-exec "$(json \
--arg path "$CMD" \
--arg input "$([[ $OPT_INPUT == false ]] || base64)" \
--argjson arg "$JSON_ARG" \
--argjson env "$JSON_ENV" \
--argjson capture "$OPT_OUTPUT" \
'{"path": $path, "arg": $arg, "env": $env, "input-data": $input, "capture-output": $capture}' \
)"
PID="$(jq -re .pid <<< $GA_RETURN)"
if [[ $OPT_WAIT = true || $OPT_OUTPUT == true ]]; then
# Poll guest-exec-status until the process exits, then relay its
# captured output (stdout/stderr are base64 in the reply) and exit code.
while true; do
qemu_execute guest-exec-status "$(json --argjson pid "$PID" '{"pid": $pid}')"
STATUS="$GA_RETURN"
if [[ "$(jq -er .exited <<< "$STATUS")" == false ]]; then
sleep 0.1
else
EXIT_CODE=$(jq -er .exitcode <<< "$STATUS")
if [[ $OPT_OUTPUT == true ]]; then
jq -r '."out-data" // empty' <<< "$STATUS" | base64 -d
jq -r '."err-data" // empty' <<< "$STATUS" | base64 -d >&2
# TODO: check .out-truncated, .err-truncated
fi
return $EXIT_CODE
fi
done
else
# Fire-and-forget: just report the guest-side PID.
echo "$PID"
fi
}
# Ask the guest to shut down via guest-shutdown.
#   -r reboot, -h halt; default is powerdown. -r takes precedence over -h.
cmd_shutdown() {
local OPT_REBOOT=false
local OPT_HALT=false
# Reset getopts state (main() already consumed the global options).
OPTIND=
while getopts ":rh" opt; do
case $opt in
r)
OPT_REBOOT=true
;;
h)
OPT_HALT=true
;;
\?)
echo "Invalid option -$OPTARG" >&2
usage >&2
exit 1
;;
:)
echo "Option -$OPTARG requires an argument" >&2
usage >&2
exit 1
;;
esac
done
shift $((OPTIND-1))
qemu_execute guest-shutdown "$(json \
--arg mode "$([[ $OPT_REBOOT == true ]] && echo reboot || ([[ $OPT_HALT == true ]] && echo halt) || echo powerdown)" \
'{"mode": $mode}'
)"
}
cmd_custom() {
    # Run an arbitrary protocol command with optional JSON arguments and
    # pretty-print the reply. Arguments default to an empty object.
    [[ $# -ge 1 ]] || { usage >&2; exit 1; }
    local command=$1
    local args='{}'
    shift
    if [[ $# -gt 0 ]]; then
        args=$1
        shift
    fi
    qemu_execute "$command" "$args"
    jq . <<< "$GA_RETURN"
}
cmd_add_device() {
    # Hot-plug a device: DRIVER ID [PROP=VALUE...] -> QMP device_add.
    [[ $# -ge 2 ]] || { usage >&2; exit 1; }
    local driver=$1
    local dev_id=$2
    shift 2
    qemu_execute device_add "$(json_dict driver="$driver" id="$dev_id" "$@")"
}
# Remove a previously hot-plugged device by id (QMP device_del).
# Consistency fix: route through the shared _cmd_del helper instead of
# duplicating its id-to-JSON construction inline.
cmd_del_device() {
    if [[ $# -lt 1 ]]; then
        usage >&2
        exit 1
    fi
    _cmd_del device_del "$1"
}
# Create a QEMU object: TYPE ID [PROP=VALUE...] -> QMP object-add,
# with the extra properties packed into the "props" sub-object.
cmd_add_object() {
if [[ $# -lt 2 ]]; then
usage >&2
exit 1
fi
local DRIVER="$1"
local ID="$2"
shift 2
local PARAMS
PARAMS=$(json_dict "$@")
qemu_execute object-add "$(json \
--arg driver "$DRIVER" \
--arg id "$ID" \
--argjson params "$PARAMS" \
'{ "qom-type": $driver, "id": $id, "props": $params }'
)"
}
# Delete a QEMU object by id (QMP object-del).
# Consistency fix: route through the shared _cmd_del helper instead of
# duplicating its id-to-JSON construction inline.
cmd_del_object() {
    if [[ $# -lt 1 ]]; then
        usage >&2
        exit 1
    fi
    _cmd_del object-del "$1"
}
# Shared helper: run a deletion command that takes only an "id" argument.
_cmd_del() {
local COMMAND="$1"
local ID="$2"
qemu_execute "$COMMAND" "$(json --arg id "$ID" '{"id": $id}')"
}
# Send one {"execute": ..., "arguments": ...} request over the open socket
# FDs and read a single JSON reply line (5s timeout). On an error reply,
# print .error.desc and fail; otherwise store compact .return in GA_RETURN.
# Set QCOMM_DEBUG to trace the raw request/reply on stderr.
qemu_execute() {
local COMMAND ARGS
COMMAND="$1"
ARGS="${2-}"
json --arg cmd "$COMMAND" --argjson args "$ARGS" '{"execute": $cmd, "arguments": $args}' >&$FD_SOCKET_OUT
local LINE
read -t 5 -r -u $FD_SOCKET_IN LINE
if [[ -n "${QCOMM_DEBUG-}" ]]; then
echo "SEND: $(json --arg cmd "$COMMAND" --argjson args "$ARGS" '{"execute": $cmd, "arguments": $args}')" >&2
echo "RECV: $LINE" >&2
fi
local ERROR=$(jq -r '.error.desc // empty' <<< "$LINE")
if [[ -n "$ERROR" ]]; then
echo "$ERROR" >&2
return 1
fi
GA_RETURN=$(jq -cM .return <<< "$LINE")
}
# Guest-agent transport: open the socket in a coprocess, synchronize the
# stream with guest-sync, run COMMAND with FD_SOCKET_IN/OUT pointing at the
# agent, then tear the coprocess down (SIGINT/130 is the expected outcome).
qemu_ga() {
    local SOCKET COMMAND
    SOCKET="$1"
    COMMAND="$2"
    coproc FDS (
        socket "$SOCKET"
    )
    FD_SOCKET_IN=${FDS[0]}
    FD_SOCKET_OUT=${FDS[1]}
    # sync character isn't working?
    #printf '\xff' >&$FD_SOCKET_OUT
    local PID=$$
    qemu_execute guest-sync "$(json --argjson pid "$PID" '{"id": $pid}')"
    # BUGFIX: use a brace group, not a subshell — 'return' inside ( ) could
    # not abort this function, so a failed sync was silently ignored.
    [[ "$(jq -re . <<< "$GA_RETURN")" = "$$" ]] || { echo "guest-sync mismatch" >&2; return 1; }
    eval "$COMMAND"
    # BUGFIX: initialize RETURN — when 'wait' succeeded it was left unset,
    # which tripped 'set -u' in the test below.
    local RETURN=0
    kill -INT "$FDS_PID" 2>/dev/null
    wait "$FDS_PID" || RETURN=$?
    if [[ $RETURN != 130 ]]; then
        return $RETURN
    fi
}
# QMP transport: open the socket in a coprocess, consume the server's
# greeting, negotiate capabilities, run COMMAND, then tear the coprocess
# down (SIGINT/130 is the expected outcome).
qemu_qmp() {
    local SOCKET COMMAND
    SOCKET="$1"
    COMMAND="$2"
    coproc FDS (
        socket "$SOCKET"
    )
    FD_SOCKET_IN=${FDS[0]}
    FD_SOCKET_OUT=${FDS[1]}
    local LINE
    read -t 5 -r -u $FD_SOCKET_IN LINE
    # BUGFIX: use a brace group, not a subshell — 'return' inside ( ) could
    # not abort this function, so a failed handshake was silently ignored.
    [[ -n "$(jq -re .QMP.version.qemu.major <<< "$LINE")" ]] || { echo "QMP handshake failed" >&2; return 1; }
    qemu_execute qmp_capabilities "{}"
    eval "$COMMAND"
    # BUGFIX: initialize RETURN — when 'wait' succeeded it was left unset,
    # which tripped 'set -u' in the test below.
    local RETURN=0
    kill -INT "$FDS_PID" 2>/dev/null
    wait "$FDS_PID" || RETURN=$?
    if [[ $RETURN != 130 ]]; then
        return $RETURN
    fi
}
# Connect this process' stdin/stdout to the UNIX socket, preferring socat.
socket() {
    if inpath socat; then
        exec socat - UNIX-CONNECT:"$1"
    elif inpath nc; then
        exec nc -U "$1"
    else
        # BUGFIX: previously fell through with status 0, leaving the caller's
        # coprocess connected to nothing and later reads failing obscurely.
        echo "socket: neither socat nor nc is available" >&2
        return 1
    fi
}
# Build compact JSON from a jq filter with no input (-n), e.g.
# json --arg x v '{"k": $x}'.
json() {
jq -ncM "$@"
}
# Turn each shell argument into a JSON string and slurp them into one array.
json_array() {
for arg in "$@"; do
json --arg arg "$arg" '$arg'
done | jq -cMs .
}
# Turn KEY=VALUE arguments into one JSON object; values may contain '='.
# With no arguments, yields {} (the '// {}' fallback).
json_dict() {
local SEPARATOR="="
for arg in "$@"; do
local KEY=$(cut -d "$SEPARATOR" -f1 <<< $arg)
local VALUE=$(cut -d "$SEPARATOR" -f2- <<< $arg)
json --arg value "$VALUE" '{"'$KEY'": $value}'
done | jq -cMs 'add // {}'
}
# True if the given command is available. Uses the POSIX `command -v`
# builtin instead of the external (and less portable) `which`.
inpath() {
    command -v "$1" > /dev/null 2>&1
}
# Print CLI help (global options plus the per-subcommand flags) to stdout.
usage() {
echo "$0 [options] COMMAND"
echo " An interface to QEMU QMP and guest agent"
echo " -h: show help"
echo " -q PATH: QEMU QMP socket path"
echo " -g PATH: QEMU guest agent socket path"
echo
echo "Commands"
echo " exec [options] PATH [ARGUMENTS..]"
echo " Executes a process inside the guest"
echo " -e ENV=value: set environment variable(s)"
echo " -w: wait for process to terminate"
echo " -i: send stdin"
echo " -o: capture stdout"
echo " info"
echo " Displays information about the guest, and can be used to check that the guest agent is running"
echo " shutdown"
echo " Tells the guest to initiate a system shutdown"
echo " -h: halt immediately"
echo " -r: reboot"
echo " custom COMMAND [JSON_ARGUMENTS]"
echo " Runs a custom command, optionally passing arguments"
echo " add_device DRIVER ID [KEY=VALUE..]"
echo " del_device ID"
echo " add_object TYPE ID [KEY=VALUE..]"
echo " del_object ID"
}
main "$@"
| true |
115894f751789c972a887a932d78e88ef196e355 | Shell | zedaav/userconfig | /shell/profile.d/20-time.sh | UTF-8 | 1,752 | 3.90625 | 4 | [] | no_license | # Utility functions to deal with time reckonings
# Function for duration pretty printing
function __prettyPrintDuration {
local DURATION="$1"
local M=$((60))
local H=$(($M*60))
local D=$(($H*24))
local DAYS=$(( $DURATION / $D ))
local HOURS=$(( ($DURATION - $DAYS*$D) / $H ))
local MINS=$(( ($DURATION - $DAYS*$D - $HOURS*$H) / $M ))
local SECS=$(( $DURATION % $M ))
# Print results
if test $DAYS -gt 0; then
echo -n "${DAYS}d"
fi
if test $HOURS -gt 0; then
echo -n "${HOURS}h"
fi
if test $MINS -gt 0; then
echo -n "${MINS}m"
fi
if test $SECS -gt 0; then
echo -n "${SECS}s"
fi
}
# To be invoked just before displaying prompt
function __refreshLastCmdDuration {
# Update current timestamp
local CURRENT_TIME=$(date +%s)
# Reckon duration only if we get a start time
if test -n "${EXEC_START_TIME}"; then
declare -il DURATION
DURATION=$(( ${CURRENT_TIME} - ${EXEC_START_TIME} ))
# Update global env
LAST_EXEC_TIME=${DURATION}
LAST_EXEC_TIME_STR="$(__prettyPrintDuration $LAST_EXEC_TIME)"
fi
}
# To be hooked for execution before every command
function __rememberExecStart {
# Handle completion and prompt hooks
if test -n "$COMP_LINE" -o "$BASH_COMMAND" == "$PROMPT_COMMAND"; then
# Start time is set?
if test -n "$EXEC_START_TIME"; then
# We're just before displaying the prompt: reckon duration
__refreshLastCmdDuration
unset EXEC_START_TIME
fi
return
fi
# Remember current time if we're about to run a command
if test -n "$BASH_COMMAND"; then
EXEC_START_TIME=$(date +%s)
fi
}
| true |
42478f99029d7ca57455aa5f3453fc2a87f3017e | Shell | benfred/cudf | /ci/cpu/prebuild.sh | UTF-8 | 269 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
#Upload cudf once per PYTHON
if [[ "$CUDA" == "10.0" ]]; then
export UPLOAD_CUDF=1
else
export UPLOAD_CUDF=0
fi
#Upload libcudf once per CUDA
if [[ "$PYTHON" == "3.6" ]]; then
export UPLOAD_LIBCUDF=1
else
export UPLOAD_LIBCUDF=0
fi | true |
877a1a79171cbd914a16abf6f4b5f7a3fcf14e10 | Shell | Samsung/TizenRT | /external/iotjs/tools/mem_stats.sh | UTF-8 | 3,514 | 4.125 | 4 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Copyright 2015-present Samsung Electronics Co., Ltd. and other contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage
if [ "$#" -lt 3 ]
then
echo "$0: Benchmark memory usage of IoT.js"
echo ""
echo "Usage: $0 [-d] IOTJS IOTJS_MEMSTATS BENCHMARK..."
echo ""
echo "Positional arguments:"
echo " IOTJS path to IoT.js engine built without memory"
echo " statistics support"
echo " IOTJS_MEMSTATS path to IoT.js engine built with memory statistics"
echo " support"
echo " BENCHMARK... paths to JavaScript programs to be used as the"
echo " benchmark suite"
echo ""
echo "Optional arguments:"
echo " -d generate semicolon-delimited output (default:"
echo " formatted human-readable output)"
echo ""
echo "The tool benchmarks the memory usage of IoT.js with the help of two"
echo "different builds and a suite of JavaScript programs. Each benchmark"
echo "script is executed by both builds: the \"memstats\" build reports"
echo "statistics retrieved from JerryScript, while the \"normal\" build"
echo "reports RSS results."
exit 1
fi
# Choosing table or semicolon-separated output mode
if [ "$1" == "-d" ]
then
TABLE="no"
PRINT_TEST_NAME_AWK_SCRIPT='{printf "%s;", $1}'
PRINT_TOTAL_AWK_SCRIPT='{printf "%d;%d\n", $1, $2 * 1024}'
shift
else
PRINT_TEST_NAME_AWK_SCRIPT='{printf "%30s", $1}'
PRINT_TOTAL_AWK_SCRIPT='{printf "%25d%25d\n", $1, $2 * 1024}'
TABLE="yes"
fi
# Print an error message and abort the whole benchmark run.
#   $1 - message to display
# Improvements: diagnostics go to stderr (so they do not pollute the
# semicolon-delimited stdout in -d mode) and printf is used instead of echo,
# which misbehaves on messages starting with '-' or containing backslashes.
fail_msg()
{
  printf '%s\n' "$1" >&2
  exit 1
}
# Engine
# Check whether the given engine binary lacks memory-statistics support:
# such builds print "Ignoring memory statistics option" when passed
# --memstat. Returns 0 (grep matched) for a non-memstats build, non-zero
# for a memstats-enabled build.
#   $1 - path to the engine executable
is_mem_stats_build()
{
  [ -x "$1" ] || fail_msg "Engine '$1' is not executable"
  local tmpfile code
  tmpfile=$(mktemp)
  # Bug fix: the original used '2>&1 > /dev/null', which redirects grep's
  # stderr to the terminal and only silences stdout; '> /dev/null 2>&1'
  # silences both as intended.
  "$1" --memstat "$tmpfile" 2>&1 | \
    grep -- "Ignoring memory statistics option" > /dev/null 2>&1
  code=$?
  rm -f -- "$tmpfile"
  return "$code"
}
# Resolve both engine paths to absolute form and verify that each one is
# (or is not) a memstats build, as its position requires.
IOTJS=$(readlink -f "$1")
shift
is_mem_stats_build "$IOTJS" || fail_msg \
  "First engine specified should be built without memory statistics support"
IOTJS_MEM_STATS=$(readlink -f "$1")
shift
is_mem_stats_build "$IOTJS_MEM_STATS" && fail_msg \
  "Second engine specified should be built with memory statistics support"
# Benchmarks list
# Collect every remaining argument into one space-separated string of
# benchmark paths (paths containing spaces are therefore not supported).
BENCHMARKS=""
while [ $# -ne 0 ]
do
  BENCHMARKS="$BENCHMARKS $1"
  shift
done
# Running
# Print the table header only in human-readable mode.
if [ "$TABLE" == "yes" ]
then
  awk 'BEGIN {printf "%30s%25s%25s\n", "Test name", "Peak Heap (jerry)", \
              "Maximum RSS"}'
  echo
fi
STARTDIR=$(pwd)
# $BENCHMARKS is a whitespace-joined list, so it is intentionally left
# unquoted here to split it back into individual paths.
for bench in $BENCHMARKS
do
  bench_name=$(basename -s '.js' "$bench")
  bench_canon=$(readlink -f "$bench")
  # Run each benchmark from its own directory so relative fixtures resolve.
  # Quoting fixes breakage on paths with spaces; $() replaces backticks.
  cd "$(dirname "$bench_canon")" || fail_msg "cannot cd to benchmark dir of $bench"
  echo "$bench_name" | awk "$PRINT_TEST_NAME_AWK_SCRIPT"
  # Peak heap usage as reported by the memstats-enabled build.
  MEM_STATS=$("$IOTJS_MEM_STATS" --memstat "$bench_canon" | \
              grep -e "Peak allocated =" | grep -o "[0-9]*")
  # Maximum RSS of the normal build, measured by the JerryScript helper.
  RSS=$("$STARTDIR"/deps/jerry/tools/rss-measure.sh "$IOTJS" "$bench_canon" | \
        tail -n 1 | grep -o "[0-9]*")
  echo $MEM_STATS $RSS | xargs | awk "$PRINT_TOTAL_AWK_SCRIPT"
  cd "$STARTDIR" || exit 1
done
| true |
5bf22f907ce5563b929ef37df0c8ac5337262683 | Shell | hgxl/CLI | /component/docker/make/conf/apache2.sh | UTF-8 | 1,049 | 2.765625 | 3 | [] | no_license | #! /bin/bash
# Pull the apache2 configuration tree for the skyflow docker component from
# the project's github content mirror into $SKYFLOW_DOCKER_DIR.
source "$HOME/.skyflow/helper.sh"
source "$SKYFLOW_DIR/component/docker/helper.sh"
dir=$SKYFLOW_DOCKER_DIR/conf/apache2
githubDir=$SKYFLOW_GITHUB_CONTENT/component/docker/conf/apache2
# All files to mirror, relative to both $githubDir (source) and $dir (dest).
# A single data-driven loop replaces ten copy-pasted mkdir/curl lines.
files="
default/httpd.conf
default/magic
default/mime.types
default/conf.d/default.conf
default/conf.d/info.conf
default/conf.d/languages.conf
default/conf.d/mpm.conf
default/conf.d/userdir.conf
php/conf.d/php5-module.conf
php/conf.d/php7-module.conf
"
for f in $files; do
    # Recreate the remote directory layout locally before downloading.
    mkdir -p "$dir/$(dirname "$f")"
    curl -s "$githubDir/$f" -o "$dir/$f"
done
20c927029116ab94e853d956c19a7d4d5299e7be | Shell | andrew-houghton/self-driving-MIT-car | /jetsonhacks/installBLDC/installBLDC.sh | UTF-8 | 872 | 3.125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Build the Qt gui control for the custom VESC BLDC controller
# Note that this is different from the actual VESC firmware itself, though
# compiled versions of the firmware are contained within the bldc-tool source tree
# Also grabs the VESC configuration files from the mit-racecar hardware repository
#
# $ROOT must point at the workspace directory. Bug fix: the original ran the
# unquoted `cd $ROOT`, which with an unset ROOT silently became `cd` (i.e.
# $HOME) and cloned/built in the wrong place; now we fail loudly instead.
cd "${ROOT:?ROOT must be set to the build workspace}" || exit 1
echo "Installing BLDC Tool prerequisites"
sudo apt-get install qtcreator qt-sdk libudev-dev libqt5serialport5-dev
echo "Fetching bldc-tool source code"
git clone https://github.com/vedderb/bldc-tool
cd bldc-tool || exit 1
echo "Building bldc-tool from source"
qmake -qt=qt5
make clean && make
cd "$ROOT" || exit 1
git clone https://github.com/mit-racecar/hardware.git
echo "You should now be able to run BLDC_Tool from the ~/bldc-tool directory"
echo "The VESC firmware is in ~/bldc-tool/firmwares"
echo "The RACECAR VESC configuration files are in ~/hardware/vesc"
| true |
5b04afd94f0347fcbbed4e1d1b0880be58d4e292 | Shell | PeyWn/TDTS07 | /tutorials/mparm/gsm/shared/bin/doIt.sh | UTF-8 | 289 | 2.53125 | 3 | [
"TU-Berlin-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Sweep instruction-cache sizes for the MPARM simulator: one run per index,
# archiving the statistics files produced by each run.
# Idiom fix: the deprecated `let`/`expr` arithmetic is replaced by a C-style
# for loop and $(( )); iteration range (i = 3 .. count-1) is unchanged.
count=10
for (( i = 3; i < count; i++ )); do
    # The simulator takes the i-cache size parameter as index + 1.
    cache_size=$(( i + 1 ))
    echo "$cache_size"
    mpsim.x --intc=s -w --is="$cache_size" -c2
    sync
    # Preserve this run's statistics before the next iteration overwrites them.
    cp stats.txt "set.fixed_f.var_icache.$i"
    cp stats_light.txt "set.light.fixed_f.var_icache.$i"
    sync
done
| true |
d7e0d6cb910ec385ee1584ae8aea8c84966ef560 | Shell | fbricker/iCanLocalize-Transporter | /downloadMetadata | UTF-8 | 541 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Download App Store metadata for an app via Application Loader's
# iTMSTransporter. Usage: downloadMetadata <iTunes Connect user> <app SKU>
# Runs from the script's own directory so "download/<sku>" lands next to it.
dir=$(dirname "$0")
cd "$dir" || exit 1
user=$1
sku=$2
folder="download/$sku"
# Validate arguments. Fixes: deprecated `[ ... -o ... ]` replaced by
# `[[ ... || ... ]]`, and the usage path now exits with a non-zero status
# (the original bare `exit` reported success on bad usage).
if [[ -z "$user" || -z "$sku" || "$#" -ne 2 ]]; then
    echo "ERROR: Missing parameters."
    echo "Use mode:"
    echo "------------"
    echo "$0 (iTunes Connect User) (APP SKU)"
    echo " "
    exit 1
fi
# Read the password interactively (-s: no echo; -r: keep backslashes intact).
# NOTE(review): the password is still visible in iTMSTransporter's argv below.
echo -n "Enter PASSWORD: "
read -r -s pass
echo ""
loaderFolder=/Applications/Xcode.app/Contents/Applications/Application\ Loader.app/Contents/itms/bin
"$loaderFolder/iTMSTransporter" -m lookupMetadata -u "$user" -p "$pass" -vendor_id "$sku" -destination "$folder"
| true |
a2086556c677d3220ad2c14be0eb9fd88788a1a6 | Shell | BhanukaUOM/GCP-Cloud-Build-Badge-with-Email-Notifications | /download_badges.bash | UTF-8 | 457 | 2.953125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# One entry per Google Cloud Build status, mapped to its shields.io colour.
declare -A statuses=(
    [status_unknown]='inactive'
    [queued]='yellow'
    [working]='yellow'
    [success]='green'
    [failure]='critical'
    [cancelled]='red'
    [internal_error]='red'
    [timeout]='red'
)
mkdir -p badges
# Fetch one pre-rendered SVG badge per status from shields.io.
for status in "${!statuses[@]}"; do
    badge_url="https://img.shields.io/badge/cloud_build-${status}-${statuses[$status]}"
    curl -sS "$badge_url" -o "badges/${status}.svg"
done
| true |
6f78084620260bc22d3ce478f97038dc7ccb88ee | Shell | neutrino-mp/libstb-hal | /tools/pic2m2v.sh | UTF-8 | 882 | 3.84375 | 4 | [] | no_license | #!/bin/sh
#
# creates still-mpegs from pictures, to be able to display them
# using a hardware video decoder. Does about the same as pic2m2v.c
# (C) 2013 Stefan Seyfried
# License: GPLv2+
#
# Still-image resolution for the target box: SD (704x576) on the TripleDragon
# set-top box, 720p everywhere else. A missing /proc/cpuinfo simply leaves
# the 720p default in place, same as the original if/else.
RES=1280x720
grep -q TRIPLEDRAGON /proc/cpuinfo && RES=704x576
# Convert every picture named on the command line into a still MPEG-2 clip
# under /var/cache. An .md5 stamp of the source image is kept next to each
# .m2v so unchanged pictures are not re-encoded on later runs.
# Bug fix: all filename expansions are now quoted, so paths containing
# spaces no longer break the loop; the backtick echo|sed pipeline is
# replaced by pure parameter expansion.
while true; do
	IN=$1
	test -z "$IN" && break
	shift
	# Cache name: strip the leading '/' and turn the remaining path
	# separators into dots, e.g. /usr/a.jpg -> /var/cache/usr.a.jpg
	REL=${IN#/}
	OUT="/var/cache/${REL//\//.}"
	MD5=${OUT}.md5
	M2V=${OUT}.m2v
	# $MD5 not existing => return code != 0
	if [ -s "$M2V" ] && md5sum -c -s "$MD5" > /dev/null 2>&1; then
		echo "$IN is unchanged"
		touch -r "$IN" "$M2V"
		continue
	fi
	if ! [ -e "$IN" ]; then
		echo "$IN does not exist!"
		continue
	fi
	echo "converting $IN -> $M2V"
	ffmpeg -v 31 -y -f mjpeg -i "$IN" -s "$RES" "$M2V" < /dev/null
	if ! [ -s "$M2V" ]; then
		echo "$M2V does not exist - conversion error?"
		continue
	fi
	# set the time identical to input file
	touch -r "$IN" "$M2V"
	md5sum "$IN" > "$MD5"
done
| true |
250a1d89894ce52996f42e23eda4ff6ce6026dad | Shell | Rhinomcd/dotfiles | /zsh_overrides/aliases.zsh | UTF-8 | 197 | 2.578125 | 3 | [] | no_license | # aliases.zsh
# Alias python to python3 only when python3 exists.
# Idiom fix: test command -v's exit status directly instead of the
# double-negative string test on its captured output.
if command -v python3 >/dev/null 2>&1; then
    alias python="python3"
fi
# Force tmux into UTF-8 mode.
alias tmux="tmux -u"
# Quick SSH to the oracle account on $HOST (expanded at definition time).
alias oracle="ssh oracle@$HOST"
alias flake8="python -m flake8"
alias ls="ls -G --color"
| true |
ef7005164036197e3e34e8ca6646518ab6ef202e | Shell | AbdulRehmanMehar/mini-IDE | /redirect.sh | UTF-8 | 659 | 3.296875 | 3 | [] | no_license | #!bin/bash
# AUTHOR_NAME Abdul Rehman
# AUTHOR_EMAIL mehars.6925@gmail.com
# AUTHOR_PROFILE https://github.com/AbdulRehmanMehar
# PUBLIC_URI https://github.com/AbdulRehmanMehar/mini-IDE
# Interactively copy a file while replacing every occurrence of a given
# word or line with new content.
#
# Bug fixes vs. the original:
#  - `eval tr A B < in > out` performs per-CHARACTER transliteration, not the
#    word/line replacement the prompts promise, and eval-ing raw user input
#    is a shell-injection hazard. Pure-bash substring replacement does what
#    was intended, with no eval.
#  - `pid=$!; wait $pid` referenced a background job that was never started;
#    the redirection command is synchronous, so both lines were dead code.
# NOTE: output is normalized to end with exactly one newline.
makeChangesToExistingProgram() {
	local filePath newFile toChange newContent content
	echo "Enter (absolute) Path of File: "
	read -r filePath
	echo " "
	echo "Enter (absolute) Path to save new File: "
	read -r newFile
	echo " "
	echo "Enter content to change (a word or line): "
	read -r toChange
	echo " "
	echo "Enter the new Content: "
	read -r newContent
	echo ""
	if [[ ! -f "$filePath" ]]; then
		echo "Error: '$filePath' is not a readable file" >&2
		return 1
	fi
	content=$(<"$filePath")
	# Replace ALL literal occurrences of $toChange with $newContent.
	printf '%s\n' "${content//"$toChange"/$newContent}" > "$newFile"
	echo "Done!"
}
makeChangesToExistingProgram
| true |
83f4e4f6862856dc5d803e34beaf13d18d799bbd | Shell | wangcongcong123/rectrts | /wget_/rr.sh | UTF-8 | 261 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# Download every TREC 2017 RTS result archive listed (one file name per line)
# in the file given as $1. The `|| [[ -n "$line" ]]` clause also processes a
# final line that lacks a trailing newline.
while IFS='' read -r line || [[ -n "$line" ]]; do
    # NOTE(review): credentials on the command line are visible in `ps`
    # output; wget's --ask-password or a ~/.netrc entry would be safer.
    wget --user tipster --password cdroms https://trec.nist.gov/results/trec26/rts/$line
    echo "Text read from file: $line"
done < "$1"
#https://trec.nist.gov/results/trec26/rts/adv_lirmm-Run1.gz
7c698c45d4aaeaea1952acd1adee254ac7165050 | Shell | ianalderman/JenkinsTerraformAzure | /scripts/BuildTerraformEnvironmentsVariablesFile.sh | UTF-8 | 312 | 2.609375 | 3 | [
"MIT"
] | permissive | !#/bin/bash
# NOTE(review): this file's shebang reads "!#/bin/bash" — the two characters
# are transposed, making the line inert; it should be "#!/bin/bash".
#
# Build the per-environment Terraform variables file inside the Jenkins
# per-build workspace directory. Fixes: variable expansions are quoted
# (Jenkins paths may contain spaces) and a failed cd no longer lets the
# script write environment.tfvars into the wrong directory.
mkdir -p "$WORKSPACE/$BUILD_NUMBER"
cd "$WORKSPACE/$BUILD_NUMBER" || exit 1
#Build out Environment specific variables for Terraform
{
    echo "region_id = \"$RegionId\""
    echo "environment_id = \"$EnvironmentId\""
    echo "azure_region =\"$AzureRegion\""
} > environment.tfvars
| true |
19be910657e1de4289dfcc192a178f8f70d038a1 | Shell | lingfennan/cloaking-detection | /src/cloaking_detection_dedup.sh | UTF-8 | 2,043 | 3.234375 | 3 | [] | no_license | #!/bin/bash
#
# INFILE=$1
# Driver for cloaking detection over deduplicated crawl data. Expects the
# per-user "*compute_list" files and the learned google lists to already
# exist under $BASEDIR with the naming conventions used below.
TYPE=$1
BASEDIR=$2
echo "Note: I need to specify TYPE [ad/search] [BASEDIR, eg. $BASEDIR, and compile the user list, either search user list or ad user list, with no suffix"
#BASEDIR='../data/all.computed/'
# Enumerate all per-user compute lists for this TYPE into one index file.
ls $BASEDIR*$TYPE*user*compute_list > $BASEDIR$TYPE'_user_list'
INFILE=$BASEDIR$TYPE'_user_list'
# in the above step, we just processed $TYPE_google_list
LEARN=$BASEDIR$TYPE'_google_list'
# for search, use text r 15, c 3
# for search, use dom 10, c 3
# for ad, use text r 10, c 2
# for ad, use dom 8, c 2
# Detection thresholds (r = radius, c = min cluster count) tuned per TYPE.
# NOTE(review): the values below differ from the comment above (ad text r=8,
# dom r=4; search dom r=8) — confirm which set is current.
if [ "$TYPE" == "ad" ]; then
	TEXT_R=8
	TEXT_C=3
	DOM_R=4
	DOM_C=2
else
	TEXT_R=15
	TEXT_C=3
	DOM_R=8
	DOM_C=2
fi
# Learn clusters from the deduplicated google-side observations (TEXT + DOM).
ls $BASEDIR*$TYPE*google*list.dom*dedup > $BASEDIR$TYPE'_google_dom_list'
ls $BASEDIR*$TYPE*google*list.text*dedup > $BASEDIR$TYPE'_google_text_list'
python cluster_learning.py -f learn -i $BASEDIR$TYPE'_google_text_list' -o $BASEDIR$TYPE'_google_list.text' -t TEXT -c 1.1
python cluster_learning.py -f learn -i $BASEDIR$TYPE'_google_dom_list' -o $BASEDIR$TYPE'_google_list.dom' -t DOM -c 1.1
# Detect cloaking per observed-user file, then intersect TEXT and DOM hits.
while read observed_file
do
	# this parameter is based on what i observed from site_dynamics sites.
	echo "Results on $observed_file.text.dedup"
	python cloaking_detection.py -f detect -i $observed_file.text.dedup -l $LEARN.text.learned -t TEXT -r $TEXT_R -c $TEXT_C
	echo "Results on $observed_file.dom.dedup"
	python cloaking_detection.py -f detect -i $observed_file.dom.dedup -l $LEARN.dom.learned -t DOM -r $DOM_R -c $DOM_C
	INTERSECT=$observed_file.cloaking.intersect
	echo "Intersection file is $INTERSECT"
	ls $observed_file.*.cloaking | python utils/data_util.py -f intersect_sites -o $INTERSECT
done < $INFILE
# Merge the per-user intersections into one result set and emit the
# evaluation form plus the learned-site evaluation file.
LEARN=$BASEDIR$TYPE'_google_list'
RESULT=$BASEDIR$TYPE'.detection.result'
LEARNED=$BASEDIR$TYPE'.detection.learned'
ls $BASEDIR*$TYPE*.intersect | python utils/data_util.py -f merge_sites -o $RESULT
python utils/util.py -f evaluation_form -i $RESULT -p ObservedSites
python utils/data_util.py -f get_learned_eval -i $RESULT -l $LEARN.text.learned -o $LEARNED
| true |
f62edfb82b0ab3ad003acf8ea53d1c9e1336f4a8 | Shell | ddxgz/swift-saio.sh | /modules/swauth_deb_install.sh | UTF-8 | 2,377 | 3.796875 | 4 | [] | no_license | #
# Author: Marcelo Martins (btorch AT gmail.com)
# Created: 2011/06/26
#
# Info:
#   Sourced by swift-saio.sh to install swauth from .deb packages retrieved
#   from github. Uses $VERSION and $CURDIR from the calling script's config.
#
#   Improvements over the original: every cd is checked (later steps delete
#   files relative to the current directory), the exit status of BOTH dpkg
#   runs is tracked (the first one was silently ignored), backticks are
#   replaced by $(), and path/url expansions are quoted.
#
###########################
#   SWIFT SOURCE INSTALL
###########################
swauth_deb_install (){
    SWAUTH="https://github.com/downloads/gholt/swauth/python-swauth_1.0.2-1_all.deb"
    SWAUTH_DOC="https://github.com/downloads/gholt/swauth/swauth-doc_1.0.2-1_all.deb"
    # If swift version is 1.4.1 or greater then swauth needs to be installed from github
    VER_REGEX="^1\.[4-9]\.[0-9]"
    if [[ "$VERSION" =~ $VER_REGEX ]]; then
        SWAUTH_TEMP="swauth-src"
        cd "$CURDIR" || return 1
        printf "\n\n\t - Starting swauth debian pkg installation process \n"
        if [ ! -d ./"$SWAUTH_TEMP" ]; then
            mkdir "$SWAUTH_TEMP"
        fi
        cd "$SWAUTH_TEMP" || return 1
        printf "\t\t Downloading .deb packages from github \n"
        wget -q "$SWAUTH"
        wget -q "$SWAUTH_DOC"
        if [[ -e $(basename "$SWAUTH") ]]; then
            printf "\n\t\t -> Download of $(basename "$SWAUTH") sucessful "
        else
            printf "\t\t\t -> \033[1;31;40m Error occurred (pkg file not found) \033[0m\n\n"
            exit 1
        fi
        if [[ -e $(basename "$SWAUTH_DOC") ]]; then
            printf "\n\t\t -> Download of $(basename "$SWAUTH_DOC") sucessful "
        else
            printf "\t\t\t -> \033[1;31;40m Error occurred (pkg file not found) \033[0m\n\n"
            exit 1
        fi
        sleep 2
        printf "\n\n\t\t Installing swauth .deb packages \n"
        # Track the status of BOTH installs; the original captured only the
        # second dpkg's exit code.
        CODE=0
        dpkg -i "$(basename "$SWAUTH")" &>/dev/null || CODE=$?
        dpkg -i "$(basename "$SWAUTH_DOC")" &>/dev/null || CODE=$?
        if [ $CODE -eq 0 ];then
            printf "\t\t -> Install sucessful "
        else
            printf "\t\t\t -> \033[1;31;40m Error found (check log file) \033[0m\n\n"
            exit 1
        fi
        sleep 2
        printf "\n\n"
        cd "$CURDIR" || return 1
        if [ -d "$CURDIR/$SWAUTH_TEMP" ]; then
            # ${VAR:?} aborts instead of expanding empty and deleting cwd.
            rm -rf "${SWAUTH_TEMP:?}"
        fi
    fi
    return 0
}
| true |
f3526b4921ad2b997d70be41ab063fe25a16649b | Shell | Azure/aks-engine | /vhd/packer/install-dependencies.sh | UTF-8 | 12,726 | 3.234375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
source /home/packer/provision_installs.sh
source /home/packer/provision_source.sh
source /home/packer/packer_source.sh
# This marker file doubles as the build manifest: every component baked into
# the VHD is appended below so cluster provisioning can audit it later.
VHD_LOGS_FILEPATH=/opt/azure/vhd-install.complete
echo "Starting build on " $(date) > ${VHD_LOGS_FILEPATH}
copyPackerFiles
echo ""
echo "Components downloaded in this VHD build (some of the below components might get deleted during cluster provisioning if they are not needed):" >> ${VHD_LOGS_FILEPATH}
AUDITD_ENABLED=true
MS_APT_REPO=packages.microsoft.com
installDeps
# Record the apt packages installed by installDeps in the manifest.
cat << EOF >> ${VHD_LOGS_FILEPATH}
apt packages:
  - apache2-utils
  - apt-transport-https
  - auditd
  - blobfuse
  - ca-certificates
  - ceph-common
  - cgroup-lite
  - cifs-utils
  - conntrack
  - cracklib-runtime
  - dkms
  - dbus
  - ebtables
  - ethtool
  - fuse
  - gcc
  - git
  - glusterfs-client
  - htop
  - iftop
  - init-system-helpers
  - iotop
  - iproute2
  - ipset
  - iptables
  - jq
  - libpam-pwquality
  - libpwquality-tools
  - make
  - mount
  - nfs-common
  - pigz
  - socat
  - sysstat
  - traceroute
  - util-linux
  - xz-utils
  - zip
EOF
# Time-sync packages are only installed on these Ubuntu releases.
if [[ ${UBUNTU_RELEASE} == "20.04" || ${UBUNTU_RELEASE} == "18.04" ]]; then
  {
    echo "  - ntp"
    echo "  - ntpstat"
    echo "  - chrony"
  } >> ${VHD_LOGS_FILEPATH}
fi
# Disable the noisy MOTD news/upgrade scripts.
chmod a-x /etc/update-motd.d/??-{motd-news,release-upgrade}
if [[ ${UBUNTU_RELEASE} == "20.04" || ${UBUNTU_RELEASE} == "18.04" ]]; then
  overrideNetworkConfig
fi
# --- standalone binaries ---------------------------------------------------
cat << EOF >> ${VHD_LOGS_FILEPATH}
Binaries:
EOF
apmz_version="v0.5.1"
ensureAPMZ "${apmz_version}"
echo "  - apmz $apmz_version" >> ${VHD_LOGS_FILEPATH}
installBpftrace
echo "  - bpftrace" >> ${VHD_LOGS_FILEPATH}
# MOBY/CONTAINERD versions are read by the install helpers sourced above.
MOBY_VERSION="20.10.14"
CONTAINERD_VERSION="1.5.13"
installMoby
installRunc
systemctl_restart 100 5 30 docker || exit 1
echo "  - moby v${MOBY_VERSION}" >> ${VHD_LOGS_FILEPATH}
downloadGPUDrivers
echo "  - nvidia-container-runtime" >> ${VHD_LOGS_FILEPATH}
ETCD_VERSION="3.3.25"
ETCD_DOWNLOAD_URL="mcr.microsoft.com/oss/etcd-io/"
installEtcd "docker"
echo "  - etcd v${ETCD_VERSION}" >> ${VHD_LOGS_FILEPATH}
# --- CNI plugin archives ---------------------------------------------------
VNET_CNI_VERSIONS="
1.4.39.1
"
for VNET_CNI_VERSION in $VNET_CNI_VERSIONS; do
    VNET_CNI_PLUGINS_URL="https://kubernetesartifacts.azureedge.net/azure-cni/v${VNET_CNI_VERSION}/binaries/azure-vnet-cni-linux-amd64-v${VNET_CNI_VERSION}.tgz"
    downloadAzureCNI
    echo "  - Azure CNI version ${VNET_CNI_VERSION}" >> ${VHD_LOGS_FILEPATH}
done
CNI_PLUGIN_VERSIONS="
0.9.1
"
for CNI_PLUGIN_VERSION in $CNI_PLUGIN_VERSIONS; do
    CNI_PLUGINS_URL="https://kubernetesartifacts.azureedge.net/cni-plugins/v${CNI_PLUGIN_VERSION}/binaries/cni-plugins-linux-amd64-v${CNI_PLUGIN_VERSION}.tgz"
    downloadCNI
    echo "  - CNI plugin version ${CNI_PLUGIN_VERSION}" >> ${VHD_LOGS_FILEPATH}
done
installImg
echo "  - img" >> ${VHD_LOGS_FILEPATH}
systemctl status docker --no-pager || exit 1
# --- container images pre-pulled into the local docker cache ---------------
echo "Docker images pre-pulled:" >> ${VHD_LOGS_FILEPATH}
METRICS_SERVER_VERSIONS="
0.5.2
"
for METRICS_SERVER_VERSION in ${METRICS_SERVER_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes/metrics-server:v${METRICS_SERVER_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
KUBE_ADDON_MANAGER_VERSIONS="
9.1.3
9.1.5
"
for KUBE_ADDON_MANAGER_VERSION in ${KUBE_ADDON_MANAGER_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes/kube-addon-manager:v${KUBE_ADDON_MANAGER_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
MCR_PAUSE_VERSIONS="3.4.1"
for PAUSE_VERSION in ${MCR_PAUSE_VERSIONS}; do
    # Pull the arch independent MCR pause image which is built for Linux and Windows
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes/pause:${PAUSE_VERSION}"
    loadContainerImage "${CONTAINER_IMAGE}"
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
CLUSTER_AUTOSCALER_VERSIONS="
1.22.1
"
for CLUSTER_AUTOSCALER_VERSION in ${CLUSTER_AUTOSCALER_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes/autoscaler/cluster-autoscaler:v${CLUSTER_AUTOSCALER_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
CORE_DNS_VERSIONS="
1.8.6
"
for CORE_DNS_VERSION in ${CORE_DNS_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes/coredns:${CORE_DNS_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
AZURE_CNIIMAGEBASE="mcr.microsoft.com/containernetworking"
AZURE_NPM_VERSIONS="
1.2.2_hotfix
"
for AZURE_NPM_VERSION in ${AZURE_NPM_VERSIONS}; do
    CONTAINER_IMAGE="${AZURE_CNIIMAGEBASE}/azure-npm:v${AZURE_NPM_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
NVIDIA_DEVICE_PLUGIN_VERSIONS="
1.0.0-beta6
"
for NVIDIA_DEVICE_PLUGIN_VERSION in ${NVIDIA_DEVICE_PLUGIN_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/nvidia/k8s-device-plugin:${NVIDIA_DEVICE_PLUGIN_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
KV_FLEXVOLUME_VERSIONS="0.0.16"
for KV_FLEXVOLUME_VERSION in ${KV_FLEXVOLUME_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/k8s/flexvolume/keyvault-flexvolume:v${KV_FLEXVOLUME_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
BLOBFUSE_FLEXVOLUME_VERSIONS="1.0.8"
for BLOBFUSE_FLEXVOLUME_VERSION in ${BLOBFUSE_FLEXVOLUME_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/k8s/flexvolume/blobfuse-flexvolume:${BLOBFUSE_FLEXVOLUME_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
IP_MASQ_AGENT_VERSIONS="
2.8.0
"
for IP_MASQ_AGENT_VERSION in ${IP_MASQ_AGENT_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes/ip-masq-agent:v${IP_MASQ_AGENT_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
KMS_PLUGIN_VERSIONS="0.0.10"
for KMS_PLUGIN_VERSION in ${KMS_PLUGIN_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/k8s/kms/keyvault:v${KMS_PLUGIN_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
loadContainerImage "mcr.microsoft.com/oss/busybox/busybox:1.33.1"
echo "  - busybox" >> ${VHD_LOGS_FILEPATH}
# Control-plane images + node binaries for every supported k8s release.
K8S_VERSIONS="
1.24.17
1.23.17
1.22.17
1.21.14
"
for KUBERNETES_VERSION in ${K8S_VERSIONS}; do
  for component in kube-apiserver kube-controller-manager kube-proxy kube-scheduler; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes/${component}:v${KUBERNETES_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
  done
  KUBE_BINARY_URL="https://kubernetesartifacts.azureedge.net/kubernetes/v${KUBERNETES_VERSION}/binaries/kubernetes-node-linux-amd64.tar.gz"
  extractKubeBinaries
done
# Starting with 1.16 we pull cloud-controller-manager and cloud-node-manager
CLOUD_MANAGER_VERSIONS="
1.24.0
1.23.11
1.1.14
1.0.18
"
for CLOUD_MANAGER_VERSION in ${CLOUD_MANAGER_VERSIONS}; do
  for COMPONENT in azure-cloud-controller-manager azure-cloud-node-manager; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes/${COMPONENT}:v${CLOUD_MANAGER_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
  done
done
# CSI driver and sidecar images.
AZUREDISK_CSI_VERSIONS="
1.10.0
"
for AZUREDISK_CSI_VERSION in ${AZUREDISK_CSI_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v${AZUREDISK_CSI_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
AZUREFILE_CSI_VERSIONS="
1.9.0
"
for AZUREFILE_CSI_VERSION in ${AZUREFILE_CSI_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes-csi/azurefile-csi:v${AZUREFILE_CSI_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
CSI_ATTACHER_VERSIONS="
3.3.0
"
for CSI_ATTACHER_VERSION in ${CSI_ATTACHER_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v${CSI_ATTACHER_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
CSI_NODE_DRIVER_REGISTRAR_VERSIONS="
2.4.0
"
for CSI_NODE_DRIVER_REGISTRAR_VERSION in ${CSI_NODE_DRIVER_REGISTRAR_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v${CSI_NODE_DRIVER_REGISTRAR_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
CSI_PROVISIONER_VERSIONS="
3.0.0
"
for CSI_PROVISIONER_VERSION in ${CSI_PROVISIONER_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v${CSI_PROVISIONER_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
LIVENESSPROBE_VERSIONS="
2.5.0
"
for LIVENESSPROBE_VERSION in ${LIVENESSPROBE_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v${LIVENESSPROBE_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
CSI_RESIZER_VERSIONS="
1.3.0
"
for CSI_RESIZER_VERSION in ${CSI_RESIZER_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v${CSI_RESIZER_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
CSI_SNAPSHOTTER_VERSIONS="
4.2.1
"
for CSI_SNAPSHOTTER_VERSION in ${CSI_SNAPSHOTTER_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes-csi/csi-snapshotter:v${CSI_SNAPSHOTTER_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
SNAPSHOT_CONTROLLER_VERSIONS="
4.2.1
"
for SNAPSHOT_CONTROLLER_VERSION in ${SNAPSHOT_CONTROLLER_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes-csi/snapshot-controller:v${SNAPSHOT_CONTROLLER_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
CSI_SECRETS_STORE_PROVIDER_AZURE_VERSIONS="
0.0.12
"
for CSI_SECRETS_STORE_PROVIDER_AZURE_VERSION in ${CSI_SECRETS_STORE_PROVIDER_AZURE_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/azure/secrets-store/provider-azure:${CSI_SECRETS_STORE_PROVIDER_AZURE_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
CSI_SECRETS_STORE_DRIVER_VERSIONS="
0.0.19
"
for CSI_SECRETS_STORE_DRIVER_VERSION in ${CSI_SECRETS_STORE_DRIVER_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes-csi/secrets-store/driver:v${CSI_SECRETS_STORE_DRIVER_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
AAD_POD_IDENTITY_MIC_VERSIONS="
1.6.1
"
for AAD_POD_IDENTITY_MIC_VERSION in ${AAD_POD_IDENTITY_MIC_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/k8s/aad-pod-identity/mic:${AAD_POD_IDENTITY_MIC_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
AAD_POD_IDENTITY_NMI_VERSIONS="
1.6.1
"
for AAD_POD_IDENTITY_NMI_VERSION in ${AAD_POD_IDENTITY_NMI_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/k8s/aad-pod-identity/nmi:${AAD_POD_IDENTITY_NMI_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
CLUSTER_PROPORTIONAL_AUTOSCALER_VERSIONS="
1.8.5
"
for CLUSTER_PROPORTIONAL_AUTOSCALER_VERSION in ${CLUSTER_PROPORTIONAL_AUTOSCALER_VERSIONS}; do
    CONTAINER_IMAGE="mcr.microsoft.com/oss/kubernetes/autoscaler/cluster-proportional-autoscaler:${CLUSTER_PROPORTIONAL_AUTOSCALER_VERSION}"
    loadContainerImage ${CONTAINER_IMAGE}
    echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
done
# This is to accommodate air-gapped environments, e.g., Azure Stack
CONTAINER_IMAGE="registry:2.7.1"
loadContainerImage ${CONTAINER_IMAGE}
echo "  - ${CONTAINER_IMAGE}" >> ${VHD_LOGS_FILEPATH}
# --- final disk-space and provenance checks --------------------------------
df -h
# warn at 75% space taken
# NOTE(review): `[ -s $(...) ]` collapses to the always-true single-arg test
# `[ -s ]` when the awk pipeline prints nothing, so these threshold checks
# look inverted/broken — confirm the intended logic.
[ -s $(df -P | grep '/dev/sda1' | awk '0+$5 >= 75 {print}') ] || echo "WARNING: 75% of /dev/sda1 is used" >> ${VHD_LOGS_FILEPATH}
# error at 95% space taken
[ -s $(df -P | grep '/dev/sda1' | awk '0+$5 >= 95 {print}') ] || exit 1
echo "Using kernel:" >> ${VHD_LOGS_FILEPATH}
tee -a ${VHD_LOGS_FILEPATH} < /proc/version
{ printf "Installed apt packages:\n"; apt list --installed | grep -v 'Listing...'; } >> ${VHD_LOGS_FILEPATH}
{
  echo "Install completed successfully on " $(date)
  echo "VSTS Build NUMBER: ${BUILD_NUMBER}"
  echo "VSTS Build ID: ${BUILD_ID}"
  echo "Commit: ${COMMIT}"
  echo "Feature flags: ${FEATURE_FLAGS}"
} >> ${VHD_LOGS_FILEPATH}
4b335208efa453e73dd475078235ff39d72258ce | Shell | jakobadam/my-conf | /my-emacs.sh | UTF-8 | 236 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env bash
# Install the nightly Emacs snapshot from the ubuntu-elisp PPA and pull my
# personal ~/.emacs.d configuration from github.
add-apt-repository ppa:ubuntu-elisp/ppa
apt-get update
apt-get install emacs-snapshot
# Only (re)clone the config when ~/.emacs.d is not already a git checkout.
[[ -e ~/.emacs.d/.git ]] || {
    rm -rf ~/.emacs.d
    git clone https://github.com/jakobadam/.emacs.d.git ~/.emacs.d
}
| true |
149b9c721be6317bd09371721a947ee917b44d5c | Shell | avoidik/experience-with-vault | /uncarl.sh | UTF-8 | 431 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env bash
# Look up the mount accessor of the OIDC auth method; Vault identity aliases
# are keyed by (alias name, mount accessor).
ACCESSOR_ID="$(vault auth list -format=json | jq -r '.["oidc/"].accessor')"
echo "Accessor ID: ${ACCESSOR_ID}"
#
# unelevate carl
#
echo
echo "Unelevate Carl"
echo
# Resolve Carl's OIDC alias to his identity entity id (empty if none exists).
ENTITY_ID="$(vault write -field=id -format=json identity/lookup/entity \
    alias_name='carldoe@contoso.com' \
    alias_mount_accessor="${ACCESSOR_ID}" | jq -r '.')"
# Delete the entity only when the lookup actually found one.
if [[ -n "${ENTITY_ID}" ]]; then
    vault delete identity/entity/id/"${ENTITY_ID}"
fi
| true |
a97f1075be7b12d5dfb369639fe191c9569a0fcd | Shell | KenichiSuda/blogansible | /execute.sh | UTF-8 | 649 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# Build a JSON-ish --extra-vars object from the key/value pairs below, then
# run the site.yml playbook with (or without) it.
declare -A extra_hash
extra_hash["reinstall_openvpn"]="no"
extra_hash["reinstall_selenium"]="no"
extra_hash["reinstall_mysql"]="no"
extra_hash["vagrant_mode"]="no"
extra_vars=""
# Join the pairs as '"key" : "value"', comma separated. Fixes: the legacy
# '[ "x$v" = "x" ]' emptiness idiom is replaced by -z/-n, and the key
# expansion is quoted.
for key in "${!extra_hash[@]}"; do
    tmp="\"${key}\" : \"${extra_hash[$key]}\""
    if [ -z "${extra_vars}" ]; then
        extra_vars=$tmp
    else
        extra_vars=$extra_vars,$tmp
    fi
done
if [ -n "$extra_vars" ]; then
    extra_vars="{ $extra_vars }"
    echo "Ansible実行します。パラメータ$extra_vars"
    ansible-playbook ./site.yml --extra-vars "$extra_vars"
else
    echo "Ansible実行します。パラメータ無し"
    ansible-playbook ./site.yml
fi
| true |
a4e8b05b6a902d21b9d442821d6d98c91853223b | Shell | batman-nair/dotfiles | /scripts/opout | UTF-8 | 424 | 3.71875 | 4 | [] | no_license | #!/bin/bash
# opout: "open output": A general handler for opening a file's intended output.
# I find this useful especially running from vim.
# Bug fix: every path expansion is quoted so files whose paths contain
# spaces no longer break the dispatch; viewers stay backgrounded with &.
file=$(readlink -f "$1")
ext="${file##*.}"
filename="${file%.*}"
case "$ext" in
	tex|md|rmd) zathura "$filename.pdf" & ;;
	cpp|cc|c|h) "$filename.o" ;;
	py) python "$file" ;;
	# $BROWSER is left unquoted on purpose: it may contain flags.
	html) $BROWSER "$filename.html" & ;;
	svg) feh --magick-timeout 1 "$file" ;;
	*) xdg-open "$file"
esac
| true |
897a666259ccc0ad499e6da2467f3a8c22deab80 | Shell | delkyd/alfheim_linux-PKGBUILDS | /bastet/PKGBUILD | UTF-8 | 1,365 | 2.90625 | 3 | [] | no_license | # Maintainer: Jens Adam <jra@byte.cx>
pkgname=bastet
pkgver=0.43.2
pkgrel=1
pkgdesc="Tetris(r) clone with 'bastard' block-choosing AI"
url="http://fph.altervista.org/prog/bastet.html"
license=('GPL3')
arch=('i686' 'x86_64')
depends=('boost-libs' 'ncurses')
makedepends=('boost')
# The global highscore file is mutable state; pacman keeps it across upgrades.
backup=('var/games/bastet.scores2')
source=("${pkgname}-${pkgver}.tar.gz::https://github.com/fph/bastet/archive/${pkgver}.tar.gz")
md5sums=('aee009b77b8cf9516d686bd24673800e')
build() {
  cd "${srcdir}"/${pkgname}-${pkgver}
  make
}
package() {
  cd "${srcdir}"/${pkgname}-${pkgver}
  # populate files
  install -D bastet "${pkgdir}"/usr/bin/bastet
  install -D -m 0644 bastet.6 "${pkgdir}"/usr/share/man/man6/bastet.6
  install -D -m 0644 bastet.png "${pkgdir}"/usr/share/pixmaps/bastet.png
  install -D -m 0644 bastet.desktop "${pkgdir}"/usr/share/applications/bastet.desktop
  install -D -m 0644 bastet.appdata.xml "${pkgdir}"/usr/share/appdata/bastet.appdata.xml
  for F in AUTHORS INSTALL NEWS README; do
    install -D -m 0644 ${F} "${pkgdir}"/usr/share/doc/bastet/${F}
  done
  # prepare the (optional) global highscore file
  # (users need to be in 'games' group, obviously)
  install -d -m 0775 -g games "${pkgdir}"/var/games
  touch "${pkgdir}"/var/games/bastet.scores2
  chmod 0664 "${pkgdir}"/var/games/bastet.scores2
  chown root:games "${pkgdir}"/var/games/bastet.scores2
}
| true |
d21fdc4773c64f4d5f135d1849a3a635d3aeb50d | Shell | DerpGusta/dotfiles | /i3/.config/i3/i3-restore/utils/error_handling.bash | UTF-8 | 775 | 4.15625 | 4 | [
"MIT"
] | permissive | # Contains useful functions to display errors to the user.
# Must only be used after sourcing common.sh
# Trap all errors. Uses the filename to identify which part of the script was run
trap 'error "An unknown error occured. Run ${0##*/} manually to see the error" 1' ERR
#####################################
# Displays an error using i3-nagbar
# Arguments:
#   Error message
#   Boolean to add button to view logs (Optional)
#####################################
error() {
    # Build the i3-nagbar argument vector.
    local args=()
    args+=("-m" "i3-restore: ${1}")
    # Bug fix: the flag and its value must be separate argv words; the
    # original appended the single word "-t error", which i3-nagbar's
    # option parser cannot interpret as -t with value "error".
    args+=("-t" "error")
    if [[ -n ${2} ]]; then
        # Optional buttons: open the log file / rerun the failing script.
        args+=("-b" "View Logs" "i3-sensible-editor ${I3_RESTORE_LOG_FILE}")
        args+=("-b" "Run Manually" "${0}")
    fi
    # Silence i3-nagbar's own output.
    i3-nagbar "${args[@]}" >/dev/null 2>&1
}
| true |
aafe0c4f7f63016fc409973aea30cb60ac4c9593 | Shell | very-twi/vesta | /bin/v_unsuspend_dns_domain | UTF-8 | 1,679 | 3.453125 | 3 | [] | no_license | #!/bin/bash
# info: unsuspening dns domain
#----------------------------------------------------------#
#                Variable&Function                          #
#----------------------------------------------------------#
# Argument defenition
user=$1
# Convert the domain argument between unicode (-u) and punycode (-a) forms so
# internationalized domain names are handled consistently.
domain=$(idn -t --quiet -u "$2" )
domain_idn=$(idn -t --quiet -a "$domain")
# NOTE(review): $domain_idn appears unused in this script — confirm whether
# the sourced functions rely on it or it can be dropped.
# Importing variables
source $VESTA/conf/vars.conf
source $V_CONF/vesta.conf
source $V_FUNC/shared.func
source $V_FUNC/domain.func
#----------------------------------------------------------#
#                    Verifications                          #
#----------------------------------------------------------#
# Checking arg number
check_args '2' "$#" 'user domain'
# Checking argument format
format_validation 'user' 'domain'
# Checking web system is enabled
is_system_enabled 'dns'
# Checking user
is_user_valid
# Checking domain exist
is_dns_domain_valid
# Check domain is suspened
is_domain_unsuspended 'dns'
#----------------------------------------------------------#
#                       Action                              #
#----------------------------------------------------------#
# Defining config
conf="/etc/named.conf"
# Adding zone in named.conf
# Re-enable the zone by appending its master entry back into named.conf
# (suspension removed it).
nmd_rec="zone \"$domain\" { type master; file \"/etc/namedb/$domain.db\"; };"
echo "$nmd_rec" >> $conf
#----------------------------------------------------------#
#                       Vesta                               #
#----------------------------------------------------------#
# Unsuspending domain in config
update_dns_domain_value '$SUSPEND' 'no'
# Adding task to the vesta pipe
restart_schedule 'dns'
# Logging
log_event 'system' "$V_EVENT"
exit
| true |
395e6986df7dd90d74d86f1d548fc85f18ac18f2 | Shell | itomato/NeXTSrc | /sgcmds-18/usr.bin/install/install.sh.xxx | UTF-8 | 3,162 | 3.796875 | 4 | [] | no_license | #! /bin/sh
#
# @(#)install.sh 4.5 (Berkeley) 10/12/83
#
######################################################################
# HISTORY
# 07-Feb-86 Glenn Marcy (gm0w) at Carnegie-Mellon University
# Added code to post message to local bboard if not reinstalled.
#
# 08-Dec-85 Glenn Marcy (gm0w) at Carnegie-Mellon University
# Added -r option to run ranlib on file. Added -q option for
# non-interactive installations. Doesn't post a bboard message
# and doesn't do a reinstall.
#
# 20-Nov-85 Glenn Marcy (gm0w) at Carnegie-Mellon University
# Added questions after installation for reinstalling file into
# distribution directory and posting a cs-unix bboard message.
# The default for the former is no and the latter is yes.
#
# 25-Aug-85 Glenn Marcy (gm0w) at Carnegie-Mellon University
# Changed default owner and group to cs. Removed explicit
# locations from programs that should be executed from PATH.
# Preceded the remaining explicit file references with "BASEDIR",
# which will be replaced by the Makefile when installed to the
# appropriate value. Added -l switch to create links to the
# destination file.
#
# 16-Apr-82 Mike Accetta (mja) at Carnegie-Mellon University
# Changed to strip comments from csh or sh command scripts
# during installation under the ""-xc"" and ""-xs"" options
# respectively.
#
######################################################################
umask 022
# Defaults: move the file into place; OWNER is a placeholder rewritten to a
# real user/group name when this script itself is installed by the Makefile.
cmd=mv
strip=""
ranlib=""
quick=""
chmod="chmod 755"
chown="/etc/chown -f OWNER"
chgrp="/bin/chgrp -f OWNER"
links=""
# Consume leading option flags until the first non-option argument.
while true ; do
	case $1 in
	-s ) strip="strip"
	     shift
	     ;;
	-r ) ranlib="ranlib"
	     shift
	     ;;
	-q ) quick="quick"
	     shift
	     ;;
	-c ) cmd="cp"
	     shift
	     ;;
	-m ) chmod="chmod $2"
	     shift
	     shift
	     ;;
	-o ) chown="/etc/chown -f $2"
	     shift
	     shift
	     ;;
	-g ) chgrp="/bin/chgrp -f $2"
	     shift
	     shift
	     ;;
	-xc ) cmd="sed"
	      comments='/^[ ]*#/d'
	      shift
	      ;;
	-xs ) cmd="sed"
	      comments='/^[ ]*[#:]/d'
	      shift
	      ;;
	-l ) links="$links $2"
	     shift
	     shift
	     ;;
	* ) break
	    ;;
	esac
done
# Argument sanity: exactly one source ($1) and one destination ($2).
if [ ! ${2-""} ]
then	echo "install: no destination specified"
	exit 1
fi
if [ ${3-""} ]
then	echo "install: too many files specified -> $*"
	exit 1
fi
if [ $1 = $2 -o $2 = . ]
then	echo "install: can't move $1 onto itself"
	exit 1
fi
if [ '!' -f $1 ]
then	echo "install: can't open $1"
	exit 1
fi
# A directory destination means "install under that directory, same name".
if [ -d $2 ]
then	file=$2/$1
else	file=$2
fi
# In -xc/-xs mode, strip comments while copying; the first sed expression
# re-prints a '#!' interpreter line so it survives the comment filter.
if [ "$cmd" = "sed" ]
then	echo sed -e '<strip comments>' $1 ">$file"
	sed -e '1s;^#!;&;p' -e "$comments" $1 >$file
else	echo $cmd $1 $file
	$cmd $1 $file
fi
if [ $strip ]
then	$strip $file
fi
if [ $ranlib ]
then	echo $ranlib $file
	$ranlib $file
fi
# Apply ownership and mode (each command string was assembled above).
echo $chown $file
$chown $file
echo $chgrp $file
$chgrp $file
echo $chmod $file
$chmod $file
# Create any requested hard links to the installed file.
for i in $links
do
	echo ln $file $i
	rm -f $i
	ln $file $i
done
# Interactive post-install (CMU additions): offer to reinstall into the
# distribution tree and to post a bboard announcement; skipped with -q or
# when /dist/root is absent.
if [ '!' -d /dist/root ]
then	quick="quick"
fi
if [ "$quick" != "quick" ]
then	echo -n "reinstall in distribution directory ? [no] "
	read ans
	bb="local"
	if [ "$ans" = "y" -o "$ans" = "yes" ]
	then	reinstall -y $file $links
		bb="cs-unix"
	fi
	echo -n "post $bb bboard message ? [yes] "
	read ans
	if [ "$ans" != "n" -a "$ans" != "no" ]
	then	post -subject $file $bb
	fi
fi
exit 0
| true |
a88af028431d6b2ab69a004bd1975c4113367fed | Shell | ajw498/libraries | /build-sablot | UTF-8 | 263 | 2.984375 | 3 | [] | no_license | #!/bin/sh
# Build Sablotron 1.0.1 for RISC OS. The install prefix defaults to the
# cross-compile tree but can be overridden by the first argument.
SABLOT=Sablot-1.0.1
PREFIX=${1:-/home/riscos/cross/local}
# Unpack a pristine source tree every time.
rm -rf "$SABLOT"
tar -xzf "$SABLOT".tar.gz
# Configure against the expat in the same prefix, then build and install;
# the subshell keeps the cd from leaking into the caller's directory.
(cd "$SABLOT"; \
ro-config --prefix=$PREFIX \
 --with-expat=$PREFIX \
 && make clean && make && make install) || exit 1
| true |
44e6a411e9d31d02971273500a0834d244615392 | Shell | alockwood05/dotfiles | /.zshrc | UTF-8 | 5,628 | 2.796875 | 3 | [] | no_license | # Path to your oh-my-zsh configuration.
ZSH=$HOME/.oh-my-zsh
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
ZSH_THEME="avit"
# NOTE(review): zsh array elements are space-separated; the commas here are
# literal, so the plugin names become "virtualenv," and "git," — confirm
# whether oh-my-zsh tolerates this or the commas should be removed.
plugins=(virtualenv, git, gitfast)
source $ZSH/oh-my-zsh.sh
# The avit theme renders the virtualenv itself; suppress the default prompt.
export VIRTUAL_ENV_DISABLE_PROMPT=yes
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"  # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"  # This loads nvm bash_completion
# To use coreutils `brew install coreutils`
# Puts GNU coreutils ahead of the BSD tools shipped with macOS.
export COREUTILS_GNUBIN_DIR="/usr/local/opt/coreutils/libexec/gnubin/";
export PATH="$COREUTILS_GNUBIN_DIR:$PATH"
export EDITOR=vim
export MANPATH="/usr/local/man:$MANPATH"
#Ruby
# => Brew install chruby
# source /usr/local/share/chruby/chruby.sh
# source /usr/local/share/chruby/auto.sh
# => Brew install rbenv
eval "$(rbenv init -)"
# Prepend the gem user-install bin dir when ruby+gem are available.
if which ruby >/dev/null && which gem >/dev/null; then
    PATH="$(ruby -r rubygems -e 'puts Gem.user_dir')/bin:$PATH"
fi
## Customizations
# Better `ls`
export CLICOLOR=1
export LSCOLORS=GxFxCxDxBxegedabagaced
alias ls='ls -p'
# Better `grep`
# NOTE(review): GREP_OPTIONS is deprecated in modern GNU grep (it warns on
# every invocation); an alias `grep='grep --color=auto'` is the replacement.
export GREP_OPTIONS='--color=auto'
# brew install grc
# Colorized `traceroute`, `tail`, `head` (requires prepending command with `grc`)
[[ -s "/etc/grc.zsh" ]] && source /etc/grc.zsh
# Add tab completion for SSH hostnames based on ~/.ssh/config, ignoring wildcards
[ -e "$HOME/.ssh/config" ] && complete -o "default" -o "nospace" -W "$(grep "^Host" ~/.ssh/config | grep -v "[?*]" | cut -d " " -f2)" scp sftp ssh
alias dc='docker-compose'
# history -E shows timestamps in zsh
alias hist='history -E | grep '
alias vimcc='rm ~/.vim-tmp/* ~/.vim-tmp/.*'
# IP addresses
alias ip="dig +short myip.opendns.com @resolver1.opendns.com"
alias localip="ipconfig getifaddr en1"
alias ips="ifconfig -a | grep -o 'inet6\? \(\([0-9]\+\.[0-9]\+\.[0-9]\+\.[0-9]\+\)\|[a-fA-F0-9:]\+\)' | sed -e 's/inet6* //'"
# Don’t clear the screen after quitting a manual page
export MANPAGER="less -X"
# Make some commands not show up in history
export HISTIGNORE="ls:cd:cd -:pwd:exit:date:* --help"
#
# gitfast updated for local only bash completion
# git bash completion
alias gcor='git checkoutr';
# `~/.oh-my-zsh/plugins/gitfast/git-completion.bash`
# ```
# # __gitcomp_nl "$(__git_refs '' $track)"
# # https://gist.github.com/mmrko/b3ec6da9bea172cdb6bd83bdf95ee817
# if [ "$command" = "checkoutr" ]; then
# __gitcomp_nl "$(__git_refs '' $track)"
# else
# __gitcomp_nl "$(__git_heads '' $track)"
# fi
# ;;
# ```
#
# One Medical specific
#
# Automatically jump to your onelife directory from anywhere
alias onelife='cd ~/Code/onemedical/onelife'
alias patient-ui='cd ~/Code/onemedical/patient-ui'
alias channel-routing='cd ~/Code/onemedical/channel-routing'
alias onelife-ssh='docker exec -it onelife_onelife_1 /bin/bash'
alias channel-routing-dev='docker exec channel-routing_app_1 /bin/bash'
alias channel-routing-local='docker exec channel-routing_app /bin/bash'
alias onelife-seed-local="docker compose run onelife rake onelife:database_setup[\"true\",\"false\"] --rm"
alias onelife-migrate='bin/rails db:migrate RAILS_ENV=development'
alias onelife-inventory-index='rake onelife:search_index:reindex["AppointmentInventories"]'
# alias login-ecr='eval "$( aws ecr get-login --region us-east-1 --no-include-email )"'
alias ecr-login='aws ecr get-login-password | docker login --username AWS --password-stdin 193567999519.dkr.ecr.us-east-1.amazonaws.com'
alias be='bundle exec'
alias onelife-assets='bundle exec rake assets:precompile assets:clean RAILS_ENV=development'
alias onelife-weekly-inventory='docker-compose exec onelife rake onelife:appointment_inventory:populate_one_week'
alias onelife-ssh-seoul='bundle exec beans ssh exec -a onelife-seoul -i ~/.ssh/1life-core.pem'
alias onelife-exec='docker-compose exec onelife'
alias onelife-db-pull='dc run onelife rake onelife:database_setup[true,false] --rm'
alias dcrake="docker-compose run onelife bundle exec rake --rm"
# for use with `binding.pry` debugger
alias onelife-attach='docker attach onelife_onelife_1'
#python: pyenv
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:$PATH"
if command -v pyenv 1>/dev/null 2>&1; then
eval "$(pyenv init -)"
fi
# pyenv activate autocomplete-lambda
# pyenv virtualenvs
# ipython // interactive terminal
# pip3 install -r requirements.txt
# tabtab source for serverless package
# uninstall by removing these lines or running `tabtab uninstall serverless`
[[ -f /Users/alockwood/.nvm/versions/node/v9.11.2/lib/node_modules/serverless/node_modules/tabtab/.completions/serverless.zsh ]] && . /Users/alockwood/.nvm/versions/node/v9.11.2/lib/node_modules/serverless/node_modules/tabtab/.completions/serverless.zsh
# tabtab source for sls package
# uninstall by removing these lines or running `tabtab uninstall sls`
[[ -f /Users/alockwood/.nvm/versions/node/v9.11.2/lib/node_modules/serverless/node_modules/tabtab/.completions/sls.zsh ]] && . /Users/alockwood/.nvm/versions/node/v9.11.2/lib/node_modules/serverless/node_modules/tabtab/.completions/sls.zsh
# tabtab source for slss package
# uninstall by removing these lines or running `tabtab uninstall slss`
[[ -f /Users/alockwood/.nvm/versions/node/v9.11.2/lib/node_modules/serverless/node_modules/tabtab/.completions/slss.zsh ]] && . /Users/alockwood/.nvm/versions/node/v9.11.2/lib/node_modules/serverless/node_modules/tabtab/.completions/slss.zsh
export PATH="/usr/local/opt/terraform@0.11/bin:$PATH"
| true |
33fc62820a6042b15910f2a39ec5e413771c7f14 | Shell | madosuki/shell_scripts | /arrange_armord_key_on_trusted_gpg_d.sh | UTF-8 | 598 | 4 | 4 | [] | no_license | #!/bin/sh
# this script is arrange key on trusted.gpg.d after dearmor to armor key.
OPTIONS=`getopt -o n -l name -- "$@"`
if [ -p /dev/stdin ]; then
name=""
eval set -- ${OPTIONS}
for opt in "$@"; do
case ${opt} in
-n) name=${3}; shift 2;;
--name) name=${3}; shift;;
--) shift; break;;
esac
shift
done
sudo sh -c "cat - | gpg --dearmor > /etc/apt/trusted.gpg.d/${name}.gpg"
else
specific=$(file ${1} | awk '{print $2}')
name=${2}
if [ "${specific}" = "UTF-8" -o "${specific}" = "ASCII" ]; then
sudo sh -c "gpg --dearmor ${1} > /etc/apt/trusted.gpg.d/${name}.gpg"
fi
fi
| true |
1771e8fdea035db87271925993f740a9c81992bd | Shell | telostia/lunarium-guides | /guides/lunarium_auto.sh | UTF-8 | 2,000 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Unattended installer for a Lunarium masternode: installs build deps,
# fetches the wallet release, writes lunarium.conf and starts the daemon.
# Terminal colours used by the prompts below.  These were previously
# undefined, so ${GREEN}/${NONE} expanded to empty strings.
GREEN='\033[0;32m'
NONE='\033[0m'
# --- base system packages -------------------------------------------------
sudo apt-get update -y
sudo apt-get upgrade -y
sudo apt-get dist-upgrade -y
sudo apt-get install git -y
sudo apt-get install nano -y
sudo apt-get install curl -y
sudo apt-get install pwgen -y
sudo apt-get install wget -y
sudo apt-get install build-essential libtool automake autoconf -y
sudo apt-get install autotools-dev autoconf pkg-config libssl-dev -y
sudo apt-get install libgmp3-dev libevent-dev bsdmainutils libboost-all-dev -y
sudo apt-get install libzmq3-dev -y
sudo apt-get install libminiupnpc-dev -y
sudo add-apt-repository ppa:bitcoin/bitcoin -y
sudo apt-get update -y
sudo apt-get install libdb4.8-dev libdb4.8++-dev -y
cd
# --- wallet binaries ------------------------------------------------------
#remove old files
sudo rm lunarium*
sudo rm /usr/local/bin/lunarium*
#get wallet files
#wget https://github.com/telostia/lunarium-guides/raw/master/wallet/linux/lunarium-linux.tar.gz
sudo wget https://github.com/LunariumCoin/lunarium/releases/download/v1.0.2/lunarium-1.0.2-x86_64-linux-gnu.tar.gz
#untar and strip unwanted directories to current folder
sudo tar --strip-components=2 -zxf lunarium-1.0.2-x86_64-linux-gnu.tar.gz
sudo chmod +x lunarium*
sudo cp lunarium-cli lunariumd /usr/local/bin/
#clean up
sudo rm lunarium-linux.tar.gz lunarium_auto.sh
sudo rm lunarium-tx lunarium-cli lunariumd lunarium-qt
sudo rm lunarium-1.0.2-x86_64-linux-gnu.tar.gz
# Open the P2P port.
sudo ufw allow 44071/tcp
# --- masternode configuration --------------------------------------------
#masternode input
echo -e "${GREEN}Now paste your Masternode key by using right mouse click and press ENTER ${NONE}"
read MNKEY
EXTIP=`curl -s4 icanhazip.com`
USER=`pwgen -1 20 -n`
PASSW=`pwgen -1 20 -n`
echo -e "${GREEN}Preparing config file ${NONE}"
#remove directory if exists
rm -rf "$HOME/.lunarium"
sudo mkdir "$HOME/.lunarium"
# Write the daemon config.  The generated/entered values are passed as printf
# *arguments* (not embedded in the format string) so a '%' or backslash in
# them cannot corrupt the output.
printf 'rpcuser=lunarium%s\nrpcpassword=%s\nrpcport=44072\nrpcallowip=127.0.0.1\ndaemon=1\nlisten=1\nserver=1\nmaxconnections=56\nexternalip=%s:44071\nmasternode=1\nmasternodeprivkey=%s' "$USER" "$PASSW" "$EXTIP" "$MNKEY" > "$HOME/.lunarium/lunarium.conf"
sleep 1
lunariumd
sleep 5
watch lunarium-cli getinfo
| true |
18d3e1bcaa77137a036785c5a6b0c4ffe1e9d8ff | Shell | ursulahuang/Neverwing | /EverWing Assets/changeAllNames | UTF-8 | 97 | 2.625 | 3 | [] | no_license | #!/bin/bash
# Prefix every PNG in the current directory with "everwing-".
# Glob directly instead of word-splitting `ls` output, which breaks on
# filenames containing whitespace or glob characters (ShellCheck SC2045).
for f in *.png; do
  # With no matches the pattern remains literal; skip it.
  [ -e "$f" ] || continue
  echo "$f"
  mv -- "$f" "everwing-$f"
done
559f77abd9fa93d2f54aa2203333024c858b1da0 | Shell | bblinder/home-brews | /touchbar_restart.sh | UTF-8 | 310 | 2.875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#restarts the touchbar on my 2019 MBP
# Requires root so pkill/killall can signal the system UI processes.
if [[ "$(id -u)" -ne 0 ]] ; then
echo "::: Please run as root."
# NOTE(review): bare `exit` returns the status of the preceding echo (0),
# so callers cannot detect the refusal -- consider `exit 1`.
exit
fi
if [[ "$(uname -s)" == "Darwin" ]] ; then
# Killing these lets launchd respawn them, refreshing the Touch Bar.
pkill "Touch Bar agent"
killall ControlStrip
else
echo "This will only run on MacOS with a touchbar"
exit 1
fi
| true |
d10665a069f30904a3d3b4f3a593bc86375f03dc | Shell | beauvankirk/persistent | /travis/run.sh | UTF-8 | 816 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -euxo pipefail

# CI driver.  Without a backend: build & test every local package except the
# backend-specific persistent-test suite.  With one: create the database and
# run persistent-test against it.
if [ "$BACKEND" = "none" ]; then
  PACKAGES=$(stack --install-ghc query locals | grep '^ *path' | sed 's@^ *path:@@' | grep -v 'persistent-test' )
  PEDANTIC="--pedantic"
  # Turn off pedantic for lts-7, due to the sometimes invalid
  # redundant constraint warnings.
  if [ "$ARGS" = "--resolver lts-7" ]; then
    PEDANTIC=""
  fi
  # $ARGS/$PEDANTIC/$PACKAGES are intentionally unquoted: each may expand to
  # several words (or to nothing).
  exec stack $ARGS --no-terminal test $PEDANTIC $PACKAGES
fi

# Backend-specific database bootstrap.
case "$BACKEND" in
  postgresql) psql -c 'create database persistent;' -U postgres ;;
  mysql)      mysql -e 'create database persistent;' ;;
esac
cd persistent-test
exec stack $ARGS --no-terminal test --pedantic --fast persistent-test --flag persistent-test:$BACKEND --exec persistent-test
| true |
67457415c36f9d425561411151a798ba2559a5db | Shell | leewis101/docker-headless-shell | /crontab.sh | UTF-8 | 1,222 | 3.09375 | 3 | [] | no_license | #!/bin/bash
# Nightly cron driver: build headless-shell, package it into the Docker image,
# push the tags, prune superseded local images and upload the tarball to
# Slack.  Aborts on the first failure (set -e).
set -e
SRC=$(realpath $(cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd ))
export PATH=$PATH:$HOME/src/misc/chrome/depot_tools
export CHROMIUM_BUILDTOOLS_PATH=/media/src/chromium/src/buildtools
pushd $SRC &> /dev/null
echo "------------------------------------------------------------"
echo ">>>>> STARTING BUILD ($(date)) <<<<<"
rm -f .last
./build-headless-shell.sh
echo ">>>>> ENDED BUILD ($(date)) <<<<<"
echo ">>>>> STARTING DOCKER ($(date)) <<<<<"
docker pull blitznote/debase:18.04
./build-docker.sh
# Derive the version tag from the newest build artifact's file name.
pushd $SRC/out &> /dev/null
VER=$(ls *.bz2|sort -r -V|head -1|sed -e 's/^headless-shell-//' -e 's/\.tar\.bz2$//')
popd &> /dev/null
docker push chromedp/headless-shell:$VER
docker push chromedp/headless-shell:latest
# Remove local images other than :latest and the version just built.
IMAGES=$(docker images|egrep '^chromedp/headless-shell\s+'|grep -v latest|grep -v $VER|awk '{print $3}')
if [ ! -z "$IMAGES" ]; then
docker rmi $IMAGES
fi
echo ">>>>> ENDED DOCKER ($(date)) <<<<<"
echo ">>>>> PUBLISH SLACK ($(date)) <<<<<"
# Upload the release tarball to the Slack channel via files.upload.
curl \
-F file=@./out/headless-shell-$VER.tar.bz2 \
-F channels=CGEV595RP \
-H "Authorization: Bearer $(cat $HOME/.slack-token)" \
https://slack.com/api/files.upload
echo -e "\n>>>>> END SLACK ($(date)) <<<<<"
popd &> /dev/null
| true |
b68c5f74d7c5ef3d3d3198b1419e5dcd0da170cf | Shell | germancarrillo/CERNvarious | /Scripts/runscan.sh | UTF-8 | 913 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# Run a limit scan over mx mass points for a fixed ma hypothesis.
# Usage: source runscan.sh <ma>
ma=$1
shift 1
source /data01/montoya/workarea/HGamma/Stats/xmlAnaWSBuilder/setup.sh
# Compile the limit macro once up front.
cat << EOF > compile.C
{
gROOT->ProcessLine(".L limit.C+");
}
EOF
root -l -q -b compile.C
# Helper executed by GNU parallel for each mx point ('\$' keeps the
# expansions for the generated script rather than expanding them here).
cat << EOF > execute.sh
#!/bin/bash
mx=\$1
ma=\$2
shift 2
source /data01/montoya/workarea/HGamma/Stats/xmlAnaWSBuilder/setup.sh
npoints=30
poimin=0
poimax=\`echo 226.239*e\(-0.00779834*\$mx\) + 2.44177 + 5 | bc -l\` # 3.2fb-1
root -l -q -b "limit.C+(\"Comb_v01.root\", \$mx, \$ma, \$npoints, \$poimin, \$poimax )" &> outputs/log_\$mx\_\$ma.log
EOF
# Scan mx = 200..2000 GeV in 10 GeV steps, one parallel job per point.
seq 200 10 2000 | parallel source execute.sh {} $ma
rm compile.C execute.sh
#to execute: for i in `echo 0.1 0.2 0.3 0.5 0.7 1.0 1.5 2.0 3.0 5.0 7.5 10.0`; do source runscan.sh $i ; done
# Fit parameters behind the poimax formula above (pasted fitter output).
# These lines were previously bare text and made bash abort with a syntax
# error ('(' is not valid mid-command); keep them as comments:
# a = 226.239 +/- 25.07 (11.08%)
# b = -0.00779834 +/- 0.000571 (7.323%)
# c = 2.44177 +/- 0.4661 (19.09%)
| true |
326d4256108a31ed0a48054ef794516f287cbc3f | Shell | IanVan/hover-jet | /infrastructure/scripts/jet_image.sh | UTF-8 | 510 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Build the hover-jet Docker image, tagged by host architecture + timestamp.
# Resolve the repository root; abort if we are not inside a git checkout.
# The old `if [ $? -ne 0 ]; then exit $?` exited with the status of the
# *test* (always 0), silently "succeeding" outside a repo.  A bare `exit`
# propagates git's non-zero status instead.
JET_REPO_PATH=$(git rev-parse --show-toplevel) || exit
# Pick the image name for the host architecture.
CPU_INFO=$(lscpu)
if [[ $(echo "$CPU_INFO" | grep "Architecture:") =~ "x86_64" ]]; then
  IMAGE_NAME="jet"
fi
if [[ $(echo "$CPU_INFO" | grep "Architecture:") =~ "arm" ]]; then
  IMAGE_NAME="jet-arm"
fi
DATE=$(date +%Y.%m.%d-%H.%M.%S)
FULL_IMAGE_NAME_TAG="hoverjet/$IMAGE_NAME:$DATE"
echo "Building image: $FULL_IMAGE_NAME_TAG"
docker build "$JET_REPO_PATH/infrastructure/scripts/docker/" -t "$FULL_IMAGE_NAME_TAG"
| true |
fe7ea7ef1bae44435e1d4f9f4af932561f675b49 | Shell | gHosting20/BashCode | /ghostsettings.sh | UTF-8 | 15,148 | 3.734375 | 4 | [] | no_license | #!/bin/bash
# --- gHost setup, stage 1: package manager / firewall / web server --------
clear
echo "gHost configuration started.."
sleep 3s
# Detect the available package manager; yum-based distros are unsupported.
comm=""
if [ -n "$(command -v apt-get)" ]; then
echo "apt-get"
comm="apt-get"
fi
if [ -n "$(command -v apt)" ]; then
echo "apt"
comm="apt"
fi
if [ -n "$(command -v yum)" ]; then
echo "yum"
comm="yum"
echo "non è ancora stata eleborata l'integrazione di distro yum based"
exit 1
fi
if [[ $comm == "" ]]; then
echo "no apt, apt-get or yum found"
exit 1
fi
clear
echo "update and upgrade starting..."
sudo apt update && sudo DEBIAN_FRONTEND=noninteractive apt upgrade -y
sudo apt install figlet -y
sudo apt install dos2unix -y
sudo apt install git -y
clear
echo "checking firewall..."
# Install ufw when the package is absent.
checkufw='ufw'
if ! dpkg -s $checkufw >/dev/null 2>&1; then
echo "installing ufw firewall"
sudo apt install ufw -y
else
echo "ufw firewall found"
fi
#php check and install
checkphp='php'
webserver=''
clear
echo "check what type of web server is running"
sleep 3s
#check webserver
# NOTE(review): inside [[ ]], `>` is a lexicographic string comparison, not
# numeric -- it happens to work for wc -l output here, but -gt would be
# the correct operator.
if [[ `ps -acx|grep apache|wc -l` > 0 ]]; then
echo "Found Apache"
webserver='Apache'
fi
if [[ `ps -acx|grep nginx|wc -l` > 0 ]]; then
echo "Found Nginx"
webserver='Nginx'
fi
# Neither server running: ask the operator (on the controlling tty) which
# one to install, or bail out.
if [ "$webserver" == "" ]; then
echo "no type of web server found"
echo "Non è stata registrata la presenza di alcun web server...."
echo "Per continuare è necessario installare o Apache o Nginx..."
echo -n "Vuoi installare Apache o Nginx? "
read choice < /dev/tty
if [ "$choice" == "Apache" ]; then
echo "Apache installing..."
sudo apt install apache2 -y
sudo systemctl start apache2
echo "Apache installed and running"
webserver='Apache'
elif [ "$choice" == "Nginx" ]; then
echo "Nginx installing..."
sudo apt install nginx -y
sudo systemctl start nginx
echo "Nginx installed and running"
webserver='Nginx'
else
echo "Se non vuoi installare nessun tipo di web server gHost non puo essere configurato"
exit 1
fi
fi
# --- gHost setup, stage 2: system tweaks, certbot, PHP + vhost config -----
#check bug user per debian
sudo apt-get remove --purge unscd -y
# Force the Europe/Rome timezone.
sudo rm -f /etc/localtime
sudo ln -sf /usr/share/zoneinfo/Europe/Rome /etc/localtime
# Web roots used by the gHost API and file manager.
cd /var/www && sudo mkdir ghost
cd /var/www && sudo mkdir ghost_file_manager
#certbot
clear
echo "Certbot installing..."
sleep 3s
if [ "$webserver" == "Apache" ]; then
sudo add-apt-repository ppa:certbot/certbot
sudo apt install python-certbot-apache -y
else
sudo add-apt-repository ppa:certbot/certbot
sudo apt install python-certbot-nginx -y
fi
#check php
if [ "$webserver" == "Apache" ]; then
clear
echo "writing Apache conf"
sleep 3s
# Inject the /ghostAPI alias + <Directory> block right after DocumentRoot.
LINESTART=$(grep -nr "DocumentRoot" /etc/apache2/sites-available/000-default.conf | cut -d : -f1 )
TEXT='Alias /ghostAPI /var/www/ghost'
TEXT1='<Directory /var/www/ghost>'
TEXT2='Require all granted'
TEXT3='AllowOverride all'
TEXT4='</Directory>'
sed -i $((LINESTART+1))"i\\$TEXT" /etc/apache2/sites-available/000-default.conf
sed -i $((LINESTART+2))"i\\$TEXT1" /etc/apache2/sites-available/000-default.conf
sed -i $((LINESTART+3))"i\\$TEXT2" /etc/apache2/sites-available/000-default.conf
sed -i $((LINESTART+4))"i\\$TEXT3" /etc/apache2/sites-available/000-default.conf
sed -i $((LINESTART+5))"i\\$TEXT4" /etc/apache2/sites-available/000-default.conf
# Install PHP 7.2 (falling back to the distro default when unavailable).
# NOTE(review): `[[ \`php -v\` < 40 ]]` is a lexicographic string compare
# against the whole version banner, not a numeric check -- verify intent.
if ! dpkg -s $checkphp >/dev/null 2>&1; then
clear
echo "no found"
echo "php installing...."
sleep 3s
sudo apt-get install -y php7.2 libapache2-mod-php php-mysql
sudo apt-get install php7.2 -y
sudo apt-get install php7.2-{bcmath,dev,bz2,intl,gd,mbstring,mysql,zip,fpm} -y
if [[ `php -v` < 40 ]]; then
sudo apt-get install -y php libapache2-mod-php php-mysql
sudo apt-get install php -y
sudo apt-get install php-{bcmath,dev,bz2,intl,gd,mbstring,mysql,zip,fpm} -y
fi
else
clear
echo "php found"
sleep 3s
dpkg-query -W -f='${Status} ${Version}\n' php
fi
fi
if [ "$webserver" == "Nginx" ]; then
# Locate the site configuration file to edit (default, or operator-named).
FILE=/etc/nginx/sites-enabled/default
if [ -f "$FILE" ]; then
FILE=/etc/nginx/sites-available/default
else
echo "$FILE non trovato"
echo -n "Perfavore digitare il nome del file di configurazione che gHost dovrà scrivere per completare la configurazione: "
read path < /dev/tty
FILE=/etc/nginx/sites-available/$path
if [ -f "$FILE" ]; then
clear
echo "$FILE found"
else
echo "$FILE non trovato"
exit 1
fi
fi
phpinst=false
if [[ `php -v` > 40 ]]; then
clear
echo "php found"
sleep 3s
phpinst=true
else
clear
echo "php installing..."
sleep 3s
sudo apt install php7.2 php7.2-fpm php7.2-dev php7.2-mysql -y
sudo systemctl restart nginx
fi
if [[ `php -v` < 40 ]]; then
echo "try to get a better version of php..."
sudo apt-get update
sudo apt-get install php-fpm php-dev php-mysql -y
sudo systemctl restart nginx
fi
# Last resort: pull PHP 7.2 from the sury.org repository.
if [[ `php -v` < 40 ]]; then
echo "retry php installing..."
echo "php installing..."
sudo apt install -y apt-transport-https lsb-release ca-certificates
wget -O /etc/apt/trusted.gpg.d/php.gpg https://packages.sury.org/php/apt.gpg
echo "deb https://packages.sury.org/php/ $(lsb_release -sc) main" > /etc/apt/sources.list.d/php.list
sudo apt update
sudo apt install php7.2 php7.2-fpm php7.2-dev php7.2-mysql -y
sudo systemctl restart nginx
fi
clear
echo "writing nginx conf"
sleep 3s
# Major.minor PHP version ("7.2"), used to pick the php-fpm socket path.
word=`php -v | awk '/^PHP/{print $2}'`
version=`printf '%-.3s' "$word"`
# Fresh PHP install: un-comment the PHP location block shipped in the
# default site and point it at the detected php-fpm socket.
if ! $phpinst ; then
sed -i 's/index index.html index.htm index.nginx-debian.html;/index index.php index.html index.htm index.nginx-debian.html;/' $FILE
LINESTART=$(grep -nr ".php$ {" $FILE | cut -d : -f1 )
LINEEND=$((LINESTART+2))
sed -i "${LINESTART},${LINEEND} s/# *//" $FILE
LINESTART1=$(grep -nr "fastcgi_pass unix" $FILE | cut -d : -f1 )
LINEEND1=$((LINESTART1+0))
sed -i "${LINESTART1},${LINEEND1} s/# *//" $FILE
LINEPHP=`sed -n ${LINESTART1}p $FILE`
rgtline=$(echo $LINEPHP | sed 's/\//\\\//g')
sed -i 's/'"${rgtline}"'/fastcgi_pass unix:\/var\/run\/php\/php'"$version"'-fpm.sock;}/' $FILE
fi
# Append the /ghostAPI/ location block just before the closing brace of the
# first server block (last line inserts the closing '}' twice: block + loc).
TEXT='location /ghostAPI/ {'
TEXT1='alias /var/www/ghost/;'
TEXT2='index index.php;'
TEXT3='location ~ \.php$ {'
TEXT4='include snippets/fastcgi-php.conf;'
TEXT5='fastcgi_param SCRIPT_FILENAME $request_filename;'
TEXT6='fastcgi_pass unix:/var/run/php/php'"$version"'-fpm.sock;'
TEXT7='}'
sed -i '/^server {/,/^}/!b;/^}/i\'"$TEXT"'' $FILE
sed -i '/^server {/,/^}/!b;/^}/i\'"$TEXT1"'' $FILE
sed -i '/^server {/,/^}/!b;/^}/i\'"$TEXT2"'' $FILE
sed -i '/^server {/,/^}/!b;/^}/i\'"$TEXT3"'' $FILE
sed -i '/^server {/,/^}/!b;/^}/i\'"$TEXT4"'' $FILE
sed -i '/^server {/,/^}/!b;/^}/i\'"$TEXT5"'' $FILE
sed -i '/^server {/,/^}/!b;/^}/i\'"$TEXT6"'' $FILE
sed -i '/^server {/,/^}/!b;/^}/i\'"$TEXT7"'' $FILE
sed -i '/^server {/,/^}/!b;/^}/i\'"$TEXT7"'' $FILE
fi
# --- gHost setup, stage 3: ruby/scout, vsftpd, MySQL ----------------------
#install ruby and gems per scout-realtime
clear
echo "installing ruby and scout realtime gem"
sleep 3s
sudo apt -y install ruby-full
sudo apt-get -y install rubygems
sudo gem install scout_realtime
#check vsftpd
clear
echo "checking vsftpd package"
sleep 3s
checkvsftpd='vsftpd'
#if [[ $checkvsftpd == *"no packages found"* ]]; then
if ! dpkg -s $checkvsftpd >/dev/null 2>&1; then
echo "no found"
echo "vsftpd installing...."
sudo apt-get install -y vsftpd
sudo systemctl start vsftpd
sudo systemctl enable vsftpd
echo "settings vsftpd.conf"
echo "making bakup of original file..."
sudo cp /etc/vsftpd.conf /etc/vsftpd.conf.back
# Enable writes, chroot users, and disable the per-user root overrides.
sed -i 's/#write_enable=YES/write_enable=YES/' /etc/vsftpd.conf
sed -i 's/#local_umask=022/local_umask=022/' /etc/vsftpd.conf
sed -i 's/#chroot_local_user=YES/chroot_local_user=YES/' /etc/vsftpd.conf
sed -i 's/user_sub_token=$USER/#user_sub_token=$USER/' /etc/vsftpd.conf
# NOTE(review): the next sed uses '/' both as delimiter and (unescaped)
# inside the paths, so the expression is malformed and the substitution
# can never apply -- it needs an alternative delimiter, e.g. s|...|...|.
sed -i 's/local_root=/home/$USER/ftp/#local_root=/home/$USER/ftp/' /etc/vsftpd.conf
echo "writing vsftpd.conf"
echo "userlist_enable=YES" >> /etc/vsftpd.conf
echo "userlist_file=/etc/vsftpd.userlist" >> /etc/vsftpd.conf
echo "userlist_deny=NO" >> /etc/vsftpd.conf
echo "force_dot_files=YES" >> /etc/vsftpd.conf
echo "pasv_min_port=40000" >> /etc/vsftpd.conf
echo "pasv_max_port=50000" >> /etc/vsftpd.conf
echo "Restarting vsftpd..."
sudo systemctl restart vsftpd
echo "Restarted."
else
echo "vsftpd found"
dpkg-query -W -f='${Status} ${Version}\n' vsftpd
fi
sudo apt install wget -y
#mysql check
# "mysql --version" failing (command missing) is taken as "not installed".
checkmysql=$(mysql --version 2>&1)
if [[ ( $checkmysql == *"not found"* ) || ( $checkmysql == *"No such file"* ) ]]
then
clear
echo "mysql not found"
sleep 3s
echo "Installing..."
checklinux=$(lsb_release -a | grep 'Distributor ID:')
if [[ $checklinux == *"Ubuntu"* ]]; then
sudo apt update
sudo apt install mysql-server -y
sudo systemctl start mysql
sudo systemctl enable mysql
# Switch root to password auth with the operator-supplied password.
echo -n "Inserisci la password root per MySQL: "
read answer < /dev/tty
com="alter user 'root'@'localhost' identified with mysql_native_password by '$answer'"
sudo mysql -uroot -p -e "$com"
elif [[ $checklinux == *"Debian"* ]]; then
# Debian needs Oracle's apt-config package to expose mysql-server.
sudo apt update
wget http://repo.mysql.com/mysql-apt-config_0.8.13-1_all.deb
sudo dpkg -i mysql-apt-config_0.8.13-1_all.deb
sudo apt update
sudo apt install mysql-server -y
sudo systemctl start mysql
sudo systemctl enable mysql
else
clear
echo "mysql non disponibile per questa distro"
sleep 5s
fi
else
clear
echo "mysql found"
sleep 3s
fi
# --- gHost setup, stage 4: MariaDB sanity check + MongoDB -----------------
#mariadb check (not supported from gHost)
# Warn when the installed "mysql" actually depends on mariadb-server.
comaria=$(dpkg -l | grep -e mariadb-server)
length=${#comaria}
if [[ $length > 0 ]]; then
clear
echo "la versione di MySQL presente sul tuo hosting ha delle dipendenze verso mariadb-server, gHost non supporta questa versione di MySQL"
sleep 5s
fi
#mongodb check
checkmongo=$(mongo --version 2>&1)
if [[ ( $checkmongo == *"not found"* ) || ( $checkmongo == *"No such file"* ) ]]
then
clear
echo "mongo not found"
sleep 3s
# Import MongoDB's signing key; install gnupg first if apt-key failed.
key=$(wget -qO - https://www.mongodb.org/static/pgp/server-4.2.asc | sudo apt-key add -)
if [[ $key == *"OK"* ]]; then
echo "key imported successfully"
else
sudo apt-get install gnupg -y
wget -qO - https://www.mongodb.org/static/pgp/server-4.2.asc | sudo apt-key add -
fi
# Pick the MongoDB 4.2 repository matching the distro / release.
checkdistro=$(lsb_release -a | grep 'Distributor ID:')
if [[ $checkdistro == *"Ubuntu"* ]]; then
echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu bionic/mongodb-org/4.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-4.2.list
sudo apt update
sudo apt-get install -y mongodb-org
sudo systemctl start mongod
sudo systemctl enable mongod
elif [[ $checkdistro == *"Debian"* ]]; then
checkdebian=$(lsb_release -a | grep 'Description')
if [[ $checkdebian == *"stretch"* ]]; then
echo "deb http://repo.mongodb.com/apt/debian stretch/mongodb-enterprise/4.2 main" | sudo tee /etc/apt/sources.list.d/mongodb-enterprise.list
sudo apt-get update
sudo apt-get install -y mongodb-enterprise
sudo systemctl start mongod
sudo systemctl enable mongod
elif [[ $checkdebian == *"buster"* ]]; then
echo "deb http://repo.mongodb.com/apt/debian buster/mongodb-enterprise/4.2 main" | sudo tee /etc/apt/sources.list.d/mongodb-enterprise.list
sudo apt-get update
sudo apt-get install -y mongodb-enterprise
sudo systemctl start mongod
sudo systemctl enable mongod
else
clear
echo "mongoDB non disponibile per questa distro"
sleep 3s
fi
else
clear
echo "mongoDB non disponibile per questa distro"
sleep 3s
fi
else
clear
echo "mongo found"
sleep 3s
fi
# --- gHost setup, stage 5: PHP mongo driver, ghost user, firewall ---------
clear
echo "install php driver for mongo"
sleep 3s
sudo apt install php-pear php-mongodb -y
sudo pecl install mongodb
# Path of the active php.ini, extracted from `php -i`.
vers=`php -i | grep /.+/php.ini -oE`
offvers="$vers"
# NOTE(review): the redirection below is performed by the *calling* shell,
# so "sudo echo" grants no write access to php.ini (nor does the plain
# >> /etc/sudoers further down) -- this only works if the whole script
# already runs as root.
sudo echo ";extension=mongodb.so" >> "$offvers"
if [ "$webserver" == "Nginx" ]; then
sudo systemctl restart nginx
else
sudo systemctl restart apache2
fi
sudo systemctl restart mongod
#ghost user creation
clear
echo "ghost user creation and configuration as root"
sleep 3s
sudo adduser ghost --gecos "First Last,RoomNumber,WorkPhone,HomePhone" --disabled-password
echo -n "Inserisci la password per ghost: "
read answerpass < /dev/tty
echo "ghost:$answerpass" | sudo chpasswd
echo "ghost root permission settings..."
# Grant ghost passwordless sudo.
sudo usermod -aG sudo,adm ghost
echo "ghost ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
echo "ghost is root"
#ghost ftp settings
echo "ghost ftp settings..."
sudo usermod -d /var/www ghost
echo "ghost properties on destination folder..."
sudo chown ghost:ghost /var/www/ghost
sudo chown ghost:ghost /var/www/ghost_file_manager
echo "ghost" | sudo tee -a /etc/vsftpd.userlist
clear
echo "ufw settings...."
sleep 3s
# Open SSH, HTTP/S, FTP (incl. passive range) and the local control port.
sudo ufw allow ssh
sudo ufw allow 80/tcp
sudo ufw allow 443/tcp
sudo ufw allow 20/tcp
sudo ufw allow 21/tcp
sudo ufw allow 990/tcp
sudo ufw allow 40000:50000/tcp
sudo ufw allow in from 127.0.0.1 to any port 5555 proto tcp
echo "y" | sudo ufw enable
# Run the web server (and php-fpm) as the ghost user.
if [ "$webserver" == "Nginx" ]; then
echo "Make adjustament for Nginx web server"
word=`php -v | awk '/^PHP/{print $2}'`
version=`printf '%-.3s' "$word"`
versionphp="$version"
ngingroup=$(grep "group = " -m1 /etc/php/$version/fpm/pool.d/www.conf)
ngingroupuser=$(grep ".group = " /etc/php/$version/fpm/pool.d/www.conf)
nginuser=`ps aux | egrep '([n|N]ginx|[h|H]ttpd)' | awk '{ print $1}' | uniq | tail -1`
sed -i 's/user '"$nginuser"';/user ghost;/' /etc/nginx/nginx.conf
sed -i 's/user = '"$nginuser"'/user = ghost/' /etc/php/$version/fpm/pool.d/www.conf
sed -i 's/'"$ngingroup"'/group = ghost/' /etc/php/$version/fpm/pool.d/www.conf
sed -i 's/listen.owner = '"$nginuser"'/listen.owner = ghost/' /etc/php/$version/fpm/pool.d/www.conf
sed -i 's/'"$ngingroupuser"'/listen.group = ghost/' /etc/php/$version/fpm/pool.d/www.conf
echo "file written."
echo "Restarting services...."
echo "Restarting Nginx...."
sudo systemctl restart nginx
echo "Restarting php...."
sudo systemctl restart php"$versionphp"-fpm
echo "Done."
figlet gHost
echo "Developed by Simone Ghisu and Marcello Pajntar"
elif [ "$webserver" == "Apache" ]; then
sudo chown root:adm /var/log/apache2
echo "Make adjustament for Apache web server"
checkgroup=$(grep "export APACHE_RUN_GROUP=" /etc/apache2/envvars)
checkuser=$(grep "export APACHE_RUN_USER=" /etc/apache2/envvars)
sed -i 's/'"$checkuser"'/export APACHE_RUN_USER=ghost/' /etc/apache2/envvars
sed -i 's/'"$checkgroup"'/export APACHE_RUN_GROUP=root/' /etc/apache2/envvars
echo "Restarting Apache...."
sudo systemctl restart apache2
echo "Done."
figlet gHost
echo "Developed by Simone Ghisu and Marcello Pajntar"
fi
| true |
0b72d93e4657e932e7aedb6c67ba8738c2da290a | Shell | flexiOPSResources/FCONodeMonitoringScripts | /scripts/nodeload/node-load10min.sh | UTF-8 | 1,760 | 3.375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
#Location for files to be created. This example explores the use of two clusters with one management coming
#directly from the location the script is running and the second where Node tool needs to be ran on a different machine
# Samples per-node load and core count for two clusters every 30s, appending
# one CSV row per node; each CSV is reset when it still contains a timestamp
# from 10 minutes ago (i.e. ~10 minutes of samples have accumulated).
while true;
do
#Replace ip with required IP address
# NOTE(review): "sshe" looks like a site-local ssh wrapper -- confirm.
NODEIPS=$( node-tool -l -v | grep 'Node IP' | awk '{print $3}')
NODEIPS2=$(sshe 11.222.33.44 node-tool -l -v | grep 'Node IP' | awk '{print $3}')
NODEDIR="/opt/extility/skyline/war/nodeload10/"
NODEDIR2="/opt/extility/skyline/war/nodeload10/"
DATE=$(date "+%Y-%m-%dT%H:%M:%S")
# Append load + core count for node $i (the caller's loop variable, read as
# a global) to its CSV in the local cluster.
cluster1 ()
{
NODELOAD1=$(/usr/bin/node-tool -v $i | grep Load | awk '{print $2}')
NODECORE1=$(/usr/bin/node-tool -v $i | grep 'CPU core'| awk '{print $3}')
echo $i,$NODELOAD1,$NODECORE1,$DATE >> $NODEDIR$i.csv
}
# Same for the remote cluster's node $n, queried over ssh.
cluster2 ()
{
NODELOAD2=$(sshe 11.222.33.44 /usr/bin/node-tool -v $n | grep Load | awk '{print $2}')
NODECORE2=$(sshe 11.222.33.44 /usr/bin/node-tool -v $n | grep 'CPU core'| awk '{print $3}')
echo $n,$NODELOAD2,$NODECORE2,$DATE >> $NODEDIR2$n.csv
}
# One background job per node; rotate the CSV (fresh header) when the
# 10-minute-old timestamp is still present in the file.
for i in $NODEIPS
do
if [ $( grep $(date -d"10 minutes ago" +%H:%M) $NODEDIR$i.csv ) ] ; then
rm -f $NODEDIR$i.csv
touch $NODEDIR$i.csv
echo "NODEIP,LOAD,CORES,DATE" > $NODEDIR$i.csv
cluster1
else
cluster1
fi &
done
for n in $NODEIPS2
do
if [ $( grep $(date -d"10 minutes ago" +%H:%M) $NODEDIR2$n.csv ) ] ; then
rm -f $NODEDIR2$n.csv
touch $NODEDIR2$n.csv
echo "NODEIP,LOAD,CORES,DATE" > $NODEDIR2$n.csv
cluster2
else
cluster2
fi &
done
sleep 30
done
| true |
46b2cded72be84c459d8172a67a993dca49926ae | Shell | avallonking/ScienceScripts | /bam2fastq.sh | UTF-8 | 196 | 2.90625 | 3 | [] | no_license | #convert all bam in the same directory to fastq
#!/bin/sh
# Convert every BAM in the current directory to FASTQ with samtools bam2fq.
for i in *.bam
do
  # With no .bam files the pattern remains literal; skip it.
  [ -e "$i" ] || continue
  # Strip only the trailing ".bam".  The previous `cut -d . -f 1-2`
  # truncated names containing more than one dot (e.g. sample.sorted.bam
  # variants such as a.b.c.bam -> a.b).
  name=${i%.bam}
  /data/home/lijiaj/software/samtools/bin/samtools bam2fq -On -s "$name.fastq" "$i"
done
| true |
9ce6722d9a24e63e1a0d4e7c0c9ff4153aa0cd53 | Shell | portsip/resiprocate | /rutil/fixupGperf | UTF-8 | 1,583 | 4.25 | 4 | [
"BSD-3-Clause",
"VSL-1.0",
"BSD-2-Clause"
] | permissive | #!/bin/sh -x
# Program name for diagnostics, and the global list of temp files that
# cleanup() removes on every exit path.
PN=$(basename $0 )
TMPFILES=""
# Delete every temporary file recorded in the global TMPFILES list
# (space-separated; relies on word-splitting, so no quotes on the list).
cleanup()
{
    for stale in ${TMPFILES}; do
        /bin/rm -f "${stale}"
    done
}
# Create a temp file (template fgp.XXXXXX in the CWD), record it in the
# global TMPFILES list and print its name on stdout.
# NOTE: callers invoke this as $(mkTemp), i.e. inside a command-substitution
# subshell, so the TMPFILES update is not visible to the parent shell.
mkTemp()
{
    newtmp=$(mktemp fgp.XXXXXX)
    TMPFILES="${TMPFILES} ${newtmp}"
    printf '%s\n' "${newtmp}"
}
# Report a usage error: the message (all arguments) goes to stdout, the
# synopsis to stderr; temp files are removed and the script exits 1.
# NOTE(review): the first printf is arguably meant for stderr as well.
usage()
{
printf "%s: %s\n" "${PN}" "${*}"
printf "usage: %s file -o outputfile [--include=file ...]\n" \
"${PN}" >&2
cleanup
exit 1
}
# Abort after a failed sed edit (the gperf output format may have changed);
# removes temp files and exits with status 2.
die()
{
printf "%s: error editing. Has output changed?\n" "${PN}" >&2
cleanup
exit 2
}
# Expected invocation:
#   fixupGperf <input> -o <output> [--ignorecase] [--ns=N] [--us=N] [--include=F]...
[ ${#} -ge 3 ] || usage too few arguments.
[ "${2}" == "-o" ] || usage syntax error.
OF="${3}"
IF="${1}"
shift
shift
shift
[ -r "${IF}" ] || usage unable to open file.
# T1 accumulates the generated prologue (includes / usings / namespaces)
# followed by the (possibly rewritten) gperf output.
# NOTE(review): mkTemp runs inside a command substitution, so its TMPFILES
# bookkeeping happens in a subshell and cleanup() never sees T1/T2.
T1=$(mkTemp)
touch ${T1}
NS=0
IC=0
# Remaining options; ${1//-*=} strips the leading "--opt=" prefix (a bash
# pattern substitution, despite the #!/bin/sh shebang -- as is `==` above).
while [ "$#" -ge 1 ]; do
case "${1}" in
--ignorecase) IC=1;;
--ns*=*)
printf "namespace %s\n{\n" "${1//-*=}" >> ${T1}
NS=$(( ${NS} + 1 ))
;;
--us*=*)
printf "using %s;\n" "${1//-*=}" >> ${T1}
;;
--i*=*)
printf "#include \"%s\"\n" "${1//-*=}" >> ${T1}
;;
*) ;;
esac
shift
done
T2=$(mkTemp)
if [ "${IC}" -eq 1 ]; then
# Case-insensitive variant: wrap str[i] accesses in tolower() and turn the
# strncmp guard emitted by gperf into strncasecmp.
sed 's/str\[\([0-9][0-9]*\)\]/tolower(str[\1])/g' ${IF} >> ${T1} || die
sed 's/^\([ ]*\)if *(\*\([a-z][a-z]*\) *== *\*\([a-z][a-z]*\) *&& *!strncmp (\([^)]*\)).*$/\1 if (tolower(*\2) == *\3 \&\& !strncasecmp( \4 ))/g' ${T1} > ${T2}
else
cat ${IF} >> ${T1}
cat ${T1} > ${T2}
fi
# Close namespaces
while [ ${NS} -gt 0 ]; do printf "}\n" >> ${T2}; NS=$(( $NS - 1 )); done
cp ${T2} ${OF}
cleanup
exit 0
| true |
b32ffb4f4eac5f2f4c4d8ffb8b432b58c0e41cfd | Shell | kechol/dotfiles | /bootstrap.sh | UTF-8 | 795 | 3.765625 | 4 | [] | no_license | #!/usr/bin/env bash
#symlinks
# Link every top-level file of this repository into $HOME as a dotfile
# (foo -> ~/.foo), backing up pre-existing regular files into
# ./backup/<timestamp>/ first.
# Use a private name: assigning to the special PWD variable confuses the
# shell's directory tracking.
REPO_DIR=$(pwd)
PREFIX='.'
# Repository plumbing that must never be linked into $HOME.
IGNOREFILES=( .. backup bootstrap.sh brew.sh README.md .git .gitignore .gitmodules osx )
BACKUPTIME=$(date +%s)
BACKUPDIR="${REPO_DIR}/backup/${BACKUPTIME}"
# Iterate the glob directly instead of word-splitting `ls` output, which
# breaks on names containing whitespace (ShellCheck SC2045).
for DOTFILE in *
do
  [ -e "${DOTFILE}" ] || continue
  for IGNOREFILE in "${IGNOREFILES[@]}"
  do
    if [ "${DOTFILE}" == "${IGNOREFILE}" ]
    then
      continue 2
    fi
  done
  SYMLINK="${HOME}/${PREFIX}${DOTFILE}"
  if [ ! -d "${BACKUPDIR}" ]
  then
    mkdir -p "${BACKUPDIR}"
  fi
  # Back up an existing real file (not a symlink) before replacing it.
  if [ -f "${SYMLINK}" ] && [ ! -L "${SYMLINK}" ]
  then
    cp -pfa "${SYMLINK}" "${BACKUPDIR}"
    echo "Move: ${BACKUPDIR}/${SYMLINK}"
  fi
  # (Re)create the link when it is missing or currently dangling.
  if [ ! -L "${SYMLINK}" ] || [ ! -e "${SYMLINK}" ]
  then
    echo "Link: ${REPO_DIR}/${DOTFILE} => ${SYMLINK}"
    rm -Rf "${SYMLINK}"
    ln -fs "${REPO_DIR}/${DOTFILE}" "${SYMLINK}"
  fi
done
| true |
5a571f5f2bc9b92b8a06598f2f50773932e05afc | Shell | takumi-oikawa/dotfiles | /install.sh | UTF-8 | 250 | 3 | 3 | [] | no_license | #!/bin/bash
# Symlink every dotfile in this repository into $HOME (skipping git
# metadata), then link the nvim config directory into ~/.config.
# Resolve the repository directory once instead of per iteration.
repo_dir=$(cd "$(dirname "$0")" && pwd)
for f in .??*
do
  # With no matches the pattern itself is returned; skip it.
  [ -e "$f" ] || continue
  [ "$f" = ".git" ] && continue
  [ "$f" = ".gitignore" ] && continue
  ln -svf "$repo_dir/$f" "$HOME/$f"
done
# Ensure ~/.config exists before linking the nvim directory into it.
mkdir -p "$HOME/.config"
ln -svf "$repo_dir/nvim" "$HOME/.config/nvim"
| true |
eeff583bf33ce48e0bd04b8068a285796ccebdeb | Shell | wangjuanmt/kb_script | /suse/util/init_ruby.sh | UTF-8 | 509 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env bash
# Install Ruby 2.1 (plus the distro default) on openSUSE via zypper, switch
# the gem source to the Taobao mirror (mainland-China network access) and
# install pry for both interpreters.
sudo zypper -n install ruby2.1 ruby2.1-devel ruby ruby-devel
# Change to mainland rubygem taobao mirror
gem source -r https://rubygems.org/
gem source -a https://ruby.taobao.org/
#gem source -a https://rubygems.org/
sudo gem install pry
sudo gem2.1 install pry
# To install gems in home dir, use --user-install,
# then add "$(ruby -rubygems -e 'puts Gem.user_dir')/bin" to $PATH,
# which is usually "$HOME/.gem/ruby/1.9.1/bin" for ruby1.9.x,
# and "$HOME/.gem/ruby/2.0.0" for ruby2.x.
| true |
fd34d5ad53f91d9aaceef5965d3823ef7c9e72c3 | Shell | burdenc/torrenter | /port_forward/pia-port.sh | UTF-8 | 537 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# Exit cleanly when the process supervisor sends SIGTERM.
trap 'exit 0' SIGTERM
# Last port pushed to Transmission vs. the port currently advertised by PIA.
OLDPORT=0
PORT=0
# Credentials for transmission-remote, taken from the environment.
auth="$TRANSMISSION_USER:$TRANSMISSION_PASS"
echo "Running port forward helper..."
while true
do
  # PIA writes the forwarded port into this shared file; keep the old
  # value while the file is not readable (yet).
  [ -r "/pia-shared/port.dat" ] && PORT=$(cat /pia-shared/port.dat)
  # Only talk to Transmission when the advertised port actually changed.
  if [ $OLDPORT -ne $PORT ]; then
    echo "Setting Transmission port settings ($PORT)..."
    transmission-remote "${TRANSMISSION_PORT}" --auth "${auth}" -p "${PORT}"
    echo "Testing port..."
    transmission-remote "${TRANSMISSION_PORT}" --auth "${auth}" -pt
    OLDPORT=$PORT
  fi
  # Sleep in the background and `wait` on it so the SIGTERM trap fires
  # immediately instead of after the full 10 s sleep finishes.
  sleep 10 &
  wait $!
done
f9a74302db158966ff346ee9f5368a211070255a | Shell | burnegg/TG799vac-router | /lib/netifd/wireless/quantenna.sh | UTF-8 | 1,934 | 3.390625 | 3 | [] | no_license | #!/bin/sh
# FRV: This script is the minimum needed to be able to let netifd add wireless interfaces.
# Wireless parameters themselves (ssid,...) are to be updated via
# hostapd_cli uci_reload
# OR
# ubus call wireless reload
#
# The sourced framework files below provide config_add_*, wireless_*,
# for_each_interface, uci_get_state and add_driver used throughout.
. $IPKG_INSTROOT/lib/functions.sh
NETIFD_MAIN_DIR="${NETIFD_MAIN_DIR:-/lib/netifd}"
. $NETIFD_MAIN_DIR/netifd-wireless.sh
init_wireless_driver "$@"
#FRV: Add device config parameters that are needed below
drv_quantenna_init_device_config() {
	dummy=1
}
#FRV: Add iface config parameters that are needed below
drv_quantenna_init_iface_config() {
	config_add_int state
	config_add_string hotspot_timestamp
}
#FRV: Map radio and interface number to interface name.
#!! For quantenna: only one interface is supported for the moment
# Result is returned through the global g_quantenna_if_name.
quantenna_get_if_name() {
	g_quantenna_if_name=eth5
}
#FRV: Setup virtual interface
#     -> pass real interface name back to netifd
quantenna_setup_vif() {
	local name="$1"
	quantenna_get_if_name $name
	#Add to network
	wireless_add_vif "$name" "$g_quantenna_if_name"
	#Enable interface if needed
	# (state "0" in uci means the interface was explicitly disabled)
	state=$(uci_get_state "wireless" "$g_quantenna_if_name" "state")
	if [ "$state" != "0" ] ; then
		ifconfig $g_quantenna_if_name up
#	else
#		ifconfig $g_quantenna_if_name down
	fi
}
#FRV: Setup all interfaces of a radio
#     -> pass interface names back to netifd via ubus
#     -> enable them
drv_quantenna_setup() {
	g_quantenna_radio_name=$1
#	json_dump
	for_each_interface "sta ap adhoc" quantenna_setup_vif
#	wireless_set_data phy=phy0
	wireless_set_up
}
# Bring one virtual interface down (teardown counterpart of setup_vif).
quantenna_teardown_vif() {
	local name="$1"
	quantenna_get_if_name $name
	ifconfig $g_quantenna_if_name down
}
#FRV: Not sure what this should do.
drv_quantenna_teardown() {
	g_quantenna_radio_name=$1
	for_each_interface "sta ap adhoc" quantenna_teardown_vif
#	json_select data
#	json_get_var phy phy
#	json_select ..
#	json_dump
}
drv_quantenna_cleanup() {
	dummy=1
}
# Register this driver with the netifd wireless framework.
add_driver quantenna
b22ce5eea6ffdb206bf5dbcfc7e4560a9f9de73f | Shell | msegzda/rpi-aws-automation | /etc/scripts/capture-image | UTF-8 | 4,934 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# Capture one webcam image, annotate it with weather data, upload it to
# S3 and refresh the "current image" redirect page.  Configuration is
# read from /etc/rpi.json; $1 selects the camera section in it.
# dont do anything if process is already running
if [ $(pgrep -f ${0} | wc -l) -gt 2 ]; then
    echo "[ERR] Process of ${0} is already running"
    exit
fi
# Tag used for all syslog entries: "<scriptname>[<pid>]".
loggertitle="$(basename ${0})[$$]"
FILENAME=`date +%Y%m%d%H%M%S`
DATEDIR=`date +%Y/%m/%d`
RETRYDATE=`date +%Y%m%d`
# Camera id from $1, forced to lower case (bash ${var,,}).
CAM=${1,,} # always lower case ($1 param)
# The whole JSON config is read once and queried piecewise with jq.
CONFIG=$(cat /etc/rpi.json)
maindir=$(jq -r .maindir <<< $CONFIG)
TMPLOCAL="$maindir/$CAM/$DATEDIR"
# Rate limit: bail out if an image was captured within the last minute.
if [ ! -z "$(find $TMPLOCAL -mmin -1 -type f -name *.jpg | head -n 1)" ]; then
    echo "Image is already captured in the last minute"
    exit
fi
wwwhtml="/var/www/html"
htmlcurrent="$wwwhtml/$CAM/current"
SUNFILE=$(cat $maindir/sunrise-sunset-today.json)
setoffset=$(jq -r .sun.setoffset <<< $CONFIG)
riseoffset=$(jq -r .sun.riseoffset <<< $CONFIG)
# jq filter strings (not values) used later on weather.json.
jq_temp=$(jq -r .weather.jq_temp <<< $CONFIG)
jq_wind=$(jq -r .weather.jq_wind <<< $CONFIG)
if [ -z "$CAM" ]; then
    echo "ERROR: Please specify \$1 parameter (for eg. cam1)"
    exit
fi
s3bucket="s3://"$(jq -r .aws.s3 <<< $CONFIG)"/$CAM"
s3public=$(jq -r .aws.s3pub <<< $CONFIG)"/$CAM"
# Regenerate today's sunrise/sunset data if the file was empty/missing.
if [ -z "$SUNFILE" ]; then
    /etc/scripts/sunset-sunrise-today
fi
if [ ! -d $TMPLOCAL ]; then
    mkdir -v -p $TMPLOCAL
    chmod -v 777 $TMPLOCAL
fi
if [ ! -d $htmlcurrent ]; then
    mkdir -v -p $htmlcurrent
    chmod -v 777 $htmlcurrent
fi
# in the night do not capture
# below compares numbers in HHmmss format
sunrse=`date +"%H%M%S" --date="$(jq -r .results.sunrise <<< $SUNFILE) $riseoffset"`
sunset=`date +"%H%M%S" --date="$(jq -r .results.sunset <<< $SUNFILE) $setoffset"`
NOW=$(date +"%H%M%S")
if [ $NOW -ge $sunrse ] && [ $NOW -le $sunset ]; then
    # The capture command is a printf template from the config; its %s
    # placeholder receives the output file path.
    takecmd=$(jq -r .$CAM.takecmd <<< $CONFIG)
    takecmd=$(printf "$takecmd" "/tmp/$CAM-$FILENAME-1.jpg")
    logger -t $loggertitle "> $takecmd"
    eval $takecmd 2>&1 | logger -t $loggertitle
    # If file is not JPEG reboot the cam with $rebootcmd
    # (PIPESTATUS[0] is the exit code of the eval'd capture command.)
    if [[ ${PIPESTATUS[0]} -ne 0 ]] || [[ ! $(file -b "/tmp/$CAM-$FILENAME-1.jpg") =~ JPEG ]]; then
        logger -t $loggertitle "[ERR] $CAM failed to provide valid JPEG"
        cat /tmp/$CAM-$FILENAME-1.jpg | tail -n 20 | logger -t $loggertitle
        # reboot of cam cannot happen frequently
        # (/tmp/$CAM.rebooted acts as a "recently rebooted" marker)
        if [ ! -e /tmp/$CAM.rebooted ]; then
            rebootcmd=$(jq -r .$CAM.rebootcmd <<< $CONFIG)
            logger -t $loggertitle "> $rebootcmd"
            eval $rebootcmd | jq -rc ".[0]" 2>&1 | logger -t $loggertitle
            # try once more if needed
            if [ ${PIPESTATUS[1]} -ne 0 ]; then
                sleep 10s
                eval $rebootcmd | jq -rc ".[0]" 2>&1 | logger -t $loggertitle
            fi
            touch /tmp/$CAM.rebooted
        else
            logger -t $loggertitle "[WARN] $CAM was rebooted lately, not sending another command."
        fi
        exit
    fi
    sleep 3 # wait a bit
    # now annotate with temperature
    temp=$(jq -rc "$jq_temp" < $maindir/weather.json)
    wind=$(jq -rc "$jq_wind" < $maindir/weather.json)
    camname=$(jq -r .$CAM.name <<< $CONFIG)
    # annotateformat/annotatecmd are printf templates from the config:
    # first filled with temperature+camera, then with in/out file names.
    annotateformat=$(jq -r .$CAM.annotateformat <<< $CONFIG)
    annotateformat=$(printf "$annotateformat" "$temp" "$CAM")
    annotatecmd=$(jq -r .$CAM.annotatecmd <<< $CONFIG)
    annotatecmd=$(printf "$annotatecmd" "/tmp/$CAM-$FILENAME-1.jpg" "$annotateformat" "/tmp/$CAM-$FILENAME-2.jpg")
    #TODO: TZ=Europe/Vilnius date --iso-8601=seconds
    #2019-03-13T19:32:57+02:00 annotate with date in this format
    logger -t $loggertitle "> $annotatecmd"
    eval $annotatecmd 2>&1 | logger -t $loggertitle
    if [ ${PIPESTATUS[0]} -eq 0 ]; then
        # place annotated image into correct place
        mv -fv /tmp/$CAM-$FILENAME-2.jpg $TMPLOCAL/$FILENAME.jpg | logger -t $loggertitle
    else
        logger -t $loggertitle "[WARN] Annotation failed. IM convert exit code: ${PIPESTATUS[0]}"
    fi
    # remove residual tmp files
    rm -fv /tmp/$CAM-$FILENAME-*.jpg | logger -t $loggertitle
    # now upload to s3
    # Note: this requires S3 PutObject and PutObjectAcl permissions
    aws s3 cp $TMPLOCAL/$FILENAME.jpg $s3bucket/$DATEDIR/ \
        --acl "public-read" 2>&1 | logger -t $loggertitle
    if [ ${PIPESTATUS[0]} -eq 0 ]; then
        # Escape "/" so the URL can be used inside the sed s/%s/.../ below,
        # then instantiate the redirect template for the "current" page.
        redirto=$(sed "s/\//\\\\\//g" <<< "$s3public/$DATEDIR/$FILENAME.jpg")
        sedcmd="sed \"s/%s/${redirto}/g\" $wwwhtml/redirect.template.html > $htmlcurrent/index.html"
        logger -t $loggertitle "> $sedcmd"
        eval $sedcmd
    else
        # Upload failed: queue the exact aws command into a daily retry
        # file so it can be replayed later.
        logger -t $loggertitle "[ERR] AWSCLI failed. Exit code: ${PIPESTATUS[0]}"
        if [ -f $TMPLOCAL/$FILENAME.jpg ]; then
            echo "aws s3 cp $TMPLOCAL/$FILENAME.jpg $s3bucket/$DATEDIR/ --acl public-read" \
                >> $maindir/aws-s3-cp-$RETRYDATE.retry
            logger -t $loggertitle "[WARN] $maindir/aws-s3-cp-$RETRYDATE.retry file written"
        else
            logger -t $loggertitle "[ERR] File $TMPLOCAL/$FILENAME.jpg does not exist"
        fi
    fi
else
    echo "Goodnight"
fi
f628725060e95e76bdb7acf285088b1c459d7c20 | Shell | B-Rich/community-mirror | /wkhtmltopdf/repos/community-i686/PKGBUILD | UTF-8 | 790 | 2.828125 | 3 | [] | no_license | # $Id$
# Maintainer: Evangelos Foutras <evangelos@foutrelis.com>

# Arch Linux PKGBUILD: builds wkhtmltopdf (Qt/WebKit based HTML-to-PDF
# converter) from the upstream source tarball.
pkgname=wkhtmltopdf
pkgver=0.9.9
pkgrel=2
pkgdesc="Simple shell utility to convert html to pdf using the webkit rendering engine, and qt"
arch=('i686' 'x86_64')
url="http://code.google.com/p/wkhtmltopdf/"
license=('GPL3')
depends=('qtwebkit')
optdepends=('xorg-server: wkhtmltopdf needs X or Xvfb to operate')
source=(http://wkhtmltopdf.googlecode.com/files/$pkgname-$pkgver.tar.bz2)
sha1sums=('41f598c0103326e7c13101391447b0284b4ba3cb')

# Configure with qmake and compile.
build() {
  cd "$srcdir/$pkgname-$pkgver"
  qmake wkhtmltopdf.pro
  make
}

# Install into the package root and create the man page by running the
# freshly built binary with --manpage.
package() {
  cd "$srcdir/$pkgname-$pkgver"
  make INSTALL_ROOT="$pkgdir/usr" install

  # Generate and install man page
  install -d "$pkgdir/usr/share/man/man1"
  ./wkhtmltopdf --manpage >"$pkgdir/usr/share/man/man1/wkhtmltopdf.1"
}
4e5b1b5a3796975e444e2769ece752c8e2f115bb | Shell | dotnet-fizzyy/NodeJS-Nest | /postgres-connect.sh | UTF-8 | 184 | 2.75 | 3 | [] | no_license | echo "Please, enter user name: "
# Read the connection parameters interactively; -r keeps backslashes in
# the input literal.
read -r userName
echo "Please, enter database name: "
read -r dbName
# Open an interactive psql session inside the running postgres container.
# All expansions are quoted so names containing spaces stay one argument.
docker exec -it "$(docker ps -aqf "name=postgres_db")" psql -U "${userName}" -d "${dbName}"
| true |
1fcd5250811028cc42c1ca43ede6a7eb8b1c14fe | Shell | matallen/jboss-fuse-drools | /install/install-drools-runtime.sh | UTF-8 | 2,018 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# This script installs the necessary libraries from the drools runtime into maven
# Each jar found is deployed into the local maven repository at URL;
# artifactId and version are derived from the jar file name with grep.
URL="file:///home/mallen/.m2/repository"
FILES=$(find . -name "*BRMS*.jar")
echo "Going to install the following files"
for filename in $FILES
do
    echo "$filename"
done
for filename in $FILES
do
    # Version looks like "x.y.z.BRMS"; artifactId is the basename up to
    # the last "-", with the trailing "-" stripped via ${artifactId%?}.
    brmsVersion=$(echo $filename | grep -oE "([0-9]{1}\.)*BRMS")
    artifactId=$(echo $filename | grep -oE ".*/(.*)-")
    echo "mvn deploy:deploy-file -Dfile=$filename -DgroupId=org.drools -DartifactId=${artifactId%?} -Dversion=$brmsVersion -Dpackaging=jar -DgeneratePom=true -DcreateChecksum=true -Durl=$URL"
    mvn deploy:deploy-file -Dfile=$filename -DgroupId=org.drools -DartifactId=${artifactId%?} -Dversion=$brmsVersion -Dpackaging=jar -DgeneratePom=true -DcreateChecksum=true -Durl=$URL
done
# Eclipse compiler jar(s) in the current directory (x.y.z version).
ECJ=*ecj*
for filename in $ECJ
do
    ecjVersion=$(echo $filename | grep -oE "([0-9]{1})\.([0-9]{1})\.([0-9]{1})")
    mvn deploy:deploy-file -Dfile=$filename -DgroupId=org.eclipse.jdt.core.compiler -DartifactId=ecj -Dversion=$ecjVersion -Dpackaging=jar -DgeneratePom=true -DcreateChecksum=true -Durl=$URL
done
# MVEL2 jar(s); the version may carry a textual qualifier.
MVEL=*mvel2*
for filename in $MVEL
do
    mvelVersion=$(echo $filename | grep -oE "([0-9]{1})\.([0-9]{1})\.([0-9]{1}).([A-Za-z])*([0-9])*")
    echo $mvelVersion
    mvn deploy:deploy-file -Dfile=$filename -DgroupId=org.mvel -DartifactId=mvel2 -Dversion=$mvelVersion -Dpackaging=jar -DgeneratePom=true -DcreateChecksum=true -Durl=$URL
done
# ANTLR runtime jar(s) (two-component version).
ANTLR=*antlr-runtime*
for filename in $ANTLR
do
    antlrVersion=$(echo $filename | grep -oE "([0-9]{1})\.([0-9]{1})")
    mvn deploy:deploy-file -Dfile=$filename -DgroupId=org.antlr -DartifactId=antlr-runtime -Dversion=$antlrVersion -Dpackaging=jar -DgeneratePom=true -DcreateChecksum=true -Durl=$URL
done
# JExcelApi jar(s).
JXL=*jxl*
for filename in $JXL
do
    jxlVersion=$(echo $filename | grep -oE "([0-9]{1})\.([0-9]{1})\.([0-9]{1})")
    mvn deploy:deploy-file -Dfile=$filename -DgroupId=jxl -DartifactId=jxl -Dversion=$jxlVersion -Dpackaging=jar -DgeneratePom=true -DcreateChecksum=true -Durl=$URL
done
1e185d58f959859d5d5bdb8b8f821b115429129f | Shell | xeonye/leaf | /adoc/gh-pages.sh | UTF-8 | 293 | 2.734375 | 3 | [
"BSL-1.0"
] | permissive | #!/bin/bash
set -e

# Render the AsciiDoc README and publish the result as index.html on the
# gh-pages branch, then return to master.  set -e aborts on any failure
# (including an empty commit).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$DIR"
asciidoctor README.adoc -o ../index1.html
cd ..
git checkout gh-pages
git pull
rm -f index.html
mv index1.html index.html
git add -u index.html
git commit -m "documentation update"
git push
git checkout master
8844313b79a85286df8ff8e1d259a8dcd25d9c5d | Shell | schfkt/df | /bin/tmux-exec-file.sh | UTF-8 | 567 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# Execute the file given as $1 with the tool matching its type:
# jest/puppeteer for JS/TS spec files, `go test` for Go test files.
file=$1

# Run a JS/TS spec file with the runner matching its location.
# $1 - path to the *.spec.(js|ts) file, relative to the project root.
exec_js_test_file() {
  # Bind the parameter locally: the original matched on "$1" but then
  # executed the *global* $file, which only worked by coincidence.
  local target=$1
  if [[ "$target" =~ ^test\/ ]]; then
    local cmd=(yarn exec jest --silent "$target")
    echo "${cmd[@]}"
    "${cmd[@]}"
  elif [[ "$target" =~ ^puppeteer\/ ]]; then
    local cmd=(yarn puppeteer-tests --silent "$target")
    echo "${cmd[@]}"
    "${cmd[@]}"
  else
    echo "Can't handle this js/ts file"
  fi
}

# Run a single Go test file verbosely.
# $1 - path to the *_test.go file (quoted so paths with spaces work).
exec_go_test_file() {
  go test -v "$1"
}

# Dispatch on the file name suffix.
if [[ "$file" =~ \.spec\.(js|ts)$ ]]; then
  exec_js_test_file "$file"
elif [[ "$file" =~ _test.go$ ]]; then
  exec_go_test_file "$file"
else
  echo "Filetype not supported"
fi
24b7eb1e3a6bb2e0c563e29a2cbdc78bcd8d217c | Shell | cloudkats/docker-tools | /gatling/entrypoint.sh | UTF-8 | 232 | 3.125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -euo pipefail
# Enable shell tracing when DEBUG is set to a non-empty value.
[[ ${DEBUG:-} ]] && set -x

# first arg is `-f` or `--some-option`
# or there are no args
if [ "$#" -eq 0 ] || [ "${1#-}" != "$1" ]; then
  # docker run bash -c 'echo hi'
  # No command (or a leading option) was given: hand everything to bash.
  exec bash "$@"
fi

# Otherwise replace this process with the requested command.
exec "$@"
36db4fba73a4411195ef2afa665609816454743c | Shell | paulboone/sam-modules | /compile-scripts/python-3.5.1.sh | UTF-8 | 715 | 3.078125 | 3 | [] | no_license | #!/bin/sh
#
# Download python source to ../src/. i.e.:
#
# wget https://www.python.org/ftp/python/3.5.1/Python-3.5.1.tgz
# md5sum -c Python-3.5.1.tgz
# tar -xzf Python-3.5.1.tgz
#
# MD5=be78e48cdfc1a7ad90efff146dce6cfe
#
# Builds CPython 3.5.1 from that source tree and installs it under the
# user's environment-modules prefix, then applies a local ctypes patch.
#
set -ex

ver=3.5.1
module_home=$HOME/modules
prefix=$module_home/local/python/$ver

cd $module_home/src/Python-$ver

# Start from a clean environment-modules state; only binutils is loaded.
module purge
module load binutils/2.23-gcc48-rhel

# compile to local
./configure --prefix=$prefix
make
# make test
make install

## ghetto-patch python's ctype command so the broken ldconfig implementation gets wrapped
## should be deleted when we move to the slurm cluster.
cd $prefix/lib/python3.5/ctypes/
patch < $module_home/patches/python-$ver/ctypes_ldconfig.patch
7af10bb68c158bfa401716597499bdf21a6420c5 | Shell | spiegelm/smart-heating-report | /evaluation-analysis/scripts/parse_messages.sh | UTF-8 | 1,206 | 3.046875 | 3 | [] | no_license | set -e
output_file_prefix=`cat output_file_prefix`
file_input="smart-heating.log"
file_tidy="$output_file_prefix.tidy.log"
file_parsed="$output_file_prefix.parsed.log"
cp $file_input $file_tidy
# remove multi line log entries (stack traces) not starting with 2015
sed -E -i 's%^[^0-9]{4}.*%%g' $file_tidy
# remove empty lines
sed -i '/^$/d' $file_tidy
cp $file_tidy $file_parsed
# split time, microtime, severity, name, message AND add special char to import in excel
sed -E -i 's%^(.{19})(,.{3})\s*\[(\S*)\]\s*(\S+):\s*(.*)%\1§\2§\3§\4§\5%g' $file_parsed
# ^(GET|PUT)\s(coap://\[[a-z0-9:]+)(/\S+)
# \1\t§\2\t§\3\t§
sed -E -i 's%§(GET|PUT)\s(coap://\[[a-z0-9:]+\])(/\S+)%§\1§\2§\3§%g' $file_parsed
# ^(Request timed out:) (\S+) (coap://\S+)
# \1\t§\2\t§\3\t§
sed -E -i 's%§(Request timed out:) (\S+) (coap://\S+)\s*%§\1§\2§\3§%g' $file_parsed
# ^(Could not upload: )
# \1\t§
sed -E -i 's%§(Could not upload:)\s*(\S*)%§\1§\2§%g' $file_parsed
sed -E -i 's%§(upload successful:)\s*(\S*)%§\1§\2§%g' $file_parsed
sed -E -i 's%§(poll)\s*%§\1§%g' $file_parsed
sed -E -i 's%§(Retransmission)%§\1§%g' $file_parsed
# sed -E -i 's%find%replace%g' $file_parsed
| true |
5269c9a415591bd0b0a40182be94ad88724c8281 | Shell | shiva028/bashv2 | /variable.sh | UTF-8 | 219 | 2.765625 | 3 | [] | no_license | # integer variable and its increment
# does not work in plain sh !!!  (`let` is a bash builtin)
#---------------------------------------------------
#!/bin/bash
var=12345
let var=$var+1 # let is important: it forces arithmetic evaluation
echo $var # prints 12346
v=12345
# Without `let`, the line below is a plain string assignment, so "+1"
# is appended rather than added.
v=$v+1 # result "12345+1"
01a3f185efaca890f6b85545faba9d276cea9ff5 | Shell | noob-hackers/m-wiz | /core/mbp/opt.sh | UTF-8 | 490 | 2.875 | 3 | [
"MIT"
] | permissive | #colour section
red='\033[1;31m'
rset='\033[0m'
grn='\033[1;32m'
ylo='\033[1;33m'
#script coding starts
clear
cd $HOME/m-wiz/mbp
echo " "
echo " "
echo -e "$ylo To terminate the process click 'n' or to continue
double click on ENTER$rset"
read choice
if [[ $choice = 'n' ]] ; then
echo -e "$red process is terminating? press ENTER to Terminate$rset"
read choice
cd $HOME/m-wiz
bash m-wiz.sh
else
read choice
cd $HOME/m-wiz/min
bash metain.sh
echo " "
echo " "
fi
| true |
ee46829511261a336dcd4507a9af2fc1a1b2d899 | Shell | krf/dotfiles | /bin/rpi_sysinfo.sh | UTF-8 | 775 | 3.46875 | 3 | [] | no_license | #!/bin/sh
# check if we're on a Raspberry Pi board, exit otherwise
# (`hash` probes the PATH for the VideoCore firmware tool)
hash vcgencmd 2> /dev/null || {
    echo "Failure: 'vcgencmd' does not exist on this system";
    exit 1;
}

echo "###############################################"
echo "# RASPBERRY PI SYSTEM INFORMATIONS #"
echo "###############################################"
echo
# Query the VideoCore firmware for clocks, voltage and temperature.
echo "CPU current Frequency: `vcgencmd measure_clock arm`"
echo "CORE current Frequency: `vcgencmd measure_clock core`"
echo "CORE current Voltage: `vcgencmd measure_volts core`"
echo "CPU current Temperature: `vcgencmd measure_temp`"
echo
# NOTE(review): the literal "\n" below is only expanded by shells whose
# `echo` interprets backslash escapes (e.g. dash's) — confirm on target.
echo "Firmware Version: `vcgencmd version`\n"
echo "Codecs Status:"
echo "`vcgencmd codec_enabled H264`"
echo "`vcgencmd codec_enabled MPG2`"
echo "`vcgencmd codec_enabled WVC1`"
echo
ef0a8e70f3366a1a8a9a11adbd4feb32fd37e8ad | Shell | jvamsee/bqtable | /bqtable.sh | UTF-8 | 295 | 2.796875 | 3 | [] | no_license | #!/bin/bash
# Create a BigQuery dataset and a two-column table inside it.
# $1 - GCP project id, $2 - dataset name, $3 - table name.
project_id=$1
dataset_name=$2
table_name=$3

# Fail early with a usage hint instead of passing empty names on to
# gcloud/bq.
if [ -z "$project_id" ] || [ -z "$dataset_name" ] || [ -z "$table_name" ]; then
    echo "usage: $0 <project_id> <dataset_name> <table_name>" >&2
    exit 1
fi

gcloud config set project "$project_id"

### Create Dataset
bq mk -d "$dataset_name"

## Create Table
# The identifier is spliced into the quoted SQL; quoting the expansion
# keeps the whole statement a single argument.
bq query --use_legacy_sql=false '
CREATE TABLE IF NOT EXISTS '"$dataset_name.$table_name"' (
  `FIRST_NAME` STRING,
  `LAST_NAME` STRING
)'
2948fdc3315b37186654b5699738fb1dc1a7361a | Shell | oranhuang/socket_client_tf | /capture_and_send.sh | UTF-8 | 227 | 2.578125 | 3 | [] | no_license | #!/bin/bash
if [ -z "$1" ];
then
echo "please type the Server's IP address"
elif [ "$1" -le 0 ];
then
echo "take picture"
./raspistill -o image.jpg -t 1000
./socket_client_tf -p "$1" -f image.jpg
echo "finish ....."
fi
| true |
293eb52b6e19f53e778cbba6cc7344b15dd2fc30 | Shell | hitswint/.emacs.d | /dotfiles/bin/run-or-raise.sh | UTF-8 | 481 | 3.140625 | 3 | [] | no_license | #! /bin/bash
# When the target window is not on top in the current monitor, neither
# xdotool nor wmctrl can switch focus across monitors.  (Translated from
# the original Chinese comment.)
# $1 - window class name to search for
# $2 - command to launch when no matching window exists
Windx=`xdotool search --onlyvisible --classname $1 | head -1`
if [ $Windx ]; then
    # Emacs special case: click at the top-left corner first (presumably
    # so activation lands on the right monitor), then restore the pointer.
    if [ $1 == "emacs" ]; then
        xdotool mousemove 0 0 click 1 windowactivate --sync $Windx mousemove restore;
    fi
    xdotool windowactivate --sync $Windx;
else
    # No visible window of that class: start the program ($2 is left
    # unquoted on purpose so a multi-word command splits into words).
    $2;
fi
# Alternative wmctrl-based implementation, kept for reference:
# Windx=`wmctrl -l | grep -i $1@`
# if [ "$Windx" ]; then
#     wmctrl -x -a "$1";
# else
#     $2;
# fi
| true |
59f92fd595c9e642284024bf64f2500ebbc792a5 | Shell | mollifier/config | /mklink_rc.sh | UTF-8 | 2,760 | 3.953125 | 4 | [] | no_license | #!/bin/bash
##########################################################
# make symbolic links to dotfiles
#
# Each DOTFILES entry (stored as "dot.<name>" in this repository) is
# linked into $HOME as ".<name>".  The default mode is a dry run that
# only prints the ln commands; pass -f to create the links for real.
##########################################################

#set -o noglob

#####################
# constants
#####################
declare -r SCRIPT_NAME=${0##*/}

tmp_src_dir_name=$(dirname "$0")
declare -r SRC_DIR_NAME=${tmp_src_dir_name}
declare -r DEST_DIR_NAME=${HOME}

declare -ar DOTFILES=(
    'dot.atoolrc'
    'dot.bash_profile'
    'dot.bashrc'
    'dot.emacs'
    'dot.gitconfig'
    'dot.gitignore'
    'dot.gvimrc'
    'dot.inputc'
    'dot.npmrc'
    'dot.screenrc'
    'dot.tmux.conf'
    'dot.vim'
    'dot.vimrc'
    'dot.vimplug.vim'
    'dot.Xmodmap'
    'dot.zshenv'
    'dot.zlogin'
    'dot.zprofile'
    'dot.zshrc'
    'dot.zlogout'
    'dot.peco_config.json'
    'dot.config/fish/config.fish'
    'dot.config/fish/config_linux.fish'
    'dot.config/fish/config_mac.fish'
    'dot.config/fish/fish_plugins'
    'dot.config/fish/gabbr_config'
    'dot.config/fish/functions/fish_user_key_bindings.fish'
    'dot.config/fish/functions/user_z_select_and_change_directory.fish'
)

#####################
# functions
#####################

# Print usage to standard output.
print_usage()
{
    cat << EOF
Usage: $SCRIPT_NAME [-df]
Make symbolic links to dotfiles in HOME.

  -d  dry run
      not make link, but display ln command
      [default]
  -f  make link actually
  -h  display this help and exit
EOF
}

# Print an error message (prefixed with the script name) plus a hint
# about -h to standard error.
print_error()
{
    echo "$SCRIPT_NAME: $*" 1>&2
    echo "Try \`-h' option for more information." 1>&2
}

# create dest filename by link src filename
# e.g. "dot.bashrc" -> ".bashrc"; names without the "dot." prefix are
# returned unchanged.  Plain parameter expansion replaces the former
# `echo | sed` pipeline, avoiding a subshell and an external process
# per file.
get_dest_filename()
{
    case "$1" in
        dot.*) printf '%s\n' ".${1#dot.}" ;;
        *)     printf '%s\n' "$1" ;;
    esac
}

#####################
# main
#####################
# false : not make link
# true : make link actually
make_link="false"

while getopts ':fdh' option; do
    case $option in
    f)
        make_link="true"
        ;;
    d)
        make_link="false"
        ;;
    h)
        print_usage
        exit 0
        ;;
    :) # option argument is missing
        print_error "option requires an argument -- $OPTARG"
        exit 1
        ;;
    *) # unknown option
        print_error "invalid option -- $OPTARG"
        exit 1
        ;;
    esac
done
shift $((OPTIND - 1))

cd "$SRC_DIR_NAME" || exit

for src_filename in "${DOTFILES[@]}"; do
    dest_filename=$(get_dest_filename "$src_filename")

    if [ -e "${DEST_DIR_NAME}/${dest_filename}" ]; then
        # skip file which already exists
        continue
    fi

    if [ "$make_link" == "true" ]; then
        # make link actually
        ln -s "${PWD}/${src_filename}" "${DEST_DIR_NAME}/${dest_filename}"
    else
        # not make link, but echo command
        echo ln -s "${PWD}/${src_filename}" "${DEST_DIR_NAME}/${dest_filename}"
    fi
done

exit $?
6766352825836cf8f160bfe72af7c88d8ea80740 | Shell | mrandou/42_minishell_v2 | /script_mem_minishell | UTF-8 | 283 | 3.0625 | 3 | [] | no_license | #! /bin/bash
# Require a process name argument.
if test -z "$1"
then echo "usage : ./script \"processus name\""
exit
fi
# The "\033[" colour codes below are printed literally unless the
# shell's echo interprets escapes — NOTE(review): confirm on target.
echo "\033[31m\n/////MEMORY CHECKER/////\n\033[0m"
echo "\033[31mYour Processus : "
echo $1
echo "\033[0m"
echo "\033[96mNAME MEMORY\n\033[32m"
# `top -stats command,mem` is macOS top syntax: show command + memory,
# filtered to the requested process name.
top -stats command,mem | grep $1
| true |
8dab786eee9b40aedfc3acec663ce077eacfaf5d | Shell | bassosimone/aladdin | /domain-check.bash | UTF-8 | 10,550 | 3.734375 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
#
# aladdin: a diamond in the rough next-gen web connectivity
#
# This is an experiment to explore how specific subsets of the next
# generation web connectivity nettest would look like.
#
# The probe-engine/miniooni platform already contains enough functionality
# to allow us to implement most of the rest as a bash script for now.
#
# Of course, the final objective is to get this right and rewrite all
# this in golang, to be integrated in probe-engine.
#
# This work has been heavily influenced by Jigsaw-Code/net-analysis
# blocktest/measure.sh methodology <https://git.io/JfsZb>.
#

# Print CLI usage and exit non-zero.
# NOTE(review): only the "usage:" line goes to stderr; the rest goes to
# stdout — confirm whether that mix is intentional.
function usage_then_die() {
  echo ""
  echo "usage: $0 <domain>" 1>&2
  echo ""
  echo "# Environment variables"
  echo ""
  echo "- MINIOONI_TEST_HELPER: optional domain for test helper"
  echo ""
  echo "- MINIOONI_EXTRA_OPTIONS: extra options for miniooni (e.g. -n to avoid"
  echo "submitting measurements to the OONI collector)"
  echo ""
  exit 1
}

# Presence of this file in the CWD records the user's risk acceptance.
acceptance_file=I_UNDERSTAND_THE_RISK

# Print the risk disclaimer and exit non-zero.
function disclaimer_then_die() {
  cat << EOF
======= BEGIN DISCLAIMER =======
github.com/bassosimone/aladdin contains experimental OONI code for performing
network measurements. Because this is experimental code, we cannot guarantee the
same level of vetting of non-experimental OONI code. On top of that, the logic
that generates yes/no/maybe results in this script is alpha stage code.
This repository will upload measurements to the OONI collector. You should read
OONI's data policy <https://ooni.org/about/data-policy> as well as the docs on
potential risks <https://ooni.org/about/risks/>.
If you understand (1) the above disclaimer about experimental code, (2) the
data policy, (3) and the risks document, and want to run aladdin, then please
create an empty file named $acceptance_file in the current directory.
======= END DISCLAIMER =======
EOF
  exit 1
}

inputCount=$#
[ $inputCount -ge 1 ] || usage_then_die
[ -f $acceptance_file ] || disclaimer_then_die

# Log to stderr; extra flags (e.g. -n) are forwarded to echo.
function log() {
  echo "$@" 1>&2
}

# Emit a "checking <...>... " prefix without trailing newline, so the
# result can be appended on the same line later.
function checking() {
  log -n "checking $@... "
}

# Log and abort the whole script.
function fatal() {
  log "$@"
  exit 1
}

# Verify $1 is an executable on PATH; otherwise die printing the
# install hint $2.
function require() {
  checking "for $1"
  if ! [ -x "$(command -v $1)" ]; then
    fatal "not found; please run: $2"
  fi
  log "ok"
}

require base64 "sudo apt install coreutils (or sudo apk add coreutils)"
require gcc "sudo apt install gcc (or sudo apk add gcc)"
require git "sudo apt install git (or sudo apk add git)"
require go "sudo apt install golang (or sudo apk add go)"
require jq "sudo apt install jq (or sudo apk add jq)"
require openssl "sudo apt install openssl (or sudo apk add openssl)"
require uuidgen "sudo apt install uuid-runtime (or sudo apk add util-linux)"

# Per-run working directory: <UTC timestamp>-<first input's basename>.
measurement_path=`date +%Y%m%dT%H%M%SZ`-`basename $1`
mkdir -p ./tmp/"$measurement_path"
log_file=./tmp/"$measurement_path"/aladdin.log

# Append the command line to the log, then run it with stderr captured
# into the log file as well.
function run() {
  echo "" >> $log_file
  echo "+ $@" >> $log_file
  "$@" 2>> $log_file
}

report_file=./tmp/"$measurement_path"/report.jsonl

# Log the error, point at the log/report files, and abort.
function fatal_with_logs() {
  log "$@"
  log "please, check $log_file and $report_file for more insights"
  exit 1
}

# Run a command, aborting the whole script if it fails.
function must() {
  "$@" || fatal_with_logs "failure"
}

log -n "building the latest version of miniooni (may take long time!)... "
must run go build -tags nomk ./cmd/aladdin
log "done"

# Options enabling the Cloudflare DoH resolver (with a static DNS cache
# entry so the resolver's own name can be resolved).
doh_cache="-ODNSCache=cloudflare-dns.com 1.0.0.1 1.1.1.1"
doh_url="-OResolverURL=doh://cloudflare"
log "options used to enable alternative resolver: \"$doh_cache\" $doh_url"

checking "for test helper to use"
test_helper=${MINIOONI_TEST_HELPER:-example.org}
log "$test_helper"

checking "for extra options to pass to miniooni"
extra_options=${MINIOONI_EXTRA_OPTIONS}
log "$extra_options"

# Invoke the aladdin urlgetter experiment, tagging every measurement
# with the current session UUID and appending to the report file.
function urlgetter() {
  run ./aladdin -v $extra_options -o $report_file -Asession=$uuid "$@" urlgetter
}

# Extract the first A-record answer for hostname $1 from the last
# report line.
function getipv4first() {
  tail -n1 $report_file|jq -r ".test_keys.queries|.[]|select(.hostname==\"$1\")|select(.query_type==\"A\")|.answers|.[0].ipv4"
}

log -n "getting $test_helper's IP address using alternative resolver... "
urlgetter -Astep=resolve_test_helper_ip \
    "$doh_cache" \
    $doh_url \
    -idnslookup://$test_helper
test_helper_ip=$(getipv4first $test_helper)
{ [ "$test_helper_ip" != "" ] && log "$test_helper_ip"; } || fatal_with_logs "failure"

# Failure string of the last measurement ("null" means success).
function getfailure() {
  tail -n1 $report_file|jq -r .test_keys.failure
}

# Inconclusive verdict: point the user at the report for manual review.
function maybe() {
  log "maybe (check $report_file)"
}

# All (sorted) A-record answers for hostname $1, space separated.
function getipv4list() {
  echo $(tail -n1 $report_file|jq -r ".test_keys.queries|.[]|select(.hostname==\"$1\")|select(.query_type==\"A\")|.answers|.[].ipv4"|sort)
}

# Dump the peer leaf certificate (DER) of the last TLS handshake into a
# temp file and print that file's name.
function getcertificatefile() {
  local filename=$(mktemp ./tmp/"$measurement_path"/aladdin.XXXXXX)
  tail -n1 $report_file|jq -r '.test_keys.tls_handshakes|.[]|.peer_certificates|.[0]|.data'|base64 -d > $filename
  echo $filename
}

# Pretty-print issuer/subject/validity/fingerprint of the last
# handshake's certificate via openssl.
function printcertificate() {
  local certfile
  certfile=$(getcertificatefile)
  checking "for x509 certificate issuer"
  log $(openssl x509 -inform der -in $certfile -noout -issuer 2>/dev/null|head -n1|sed 's/^issuer=\ *//g')
  checking "for x509 certificate subject"
  log $(openssl x509 -inform der -in $certfile -noout -subject 2>/dev/null|head -n1|sed 's/^subject=\ *//g')
  checking "for x509 certificate notBefore"
  log $(openssl x509 -inform der -in $certfile -noout -dates 2>/dev/null|head -n1|sed 's/^notBefore=//g')
  checking "for x509 certificate notAfter"
  log $(openssl x509 -inform der -in $certfile -noout -dates 2>/dev/null|sed -n 2p|sed 's/^notAfter=//g')
  checking "for x509 certificate SHA1 fingerprint"
  log $(openssl x509 -inform der -in $certfile -noout -fingerprint 2>/dev/null|head -n1|sed 's/^SHA1 Fingerprint=//g')
}

# Dump the last response body into a temp file; print its name.
function getbodyfile() {
  # Implementation note: requests stored in LIFO order
  local filename=$(mktemp ./tmp/"$measurement_path"/aladdin.XXXXXX)
  tail -n1 $report_file|jq -r ".test_keys.requests|.[0]|.response.body" > $filename
  echo $filename
}

# Unified diff of two body files, written to a temp file; print its name.
function diffbodyfile() {
  local filename=$(mktemp ./tmp/"$measurement_path"/aladdin.XXXXXX)
  diff -u $1 $2 > $filename
  echo $filename
}

# Run the whole censorship-measurement sequence for one domain ($1).
function main() {
  domain=$1
  log -n "generating UUID to correlate measurements... "
  uuid=$(uuidgen)
  log "$uuid"

  # TLS handshake to the helper's IP with the target SNI: a clean
  # ssl_invalid_hostname means the SNI was not interfered with.
  checking "for SNI-triggered blocking"
  urlgetter -Astep=sni_blocking \
    -OTLSServerName=$domain \
    -itlshandshake://$test_helper_ip:443
  { [ "$(getfailure)" = "ssl_invalid_hostname" ] && log "no"; } || maybe

  checking "for host-header-triggered blocking"
  urlgetter -Astep=host_header_blocking \
    -OHTTPHost=$domain \
    -ONoFollowRedirects=true \
    -ihttp://$test_helper_ip
  { [ "$(getfailure)" = "null" ] && log "no"; } || maybe

  # Query the helper (not a DNS server) on port 53: any answer implies
  # something on-path injected a response.
  checking "for DNS injection"
  urlgetter -Astep=dns_injection \
    -OResolverURL=udp://$test_helper_ip:53 \
    -idnslookup://$domain
  { [ "$(getfailure)" = "null" ] && log "yes"; } || log "no"

  checking "whether the system resolver returns bogons"
  urlgetter -Astep=bogons \
    -ORejectDNSBogons=true \
    -idnslookup://$domain
  { [ "$(getfailure)" = "dns_bogon_error" ] && log "yes"; } || log "no"

  checking "for IPv4 addresses returned by the system resolver"
  # Implementation note: with dns_bogons_error we still have the IP addresses
  # available inside the response, so we can read then
  ipv4_system_list=$(mktemp ./tmp/"$measurement_path"/aladdin.XXXXXX)
  getipv4list $domain > $ipv4_system_list
  log $(cat $ipv4_system_list)

  checking "for IPv4 addresses returned by the alternate resolver"
  urlgetter -Astep=doh_lookup \
    "$doh_cache" \
    $doh_url \
    -idnslookup://$domain
  ipv4_doh_list=$(mktemp ./tmp/"$measurement_path"/aladdin.XXXXXX)
  getipv4list $domain > $ipv4_doh_list
  log $(cat $ipv4_doh_list)

  # Any common address between both (sorted) lists counts as consistent.
  checking "for DNS consistency between system and alternate resolver"
  ipv4_overlap_list=$(comm -12 $ipv4_system_list $ipv4_doh_list)
  { [ "$ipv4_overlap_list" != "" ] && log "yes"; } || log "no"

  # Fetch via HTTPS pinning the system-resolver addresses: certificate
  # validation failing here suggests those addresses were bogus.
  checking "whether the system resolver lied to us"
  urlgetter -Astep=system_resolver_validation \
    "-ODNSCache=$domain $(cat $ipv4_system_list)" \
    -ihttps://$domain/
  vanilla_failure=$(getfailure)
  { [ "$vanilla_failure" = "null" ] && log "no"; } || maybe
  printcertificate
  body_vanilla=$(getbodyfile)
  log "webpage body available at... $body_vanilla"

  # The psiphon-tunnelled body serves as the uncensored reference copy
  # for all the diffs below.
  checking "whether we obtain the same body using psiphon"
  urlgetter -Astep=psiphon -OTunnel=psiphon -ihttps://$domain
  body_tunnel=$(getbodyfile)
  body_diff=$(diffbodyfile $body_vanilla $body_tunnel)
  { [ "$(cat $body_diff)" = "" ] && log "yes"; } || log "no (see $body_diff)"

  checking "whether we can retrieve a webpage by removing TLS validation"
  urlgetter -Astep=https_blockpage_fetch \
    "-ODNSCache=$domain $(cat $ipv4_system_list)" \
    -ONoTLSVerify=true \
    -ihttps://$domain/
  { [ "$(getfailure)" = "null" ] && log "yes"; } || log "no"
  printcertificate
  body_noverify=$(getbodyfile)
  log "webpage body available at... $body_noverify"

  checking "whether we obtain the same body when removing TLS validation"
  body_diff=$(diffbodyfile $body_noverify $body_tunnel)
  { [ "$(cat $body_diff)" = "" ] && log "yes"; } || log "no (see $body_diff)"

  checking "whether we can retrieve a webpage using the alternate resolver"
  urlgetter -Astep=doh_resolver_validation \
    "-ODNSCache=$domain $(cat $ipv4_doh_list)" \
    -ihttps://$domain/
  { [ "$(getfailure)" = "null" ] && log "yes"; } || log "no"
  printcertificate
  body_doh=$(getbodyfile)
  log "webpage body available at... $body_doh"

  checking "whether we obtain the same body using the alternate resolver"
  body_diff=$(diffbodyfile $body_doh $body_tunnel)
  { [ "$(cat $body_diff)" = "" ] && log "yes"; } || log "no (see $body_diff)"

  checking "whether we can retrieve a webpage using TLSv1.3 and DoH"
  urlgetter -Astep=tls_v1_3_and_doh \
    -OTLSVersion=TLSv1.3 \
    "-ODNSCache=$domain $(cat $ipv4_doh_list)" \
    -ihttps://$domain/
  { [ "$(getfailure)" = "null" ] && log "yes"; } || log "no"
  printcertificate
  body_tls13=$(getbodyfile)
  log "webpage body available at... $body_tls13"

  checking "whether we obtain the same body using TLSv1.3 and DoH"
  body_diff=$(diffbodyfile $body_tls13 $body_tunnel)
  { [ "$(cat $body_diff)" = "" ] && log "yes"; } || log "no (see $body_diff)"
}

# Run the full sequence once per command-line input.
inputCounter=0
while [[ $1 != "" ]]; do
  ((inputCounter++))
  log "[$inputCounter/$inputCount] running with input: $1"
  main $1
  sleep 1
  shift
done
2ad03116a68aa571773e8e8c17aec1be5a56ab2d | Shell | gideao/squid-builder | /release.sh | UTF-8 | 483 | 3.078125 | 3 | [] | no_license | #!/bin/bash
set -xe

# Create a GitHub pre-release for squid v3.5.27 and upload every built
# Debian package to it.  Requires GITHUB_TOKEN in the environment.

# `command -v` is the portable way to test for a binary; the original
# `[ ! $(command -v ...) ]` relied on an unquoted empty expansion.
if ! command -v github-release > /dev/null; then
    go get github.com/aktau/github-release
fi

if [ -z "${GITHUB_TOKEN:-}" ]; then
  echo 'Invalid GITHUB_TOKEN value! Set it up!'
  exit 1
fi

github-release release \
  --user gideao \
  --repo squid \
  --tag v3.5.27 \
  --name "v3.5.27" \
  --pre-release

# Glob directly instead of parsing `ls *.deb` output; quoting "$f"
# keeps file names with spaces intact.
for f in *.deb; do
  github-release upload \
    --user gideao \
    --repo squid \
    --tag v3.5.27 \
    --name "$f" \
    --file "$f"
done
b2b15b6313bc2ebf566469339250f6c70638a316 | Shell | adarnimrod/rcfiles | /Documents/bin/git-skel | UTF-8 | 125 | 2.59375 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | #!/bin/sh
# Copy the named git skeleton/template files from the user's config
# directory into the current repository and stage them.
set -eu
for i in "$@"
do
    # --dereference copies the target of a symlink so a real file ends
    # up in the repo.  (NOTE: --dereference/--target-directory are GNU
    # cp options.)
    cp --dereference --target-directory=./ "$HOME/.config/git/skel/$i"
done
# Stage exactly the files that were just copied.
git add "$@"
c745244e5f1c5c29e821419423c54f969d03d91c | Shell | ZHANGneuro/Customized-package-for-Representational-similarity-analysis | /bash script/gunzip.sh | UTF-8 | 207 | 2.53125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Decompress every file found one level below each "folder*" directory
# of the MVPA ROI analysis tree.
fsf_list=( /Users/boo/Documents/degree_PhD/data_fmri/analysis_MVPA_roi/folder*)
for i in "${fsf_list[@]}"; do
    # Quote the directory when globbing so paths containing spaces are
    # not split into separate array elements.
    fsf_list2=("$i"/*)
    for ii in "${fsf_list2[@]}"; do
        gunzip "$ii"
    done
done
2b09fa65c780188bb450315e63b30657340d1a95 | Shell | zhang75656/openstarck-mitaka | /bin/sh/8_install_manila.sh | GB18030 | 2,570 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# Source shared helper functions and the deployment configuration; paths are
# supplied through the os_FUNCS / os_CONF_FILE environment variables.
. $os_FUNCS
. $os_CONF_FILE
# Install and configure the OpenStack Manila (Shared File Systems) API and
# scheduler services on the controller node.  The original inline comments
# were GB18030-encoded Chinese and arrived garbled; the notes below are
# reconstructed from the code itself -- TODO confirm against upstream.
os_fn_inst_manila_server(){
# Preconditions (the fn_*_log helpers appear to log and/or abort based on the
# preceding fn_check_tag_file result -- verify their semantics):
#   * this node must be tagged as the controller
#   * manila_server must not already be installed (return 1 if it is)
#   * keystone and neutron_server must already be installed
fn_check_tag_file controller_node
fn_err_or_info_log "ǰڵ: controller "
fn_check_tag_file manila_server
fn_warn_or_info_log "ǰڵѰװ: manila_server"
[ $? -eq 0 ] && return 1
fn_check_tag_file keystone
fn_err_or_info_log "ǰڵѰװ: keystone "
fn_check_tag_file neutron_server
fn_err_or_info_log "ǰڵѰװ: neutron_server"
# Create the manila database/user, then register the manila service user,
# services and v1/v2 endpoints in keystone using the admin credentials.
fn_create_db manila manila
. $ADMINOPENRC
fn_create_user_and_grant default:manila:$MANILA_PASSWORD service:admin
fn_create_service_and_endpoint manila:"OpenStack Shared File Systems":share 3*http://$CONTROLLER_HOST_NAME:8786/v1/%\\\(tenant_id\\\)s
fn_create_service_and_endpoint manilav2:"OpenStack Shared File Systems":sharev2 3*http://$CONTROLLER_HOST_NAME:8786/v2/%\\\(tenant_id\\\)s
# Install the packages, back up the stock config, then write all required
# options (database, RabbitMQ, keystone auth, lock path) via the
# fn_exec_openstack-config wrapper ("section|key=value" lines).
fn_exec_eval "yum install -y openstack-manila python-manilaclient"
local manilaconf=/etc/manila/manila.conf
fn_check_file_and_backup "$manilaconf"
fn_exec_openstack-config "
database|connection=mysql+pymysql://manila:$MANILA_PASSWORD@$CONTROLLER_HOST_NAME/manila
DEFAULT|rpc_backend=rabbit
oslo_messaging_rabbit|rabbit_host=$CONTROLLER_HOST_NAME
oslo_messaging_rabbit|rabbit_userid=$RABBITMQ_USERNAME;rabbit_password=$RABBITMQ_PASSWORD
DEFAULT|default_share_type=default_share_type;rootwrap_config=/etc/manila/rootwrap.conf
DEFAULT|auth_strategy=keystone
keystone_authtoken|auth_uri=http://$CONTROLLER_HOST_NAME:5000;auth_url=http://$CONTROLLER_HOST_NAME:35357
keystone_authtoken|memcached_servers=$CONTROLLER_HOST_NAME:11211;auth_type=password
keystone_authtoken|project_domain_name=default;user_domain_name=default;project_name=service
keystone_authtoken|username=manila;password=$MANILA_PASSWORD
DEFAULT|my_ip=$CONTROLLER_MANAGE_IP
oslo_concurrency|lock_path=/var/lib/manila/tmp"
# Populate the database schema, show the created tables, start the services
# and record the completion tag so re-runs are skipped.
fn_exec_eval "su -s /bin/sh -c 'manila-manage db sync' manila"
fn_exec_sleep 3
echo $SHOW_manila_TABLES
fn_exec_eval "$SHOW_manila_TABLES"
fn_exec_systemctl "openstack-manila-api openstack-manila-scheduler"
fn_create_tag_file "manila_server"
fn_inst_componet_complete_prompt "Install Manila(Share File System) Service Successed.@Controller Node."
}
os_fn_inst_manila_server
| true |
32af26b9bd761b8e85d220ccf6ad1143c2406f55 | Shell | pc9795/watchdog | /backend/local_run.sh | UTF-8 | 1,861 | 3.8125 | 4 | [] | no_license | #!/usr/bin/env bash
# Locally run the project without docker. This script assumes that all the projects are in the same directory as this
# script is.
# NOTE: THIS SCRIPT ASSUMES THAT COCKROACH-DB AND MONGO-DB ARE RUNNING
# Method to check a command worked or not.
# Abort the script if the previous command exited non-zero.
#   $1 - human-readable description of what failed.
# Must be called immediately after the command being checked, before any
# other command overwrites $?.
check_error(){
    local status=$?
    if [[ $status -ne 0 ]]; then
        echo "=========================================="
        echo "<<Previous command not ran successfully>>"
        # Bash has no '.' string-concatenation operator; the original
        # `echo "<<ERROR>>:".$1` printed a stray literal dot.
        echo "<<ERROR>>: $1"
        echo "EXITING..."
        echo "=========================================="
        # A bare `exit` would propagate the status of the last echo (0);
        # exit 1 so callers actually see the failure.
        exit 1
    fi
}
# Build parent POM; -N means that it will not recurse into child projects
echo ">>>Installing parent project"
mvn -N install

# Install the shared core library into the local Maven repository; the
# services below depend on it.
echo ">>>Installing core package"
cd core/
mvn clean compile install
check_error "Core project doesn't installed successfully"
cd ..

declare -a dirs=("notifications-service/")
# Build and run NON-SPRING services
for i in "${dirs[@]}"
do
    echo ">>>Building project: $i"
    cd "$i"
    mvn clean compile
    # Pass one quoted argument: the original `$i." msg"` relied on a
    # non-existent '.' concatenation and sent two args (only $1 was printed).
    check_error "$i project is not able to be compiled"
    # Running the service in background
    mvn exec:java &
    # NOTE(review): this only checks that backgrounding succeeded (always 0),
    # not that the service itself started -- confirm intent.
    check_error "$i service is not able to run"
    # Sleeping for 5 seconds so that output is not intermingled as much as possible
    sleep 5
    cd ..
done

declare -a dirs=("client-service/" "monitoring-service/")
# Build and run SPRING services.
for i in "${dirs[@]}"
do
    echo ">>>Building project: $i"
    cd "$i"
    mvn clean compile
    check_error "$i project is not able to be compiled"
    # Running the service in background
    mvn spring-boot:run &
    check_error "$i service is not able to run"
    # Sleeping for 5 seconds so that output is not intermingled as much as possible
    sleep 5
    cd ..
done

# Run the frontend
cd ../frontend
ng serve &
check_error "Front end is not able to start"
321131d71bb5886d93f4a272f9d61c1e194b4f60 | Shell | demidenko05/beige-math | /arch.sh | UTF-8 | 157 | 2.5625 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
#You should set environment var ARCH_PATH
nme=beige-math$(date +'%d%m%g%H%M')
pth=$ARCH_PATH/$nme.tar.xz
tar --exclude=__pycache__/* -cJf $pth *
| true |
a53a20fd6922658528c49d0f324e2fa7b0495277 | Shell | ANTsX/ANTsR | /configure.win | UTF-8 | 1,794 | 2.84375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
CXX_STD=CXX14
# Ask the installed ITKR package for its include dir, library dir, compile
# flags and install dir via Rscript (Windows build: Rscript.exe).
ITKRCMAKE=`${R_HOME}/bin/Rscript.exe -e 'a<-ITKR:::itkIncludes(); cat(a)'`
ITKRLIB=`${R_HOME}/bin/Rscript.exe -e 'a<-ITKR:::itkLibs(); cat(a)'`
compflags=`${R_HOME}/bin/Rscript.exe -e 'a<-ITKR:::itkCompileFlags(); cat(a)'`
ITKDIR=`${R_HOME}/bin/Rscript.exe -e 'a<-ITKR:::itkDir(); cat(a)'`
CMAKE_BUILD_TYPE=Release
# get a version of cmake
# cmaker=`which cmake`
# if [[ ! -x $cmaker ]] ; then # try r version
# cmaker=`${R_HOME}/bin/Rscript -e 'a<-cmaker::cmake()'`
# fi
cd ./src
# platform-specific Makevars
# Generate src/Makevars: backslash-escaped ${...} and \` sequences are written
# literally so they are expanded later by make, not by this shell.
echo "myantssource=\${PWD}/ants/" > Makevars
# echo "ITK = \`\$(R_HOME)/bin/Rscript -e 'ITKR:::itkIncludes()'\`" >> Makevars
echo "ITK=${ITKRCMAKE}" >> Makevars
needVCL_CAN_STATIC_CONST_INIT_FLOAT=0
echo $OSTYPE $needVCL_CAN_STATIC_CONST_INIT_FLOAT
# Include directory exported by the ANTsRCore package.
aincludes=`${R_HOME}/bin/Rscript.exe -e 'a<-ANTsRCore:::antsIncludes(); cat(a)'`
echo "PKG_CPPFLAGS = -I\${ITK} -I\${PWD} \
-I${aincludes} \
-I../inst/include/ " >> Makevars
echo "CXX_STD = CXX14" >> Makevars
myantslib=`${R_HOME}/bin/Rscript.exe -e 'a<-ANTsRCore:::antsLibs(); cat(a)'`
# echo "myantslib=`\${R_HOME}/bin/Rscript -e 'ANTsRCore:::antsLibs()'` " >> Makevars
echo "myantslib=${myantslib}" >> Makevars
# Static archives are discovered at make time via find (written as literal
# backticks into Makevars).
echo "libants=\`find ${myantslib} -name \"lib*.a\"\`" >> Makevars
echo "libitk=\`find ${ITKRLIB} -name \"lib*.a\"\`" >> Makevars
# Libraries are repeated to satisfy circular static-link dependencies.
# NOTE(review): "\${itklib}" below is never defined (only "libitk" is) and
# expands empty under make -- looks like a typo; confirm before changing.
echo "PKG_LIBS=\${libitk} \${libants} \${libitk} \${libants} \${libitk} \
\${libants} \${libitk} \${libants} \${itklib} \${libants} \
\${libitk} \${libants} \${itklib} \${libants} \${itklib} \
\${libants} \${libitk} \${libants} \${itklib}" >> Makevars
echo ".PHONY: all libs" >> Makevars
echo "all: \$(SHLIB)" >> Makevars
echo "\$(SHLIB): libs" >> Makevars
echo "libs: ; cd \${PWD}" >> Makevars
| true |
38375811ad320d1c21357854d93471a785b0c5ff | Shell | meekrob/kw-onish-elt-2 | /ELT2_Summit_Analyses/02_SCRIPTS/ChIPpipeline.sh | UTF-8 | 18,191 | 3.4375 | 3 | [] | no_license | #!/usr/bin/env bash
#SBATCH --nodes=1
#SBATCH --ntasks=12
#SBATCH --time=0:35:00
#SBATCH --partition=shas
# --open-mode=append assures that the log files will be appended to from different jobs
# These directives will serve as defaults when submitted via sbatch
# but are comments when run via bash
NTHREADS=${SLURM_NTASKS} # passes --ntasks set above
echo "$SLURM_JOB_NAME[$SLURM_JOB_ID] $@" # log the command line
# Path of this script; the driver re-submits itself via sbatch for each step.
SUBMIT=$0
# Default job steps to run when none are given on the command line.
JOBSTEPS="SPP IDR BW BW-SUBTRACT BW-AVERAGE UNION AGGREGATE"
########################################################
##### CONFIGURATION VARIABLES
FLANK=150 # for bedToBw
# the genome sequence
GENOME_FASTA_N=/projects/dcking@colostate.edu/support_data/ce11/ce11.NMasked.fa
# the BWA indexes
BWA_GENOME=/projects/dcking@colostate.edu/support_data/bwa-index/ce11.unmasked.fa
CHROMLENGTHS=/projects/dcking@colostate.edu/support_data/ce11/ce11.chrom.sizes
BLACKLIST=/projects/dcking@colostate.edu/support_data/ce11/ce11-blacklist.bed
# Output names for the cross-stage peak union and its score data frame.
ALL_STAGES_UNION=allStagesUnion.bed
ALL_STAGES_AGGREGATE=${ALL_STAGES_UNION}.df
# PROJECT ORGANIZATION
INPUT_DIR=01_FASTQ
#ALIGN_DIR=03_ALIGN
#SPP_DIR=04_SPP
#IDR_DIR=05_IDR
ALIGN_DIR=03_SUBALIGN
SPP_DIR=04_SUBSPP
IDR_DIR=05_SUBIDR
SIG_DIR=06_SIGNAL
SEQ_DIR=07_SEQUENCES
# Ensure all pipeline output directories exist before any job step runs.
mkdir -pv $ALIGN_DIR $SPP_DIR $IDR_DIR $SIG_DIR $SEQ_DIR
# Helper functions
# Print all arguments to stderr, keeping stdout clean for data/pipelines.
errecho()
{
    # Quote "$@" so argument boundaries and embedded spaces survive intact.
    1>&2 echo "$@"
}
# Echo a command line, then execute it with eval under `time`.
# NOTE(review): eval re-parses the joined arguments, so callers deliberately
# pass strings containing pipes/redirections (see the jobstep commands);
# $@ is intentionally unquoted here -- do not "fix" without auditing callers.
run()
{
    echo "running $@"
    time eval $@
}
# Submit a job via sbatch, printing only the job id (--parsable) so callers
# can capture it for dependency chaining.
sb()
{
    # Quote "$@" so sbatch options containing spaces are forwarded intact.
    sbatch --parsable "$@"
}
# Build an sbatch dependency option ("-d afterok:<id1>,<id2>,...") from the
# given job ids.  With no ids, prints an empty string so callers can pass the
# result unconditionally.
deps()
{
    local joined="$*"
    joined=${joined// /,}
    local opt=""
    [ -n "$joined" ] && opt="-d afterok:$joined"
    echo $opt
}
makeFlankFilename()
{ # Predict the output filename written by bedToBw.sh for a given input and
  # flank size: "<name-without-extension>x<flank>n.bw" (it is not available
  # as an argument to that script).
    local source_file=$1
    local flank_size=$2
    local stem=${source_file%.*}
    printf '%s\n' "${stem}x${flank_size}n.bw"
}
if [ -z "$SLURM_JOB_ID" ]
######## THIS PART OF THE SCRIPT RUNS IN A NORMAL BASH SESSION ########
######## but launches JOBSTEPS using sbatch (also in this script) ########
######## The job steps are invoked by specifying input and output ########
######## filenames only. The specific arguments to the programs ########
######## called within the jobstep definitions in the sbatch ########
######## block of this script. ########
then
# Metadata file
metadatafile=$1
shift
[ $# -gt 0 ] && JOBSTEPS="$@"
# the "filename" portion of each block must always run
# in order to make it possible for the pipeline to be
# called with an internal starting point, as when
# the pipeline fails at some specific step.
all_ids=""
idr_filesnames=""
IDR_JOB_IDS=""
SCORE_FILEPATHS=""
SCORE_JOB_IDS=""
while read label rep1_fastq rep2_fastq input_fastq
do
stage_jids=""
# skip comment lines
if [ ${label:0:1} == '#' ]
then
errecho "skipping: $label $rep1_fastq $rep2_fastq $input_fastq" # essentially print 'line' back out
continue
fi
BASE_LOGFILE="$label.jobs.log.$(date +%y%m%d.%H%M)"
date > $BASE_LOGFILE
echo -e "\n-----------------------------------------------" >> $BASE_LOGFILE
echo -e "${label}\t${rep1_fastq}\t${rep2_fastq}\t${input_fastq}" >> $BASE_LOGFILE
#ALIGN
# FILENAMES
rep1_sam=${rep1_fastq/.fastq/.sam}
rep2_sam=${rep2_fastq/.fastq/.sam}
input_sam=${input_fastq/.fastq/.sam}
# JOBS
if [[ " $JOBSTEPS " =~ " BWA " ]]
then
T="--time=0:05:00"
align_jid1=$(sb --job-name=bwa-${label}-1 $SUBMIT BWA ${INPUT_DIR}/$rep1_fastq ${ALIGN_DIR}/$rep1_sam)
align_jid2=$(sb --job-name=bwa-${label}-2 $SUBMIT BWA ${INPUT_DIR}/$rep2_fastq ${ALIGN_DIR}/$rep2_sam)
align_jid3=$(sb --job-name=bwa-${label}-i $SUBMIT BWA ${INPUT_DIR}/$input_fastq ${ALIGN_DIR}/$input_sam)
stage_jids="$stage_jids $align_jid1 $align_jid2 $align_jid3"
fi
#CONVERT FORMAT
# files generated:
rep1_bam=${rep1_sam/.sam/.bam}
rep2_bam=${rep2_sam/.sam/.bam}
input_bam=${input_sam/.sam/.bam}
# JOBS
if [[ " $JOBSTEPS " =~ " BAM " ]]
then
D=$(deps $align_jid1)
bam_jid1=$(sb --job-name=bam-${label}-1 $O $D $SUBMIT BAM $ALIGN_DIR/$rep1_sam $ALIGN_DIR/$rep1_bam)
D=$(deps $align_jid2)
bam_jid2=$(sb --job-name=bam-${label}-2 $O $D $SUBMIT BAM $ALIGN_DIR/$rep2_sam $ALIGN_DIR/$rep2_bam)
D=$(deps $align_jid3)
bam_jid3=$(sb --job-name=bam-${label}-i $O $D $SUBMIT BAM $ALIGN_DIR/$input_sam $ALIGN_DIR/$input_bam)
stage_jids="$stage_jids $bam_jid1 $bam_jid2 $bam_jid3"
fi
#BW COMPUTE SIGNAL FILES
# files generated:
# see function: makeFlankFilename()
rep1_bw=${label}_1.bw
rep2_bw=${label}_2.bw
input_bw=${label}_input.bw
# JOBS
if [[ " $JOBSTEPS " =~ " BW " ]]
then
ntasks="--ntasks=4"
tim="--time=0:11:00"
D=$(deps $bam_jid1)
bw_jid1=$(sb --job-name=bw-${label}-1 $ntasks $tim $D $SUBMIT BW $ALIGN_DIR/$rep1_bam $SIG_DIR/$rep1_bw)
D=$(deps $bam_jid2)
bw_jid2=$(sb --job-name=bw-${label}-2 $ntasks $tim $D $SUBMIT BW $ALIGN_DIR/$rep2_bam $SIG_DIR/$rep2_bw)
D=$(deps $bam_jid3)
bw_jid3=$(sb --job-name=bw-${label}-i $ntasks $tim $D $SUBMIT BW $ALIGN_DIR/$input_bam $SIG_DIR/$input_bw)
stage_jids="$stage_jids $bw_jid1 $bw_jid2 $bw_jid3"
fi
#BW-SUBTRACT
# files generated:
rep1_input_subtracted_bw="${label}_1_minus_input.bw"
rep2_input_subtracted_bw="${label}_2_minus_input.bw"
SCORE_FILEPATHS="$SCORE_FILEPATHS $SIG_DIR/$rep1_input_subtracted_bw $SIG_DIR/$rep2_input_subtracted_bw"
if [[ " $JOBSTEPS " =~ " BW-SUBTRACT " ]]
then
ntasks="--ntasks=4"
tim="--time=0:03:00"
D=$(deps $bw_jid1 $bw_jid3)
bws_jid1=$(sb --job-name=${label}_1-${label}i $ntasks $tim $D $SUBMIT BW-SUBTRACT $SIG_DIR/$rep1_bw $SIG_DIR/$input_bw $SIG_DIR/$rep1_input_subtracted_bw)
D=$(deps $bw_jid2 $bw_jid3)
bws_jid2=$(sb --job-name=${label}_2-${label}i $ntasks $tim $D $SUBMIT BW-SUBTRACT $SIG_DIR/$rep2_bw $SIG_DIR/$input_bw $SIG_DIR/$rep2_input_subtracted_bw)
stage_jids="$stage_jids $bws_jid1 $bws_jid2"
SCORE_JOB_IDS="$SCORE_JOB_IDS $bws_jid1 $bws_jid2"
fi
# BW-AVERAGE
# file generated:
signal_averaged_file="${label}.bw"
if [[ " $JOBSTEPS " =~ " BW-AVERAGE " ]]
then
D=$(deps $bws_jid1 $bws_jid2)
ntasks="--ntasks=4"
tim="--time=0:03:00"
bwav_jid=$(sb --job-name=${label}-ave $ntasks $time $D $SUBMIT BW-AVERAGE $SIG_DIR/$rep1_input_subtracted_bw $SIG_DIR/$rep2_input_subtracted_bw $SIG_DIR/$signal_averaged_file)
stage_jids="$stage_jids $bwav_jid"
fi
#SPP
# files generated:
rep1_regionPeak=${rep1_bam%%.bam}_VS_${input_bam%%.bam}.regionPeak.gz
rep2_regionPeak=${rep2_bam%%.bam}_VS_${input_bam%%.bam}.regionPeak.gz
# SPP JOBSTEPS
if [[ " $JOBSTEPS " =~ " SPP " ]]
then
echo "pipeline: $label SPP"
D=$(deps $bam_jid1 $bam_jid3)
spp_jid1=$(sb --job-name=spp-${label}-1 $O $D $SUBMIT SPP $label ${ALIGN_DIR}/$rep1_bam ${ALIGN_DIR}/$input_bam $rep1_regionPeak)
D=$(deps $bam_jid2 $bam_jid3)
spp_jid2=$(sb --job-name=spp-${label}-2 $O $D $SUBMIT SPP $label ${ALIGN_DIR}/$rep2_bam ${ALIGN_DIR}/$input_bam $rep2_regionPeak)
stage_jids="$stage_jids $spp_jid1 $spp_jid2"
fi
# IDR launch
# file generated:
idr_out="${label}.narrowPeak"
idr_filenames="$idr_filenames ${IDR_DIR}/$idr_out"
# IDR JOBS
if [[ " $JOBSTEPS " =~ " IDR " ]]
then
echo "pipeline: $label IDR"
D=$(deps $spp_jid1 $spp_jid2)
idr_jid=$(sb --ntasks=1 --time=0:02:00 --job-name=idr-${label} $D $SUBMIT IDR ${SPP_DIR}/$rep1_regionPeak ${SPP_DIR}/$rep2_regionPeak ${IDR_DIR}/$idr_out)
stage_jids="$stage_jids $idr_jid"
fi
# Use IDR as the rejoin point from all of the branches.
IDR_JOB_IDS="$IDR_JOB_IDS $idr_jid"
#if [[ " $JOBSTEPS " =~ " LOG " ]]
if true
then
echo "pipeline: $label LOG "
echo $stage_jids
# Add a command to merge the temporary log files to the base log file
echo "# To merge the individual jobs logs into this file:" >> $BASE_LOGFILE
lognames=""
for jid in $stage_jids
do
lognames="$lognames slurm-$jid.out"
done
#D=$(deps $stage_ids)
#log_jid=$(sb --ntasks=1 --time=0:01:00 --job-name=$label.catlg --output=${label}.catlogs-%j.out $D $SUBMIT LOG $BASE_LOGFILE $lognames)
if [ -n "$lognames" ]
then
echo "cat $lognames >> $BASE_LOGFILE && rm -v $lognames" | tee -a $BASE_LOGFILE
fi
fi
all_ids="$all_ids $stage_jids"
done < $metadatafile
# UNION
# FILENAMES
if [[ " $JOBSTEPS " =~ " UNION " ]]
then
D=$(deps $IDR_JOB_IDS)
union_jid=$(sb $D --ntasks=1 --time=0:05:00 --job-name=union --output=union.%j.out $SUBMIT UNION $ALL_STAGES_UNION $idr_filenames)
all_ids="$all_ids $union_jid"
fi
# AGGREGATE
if [[ " $JOBSTEPS " =~ " AGGREGATE " ]]
then
D=$(deps $SCORE_JOB_IDS $union_jid)
aggregate_jid=$(sb $D --ntasks=1 --time=0:05:00 --job-name=aggregate --output=aggregate.%j.out $SUBMIT AGGREGATE $ALL_STAGES_UNION $ALL_STAGES_AGGREGATE $SCORE_FILEPATHS)
all_ids="$all_ids $aggregate_jid"
fi
# SEQUENCE
sequence_out=$SEQ_DIR/${ALL_STAGES_UNION/.bed/.fa}
if [[ " $JOBSTEPS " =~ " SEQUENCE " ]]
then
D=$(deps $union_jid)
sequence_jid=$(sb $D --ntasks=1 --time=0:01:00 --job-name=sequence --output=sequence.%j.out $SUBMIT SEQUENCE $ALL_STAGES_UNION $sequence_out)
all_ids="$all_ids $sequence_jid"
fi
echo "ALL JOBS SUBMITTED:"
all_ids=$(echo $all_ids) # trim ws
echo "jid=${all_ids// /,}"
else
######## THIS PART OF THE SCRIPT RUNS INSIDE SLURM, AND IS CALLED ########
######## FROM THE BASH SESSION. ########
export TMPDIR=$SLURM_SCRATCH
export TMP=$TMPDIR
jobstep=$1
shift
errecho "$jobstep SLURM_JOB_ID=$SLURM_JOB_ID"
date
#BWA ###################################
if [ $jobstep == "BWA" ]
then
2>&1 bwa | grep ^Program
2>&1 bwa | grep ^Version
infastq=$1
outsam=$2
cmd="bwa mem -t $SLURM_NTASKS $BWA_GENOME $infastq > $outsam"
run $cmd
#BAM ###################################
elif [ $jobstep == "BAM" ]
then
samtools --version
insam=$1
outbam=$2
filter="-F 1536"
quality="-q 30"
sort_prefix="$TMPDIR/samsort_$SLURM_JOB_ID"
cmd="samtools view -@ $SLURM_NTASKS -b $filter $quality -S ${insam} | samtools sort -T $sort_prefix -@ $SLURM_NTASKS -o ${outbam}"
run $cmd
cmd="samtools index $outbam"
run $cmd
cmd="samtools quickcheck $outbam && rm -v $insam"
run $cmd
#BW ###################################
elif [ $jobstep == "BW" ]
then
inbam_path=$1
inbam_filename=$(basename $inbam_path)
indir=$(dirname $inbam_path)
logbw=$2
logbw_filename=$(basename $logbw)
outdir=$(dirname $logbw)
outbed=${inbam_filename/.bam/.nonlog.bed}
# bedToBw.sh - creates a file according to its run parameters
bamToBw_outfile=$(makeFlankFilename $outbed $FLANK)
# example $bamToBw_outfile - LE_nonlogx150n.bw
# wig file - output of wigmath.LogTransform
logwig=${bamToBw_outfile/nonlogx${FLANK}n.bw/log.wig}
# get the chromosome locations from the alignment
cmd="bedtools bamtobed -i $inbam_path > $outdir/$outbed"
run $cmd
# bedToBw.sh- pad the chromosome locations according to $FLANK, scale by read depth.
# creates $outdir/$bamToBw_outfile
cmd="02_SCRIPTS/bedToBw.sh $outdir/$outbed $FLANK $CHROMLENGTHS -n -bw && rm -v $outdir/$outbed"
run $cmd
# perform a log transformation
# creates a wiggle file (ASCII)
cmd="02_SCRIPTS/javaGenomicsToolkit wigmath.LogTransform -p $SLURM_NTASKS -i $outdir/$bamToBw_outfile -o $outdir/$logwig"
run $cmd
# convert to bigWig (binary, compressed)
cmd="wigToBigWig $outdir/$logwig $CHROMLENGTHS $outdir/$logbw_filename && rm -v $outdir/$logwig"
run $cmd
#BW-SUBTRACT ###########################
elif [ $jobstep == "BW-SUBTRACT" ]
then
minuend=$1
subtrahend=$2
outfilename=$3
wigfilename=${outfilename/bw/wig}
# perform subtraction
# creates a wiggle file (ASCII)
cmd="02_SCRIPTS/javaGenomicsToolkit wigmath.Subtract -z -p $SLURM_NTASKS -m $minuend -s $subtrahend -o $wigfilename"
run $cmd
# convert to bigWig (binary, compressed)
cmd="wigToBigWig -clip $wigfilename $CHROMLENGTHS $outfilename && rm -v $wigfilename"
run $cmd
#BW-AVERAGE ###########################
elif [ $jobstep == "BW-AVERAGE" ]
then
rep1=$1
rep2=$2
outfilename=$3
wigfilename=${outfilename/bw/wig}
# perform average
# creates a wiggle file (ASCII)
cmd="02_SCRIPTS/javaGenomicsToolkit wigmath.Average -p $SLURM_NTASKS $rep1 $rep2 -o $wigfilename"
run $cmd
# convert to bigWig (binary, compressed)
# -clip is necessary to keep things inside CHROMLENGTHS
cmd="wigToBigWig -clip $wigfilename $CHROMLENGTHS $outfilename && rm -v $wigfilename"
run $cmd
# AGGREGATE ############################
elif [ $jobstep == "AGGREGATE" ]
then
bedfile=$1
shift
outfile=$1
shift
scorefiles="$@"
cmd="02_SCRIPTS/javaGenomicsToolkit ngs.SplitWigIntervalsToDataFrame -s -l $bedfile -o $outfile $scorefiles"
run $cmd
#SPP ###################################
elif [ $jobstep == "SPP" ]
then
prefix=$1
rep=$2
input=$3
output=$4
outdir=${SPP_DIR}
FRAGLEN=150
SPP=02_SCRIPTS/run_spp.R # loadbx has bin/spp as wrapper to run_spp.R
cmd="$SPP -c=$rep \
-i=$input \
-npeak=300000 \
-odir=${outdir} \
-speak=${FRAGLEN} \
-p=$SLURM_NTASKS \
-savr -rf \
-savp="${outdir}/$prefix.pdf" \
-out=${outdir}/$prefix.ccscores"
run $cmd
# results do not come out sorted, and sometimes have negatives in field 2 (awk)
_awk_filter_peakfile_()
{
# column 2: enforce a min of 0
# change 2 and 3 to ints
# print resultant columns 1-10
#awk 'BEGIN{OFS="\t"}{ if ($2<0) $2=0; print $1,int($2),int($3),$4,$5,$6,$7,$8,$9,$10;}'
awk 'BEGIN{OFS="\t"}{ if ($2<0) $2=0; print $0;}'
}
declare -f _awk_filter_peakfile_ # show this function in the log
sortTempfile="${outdir}/.${output}"
cmd="zcat ${outdir}/$output | _awk_filter_peakfile_ | sort -k1,1 -k2,2n | gzip -c - > $sortTempfile && mv $sortTempfile ${outdir}/$output"
run $cmd
#IDR ###################################
elif [ $jobstep == "IDR" ]
then
rep1=$1
rep2=$2
outfile=$3
outtmp=${outfile/.narrowPeak/.unthresh.narrowPeak}
IDR_THRESHOLD=0.05
echo "IDR_THRESHOLD=$IDR_THRESHOLD"
cmd="idr --samples $rep1 $rep2 --input-file-type narrowPeak \
--rank signal.value \
--soft-idr-threshold ${IDR_THRESHOLD} \
--plot --use-best-multisummit-IDR \
--random-seed 13 \
--output-file $outtmp"
run $cmd
IDR_THRESH_TRANSFORMED=$(awk -v p=${IDR_THRESHOLD} 'BEGIN{print -log(p)/log(10)}')
echo "IDR_THRESH_TRANSFORMED=$IDR_THRESH_TRANSFORMED"
idr_filter()
{
#awk 'BEGIN{OFS="\t"} $12>='"${IDR_THRESH_TRANSFORMED}"' {print $1,$2,$3,$4,$5,$6,$7,$8,$9,$10}' $1
awk 'BEGIN{OFS="\t"} $12>='"${IDR_THRESH_TRANSFORMED}"' {print $0}' $1
}
declare -f idr_filter
cmd="idr_filter $outtmp | sort -k1 -k2,2n -u | bedtools intersect -v -a stdin -b ${BLACKLIST} > $outfile"
run $cmd
#BZ ####################################
elif [ $jobstep == "BW" ]
then
errecho "jobstep BW"
#UNION #################################
elif [ $jobstep == "UNION" ]
then
outfile=$1
shift
infiles=$@
noIds=${outfile/.bed/.bed.tmp}
cmd="cat $infiles | bed_merge_overlapping.py | sort -k1,1 -k2,2n > $noIds"
run $cmd
read nlines fname < <(wc -l $noIds)
nplaces=${#nlines} # this will determine the padding of the ELT2Peak000... IDs
# awk madness:
# escape the internal:" and $ vv vv vv
cmd="awk -v padding=$nplaces '{printf(\"%s\tELT2Peak%0*d\n\", \$0, padding,NR)}' $noIds > $outfile"
run $cmd
#SEQUENCE #################################
elif [ $jobstep == "SEQUENCE" ]
then
infile=$1
outfile=$2
cmd="bedtools getfasta -name -fi $GENOME_FASTA_N -bed $infile > $outfile"
run $cmd
#LOG ###################################
# append the individual slurm logs to the main log file and
# delete them
elif [ $jobstep == "LOG" ]
then
main=$1
shift
outfiles="$@"
#cmd="cat $outfiles >> $main && rm -v $outfiles"
cmd="cat $outfiles >> $main"
run $cmd
#NOT DEFINED
else
errecho "jobstep $jobstep is not defined. Must be one of $JOBSTEPS"
fi # END
fi
| true |
3adbce2a28ebf2030aaea35d41fb9c1bb9848d42 | Shell | ntp96/kai-auth | /generate.sh | UTF-8 | 308 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# For each service name given on the command line, generate Go gRPC bindings
# from protos/<name>.proto and move the generated sources to generated/<name>/.
for i in "$@"; do
  echo "generating ${i} service"
  echo " - grpc bindings"
  protoc \
  --proto_path=protos/ \
  --go_out=plugins=grpc:. \
  "protos/${i}.proto"
  # move generated sources (quote the variable; keep the *.go glob live)
  mkdir -p "generated/${i}"
  mv protos/"${i}".*.go "generated/${i}/."
done
echo "services generated"
| true |
e42dc16a84b854124ab78305f724e3653f1c1ef3 | Shell | Svennito/gsaslanguage | /gsas_get_current_wtfrac | UTF-8 | 1,694 | 3.9375 | 4 | [] | no_license | #!/bin/bash
#
# gsas_current_wtfrac - extract current weight fraction of a phase
# (C) Sven Vogel, sven@lanl.gov
#
# call
#
# gsas_current_wtfrac <phase>
#
# phase - the phase for which the weight fraction is extracted
#
# The current experiment name is read from the GSAS_EXP file in the CWD;
# results are staged through temp.txt and the final value goes to stdout.
# Check syntax
if [ "$1" = "" ]; then
echo "$0 called without a phase number!"
read -p "You should abort here, please hit Ctrl-C."
exit 0
fi
# Bail out (status 0) if no weight fractions appear in the LST file yet,
# unless the caller passed "ignore_not_refined" as the second argument.
grep "Wt." `cat GSAS_EXP`.LST > temp.txt
if [ $? -ne 0 ]; then
if [ "$2" != "ignore_not_refined" ]; then
echo "Could not find wt. fraction in current LST file - maybe it was not refined yet?"
read -p "You should abort here, please hit Ctrl-C."
fi
exit 0
fi
# NOTE(review): this temp.txt write is overwritten below before being read --
# looks like leftover debugging; confirm before removing.
grep "EXPR NPHAS" `cat GSAS_EXP`.EXP | awk '{print $3+$4+$5+$6+$7+$8+$9+$10+$11}' > temp.txt
num_phases=`gsas_get_number_of_phases`
num_hist=`gsas_get_number_of_histograms`
# GSAS prints weight fractions 7 histograms per line; compute how many lines
# each phase occupies and where this phase's last line sits in the block.
let phase=$1
let lines_per_phase=$num_hist/7+1
let modulus=$num_hist%7
if [ $modulus -eq 0 ]; then
let lines_per_phase=$lines_per_phase-1
fi
let lines_for_this_phase=$lines_per_phase*$phase
let lines_for_total_block=$lines_per_phase*$num_phases
# echo "Phase requested: <$phase>"
# echo "Number of phases in refinement: <$num_phases>"
# echo "Number of histograms in refinement: <$num_hist>"
# echo "Number of lines with wt. fractions per phase in LST file: <$lines_per_phase>"
# echo "Modulus: <$modulus>"
# echo "Lines for block of current weight fractions: <$lines_for_total_block>"
# echo "Last line for this phase: <$lines_for_this_phase>"
# Take the last (most recent) block of weight fractions from the LST file,
# isolate this phase's final line, and pull out column 3 (the fraction).
grep "Wt." `cat GSAS_EXP`.LST | tail -n $lines_for_total_block | head -n $lines_for_this_phase | tail -n 1 | awk '{print $3}' > temp.txt
# output weight fraction, so the calling process can read it
awk '{printf("%.5f\n", +$1) }' < temp.txt
| true |
d00e9163dbc22abbaa809e095bf330d338d78cc5 | Shell | RawIron/devops-my-ubuntu | /scripts/list-user-installed-packages | UTF-8 | 417 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env bash
#
# found in
# https://superuser.com/questions/48374/find-all-user-installed-packages/105000#105000
# apt-mark showauto <- /var/lib/apt/extended_states
# dpkg-query <- /var/log/dpkg.log*
# Inner comm -23: all installed packages minus auto-installed ones
#   => packages explicitly installed by someone.
# Outer comm -13: drop those that were already part of the initial
#   installer image => packages the *user* installed.
comm -13 \
  <(gzip -dc /var/log/installer/initial-status.gz | sed -n 's/^Package: //p' | sort) \
  <(comm -23 \
    <(dpkg-query -W -f='${Package}\n' | sed 1d | sort) \
    <(apt-mark showauto | sort) \
  )
| true |
8790dab7121fd3599376d0052bd37dce4545f38d | Shell | druchem/sus | /nfs.sh | UTF-8 | 1,423 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Set up NFS-rooted network booting: install the NFS server, rebuild the
# initrd for NFS boot, publish kernel/initrd over TFTP, add a netboot menu
# entry and export the NFS root plus /home.
# Expects TFTP_DIRECTORY, NFS_DIRECTORY and LAN_SERVER_IP in the environment.
NETBOOT_FILENAME=networkboot.cfg
# Install NFS server
apt-get -y install nfs-kernel-server
# Backup initrd config
cp /etc/initramfs-tools/initramfs.conf /etc/initramfs-tools/initramfs.conf.bak
# Update initrd config for network boot
sed -i 's,MODULES=most,MODULES=netboot,' /etc/initramfs-tools/initramfs.conf
echo "BOOT=nfs" >> /etc/initramfs-tools/initramfs.conf
# Rebuild initrd image into the TFTP tree (quote paths from the environment)
mkinitramfs -o "$TFTP_DIRECTORY/initrd.img"
# Restore backed up initrd config
mv /etc/initramfs-tools/initramfs.conf.bak /etc/initramfs-tools/initramfs.conf
# Copy the running kernel ($() instead of legacy backticks)
cp "/boot/vmlinuz-$(uname -r)" "$TFTP_DIRECTORY/vmlinuz"
# Add network boot record (group the echoes into one redirection)
{
  echo -e "label boot"
  echo -e "\tmenu label ^Boot"
  echo -e "\tkernel vmlinuz"
  echo -e "\tappend initrd=initrd.img root=/dev/nfs nfsroot=$LAN_SERVER_IP:$NFS_DIRECTORY ip=dhcp rw"
} >> "$TFTP_DIRECTORY/$NETBOOT_FILENAME"
# Modify boot record to include our netboot menu entry
sed -i 's,include debian-installer/amd64/boot-screens/gtk.cfg,include debian-installer/amd64/boot-screens/gtk.cfg\ninclude '"$NETBOOT_FILENAME"',' "$TFTP_DIRECTORY/debian-installer/amd64/boot-screens/menu.cfg"
# Create NFS export entries
echo -e "$NFS_DIRECTORY *(rw,sync,no_subtree_check,no_root_squash)\n" >> /etc/exports
echo -e "/home *(rw,sync,no_subtree_check,root_squash)\n" >> /etc/exports
# Apply NFS export entries
exportfs -a
| true |
dbe85f390e0aa7cb5db958aa3a185038a6a438d4 | Shell | bradlarsen/switchback | /tools/run_experiments.sh | UTF-8 | 2,643 | 3.78125 | 4 | [] | no_license | #!/bin/bash
set -e # exit on simple errors (?)
# Resource caps applied (via ulimit) to each solver run, in KB / seconds.
MEM_LIMIT=49283072 # 47 GB
#MEM_LIMIT=6291456 # 6 GB
TIME_LIMIT=1200 # 20 minutes
# Instance files, solver binary and per-run log destination.
TILES_DIR="${HOME}/hg/switchback/testdata/korf100"
SEARCH="${HOME}/hg/switchback/build/spacerelease/search"
LOG_DIR="${HOME}/hg/switchback/run_log_hierarchical_algs_tiles"
# Space-separated list of algorithms to benchmark.
ALGORITHMS="hidastar hastar switchback"
# Print provenance for this experiment run: start time, mercurial revision
# of the working copy, and the host name.  Uses $() instead of backticks.
get_run_info ()
{
    echo "start time: $(date)"
    echo "mercurial revision: $(hg id -n)"
    echo "system: $(hostname)"
}
# Compose the log-file path for one (domain, algorithm, instance) run as
# "$LOG_DIR/<algorithm>_<domain>_<instance>.log".  Relies on the global
# LOG_DIR set at the top of the script.
get_log_filename ()
{
    local run_domain=$1 run_algorithm=$2 run_instance=$3
    printf '%s/%s_%s_%s.log\n' "$LOG_DIR" "$run_algorithm" "$run_domain" "$run_instance"
}
# Run one Korf 15-puzzle instance with the given algorithm, teeing solver
# output to both the terminal and the per-run log file.
#   $1 - algorithm name, $2 - instance number (file under $TILES_DIR)
run_korf_instance ()
{
    local algorithm=$1
    local instance_num=$2
    local logfile=$(get_log_filename "korf100" "$algorithm" "$instance_num")
    local instancefile="${TILES_DIR}/${instance_num}"
    # stderr is merged into the log so failures are captured too.
    "$SEARCH" "tiles" "${algorithm}" "${instancefile}" 2>&1 | tee "$logfile"
}
# Same as run_korf_instance but for the macro-tiles domain; logs under the
# "macro-korf100" prefix.
run_macro_korf_instance ()
{
    local algorithm=$1
    local instance_num=$2
    local logfile=$(get_log_filename "macro-korf100" "$algorithm" "$instance_num")
    local instancefile="${TILES_DIR}/${instance_num}"
    "$SEARCH" "macro_tiles" "${algorithm}" "${instancefile}" 2>&1 | tee "$logfile"
}
############################################################
# MAIN
############################################################
mkdir -p "$LOG_DIR"
echo "$(get_run_info)" > "$LOG_DIR/run_info.log"
#for algorithm in $ALGORITHMS; do
#    for instance in `seq 1 100`; do
#        (
#        echo "########################################"
#        echo "# Running $algorithm on Macro Korf #$instance"
#        echo "########################################"
#
#        echo "attempting to set time limit to $TIME_LIMIT"
#        ulimit -t $TIME_LIMIT
#        echo "attempting to set memory limit to $MEM_LIMIT"
#        ulimit -v $MEM_LIMIT
#        echo "showtime!"
#
#        run_macro_korf_instance $algorithm $instance
#        )
#    done
#done
# Run every algorithm on all 100 Korf instances.  Each run happens in a
# subshell so the ulimit caps (CPU seconds, virtual memory) apply only to
# that run and do not accumulate on the parent shell.
for algorithm in $ALGORITHMS; do
    for instance in `seq 1 100`; do
        (
        echo "########################################"
        echo "# Running $algorithm on Korf #$instance"
        echo "########################################"
        echo "attempting to set time limit to $TIME_LIMIT"
        ulimit -t $TIME_LIMIT
        echo "attempting to set memory limit to $MEM_LIMIT"
        ulimit -v $MEM_LIMIT
        echo "showtime!"
        run_korf_instance $algorithm $instance
        )
    done
done
echo "stop time: `date`" >> "$LOG_DIR/run_info.log"
| true |
cdaa9715852da28a9f177d616631e33a7b33a2d4 | Shell | ma2o/VarCap | /batch/A1_read_filter_set.sh | UTF-8 | 355 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# Cap the subsample size in each matching sample directory's variant.config.
#   $1 - extended regex selecting sample directories in the CWD
REGEX=$1
MAX_READS=8000000
# Iterate over directories (trailing slash kept, as before) whose names
# match the regex.  Filter the glob with [[ =~ ]] instead of parsing `ls`.
for file in */; do
  [[ $file =~ $REGEX ]] || continue
  # Extract the "BothS <count>" value recorded by the earlier pipeline step.
  BS=$( grep -e 'BothS' "$file/info.txt" | cut -d' ' -f2 )
  echo "$file"
  echo "FILTER:$BS"
  # Clamp to the maximum permitted read count.
  if [ "$BS" -gt "$MAX_READS" ]; then
    BS=$MAX_READS
  fi
  echo "READS:$BS"
  sed -i 's/SUBSAMPLE_SIZE_ALT=.*/SUBSAMPLE_SIZE_ALT\='"$BS"'/' "$file/variant.config"
done
| true |
c0a0b03b1b94b3d1138896f368cc0da040bc3072 | Shell | rgoulter/system-testing | /src/test/resources/run-sample.sh | UTF-8 | 1,214 | 4.125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
##
## Helper script to capture output into a file,
## piping to a format like "CMD.ARG_ARG_ARG.FILE.HOST.VERSION.txt"
## e.g.
## $ cd /path/to/hg/repo
## $ /path/to/run-sample.sh ./sleek examples/working/sleek/sleek.slk
##
# Intend to echo A.B_C_..._D.E
# Assume good # args
# Build a log-file name of the form
#   CMD.ARG_ARG_ARG.FILE.HOST.HGVERSION.txt
# from a command line; the last argument is treated as the input file.
# Assumes at least two arguments and that the CWD is inside the hg repo.
function filename_for_command {
    # Use a local instead of the original global OUTPUT; basename for clarity.
    # Expansions are quoted so paths with spaces survive intact.
    local name
    name=$(basename "$1")
    shift 1

    # Join all arguments except the trailing filename with underscores,
    # prefixing the group with a single dot (so we get '_' only *between*
    # the args, exactly like the original once-then-loop structure).
    local first_arg=1
    while [ $# -gt 1 ]; do
        if [ $first_arg -eq 1 ]; then
            name="${name}.${1}"
            first_arg=0
        else
            name="${name}_${1}"
        fi
        shift 1
    done

    # Trailing filename, then host and mercurial revision for provenance.
    name="${name}.$(basename "$1")"
    name="${name}.$(hostname)"
    name="${name}.$(hg identify)"

    # Friendly extension
    name="${name}.txt"

    echo $name
}
# directory of this bash script (resolved even when invoked via a path)
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# Derive the log name from the full command line, then run the command and
# tee its output next to this script.
# NOTE(review): $* is unquoted, so arguments containing spaces will be
# re-split before both the filename builder and the actual command -- the
# usage examples only pass simple paths; confirm before tightening.
FILENAME=$(filename_for_command $*)
$* | tee $DIR/$FILENAME
| true |
e88652da6ad5c4b4ac266da682f1f09efd0fc2f5 | Shell | AzureAdvocateBit/contosocrafts-fastapi-1 | /create_venvs.sh | UTF-8 | 1,397 | 3.65625 | 4 | [
"MIT"
] | permissive | venv_name=".venv"
requirements_file='requirements.txt'
# terminal colors
Red=$'\e[1;31m'
Yellow=$'\e[1;33m'
Normal=$'\e[1;0m'
# assumes the script is being executed from the workspace directory
cd src/
BASE_DIR=$PWD
echo $BASE_DIR
PROJECT_DIRS=(website xproductsapi xmessageprocessor)
for p_dir in "${PROJECT_DIRS[@]}" ; do
# check if dir exists
if [[ -d "$p_dir" && ! -L "$p_dir" ]]; then
echo "${Yellow}Working in $p_dir directory${Normal}"
cd $p_dir
# check for existing virtual directory named $venv_name
if [[ -d "$venv_name" ]]; then
echo "Existing ${venv_name} directory found in ${p_dir}.\n${Red}Attempting to remove.${Normal}"
rm -rf $venv_name
else
echo "${venv_name} directory not found in ${p_dir}."
fi
echo "${Yellow}Creating virtual environment in ${venv_name}${Normal}"
python -m venv --prompt "${PWD##*/}-${venv_name:1}" ${venv_name}
${venv_name}/bin/pip install --upgrade pip wheel
# install dependencies if requirements.txt exists
if [[ -f "$requirements_file" ]]; then
echo "${Yellow}A '$requirements_file' file was located."
echo "${Yellow}Installing requirements.${Normal}"
${venv_name}/bin/pip install -r requirements.txt
else
echo "${Red}No '$requirements_file' file not found in ${PWD}.${Normal}"
fi
echo "Moving on... 🚙"
fi;
done | true |
ed997e742d2a1768f7e0388f90bf5005eed53275 | Shell | obino/appscale | /AppTaskQueue/test/helpers/prepare-cassandra.sh | UTF-8 | 2,004 | 4.1875 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
#
# Ensures that single-node Cassandra cluster is running on this machine.
# Creates AppScale-related tables in Cassandra.
# Abort on any command failure and on use of unset variables.
set -e
set -u
# Print command-line help to stdout and abort with status 1.
usage() {
  cat <<USAGE
Usage: ${0} --private-ip <IP> --zk-ip <IP>

Options:
 --private-ip <IP> Private IP of this machine
 --zk-ip <IP> IP of the zookeeper machine
USAGE
  exit 1
}
# Parsed command-line values (empty until the flags below are seen).
PRIVATE_IP=
ZK_IP=
# Let's get the command line arguments.
# Each recognized flag consumes its value and continues; anything else
# (including a missing value) falls through to usage().
# NOTE(review): under `set -u`, a flag given as the *last* argument makes the
# post-shift "${1}" reference fail before usage() is reached -- confirm.
while [ $# -gt 0 ]; do
  if [ "${1}" = "--private-ip" ]; then
    shift
    if [ -z "${1}" ]; then
      usage
    fi
    PRIVATE_IP="${1}"
    shift
    continue
  fi
  if [ "${1}" = "--zk-ip" ]; then
    shift
    if [ -z "${1}" ]; then
      usage
    fi
    ZK_IP="${1}"
    shift
    continue
  fi
  usage
done
# log <message> [<level>] -- write a timestamped log line to stdout.
# The level defaults to INFO.  (LEVEL intentionally stays non-local, as in
# the original.)
log() {
  LEVEL=${2:-INFO}
  printf '%s: %s %s\n' "$(date +'%a %b %d %T %Y')" "$LEVEL" "$1"
}
# Both IPs are mandatory.
if [ -z ${PRIVATE_IP} ] || [ -z ${ZK_IP} ]; then
  usage
fi
# Record the cluster topology where AppScale's tooling expects it.
echo ${PRIVATE_IP} > /etc/appscale/masters
echo ${PRIVATE_IP} > /etc/appscale/slaves
echo ${ZK_IP} > /etc/appscale/zookeeper_locations
log "Configuring Cassandra"
/root/appscale/scripts/setup_cassandra_config_files.py --local-ip ${PRIVATE_IP} \
  --master-ip ${PRIVATE_IP}
log "Starting Cassandra"
su -c '/opt/cassandra/cassandra/bin/cassandra -p cassandra.pid' cassandra
# Poll nodetool until this node reports Up/Normal ("UN"), with a 60 s cap.
# The `while ! (...)` condition keeps the grep failure from tripping set -e.
cassandra_wait_start=$(date +%s)
while ! (/opt/cassandra/cassandra/bin/nodetool status | grep UN); do
  current_time=$(date +%s)
  elapsed_time=$((current_time - cassandra_wait_start))
  if [ "${elapsed_time}" -gt 60 ]
  then
    log "Timed out waiting for Cassandra to start" "ERROR"
    exit 1
  fi
  sleep 1
done
log "Creating tables"
# Up to 3 attempts; a failing command inside the && list does not trigger
# set -e, so the retry/sleep path is reached as intended.
for i in 1 2 3 ; do
  RESULT=FAILED
  appscale-prime-cassandra --replication 1 && RESULT=OK && break
  log "Failed to create Cassandra tables" "WARNING"
  sleep 15
done
if [ ${RESULT} = FAILED ]; then
  log "Failed to create Cassandra tables after 3 retries" "ERROR"
  exit 1
fi
| true |
81cdd0380a3e5796441a607ab45df8bee9b04bb1 | Shell | minghan/oh-my-zsh | /custom.zsh | UTF-8 | 1,791 | 2.625 | 3 | [] | no_license | # ============================
# Alias
# Convenience aliases and small helpers for interactive zsh.
alias lsc='ls --hide="*.o" --hide="*.dep"'
# noglob keeps zsh from glob-expanding rake task args like "db:migrate[x]".
alias rake="noglob rake"
alias git-reset-all="git reset --hard ; git clean -df ; git pull ; "
alias git-addremove="git add . ; git add -u"
# zsh one-line function definitions (bash would need a ';' before '}').
grep_() { grep $@ -r -i --color='always' * }
find_() { find ./ -name "*$@*" }
psgrep_() { ps ax | grep -i "$@" | grep -v grep }
# pskill_: SIGKILL every process whose ps line matches the argument.
pskill_() { ps ax | grep -i "$@" | grep -v grep | awk '{print $1}' | xargs kill -SIGKILL }
alias gcc_="gcc -ansi -pedantic -W -Wextra -Wall -g"
alias valgrind_="valgrind --tool=memcheck --leak-check=yes --show-reachable=yes"
# diff ignoring whitespace and blank-line differences; treat files as text.
alias diff_="diff -w -a -b -B"
# Safety nets: prompt before removal, verbose file operations.
alias rm="rm -i -v"
for c in cp chmod chown rename; do
    alias $c="$c -v"
done
export EDITOR="vim"
# ============================
# Useful keybindings
# Meta-u to chdir to the parent directory
# bindkey -s '\eu' '^Ucd ..; ls^M'
# ===========================
# Host Specific
# Default python REPL startup file; overridden per-host below.
export PYTHONSTARTUP=~/.oh-my-zsh/load_pyhist.py
HOSTNAME=`hostname`
# Per-host configuration, keyed on the machine's hostname.
if [[ $HOSTNAME =~ '.*andrew\.cmu\.edu' ]]; then
    # CMU Andrew hosts: AFS token helper, course tooling, local python eggs.
    alias aklog_="aklog cs.cmu.edu"
    # export PATH=/afs/cs.cmu.edu/academic/class/15410-s11/bin:$PATH
    bindkey "^[[3~" delete-char
    alias simics='make && simics4'
    # Override
    export PYTHONSTARTUP=~/private/load_pyhist.py
    export PYTHONPATH=$PYTHONPATH:~/eggs
    export PATH=$PATH:~/eggs
# NOTE(review): the stanford branch and the final else are empty
# placeholders. zsh appears to tolerate empty branch bodies, but this
# would be a syntax error if the file were ever sourced by bash —
# confirm it is only ever loaded by zsh (oh-my-zsh).
elif [[ $HOSTNAME =~ '.*stanford\.edu' ]]; then
elif [[ $HOSTNAME =~ '^hanworks$' ]]; then
    # Hadoop/HBase dev box (CDH3u4-era install paths).
    export HADOOP_HOME="/home/minghan/apps/hadoop-0.20.2-cdh3u4"
    export HBASE_HOME="/home/minghan/apps/hbase-0.90.6-cdh3u4"
    # export PATH="$PATH:$HADOOP_HOME/bin"
    export JAVA_HOME="/usr/lib/jvm/jdk1.6.0_32"
elif [[ $HOSTNAME =~ '^hanworks-air$' ]]; then
    export PATH=$PATH:/usr/local/mysql/bin
    alias vi='vim'
else
fi
# ===========================
| true |
f8537beb56802ad4fb5f5e2c8b836747391be81e | Shell | jgarland79/sandbox2 | /test.sh | UTF-8 | 1,348 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Smoke test: clone the sandbox repo into a scratch dir, commit a trivial
# change on a fresh timestamp-named branch, push it, and open a pull
# request through the GitHub REST API.
olddir=$(pwd)
# NOTE(review): credentials are hardcoded (git_pass looks like a
# placeholder); move them to env vars / a credential helper before use.
git_user='jgarland79'
git_pass='password'
git_api='https://api.github.com'
git_web='https://github.com'
git_repo='sandbox2'
repo="${git_web}/${git_user}/${git_repo}.git"
file="README.md"
# Branch name and payload derive from the current time plus random hex,
# so repeated runs do not collide.
branch="$(date '+%Y%m%d%H%M%S')"
random=$(openssl rand -hex 16)
tmp_dir="/tmp/${random}"
rm -rf ${tmp_dir}
mkdir ${tmp_dir}
cd ${tmp_dir}
# Shallow clone with no checkout (-n), then materialise only README.md.
git clone -n ${repo} --depth 1 ${tmp_dir}
git checkout HEAD ${file}
#base_sha=$(git log -n 1 |head -n 1 |awk '{print $2}')
# Append a blank line, the branch name, and the random token to the file.
echo >>${file}
echo ${branch} >>${file}
echo ${random} >>${file}
git reset HEAD ./
echo Switching to ${branch}
git checkout -b ${branch}
#git branch ${branch}
git add ${file}
git status
git commit -m 'added date and random data'
echo Pushing to ${branch}
git push -u origin "${branch}"
#head_sha=$(git log --branches ${branch} -n 1 |head -n 1 |awk '{print $2}')
# JSON body for the "create a pull request" API call.
json="{
  \"title\": \"Amazing new feature\",
  \"body\": \"Please pull this in!\",
  \"head\": \"${branch}\",
  \"base\": \"master\"
}"
#json="{
#\"title\": \"Amazing new feature\",
#\"body\": \"Please pull this in!\",
#\"head\": \"${branch}\",
#\"head_sha\": \"${head_sha}\",
#\"base\": \"master\",
#\"base_sha\": \"${base_sha}\"
#}"
echo "${json}"
# NOTE(review): basic auth with an account password may be rejected by
# GitHub nowadays (token required) — confirm before relying on this.
curl -u "${git_user}:${git_pass}" \
 -XPOST "${git_api}/repos/${git_user}/${git_repo}/pulls" \
 -d "${json}"
# Return to the starting directory and remove the scratch clone.
cd ${olddir}
rm -rf ${tmp_dir}
| true |
ae0f655e71ae863a96d8c210c584e0c6ebfae16d | Shell | jitendrakr54/Learning | /shell_scripting/code/015_while_loop.sh | UTF-8 | 219 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# Syntax
# while [ condition ]
# do
#     command1
#     command2
#     command3
# done

# Count from 1 to 10, printing each value on its own line.
n=1
while [ "$n" -le 10 ]
do
    echo "$n"
    n=$((n+1))
done
621d34baa9cbe03fd82792dffeb553a5ac0e6413 | Shell | d8bit/bashScripts | /scripts.sh | UTF-8 | 1,089 | 4.0625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Per-day log file names, e.g. cron/log/31-12-2024-error.txt
currentDate=`date '+%d-%m-%Y'`
errorFile="cron/log/$currentDate-error.txt"
logFile="cron/log/$currentDate-log.txt"
addTimestamps() {
    # Append the current wall-clock time, wrapped in brackets, to both
    # the error file and the log file.
    echo "[$(date '+%H:%M:%S')]" >> "$errorFile"
    echo "[$(date '+%H:%M:%S')]" >> "$logFile"
}
runCommand() {
    # Run an arbitrary command string: stdout goes to $logFile, stderr to
    # $errorFile. Prints the command's exit status on stdout.
    addTimestamps
    # Record the command line in BOTH files. (The previous chained
    # redirection ">> $logFile >> $errorFile" redirected fd 1 twice, so
    # only the last file received the text; the first was left empty.)
    echo "command: $1" >> "$logFile"
    echo "command: $1" >> "$errorFile"
    (eval "$1") >> "$logFile" 2>> "$errorFile"
    echo $?
}
log() {
    # Append a timestamp line followed by the message to the log file.
    addTimestamps
    # Quote $1 so the message is written verbatim (unquoted it would be
    # word-split and glob-expanded by the shell).
    echo "$1" >> "$logFile"
}
logError() {
    # Append a timestamp line followed by the message to the error file.
    addTimestamps
    # Quote $1 so the message is written verbatim (unquoted it would be
    # word-split and glob-expanded by the shell).
    echo "$1" >> "$errorFile"
}
runScript() {
    # Run a command via runCommand, retrying on failure with a pause
    # between attempts. Prints 1 on stdout if any attempt succeeded,
    # 0 if every attempt failed.
    local counter=0
    local executedOk=0
    local maxAttempts=3   # total attempts; the old "-le" gave 4
    local sleepTime=5m    # pause between retries
    while [ "$counter" -lt "$maxAttempts" ] && [ "$executedOk" -eq 0 ]
    do
        commandResult=$(runCommand "$1")
        if [ "$commandResult" -eq 0 ]; then
            executedOk=1
        else
            sleep "$sleepTime"
            counter=$((counter+1))
        fi
    done
    echo $executedOk
}
# script example
# Import a SQL dump with retries; runScript prints 1 on success, 0 on
# failure.
# NOTE(review): the MySQL password is on the command line and therefore
# visible in `ps` output and in the log files — confirm this is
# acceptable, or switch to a credentials file.
scriptResult=`runScript "mysql -u userName -ppassword databaseName < mysqlfile.sql"`
if [ $scriptResult -eq 0 ]; then
    logError "Script with error"
    exit 1
else
    log "Script without error"
fi
log "Done"
| true |
ea902c323115eb4beb064451a89c6f3a93461b98 | Shell | aglarendil/nexus3-cli | /tests/wait-for-nexus.sh | UTF-8 | 407 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
function nexus_ready {
  # Succeed (exit 0) iff the URL given in $1 answers with HTTP 200.
  local status
  status=$(curl -o /dev/null -s -w "%{http_code}\n" "$1")
  [[ "$status" == "200" ]]
}
count=0
# Poll the Nexus URL (arg 1, default localhost:8081) every 3s until it
# answers 200; dump diagnostics and bail out after 100 attempts (~5 min).
until nexus_ready "${1:-http://localhost:8081}"
do
  count=$((count+1))
  if [ ${count} -gt 100 ]
  then
    echo 'Timeout-out waiting for nexus container'
    docker logs --tail 50 nexus
    docker ps
    # NOTE(review): this passes the -w format string as the URL argument
    # (there is no -w flag here) — it looks like a leftover; confirm the
    # intended command was: curl -sv -w "%{http_code}\n" "$1"
    curl -sv "%{http_code}\n" "$1"
    netstat -ntlp
    exit 1
  fi
  sleep 3
done
| true |
f645fa81bfdcb99a4547acfd503d4e1479e85fe6 | Shell | Smazle/Authorship-Verification | /test/svm_author/configuration_1/svm_test.sh | UTF-8 | 1,563 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env bash
# Generate n-gram features for one PAN data set unless the output file
# already exists (feature extraction is expensive, so results are cached).
#   $1 - label used in the progress message
#   $2 - input corpus directory
#   $3 - output feature file
gen_features() {
    if [ ! -f "$3" ]; then
        echo "Generating features for $1"
        ../../../feature_extraction/main.py "$2" "$3" \
            --normalize false \
            --corpus brown \
            --char-n-gram 3 4 5 --char-n-gram-size 500 \
            --word-n-gram 3 4 --word-n-gram-size 100 \
            --postag-n-gram 2 3 4 --postag-n-gram-size 20
    fi
}

# Run the SVM author-verification classifier 100 times against a test
# feature file and pipe all runs into the result analyser.
#   $1 - label used in the progress message
#   $2 - test feature file
run_test() {
    echo "TESTING $1"
    for i in {1..100}
    do
        ./svm_author.py ./13_train.txt "$2" \
            --with-normalization \
            --c 100 \
            --gamma 0.00001
    done | ./analyse.hs
}

gen_features "PAN 2013 TRAIN" ../../../data/pan_2013/ ./13_train.txt
gen_features "PAN 2013 TEST 1" ../../../data/pan_2013_test_1/ ./13_test_1.txt
gen_features "PAN 2013 TEST 2" ../../../data/pan_2013_test_2/ ./13_test_2.txt

run_test "PAN2013 1" ./13_test_1.txt
run_test "PAN2013 2" ./13_test_2.txt
| true |
882cf9ebdfc4ad806cf8c2c488a5c27c88582a11 | Shell | amalhotrabb/customApigee | /setup/provisioning/setup.sh | UTF-8 | 2,680 | 2.984375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Provision demo entities in an Apigee org via the management API.
# $username, $org and $url are expected to be defined by ../setenv.sh.
# NOTE(review): `source` and `read -s` are bash features under what
# appears to be a /bin/sh shebang — confirm this always runs under bash.
source ../setenv.sh
echo "Enter your password for the Apigee Enterprise organization, followed by [ENTER]:"
read -s password
echo using $username and $org
# Install API Products
#sh ./setProxy.sh $1
# curl -u $username:$password $url/v1/o/$org/apiproducts \
#  -H "Content-Type: application/json" -X POST -T FreeProduct.json
# curl -u $username:$password $url/v1/o/$org/apiproducts \
#  -H "Content-Type: application/json" -X POST -T CheapProduct.json
# curl -u $username:$password $url/v1/o/$org/apiproducts \
#  -H "Content-Type: application/json" -X POST -T ExpensiveProduct.json
# mv FreeProduct.json.orig FreeProduct.json
# mv CheapProduct.json.orig CheapProduct.json
# mv ExpensiveProduct.json.orig ExpensiveProduct.json
# Create developers
curl -u $username:$password $url/v1/o/$org/developers \
 -H "Content-Type: application/xml" -X POST -T raja.xml
# curl -u $username:$password $url/v1/o/$org/developers \
#  -H "Content-Type: application/xml" -X POST -T joe.xml
# Create apps
# curl -u $username:$password \
#  $url/v1/o/$org/developers/raja@osscube.com/apps \
#  -H "Content-Type: application/xml" -X POST -T thomas-app.xml
# curl -u $username:$password \
#  $url/v1/o/$org/developers/raja@osscube.com/apps \
#  -H "Content-Type: application/xml" -X POST -T joe-app.xml
# Get consumer key and attach API product
# Do this in a quick and clean way that doesn't require python or anything
# key=`curl -u $username:$password -H "Accept: application/json" \
#  $url/v1/o/$org/developers/raja@osscube.com/apps/thomas-app 2>/dev/null \
#  | grep consumerKey | awk -F '\"' '{ print $4 }'`
# curl -u $username:$password \
#  $url/v1/o/$org/developers/raja@osscube.com/apps/thomas-app/keys/${key} \
#  -H "Content-Type: application/xml" -X POST -T thomas-app-product.xml
# key=`curl -u $username:$password -H "Accept: application/json" \
#  $url/v1/o/$org/developers/raja@osscube.com/apps/joe-app 2>/dev/null \
#  | grep consumerKey | awk -F '\"' '{ print $4 }'`
# curl -u $username:$password \
#  $url/v1/o/$org/developers/raja@osscube.com/apps/joe-app/keys/${key} \
#  -H "Content-Type: application/xml" -X POST -T joe-app-product.xml
# key=`curl -u $username:$password -H "Accept: application/json"\
#  $url/v1/o/$org/developers/raja@osscube.com/apps/thomas-app 2>/dev/null \
#  | grep consumerKey | awk -F '\"' '{ print $4 }'`
# NOTE(review): ${key} is unset at this point — every assignment above is
# commented out, so this echoes an empty key.
echo "\n\nConsumer key for raja-app is ${key}"
# Fetch the developer record and scrape the consumerKey field out of the
# JSON with grep/awk.
key=`curl -u $username:$password -H "Accept: application/json"\
 $url/v1/o/$org/developers/raja@osscube.com/ 2>/dev/null \
 | grep consumerKey | awk -F '\"' '{ print $4 }'`
echo "Consumer key for raja0903 is ${key}\n"
| true |
bd991f95b8bcc5bc47b64b6155d138a3f9bdac30 | Shell | hsingh23/lighthouse | /lighthouse-core/scripts/download-chrome.sh | UTF-8 | 384 | 3.515625 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Download chrome inside of our CI env.

# Abort early when the destination env var is missing or empty.
# ([ -z ] replaces the legacy x"$VAR" == x idiom; the error now also
# goes to stderr where CI logs expect diagnostics.)
if [ -z "$LIGHTHOUSE_CHROMIUM_PATH" ]
then
  echo "Error: Environment variable LIGHTHOUSE_CHROMIUM_PATH not set" >&2
  exit 1
fi

# Reuse a cached binary when present; otherwise fetch the latest
# Chromium snapshot for Linux x64 and unpack it into the cwd.
if [ -e "$LIGHTHOUSE_CHROMIUM_PATH" ]
then
  echo "cached chrome found"
else
  wget 'https://download-chromium.appspot.com/dl/Linux_x64?type=snapshots' -O chrome.zip && unzip chrome.zip
fi
| true |
eb59679b2ca29e93b10230218dd24d103e8e65b6 | Shell | petronny/aur3-mirror | /evince-nognome/PKGBUILD | UTF-8 | 1,711 | 2.71875 | 3 | [] | no_license | pkgname=evince-nognome
_pkgname=evince
pkgver=2.30.3
pkgrel=1
pkgdesc="Evince document viewer without GNOME and D-Bus dependencies"
url="http://projects.gnome.org/evince/"
arch=('i686' 'x86_64')
license=('GPL')
depends=('libspectre>=0.2.6' 'gsfonts' 'poppler-glib>=0.14.0' 'libdjvu>=3.5.22' 't1lib' 'hicolor-icon-theme' 'desktop-file-utils')
makedepends=('gnome-doc-utils>=0.20.1' 'texlive-bin' 'intltool' 'gobject-introspection')
optdepends=('texlive-bin: DVI support')
# Drop-in replacement: takes over the stock "evince" package name.
replaces=('gpdf' 'evince')
conflicts=('evince')
provides=('evince')
groups=('gnome-extra')
install=evince-nognome.install
options=('!libtool' '!emptydirs')
source=(http://ftp.gnome.org/pub/gnome/sources/${_pkgname}/2.30/${_pkgname}-${pkgver}.tar.bz2)
sha256sums=('daddd9720bf8fc0156d9a4a5a85485c232393896376707cf7fcedfcbc515732f')
build() {
  cd "${srcdir}/${_pkgname}-${pkgver}"
  # Strip the gnome-icon-theme check from configure — this build
  # intentionally avoids the GNOME dependency chain.
  sed -i /gnome-icon-theme/d configure
  # Enable every document backend; disable all GNOME/D-Bus integration.
  ./configure --prefix=/usr --sysconfdir=/etc \
    --localstatedir=/var --libexecdir=/usr/lib/evince \
    --disable-static \
    --enable-pdf --enable-tiff \
    --enable-djvu --enable-dvi \
    --enable-t1lib --enable-pixbuf \
    --enable-comics --enable-impress \
    --enable-introspection \
    --disable-scrollkeeper --disable-nautilus --without-keyring \
    --without-gconf --disable-thumbnailer --disable-previewer \
    --disable-dbus
  make
  # NOTE(review): installing from build() is legacy PKGBUILD style; modern
  # makepkg expects this in a separate package() — confirm target pacman.
  make DESTDIR="${pkgdir}" install
  # Tidy the .desktop entry: drop GNOME-only keys and re-categorise.
  cd ${pkgdir}/usr/share/applications/ || return 1
  sed -i /GNOME-X/d evince.desktop
  sed -i /NoDisplay/d evince.desktop
  sed -i s/"Name=Document Viewer"/"Name=Evince"/ evince.desktop
  sed -i s/"Categories=GNOME;GTK;Graphics;VectorGraphics;Viewer;"/"Categories=Viewer;Office;Graphics;"/ evince.desktop
}
| true |
860f4c48ccd22765ca3a660b056e6ffbdf84d65e | Shell | ejtaal/scripts | /print-box-chars.sh | UTF-8 | 118 | 2.625 | 3 | [] | no_license | #!/bin/bash
# For each hex code, print "0x<code>", the raw byte, and the byte
# rendered through the DEC Special Graphics (line-drawing) charset,
# selected with ESC ( 0 and restored with ESC ( B.
for i in 6a 6b 6c 6d 6e 71 74 75 76 77 78
do
	printf "0x$i \x$i \e(0\x$i\e(B\n"
done
| true |
e53c70f02988d6e8b81baa3dbba1b2d5d3e31b3c | Shell | AperLambda/lambdacommon | /build.sh | UTF-8 | 485 | 3.3125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Some color variables.
RED='\033[0;91m'
NC='\033[0m'
# Start from a pristine tree before configuring.
sh ./clean_workspace.sh
echo "cd to build directory"
mkdir -p build/
cd build/
echo "Building project..."
# Configure with the C wrapper library enabled.
cmake -DLAMBDACOMMON_BUILD_C_WRAPPER=ON ..
# On failure at either step, return to the repo root and clean up.
if [ $? -ne 0 ]; then
    echo "${RED}Error: CMake doesn't exit with success! Cleaning...${NC}"
    cd ..
    sh ./clean_workspace.sh
else
    make -j
    if [ $? -ne 0 ]; then
        echo "${RED}Error: Make doesn't exit with success! Cleaning...${NC}"
        cd ..
        sh ./clean_workspace.sh
    fi
fi
# NOTE(review): on the failure paths we already cd'd back to the root,
# so this final 'cd ..' then ends one directory ABOVE the start; it is
# harmless for the caller (the script runs in its own shell) but confirm
# it is intentional.
cd ..
6b35d6121eef6e9b1dbc7a965623f64a24902031 | Shell | blaggacao/nixpkgs-devshell | /review-cli.sh | UTF-8 | 819 | 3.828125 | 4 | [] | no_license | #!/usr/bin/env bash
# Abort on the first failing command.
set -e
# Extended globbing is needed for the *(-)"help" patterns in the arg loop.
shopt -s extglob
usage () {
    # Print the help text on stdout: usage line, options table, commands.
    local prog
    prog=$(basename "$0")
    printf "%b\n" "\e[4mUsage\e[0m: ${prog} [OPTS] COMMAND\n"
    printf "%b\n" "\e[4mOptions\e[0m:"
    printf "  %-30s %s\n\n" "--system" "review for a different architecure"
    printf "\n\e[4mCommands\e[0m:\n"
    printf "  %-30s %s\n\n" "unstaged" "review unstaged changes"
    printf "  %-30s %s\n\n" "staged" "review staged changes"
}
opts=()
# Hand-rolled argument loop: flags accumulate into opts[] until a command
# word ("unstaged"/"staged") dispatches to nixpkgs-review.
while [ "$#" -gt 0 ]; do
  i="$1"; shift 1
  case "$i" in
    ""|"-h"|"help"|*(-)"help")
      usage
      exit 0
      ;;
    # -s/--system takes a value: the architecture to review for.
    # NOTE(review): if -s is the last argument, 'shift 1' fails and
    # set -e aborts the script without a message — confirm acceptable.
    "-s"|*(-)"system")
      j="$1"; shift 1
      opts+=("--system" "$j")
      ;;
    "unstaged")
      nixpkgs-review "${opts[@]}" wip
      exit 0
      ;;
    "staged")
      nixpkgs-review "${opts[@]}" wip --staged
      exit 0
      ;;
    # Anything unrecognised: show help and fail.
    *)
      usage
      exit 1
      ;;
  esac
done
# No command given at all: print the help text (exit status 0, since
# usage itself succeeds).
usage
| true |
da148da4e554dbe27ba8a979027bcd67a5ce2f53 | Shell | lsfgrd/linux-setup | /dotfiles/polybar/polybar.sh | UTF-8 | 343 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env bash
# Restart polybar: kill any running instances, then launch one bar per
# connected monitor. The internal laptop panel (eDP1) gets the main bar
# with the system tray; every other monitor gets the "extra" bar.
pkill polybar

for m in $(polybar --list-monitors | cut -d":" -f1); do
    # Quote $m and use portable '=' — the original unquoted '==' test
    # breaks if the monitor name is empty or contains glob characters.
    if [ "$m" = "eDP1" ]; then
        TRAY_POS=right polybar --reload main -c /home/bipirate/.config/polybar/config.ini &
    else
        MONITOR=$m polybar --reload extra -c /home/bipirate/.config/polybar/config.ini &
    fi
done

#TRAY_POS=right polybar classic
| true |
4aac998045b551d4a4e52892b0e526be6d9e9f70 | Shell | arthurzam/pkgbuild | /dargui/PKGBUILD | UTF-8 | 1,339 | 2.59375 | 3 | [] | no_license | # Contributor: nisc <rirae@gmx.net>
# Maintainer: Arthur Zamarin <arthurzam@gmail.com>
pkgname=dargui
pkgver=0.7.2
pkgrel=2
pkgdesc="GUI for the backup tool dar (disk archiver)."
# Binary upstream tarball is i686-only.
arch=('i686')
url="http://dargui.sourceforge.net/"
license=("GPL")
depends=('vte' 'gtk2' 'dar' 'xterm' 'at')
source=("http://downloads.sourceforge.net/$pkgname/${pkgname}-${pkgver}-bin.tar.gz")
sha256sums=('a699d5df7e7c6aa151c1c3cbdea54f54e03ccce3f33b622f14c7471795f1559b')
package() {
  cd "$srcdir/$pkgname-$pkgver/"
  # Install straight into $pkgdir (provided by makepkg) instead of the
  # original fragile "../../pkg" relative path, which only resolved
  # correctly from one specific working-directory layout.
  mkdir -p "$pkgdir/usr/share/doc/dargui"
  mkdir -p "$pkgdir/usr/share/man/man1"
  mkdir -p "$pkgdir/usr/share/dargui/locales"
  mkdir -p "$pkgdir/usr/share/menu"
  mkdir -p "$pkgdir/usr/share/applications"
  mkdir -p "$pkgdir/usr/share/pixmaps"
  mkdir -p "$pkgdir/usr/bin"
  # Documentation, man page, menu/desktop entries and icons.
  cp -vR doc/* "$pkgdir/usr/share/doc/dargui/"
  cp -v doc/copyright "$pkgdir/usr/share/doc/dargui/"
  cp -v man/dargui.1.gz "$pkgdir/usr/share/man/man1/"
  cp -v menu/* "$pkgdir/usr/share/menu/"
  chmod 644 "$pkgdir/usr/share/menu/dargui"
  cp -v applications/* "$pkgdir/usr/share/applications/"
  chmod 644 "$pkgdir/usr/share/applications/dargui.desktop"
  cp -v pixmaps/* "$pkgdir/usr/share/pixmaps/"
  # Helper scripts, logger, translations, and the main binary.
  cp -v scripts/* "$pkgdir/usr/share/dargui/"
  cp -v darlogger "$pkgdir/usr/share/dargui/"
  cp -v locales/* "$pkgdir/usr/share/dargui/locales/"
  cp -v dargui "$pkgdir/usr/bin/"
}
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.