blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4259342afc1477f4d8d268ad31cd718b04481a96
|
Shell
|
pjensen7/Week4_Assignment
|
/guessinggame.sh
|
UTF-8
| 536
| 3.9375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
clear
# Print the game's welcome banner and instructions.
Introduction() {
  printf '\n%s\n\n%s\n\n' \
    "Hello.....would you like to play a game?" \
    "Try to guess how many files are in the current directory"
}
Introduction
# Count entries in the current directory.
# NOTE(review): `ls -1 | wc -l` miscounts filenames containing newlines;
# acceptable for a simple game.
FileNumber=$(ls -1 | wc -l)
response=
until [[ $response -eq $FileNumber ]]; do
  # -r: keep backslashes intact; bail out instead of spinning forever on EOF
  if ! read -r response; then
    echo "No input available; giving up." >&2
    exit 1
  fi
  if [[ $response -gt $FileNumber ]]; then
    # fixed typo: "to high" -> "too high"
    echo "Sorry $response is too high please guess again."
  elif [[ $response -lt $FileNumber ]]; then
    echo "Sorry $response is too low please guess again"
  fi
done
echo "Congratulations! You guessed correctly!"
| true
|
fc1d2d7940bc2352cc8b619ddb795e18f9cee0ee
|
Shell
|
oalbiez/docker-subcommand-demo
|
/start
|
UTF-8
| 550
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Match the in-container user to the owner of the bind-mounted volume so
# files created inside keep the host user's uid/gid.
VOLUME=/usr/src/app
USER=user
GROUP=user
# uid/gid that own the mounted volume on the host
VOLUME_UID=$(stat -c "%u" $VOLUME )
VOLUME_GID=$(stat -c "%g" $VOLUME )
if [ "$VOLUME_UID" != "0" ]
then
# create a matching group/user only if missing
# (getent returns non-zero when the entry does not exist)
getent group $VOLUME_GID
[ $? -gt 0 ] && groupadd --gid $VOLUME_GID $GROUP
getent passwd $VOLUME_UID
[ $? -gt 0 ] && useradd --create-home --uid $VOLUME_UID --gid $VOLUME_GID $USER
else
# volume owned by root: just run as root
USER="root"
fi
# first argument selects the subcommand; default "cli" = interactive shell
COMMAND=${1:-cli}
shift 1
set -x
if [ "$COMMAND" == "cli" ]
then
exec su $USER
else
# re-quote the remaining args with %q so they survive the `su -c` shell
exec su $USER -c "/usr/local/bin/$COMMAND $(printf " %q" "$@")"
fi
| true
|
7275f7b37e4963248d3186cd386b48f11693132c
|
Shell
|
AaronOldenburg/TerminalJam
|
/q-journal/JournalCode/dostats.sh
|
UTF-8
| 984
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Output game stats
# Fix: the shebang must be the very first line of the file or the kernel
# ignores it; the original had the comment line above it.
set -euo pipefail
IFS=$'\n\t'
filename="../stats.txt"   # stats log, one game round appended per run
DATE=$(date +%c)          # locale-formatted timestamp for this round
health=100                # starting health (overwritten from file if it exists)
MAX_HIT=50                # health delta per round is in (-MAX_HIT, MAX_HIT)
# Append one round of game stats to $filename.
# Reads globals: filename, DATE, MAX_HIT. Updates global: health.
# The health delta is a uniform random value in (-MAX_HIT, MAX_HIT).
update_file() {
  local range_double=$(( MAX_HIT*2 ))
  local hit_amt=$(( (RANDOM % range_double) - MAX_HIT ))
  # Fix: use fixed format strings — interpolating $DATE/$hit_amt into the
  # printf format word-splits on spaces and breaks on '%' characters.
  printf '\n\nOn %s, ' "$DATE" >> "$filename"
  if [ "$hit_amt" -gt 0 ]; then
    printf 'you gained %s health.\n' "$hit_amt" >> "$filename"
  elif [ "$hit_amt" -lt 0 ]; then
    printf 'you lost %s health.\n' "$hit_amt" >> "$filename"
  else
    printf 'your health stayed the same.\n' >> "$filename"
  fi
  health=$(( health + hit_amt ))
  if [ "$health" -gt 0 ]; then
    printf 'You are alive.\n' >> "$filename"
  else
    printf 'You are dead.\n' >> "$filename"
  fi
  # health value goes on its own (final, newline-less) line so the next run
  # can recover it with `tail -n 1`
  printf 'Health now is:\n' >> "$filename"
  printf '%s' "$health" >> "$filename"
}
# Seed the stats file on first run, then append this round's entry.
if [[ -f $filename ]]; then
  # the file's last line holds the current health value
  health=$(tail -n 1 "$filename")
  update_file
else
  # Fix: quote the format string. The unquoted `printf $defaultText`
  # word-split the text, so only "Your" was ever written as the header.
  defaultText="Your game stats.\n\n"
  printf "$defaultText" > "$filename"
  update_file
fi
| true
|
d02cf5024d2affa9adae42e91503f8d672722e15
|
Shell
|
pajamapants3000/docker-infra
|
/devcontainer/library-scripts/create-dotnet-devcert.sh
|
UTF-8
| 2,500
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# May want to make these customizable
# May want to make these customizable
CERT_PW=""
DEVCERT_PATH=/aspnet/https
DEVCERT_NAME=dotnet-devcert
#
TMP_PATH=/var/tmp/localhost-dev-cert
if [ ! -d "$TMP_PATH" ]; then
  mkdir "$TMP_PATH"
fi
if [ ! -d "$DEVCERT_PATH" ]; then
  mkdir -p "$DEVCERT_PATH"
  chmod -R 755 "$DEVCERT_PATH"
fi
KEYFILE=$DEVCERT_NAME.key
CRTFILE=$DEVCERT_NAME.crt
PFXFILE=$DEVCERT_NAME.pfx
# NSS databases that should trust the dev certificate (if they exist)
NSSDB_PATHS=(
"$HOME/.pki/nssdb"
"$HOME/snap/chromium/current/.pki/nssdb"
"$HOME/snap/postman/current/.pki/nssdb"
)
CONF_PATH=$TMP_PATH/localhost.conf
# Fix: truncate (>) instead of append (>>). Appending meant every re-run
# duplicated the whole config, producing a corrupt openssl config file.
cat > "$CONF_PATH" <<EOF
[req]
prompt = no
default_bits = 2048
distinguished_name = subject
req_extensions = req_ext
x509_extensions = x509_ext
[subject]
commonName = clutter-noteservice
[req_ext]
basicConstraints = critical, CA:true
subjectAltName = @alt_names
[x509_ext]
basicConstraints = critical, CA:true
keyUsage = critical, keyCertSign, cRLSign, digitalSignature,keyEncipherment
extendedKeyUsage = critical, serverAuth
subjectAltName = critical, @alt_names
1.3.6.1.4.1.311.84.1.1 = ASN1:UTF8String:ASP.NET Core HTTPS development certificate # Needed to get it imported by dotnet dev-certs
[alt_names]
DNS.1 = clutter-noteservice
DNS.2 = localhost
EOF
# Install the dev certificate into one NSS database ($1: nssdb directory).
# Fix: quote $1 and $TMP_PATH/$CRTFILE so paths containing spaces
# (e.g. under $HOME) are passed intact.
function configure_nssdb() {
    echo "Configuring nssdb for $1"
    # remove any stale copy first, then add with "trusted peer" CA trust
    certutil -d "sql:$1" -D -n "$DEVCERT_NAME"
    certutil -d "sql:$1" -A -t "CP,," -n "$DEVCERT_NAME" -i "$TMP_PATH/$CRTFILE"
}
# Generate a self-signed key + cert from the config above.
# NOTE(review): `--passout` (double dash) seems accepted by modern openssl,
# but the documented form is `-passout` — confirm on the target openssl.
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout $TMP_PATH/$KEYFILE -out $TMP_PATH/$CRTFILE -config $CONF_PATH --passout pass:$CERT_PW
# bundle key + cert into a PKCS#12 file for `dotnet dev-certs`
openssl pkcs12 -export -out $TMP_PATH/$PFXFILE -inkey $TMP_PATH/$KEYFILE -in $TMP_PATH/$CRTFILE --passout pass:$CERT_PW
# trust the cert in every NSS database present on this machine
for NSSDB in ${NSSDB_PATHS[@]}; do
if [ -d "$NSSDB" ]; then
configure_nssdb $NSSDB
fi
done
echo Installing public certificate to environment
rm -vf /etc/ssl/certs/$DEVCERT_NAME.pem
cp -v $TMP_PATH/$CRTFILE "/usr/local/share/ca-certificates"
update-ca-certificates
echo Installing public and private keys for dotnet and preserve for reference and reuse
dotnet dev-certs https --clean --import $TMP_PATH/$PFXFILE -p "$CERT_PW"
cp -v $TMP_PATH/$PFXFILE $TMP_PATH/$CRTFILE $DEVCERT_PATH/
chmod -vR 644 $DEVCERT_PATH/*
# tidy up the scratch dir; final paths are echoed for the user
rm -vR $TMP_PATH
echo "Dev cert path: $DEVCERT_PATH/$PFXFILE"
echo "Dev cert password: \"$CERT_PW\""
echo "Public cert: $DEVCERT_PATH/$CRTFILE"
| true
|
e0577521af93f991cdb7c52d2605ec25dd9ddbde
|
Shell
|
freecracy/gvm
|
/gvm
|
UTF-8
| 4,300
| 4.15625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
set -o pipefail
{
# Paths and constants for this Go version manager
BASE_PATH=$HOME/.gvm
GOPATH=$HOME/.go
GOBIN=$GOPATH/bin
GVMRC=$HOME/.gvmrc
VERSION_REPO=https://github.com/golang/dl.git
BASE_DOWNLOAD_URL=https://dl.google.com/go/
TARPATH=$BASE_PATH/tar
GOHOSTOS=linux
VERSION=
# create working directories / rc file on first run
[[ ! -d $BASE_PATH ]] && mkdir -p $BASE_PATH
[[ ! -d $TARPATH ]] && mkdir -p $TARPATH
[[ ! -d $GOPATH ]] && mkdir -p $GOPATH
[[ ! -e $GVMRC ]] && touch $GVMRC
[[ $(uname -s) == "Darwin" ]] && GOHOSTOS="darwin"
# clone golang/dl, which lists every released Go version
if [[ ! -d $BASE_PATH/dl ]]; then
git clone $VERSION_REPO $BASE_PATH/dl >/dev/null 2>&1 || {
printf -- " 安装git后重试\n"
exit
}
fi
# help text (messages are in Chinese): list/install/select/version/remove/help
if [[ $1 == "help" || $1 == "-h" || $1 == "" ]]; then
printf -- "Use Age:\n"
printf -- " gvm list \v 列出已安装版本\n"
printf -- " gvm list all \v 列出用版本\n"
printf -- " gvm [add|install] [version] \v 安装对应版本\n"
printf -- " gvm [use|select] [version] \v 选择对应版本\n"
printf -- " gvm version \v 显示当前使用版本\n"
printf -- " gvm [rm|remove|uninstall] [version|gvm] \v 卸载对应版本或卸载gvm\n"
printf -- " gvm [help|-h] \v 显示帮助信息\n"
fi
# "gvm list": header only — NOTE(review): the installed versions themselves
# are never printed; looks unfinished.
if [[ $1 == "list" && $2 == "" ]]; then
printf -- "已安装以下版本:\n"
fi
# "gvm list all": stable versions known to the golang/dl checkout
if [[ $1 == "list" && $2 == "all" ]]; then
printf -- "可选版本:\n"
versionlist=
for version in $(ls $BASE_PATH/dl | grep -E "go[0-9]?\.[0-9]{1,2}" | grep -v 'beta' | grep -v 'rc'); do
versionlist+=$version" "
done
printf -- "%s " $versionlist
printf -- "\n"
fi
# Persist the environment for version $1 into ~/.gvmrc and source it so the
# current shell picks it up immediately.
function setenv() {
VERSION=$1
GVM=true
GVMVERSION=$VERSION
GOROOT=$BASE_PATH/$VERSION
GOPATH=$GOPATH
GOBIN=$GOPATH/bin
gvmrc="export GVM=true;"
gvmrc+="export GVMVERSION=$GVMVERSION;"
gvmrc+="export GOROOT=$GOROOT;"
gvmrc+="export GOPATH=$GOPATH;"
gvmrc+="export PATH=$GOBIN:$GOROOT/bin:$PATH;"
echo $gvmrc >$GVMRC
source $GVMRC
}
# gvm add|install <version>: download, unpack and activate a Go release.
# Fix: parenthesize the alternatives — `&&` binds tighter than `||` inside
# [[ ]], so the original accepted `gvm add` with no version argument.
if [[ ( $1 == "add" || $1 == "install" ) && $2 != "" ]]; then
    printf -- "[1/4] 开始下载...\n"
    VERSION=$2
    downloadfilename=$VERSION.$GOHOSTOS-amd64.tar.gz
    downloadfile=$BASE_PATH/tar/$downloadfilename
    downloadurl=$BASE_DOWNLOAD_URL$downloadfilename
    # download the tarball once; prefer curl, fall back to wget
    if [[ ! -e $downloadfile ]]; then
        if command -v curl >/dev/null; then
            curl -L $BASE_DOWNLOAD_URL$VERSION.$GOHOSTOS-amd64.tar.gz >$BASE_PATH/tar/$VERSION.$GOHOSTOS-amd64.tar.gz
        elif command -v wget >/dev/null; then
            wget -P $BASE_PATH/tar $BASE_DOWNLOAD_URL$VERSION.$GOHOSTOS-amd64.tar.gz
        else
            printf -- "\t安装wget后重试...\n"
            exit
        fi
    fi
    printf -- "[2/4] 开始解压...\n"
    # the tarball extracts to "go/"; rename it to the version name
    if [[ ! -d $BASE_PATH/$VERSION ]]; then
        tar -xvzf $BASE_PATH/tar/$VERSION.$GOHOSTOS-amd64.tar.gz -C $BASE_PATH
        mv $BASE_PATH/go $BASE_PATH/$VERSION
    fi
    printf -- "[3/4] 开始安装...\n"
    # first install: hook ~/.gvmrc into the user's shell rc files
    if [[ ! $GVM ]]; then
        setinit="source $GVMRC"
        [[ -e $HOME/.zshrc ]] && echo $setinit >>$HOME/.zshrc && source $HOME/.zshrc
        [[ -e $HOME/.bashrc ]] && echo $setinit >>$HOME/.bashrc && source $HOME/.bashrc
    fi
    setenv $VERSION
    printf -- "[4/4] 安装完成\n"
    source $GVMRC
    # Fix: was `gvm sersion` (typo, an unknown subcommand); show the
    # now-active Go version instead.
    gvm version
fi
# gvm rm|remove|uninstall <version|gvm>: delete a release, or gvm itself.
# Fix: parentheses correct the &&/|| precedence so every alternative
# requires a second argument (the original ran with $2 empty for
# "remove"/"uninstall", risking `rm -rf` on the wrong paths).
if [[ ( $1 == "remove" || $1 == "uninstall" || $1 == "rm" ) && $2 != "" ]]; then
    printf -- "卸载$2\n"
    if [[ $2 == "gvm" ]]; then
        sudo rm -rf /usr/local/bin/gvm && rm -rf $BASE_PATH && rm -rf $GVMRC
    fi
    if [[ -d $BASE_PATH/$2 ]]; then
        rm -rf $BASE_PATH/$2
        [[ -f $GOPATH/bin/go ]] && rm -rf $GOPATH/bin/go
    fi
fi
# gvm use|select <version>: switch the active Go version (same precedence fix).
if [[ ( $1 == "use" || $1 == "select" ) && $2 != "" ]]; then
    VERSION=$2
    printf -- "选择版本$VERSION\n"
    setenv $VERSION
    source $GVMRC
fi
# gvm version|-v: print the active Go toolchain version
if [[ $1 == "version" || $1 == "-v" ]]; then
    if command -v go >/dev/null; then
        go version
    fi
fi
}
| true
|
33ba3cfd19bf5602b87cfb8b347d8aa060db0cf3
|
Shell
|
gianricardo/smart
|
/Models/QueenCoverGen
|
UTF-8
| 2,511
| 3.765625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
#
# Generate an instance of the queen cover problem
#
#1 : i
#2 : j
#3 : N
#1 : i (row of the square)
#2 : j (column of the square)
#3 : N (board dimension)

# Walk one diagonal from (i,j) in direction (dr,dc), printing every square
# visited; emit the terminating newline only if at least one was printed.
diag_line()
{
    local dr=$1 dc=$2 i=$3 j=$4 n=$5 any=0
    while (( i + dr >= 1 && i + dr <= n && j + dc >= 1 && j + dc <= n )); do
        (( i += dr ))
        (( j += dc ))
        printf "(q_%d_%d>0) | " "$i" "$j"
        any=1
    done
    if (( any )); then
        printf "\n"
    fi
}

# Emit the constraint that square ($1,$2) on a $3 x $3 board is covered by
# some queen in its row, column, or diagonals — or by a queen on the square.
# (Uses $((...)) / (( )) in place of the deprecated $[...] and `[ -a ]`.)
cover()
{
    local r=$1 c=$2 n=$3 k

    printf "\n// Make sure we cover square %d,%d\n\n" "$r" "$c"

    # check column (every other row, same column)
    for (( k = 1; k <= n; k++ )); do
        if (( k != r )); then
            printf "(q_%d_%d>0) | " "$k" "$c"
        fi
    done
    printf "\n"

    # check row (every other column, same row)
    for (( k = 1; k <= n; k++ )); do
        if (( k != c )); then
            printf "(q_%d_%d>0) | " "$r" "$k"
        fi
    done
    printf "\n"

    # four diagonals — NW, SE, NE, SW, in the original output order
    diag_line -1 -1 "$r" "$c" "$n"
    diag_line  1  1 "$r" "$c" "$n"
    diag_line -1  1 "$r" "$c" "$n"
    diag_line  1 -1 "$r" "$c" "$n"

    # ourself
    printf "(q_%d_%d>0);\n" "$r" "$c"
}
# Entry point: $1 is the board dimension N.
# NOTE(review): $[ ... ] below is deprecated Bash arithmetic; prefer $(( )).
if [ $# -lt 1 ]; then
printf "Usage: %s N\nwhere:\n\tN is the dimension of the board\n" $0
exit 1
fi
# emit the model header
printf "\n/*\n Queen cover problem.\n\n"
printf " Try to put queens onto a %d x %d chessboard so that\n" $1 $1
printf " all empty squares are attacked by some queen\n*/\n\n"
printf "// Define the state: for each square, count number of queens\n"
# declare one 0/1 variable per square, one "int ... in {0..1};" line per row
n="1"
while [ $n -le $1 ]; do
printf "\nint ";
m="1"
while [ $m -le $1 ]; do
if [ $m -gt 1 ]; then
printf ", "
fi
printf "q_%d_%d" $n $m
m=$[m + 1]
done
printf " in {0..1};"
n=$[n + 1]
done
printf "\n"
# one coverage constraint per square
n="1"
while [ $n -le $1 ]; do
m="1"
while [ $m -le $1 ]; do
cover $n $m $1
m=$[m + 1]
done
n=$[n + 1]
done
# for q = 1 .. N-1, ask whether q queens suffice (sum of all squares == q)
q="1"
while [ $q -lt $1 ]; do
printf "\n\n// Get solution for %d queens\n\n" $q
printf "satisfiable cover_with_%d :=" $q
n="1"
p="0"
while [ $n -le $1 ]; do
printf "\n"
m="1"
while [ $m -le $1 ]; do
if [ $p -gt 0 ]; then
printf " + "
else
printf " "
p="1"
fi
printf "q_%d_%d" $n $m
m=$[m + 1]
done
n=$[n + 1]
done
printf " == %d;\n" $q
q=$[q + 1]
done
| true
|
ce4a326f15adbfaba01b8351d1b459a4cdf164de
|
Shell
|
cvigilv/puntos
|
/sxiv/exec/key-handler
|
UTF-8
| 548
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Rotate every marked image in place by $1 degrees.
# stdin: one filename per line (as sxiv passes marked files).
rotate() {
    degree="$1"
    # resolve to unique real paths; jpegtran for JPEGs (lossless), mogrify otherwise.
    # Fix: `read -r` so backslashes in filenames are not mangled.
    tr '\n' '\0' | xargs -0 realpath | sort | uniq | while read -r file; do
        case "$(file -b -i "$file")" in
        image/jpeg*) jpegtran -rotate "$degree" -copy all -outfile "$file" "$file" ;;
        *) mogrify -rotate "$degree" "$file" ;;
        esac
    done
}
# sxiv key handler: stdin carries the marked-file list, $1 the key chord.
# Fix: `read -r` everywhere so backslashes in filenames survive.
case "$1" in
"C-c") while read -r file; do xclip -selection clipboard -target image/png "$file"; done ;;
"C-e") while read -r file; do gimp "$file" & done ;;
"C-E") while read -r file; do rawtherapee "$file" & done ;;
"C-r") rotate 90 ;;
"C-R") rotate 180 ;;
esac
| true
|
537ba14147974de0d90c045df5b7bb1f5169be5e
|
Shell
|
fr4nc3sc4/BinBench_
|
/docker/spawn_images.sh
|
UTF-8
| 1,874
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Compiler matrices: one Docker image is built (in the background) per entry.
gcc=( "gcc-10" "gcc-9" "gcc-8" "gcc-7" "gcc-6" )
clang=( "clang-11" "clang-10" "clang-8" "clang-6" "clang-4" )
# prebuilt LLVM tarballs for clang versions not packaged by the base image
clang10="https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz"
clang8="https://releases.llvm.org/8.0.0/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz"
clang6="https://releases.llvm.org/6.0.0/clang+llvm-6.0.0-x86_64-linux-gnu-ubuntu-16.04.tar.xz"
clang4="https://releases.llvm.org/4.0.0/clang+llvm-4.0.0-x86_64-linux-gnu-ubuntu-16.10.tar.xz"
# background build PIDs, keyed by compiler name
declare -A pids
# Fix: quote array expansions ("${arr[@]}") so entries are not re-split (SC2068).
for gcc_ver in "${gcc[@]}"; do
  c_compiler_name="gcc"
  cxx_compiler_name="g++"
  compiler_version="${gcc_ver##*-}"
  package_name="$c_compiler_name$compiler_version"
  echo "building docker image for $gcc_ver"
  docker build --build-arg C_COMPILER_NAME=$c_compiler_name --build-arg CXX_COMPILER_NAME=$cxx_compiler_name \
    --build-arg COMPILER_VERSION=$compiler_version --build-arg PACKAGE_NAME=$package_name -f Dockerfile --rm \
    -t binbench/$gcc_ver . &
  pids[$gcc_ver]=$!
done
for clang_ver in "${clang[@]}"; do
  c_compiler_name="clang"
  cxx_compiler_name="clang++"
  compiler_version="${clang_ver##*-}"
  # NOTE(review): clang-11 matches no branch here, so package_name keeps its
  # value from the previous iteration (a gcc name on the first pass) — likely
  # a bug; confirm the intended clang-11 tarball URL.
  if [[ $compiler_version == "10" ]]; then
    package_name=$clang10
  elif [[ $compiler_version == "8" ]]; then
    package_name=$clang8
  elif [[ $compiler_version == "6" ]]; then
    package_name=$clang6
  elif [[ $compiler_version == "4" ]]; then
    package_name=$clang4
  fi
  echo "building docker image for $clang_ver"
  docker build --build-arg C_COMPILER_NAME=$c_compiler_name --build-arg CXX_COMPILER_NAME=$cxx_compiler_name \
    --build-arg COMPILER_VERSION=$compiler_version --build-arg PACKAGE_NAME=$package_name -f Dockerfile --rm \
    -t binbench/$clang_ver . &
  pids[$clang_ver]=$!
done
# barrier: wait for every background build, gcc first, then clang
for gcc_ver in "${gcc[@]}"; do
  wait "${pids[$gcc_ver]}"
done
for clang_ver in "${clang[@]}"; do
  wait "${pids[$clang_ver]}"
done
| true
|
69265156542aa2a8de356d86cb0843fbbeff6a09
|
Shell
|
mengyuan411/au-wing-cmp
|
/script/decisiontree_daemon.sh
|
UTF-8
| 4,837
| 3.203125
| 3
|
[] |
no_license
|
#compare dmac_avg dmac_50th dmac_90th dmaci_avg dmaci_50th dmaci_90th
if [ $# -lt 4 ]
then
# NOTE(review): the usage text names "lccs_decision_daemon.sh" but this file
# is decisiontree_daemon.sh — confirm which name is actually deployed.
echo "Usage: lccs_decision_daemon.sh <gap_time> <wlan_type> <roundnum> <channel> "
echo " "
echo " gap_time : gap_time=10 means every 10s we will get a slow or fast"
echo " wlan_type : wlan0 | wlan1"
echo " roundnum : the num of all gaps in one same channel"
echo " channel : which channel work on "
#echo " T_silent_time : T_silent=30 means even after T_run_time, we have to find the "
#echo " relative silent time to adjuct the channel"
#echo " T_silent : T_silent=0 means if wap sends less than 0 bytes "
#echo " continue_flag : 1|0 if execute the algorithm continuely or just one round"
#echo " in $prob_intervals interverl, then flag this $prob_interval seconds as siltent"
exit 1
fi
gap_time=$1
roundnum=$3
channel=$4
# this AP's MAC address (colons stripped), used in the upload directory paths
mac=`ifconfig wlan0 | grep HWaddr | awk '{print $5}'|sed 's/://g'`
#sleep 124
#state_large=0
#pre_bytes=0
#prob_interval=10
#run_time=`expr 0 - $prob_interval`
#silent_time=`expr 0 - $prob_interval`
#flag_sent_data=1
#flag_time_to_change=0
#flag_choose=0 #if continue choose a new channel
#init every ap to channel 1 at first
type=0
# map the wlan interface name ($2) to its uci radio section
if [ "$2" == "wlan0" ]
then
type="radio0"
else
type="radio1"
fi
#echo "chan1,0" > /tmp/wifiunion-uploads/decisiontree_decision_$2.txt
#echo "chan6,0" >> /tmp/wifiunion-uploads/decisiontree_decision_$2.txt
#echo "chan11,0" >> /tmp/wifiunion-uploads/decisiontree_decision_$2.txt
before_exec_time=`date '+%s'`
while true
do
# exec_time=`date '+%s'`
# echo "[$exec_time]: I will change $type to Channel:1"
# uci set wireless.$type.channel=1
# uci commit
# hostapd_cli chan_switch 10 2412
# echo '0' > /tmp/wifiunion-uploads/$mac/wlan0_slow
# for i in `seq 1 $roundnum`
# do
# echo '' > /tmp/wifiunion-uploads/$mac/wlan0_channel
# echo '' > /tmp/wifiunion-uploads/$mac/wlan0_station
# sleep $gap_time
# # echo "round $i"
# /lib/pch/decisiontree.sh
# echo "round $i"
# done
# SLOW=`cat /tmp/wifiunion-uploads/$mac/wlan0_slow | grep -o "[-0-9]*"`
# slowratio=`expr $SLOW "*" 100 "/" $roundnum`
# sed -i "1s/.*/chan1,$(echo $slowratio)/" /tmp/wifiunion-uploads/decisiontree_decision_$2.txt
# exec_time=`date '+%s'`
# echo "[$exec_time]: I will change $type to Channel:6"
# uci set wireless.$type.channel=6
# uci commit
# hostapd_cli chan_switch 10 2437
# echo '0' > /tmp/wifiunion-uploads/$mac/wlan0_slow
# for i in `seq 1 $roundnum`
# do
# echo '' > /tmp/wifiunion-uploads/$mac/wlan0_channel
# echo '' > /tmp/wifiunion-uploads/$mac/wlan0_station
# sleep $gap_time
# /lib/pch/decisiontree.sh
# done
#
# SLOW=`cat /tmp/wifiunion-uploads/$mac/wlan0_slow | grep -o "[-0-9]*"`
# slowratio=`expr $SLOW "*" 100 "/" $roundnum`
# sed -i "2s/.*/chan6,$(echo $slowratio)/" /tmp/wifiunion-uploads/decisiontree_decision_$2.txt
#
# Switch the AP to the requested channel ($4): persist via uci, then do a
# live channel-switch announcement (10-beacon countdown) via hostapd.
exec_time=`date '+%s'`
echo "[$exec_time]: I will change $type to Channel:$4"
uci set wireless.$type.channel=$4
uci commit
# map channel number -> 2.4 GHz center frequency (11->2462, 6->2437, else 2412)
if [ $4 == 11 ]
then
hostapd_cli chan_switch 10 2462
elif [ $4 == 6 ]
then
hostapd_cli chan_switch 10 2437
else
hostapd_cli chan_switch 10 2412
fi
# reset the slow counter, then sample roundnum gaps of gap_time seconds each
echo '0' > /tmp/wifiunion-uploads/$mac/wlan0_slow
for i in `seq 1 $roundnum`
do
echo '' > /tmp/wifiunion-uploads/$mac/wlan0_channel
echo '' > /tmp/wifiunion-uploads/$mac/wlan0_station
sleep $gap_time
/lib/pch/decisiontree.sh
done
# SLOW = number of gaps the classifier marked as slow; convert to a percentage
SLOW=`cat /tmp/wifiunion-uploads/$mac/wlan0_slow | grep -o "[-0-9]*"`
slowratio=`expr $SLOW "*" 100 "/" $roundnum`
#tmpstring='chan'
# record "chanN,ratio" on line 3 of the per-interface decision file
sed -i "3s/.*/'chan'${channel},$(echo $slowratio)/" /tmp/wifiunion-uploads/decisiontree_decision_$2.txt
# single-shot mode: leave the `while true` loop after one pass
break
#slowratio1=`cat /tmp/wifiunion-uploads/decisiontree_decision_$2.txt | grep chan1, | awk -F ',' '{print $2}'`
#echo "channel 1 slowratio1: $slowratio1"
#slowratio6=`cat /tmp/wifiunion-uploads/decisiontree_decision_$2.txt | grep chan6 | awk -F ',' '{print $2}'`
#echo "channel 6 slowratio6: $slowratio6"
#slowratio11=`cat /tmp/wifiunion-uploads/decisiontree_decision_$2.txt | grep chan11 | awk -F ',' '{print $2}'`
#echo "channel 11 slowratio11: $slowratio11"
#chan=0
#if [ $slowratio1 -lt $slowratio6 ]
#then
# slowratio=$slowratio1
# chan=1
#else
# slowratio=$slowratio6
# chan=6
#fi
#if [ $slowratio11 -lt $slowratio ]
#then
# slowratio=$dmac_avg11
# chan=11
#fi
#exec_time=`date '+%s'`
#echo "[$exec_time]: I will change $type to Channel:$chan"
#uci set wireless.$type.channel=$chan
#uci commit
#if [ $chan -eq 1 ]
# then
# hostapd_cli chan_switch 10 2412
#elif [ $chan -eq 6 ]
# then
# hostapd_cli chan_switch 10 2437
#else
# hostapd_cli chan_switch 10 2462
#fi
#sleep `expr $roundnum \* $gap_time`
done
| true
|
762a7a01b5a3c09acf59a0d0b536ed71c758860f
|
Shell
|
chernic/Cacti
|
/YumLAMP_Cacti/cactiroot/CrondDeviceCheck.sh
|
UTF-8
| 5,976
| 3.875
| 4
|
[] |
no_license
|
#!/bin/sh
# Pause until the operator confirms with "y"; "q" aborts the script.
# Fix: `read -p` prints its prompt literally, so the \033 color escapes were
# shown as text — render the prompt with printf, then read with -r.
function BreakPoint()
{
    while [ "y" != "$AUTO_FLAG_yn" ]
    do
        printf "\033[33mDo you Make Sure to Continue? [y/n/q] \033[0m"
        read -r AUTO_FLAG_yn
        [ "$AUTO_FLAG_yn" == "q" ] && exit 0;
    done
    # reset so the next BreakPoint call prompts again
    AUTO_FLAG_yn="n"
}
# Abort the whole script unless the effective user is root.
function NotRootOut()
{
    [ "$(id -u)" != "0" ] && echo "Error: You must be root to run this script" && exit 1
}
# @function Log2File
# @brief Write one log entry to a file.
# @param1 LogLevel (string)   severity label
# @param2 LogContext (string) log message body
# @param3 LogFile (string)    target log file (empty = stdout)
# @return none
# Write one timestamped, level-tagged log line — to $3 when given,
# otherwise to stdout. `echo -e` so color escapes in the level render.
function Log2File()
{
    local level=$1
    local msg=$2
    local file=$3
    local stamp
    stamp="$(date '+%Y-%m-%d %H:%M:%S')"
    local line="[${stamp}][${level}]: ${msg}"
    if [ -z "$file" ]; then
        echo -e "$line"
    else
        echo -e "$line" >> "$file"
    fi
}
# @function LOG_INFO,LOG_WARN,LOG_ERROR
# @brief Write a log entry at a fixed, color-coded severity.
# @param1 LogContext (string) log message body
# @param2 LogFile (string)    target log file (empty = stdout)
# @return none
# Convenience wrappers around Log2File with ANSI-colored level labels
# (green info, yellow warn, red error).
function LOG_INFO() { Log2File "\033[32minfo\033[0m" "$1" "$2"; }
function LOG_WARN() { Log2File "\033[33mwarn\033[0m" "$1" "$2"; }
function LOG_ERROR() { Log2File "\033[31merror\033[0m" "$1" "$2"; }
NotRootOut;
############### Template Version 0.1.2- #############
#####################################################
# Function Of Device Check
# Run this Script every 5 min on CACTI SERVER.
# Format should be : HOSTNAME:<IP-Addr>:<Description>
#####################################################
# Version : 0.0.1
# Make by Chernic.Y.Chen @ China
# E-Mail : iamchernic@gmail.com
# Date : 2014-08-07
# v0.0.1(2014-08-07) : File Created
# v0.1.0(2014-08-08) : Version 1 Done. Local Tested.
DB_user="cacticn" # database user (default: cacticn)
DB_pswd="cacticn" # database password (default: cacticn)
PHP_PATH="/usr/bin" # path to the PHP binary
CHK_PATH=`pwd` # "/usr/share/cacti/"
CLI_PATH="/var/www/html/cacticn" # Cacti
LOG_INFO "Read $CHK_PATH/cacti_clients"
# For each "HOSTNAME:<IP>:<Description>" line in cacti_clients: register the
# device in Cacti, attach its graphs/disks, and add it to the graph tree.
while read line
do
# extract the IP and description fields
IP=`echo -e "$line" | grep "HOSTNAME" | awk -F: '{print $2}'`
DESC=`echo -e "$line" | grep "HOSTNAME" | awk -F: '{print $3}'`
Device_Flag="NEW"
#IP=192.168.1.2
#DESC="Chernic"
# classify the entry against hosts Cacti already knows
if [ "$DESC" != "" ];then
# does this description already map to an IP in Cacti?
# LOG_INFO "\n`$PHP_PATH/php -q $CLI_PATH//cli/add_data_query.php --list-hosts | grep "$DESC"`"
for i in `$PHP_PATH/php -q $CLI_PATH/cli/add_data_query.php --list-hosts | grep "$DESC" | sed 's/\t/,/g' | awk -F, '{print $2}'`
do
if [ "$IP" == "$i" ];then
Device_Flag="IPDESC"
break;
fi
Device_Flag="DESC"
done
else
Device_Flag="NODESC"
fi
# decide what to do for this entry
case $Device_Flag in
NEW)
LOG_INFO "A1 ADD New";
;;
DESC)
LOG_INFO "A2 ADD weN";
OLDIP="$i"
NUM=0;
# description exists with a different IP: resolve the name conflict
while [ "$OLDIP" != "" ];
do
# append a numeric suffix to the description
NDESC="$DESC""_""$NUM";
# re-check whether the suffixed name still collides
OLDIP=`$PHP_PATH/php -q $CLI_PATH/cli/add_data_query.php --list-hosts | grep -w "\b$NDESC\b" | sed 's/\t/,/g' | awk -F, '{print $2}'`
# bump the suffix and try again
NUM=$[$NUM+1];
done
DESC="$NDESC"
;;
IPDESC)
LOG_WARN "A3 Added yet, $IP $DESC";
continue;
;;
NODESC)
LOG_INFO "A4 ADD None";
continue;
;;
*)
LOG_ERROR "A5 ADD What";
continue;
;;
esac
# step: add the device
LOG_INFO "Try Adding Host : ID=$ID DESC=$DESC IP=$IP"
$PHP_PATH/php -q $CLI_PATH/cli/add_device.php --description=$DESC --ip=$IP --template=3 --avail=pingsnmp --ping_method=udp --community="public" --version=2
ID=`$PHP_PATH/php -q $CLI_PATH/cli/add_data_query.php --list-hosts | grep -w "\b$DESC\b" | sed 's/\t/,/g' | awk -F, '{print $1}'`
LOG_INFO "Added Host : ID=$ID DESC=$DESC IP=$IP"
# step: add the graphs
LOG_INFO "Adding Maps."
# $PHP_PATH/php -q $CLI_PATH/cli/add_graphs.php --host-id=$ID --graph-type=ds --graph-template-id=2 --snmp-query-id=1 --snmp-query-type-id=13 --snmp-field=ifOperStatus --snmp-value=Up
$PHP_PATH/php -q $CLI_PATH/cli/add_graphs.php --host-id=$ID --graph-type=cg --graph-template-id=4
$PHP_PATH/php -q $CLI_PATH/cli/add_graphs.php --host-id=$ID --graph-type=cg --graph-template-id=7
$PHP_PATH/php -q $CLI_PATH/cli/add_graphs.php --host-id=$ID --graph-type=cg --graph-template-id=8
$PHP_PATH/php -q $CLI_PATH/cli/add_graphs.php --host-id=$ID --graph-type=cg --graph-template-id=9
$PHP_PATH/php -q $CLI_PATH/cli/add_graphs.php --host-id=$ID --graph-type=cg --graph-template-id=10
# $PHP_PATH/php -q $CLI_PATH/cli/add_graphs.php --host-id=$ID --graph-type=cg --graph-template-id=11
$PHP_PATH/php -q $CLI_PATH/cli/add_graphs.php --host-id=$ID --graph-type=cg --graph-template-id=12
$PHP_PATH/php -q $CLI_PATH/cli/add_graphs.php --host-id=$ID --graph-type=cg --graph-template-id=13
# $PHP_PATH/php -q $CLI_PATH/cli/add_graphs.php --host-id=$ID --graph-type=cg --graph-template-id=21
# step: add the disks (query the device for them first)
Disk=`$PHP_PATH/php -q $CLI_PATH/cli/add_graphs.php --host-id=$ID --snmp-field=dskDevice --list-snmp-values | grep -v "Known"`
for i in $Disk
do
$PHP_PATH/php -q $CLI_PATH/cli/add_graphs.php --host-id=$ID --graph-type=ds --graph-template-id=21 --snmp-query-id=6 --snmp-query-type-id=15 --snmp-field=dskDevice --snmp-value=$i
done
# step: add the host to the graph tree (skip if already present)
LOG_INFO "Adding Host to Tree : ID=$ID DESC=$DESC IP=$IP"
dbase=`mysql -u$DB_user -p$DB_pswd -e"use cacticn; select host_id from graph_tree_items where host_id =$ID;"`
res=`echo $dbase | awk '{print $2}'`
if [ "$res" != "$ID" ];then
$PHP_PATH/php -q $CLI_PATH/cli/add_tree.php --type=node --node-type=host --tree-id=1 --host-id=$ID
else
echo -e "\nAlready Graph Tree Exists fo this Host\n"
fi
done <"$CHK_PATH/cacti_clients"
LOG_INFO "End."
| true
|
947e610011116106198dce33abc74024367b77cd
|
Shell
|
ut-osa/ryoan
|
/apps/testing_preamble.sh
|
UTF-8
| 1,360
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# Run from the script's own directory so the relative paths below resolve.
# Fix: quote the substitution (paths with spaces) and abort if the cd fails.
cd "$(dirname "$0")" || exit 1
logfile_number=
encrypt=1
padding=1
checkpoint=1
usage="Usage: $0 [{--log-append|-l} string] [--no-checkpoint] [--no-encrypt] [--no-io-model] path/to/pipe_description.json"
# Parse leading options; the loop stops at the first unrecognized word,
# which is expected to be the pipe-description JSON path.
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-l|--log-append)
logfile_number="$2"
shift
shift
;;
--no-encrypt)
encrypt=0
shift # past argument
;;
--no-checkpoint)
checkpoint=0
shift # past argument
;;
--help)
echo "$usage"
exit 1
;;
--no-io-model)
padding=0
shift # past argument
;;
*) # unknown option
break
;;
esac
done
# exactly one positional argument (the pipeline JSON) must remain
if [ $# -ne 1 ]; then
echo "$usage"
exit 1
fi
export json_file=$1
# derive a per-process port from the PID to avoid collisions
# NOTE(review): $[ ... ] is deprecated Bash syntax; prefer $(( $$ + 6000 )).
export port=$[$$ + 6000]
export start_prog=./start_pipeline
export logfile_number
# flags are exported as env vars consumed by the pipeline programs
if [[ "$encrypt" == "0" ]]; then
export RYOAN_NO_ENCRYPT=yes
fi
if [[ "$checkpoint" == "0" ]]; then
export RYOAN_NO_CHECKPOINT=yes
fi
export time="$(which time)"
# launch the server and wait for initialization
# there are 3 arguments:
# * the command which starts the server
# * the number of ryoan instances
# * the name of the log file
launch_server() {
cmd=$1
N=$2
logfile=$3
i=0
# fd 3 reads the server's combined stdout/stderr via process substitution;
# `|| true` keeps the substitution alive if the command exits non-zero
exec 3< <($cmd 2>&1 || true)
# consume output until each of the N instances printed its CHECKPOINT marker
while [ $i -lt $N ]; do
sed '/-----------CHECKPOINT---------$/q' <&3
let i+=1
done
# stream the remaining output to the logfile in the background
cat <&3 >$logfile &
}
| true
|
6ff793581f58c4dab0bc04787b0e6f2543de4eb7
|
Shell
|
fmihaich/deviget
|
/script/ui_test
|
UTF-8
| 914
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Run the UI test suite inside docker-compose; browser and target environment
# come from -b/--browser and -e/--env (with defaults below).
DEFAULT_BROWSER=chrome
DEFAULT_ENV=PDR_DOCKER
# parse CLI flags; unknown options are silently ignored
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-b|--browser)
BROWSER=$2
shift
;;
-e|--env)
TEST_ENV="$2"
shift
;;
*)
# unknown option
;;
esac
shift
done
# fall back to defaults when flags were not given
if [[ -z $BROWSER ]]
then
BROWSER=$DEFAULT_BROWSER
fi
if [[ -z $TEST_ENV ]]
then
TEST_ENV=$DEFAULT_ENV
fi
echo "Stopping & starting docker container"
docker-compose -f ui_tests.yml down
docker-compose -f ui_tests.yml up -d
# give the containers a moment to come up before listing them
sleep 3
docker-compose -f ui_tests.yml ps
echo "RUNNING UI TESTS"
echo "Running UI tests using browser: $BROWSER"
echo "Test environment: $TEST_ENV"
# -T: no TTY allocation (CI-friendly); browser/env passed into the container
docker-compose -f ui_tests.yml exec -T -e BROWSER=$BROWSER -e TEST_ENV=$TEST_ENV test_runner tests/ui/run
echo ""
echo "Stopping container"
docker-compose -f ui_tests.yml down
| true
|
5e62770be0386f1f40ab9898de533e7130aa57ed
|
Shell
|
opyate/uys.io-playbook
|
/roles/linkbaiter/templates/linkbaiter.sh.j2
|
UTF-8
| 406
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Ansible/Jinja2 template: {{...}} placeholders are filled at deploy time.
LOC=/tmp/linkbaiter
mkdir -p $LOC
cd {{linkbaiter_install_dir}}
# run each spider, dumping its items to a timestamped JSON file
for spider in bi bf ; do
OUTPUT="$LOC/$spider.$(date +'%s.%N').json"
# DB credentials are injected via the environment for this one scrapy run
PLUSONENEWS_MYSQL_HOST="{{PLUSONENEWS_MYSQL_HOST}}" \
PLUSONENEWS_MYSQL_DBNAME="{{linkbaiter_db_name}}" \
PLUSONENEWS_MYSQL_USER="{{PLUSONENEWS_MYSQL_USER}}" \
PLUSONENEWS_MYSQL_PASSWD="{{PLUSONENEWS_MYSQL_PASSWD}}" \
scrapy crawl $spider -o $OUTPUT -t json
done
| true
|
12dd9031012faafb73d031015d85f9fb5ed04b75
|
Shell
|
0xdead8ead/firmware_reversing
|
/firmware/Netgear_Centria_WNDR4700/squashfs-root/etc/email/email_log
|
UTF-8
| 4,588
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/sh
nvram=/bin/config # NVRAM get/set helper binary
smtpc=/usr/sbin/ssmtp # minimal SMTP client used to send mail
LOG_FILE=/var/log/messages
# email related configs (in nvram get command) :
# email_smtp : smtp server address
# email_endis_auth : smtp sever needs authentication or not ?
# email_username : username for smtp server authentication
# email_password : password for smtp server authentication
#
# email_addr : "To addr", the addr of the receiver.
# email_from_assign : assign "From addr" or not ?
# email_this_addr : "From addr", the addr the email (says itself) comes from.
#
# email_notify :
# email_send_alert :
# Succeed when the SMTP server ($1) requires TLS.
# smtp.gmail.com is the only server known to request TLS support.
tls_required() # $1: smtp_server
{
	case "$1" in
	*gmail*) return 0 ;;
	*) return 1 ;;
	esac
}
# Emit an ssmtp config for the configured SMTP server on stdout,
# adding credentials (and TLS for gmail) when auth is enabled in nvram.
print_smtpc_conf()
{
local smtp_server=$($nvram get email_smtp)
echo "mailhub=$smtp_server"
echo "FromLineOverride=yes"
if [ $($nvram get email_endis_auth) = "1" ]; then
echo "AuthUser=$($nvram get email_username)"
echo "AuthPass=$($nvram get email_password)"
if tls_required "$smtp_server"; then
echo "UseTLS=YES"
echo "UseSTARTTLS=YES"
fi
fi
}
# Print the Subject/From/To header block for the log email.
print_email_header()
{
local hostname="$(cat /proc/sys/kernel/hostname)"
local from
local addr="$($nvram get email_addr)"
# as I know, different smtp servers have different rules about "From addr" :
# * dni : drops mails that "From addr" != "account email addr" silently.
# * pchome : rejects sending mails that "From addr" != "account email addr".
# * gmail : tranforms the "From addr" to "account email addr".
# the smtp servers that don't care about "From addr" and just send mails are getting
# scarce.
if [ "$($nvram get email_from_assign)" = "1" ]; then
from="\"root@$hostname\"<$($nvram get email_this_addr)>"
else
from="\"root@$hostname\"<$($nvram get email_addr)>"
fi
cat <<EOF
Subject: NETGEAR $hostname Log
From: $from
To: $addr
EOF
}
# Print the full email: headers plus the system log, newest entries first,
# truncated to 256 lines.
print_log()
{
print_email_header
if [ -s $LOG_FILE ]; then
# first sed reverses the line order (a `tac` equivalent); second caps at 256
sed -n '1! G;$p;h' $LOG_FILE | sed -n '1,256 p'
else
echo "The system doesn't have any logs yet"
fi
}
# Mail the system log to the configured address via ssmtp.
# $1: pass "clearlog_if_success" to delete the log once it was sent.
# Returns 0 on success, 1 when sending failed.
sendlog() # $1: clearlog_if_success
{
	local conf=/tmp/ssmtp.conf
	local email_file=/tmp/tmp_email_file
	local err_file=/tmp/tmp_email_err_file
	local addr="$($nvram get email_addr)"
	print_smtpc_conf > $conf
	print_log > $email_file
	if ! cat $email_file | $smtpc -C$conf $addr >/dev/null 2>$err_file; then
		# Fix: log only the failure here — the original also logged
		# "[email sent to: ...]" on this path, which was misleading.
		logger -- "[email failed] $(cat $err_file)"
		rm -f $conf $email_file $err_file
		return 1
	fi
	rm -f $conf $email_file $err_file
	if [ "$1" = "clearlog_if_success" ]; then
		rm -f $LOG_FILE
	fi
	logger -- "[email sent to: $addr]"
	return 0
}
# Print the Subject/From/To header block for the disk-failure warning email.
# Same From-address handling as print_email_header.
print_email_header_for_hdd()
{
local hostname="$(cat /proc/sys/kernel/hostname)"
local from
local addr="$($nvram get email_addr)"
if [ "$($nvram get email_from_assign)" = "1" ]; then
from="\"root@$hostname\"<$($nvram get email_this_addr)>"
else
from="\"root@$hostname\"<$($nvram get email_addr)>"
fi
cat <<EOF
Subject: Warning!WNDR4700 Internal HDD might have some issues
From: $from
To: $addr
EOF
}
# Print the full disk-failure warning email (headers + fixed warning text).
print_hdd_log()
{
print_email_header_for_hdd
echo "[HDD ERROR] Warning! The internal hard drive have the reallocated sector error frequently, we suggest you to replace the internal hard drive now."
}
# Per NTGR's requirement: when the internal disk reports errors, email the
# user at 9:30 AM. Returns 0 on success, 1 when sending failed.
email_HDD_err_log()
{
	local conf=/tmp/hdd_err.conf
	local email_file=/tmp/tmp_hdd_email.file
	local err_file=/tmp/tmp_hdd_err_email.file
	local addr="$($nvram get email_addr)"
	echo "email_HDD_err_log in ..." > /dev/console
	print_smtpc_conf > $conf
	print_hdd_log > $email_file
	if ! cat $email_file | $smtpc -C$conf $addr >/dev/null 2>$err_file; then
		# Fix: log only the failure — the original also logged the
		# "[email sent to: ...]" success line on this path.
		logger -- "[email failed] $(cat $err_file)"
		rm -f $conf $email_file $err_file
		return 1
	fi
	rm -f $conf $email_file $err_file
	logger -- "[email sent to: $addr]"
	return 0
}
### start here ###
# This script is installed under several names; dispatch on basename($0).
prog=${0##*/}
case "$prog" in
email_log)
# periodic: mail the log (clearing it on success) when notification is on
[ $($nvram get email_notify) = "0" ] && exit
sendlog "clearlog_if_success"
;;
email_full_log)
[ $($nvram get email_notify) = "0" ] && exit
# send log only if lines of log file > 256 * 90% = 230.4
[ ! -s $LOG_FILE ] && exit
[ "$(wc -l $LOG_FILE | sed -n 's/[^0-9]*//gp')" -le "230" ] && exit
sendlog "clearlog_if_success"
;;
send_email_alert)
[ $($nvram get email_notify) = "0" ] && exit
[ "$($nvram get email_send_alert)" = "0" ] && exit
sendlog "clearlog_if_success"
;;
send_log)
# manual trigger: always send, never clear the log
sendlog
;;
email_HDD_err_log)
[ $($nvram get email_notify) = "0" ] && exit
email_HDD_err_log
;;
esac
| true
|
03bb2e0cf2169b3f0aaa1768533e1493c173b9df
|
Shell
|
dbyio/spotifyMyPi
|
/do.sh
|
UTF-8
| 2,062
| 4.125
| 4
|
[] |
no_license
|
#! /bin/bash
# Succeed when running as root; otherwise complain and abort the script.
function require_root() {
	if [ "$(id -u)" -ne 0 ]; then
		echo "Please run me as root."
		exit 1
	fi
	return 0
}
# True iff a container named spotifyd is currently running.
function is_running() {
	local cid
	cid=$(docker ps -f name=spotifyd -f status=running --format "{{.ID}}")
	[[ -n $cid ]]
}
# True iff a spotifyd container exists (running or stopped).
function container_exists() {
	local cid
	cid=$(docker container ls -a -f name=spotifyd --format "{{.ID}}")
	[[ -n $cid ]]
}
# True iff a local image named spotifyd exists.
function image_exists() {
	local iid
	iid=$(docker images spotifyd --format "{{.ID}}")
	[[ -n $iid ]]
}
# Start spotifyd: reuse a running/stopped container when one exists,
# otherwise create a fresh one with the config and sound device mounted.
function do_run() {
require_root
if is_running; then
echo "Container running already, doing nothing."
return 0
fi
if container_exists; then
echo "Starting existing container.."
docker start spotifyd
return $?
fi
echo "Starting new container."
docker run --name spotifyd -d -v "/opt/spotifyd/etc/spotifyd.conf:/etc/spotifyd.conf:ro" --rm --device /dev/snd spotifyd
return $?
}
# Rebuild the spotifyd image from scratch.
# Teardown order matters: stop the systemd unit, stop the container,
# remove the container, then remove the image — each step unblocks the next.
function do_update() {
require_root
is_running && systemctl stop spotifyd
is_running && docker stop spotifyd
container_exists && docker rm spotifyd
if image_exists; then docker rmi spotifyd; fi
# Re-check: rmi can fail (e.g. image still referenced); bail out rather
# than build on top of a stale image.
if image_exists; then
echo "Failed to rm existing image, please fix and re-run."
return 1
fi
docker build -t spotifyd --no-cache .
# Verify the build actually produced an image before declaring success.
if image_exists; then
echo "Image updated successfully."
else
echo "Failed to build image."
return 1
fi
}
# Build the spotifyd image unless one already exists.
do_build() {
	require_root
	if image_exists; then
		echo "Image exists already. Run $0 update if you want to update the existing build."
		return 0
	fi
	docker build -t spotifyd:latest .
}

# Print a short command summary.
usage() {
	echo "$0 [COMMAND]"
	echo
	echo "Commands:"
	echo " build : build the Docker image"
	echo " run : run a container named spotifyd using the image"
	echo " update : clean existing containers and update existing image with latest binaries"
}

# Dispatch on the first argument; anything unrecognized prints usage.
case "$1" in
	run)    do_run;    exit $? ;;
	build)  do_build;  exit $? ;;
	update) do_update; exit $? ;;
	*)      usage ;;
esac
exit 0
| true
|
dffce2ee168462738d5aab11cb5bf4807da7a3e6
|
Shell
|
hongbin8237/slam
|
/bin/realtime.sh
|
UTF-8
| 1,567
| 2.5625
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# bin/realtime.sh
# Martin Miller
# Created: 2014/07/07
# Simulate real-time
#
# Replays recorded sensor logs through named pipes at recorded rates so
# the SLAM binary sees them as live streams, and feeds the result to rviz.
DATA=raw/2nd
BODY=data/bodyHist3.txt
DT=data/dt.fifo
ALT=data/alt.fifo
ACC=data/acc.fifo
QBW=data/qbw.fifo
ANGVEL=data/w.fifo
DISP=data/disp.fifo
#BODYFF=data/body.fifo
# Kill leftovers from a previous run before recreating the fifos.
pkill slam
pkill sensor
pkill multitap
rosrun rviz rviz -d config/rviz_display.rviz &
rm -f data/*.fifo
mkfifo $ALT 2>/dev/null
mkfifo $ACC 2>/dev/null
mkfifo $QBW 2>/dev/null
mkfifo $ANGVEL 2>/dev/null
mkfifo $DT 2>/dev/null
mkfifo $DISP 2>/dev/null
#mkfifo $BODYFF 2>/dev/null
# Each pipeline below strips the leading sample index with sed, applies
# per-sensor filtering/bias removal, and writes into its fifo.
# stdbuf -eL -oL forces line buffering so samples flow through one at a
# time instead of in 4K blocks.
# Altimeter stream.
./bin/sensor-emu $DATA/alt | \
stdbuf -eL -oL sed 's/[0-9]*,\(.*\),/\1/' | \
stdbuf -eL -oL ./bin/multitap $ALT &
# Accelerometer: gravity FIR filter, then fixed bias removal.
./bin/sensor-emu $DATA/acc | \
stdbuf -eL -oL sed 's/[0-9]*,//' | \
stdbuf -eL -oL ./bin/fir config/gravity.txt | \
stdbuf -eL -oL ./bin/rmbias -- -0.0171 0.0116 0.0158 | \
stdbuf -eL -oL ./bin/multitap $ACC &
# Attitude: Euler angles converted to the body-to-world quaternion.
./bin/sensor-emu $DATA/attitude | \
stdbuf -eL -oL sed 's/[0-9]*,//' | \
stdbuf -eL -oL ./bin/euler2qbw | \
stdbuf -eL -oL ./bin/multitap $QBW &
# Gyro: bias removal, then FIR smoothing.
./bin/sensor-emu $DATA/gyro | \
stdbuf -eL -oL sed 's/[0-9]*,//' | \
stdbuf -eL -oL ./bin/rmbias -- 0.0006 0.0009 -0.0011 | \
stdbuf -eL -oL ./bin/fir ./config/coeffs.txt | \
stdbuf -eL -oL ./bin/multitap $ANGVEL &
# Timestep stream (-d: presumably emit deltas — confirm in sensor-emu).
./bin/sensor-emu $DATA/dt -d | \
stdbuf -eL -oL sed 's/[0-9]*,//' > $DT &
FASTOPTS="$BODY $ALT $ACC $DT $QBW $ANGVEL"
#valgrind --leak-check=full ./bin/slam $FASTOPTS
# SLAM reads all fifos and writes display data; rviz consumes it.
stdbuf -eL -oL ./bin/slam $FASTOPTS > $DISP &
rosrun using_markers display_realtime $DISP
rm -f data/*.fifo
| true
|
ba01ed97201399c81ff315c9210923fba84293af
|
Shell
|
dijonkitchen/but_why
|
/examples/main/bin/run
|
UTF-8
| 849
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Recommended way of starting an unpacked repo of Edge in production.
# Feel free to change this.
APP_NAME=Edge
# Comment this out to disable the production REPL, otherwise use it to specify the port.
SOCKET_REPL_PORT=50505
# Exported so it can be accessed inside the JVM.
export NREPL_PORT=5601
# The -Xms JVM argument
INIT_MEMORY=256m
# The -Xmx JVM argument
MAX_MEMORY=1200m
# Deployment profile, taken from the first CLI argument (may be empty).
PROFILE=$1

aliases=build:prod/build:prod
if [[ -n $NREPL_PORT ]]; then
  aliases+=:dev/nrepl
fi

# Build the command as an array instead of a flat string: the old
# `exec $COMMAND` relied on unquoted word-splitting, which also performed
# glob expansion and IFS-dependent splitting on every argument.
COMMAND=(clojure)
if [[ -n $SOCKET_REPL_PORT ]]; then
  COMMAND+=(-J-Dclojure.server.myrepl="{:port,$SOCKET_REPL_PORT,:accept,clojure.core.server/repl,:address,\"localhost\"}")
fi
COMMAND+=(-J-Djava.awt.headless=true -J-Xms"$INIT_MEMORY" -J-Xmx"$MAX_MEMORY" -A:"$aliases" -m user)
# Only append the profile when one was given — this mirrors the old
# behavior where an empty $PROFILE simply vanished during word-splitting.
if [[ -n $PROFILE ]]; then
  COMMAND+=("$PROFILE")
fi

echo "Starting $APP_NAME"
printf '%s ' "${COMMAND[@]}"; printf '\n'
exec "${COMMAND[@]}"
| true
|
7f9324c251ff730b138be93ea305e57cb552126a
|
Shell
|
bossjones/debug-tools
|
/install-fx.sh
|
UTF-8
| 686
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
#-----------------------------------------------------------------------
# Linux (Linux/x86_64, Darwin/x86_64, Linux/armv7l)
#
# install-fx.sh - Install fx
#
# usage: install-fx.sh
# NOTE(review): the historical usage line advertised a NON_ROOT_USER
# argument, but the script never reads any argument — confirm before
# re-documenting one.
#
# Copyright (c) 2020 Malcolm Jones
# All Rights Reserved.
#-----------------------------------------------------------------------
# SOURCE: https://github.com/tkyonezu/Linux-tools/blob/98a373f3756fe9e27d27a8c3cf7d39fd447ea5c1/install-fx.sh
# Install fx
# https://github.com/antonmedv/fx
set -e

# Print a progress message to stdout (was defined but unused before).
logmsg() {
	echo ">>> $1"
}

echo " [install-fx] see https://github.com/antonmedv/fx"

# Fail early with a clear message instead of relying on set -e and a
# cryptic "npm: command not found".
if ! command -v npm >/dev/null 2>&1; then
	logmsg "npm is required but was not found in PATH"
	exit 1
fi

npm install -g fx
exit 0
| true
|
975de804b442bc06cd63cc04909165898ead8a7d
|
Shell
|
h8h/MasterMind
|
/compileme.sh
|
UTF-8
| 448
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Compile all MasterMind sources for Java 6, capturing compiler
# diagnostics in mastermind_error.log, and report success/failure.
javac -target 1.6 -source 1.6 -encoding utf-8 *.java mastermind_core/*.java mastermind_save_load/*.java mastermind_gui/mastermind_templates/*.java mastermind_gui/*.java -Xlint:-options 2> mastermind_error.log
# The redirection above always creates the log file, so the old `-e` test
# was redundant; `-s` ("exists and is non-empty") is the real condition
# and — unlike `wc -l`-based counting — also catches a log whose content
# lacks a trailing newline.
if [ -s mastermind_error.log ]; then
	echo "Es traten Fehler auf, weitere Infos in mastermind_error.log"
else
	echo "MasterMind wurde erfolgreich erstellt!"
fi
| true
|
4b8d020510691084b9595f20077f8de0c09488be
|
Shell
|
stuart-warren/k8sh
|
/k8sh
|
UTF-8
| 4,874
| 3.375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash

# Switch (or list) the kubectl context used by the `k` alias.
# Without an argument: print all context names. With one: remember it.
ct() {
	if [ -n "$1" ]; then
		export KUBECTL_CONTEXT=$1
		return
	fi
	k config get-contexts -o=name | sort -n
}
export -f ct

# Tab completion for ct: complete against the known context names.
_ct_completions()
{
	local contexts
	contexts=$(k config get-contexts -o=name | sort -n)
	COMPREPLY=($(compgen -W "${contexts}" "${COMP_WORDS[1]}"))
}
export -f _ct_completions

# Switch (or list) the namespace used by the `k` alias.
ns() {
	if [ -n "$1" ]; then
		export KUBECTL_NAMESPACE=$1
		return
	fi
	k get namespaces -o=jsonpath='{range .items[*].metadata.name}{@}{"\n"}{end}'
}
export -f ns

# Tab completion for ns: complete against the cluster's namespaces.
_ns_completions()
{
	local namespaces
	namespaces=$(k get namespaces -o=jsonpath='{range .items[*].metadata.name}{@}{"\n"}{end}')
	COMPREPLY=($(compgen -W "${namespaces}" "${COMP_WORDS[1]}"))
}
export -f _ns_completions

# Source the user's optional extension file, if present.
reloadExtensions() {
	[ -e ~/.k8sh_extensions ] || return 0
	echo "Sourcing in ~/.k8sh_extensions..."
	source ~/.k8sh_extensions
}
export -f reloadExtensions
# One-time interactive setup: define colors, aliases, completion and the
# prompt, then report the active context/namespace. Run inside the
# child `bash -i` started at the bottom of this file.
k8sh_init() {
# Plain ANSI colors for normal echo output.
RED='\033[00;31m'
GREEN='\033[00;32m'
YELLOW='\033[00;33m'
BLUE='\033[00;34m'
PURPLE='\033[00;35m'
CYAN='\033[00;36m'
LIGHTGRAY='\033[00;37m'
LRED='\033[01;31m'
LGREEN='\033[01;32m'
LYELLOW='\033[01;33m'
LBLUE='\033[01;34m'
LPURPLE='\033[01;35m'
LCYAN='\033[01;36m'
WHITE='\033[01;37m'
RESTORE='\033[0m'
# Colors for the PS1 prompt
# For an explanation of the format, please see the Stack Exchange answer
# from Runium (https://unix.stackexchange.com/users/28489/runium)
# at http://unix.stackexchange.com/questions/105958/terminal-prompt-not-wrapping-correctly
PS_RED='\[\033[00;31m\]'
PS_GREEN='\[\033[00;32m\]'
PS_YELLOW='\[\033[00;33m\]'
PS_BLUE='\[\033[00;34m\]'
PS_PURPLE='\[\033[00;35m\]'
PS_CYAN='\[\033[00;36m\]'
PS_LIGHTGRAY='\[\033[00;37m\]'
PS_LRED='\[\033[01;31m\]'
PS_LGREEN='\[\033[01;32m\]'
PS_LYELLOW='\[\033[01;33m\]'
PS_LBLUE='\[\033[01;34m\]'
PS_LPURPLE='\[\033[01;35m\]'
PS_LCYAN='\[\033[01;36m\]'
PS_WHITE='\[\033[01;37m\]'
PS_RESTORE='\[\033[0m\]'
# Functional colors
CONTEXT_COLOR=$LRED
PS_CONTEXT_COLOR=$PS_LRED
NAMESPACE_COLOR=$LCYAN
PS_NAMESPACE_COLOR=$PS_LCYAN
echo ""
echo -e "${LPURPLE}Welcome to k${LRED}8${LPURPLE}sh${RESTORE}"
if [ -e ~/.bash_profile ]; then
echo "Sourcing in ~/.bash_profile..."
source ~/.bash_profile
fi
echo "Gathering current kubectl state..."
export KUBECTL_CONTEXT=$(kubectl config current-context)
export KUBECTL_NAMESPACE=${DEFAULT_NAMESPACE-default}
echo "Making aliases..."
# The escaped \$ makes the alias re-read the variables on every use, so
# ct/ns take effect immediately without re-defining the alias.
alias kubectl="kubectl --context \$KUBECTL_CONTEXT --namespace \$KUBECTL_NAMESPACE"
alias k="kubectl"
# Common actions
alias describe="k describe"
alias get="k get"
alias create="k create"
alias apply="k apply"
alias delete="k delete"
alias scale="k scale"
alias rollout="k rollout"
alias logs="k logs"
# Query common resources
# Resource reference list:
# pods (po), services (svc), deployments, replicasets (rs)
# replicationcontrollers (rc), nodes (no), events (ev),
# limitranges (limits), persistentvolumes (pv),
# persistentvolumeclaims (pvc), resourcequotas (quota),
# namespaces (ns), serviceaccounts (sa), ingresses (ing),
# horizontalpodautoscalers (hpa), daemonsets (ds), configmaps,
# componentstatuses (cs), endpoints (ep), and secrets.
alias pods="get pods"
alias services="get svc"
alias deployments="get deployments"
alias dep="get deployments" # NON-STANDARD!!
alias replicasets="get rs"
alias replicationcontrollers="get rc"
alias rc="get rc"
alias nodes="get nodes"
alias limitranges="get limitranges"
alias limits="get limitranges"
alias events="get events"
alias persistentvolumes="get pv"
alias pv="get pv"
alias persistentvolumeclaims="get pvc"
alias pvc="get pvc"
alias namespaces="get ns"
alias ingresses="get ing"
alias ing="get ing"
alias configmaps="get configmaps"
alias secrets="get secrets"
complete -F _ns_completions ns
complete -F _ct_completions ct
# Only wire up kubectl's own completion when bash-completion is loaded
# (detected via its _get_comp_words_by_ref helper).
local bash_completion_present=$(type -t _get_comp_words_by_ref)
if [[ ! -z "$bash_completion_present" ]]; then
echo "Setting up k completion..."
# without sourcing completion for `k` does not recognize __start_kubectl
source <(kubectl completion bash)
# make completion work for `k`
complete -F __start_kubectl k
else
echo -e "${RED}For k completion please install bash-completion${RESTORE}"
fi
reloadExtensions
# Set up PS1 prompt
export PS1="($PS_CONTEXT_COLOR\$KUBECTL_CONTEXT$PS_RESTORE/$PS_NAMESPACE_COLOR\$KUBECTL_NAMESPACE$PS_RESTORE) \W ${PS_LPURPLE}\$${PS_RESTORE} "
echo ""
echo -e "Context: $CONTEXT_COLOR$KUBECTL_CONTEXT$RESTORE"
echo -e "Namespace: $NAMESPACE_COLOR$KUBECTL_NAMESPACE$RESTORE"
}
export -f k8sh_init
echo "Initializing..."
export PS1="" # Clear PS1 for prettier init
# Start an interactive child bash that runs the init and then hands the
# terminal back to the user.
bash -i <<< 'k8sh_init; exec </dev/tty'
| true
|
4844f8db09b6c546058c9bc539b5c8257dab4ebe
|
Shell
|
komarios/weblogic_wlst
|
/infra/shutdownNMandAdminServer.sh
|
UTF-8
| 279
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/sh
# Stop the WebLogic NodeManager process, then shut down the Admin Server.
echo "Killing nodemanager"
# Locate the NodeManager JVM: a java process owned by/labelled "wlogic"
# (but not "wlogic2") whose command line mentions NodeManager.
nm_pid=$(ps -efx | grep java | grep wlogic | grep -v wlogic2 | grep -i NodeManager | awk '{print $2}')
# Guard against an empty PID list: kill(1) errors out when given no
# argument, which the old script did whenever NodeManager was not running.
if [ -n "${nm_pid}" ]; then
	kill -9 ${nm_pid}
else
	echo "NodeManager does not appear to be running"
fi
# Abort if the domain directory is missing instead of sourcing/running
# scripts from whatever the current directory happens to be.
cd "$DOMAIN_HOME/bin" || exit 1
. ./setDomainEnv.sh
cd "$DOMAIN_HOME/bin" || exit 1
echo "Stopping Admin Server "
./stopWebLogic.sh
| true
|
503a8c0c73c47ec324e9703c8a541ff25552b09c
|
Shell
|
merckey/dropkick-manuscript
|
/run_and_test.sh
|
UTF-8
| 726
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
# Iterate over all .h5ad files in the data dir with a glob instead of
# parsing `ls` output (which word-splits on spaces in file names).
for f in data/*.h5ad; do
	[ -e "$f" ] || continue # no matches: glob stays literal, skip it
	file=${f##*/} # file name without the data/ prefix
	printf '\nStarting dropkick qc on %s:\n' "$file"
	time dropkick qc "$f" # generate dropkick QC report
	printf '\nRunning dropkick filtering on %s:\n' "$file"
	time dropkick run "$f" -j 5 # run dropkick filtering with 5 jobs
	kitchen label_info "${file%.h5ad}_dropkick.h5ad" -l dropkick_label # show number of cells identified by dropkick
done
# aggregate stats comparing dropkick to EmptyDrops and CellRanger
printf '\nSummarizing statistics comparing dropkick to EmptyDrops and CellRanger:\n'
# Glob replaces the old `ls | grep .h5ad`, whose unescaped dot matched
# any character and which broke on unusual names.
python dropkick_agg_stats.py *.h5ad -l CellRanger_2 EmptyDrops
| true
|
df5f30eb599ce15d19be268b66f1e52526435672
|
Shell
|
bradtm/pulsar
|
/docker/build.sh
|
UTF-8
| 2,026
| 3.328125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Get the project version from Maven
pushd .. > /dev/null
MVN_VERSION=$(mvn -q \
    -Dexec.executable="echo" \
    -Dexec.args='${project.version}' \
    --non-recursive \
    org.codehaus.mojo:exec-maven-plugin:1.3.1:exec)
popd > /dev/null

echo "Pulsar version: ${MVN_VERSION}"

# Locate the binary distribution produced by the main build.
PULSAR_TGZ=$(dirname $PWD)/all/target/pulsar-${MVN_VERSION}-bin.tar.gz
if [ ! -f $PULSAR_TGZ ]; then
    echo "Pulsar bin distribution not found at ${PULSAR_TGZ}"
    exit 1
fi

# Hard-link the tarball into the Docker build context.
LINKED_PULSAR_TGZ=pulsar-${MVN_VERSION}-bin.tar.gz
ln -f ${PULSAR_TGZ} $LINKED_PULSAR_TGZ

echo "Using Pulsar binary package at ${PULSAR_TGZ}"

# Build base image, reused by all other components
if ! docker build --build-arg VERSION=${MVN_VERSION} \
    -t pulsar:latest .; then
    echo "Error: Failed to create Docker image for pulsar"
    exit 1
fi

rm pulsar-${MVN_VERSION}-bin.tar.gz

# Build pulsar-grafana image
if ! docker build -t pulsar-grafana grafana; then
    echo "Error: Failed to create Docker image for pulsar-grafana"
    exit 1
fi

# Build dashboard docker image
if ! docker build -t pulsar-dashboard ../dashboard; then
    echo "Error: Failed to create Docker image for pulsar-dashboard"
    exit 1
fi
| true
|
2491f4bca896bd434968615e126bbe98f51fdd98
|
Shell
|
danielsuo/libphase
|
/scripts/spec/invoke.sh
|
UTF-8
| 868
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Generate per-benchmark invoke scripts for SPEC CPU2017 run directories.
# $1 selects the working-set label used in the tmp/spec_$1m directory name.
# NOTE(review): the former `DIM=$1000000` assignment was never read
# anywhere in this script and has been removed.

# Repository root: two levels above this script's directory.
DIR=$(realpath "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && echo "$(pwd)/../../")")

mkdir -p "$DIR/tmp/spec_$1m"
cmds=$DIR/tmp/spec_$1m/cmds.sh
rm -f "$(dirname "$cmds")"/*

# A while-read loop over find output replaces the old backtick for-loop,
# so directory paths survive intact instead of being word-split.
find "$DIR/cpu2017/benchspec" -maxdepth 3 -type d | grep run | while IFS= read -r i; do
	benchmark=$(basename "$(dirname "$i")")
	speccmds=$(find "$i" | grep -E "speccmds\.cmd" | head -n 1)
	echo "$benchmark"
	# Copy executable
	rundir=$(dirname "$speccmds")
	cp "$rundir"/../../exe/* "$rundir"
	# Convert commands
	invoke=$(dirname "$cmds")/$benchmark.sh
	"$DIR/cpu2017/bin/specinvoke" -n "$speccmds" > "$invoke"
	# Delete last line (speccmds exit: rc=1)
	sed -i '$d' "$invoke"
	# Remove all comments
	sed -i '/^#/d' "$invoke"
	# Export OMP variables every time
	sed -i "s@^@export OMP_NUM_THREADS=1 \&\& cd $rundir \&\& @" "$invoke"
done

chmod +x "$(dirname "$cmds")"/*
| true
|
7d416547153a210a13559b8d84318f3abd69fa97
|
Shell
|
usp-engineers-community/Open-usp-Tukubai
|
/TEST/block_getlast.test
|
UTF-8
| 9,792
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#!/usr/local/bin/bash -xv # コマンド処理系の変更例
#
# test script of block_getlast
#
# usage: [<test-path>/]calclock.test [<command-path> [<python-version>]]
#
# <test-path>は
# 「現ディレクトリーからみた」本スクリプトの相対パス
# または本スクリプトの完全パス
# 省略時は現ディレクトリーを仮定する
# <command-path>は
# 「本スクリプトのディレクトリーからみた」test対象コマンドの相対パス
# またはtest対象コマンドの完全パス
# 省略時は本スクリプトと同じディレクトリーを仮定する
# 値があるときまたは空値("")で省略を示したときはあとにつづく<python-version>を指定できる
# <python-version>は
# 使用するpython処理系のversion(minor versionまで指定可)を指定する
# (例 python2 python2.6 phthon3 python3.4など)
# 単にpythonとしたときは現実行環境下でのdefault versionのpythonを使用する
# 文字列"python"は大文字/小文字の区別をしない
# 省略時はpythonを仮定する
name=block_getlast # test対象コマンドの名前
testpath=$(dirname $0) # 本スクリプト実行コマンドの先頭部($0)から本スクリプトのディレトリー名をとりだす
cd $testpath # 本スクリプトのあるディレクトリーへ移動
if test "$2" = ""; # <python-version>($2)がなければ
then pythonversion="python" # default versionのpythonとする
else pythonversion="$2" # <python-version>($2)があれば指定versionのpythonとする
fi
if test "$1" = ""; # <command-path>($1)がなければ
then commandpath="." # test対象コマンドは現ディレクトリーにある
else commandpath="$1" # <command-path>($1)があればtest対象コマンドは指定のディレクトリーにある
fi
com="${pythonversion} ${commandpath}/${name}" # python処理系によるtest対象コマンド実行の先頭部
tmp=/tmp/$$
# Abort the test run if the immediately preceding pipeline failed.
# PIPESTATUS (one exit code per pipeline stage) is flattened and
# stripped of spaces and zeros: an empty result means every stage
# exited 0, so just return. Otherwise print the caller's message ($1),
# report NG for this command, clean up temp files and exit non-zero.
ERROR_CHECK(){
[ "$(echo ${PIPESTATUS[@]} | tr -d ' 0')" = "" ] && return
echo $1
echo "${pythonversion} ${name}" NG
rm -f $tmp-*
exit 1
}
BOMandEOLvariation(){	# Generate a BOM+CRLF file ($2) and a BOM+CR file ($3) from a BOM-less, LF-terminated file ($1)
[ $# -eq 3 ]; ERROR_CHECK "TESTスクリプト内のBOMandEOLvariation()でファイル指定が不正"
# NOTE(review): '\xEF\xBB\xBF' sits OUTSIDE the single-quoted awk program,
# so the shell passes the backslash sequence through literally — confirm
# this actually yields the intended BOM/line-ending output on this awk.
awk '{print '\xEF\xBB\xBF' $0}' $1 > $2	# prepend a BOM to $1 and write the CRLF-joined variant to $2 (per the original comment)
awk 'BEGIN {ORS = "\r"} {print '\xEF\xBB\xBF' $0}' $1 > $3	# prepend a BOM to $1, join all lines with CR and write to $3
}
###########################################
# TEST1
# 同じ伝票番号を持つレコードのうち、直近の日付のレコードを出力する。
# (data のレイアウト)
# 1:伝票No 2:行 3:項目1 4:項目2 5:入力年月日
cat << FIN > $tmp-in
0001 1 A 15 20081203
0001 2 K 25 20081203
0001 3 F 35 20081203
0001 1 A 15 20081205
0001 3 F 25 20081205
0002 2 X 30 20081201
0002 1 H 80 20081208
FIN
cat << FIN > $tmp-ans
0001 1 A 15 20081205
0001 3 F 25 20081205
0002 1 H 80 20081208
FIN
${com} key=NF-4 ref=NF $tmp-in > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST1 error"
###########################################
# TEST2
# キーフィールドの列挙指定と範囲指定の連続混用および重複指定解消の試験
# 同じ店番号と伝票番号を持つレコードのうち、直近の日付のレコードを出力する。
# (data のレイアウト)
# 1:店番号 2:伝票No 3:行 4:項目1 5:項目2 6:入力年月日
cat << FIN > $tmp-data
a店 0001 1 A 15 20081202
a店 0001 2 K 25 20081203
b店 0001 3 F 35 20081203
b店 0001 1 A 15 20081205
b店 0001 3 F 25 20081205
c店 0002 2 X 30 20081201
c店 0002 1 H 80 20081208
FIN
cat << FIN > $tmp-ans
a店 0001 2 K 25 20081203
b店 0001 1 A 15 20081205
b店 0001 3 F 25 20081205
c店 0002 1 H 80 20081208
FIN
${com} key=1/2@NF-5/NF-4@1@2@2/1 ref=NF $tmp-data > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST2 error"
###########################################
#TEST3
#TEST1の拡大版
# BOM付きCRLFとBOM付きCRの試験
# 同じ伝票番号を持つレコードのうち、直近の日付のレコードを出力する。
# (data のレイアウト)
# 1:伝票No 2:行 3:項目1 4:項目2 5:入力年月日
cat << FIN > $tmp-in
0001 1 A 15 20081203
0001 2 K 25 20081203
0001 3 F 35 20081203
0001 1 A 15 20081205
0001 3 F 25 20081205
0002 2 X 30 20081201
0002 1 H 80 20081208
FIN
cat << FIN > $tmp-ans
0001 1 A 15 20081205
0001 3 F 25 20081205
0002 1 H 80 20081208
FIN
# 入力用tmpファイルからBOM付きCRLFとBOM付きCRの各ファイルを作る
BOMandEOLvariation $tmp-in $tmp-inBOMCRLF $tmp-inBOMCR
# BOM付きCRLF
${com} key=NF-4 ref=NF $tmp-inBOMCRLF > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST3-1 error"
# BOM付きCR
${com} key=NF-4 ref=NF $tmp-inBOMCR > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST3-2 error"
# pipe接続
# BOM付きCRLF
cat $tmp-inBOMCRLF | ${com} key=NF-4 ref=NF - > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST3-3 error"
# BOM付きCR
cat $tmp-inBOMCR | ${com} key=NF-4 ref=NF - > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST3-4 error"
###########################################
# TEST4
# TEST2の変形版
# NFC文字(濁音/半濁音が1文字)とNFD文字(濁音/半濁音が清音+結合用濁点/結合用半濁点の2文字で構成される)を同値化して処理する
# 同じ店番号と伝票番号を持つレコードのうち、項目3が同じ最後のフロックを出力する。
# (data のレイアウト)
# 1:店番号 2:伝票No 3:行 4:項目1 5:項目2 6:項目3
# 同一店名の2番目にはNFDを使用 同一店名/同一伝票番号で同一項目3の2番目(ふたつめの「ザジズゼゾ」)にはNFDを使用
cat << FIN > $tmp-in
がぎぐげご店 0001 1 A 15 あいうえお
がぎぐげご店 0001 2 K 25 かきくけこ
ざじずぜぞ店 0001 3 F 35 さしすせそ
ざじずぜぞ店 0001 1 A 15 ザジズゼゾ
ざじずぜぞ店 0001 3 F 25 ザジズゼゾ
だぢづでど店 0002 2 X 30 たちつてと
だぢづでど店 0002 1 H 80 なにぬねの
FIN
cat << FIN > $tmp-ans
がぎぐげご店 0001 2 K 25 かきくけこ
ざじずぜぞ店 0001 1 A 15 ザジズゼゾ
ざじずぜぞ店 0001 3 F 25 ザジズゼゾ
だぢづでど店 0002 1 H 80 なにぬねの
FIN
${com} key=NF-5/NF-4 ref=NF $tmp-in > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST4 error"
###########################################
# TEST5
# 空白処理の試験
# 欄数変化処理の試験
# 同じ店番号と伝票番号を持つレコードのうち、直近の日付のレコードを出力する。
# (data のレイアウト)
# 1:店番号 2:項目1 3:伝票番号 4:項目2 5:入力年 7:項目3 7:入力月日
# TEST5-1 正常終了の例
cat << FIN > $tmp-data
a店 1 0001 A 2008 1202 15
a店 2 0001 K 2008 1203 25
b店 3 0001 F 2008 1203 35
b店 1 0001 K 2008 1205 15
b店 3 0001 F 2008 1205 25
c店 2 0002 X 2008 1201 30
c店 1 0002 H 2008 1208 80
FIN
cat << FIN > $tmp-ans
a店 2 0001 K 2008 1203 25
b店 1 0001 K 2008 1205 15
b店 3 0001 F 2008 1205 25
c店 1 0002 H 2008 1208 80
FIN
${com} key=NF-6@NF-4 ref=NF-2@NF-1 $tmp-data > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST5-1 error"
# TEST5-2 空白処理の例
cat << FIN > $tmp-data
a店 1 0001 A 2008 1202 15
a店 2 0001 K 2008 1203 25
b店 3 0001 F 2008 1203 35
b店 1 0001 K 2008 1205 15
b店 3 0001 F 2008 1205 25
c店 2 0002 X 2008 1201 30
c店 1 0002 H 2008 1208 80
FIN
${com} key=NF-6@NF-4 ref=5@NF-1 $tmp-data > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST5-2 error"
# TEST5-3 欄数変化処理の例
# 第3行でそれまでの「項目2」が欠けて欄数が減っているがrefはNF相対襴位置指定なので正常に取得できる
# このためこの欄数変化は受理される
cat << FIN > $tmp-data
a店 1 0001 A 2008 1202 15
a店 2 0001 K 2008 1203 25
b店 3 0001 2008 1203 35
b店 1 0001 K 2008 1205 15
b店 3 0001 F 2008 1205 25
c店 2 0002 X 2008 1201 30
c店 1 0002 H 2008 1208 80
FIN
${com} key=1@3 ref=NF-2@NF-1 $tmp-data > $tmp-out
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST5-3 error"
# TEST5-4 欄数変化処理でエラーとなる例
# TEST5-3と同じく第3行において欄数変化が起きている
# TEST5-4ではref指定を「5@NF-1」としたため
# 第3行ではref指定の第5欄とNF-1欄が衝突する(エラーメッセージでは「交叉」としている)
# このためこの欄数変化は受理されない
echo ${name}のTEST5-4はエラーで終了するように設定されている
cat << FIN > $tmp-data
a店 1 0001 A 2008 1202 15
a店 2 0001 K 2008 1203 25
b店 3 0001 2008 1203 35
b店 1 0001 K 2008 1205 15
b店 3 0001 F 2008 1205 25
c店 2 0002 X 2008 1201 30
c店 1 0002 H 2008 1208 80
FIN
${com} key=1@3 ref=5@NF-1 $tmp-data > $tmp-out
[ $? -ne 0 ] ; ERROR_CHECK "TEST5-4 error"
###########################################
rm -f $tmp-*
echo "${pythonversion} ${name}" OK
exit 0
| true
|
1034de18474fd062762bad7ab7e49bd964ddca17
|
Shell
|
pandrewhk/apicli
|
/google
|
UTF-8
| 501
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
# Thin curl wrapper for the Google REST APIs; reads $key from ~/.googlerc.
. ~/.googlerc

# Optional leading HTTP verb (lowercase) becomes the request method.
case $1 in
delete|put|get|post)
	method=$(echo $1 | tr '[:lower:]' '[:upper:]')
	shift
	;;
esac

# "data" flag: send a JSON body read from stdin.
if [ "$1" = data ]; then
	args="$args -F @-;type=application/json"
	shift
fi

# Remaining words: key=value pairs join the query string, everything
# else becomes a path segment.
for p; do
	if [ -z "${p%%*=*}" ] ; then
		query="$query $p"
	else
		path="$path $p"
	fi
done

path=$(echo ${path# } | tr ' ' '/')
query=$(echo ${query# } | tr ' ' '&')

curl -v\
-# \
-H "Authorization: Bearer $key" \
-X ${method:-GET} \
$args \
"https://www.googleapis.com/$path?$query" | less
| true
|
b7c05defeb76959368c96b03e556736755d3ab04
|
Shell
|
Shubashree/EEGLAB2Hadoop
|
/create_env.sh
|
UTF-8
| 730
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a Python virtualenv under $MY_STOR and build LAPACK, cvxopt
# and cvxpy into it. Requires $MY_STOR to be set.
set -e

ENV=$MY_STOR/python

# Create a virtual environment
pip install virtualenv
virtualenv "$ENV" --system-site-packages
# BUGFIX: the old line was "$ENV/python/bin/activate", which (a) doubled
# the trailing "python" path component and (b) executed the activate
# script in a child process, which has no effect on this shell — it must
# be sourced.
. "$ENV/bin/activate"

# Create lapack
wget http://www.netlib.org/lapack/lapack-3.4.2.tgz
tar -xvf lapack-3.4.2.tgz
rm lapack-3.4.2.tgz
cd lapack-3.4.2
cp make.inc.example make.inc
make blaslib
make

# Install cvxopt
wget http://abel.ee.ucla.edu/src/cvxopt-1.1.5.tar.gz
tar -xvf cvxopt-1.1.5.tar.gz
rm cvxopt-1.1.5.tar.gz
cd cvxopt-1.1.5/src
# setup.py probably needs some tuning... change BLAS/LAPACK dir
python setup.py install

# Install cvxpy
cd "$MY_STOR"
svn checkout http://cvxpy.googlecode.com/svn/trunk/ cvxpy-read-only
python cvxpy-read-only/setup.py install
python cvxpy-read-only/setup.py test
| true
|
05248b509fa91ad9d7b479add14ae9b00ad7c387
|
Shell
|
plkx/mordred
|
/extra/ci/test_script.sh
|
UTF-8
| 431
| 3.0625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# CI test driver: run the mordred test suite, the README doctests,
# every example script, and the flake8 lint check.
set -e

source ./extra/ci/common.sh

PYTHON=python

conda list

# Add coverage collection when the CI matrix requests it.
test_flags="-q"
if [[ -n "$COVERAGE" ]]; then
    test_flags="-q --with-coverage"
fi
info $PYTHON -m mordred.tests $test_flags

echo "test README.rst" >&2
info $PYTHON -m doctest README.rst

for example in $(find examples -name '*.py'); do
    echo "test $example" >&2
    PYTHONPATH=. info $PYTHON $example > /dev/null
done

info $PYTHON setup.py flake8
| true
|
5a05ce7f0266babd7c308daec698ff7cebb52e52
|
Shell
|
IBMCloudDevOps/bluemix-openstack-deploy
|
/setup_tools/install_python.sh
|
UTF-8
| 383
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build and install a local Python 2.7.9 under <project-root>/.localpython.
# Stop on the first failure instead of blindly continuing the build chain.
set -e

# Directory containing this script, and its parent (the project root).
CURR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
DIR="$(dirname "$CURR")"

# -p makes re-runs idempotent: plain mkdir aborted when the directories
# already existed from a previous attempt.
mkdir -p "$DIR/src"
mkdir -p "$DIR/.localpython"
cd "$DIR/src"

# Get/Unpack python
wget https://www.python.org/ftp/python/2.7.9/Python-2.7.9.tgz
tar xfz Python-2.7.9.tgz

# Configure/install python
cd Python-2.7.9/
./configure CFLAGS=-w --prefix="${DIR}/.localpython" --enable-ipv6
make
make install
| true
|
6f9832b554c4cd7220f584db1284c6ffdcf91078
|
Shell
|
NattyNarwhal/wmx
|
/rsharman-patch/example-wmx-keybding-files/move-this-window-to-previous-channel
|
UTF-8
| 246
| 2.671875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#! /bin/sh
# Move this window to the previous channel
#
# Queries the window-manager control socket on localhost:6999 for the
# current channel, and — when a previous channel exists — sends
# "wchannel <window-id> <n-1>" back to the same socket. $1 is the window
# id, spliced into the gawk program via the '"$1"' quoting trick.
# NOTE(review): port 6999 is presumably wmx's control channel — confirm.
echo channel | nc localhost 6999 | \
gawk '/Current channel is/{ n = $4;
if (--n > 0) {
print "wchannel",'"$1"', n ;
}
print "quit"
}' | \
nc localhost 6999
| true
|
6492f62376f972d49b9f5ec581cc206edffb0cfa
|
Shell
|
bcskda/scripts
|
/diht-lectorium-compress-queue.sh
|
UTF-8
| 260
| 3.046875
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Batch-compress lecture recordings.
#   $1 - file listing one directory per line
#   $2 - input extension passed to the compressor (default: MTS)
#   $3 - output file name (default: source_compressed.mp4)

# Pass 1: verify every listed path exists before doing any work.
# BUGFIX: the original used `|| (echo ... && exit 1)` — the exit only
# terminated the subshell, so a missing path never actually aborted the
# script. Braces run in the current shell and abort for real.
while IFS= read -r x; do
	stat "$x" || { echo Error: "$x" does not exist; exit 1; }
done < "$1"

# Pass 2: run the compressor inside each directory; a failure is
# reported but the remaining entries are still processed.
while IFS= read -r x; do
	(cd "$x" && "$HOME/bin/diht-lectorium-compress.sh" "${2:-MTS}" "${3:-source_compressed.mp4}" && echo OK "$x") || echo Error "$x"
done < "$1"
| true
|
9057e118fe703bf9ff01a9f8a06735a25b8730a9
|
Shell
|
gtjamesa/.dotfiles
|
/shell/helpers/asdf
|
UTF-8
| 467
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash

# Re-sync asdf with ~/.tool-versions: register every plugin named there
# (first column of each line), update all plugins, then install the
# pinned versions. When krew is among the tools, also restore the
# kubectl krew plugin list from the dotfiles backup.
# NOTE(review): symlink_file is a helper defined elsewhere in these
# dotfiles — this function assumes it is already in scope.
asdf-reload() {
awk '{print $1}' "$HOME/.tool-versions" | xargs -I {} asdf plugin add {}
asdf plugin update --all
asdf install
if grep -q krew ~/.tool-versions; then
mkdir -p "$HOME/.krew"
# Symlink ~/.krew/backup to ~/.dotfiles/shell/.krew-plugins
[ ! -h "$HOME/.krew/backup" ] && symlink_file "$HOME/.dotfiles/shell/.krew-plugins" "$HOME/.krew/backup"
# Install plugins
kubectl krew install < "$HOME/.krew/backup"
fi
}
| true
|
51f98169ac776be093ca5de1baa8016b64a9175e
|
Shell
|
00dani/dot-zsh
|
/zshenv
|
UTF-8
| 917
| 3.34375
| 3
|
[] |
no_license
|
# Initialise each of the XDG vars to their default values if they're unset.
# (`: ${VAR:=default}` assigns only when VAR is unset or empty; the `:`
# no-op just swallows the expansion.)
: ${XDG_CACHE_HOME:=~/.cache}
: ${XDG_CONFIG_HOME:=~/.config}
: ${XDG_DATA_HOME:=~/.local/share}
# For XDG_RUNTIME_DIR, pick a temporary directory - the spec actually
# *requires* that it's destroyed when the user logs out, so that's handled by a
# file in zlogout. The reason for suffixing /xdg-$UID is that TMPDIR does not
# necessarily belong to the current user exclusively (although it does under
# MacOS and PAM). This is especially true if $TMPDIR isn't set at all and /tmp
# is used!
# The nested expansion defaults TMPDIR to /tmp and strips one trailing slash.
[[ -z $XDG_RUNTIME_DIR ]] && XDG_RUNTIME_DIR=${${TMPDIR-/tmp}%/}/xdg-$UID
# The runtime dir must exist and be private to the user (mode 0700).
if ! [[ -d $XDG_RUNTIME_DIR ]]; then
mkdir -p $XDG_RUNTIME_DIR
chmod 0700 $XDG_RUNTIME_DIR
fi
export XDG_CONFIG_HOME XDG_CACHE_HOME XDG_DATA_HOME XDG_RUNTIME_DIR
# The real zsh config lives in XDG_CONFIG_HOME! ;)
: ${ZDOTDIR:=$XDG_CONFIG_HOME/zsh}
source $ZDOTDIR/zshenv
| true
|
da06b3320790d2bf5960a19bc3d99efe73a6d682
|
Shell
|
raplin/electric
|
/src/sd-image/old-scripts/sudo_get_ssid_name.sh
|
UTF-8
| 213
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Print the configured SSID value(s) from wpa_supplicant.conf — the text
# between double quotes on lines mentioning "ssid". Re-execs under sudo
# when not running as root, since the config file is root-readable only.
[ "root" != "$USER" ] && exec sudo $0 "$@"
# A single awk replaces the old `grep -n | awk` pipeline: the -n line
# numbers were discarded by the quote-splitting anyway, so the extra
# grep process bought nothing.
awk -F \" '/ssid/{print $2}' /etc/wpa_supplicant/wpa_supplicant.conf
| true
|
6fe176219de6eeea55651de2a80a32d37cfa6914
|
Shell
|
fabledfoe/ferda
|
/.sandos
|
UTF-8
| 386
| 2.515625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# One-shot workstation setup: pin Node 14 as the nvm default, then clone
# and install both RollKall portal repositories under $HOME.

printf "Setting up projects…\n"

printf "\n Install Node 14 and set as default \n"
source ~/.zshrc
nvm install 14
nvm use 14
nvm alias default 14

printf "Clone and install portal repos \n"
cd ~
for repo in RollKall-Portal RollKall-Client-Portal; do
	git clone "git@github.com:RollKall/${repo}.git"
	cd "${repo}"
	yarn
	cd ..
done
| true
|
d9a4b1ac1f19f73cc68124ac722a707a1d920e56
|
Shell
|
ComplianceAsCode/content
|
/linux_os/guide/system/network/network_sniffer_disabled/tests/no_promisc_interfaces.pass.sh
|
UTF-8
| 144
| 2.578125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Test fixture: take every interface reported in promiscuous mode out of
# promiscuous mode, so the associated compliance rule passes.
ip link show | grep -i promisc | sed 's/^.*: \(.*\):.*$/\1/' |
while read -r iface; do
	ip link set dev "$iface" promisc off
done
| true
|
0966479fbfa60f0b71c016d25635c711ab8d71f1
|
Shell
|
creidinger/virtualbox-backup-shell-scripts
|
/vm-shutdown.sh
|
UTF-8
| 809
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Purpose: Shutdown all Virtualbox VMS
#
# Location: Stored on the host machine 192.168.1.222
# Directory: ~/
#
# Author: Chase Reidinger

# List the running VMs: grab only the quoted names from vboxmanage
# output, then strip the quotes, leaving one name per line.
# NOTE(review): the list is later passed unquoted, so a VM name that
# itself contains whitespace would be split apart — confirm names are
# single words before relying on this.
VMS_RUNNING=$(vboxmanage list runningvms | grep -o '".*"'| sed 's/"//g')

# Shut down each VM named in the arguments.
function shutdown_vms(){
# for each vm in the list -> shutdown
for VM in $@
do
echo "Shutting down VM: ${VM}"
vboxmanage controlvm ${VM} poweroff
done
return 0
}

echo -e "Start ${0}\n"

# Nothing running: report and exit early with success.
if [ ! -z "${VMS_RUNNING}" ]
then
echo -e "List running VMS: \n ${VMS_RUNNING}\n"
else
echo -e "No VirtualBox VMs are running."
echo -e "\nEnd ${0}"
exit 0
fi

echo -e "\n======================================\n"

echo -e "\nExecuting shutdown_vms()\n"
shutdown_vms $VMS_RUNNING

echo -e "\nEnd ${0}"
exit 0
| true
|
0724c75178a8247a1cf29bda63fdcea139d762df
|
Shell
|
jlauman/data_engineering_project_04
|
/bin/build_spark_image.sh
|
UTF-8
| 1,543
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Create the cluster network, then build the base Spark image whose
# Dockerfile is supplied inline via the heredoc below.
docker network create -d bridge --subnet 192.168.0.0/24 --gateway 192.168.0.1 sparknet
# The heredoc delimiter is unquoted, so the shell expands it: the \$(...)
# escapes below keep those substitutions for Docker's RUN steps instead
# of evaluating them here on the host.
docker build --tag local:spark --file - ./ <<EOF
FROM centos:centos7
RUN yum -y install epel-release &&\
    yum -y update &&\
    yum -y install openssh-server openssh-clients &&\
    yum -y install java-1.8.0-openjdk &&\
    yum -y install ack wget curl &&\
    wget https://downloads.lightbend.com/scala/2.11.12/scala-2.11.12.rpm &&\
    yum -y install scala-2.11.12.rpm
RUN wget http://apache.cs.utah.edu/spark/spark-2.4.3/spark-2.4.3-bin-hadoop2.7.tgz &&\
    tar xzf spark-2.4.3-bin-hadoop2.7.tgz &&\
    mv spark-2.4.3-bin-hadoop2.7 /usr/local/spark
RUN ssh-keygen -N '' -t dsa -f /etc/ssh/ssh_host_dsa_key &&\
    ssh-keygen -N '' -t rsa -f /etc/ssh/ssh_host_rsa_key &&\
    ssh-keygen -N '' -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key &&\
    ssh-keygen -N '' -t ed25519 -f /etc/ssh/ssh_host_ed25519_key
COPY ./etc/alias.sh ./etc/spark.sh /etc/profile.d/
COPY ./etc/id_rsa ./etc/id_rsa.pub /root/.ssh/
COPY ./etc/id_rsa.pub /root/.ssh/authorized_keys
RUN rm -rf /root/.ssh/known_hosts &&\
    touch /root/.ssh/known_hosts &&\
    echo "worker01 \$(cat /etc/ssh/ssh_host_ecdsa_key.pub)" >> /root/.ssh/known_hosts &&\
    echo "worker02 \$(cat /etc/ssh/ssh_host_ecdsa_key.pub)" >> /root/.ssh/known_hosts &&\
    echo "worker03 \$(cat /etc/ssh/ssh_host_ecdsa_key.pub)" >> /root/.ssh/known_hosts
COPY ./etc/slaves /usr/local/spark/conf/slaves
COPY ./etc/spark-env.sh /usr/local/spark/conf/spark-env.sh
CMD [ "/bin/sh" ]
EOF
| true
|
d6d76980bc69ebed240efa71df18ebd522f4bbd0
|
Shell
|
joaniznardo/smxm7
|
/uf2-ftp-email/labs/lab44b/boot/vm4/provision.sh
|
UTF-8
| 2,159
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Vagrant provisioner for the mail-lab VM: installs vim and nginx,
# publishes the Thunderbird autoconfig XML, then smoke-tests the mail
# stack (SMTP send, IMAP read) and dumps DNS/package versions.
set -eux
# Domain this VM was brought up under; substituted into the web content.
config_domain=$(hostname --domain)
apt-get install -y --no-install-recommends vim
cat >/etc/vim/vimrc.local <<'EOF'
syntax on
set background=dark
set esckeys
set ruler
set laststatus=2
set nobackup
autocmd BufNewFile,BufRead Vagrantfile set ft=ruby
EOF
# install nginx to host the Thunderbird Autoconfiguration xml file.
# thunderbird will make a request alike:
#   GET /.well-known/autoconfig/mail/config-v1.1.xml?emailaddress=alice%40example.com
# see https://wiki.mozilla.org/Thunderbird:Autoconfiguration:ConfigFileFormat
# see https://developer.mozilla.org/en-US/docs/Mozilla/Thunderbird/Autoconfiguration
# see https://developer.mozilla.org/en-US/docs/Mozilla/Thunderbird/Autoconfiguration/FileFormat/HowTo
apt-get install -y --no-install-recommends nginx
cp -R /vagrant/public/{.well-known,*} /var/www/html
# Substitute the placeholder domain into all text files (PNGs excluded).
find /var/www/html \
    -type f \
    -not \( \
        -name '*.png' \
    \) \
    -exec sed -i -E "s,@@config_domain@@,$config_domain,g" {} \;
# send a test email from the command line.
echo Hello World | sendmail alice
# dump the received email directly from the server store.
sleep 2; sudo cat /var/vmail/$config_domain/alice/new/*.mail
# send a test email from alice to bob.
python3 /var/www/html/examples/python/smtp/send-mail/example.py
# dump the received email directly from the server store.
sleep 2; sudo cat /var/vmail/$config_domain/bob/new/*.mail
# send an authenticated test email from bob to alice.
python3 /var/www/html/examples/python/smtp/send-mail-with-authentication/example.py
# dump the received email directly from the server store.
sleep 2; sudo cat /var/vmail/$config_domain/alice/new/*.mail
# list the messages on the alice imap account.
python3 /var/www/html/examples/python/imap/list-mail/example.py
# print software versions.
dpkg-query -f '${Package} ${Version}\n' -W pdns-server
dpkg-query -f '${Package} ${Version}\n' -W postfix
dpkg-query -f '${Package} ${Version}\n' -W dovecot-imapd
# list the DNS zone.
##pdnsutil list-all-zones
##pdnsutil check-zone $config_domain
##pdnsutil list-zone $config_domain
# query for all records.
dig any $config_domain
dig any joandaustria.org
| true
|
03010ba991f76fe6b8391f875402d98d96445fcb
|
Shell
|
nathanph/zsh-completions
|
/src/go/_circleci
|
UTF-8
| 20,256
| 2.859375
| 3
|
[] |
no_license
|
#compdef circleci
# -----------------------------------------------------------------------------
# The BSD-3-Clause License
#
# Copyright (c) 2018, Koichi Shiraishi
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of que nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# github.com/CircleCI-Public/circleci-cli
#
# -----------------------------------------------------------------------------
#
# This project is the seed for CircleCI's new command-line application.
#
#
# Usage:
# circleci [command]
#
# Available Commands:
# config Operate on build config files
# diagnostic Check the status of your CircleCI CLI.
# help Help about any command
# local Debug jobs on the local machine
# namespace Operate on namespaces
# orb Operate on orbs
# query Query the CircleCI GraphQL API.
# setup Setup the CLI with your credentials
# update Update the tool
# version Display version information
#
# Flags:
# -h, --help help for circleci
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
# --verbose Enable verbose logging.
# Use "circleci [command] --help" for more information about a command.
#
# -----------------------------------------------------------------------------
#
# Operate on build config files
#
#
# Usage:
# circleci config [command]
#
# Available Commands:
# pack Pack up your CircleCI configuration into a single file.
# process Process the config.
# validate Check that the config file is well formed.
#
# Flags:
# -h, --help help for config
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
# --verbose Enable verbose logging.
# Use "circleci config [command] --help" for more information about a command.
#
# -----------------------------------------------------------------------------
#
# Pack up your CircleCI configuration into a single file.
#
#
# Usage:
# circleci config pack <path> [flags]
#
# Args:
# <path> The path to your config (use "-" for STDIN)
#
#
# Flags:
# -h, --help help for pack
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI (default "7eef1e5278edbf095ba225dcee6c0a01513c26c6")
#
# -----------------------------------------------------------------------------
#
# Process the config.
#
#
# Usage:
# circleci config process <path> [flags]
#
# Args:
# <path> The path to your config (use "-" for STDIN)
#
#
# Flags:
# -h, --help help for process
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI (default "7eef1e5278edbf095ba225dcee6c0a01513c26c6")
#
# -----------------------------------------------------------------------------
#
# Check that the config file is well formed.
#
#
# Usage:
# circleci config validate <path> [flags]
# Aliases:
# validate, check
#
# Args:
# <path> The path to your config (use "-" for STDIN)
#
#
# Flags:
# -h, --help help for validate
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI (default "7eef1e5278edbf095ba225dcee6c0a01513c26c6")
#
# -----------------------------------------------------------------------------
#
# Check the status of your CircleCI CLI.
#
#
# Usage:
# circleci diagnostic [flags]
#
# Flags:
# -h, --help help for diagnostic
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
# --verbose Enable verbose logging.
#
# -----------------------------------------------------------------------------
#
# Help provides help for any command in the application.
# Simply type circleci help [path to command] for full details.
#
#
# Usage:
# circleci help [command] [flags]
#
# Flags:
# -h, --help help for help
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
# --verbose Enable verbose logging.
#
# -----------------------------------------------------------------------------
#
# Debug jobs on the local machine
#
#
# Usage:
# circleci local [command]
#
# Available Commands:
# execute Run a job in a container on the local machine
#
# Flags:
# -h, --help help for local
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
# --verbose Enable verbose logging.
# Use "circleci local [command] --help" for more information about a command.
#
# -----------------------------------------------------------------------------
#
# Operate on namespaces
#
#
# Usage:
# circleci namespace [command]
#
# Available Commands:
# create create a namespace
#
# Flags:
# -h, --help help for namespace
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
# --verbose Enable verbose logging.
# Use "circleci namespace [command] --help" for more information about a command.
#
# -----------------------------------------------------------------------------
#
# Operate on orbs
#
#
# Usage:
# circleci orb [command]
#
# Available Commands:
# create Create an orb in the specified namespace
# list List orbs
# process Validate an orb and print its form after all pre-registration processing
# publish Publish an orb to the registry
# source Show the source of an orb
# validate Validate an orb.yml
#
# Flags:
# -h, --help help for orb
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
# Use "circleci orb [command] --help" for more information about a command.
#
# -----------------------------------------------------------------------------
#
# Create an orb in the specified namespace
# Please note that at this time all orbs created in the registry are world-readable.
#
#
# Usage:
# circleci orb create <namespace>/<orb> [flags]
#
# Flags:
# -h, --help help for create
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
#
# -----------------------------------------------------------------------------
#
# List orbs
#
#
# Usage:
# circleci orb list <namespace> [flags]
#
# Args:
# <namespace> The namespace used for the orb (i.e. circleci) (Optional)
#
#
# Flags:
# -h, --help help for list
# -u, --uncertified include uncertified orbs
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
#
# -----------------------------------------------------------------------------
#
# Validate an orb and print its form after all pre-registration processing
#
#
# Usage:
# circleci orb process <path> [flags]
#
# Args:
# <path> The path to your orb (use "-" for STDIN)
#
#
# Flags:
# -h, --help help for process
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
#
# -----------------------------------------------------------------------------
#
# Publish an orb to the registry.
# Please note that at this time all orbs published to the registry are world-readable.
#
#
# Usage:
# circleci orb publish <path> <orb> [flags]
# circleci orb publish [command]
#
# Available Commands:
# increment Increment a released version of an orb
# promote Promote a development version of an orb to a semantic release
#
# Args:
# <path> The path to your orb (use "-" for STDIN)
# <orb> A fully-qualified reference to an orb. This takes the form namespace/orb@version
#
#
# Flags:
# -h, --help help for publish
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
# Use "circleci orb publish [command] --help" for more information about a command.
#
# -----------------------------------------------------------------------------
#
# Increment a released version of an orb.
# Please note that at this time all orbs incremented within the registry are world-readable.
#
# Example: 'circleci orb publish increment foo/orb.yml foo/bar minor' => foo/bar@1.1.0
#
#
# Usage:
# circleci orb publish increment <path> <namespace>/<orb> <segment> [flags]
# Aliases:
# increment, inc
#
# Args:
# <path> The path to your orb (use "-" for STDIN)
# <segment> "major"|"minor"|"patch"
#
#
# Flags:
# -h, --help help for increment
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
#
# -----------------------------------------------------------------------------
#
# Promote a development version of an orb to a semantic release.
# Please note that at this time all orbs promoted within the registry are world-readable.
#
# Example: 'circleci orb publish promote foo/bar@dev:master major' => foo/bar@1.0.0
#
#
# Usage:
# circleci orb publish promote <orb> <segment> [flags]
#
# Args:
# <orb> A fully-qualified reference to an orb. This takes the form namespace/orb@version
# <segment> "major"|"minor"|"patch"
#
#
# Flags:
# -h, --help help for promote
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
#
# -----------------------------------------------------------------------------
#
# Show the source of an orb
#
#
# Usage:
# circleci orb source <orb> [flags]
#
# Examples:
# circleci orb source circleci/python@0.1.4 # grab the source at version 0.1.4
# circleci orb source my-ns/foo-orb@dev:latest # grab the source of dev release "latest"
#
# Args:
# <orb> A fully-qualified reference to an orb. This takes the form namespace/orb@version
#
#
# Flags:
# -h, --help help for source
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
#
# -----------------------------------------------------------------------------
#
# Validate an orb.yml
#
#
# Usage:
# circleci orb validate <path> [flags]
#
# Args:
# <path> The path to your orb (use "-" for STDIN)
#
#
# Flags:
# -h, --help help for validate
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
#
# -----------------------------------------------------------------------------
#
# Query the CircleCI GraphQL API.
#
#
# Usage:
# circleci query PATH [flags]
#
# Args:
# PATH The path to your query (use "-" for STDIN)
#
#
# Flags:
# -h, --help help for query
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
# --verbose Enable verbose logging.
#
# -----------------------------------------------------------------------------
#
# Setup the CLI with your credentials
#
#
# Usage:
# circleci setup [flags]
#
# Flags:
# -h, --help help for setup
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
# --verbose Enable verbose logging.
#
# -----------------------------------------------------------------------------
#
# Update the tool
#
#
# Usage:
# circleci update [command]
#
# Available Commands:
# build-agent Update the build agent to the latest version
# check Check if there are any updates available
# install Update the tool to the latest version
#
# Flags:
# -h, --help help for update
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
# --verbose Enable verbose logging.
# Use "circleci update [command] --help" for more information about a command.
#
# -----------------------------------------------------------------------------
#
# Display version information
#
#
# Usage:
# circleci version [flags]
#
# Flags:
# -h, --help help for version
#
# Global Flags:
# --host string URL to your CircleCI host (default "https://circleci.com")
# --token string your token for using CircleCI
# --verbose Enable verbose logging.
#
# -----------------------------------------------------------------------------
# Completion function for the CircleCI CLI (circleci).
# NOTE(review): the CLI help text documents --verbose, but this file completes
# --debug instead -- confirm against the installed circleci version.
function _circleci() {
  local context curcontext=$curcontext state line ret=1
  declare -A opt_args

  # Top-level subcommands and their one-line descriptions.
  local -a commands
  commands=(
    'config:Operate on build config files'
    'diagnostic:Check the status of your CircleCI CLI.'
    'help:Help about any command'
    'local:Debug jobs on the local machine'
    'namespace:Operate on namespaces'
    'orb:Operate on orbs'
    'query:Query the CircleCI GraphQL API.'
    'setup:Setup the CLI with your credentials'
    'update:Update the tool'
    'version:Display version information'
  )

  # Flags shared by every subcommand.
  local -a _global_flags
  _global_flags=(
    '--host[URL to your CircleCI host \(default "https://circleci.com"\)]:host'
    {-h,--help}'[help for circleci]'
    '--token[your token for using CircleCI]:token'
    '--debug[Enable debug logging.]'
  )

  _arguments -C \
    "1: :{_describe 'circleci command' commands}" \
    '*:: :->args' \
    && ret=0

  # After '*:: :->args' the $words array is rebased, so $words[1] is the
  # subcommand currently being completed.
  case $words[1] in
  config)
    local -a config_cmds
    config_cmds=(
      'pack:Pack up your CircleCI configuration into a single file.'
      'process:Process the config.'
      'validate:Check that the config file is well formed.'
    )
    _arguments \
      "1: :{_describe 'config subcommand' config_cmds}" \
      '*:: :->args' \
      ${_global_flags[@]} \
      && ret=0
    case $words[1] in
    pack|process|validate|check)
      # Fixed: the original left a trailing line-continuation backslash
      # directly before ';;', fusing the terminator onto the command line.
      _arguments \
        ${_global_flags[@]} \
        '*:<path> The path to your orb (use "-" for STDIN):_files'
      ;;
    esac
    ;;
  namespace)
    local -a namespace_cmds
    namespace_cmds=(
      'create:create a namespace'
    )
    _arguments \
      "1: :{_describe 'namespace subcommand' namespace_cmds}" \
      '*:: :->args' \
      ${_global_flags[@]} \
      && ret=0
    case $words[1] in
    create)
      _arguments \
        "1:name" \
        "2:vcs" \
        "3:org-name" \
        ${_global_flags[@]}
      ;;
    esac
    ;;
  orb)
    local -a orb_cmds
    orb_cmds=(
      'create:Create an orb in the specified namespace'
      'list:List orbs'
      'process:Validate an orb and print its form after all pre-registration processing'
      'publish:Publish an orb to the registry'
      'source:Show the source of an orb'
      'validate:Validate an orb.yml'
    )
    _arguments \
      "1: :{_describe 'orb subcommand' orb_cmds}" \
      '*:: :->args' \
      ${_global_flags[@]} \
      && ret=0
    case $words[1] in
    create)
      _arguments \
        ${_global_flags[@]} \
        "*:<namespace>/<name>"
      ;;
    list)
      _arguments \
        {-u,--uncertified}'[include uncertified orbs]' \
        ${_global_flags[@]} \
        "*:<namespace> \(optional\)"
      ;;
    process)
      _arguments \
        ${_global_flags[@]} \
        '*:<path> The path to your orb (use "-" for STDIN):_files'
      ;;
    publish)
      local -a orb_publish_cmds
      orb_publish_cmds=(
        'increment:Increment a released version of an orb'
        'promote:Promote a development version of an orb to a semantic release'
      )
      _arguments \
        ${_global_flags[@]} \
        "1: :{_describe 'orb publish subcommand' orb_publish_cmds}"
      case $words[1] in
      increment)
        _arguments \
          '1:<path> The path to your orb (use "-" for STDIN):_files' \
          "2:<namespace/orb> A fully-qualified reference to an orb. This takes the form namespace/orb@version]" \
          "3:segment:(major minor patch)" \
          ${_global_flags[@]}
        ;;
      promote)
        _arguments \
          '1:<path> The path to your orb (use "-" for STDIN):path:_files' \
          "2:segment:segment:(major minor patch)" \
          ${_global_flags[@]}
        ;;
      *)
        _arguments \
          '1:<path> The path to your orb (use "-" for STDIN):_files' \
          "2:<orb> A fully-qualified reference to an orb. This takes the form namespace/orb@version" \
          ${_global_flags[@]}
        ;;
      esac
      ;;
    source)
      _arguments \
        "*:<orb> A fully-qualified reference to an orb. This takes the form namespace/orb@version:_files" \
        ${_global_flags[@]}
      ;;
    validate)
      _arguments \
        '1:<path> The path to your orb (use "-" for STDIN):_files' \
        ${_global_flags[@]}
      ;;
    esac
    ;;
  query)
    _arguments \
      '1:<path> The path to your orb (use "-" for STDIN):_files' \
      ${_global_flags[@]} \
      && ret=0
    ;;
  update)
    local -a update_cmds
    update_cmds=(
      'build-agent:Update the build agent to the latest version'
      'check:Check if there are any updates available'
      'install:Update the tool to the latest version'
    )
    _arguments \
      "1: :{_describe 'update subcommand' update_cmds}" \
      '*:: :->args' \
      ${_global_flags[@]} \
      && ret=0
    case $words[1] in
    build-agent|check|install)
      _arguments \
        ${_global_flags[@]}
      ;;
    esac
    ;;
  # 'query' is handled by its dedicated arm above; listing it here again is
  # harmless (first match wins) and kept for parity with the upstream file.
  (diagnostic|query|setup|version)
    _arguments \
      ${_global_flags[@]} \
      && ret=0
    ;;
  esac

  return ret
}
# Dispatch with the original word list. The previous "$*" joined every word
# into a single string, which breaks the completion system's argument handling.
_circleci "$@"

# vim:ft=zsh:et:sts=2:sw=2
| true
|
dd3f7997bf335c0a4629d3bf6b238dbdfae85cdf
|
Shell
|
zhaogaolong/graphql-metrics
|
/patch/graphql_patch.sh
|
UTF-8
| 232
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Append the MySchema patch (patch/graphql.txt) to the vendored
# graph-gophers/graphql-go source file, exactly once (idempotent:
# the MySchema marker is used to detect a prior application).
#
# Env:
#   prefix - vendor directory root (defaults to "vendor")

echo "prefix: $prefix"

# Quoting keeps the test well-formed when $prefix is empty; the original
# unquoted [ -z $prefix ] degenerated to a one-argument test.
if [ -z "$prefix" ]; then
  prefix=vendor
fi

graphql_path=$prefix/github.com/graph-gophers/graphql-go/graphql.go

# Apply the patch only when the marker is absent.
if ! grep -q MySchema "$graphql_path"; then
  cat patch/graphql.txt >> "$graphql_path"
fi
| true
|
8d9f6d631fa50e9c64f06c1a170cc19d2d1725e6
|
Shell
|
neilhsieh94/getBranchUrl
|
/get_branch_url.sh
|
UTF-8
| 3,079
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
##############################################################
# README
#
# Quick Set Up
# 1. Copy this file into your home directory
# a) In terminal: cd ~
# b) To copy, open the current folder in finder,
# In Terminal: open `pwd`
# c) Copy and pasta in there
#
# 2. Open your .bash_profile
# a) From anywhere in terminal,
# In Terminal: code ~/.bash_profile
#
# 3. Copy paste this in the last line
#
# source ~/get_branch_url.sh
#
# (this will allow you to run this file in all new terminals)
#
# 4. Restart your terminal and voila!
#
#
# To change name of function call, simply change the
# function name below where indicated. Then restart your terminal.
#
##############################################################
# Grabs git branch
# Print the current git branch formatted as " (branch)"; prints nothing when
# the working directory is not inside a git repository (git's error output
# is discarded).
parse_git_branch() {
  git branch 2> /dev/null | sed -n 's/^\* \(.*\)/ (\1)/p'
}
# Scan ./dev-options.js and echo every line that contains a whitespace-separated
# token glob-matching $1 (e.g. "fullHubUrl:").
# Globals: filename, arg1, eachLine and IFS are all set as side effects; in
#          particular IFS is left set to " " after the first iteration.
# Arguments: $1 - token pattern to look for.
# Outputs: each matching line on stdout (once per matching token in the line).
searchForLine() {
  filename='dev-options.js'
  arg1=$1
  while read line
  do
    # reading each line
    IFS=" " #setting space as delimiter
    read -ra eachLine <<<"$line" #reading str as an array as tokens separated by IFS
    for i in "${eachLine[@]}"
    do
      # Unquoted RHS: $arg1 is treated as a glob pattern, not a literal string.
      if [[ $i == $arg1 ]]
      then
        local retval=$line
        echo $retval
      fi
    done
  done < $filename
}
# Split a string on a single-character delimiter and print one field.
#
# Arguments:
#   $1 - string to split
#   $2 - delimiter character (used as the field separator)
#   $3 - zero-based index of the field to print
# Outputs: the selected field on stdout (empty when the index is out of range).
findInString() {
  local str=$1    # string to split
  local delim=$2  # delimiter character
  local idx=$3    # zero-based field index
  # local IFS keeps the caller's IFS intact (the original leaked it globally).
  local IFS="$delim"
  local -a fields
  read -r -a fields <<<"$str"
  # Quoted expansion prevents accidental globbing/word-splitting of the field.
  echo "${fields[$idx]}"
}
function getbranch() {
local gitBranch=$(parse_git_branch)
gitBranch=$(findInString "$gitBranch" "(" "1")
gitBranch=$(findInString "$gitBranch" ")" "0")
if [ `expr "$gitBranch" : '.*'` -ne "0" ]
then
local fullHubUrl
local hyper
local httpObj
local hubsObj
local finalUrl
filename='dev-options.js'
fullHubUrl=$(searchForLine "fullHubUrl:")
read -ra urlArr <<<"$fullHubUrl" #reading str as an array as tokens separated by IFS
for word in "${urlArr[@]}"
do
if [[ $word == *"http"* ]]
then
fullHubUrl="${word:1:${#word}-2}"
fi
done
if [[ $fullHubUrl == *"{"* ]]
then
local tempWord=$fullHubUrl
hyper=$(findInString "$tempWord" "$" "0")
local tempStr=$(findInString "$fullHubUrl" "{" "1")
httpObj=$(findInString "$tempStr" "}" "0")
else
finalUrl=$fullHubUrl
fi
if [[ `expr "$httpObj" : '.*'` -ne "0" ]]
then
if [[ $httpObj == *"."* ]]
then
hubsObj=$(findInString "$httpObj" "." "1")
else
hubsObj=$httpObj
fi
local relUrl=$(searchForLine "$hubsObj:")
relUrl=$(findInString "$relUrl" " " "1")
relUrl=$(findInString "$relUrl" "''" "1")
finalUrl="${hyper}${relUrl}"
fi
echo "${finalUrl}?ufcc_onbrand_branch=${gitBranch}"
else
echo "Bruh, you're not on a git branch."
fi
}
| true
|
ff85c0af8bf2b47535498bf2d949f755d4bf3802
|
Shell
|
commshare/testLiveSRS
|
/trunk/objs/_srs_build_summary.sh
|
UTF-8
| 7,041
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Print a colorized post-build summary for SRS: which optional features were
# enabled/disabled by ./configure and how to start the server. Output only;
# this script changes nothing on disk.
#####################################################################################
# linux shell color support.
# ANSI escape sequences; BLACK resets to the terminal default.
RED="\\e[31m"
GREEN="\\e[32m"
YELLOW="\\e[33m"
BLACK="\\e[0m"
echo -e "${GREEN}build summary:${BLACK}"
echo -e "  ${BLACK}+------------------------------------------------------------------------------------${BLACK}"
echo -e "  |${GREEN}{disabled} gperf @see: https://github.com/simple-rtmp-server/srs/wiki/v1_CN_GPERF${BLACK}"
echo -e "  |    ${GREEN}{disabled} gmc @see: http://google-perftools.googlecode.com/svn/trunk/doc/heap_checker.html${BLACK}"
echo -e "  |        ${GREEN}{disabled} gmc: gperf memory check${BLACK}"
echo -e "  |        ${GREEN}{disabled} env PPROF_PATH=./objs/pprof HEAPCHECK=normal ./objs/srs -c conf/console.conf # start gmc${BLACK}"
echo -e "  |        ${GREEN}{disabled} killall -2 srs # or CTRL+C to stop gmc${BLACK}"
echo -e "  |    ${GREEN}{disabled} gmp @see: http://google-perftools.googlecode.com/svn/trunk/doc/heapprofile.html${BLACK}"
echo -e "  |        ${GREEN}{disabled} gmp: gperf memory profile${BLACK}"
echo -e "  |        ${GREEN}{disabled} rm -f gperf.srs.gmp*; ./objs/srs -c conf/console.conf # start gmp${BLACK}"
echo -e "  |        ${GREEN}{disabled} killall -2 srs # or CTRL+C to stop gmp${BLACK}"
echo -e "  |        ${GREEN}{disabled} ./objs/pprof --text objs/srs gperf.srs.gmp* # to analysis memory profile${BLACK}"
echo -e "  |    ${GREEN}{disabled} gcp @see: http://google-perftools.googlecode.com/svn/trunk/doc/cpuprofile.html${BLACK}"
echo -e "  |        ${GREEN}{disabled} gcp: gperf cpu profile${BLACK}"
echo -e "  |        ${GREEN}{disabled} rm -f gperf.srs.gcp*; ./objs/srs -c conf/console.conf # start gcp${BLACK}"
echo -e "  |        ${GREEN}{disabled} killall -2 srs # or CTRL+C to stop gcp${BLACK}"
echo -e "  |        ${GREEN}{disabled} ./objs/pprof --text objs/srs gperf.srs.gcp* # to analysis cpu profile${BLACK}"
echo -e "  ${BLACK}+------------------------------------------------------------------------------------${BLACK}"
echo -e "  |${GREEN}{disabled} gprof @see: https://github.com/simple-rtmp-server/srs/wiki/v1_CN_GPROF${BLACK}"
echo -e "  |${GREEN}{disabled} gprof: GNU profile tool, @see: http://www.cs.utah.edu/dept/old/texinfo/as/gprof.html${BLACK}"
echo -e "  |    ${GREEN}{disabled} rm -f gmon.out; ./objs/srs -c conf/console.conf # start gprof${BLACK}"
echo -e "  |    ${GREEN}{disabled} killall -2 srs # or CTRL+C to stop gprof${BLACK}"
echo -e "  |    ${GREEN}{disabled} gprof -b ./objs/srs gmon.out > gprof.srs.log && rm -f gmon.out # gprof report to gprof.srs.log${BLACK}"
echo -e "  ${BLACK}+------------------------------------------------------------------------------------${BLACK}"
echo -e "  |${GREEN}{disabled} research: ./objs/research, api server, players, ts info, librtmp.${BLACK}"
echo -e "  ${BLACK}+------------------------------------------------------------------------------------${BLACK}"
echo -e "  |${YELLOW}{disabled} utest: ./objs/srs_utest, the utest for srs${BLACK}"
echo -e "  ${BLACK}+------------------------------------------------------------------------------------${BLACK}"
echo -e "  |${YELLOW}{disabled} librtmp @see: https://github.com/simple-rtmp-server/srs/wiki/v1_CN_SrsLibrtmp${BLACK}"
echo -e "  |${YELLOW}{disabled} librtmp: ./objs/include, ./objs/lib, the srs-librtmp library${BLACK}"
echo -e "  |    ${YELLOW}{disabled} simple handshake: publish/play stream with simple handshake to server${BLACK}"
echo -e "  |    ${YELLOW}{disabled} complex handshake: it's not required for client, recommend disable it${BLACK}"
echo -e "  |    ${YELLOW}{disabled} librtmp-sample: ./research/librtmp, the srs-librtmp client sample${BLACK}"
echo -e "  |        ${YELLOW}{disabled} librtmp-sample: ./research/librtmp/objs/srs_ingest_flv${BLACK}"
echo -e "  |        ${YELLOW}{disabled} librtmp-sample: ./research/librtmp/objs/srs_ingest_rtmp${BLACK}"
echo -e "  |        ${YELLOW}{disabled} librtmp-sample: ./research/librtmp/objs/srs_detect_rtmp${BLACK}"
echo -e "  |        ${YELLOW}{disabled} librtmp-sample: ./research/librtmp/objs/srs_bandwidth_check${BLACK}"
echo -e "  ${BLACK}+------------------------------------------------------------------------------------${BLACK}"
echo -e "  |${GREEN}server: ./objs/srs -c conf/srs.conf, start the srs server${BLACK}"
echo -e "  |    ${GREEN}hls @see: https://github.com/simple-rtmp-server/srs/wiki/v1_CN_DeliveryHLS${BLACK}"
echo -e "  |    ${GREEN}hls: generate m3u8 and ts from rtmp stream${BLACK}"
echo -e "  |    ${GREEN}dvr @see: https://github.com/simple-rtmp-server/srs/wiki/v1_CN_DVR${BLACK}"
echo -e "  |    ${GREEN}dvr: record RTMP stream to flv files.${BLACK}"
echo -e "  |    ${GREEN}{disabled} nginx @see: https://github.com/simple-rtmp-server/srs/wiki/v1_CN_DeliveryHLS${BLACK}"
echo -e "  |        ${GREEN}{disabled} nginx: delivery HLS stream by nginx${BLACK}"
echo -e "  |        ${GREEN}{disabled} nginx: sudo ./objs/nginx/sbin/nginx${BLACK}"
echo -e "  |    ${GREEN}ssl @see: https://github.com/simple-rtmp-server/srs/wiki/v1_CN_RTMPHandshake${BLACK}"
echo -e "  |    ${GREEN}ssl: support RTMP complex handshake for client required, for instance, flash${BLACK}"
echo -e "  |    ${YELLOW}{disabled} ffmpeg @see: https://github.com/simple-rtmp-server/srs/wiki/v1_CN_FFMPEG${BLACK}"
echo -e "  |    ${YELLOW}{disabled} ffmpeg: transcode, mux, ingest tool${BLACK}"
echo -e "  |    ${YELLOW}{disabled} ffmpeg: ./objs/ffmpeg/bin/ffmpeg${BLACK}"
echo -e "  |    ${GREEN}transcode @see: https://github.com/simple-rtmp-server/srs/wiki/v1_CN_FFMPEG${BLACK}"
echo -e "  |    ${GREEN}transcode: support transcoding RTMP stream${BLACK}"
echo -e "  |    ${GREEN}ingest @see: https://github.com/simple-rtmp-server/srs/wiki/v1_CN_Ingest${BLACK}"
echo -e "  |    ${GREEN}ingest: support ingest file/stream/device then push to SRS by RTMP stream${BLACK}"
echo -e "  |    ${GREEN}http-callback @see: https://github.com/simple-rtmp-server/srs/wiki/v1_CN_HTTPCallback${BLACK}"
echo -e "  |    ${GREEN}http-callback: support http callback for authentication and event injection${BLACK}"
echo -e "  |    ${GREEN}http-server @see: https://github.com/simple-rtmp-server/srs/wiki/v1_CN_HTTPServer${BLACK}"
echo -e "  |    ${GREEN}http-server: support http server to delivery http stream${BLACK}"
echo -e "  |    ${GREEN}http-api @see: https://github.com/simple-rtmp-server/srs/wiki/v1_CN_HTTPApi${BLACK}"
echo -e "  |    ${GREEN}http-api: support http api to manage server${BLACK}"
echo -e "  ${BLACK}+------------------------------------------------------------------------------------${BLACK}"
echo -e "${GREEN}binaries @see: https://github.com/simple-rtmp-server/srs/wiki/v1_CN_Build${BLACK}"
echo "you can:"
echo "      ./objs/srs -c conf/srs.conf"
echo "to start the srs server, with config conf/srs.conf."
| true
|
1ee15e239b7ad68c12e34ea2650c986203dbd58a
|
Shell
|
svpmtrust/autotest
|
/testserver/provision.sh
|
UTF-8
| 1,292
| 2.984375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Provision a test server: installs Python/Celery/pymongo, Java, git and
# RabbitMQ, prepares working directories and registers an Upstart job.
#
# Arguments:
#   $1 - git server root directory (GITSERVER_ROOT)
#   $2 - database host (DB_HOST)
#   $3 - contest name (CONTEST_NAME)
#   $4 - git host (GIT_HOST)

echo Installing Python
echo -----------------
sudo apt-get update
sudo apt-get install python python-pip -y

echo Installing Celery..
echo -----------------
sudo pip install celery==3.1.17

echo Installing Pymongo..
echo ------------------
sudo pip install pymongo==2.8

echo creating Participants,programs directories
echo -------------------------------------------------
# Abort when the target directory is missing; the original silently continued
# and created the directories in whatever the current directory happened to be.
cd "$1" || exit 1
mkdir programs
mkdir participants
# NOTE(review): these chown paths are hard-coded while the mkdirs above are
# relative to "$1" -- confirm both refer to the same location.
chown -R autotest:autotest /home/autotest/autotest/programs
chown -R autotest:autotest /home/autotest/autotest/participants

echo Installing git
echo --------------
sudo apt-get install git -y

echo Installing javac..
echo --------------
sudo apt-get install default-jre -y

echo Installing java..
echo --------------
sudo apt-get install default-jdk -y

echo Installing amqp..
echo ---------------
# -y added for consistency with every other install; without it the script
# blocks on an interactive confirmation prompt.
sudo apt-get install rabbitmq-server -y

echo Upstart the Test Server
echo ----------------------------
# Quote the expansions so values containing spaces survive intact.
# NOTE(review): writing to /etc/init without sudo assumes the script runs as
# root even though other commands use sudo -- confirm the invocation context.
echo "env GITSERVER_ROOT=$1" > /etc/init/testserver.conf
echo "env DB_HOST=$2" >> /etc/init/testserver.conf
echo "env CONTEST_NAME=$3" >> /etc/init/testserver.conf
echo "env GIT_HOST=$4" >> /etc/init/testserver.conf
echo ----------------------------
cat "$1/testserver/test-server.Upstart.templ" >> /etc/init/testserver.conf
sudo service testserver start
echo DONE
| true
|
6f29698598efe046b927c9075cdcd33e1601249f
|
Shell
|
sebialex/tools
|
/low-mem/init.d.sh
|
UTF-8
| 1,195
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/sh
### BEGIN INIT INFO
# Provides: lowMemAlert.sh
# Required-Start: $local_fs $network $named $time $syslog
# Required-Stop: $local_fs $network $named $time $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Description: Provides warning when system memory is too low, and will stop chromium if running
### END INIT INFO
# See http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/facilname.html for Required-Start/Stop options
# Daemon identity and filesystem layout.
name=lowMemAlert
dir_path=/usr/local/bin
path_to_start=$dir_path/${name}.sh   # worker script launched via daemon(1)
path_to_log=/var/log/${name}/log.txt # combined stdout/stderr log
process_name="$name"                 # pattern used to locate the running process

# Launch the worker in the background via daemon(1), logging to $path_to_log.
start_() {
daemon --name=${name} --output=$path_to_log sh $path_to_start
}

# Kill every process whose ps(1) output line matches $process_name.
# NOTE(review): ps|grep|awk is fragile (may match unrelated commands whose
# arguments contain the name); pgrep would be safer -- confirm before changing.
stop_() {
kill -9 $(ps aux | grep $process_name | grep -v grep | awk '{print $2}')
}
# Dispatch on the init action requested by the service manager.
case "$1" in
  start)
    start_
    ;;
  stop)
    stop_
    ;;
  restart)
    stop_
    start_
    ;;
  status)
    # Show the tail of the log when present; otherwise report not running.
    if [ -e "$path_to_log" ]; then
      tail "$path_to_log"
    else
      echo "Not running"
    fi
    ;;
  *)
    # The usage text previously advertised an 'uninstall' action that was
    # never implemented; the message now matches the handled commands.
    echo "Usage: $0 {start|stop|restart|status}"
    ;;
esac
| true
|
e570a44d6294b2d018502994a2df16aa94313abe
|
Shell
|
eeuneeun/kvmManager
|
/vnet/kvmVnetDel.sh
|
UTF-8
| 373
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Delete a virtual network: show the current vnet list in a dialog and delete
# whichever entry the user selects, looping until the selection is empty or
# not numeric. (Shebang changed from /bin/sh: the script relies on [[ ]] and
# arrays, which are bash features and fail under dash.)

# Shared helpers: vListUpdate, vnetListView, dialogCall, vnetDel, sets
# vnetListArray, selectNum and the $isNum regex.
. ./kvmLib

delTitle="삭제할네트워크선택"

vListUpdate vnet
vnetListView "$delTitle"

while true
do
  dialogCall ${vnetListArray[*]}
  # Quoted test: the original unquoted [ -n $selectNum ] degenerated to a
  # one-argument test and was always true when $selectNum was empty.
  if [ -n "$selectNum" ] && [[ $selectNum =~ $isNum ]]
  then
    vnetDel ${vnetListArray[$selectNum]}
    vListUpdate vnet
    vnetListView "$delTitle"
    dialogCall ${vnetListArray[*]}
  else
    exit
  fi
done
| true
|
8a3aa40bb40ac112445ebd14608ba203e4a63cf6
|
Shell
|
rhgkstjf/Web-Log-Mining
|
/DataAnalysis_scala_version2.sh
|
UTF-8
| 2,306
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Daily batch pipeline: crawl URL data, run the Spark analysis job, refresh
# Elasticsearch, copy results from HDFS to the local FS, then push them to
# the web server. Bracketed tokens ([UserHomePath], [web-server-port], ...)
# are deployment placeholders to be substituted before use.

# Date-stamped output directory for today's run.
P=$(date "+%Y-%m-%d")
mkdir [UserHomePath]/AutoLab/Week_DF/$P
echo {"-------------오늘은 $P입니다. 데이터 배치 분석 자동화 시작합니다.-------------"}

echo "-------------url 크롤링 시작 ----------------------"
python3 crw.py
sleep 2m
hadoop fs -put urldata.json /urldata/urldata.json
echo "-------------url 데이터 갱신 완료 -------------------"

echo "-------------데이터 분석 시작------------"
$SPARK_HOME/bin/spark-submit --class Auto --master yarn [UserHomePath]/AutoLab/Auto/target/scala-2.11/autolog-project_2.11-1.3.jar
echo "-------------데이터 분석 완료------------"

echo "-------------ES INDEX 삭제 시작----------"
sleep 5s
curl -X DELETE "[ElasticsearchURL]/[Index]?pretty"
echo "-------------ES INDEX 삭제 완료 ---------"

echo "-------------HDFS -> LocalFS 로 복사 시작 -----"
# NOTE(review): the destination uses /home/hadoop while the mkdir above used
# [UserHomePath] -- confirm both resolve to the same directory.
hadoop fs -copyToLocal /Auto/$P/*.json /home/hadoop/AutoLab/Week_DF/$P/Total.json
hadoop fs -copyToLocal /Auto/Class/*.json /home/hadoop/AutoLab/Week_DF/$P/Class.json
hadoop fs -copyToLocal /Auto/Content/*.json /home/hadoop/AutoLab/Week_DF/$P/Content.json
hadoop fs -copyToLocal /Auto/Hack/*.json /home/hadoop/AutoLab/Week_DF/$P/Hack.json
hadoop fs -copyToLocal /Auto/User/*.json /home/hadoop/AutoLab/Week_DF/$P/User.json
hadoop fs -copyToLocal /Auto/Korea/*.json /home/hadoop/AutoLab/Week_DF/$P/Korea.json
# Remove the HDFS outputs (files first, then the now-empty directories).
hadoop fs -rm /Auto/$P/*
hadoop fs -rm /Auto/Class/*
hadoop fs -rm /Auto/Content/*
hadoop fs -rm /Auto/Hack/*
hadoop fs -rm /Auto/User/*
hadoop fs -rm /Auto/Korea/*
hadoop fs -rmdir /Auto/$P
hadoop fs -rmdir /Auto/Class
hadoop fs -rmdir /Auto/Content
hadoop fs -rmdir /Auto/Hack
hadoop fs -rmdir /Auto/User
hadoop fs -rmdir /Auto/Korea
echo "-------------LocalFS로 복사 완료 ----------"
sleep 5s

echo "-------------url data 초기화 위해 삭----------------"
rm urldata.json
hadoop fs -rm /urldata/urldata.json
echo "-------------삭제 완료---------------------"

echo "---------------웹 서버에 데이터 전송 중--------------"
ssh -p [web-server-port] [web-server-user]@[web-server-ip] "echo '[sudo pwd]' | sudo -S mkdir /var/www/html/FLASKAPPS/Data/$P"
# Fixed: the original was missing the space after the -p port argument and
# never closed the double quote around the remote command.
ssh -p [web-server-port] [web-server-user]@[web-server-ip] "sh /home/[user]/DataLoad.sh"
echo "---------------웹 서버에 데이터 전송 완료--------------"
| true
|
015c87bc820f2ef9ba9d6306c45d5da4e3b2e288
|
Shell
|
crr0004/workspace-setup
|
/scripts/copyme.sh
|
UTF-8
| 305
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copies relevant files to the correct locations.
# $1 should be the directory to link into (suggest your home directory).
#
# Every entry in ../files becomes a backed-up symlink $1/.<name>
# (ln -b keeps a backup of anything it would overwrite; GNU coreutils).
if [[ -n "$1" ]] && [ -d "$1" ];
then
  # Resolve ../files to an absolute path once, so link paths stay valid even
  # when $1 is relative (the original cd'd into ../files first, which made a
  # relative $1 resolve to the wrong place).
  src_dir=$(cd ../files && pwd) || exit 1
  # Glob instead of parsing `ls -A` output: handles names with whitespace or
  # glob characters. The three patterns together cover plain and hidden entries.
  for path in "$src_dir"/* "$src_dir"/.[!.]* "$src_dir"/..?*; do
    [ -e "$path" ] || [ -L "$path" ] || continue
    name=${path##*/}
    ln -bs "$path" "$1/.$name"
  done
  exit
else
  echo "Must supply a directory to copy to. Suggest home directory of ~/"
fi
| true
|
88007332164d3bb3955ca577c02cee98a90ea5b3
|
Shell
|
lukeburpee/archived-legalease-code
|
/legalease/scripts/shell/email-data-extraction/run_ocr_processing.sh
|
UTF-8
| 455
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set +x
set -e
MEMORY=$1
echo "===========================================$0"
OUTPUT_DIR='ocr_output'
if [[ -d "/usr/src/app/pst-extract/$OUTPUT_DIR" ]]; then
rm -rf "/usr/src/app/pst-extract/$OUTPUT_DIR"
fi
spark-submit --master local[*] --driver-memory $MEMORY --files spark/filters.py,ocr/ocr_opencv.py,ocr/resize_image.py ocr/run_ocr.py file:///usr/src/app/pst-extract/pst-json file:///usr/src/app/pst-extract/$OUTPUT_DIR
| true
|
96927d3e5b73cd3f44793cab4a3091ab607f0484
|
Shell
|
kabir/large-domain
|
/client-scripts/tag-started-slaves.sh
|
UTF-8
| 1,205
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
echo ========================================================
echo Tagging all non running servers as Type=Slave, Name=SlaveXX
echo ========================================================
if [[ -z "${1+x}" ]] ; then
COUNTER=0
else
COUNTER=$1
fi
function tag {
if [[ "$1" != "0" ]] ; then
let COUNTER=COUNTER+1
NUM=$COUNTER
if [ $COUNTER -lt 10 ] ; then
NUM=00$COUNTER
elif [ $COUNTER -lt 100 ] ; then
NUM=0$COUNTER
fi
echo tagging $1 as Type=Slave,Name=Slave$NUM
ec2-create-tags $1 --tag Name=Slave$NUM --tag Type=Slave
fi
}
IFS=$'\n'
#Read the slave entries
echo "Checking servers on each slave..."
INSTANCE_ID="0"
for line in `ec2-describe-instances --filter "instance-state-code=16"`
do
if [[ $line == INSTANCE* ]] ; then
#If the INSTANCE_ID isn't 0 tag the instance
tag $INSTANCE_ID
#Line starts with INSTANCE - get the instance id
INSTANCE_ID=$(echo $line|awk '{print $2}')
fi
if [[ $line == TAG* ]] ; then
tag_value=$(echo $line|awk '{print $5}')
if [[ "x$tag_value" != "x" ]] ; then
INSTANCE_ID="0"
fi
fi
done
tag $INSTANCE_ID &
| true
|
3e0fd81500af910a67e97f5f4ca0bd04054b2653
|
Shell
|
hgrif/data-science-box
|
/provisioning/common/templates/bash_aliases
|
UTF-8
| 756
| 2.9375
| 3
|
[
"BSD-2-Clause-Views"
] |
permissive
|
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -1 --group-directories-first'
alias j='jump'
# Add an "alert" alias for long running commands. Use like so:
# sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
# start ipython notebook server
alias start_ipynb_server="screen -S ipython_notebook -dm bash -c 'ipython notebook --ip=* --no-browser --port=8888'"
# stop ipython notebook server
alias stop_ipynb_server="screen -ls ipython_notebook | grep Detached | cut -d. -f1 | awk '{print $1}' | xargs kill"
# kill all screens
alias screen_killall="screen -ls | grep Detached | cut -d. -f1 | awk '{print $1}' | xargs kill"
| true
|
048d343300afab8183813620603a7d26dc2d9bab
|
Shell
|
jrabinow/homedir-scripts
|
/clean_env
|
UTF-8
| 306
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
function main()
{
tmpdir="$(mktemp -d)"
tmprc="$(mktemp)"
cat > "$tmprc" << EOF
PS1='\$ '
cd "$tmpdir"
EOF
env - HOME="$tmpdir" TERM="$TERM" bash --noprofile --rcfile "$tmprc"
rm -rf "$tmpdir" "$tmprc"
}
if [ "${BASH_SOURCE[0]}" == "$0" ]; then
main "$@"
fi
| true
|
f132213ebef89aff7d00e260507623e1fe9be096
|
Shell
|
hasim-a/axpbox
|
/test/rom/test.sh
|
UTF-8
| 660
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Download the firmware
wget 'http://80.211.96.38/s/inc/downloads/es40-srmon/cl67srmrom.exe'
# Start AXPbox
../../build/axpbox run &
AXPBOX_PID=$!
# Wait for AXPbox to start
sleep 3
# Connect to terminal
ncat -t 127.0.0.1 21000 | tee axp.log &
NETCAT_PID=$!
# Wait for the last line of log to become P00>>>
timeout=300
while true
do
if [ $timeout -eq 0 ]
then
echo "waiting for SRM prompt timed out" >&2
exit 1
fi
if [ "$(tail -n 1 axp.log | tr -d '\0')" == "P00>>>" ]
then
break
fi
sleep 1
timeout=$(($timeout - 1))
done
kill $NETCAT_PID
kill $AXPBOX_PID
diff axp_correct.log axp.log
rm -f axp.log cl67* *.rom
| true
|
c052db84689899ad6b2130c306edcae90d673432
|
Shell
|
mvandorp/server-addons
|
/install-plugins.sh
|
UTF-8
| 2,399
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
#######################################################################
### EDIT THESE PATHS FOR YOUR OWN SETUP ###
#######################################################################
# The left4dead2 directory of your server
L4D2_DIR="$HOME/Steam/steamapps/common/Left 4 Dead 2 Dedicated Server/left4dead2/"
#######################################################################
### EDIT THESE IF YOU WANT TO INSTALL ONLY SOME OF THE ADDONS ###
#######################################################################
main()
{
echo "================================================================================"
echo "=== Installing plugins... ==="
echo "================================================================================"
# Install unmaintained configs first
install "scavogl"
install "practiceogl"
install "witchparty"
# Install sourcemod, metamod, stripper
install "mmsource"
install "sourcemod"
install "stripper"
# Install various extensions
install "tickrate-enabler"
install "dhooks"
install "l4dtoolz"
install "builtinvotes"
install "geoip"
install "cannounce"
# Install configs
install "confogl"
install "rotoblin"
install "sky"
install "promod"
install "eq"
install "skeet"
# Install various plugins
install "sourcebans"
install "smac"
install "various"
install_maps
}
#######################################################################
### SHOULD NOT HAVE TO EDIT BELOW HERE ###
#######################################################################
install()
{
# Create a temporary directory
dir=$(mktemp -d)
# Extract the addon to the temp directory
tar zxvf "${1}.tar.gz" -C "${dir}"
# Merge the temp directory into the server's left4dead2 folder
rsync -av "${dir}/" "${L4D2_DIR}"
# Clean up the temp directory
rm -rf "${dir}"
}
install_maps()
{
# Save working directory
dir=$(pwd)
# Download maps
mkdir "$HOME/maps"
cd "$HOME/maps"
wget -A zip -N -m -p -E -k -K -nd -np "http://82.217.237.3/maps/"
# Extract and install maps
unzip -u -n '*.zip' -d "${L4D2_DIR}/addons/"
# Restore working directory
cd "${dir}"
}
main
| true
|
cf181cc4b09abec20e9f09df3f885f2e9e685d4d
|
Shell
|
lennartkester/CNVanalysis
|
/collectNormalCountsv2.sh
|
UTF-8
| 1,812
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
## script to laucn CNV calling GATK CNV calling pipeline on tumor normal pairs in folder ##
## folder needs to be specified and two files normal.csv and tumor.csv need to be present which list the PMABM IDs of the normal and tumor samples respectively ##
## normal.csv and tumor.csv need to be in corresponding order ##
## the scripts first copies the cram files from the isilon storage to the cluster and then launches the GATK pipeline ##
cramList=20200602_PoN_Cramlist.csv
#######################################################
wd=$PWD
mkdir -p ${wd}/stdout
numberOfSamples=$(cat $cramList | wc -l )
reference='/hpc/pmc_gen/lkester/CNV_calling/reference/Homo_sapiens_assembly38.fasta'
#interval_list='/hpc/pmc_gen/references/hg38bundle/v0/MedExome_hg38_capture_targets.interval_list'
interval_list='/hpc/pmc_gen/lkester/CNV_calling/reference/MedExome_hg38_capture_targets_padded250.interval_list'
echo $numberOfSamples
for i in $(seq 1 $numberOfSamples)
do
normal=$(head -$i $cramList | tail -1)
normal2=${normal##*/}
normalCounts=${normal2/cram/counts.hdf5}
cat > ${wd}/${normal2/%.cram/}.collectCounts.sh <<EOF
#!/bin/bash
#SBATCH -t 01:00:00
#SBATCH --mem=30G
#SBATCH -o ${wd}/stdout/${normal2/%cram/out}
#SBATCH -e ${wd}/stdout/${normal2/%cram/err}
cd $wd
## perform GATK CollectReadCounts for tumor and normal ##
module load Java/1.8.0_60
/hpc/pmc_gen/lkester/bin/gatk-4.1.5.0/gatk CollectReadCounts \
-I $normal \
-R $reference \
-L $interval_list \
--interval-padding 0 \
--interval-merging-rule OVERLAPPING_ONLY \
-O $normalCounts
EOF
#cat arrayLine.sh /hpc/pmc_gen/lkester/CNV_calling/CNVcallingScript.sh > ${wd}/${tumor/%.cram/}_CNVcalling.sh
#rm arrayLine.sh
#sbatch ${wd}/${normal/%.cram/}.collectCounts.sh
done
| true
|
8f9c9d3bfa300e1b878a89a4cfafb6e8334fef3e
|
Shell
|
lingobug/TRmorph
|
/SFST/archive.sh
|
UTF-8
| 327
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/sh
dirname=$1
mkdir -p /tmp/archive-tmp-$$/$dirname
make
rsync --exclude-from=dist-exclude -avH . /tmp/archive-tmp-$$/$dirname
rsync trmorph.a /tmp/archive-tmp-$$/$dirname/$dirname.a
cd /tmp/archive-tmp-$$
tar czvf $dirname.tar.gz $dirname
cd -
mv /tmp/archive-tmp-$$/$dirname.tar.gz .
rm -fr /tmp/archive-tmp-$$
| true
|
18300e3dd9764c0527868c81491ec88fbe40fea9
|
Shell
|
goelrhea1992/UsingKnowledgeGraphs
|
/run.sh
|
UTF-8
| 233
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
if [ "$#" -ne 2 ] && [ "$#" -ne 6 ]; then
echo "Illegal number of parameters"
fi
if [ "$#" -eq 6 ]; then
python InfoBox.py "-$1" $2 $3 "'$4'" $5 $6
else if [ "$#" -eq 2 ]; then
python InfoBox.py "-$1" $2
fi
fi
| true
|
c0a06a48d54bba4fa9d8ab34212fc7401c00e829
|
Shell
|
AnNiran-zz/operator-v1
|
/scripts/generate.sh
|
UTF-8
| 242
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
crd=$1
current_dir=$PWD
cd $GOPATH/src/k8s.io/code-generator
./generate-groups.sh all operator-v1/pkg/client/${crd} operator-v1/pkg/apis ${crd}:v1
res=$?
if [ $res -ne 0 ]; then
cd $current_dir
exit 1
fi
cd $current_dir
| true
|
be8375ab9fc88404b75d810151bc228c99adfef7
|
Shell
|
vojto/stefan-server
|
/add_slovak_girls_to_db.sh
|
UTF-8
| 652
| 3.046875
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
cat slovak_girls.txt | tr [:upper:] [:lower:] > zoznam_sk_dievcat.txt
for i in $( cat slovak_girls.txt ); do echo $i.jpg >> cesta_sk_dievcat.txt; done
DATE=$(date +%Y-%m-%d)
mena="zoznam_sk_dievcat.txt"
cesta="cesta_sk_dievcat.txt"
#mysql
MYSQL_SERVER="127.0.0.1"
MYSQL_USER="root"
MYSQL_PW="jahodka;"
MYSQL_DB="images_1"
MYSQL="/usr/bin/mysql"
while read -r -u4 line2 && read -u5 line3; do {
echo "INSERT INTO img_dict(string, image, url, date, id, cesta) VALUES ('$line2', '', '','$DATE','', '$line3');" > /tmp/mysql1.sql
$MYSQL -h $MYSQL_SERVER -u $MYSQL_USER -p$MYSQL_PW $MYSQL_DB < /tmp/mysql1.sql
}; done 4< $mena 5< $cesta
| true
|
4bfc0bbfac2e627f502e3a4bfc769b78af8856cc
|
Shell
|
apache/jena
|
/apache-jena/bin/tdbbackup
|
UTF-8
| 2,471
| 3.9375
| 4
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
## Licensed under the terms of http://www.apache.org/licenses/LICENSE-2.0
resolveLink() {
local NAME=$1
if [ -L "$NAME" ]; then
case "$OSTYPE" in
darwin*|bsd*)
# BSD style readlink behaves differently to GNU readlink
# Have to manually follow links
while [ -L "$NAME" ]; do
NAME=$( cd $NAME && pwd -P ) ;
done
;;
*)
# Assuming standard GNU readlink with -f for
# canonicalize and follow
NAME=$(readlink -f "$NAME")
;;
esac
fi
echo "$NAME"
}
# If JENA_HOME is empty
if [ -z "$JENA_HOME" ]; then
SCRIPT="$0"
# Catch common issue: script has been symlinked
if [ -L "$SCRIPT" ]; then
SCRIPT=$(resolveLink "$0")
# If link is relative
case "$SCRIPT" in
/*)
# Already absolute
;;
*)
# Relative, make absolute
SCRIPT=$( dirname "$0" )/$SCRIPT
;;
esac
fi
# Work out root from script location
JENA_HOME="$( cd "$( dirname "$SCRIPT" )/.." && pwd )"
export JENA_HOME
fi
# If JENA_HOME is a symbolic link need to resolve
if [ -L "${JENA_HOME}" ]; then
JENA_HOME=$(resolveLink "$JENA_HOME")
# If link is relative
case "$JENA_HOME" in
/*)
# Already absolute
;;
*)
# Relative, make absolute
JENA_HOME=$(dirname "$JENA_HOME")
;;
esac
export JENA_HOME
fi
if [ -z "$JAVA" ]
then
if [ -z "$JAVA_HOME" ]
then
JAVA="$(which java)"
else
JAVA="$JAVA_HOME/bin/java"
fi
fi
if [ -z "$JAVA" ]
then
(
echo "Cannot find a Java JDK."
echo "Please set either set JAVA or JAVA_HOME and put java (>=Java 11) in your PATH."
) 1>&2
exit 1
fi
# ---- Setup
# JVM_ARGS : don't set here but it can be set in the environment.
# Expand JENA_HOME but literal *
JENA_CP="$JENA_HOME"'/lib/*'
LOGGING="${LOGGING:--Dlog4j.configurationFile=file:$JENA_HOME/log4j2.properties}"
# Platform specific fixup
# On CYGWIN convert path and end with a ';'
case "$(uname)" in
CYGWIN*) JENA_CP="$(cygpath -wp "$JENA_CP");";;
esac
# Respect TMPDIR or TMP (windows?) if present
# important for tdbloader spill
if [ -n "$TMPDIR" ]
then
JVM_ARGS="$JVM_ARGS -Djava.io.tmpdir=\"$TMPDIR\""
elif [ -n "$TMP" ]
then
JVM_ARGS="$JVM_ARGS -Djava.io.tmpdir=\"$TMP\""
fi
## Append any custom classpath
if [ -n "$CLASSPATH" ]
then
JENA_CP="$JENA_CP:$CLASSPATH"
fi
"$JAVA" $JVM_ARGS $LOGGING -cp "$JENA_CP" tdb.tdbbackup "$@"
| true
|
397087d00a0ee7eca208ad923ef1fd021b152c3e
|
Shell
|
andrea993/UXIbot
|
/question
|
UTF-8
| 474
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
echo "Why hello there.
Would you like some tea (y/n)?"
read answer
[[ $answer =~ ^([yY][eE][sS]|[yY])$ ]] && echo "OK then, here you go: http://www.rivertea.com/blog/wp-content/uploads/2013/12/Green-Tea.jpg" || echo "OK then."
until [ "$SUCCESS" = "y" ] ;do
echo "Do you like Music? mykeyboardstartshere Yass! No"
read answer
case $answer in
'Yass!') echo "Goody!";SUCCESS=y;;
'No') echo "Well that's weird.";SUCCESS=y;;
*) SUCCESS=n;;
esac
done
exit
| true
|
52e208dddad82c0cfede31007275354f133656ac
|
Shell
|
dukkhadevops/myrepo
|
/certbot/cf-update.sh
|
UTF-8
| 627
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
#see api url below to see how these are used
ZONE_ID=1bee9bffffffffffffffffffffffffffff # zone id for contoso.com
RECORD_ID=3b71eeeeeeeeeeeeeeeeeeeeeeeeeeee # record id for _acme-challenge.contoso.com
echo "Update Cloudflare DNS TXT record"
curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$RECORD_ID" \
-H "Authorization: Bearer $CF_API_TOKEN" \
-H "Content-Type:application/json" \
--data "{\"type\":\"TXT\",\"name\":\"_acme-challenge.$CERTBOT_DOMAIN\",\"content\":\"$CERTBOT_VALIDATION\",\"ttl\":1}"
echo "Sleep to make sure the DNS change has time to propagate"
sleep 20
| true
|
ed93a53680f6844f5ccb252845ef5f4c4146f1f2
|
Shell
|
brenard/eesyvpn
|
/src/etc/eesyvpn/eesyvpn.conf
|
UTF-8
| 1,479
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
VPN_NAME="eesyvpn"
VPN_PORT="1190"
# Certification Authority
CAHOME=/etc/eesyvpn/$VPN_NAME
CRT_DIR=$CAHOME/certs
CRT_REVOKED_DIR=$CRT_DIR/revoked
KEY_DIR=$CAHOME/private
KEY_REVOKED_DIR=$KEY_DIR/revoked
CSR_DIR=$CAHOME/csr
CSR_REVOKED_DIR=$CSR_DIR/revoked
PEM_DIR=$CAHOME/pem
PEM_REVOKED_DIR=$PEM_DIR/revoked
CACRT=$CRT_DIR/ca.crt
CAKEY=$KEY_DIR/ca.key
CRL_DIR=$CAHOME/crl
CRLPEM=$CRL_DIR/crl.pem
NEW_CRT_DIR=$CAHOME/misc/ca.db.certs
TA_KEY_DIR=$CAHOME/private
TA_KEY=$TA_KEY_DIR/ta.key
CCD_DIR=$CAHOME/ccd
DH1024=$KEY_DIR/dh1024.pem
# Log configuration
VPN_LOG_DIR="/var/log"
VPN_LOG_NAME="openvpn-$VPN_NAME"
VPN_LOG_FILE="$VPN_LOG_DIR/${VPN_LOG_NAME}.log"
VPN_LOG_STATUS_FILE="$VPN_LOG_DIR/${VPN_LOG_NAME}-status.log"
# OpenSSL
OSSL_CNF_DIR=$CAHOME/openssl
OSSL_CNF=$OSSL_CNF_DIR/openssl.conf
OSSL_INDEX=$OSSL_CNF_DIR/index
OSSL_SERIAL=$OSSL_CNF_DIR/serial
OSSL_VAR_C="FR"
OSSL_VAR_ST="Ile de France"
OSSL_VAR_L="Paris"
OSSL_VAR_O="Easter-eggs"
OSSL_VAR_OU="$OSSL_VAR_O"
OSSL_VAR_EMAIL="postmaster@easter-eggs.com"
OSSL_VAR_CAHOME="$CAHOME"
OSSL_VAR_CANAME="$VPN_NAME"
OSSL_CERT_SUBJ_FORMAT="/C=$OSSL_VAR_C/ST=$OSSL_VAR_ST/L=$OSSL_VAR_L/O=$OSSL_VAR_O/OU=$OSSL_VAR_OU/CN=%%CN%%/emailAddress=$OSSL_VAR_EMAIL"
# OpenVPN
OVPN_CNF_DIR=$CAHOME/openvpn
OVPN_CNF_CLIENTS_DIR=$OVPN_CNF_DIR/clients
OVPN_CNF_CLIENTS_ZIP_DIR=$OVPN_CNF_CLIENTS_DIR
OVPN_CNF_CLIENTS_TPL=$OVPN_CNF_DIR/client.ovpn.tpl
OVPN_SERVER_CNF=$OVPN_CNF_DIR/openvpn.conf
OVPN_SERVER_LINK=/etc/openvpn/${VPN_NAME}.conf
| true
|
49601eac8fa158b3df1effcc03c7b6bf396e86e2
|
Shell
|
tunix/digitalocean-dyndns-armv7
|
/dyndns.sh
|
UTF-8
| 1,517
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/sh
api_host="https://api.digitalocean.com/v2"
sleep_interval=${SLEEP_INTERVAL:-300}
die() {
echo "$1"
exit 1
}
test -z $DIGITALOCEAN_TOKEN && die "DIGITALOCEAN_TOKEN not set!"
test -z $DOMAIN && die "DOMAIN not set!"
test -z $NAME && die "NAME not set!"
dns_list="$api_host/domains/$DOMAIN/records"
while ( true ); do
domain_records=$(curl -s -X GET \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $DIGITALOCEAN_TOKEN" \
$dns_list)
record_id=$(echo $domain_records| jq ".domain_records[] | select(.type == \"A\" and .name == \"$NAME\") | .id")
record_data=$(echo $domain_records| jq -r ".domain_records[] | select(.type == \"A\" and .name == \"@\") | .data")
test -z $record_id && die "No record found with given domain name!"
ip="$(curl -s ipinfo.io/ip)"
data="{\"type\": \"A\", \"name\": \"$NAME\", \"data\": \"$ip\"}"
url="$dns_list/$record_id"
if [[ -n $ip ]]; then
if [[ "$ip" != "$record_data" ]]; then
echo "existing DNS record address ($record_data) doesn't match current IP ($ip), sending data=$data to url=$url"
curl -s -X PUT \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $DIGITALOCEAN_TOKEN" \
-d "$data" \
"$url" &> /dev/null
fi
else
echo "IP wasn't retrieved within allowed interval. Will try $sleep_interval seconds later.."
fi
sleep $sleep_interval
done
| true
|
8f8e6667bb6b98dce83e17b95f1964810a5260f6
|
Shell
|
iridium-browser/iridium-browser
|
/native_client/tools/create_redirectors.sh
|
UTF-8
| 989
| 3.9375
| 4
|
[
"BSD-3-Clause",
"Zlib",
"Classpath-exception-2.0",
"BSD-Source-Code",
"LZMA-exception",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-intel-osl-1993",
"HPND-sell-variant",
"ICU",
"LicenseRef-scancode-protobuf",
"bzip2-1.0.6",
"Spencer-94",
"NCSA",
"LicenseRef-scancode-nilsson-historical",
"CC0-1.0",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"LicenseRef-scancode-other-copyleft",
"GPL-2.0-or-later",
"NTP",
"GPL-2.0-only",
"LicenseRef-scancode-other-permissive",
"GPL-3.0-only",
"GFDL-1.1-only",
"W3C",
"LicenseRef-scancode-python-cwi",
"GCC-exception-3.1",
"BSL-1.0",
"Python-2.0",
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unknown-license-reference",
"CPL-1.0",
"GFDL-1.1-or-later",
"W3C-19980720",
"LGPL-2.0-only",
"LicenseRef-scancode-amd-historical",
"LicenseRef-scancode-ietf",
"SAX-PD",
"LicenseRef-scancode-x11-hanson",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"dtoa",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"PSF-2.0",
"LicenseRef-scancode-newlib-historical",
"LicenseRef-scancode-generic-exception",
"SMLNJ",
"HP-1986",
"LicenseRef-scancode-free-unknown",
"SunPro",
"MPL-1.1"
] |
permissive
|
#!/bin/bash
# Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -Ceu
prefix="$1"
if [[ ! -d "$prefix" ]]; then
echo "Usage: $0 toolchain-prefix"
exit 1
fi
# Redirectors on *nix:
# The purpose of redirectors on *nix is to provide convenience wrappers, for
# example to emulate 32-bit compiler by calling 64-bit compiler with -m32.
# Another purpose of redirectors is to save space by replacing duplicate
# binaries with wrappers or links.
#
# Symbolic links vs. hard links:
# On windows/cygwin, hard links are needed to run linked programs outside of
# the cygwin shell. On *nix, there is no usage difference.
# Here we handle only the *nix case and use the symbolic links.
while read src dst arg; do
if [[ -e "$prefix/$(dirname "$src")/$dst" ]]; then
./create_redirector.sh "$prefix/$src" "$dst" "$arg"
fi
done < redirect_table.txt
| true
|
171c1561eae382639c4de4e824bb742063cced95
|
Shell
|
Ed-ITSolutions/Xen-Server-Backup
|
/vm_backup.sh
|
UTF-8
| 1,645
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Citrix XenServer 5.5 VM Backup Script
# This script provides online backup for Citrix Xenserver 5.5 virtual machines
#
# @version 3.01
# @created 24/11/2009
# @lastupdated 01/12/2009
#
# @author Andy Burton
# @url http://www.andy-burton.co.uk/blog/
# @email andy@andy-burton.co.uk
#
# Get current directory
dir=`dirname $0`
# Load functions and config
. $dir"/vm_backup.lib"
. $dir"/vm_backup.cfg"
touch $log_path
# Switch backup_vms to set the VM uuids we are backing up in vm_backup_list
case $backup_vms in
"all")
if [ $vm_log_enabled ]; then
log_message "Backup All VMs"
fi
set_all_vms
;;
"running")
if [ $vm_log_enabled ]; then
log_message "Backup running VMs"
fi
set_running_vms
;;
"argv")
if [ $vm_log_enabled ]; then
log_message "Backup argv VMs"
fi
set_argv_vms "$1"
;;
"list")
if [ $vm_log_enabled ]; then
log_message "Backup list VMs"
fi
;;
*)
if [ $vm_log_enabled ]; then
log_message "Backup no VMs"
fi
reset_backup_list
;;
esac
# Check backup_dir exists if not create it
if [ ! -d $backup_dir ];
then
`mkdir -p $backup_dir`
fi
# Check if backing to CIFS share
# if not run backups
# else mount share and if successful run backups
if [ -z $cifs_share ]; then
backup_vm_list
else
`mount -t cifs "$cifs_share" $backup_dir -o username=$cifs_username,password="$cifs_password"`
if [ $? -eq 0 ]; then
backup_vm_list
else
console_alert "backups mount error" "Unable to mount share, please check cfg"
exit
fi
fi
if [ -n "$keep_backups_for" ]; then
remove_old_backups $keep_backups_for
fi
# End
if [ $vm_log_enabled ]; then
log_disable
fi
| true
|
c7edbcf36ba97b66b454ab3a922f7877d52ed94b
|
Shell
|
anndoan/optional_class
|
/chip-seq/coverage_chipseq.sh
|
UTF-8
| 1,373
| 3.4375
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
#BSUB -J ChIP_coverage
#BSUB -o logs/chip_coverage_%J.out
#BSUB -e logs/chip_coverage_%J.err
#BSUB -R "select[mem>4] rusage[mem=4]"
#BSUB -n 1
set -u -e -x -o pipefail
# install programs if necessary (for macOSX)
# brew install meme bowtie2 samtools
# pip install macs2
# load modules if on Tesla
# module load meme python
# define file locations
workdir="$HOME/dev/optional-class/chip-seq"
fasta="$workdir/dbases/GRCm38.p4.genome.fa"
genome="$workdir/dbases/chrom_sizes_mm38.txt"
bams="$workdir/bams"
mkdir -p coverage
for bam in $bams/*.bam
do
echo "getting coverage data for "$bam
# extract out only fastq filename without directory
base_name=$(basename $bam)
out_name=${base_name/.bam}
# make compressed bedgraph (coverage data)
bedtools genomecov -ibam $bam -bg \
| gzip -c > "coverage/"$out_name".bg.gz"
# find enriched regions (i.e. peaks)
macs2 callpeak -t $bam -n $out_name --outdir "coverage/"
# get intervals +/- 25bp around peak summit
bedtools slop -i "coverage/"$out_name"_summits.bed" \
-b 25 -g $genome > "coverage/"$out_name"summits.slop.25.bed"
# get DNA sequences for 25bp +/- surround peak summit
bedtools getfasta -fi $fasta \
-bed "coverage/"$out_name"summits.slop.25.bed" \
-fo "coverage/"$out_name"summits.slop.25.fasta"
done
| true
|
6897c40e88bf27b181fc011d093a6296d2610496
|
Shell
|
bikash/ScriptsDebian
|
/install_pylucene.sh
|
UTF-8
| 313
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
#
source $VIRTUAL_ENV/bin/activate
mkdir tmp-lucene
pushd tmp-lucene
ROOT=$(dirname $0)
wget http://www.apache.org/dist/lucene/pylucene/pylucene-3.0.2-1-src.tar.gz
tar -xzf pylucene-3.0.2-1-src.tar.gz
pushd pylucene-3.0.2-1
cp ../../$ROOT/Makefile .
make
make install
popd
popd
rm -rf tmp-lucene
| true
|
bba6156144f020f426f53a48f5eb82d9a0385dea
|
Shell
|
llx2wyf/k8s
|
/dnsimage.sh
|
UTF-8
| 389
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
images=(k8s-dns-dnsmasq-nanny-amd64:1.14.4 k8s-dns-kube-dns-amd64:1.14.4 k8s-dns-sidecar-amd64:1.14.4)
for imageName in ${images[@]} ; do
# printf "%s\n" $imageName
#docker pull llx2wyf/$imageName
#docker tag llx2wyf/$imageName gcr.io/google_containers/$imageName
#docker rmi llx2wyf/$imageName
docker save -o out/$imageName gcr.io/google_containers/$imageName
done
| true
|
d5635a631d0d55409a1af12ae169d3b3070f05d8
|
Shell
|
winunix/winunix-icefy
|
/main.sh
|
UTF-8
| 678
| 3.046875
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
cat /etc/passwd | grep -v 'syslog' | grep /home/ | awk '{split($0,a,":"); print a[1]":"a[6]}' > /tmp/ListOfUsers.ice
for reg in $(cat /tmp/ListOfUsers.ice); do
user=$(echo $reg | cut -d ":" -f 1);
folder=$(echo $reg | cut -d ":" -f 2);
echo "Freezing user "$user
cd $folder
find . | grep -v "$(cat /opt/icefy/ignore)" | grep -v "\.$" > /tmp/ListToRemove.ice
sed -i 's/ /\\ /g' /tmp/ListToRemove.ice
xargs rm -rf < /tmp/ListToRemove.ice
rsync -a --chown=$user:$user /etc/skel/ $folder/
sed -i 's#/usr/software-center/postInstall##g' $folder/.config/lxsession/Lubuntu/autostart
chown $user:$user $folder/.config/lxsession/Lubuntu/autostart
done
| true
|
f85c695e2fefd722ce2c66fbbb162cf9d8d41ce8
|
Shell
|
emersion/mrsh
|
/test/read.sh
|
UTF-8
| 196
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh -eu
printf | read a
echo $?
i=0
printf "a\nb\nc\n" | while read line; do
printf "%s\n" "${line:-blank}"
i=$((i+1))
[ $i -gt 10 ] && break
done
[ $i = 3 ] && echo "correct!"
| true
|
7b3b9e66d3df7e299b3a1ea73ce7f9e63b1e9c75
|
Shell
|
mlcommons/ck
|
/cm-mlops/script/get-cuda-devices/run.sh
|
UTF-8
| 460
| 3.09375
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Compile
rm a.out
echo ""
echo "Checking compiler version ..."
echo ""
${CM_NVCC_BIN_WITH_PATH} -V
echo ""
echo "Compiling program ..."
echo ""
cd ${CM_TMP_CURRENT_SCRIPT_PATH}
${CM_NVCC_BIN_WITH_PATH} print_cuda_devices.cu
test $? -eq 0 || exit 1
# Return to the original path obtained in CM
echo ""
echo "Running program ..."
echo ""
cd ${CM_TMP_CURRENT_PATH}
${CM_TMP_CURRENT_SCRIPT_PATH}/a.out > tmp-run.out
test $? -eq 0 || exit 1
| true
|
50a9857d5fd2315921db73cb13dffbea553dea23
|
Shell
|
LoveEatChicken/CrystalScrapy2
|
/onlinesys/searchkit/indexupdate/bin/run.sh
|
UTF-8
| 500
| 3.59375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
if [ x"$#" == x"0" ];then
echo "Usage : $0 program"
exit
fi
PROGRAM=$1
shift
RUN_PATH=`cd $(dirname $0)/..;pwd`
export RUN_PATH
echo "program home : $RUN_PATH"
mkdir -p "$RUN_PATH/tmp"
mkdir -p "$RUN_PATH/log"
mkdir -p "$RUN_PATH/data"
mkdir -p "$RUN_PATH/status"
if [ ! -f "${RUN_PATH}/conf/${PROGRAM}.conf" ];then
echo "not found conf : ${RUN_PATH}/conf/${PROGRAM}.conf"
exit
fi
python "$RUN_PATH/bin/main.py" \
--conf "$RUN_PATH/conf/${PROGRAM}.conf"
| true
|
fd42b9e289b57e0a9c09af0198b898d8d454ea0a
|
Shell
|
zhengchalei/spring-boot-plus
|
/docs/bin/install/install-mysql.sh
|
UTF-8
| 1,857
| 2.859375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/shell
# Copyright 2019-2029 geekidea(https://github.com/geekidea)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#======================================================================
# 快速安装MySQL
# CentOS7 中已成功验证
# 使用yum+rpm方式安装
#
# author: geekidea
# date: 2019-8-29
#======================================================================
# 配置阿里云yum镜像源
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum clean all
yum makecache
# 下载mysql rpm
wget https://repo.mysql.com//mysql80-community-release-el7-3.noarch.rpm
# 安装rpm
rpm -Uvh mysql80-community-release-el7-3.noarch.rpm
# yum 安装mysql服务
yum install -y mysql-community-server
# 启动mysql服务
systemctl start mysqld.service
# 查看mysql服务状态
systemctl status mysqld.service
# 查看安装的mysql密码
grep 'temporary password' /var/log/mysqld.log
TEMP_PWD=$(grep 'temporary password' /var/log/mysqld.log)
PWD=${TEMP_PWD##* }
echo "${PWD}"
# 登陆
mysql -uroot -p${PWD}
# 进入到mysql命令行时,修改密码
# 修改密码
# ALTER USER 'root'@'localhost' IDENTIFIED BY 'Springbootplus666!';
# 使用新密码登陆
# exit
# mysql -uroot -pSpringbootplus666!
# 导入spring-boot-plus数据库脚本
# use mysql;
# source /root/mysql_spring_boot_plus.sql;
| true
|
521579aa74fa701e0df89eb545d13504821bbb1d
|
Shell
|
DreamSworK/Wakfu-Mod-Installer
|
/upx.sh
|
UTF-8
| 222
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
file="WakfuModInstaller"
if [ $(uname -s) == 'Darwin' ]
then
upx -9 "$file.app/Contents/MacOS/$file"
else
if [ $(uname -m) == 'x86_64' ]; then m="x86_64"; else m="i386"; fi
upx -9 "$file.$m.run"
fi
| true
|
d998925f89c465b9f4e66330dba18ff31904b97f
|
Shell
|
vaeth/set_prompt
|
/bin/set_prompt.sh
|
UTF-8
| 1,186
| 3.671875
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env cat
# (C) Martin V\"ath <martin at mvath.de>
# SPDX-License-Identifier: BSD-3-Clause
#
# This sample script is meant to be evaluated by zsh or bash (or the script
# sourced) if in interactive mode.
# In this example, HOSTTEXT is an additional information about the HOST,
# and HOSTTEXTSAVE provides the previous such information.
# In particular, if HOSTTEXTSAVE != HOSTTEXT, we pass the argument "1" to
# set_prompt.config to denote that we want special colors for a changed host
set_prompt() {
[ -z "${HOSTTEXT:++}" ] || set -- -e "($HOSTTEXT)" "$@"
[ "$HOSTTEXTSAVE" = "$HOSTTEXT" ] || set -- "$@" 1
local t=
t=$(PATH=$PATH:. . set_prompt "$@" && echo /) || return
[ -z "${t%/}" ] || PS1=${t%/}
}
# For bash, we patch the above function to add the arguments -b.
# For broken bash versions, also add the argument -l0
if [ -n "${BASH:++}" ]
then eval "$(funcdef=$(declare -f set_prompt)
if [ "${BASH_VERSINFO[0]}" -eq 3 ] && [ "${BASH_VERSINFO[1]}" -eq 1 ] \
&& [ "${BASH_VERSINFO[2]}" -le 17 ] && [ "${BASH_VERSINFO[3]}" -le 1 ]
then args='-bl0'
else args='-b'
fi
find='{'
replace="$find
set -- $args \"\$@\"
"
printf '%s' "${funcdef/$find/$replace}")"
fi
| true
|
ba46de2c8ec2f0c041c1d5298dffb254aa56967c
|
Shell
|
tejan74/openstack-k8s
|
/rabbitmq-server/data/bootstrap/bootstrap.sh
|
UTF-8
| 618
| 2.546875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
source /bootstrap/functions.sh
get_environment
echo $RABBIT_COOKIE > /var/lib/rabbitmq/.erlang.cookie
chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie
chmod 400 /var/lib/rabbitmq/.erlang.cookie
rabbitmq-server -detached
sleep 6
rabbitmqctl add_user $RABBIT_USERID $RABBIT_PASSWORD
rabbitmqctl set_user_tags $RABBIT_USERID administrator
rabbitmqctl set_permissions -p / $RABBIT_USERID ".*" ".*" ".*"
rabbitmqctl delete_user guest
rabbitmqctl stop
sleep 4
echo "*** User creation completed. ***"
echo "*** Log in the WebUI at port 15672 ***"
ulimit -S -n 65536
rabbitmq-server
| true
|
320d5cca6d9f40cf5aa61df9000157101ccd1f94
|
Shell
|
snatella/wine-runner-sc
|
/wine-git.sh
|
UTF-8
| 3,832
| 3.953125
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
# Prepare a Wine source tree for building: clone the requested release
# (optionally shallow), optionally apply wine-staging patches inside a
# builder container, then apply any local per-version patches.
# Required env: wine_version (or "master"). Optional: no_limit_depth,
# limit_depth, do_wine_staging, wine_staging_list, wine_staging_version,
# wine_staging_exclude. Repo URLs come from common.sh.
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
. $DIR/common.sh
if [[ "$wine_version" == "" ]]; then
echo "Please set wine_version to continue, e.g. export wine_version=4.21"
exit 1
fi
# Decide how shallow the git clones should be (default: depth 1).
if [[ "$no_limit_depth" == "yes" ]]; then
echo "Not limiting depth - beware, checkout will be slow."
checkout_depth=""
elif [[ "$limit_depth" != "" ]]; then
checkout_depth="--depth $limit_depth"
else
checkout_depth="--depth 1"
fi
#if [[ "$http_proxy" != "" ]]; then
# git config --global http.proxy $http_proxy
#fi
# Wipe any previous checkout/build artifacts so the tree is pristine.
echo "Clearing previous"
rm -rf $DIR/build/wine-git/
rm -rf $DIR/build/wine-staging/
rm -rf $DIR/build/data64/{build,wine-cfg}
rm -rf $DIR/build/data32/{build,wine-cfg,wine-tools}
echo "Cloning wine from git"
if [[ "$wine_version" == "master" ]]; then
git clone $checkout_depth --branch master ${wine_repo} $DIR/build/wine-git
else
git clone $checkout_depth --branch wine-${wine_version} ${wine_repo} $DIR/build/wine-git
fi
if [[ "$do_wine_staging" == "yes" ]]; then
echo "Doing wine staging"
if [[ "$wine_staging_list" == "" ]]; then
echo "with default patch list (did you mean to set one with wine_staging_list?)"
wine_staging_list="wined3d* d3d11* winex11-Vulkan_support"
fi
echo "Cloning wine staging from git"
# Staging tag defaults to v<wine_version> unless explicitly overridden.
if [[ "$wine_staging_version" != "" ]]; then
git clone $checkout_depth --branch ${wine_staging_version} ${wine_staging_repo} $DIR/build/wine-staging
else
git clone $checkout_depth --branch v${wine_version} ${wine_staging_repo} $DIR/build/wine-staging
fi
cd $DIR/build/wine-staging
staging_extra_exclude=""
if [[ "$wine_staging_exclude" != "" ]]; then
echo "Excluding $wine_staging_exclude"
staging_extra_exclude="-W $wine_staging_exclude"
fi
# patchinstall.sh runs inside the wine-builder64 container so the host
# does not need autoconf etc.; /build is the bind-mounted build dir.
if [[ "$wine_staging_list" == "all" ]] || [[ "$wine_staging_list" == "*" ]]; then
echo "Installing ALL wine staging patches"
set -x
docker run --rm -t -v $DIR/build:/build --name wine-builder-patcher wine-builder64:latest /build/wine-staging/patches/patchinstall.sh DESTDIR=/build/wine-git/ --force-autoconf --all $staging_extra_exclude
else
patchlist=""
echo "Expanding patch list expansions..."
# Expand the user-supplied globs against patches/ via the shell itself.
for match in $wine_staging_list; do
patchlist="$patchlist $(cd patches && echo $match)"
done
echo "Run patcher (in container)"
set -x
docker run --rm -t -v $DIR/build:/build --name wine-builder-patcher wine-builder64:latest /build/wine-staging/patches/patchinstall.sh DESTDIR=/build/wine-git/ --force-autoconf $staging_extra_exclude $patchlist
fi
# The container writes as root; hand ownership back to the invoking user.
docker run --rm -t -v $DIR/build:/build --name wine-builder-patcher wine-builder64:latest chown -R $UID:$UID /build/
set +x
echo "Fixed permissions in $DIR/build"
fi
echo "Checking for/applying local patches"
cd $DIR/build/wine-git
# do_patches DIR: apply every *.patch in DIR (if any) with patch -l -p1.
do_patches() {
local dir="$1"
for file in $(ls $dir/*.patch 2> /dev/null || true); do
echo "Applying $file"
patch -l -p1 < $file
done
}
# Prefer version+staging-specific patches over plain version patches.
if [[ -e "$DIR/patches/$wine_version/staging" ]] && [[ "$do_wine_staging" == "yes" ]]; then
echo "Found staging patch folder"
do_patches "$DIR/patches/$wine_version/staging"
elif [[ -e "$DIR/patches/$wine_version" ]]; then
echo "Found patch folder"
do_patches "$DIR/patches/$wine_version"
else
echo "No patches found for $wine_version";
fi
echo "Wine $wine_version ready for build"
if [[ "$do_wine_staging" == "yes" ]]; then
echo "... with staging '$wine_staging_list'"
if [[ "$wine_staging_list" == "all" ]] || [[ "$wine_staging_list" == "*" ]]; then
echo "... which is ALL patches"
else
echo "... which expanded to '$patchlist'"
fi
else
echo "... without staging"
fi
| true
|
35dbbeaa77e18f6ada7bcb1ec31fb4cf6d5f0b75
|
Shell
|
antoniomzrl/gitrepo3
|
/simus/2011au/plothg.sh
|
UTF-8
| 165
| 2.953125
| 3
|
[] |
no_license
|
# For each directory named on the command line, run ../plothg.gp with
# gnuplot inside it and move the generated .eps files to ../<dir>_plots/
# prefixed with the directory name.
# NOTE(review): the plots/ directory created here is never written to --
# output goes to ../${d}_plots; confirm which destination is intended.
mkdir -p plots
# "$@" (not $*) keeps directory names with spaces intact.
for d in "$@" ; do
  cd "$d" || continue
  echo 'xxx ' "$d"
  gnuplot ../plothg.gp
  for f in *eps ; do
    # Skip the literal '*eps' left by an unmatched glob.
    [ -e "$f" ] || continue
    mv "$f" "../${d}_plots/${d}_$f"
  done
  cd -
done
| true
|
2283cd15a8e54eb8d0e31e097f34a1fd0256f800
|
Shell
|
wedmonster/myconf
|
/install_linux.sh
|
UTF-8
| 332
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the bundled vim configuration: copy .vim into $HOME, replace any
# existing rc files with symlinks into ~/.vim, then install plugins.
echo "set vim configuration"
cp -r .vim ~/
for rc in .vimrc .gvimrc; do
  if [ -f ~/"$rc" ]; then
    echo "remove existing ~/$rc"
    rm ~/"$rc"
  fi
  ln -s ~/.vim/"$rc" ~/"$rc"
done
vim +PluginInstall +qall
| true
|
4a126db828c8d24ab4b5c207efdd5a209b7e7fc7
|
Shell
|
canders1/ProSPer
|
/formatting/format_all.sh
|
UTF-8
| 2,041
| 2.875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# For every lemma in lemmalist.txt, format the OANC (spoken + two written
# parts) and MASC (spoken + written) corpus extracts into per-lemma TSVs,
# then merge them and attach comparison categories. The same is done for
# the five hand-annotated verb files. Paths are relative to formatting/.
cat lemmalist.txt | while read lemma
do
echo $lemma;
python3 formatfile.py "../corpus_data/OANC_spoken_all/spoken_"$lemma"_all.txt" OANC_spoken $lemma gpt "../corpus_data/formatted/OANC_spoken_"$lemma".tsv";
python3 formatfile.py "../corpus_data/OANC_written_all/written_1_"$lemma"_all.txt" OANC_written $lemma gpt "../corpus_data/formatted/OANC_written_1_"$lemma".tsv";
python3 formatfile.py "../corpus_data/OANC_written_all/written_2_"$lemma"_all.txt" OANC_written $lemma gpt "../corpus_data/formatted/OANC_written_2_"$lemma".tsv";
python3 formatfile.py "../corpus_data/masc_spoken_all/spoken_"$lemma"_all.txt" masc_spoken $lemma gpt "../corpus_data/formatted/masc_spoken_"$lemma".tsv";
python3 formatfile.py "../corpus_data/masc_written_all/written_"$lemma"_all.txt" masc_written $lemma gpt "../corpus_data/formatted/masc_written_"$lemma".tsv";
done
# Concatenate all per-lemma corpus TSVs and add the comparison column.
cat '../corpus_data/formatted/'*'_'*'.tsv' > '../corpus_data/formatted/all.tsv'
python3 addcomparison.py ../corpus_data/formatted/all.tsv corpus ../categories.txt ../corpus_data/corpus_all.tsv
# Annotated (gold) data: one fixed file per verb, same pipeline shape.
python3 format_annotated_file.py "../annotated_data/annotated_ex_arrive.tsv" natural arrive gpt "../annotated_data/formatted/formatted_annotated_arrive.tsv"
python3 format_annotated_file.py "../annotated_data/annotated_ex_come.tsv" natural come gpt "../annotated_data/formatted/formatted_annotated_come.tsv"
python3 format_annotated_file.py "../annotated_data/annotated_ex_drive.tsv" natural drive gpt "../annotated_data/formatted/formatted_annotated_drive.tsv"
python3 format_annotated_file.py "../annotated_data/annotated_ex_go.tsv" natural go gpt "../annotated_data/formatted/formatted_annotated_go.tsv"
python3 format_annotated_file.py "../annotated_data/annotated_ex_walk.tsv" natural walk gpt "../annotated_data/formatted/formatted_annotated_walk.tsv"
cat '../annotated_data/formatted/formatted_annotated_'*'.tsv' > '../annotated_data/formatted/all.tsv'
python3 addcomparison.py ../annotated_data/formatted/all.tsv annotated ../categories.txt ../annotated_data/annotated_all.tsv
| true
|
fe4475e54e8e3e65566b261aff1aa2220c52785d
|
Shell
|
LukeMathWalker/kube-rs
|
/examples/admission_setup.sh
|
UTF-8
| 1,885
| 3.453125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
set -euo pipefail
# This script is loosely adapting the TLS setup described in
# https://kubernetes.io/blog/2019/03/21/a-guide-to-kubernetes-admission-controllers/#tls-certificates
# for local development
# Require: a private ip reachable from your cluster.
# If using k3d to test locally, then probably 10.x.x.x or 192.168.X.X
# When running behind a Service in-cluster; 0.0.0.0
test -n "${ADMISSION_PRIVATE_IP}"
# Cleanup: Remove old MutatingWebhookConfiguration if exists (immutable)
kubectl delete mutatingwebhookconfiguration admission-controller-demo || true
# Get your IP into the cert
echo "subjectAltName = IP:${ADMISSION_PRIVATE_IP}" > admission_extfile.cnf
# Generate the CA cert and private key
openssl req -nodes -new -x509 \
    -keyout ca.key \
    -out ca.crt -subj "/CN=admission-controller-demo"
# Generate the private key for the webhook server
openssl genrsa -out admission-controller-tls.key 2048
# Generate a Certificate Signing Request (CSR) for the private key
# and sign it with the private key of the CA.
openssl req -new -key admission-controller-tls.key \
    -subj "/CN=admission-controller-demo" \
    | openssl x509 -req -CA ca.crt -CAkey ca.key \
    -CAcreateserial -out admission-controller-tls.crt \
    -extfile admission_extfile.cnf
# base64 (-A: single line) of the CA cert, injected into the webhook's
# caBundle field via template substitution below.
CA_PEM64="$(openssl base64 -A < ca.crt)"
# shellcheck disable=SC2016
sed -e 's@${CA_PEM_B64}@'"$CA_PEM64"'@g' < admission_controller.yaml.tpl |
    sed -e 's@${PRIVATE_IP}@'"$ADMISSION_PRIVATE_IP"'@g' \
    | kubectl create -f -
# if behind a service:
#kubectl -n default create secret tls admission-controller-tls \
#  --cert admission-controller-tls.crt \
#  --key admission-controller-tls.key
# similar guide: https://www.openpolicyagent.org/docs/v0.11.0/kubernetes-admission-control/
# Sanity:
kubectl get mutatingwebhookconfiguration admission-controller-demo -oyaml
| true
|
90ae5bb88d8c48b6480cd265b1c608b55f4a270d
|
Shell
|
kaji-project/kaji-project
|
/tools/repo/repo-create-rpm.sh
|
UTF-8
| 784
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Create (or refresh) an RPM repository for one distro and configure
# ~/.rpmmacros so packages can be GPG-signed.
#
# Usage: bash tools/repo-create-rpm.sh <repo-folder> <distro> <sig-name>
# e.g.:  bash tools/repo-create-rpm.sh /srv/kaji-repo/ RHEL7 "Kaji Project"
#
# DEPS: createrepo
REPO_FOLDER=$1
DISTRO=$2
SIG_NAME=$3

# command -v is the portable, builtin replacement for `which`.
CREATEREPO=$(command -v createrepo)
if [ -z "$CREATEREPO" ]
then
    echo "createrepo is missing"
    exit 2
fi

# Validate all arguments before deriving any paths from them.
if [ -z "$REPO_FOLDER" ]
then
    echo "Missing repo folder"
    exit 1
fi
# Should be rhel/centos
if [ -z "$DISTRO" ]
then
    echo "Missing distro"
    exit 1
fi
if [ -z "$SIG_NAME" ]
then
    echo "Missing signature name"
    exit 1
fi

DISTRO_FOLDER="${REPO_FOLDER}/${DISTRO}"

mkdir -p "${DISTRO_FOLDER}"
"${CREATEREPO}" "${DISTRO_FOLDER}"

# Seed ~/.rpmmacros on first run; the signature name is appended each run,
# matching the original behaviour (last %_gpg_name line wins for rpm).
if [ ! -f ~/.rpmmacros ];
then
    echo "%_signature gpg" > ~/.rpmmacros
fi
echo "%_gpg_name ${SIG_NAME}" >> ~/.rpmmacros
| true
|
b6d9669dc8813ddc0131f82ee4d174bd88171b46
|
Shell
|
Yaselley/pyVAD
|
/run_vad.sh
|
UTF-8
| 1,290
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# SLURM job: run the simple voice-activity-detection pipeline.
# Usage: sbatch run_vad.sh <input-subdir> <output-subdir> <threshold>
#   threshold == 0 -> split only (VAD_simple)
#   threshold  > 0 -> split and merge segments by threshold (VAD_simple_merged)
# Paths are site-specific to the /alt/asr/... cluster layout.
#SBATCH -A shchowdhury@hbku.edu.qa
#SBATCH -J VADtest
#SBATCH -o vad_out.txt
#SBATCH -e vad_err.txt
#SBATCH -p gpu-all
#SBATCH --gres gpu:0
#SBATCH -c 40 #number of CPUs needed
module load cuda10.1/toolkit gcc6 slurm cmake
source ~/anaconda3/bin/activate ~/anaconda3/envs/vad
#pip install -r ./requirements.txt --ignore-installed
SLURM_SUBMIT_DIR="/alt/asr/shchowdhury/vad/vad_simple_pipeline"
WORK_PATH="/alt/asr/shchowdhury/vad/vad_simple_pipeline"
# Positional arguments: input folder, output folder (relative to WORK_PATH),
# and the merge threshold.
in=$1
out=$2
THRESHOLD=$3
TASK_ID="VAD_TEST_1"
INPUT_FOLDER=$WORK_PATH"/"$in
OUTPUT_FOLDER=$WORK_PATH"/"$out #data_out_merged/"
SMOOTHING_FACTOR=1.0 #smooth window (in seconds)
WEIGHT_FACTOR=0.4 #weight factor (0 < weight < 1) :the higher, the more strict
#MODEL_FILE=
VAD=1
mkdir -p $OUTPUT_FOLDER
if [ $THRESHOLD -eq 0 ]; then
python src/vad_simple.py VAD_simple -i $INPUT_FOLDER -o $OUTPUT_FOLDER --smoothing $SMOOTHING_FACTOR --weight $WEIGHT_FACTOR --classifier /alt/asr/shchowdhury/vad/vad_simple_pipeline/models/svm_rbf_sm
echo "Done Spliting"
else
python src/vad_simple.py VAD_simple_merged -i $INPUT_FOLDER -o $OUTPUT_FOLDER --smoothing $SMOOTHING_FACTOR --weight $WEIGHT_FACTOR --classifier /alt/asr/shchowdhury/vad/vad_simple_pipeline/models/svm_rbf_sm -t $THRESHOLD
echo "Done Spliting and Merged by Threshold"
fi
| true
|
4f38da04d87a45afec131813aefd546251d1a27b
|
Shell
|
kumaraguru98/shelL_script_problems
|
/day6/whileloop/extension_program_to_count_head_and_tail_in_flipcoin.sh
|
UTF-8
| 353
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Flip a fair coin until either heads or tails reaches 11, announcing each
# flip, then report the totals and declare the winner.
# Initialize explicitly instead of relying on bash treating the unset
# variables as 0 in arithmetic contexts.
headcount=0
tailcount=0
while [[ $headcount -ne 11 && $tailcount -ne 11 ]]
do
	coinflipped=$(( RANDOM % 2 ))
	if [[ $coinflipped -eq 1 ]]
	then
		echo Heads
		headcount=$(( headcount + 1 ))
	else
		echo Tails
		tailcount=$(( tailcount + 1 ))
	fi
done
echo headcount: $headcount and tailcount $tailcount
# Original tested the bare name 'headcount' (bash resolved it arithmetically
# by accident); the explicit $ form says what is meant.
if [[ $headcount -ge 11 ]]
then
	echo heads won
else
	echo tails won
fi
| true
|
eb9b30f997c91fb741f6cb4a8d4a55225873261a
|
Shell
|
muschellij2/greedyreg
|
/configure
|
UTF-8
| 1,161
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# R package configure script: locate the ITK build via the ITKR package,
# clone the greedy registration tool, generate src/install.libs.R and
# src/Makevars, then run CMake to configure the out-of-tree build.
CXX_STD=CXX11
# Ask R (through ITKR) where its pre-built ITK libraries live.
ITKDIR=`${R_HOME}/bin/Rscript -e 'a<-ITKR:::itkDir(); cat(a)'`
# get a version of cmake
CMAKE_BUILD_TYPE=Release
# get / set up ANTs code
gh=https://github.com/pyushkevich/greedy
git clone $gh greedydir
mkdir -p src
cd ./src
# install.libs.R: R runs this at install time to copy the built executables
# into the package's bin/ directory.
echo "execs = c('greedy', 'lddmm', 'libgreedyapi.a', 'test_accum')" > install.libs.R
echo "if ( any(file.exists(execs)) ) { " >> install.libs.R
echo "dest <- file.path(R_PACKAGE_DIR, paste0('bin', R_ARCH))" >> install.libs.R
echo "dir.create(dest, recursive = TRUE, showWarnings = FALSE)" >> install.libs.R
echo "file.copy(execs, dest, overwrite = TRUE)" >> install.libs.R
echo "}" >> install.libs.R
# Makevars: drive the CMake-produced Makefile from R's build of the shlib.
echo "CXX_STD = CXX11" > Makevars
echo ".PHONY: all libs" >> Makevars
echo "all: \$(SHLIB)" >> Makevars
echo "\$(SHLIB): libs" >> Makevars
echo "libs: ; \$(MAKE) -j 2 && \$(MAKE) all install" >> Makevars
# mkdir -p build
# cd build
cmake -DITK_DIR:PATH=${ITKDIR} \
    -DCMAKE_INSTALL_PREFIX:PATH=${R_PACKAGE_DIR}/libs/ \
    -DModule_ITKDeprecated:BOOL=ON \
    -DNDEBUG:BOOL=ON \
    -DCMAKE_BUILD_TYPE:STRING="${CMAKE_BUILD_TYPE}" ../greedydir
cd ../
# needed for warning
rm -rf greedydir/.git
|
26cc95ad3660b18458ad4021b04d0703b8b5de22
|
Shell
|
jvm-repo-rebuild/reproducible-central
|
/build_diffoscope.sh
|
UTF-8
| 1,122
| 3.78125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Re-run every "# diffoscope ..." command recorded in a .buildcompare file
# inside the reproducible-builds diffoscope container, collecting all output
# into a sibling .diffoscope report (ANSI codes stripped at the end).
# path to .buildcompare file
compare=$1
# relative path to source code, usually buildcache/${artifactId}
builddir=$2
diffoscope_file=$(dirname ${compare})/$(basename ${compare} .buildcompare).diffoscope
count="$(grep "^# diffoscope" ${compare} | wc -l)"
echo -e "saving build diffoscope file to \033[1m${diffoscope_file}\033[0m for $count issues"
# On macOS the BSD sed lacks the GNU syntax used below; require gsed there.
sed="sed"
if [ "$(uname -s)" == "Darwin" ]
then
command -v gsed >/dev/null 2>&1 || { echo "require GNU sed: brew install gsed Aborting."; exit 1; }
sed="gsed"
fi
counter=0
# NOTE: the while loop runs in an explicit subshell, so 'counter' is only
# meaningful inside it -- that is fine, it is just the progress index.
grep '# diffoscope ' ${compare} | ${sed} -e 's/# diffoscope //' | ( while read -r line
do
((counter++))
echo -e "$counter / $count \033[1m$line\033[0m"
docker run --rm -t -w /mnt -v $(pwd)/$(dirname ${compare})/${builddir}:/mnt:ro registry.salsa.debian.org/reproducible-builds/diffoscope --no-progress --exclude META-INF/jandex.idx $line
echo
done ) | tee ${diffoscope_file}
# remove ansi escape codes from file
${sed} -i 's/\x1b\[[0-9;]*m//g' ${diffoscope_file}
echo -e "build diffoscope file saved to \033[1m${diffoscope_file}\033[0m"
du -h ${diffoscope_file}
|
d188449c45c2bbcac40fd03190ab6cf5c3c4bf15
|
Shell
|
linsalrob/EdwardsLab
|
/prophages/run_phispy_snakemakes.sh
|
UTF-8
| 874
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
# shell script so I can run all the snakemakes!
WD=$PWD

# run_snakes DIR LABEL SNAKEFILE...: enter DIR, print the same progress line
# as before, run each snakefile with 12 jobs, then return to the start dir.
run_snakes() {
  local dir=$1 label=$2
  shift 2
  cd "$dir"
  echo "Running phispy in $label"
  local sf
  for sf in "$@"; do
    snakemake -s "$sf" -j 12
  done
  cd "$WD"
}

# phispy_metrics also post-processes with summarize.py, so it stays inline.
cd phispy_metrics
echo "Running phispy in phispy_metrics"
snakemake -s phispy_metrics.snakefile -j 12
snakemake -s phispy_no_metrics.snakefile -j 12
python3 summarize.py
cd "$WD"

run_snakes phispy_tests phispy_tests /home3/redwards/GitHubs/EdwardsLab/prophages/phispy_training_vs_test.snakefile
run_snakes phispy_training_set phispy_training_set /home3/redwards/GitHubs/EdwardsLab/prophages/phispy_with_training.snakefile
run_snakes phispy_phage_genes phispy_phage_genes /home3/redwards/GitHubs/EdwardsLab/prophages/phispy_phage_genes.snakefile
run_snakes PhiSpy_SN phispy_SN /home3/redwards/GitHubs/EdwardsLab/snakemake/phispy.snakefile
| true
|
92b4206633ef61bb1b93e16d5b7f2e36419f592f
|
Shell
|
soloboan/imputation
|
/FIMPUTE/FIminor.sh
|
UTF-8
| 11,516
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Impute sporadic missing genotypes with FImpute 2.0.
# Arguments (positional, order matters -- run with first arg 'help'):
#   1 ref          PLINK binary prefix of the reference genotypes
#   2 outref       output name for the FImpute-format reference files
#   3 finaloutfile FImpute output folder / final file prefix
#   4 outformat    'plink' or 'genotypes'
#   5 Allelecode   '12' or 'AB' (ACGT is rejected)
#   6 Pedigree     optional pedigree file (IID Sire Dam Sex)
###############################
ref=$1
outref=$2
finaloutfile=$3
outformat=$4
Allelecode=$5
Pedigree=$6
###############################
echo " "
echo " "
echo "@***********************************************************************@"
echo "@ Genotype imputation of sporadic missing alleles @"
echo "@ using FImpute version 2.0 @"
echo "@-----------------------------------------------------------------------@"
echo "@ bash script written by: @"
echo "@ Solomon A. Boison |soloboan@yahoo.com| @"
echo "@ bash_version 2.0.0 @"
echo "@ 22/09/2015 @"
echo "@***********************************************************************@"
echo " "
echo " "
#### Parameter file information
if [ ${ref} = help ]; then
echo " These are the parameters needed (the order is important)"
echo " ==== see the README file for more details ===="
echo "*** Arguments *****
1. Reference file (should be a PLINK binary file with alleles coded as 11, 12, 22)
2. output name of Reference file
3. Output name of final file after imputation
4. Format of output file (either in 'plink' or 'genoytpes')
5. Allelecode (12 or AB)
6. Pedigree data (IID Sire Dam Sex) (optional) "
echo " "
echo " "
exit
fi
###################################################################
# # Download PLINK from the web if not available
if [ ! -f plink2 ]; then
echo "Please - Download Plink v2 and rename it as plink2 "
exit
fi
# Download FImpute from the web if not available
if [ ! -f FImpute ]; then
echo "FImpute was not found in the current directory, it been downloaded ....... "
echo " "
wget http://www.aps.uoguelph.ca/~msargol/fimpute/FImpute_Linux.zip
unzip FImpute_Linux.zip
cp FImpute_Linux/FImpute .
rm -r FImpute_Linux*
fi
###################################################################
#### Checking if input file is available
if [ ! -f ${ref}.bed ] || [ ! -f ${ref}.bim ] || [ ! -f ${ref}.fam ]; then
echo "*** File " ${ref}.* " representing the genotype file of PLINK binary format were not found ***"
echo " "
echo " "
exit
fi
# Refuse to clobber an existing output folder (FImpute would fail later).
if [ -d $finaloutfile ]; then
echo "*** Directory "$finaloutfile" already exist, delete it and re-run the script ***"
echo " "
echo " "
exit
fi
###
if [ $outformat = plink ]; then
echo " "
elif [ $outformat = genotypes ]; then
echo ""
else
echo "Specify the correct output format***"
echo " either as 'plink' or 'genotypes' "
echo " "
echo " "
exit
fi
################################################
# Create temporary folder for analysis
FOLDER=tmp$RANDOM
mkdir ${FOLDER}
cp FImpute plink2 ${FOLDER}/.
cd ${FOLDER}
#################################################
echo " "
echo "**** Data processing for imputation started ****"
#Allelecode=$(awk '{print $6}' ../${ref}.bim | sort | uniq | awk '{if ($1==1) print "12"; else if ($1=="B") print "AB"; else if($1=="G" || $1=="T" || $1=="C") print "ACGT"}')
Allelecode=$(echo $Allelecode)
##### REFERENCE #######
# Recode the PLINK binary reference to an additive (0/1/2) raw file; AB data
# is first remapped to 1/2 alleles so the counted allele is consistent.
if [ $Allelecode = 12 ]; then
cat ../${ref}.bim | awk '{print $2,2}' > recodeallele.txt
./plink2 --silent --cow --nonfounders --bfile ../${ref} --make-bed --out ref_upd
./plink2 --silent --cow --noweb --nonfounders --bfile ref_upd --recode A --recode-allele recodeallele.txt --out geno
rm recodeallele.txt
elif [ $Allelecode = AB ]; then
cat ../${ref}.bim | awk '{print $2,"A","B",1,2}' > alleleupdate.txt
./plink2 --silent --cow --nonfounders --bfile ../${ref} --update-alleles alleleupdate.txt --make-bed --out ref_upd
cat ref_upd.bim | awk '{print $2,2}' > recodeallele.txt
./plink2 --silent --cow --noweb --nonfounders --bfile ref_upd --recode A --recode-allele recodeallele.txt --out geno
rm alleleupdate.txt recodeallele.txt
elif [ $Allelecode = ACGT ]; then
echo "ACGT format is not allowed -- Please use an AB coding or 12"
echo "The 12 format can be obtained with PLINK using the --recode12"
echo " "
echo " "
exit
fi
# Build the FImpute genotype file: one row per animal (ID, chip, packed
# genotype string) with NA recoded to 5, plus the SNP info file.
awk 'NR>1 {print $2,1}' geno.raw > IDs_sons.txt
awk 'NR>1' geno.raw | cut -d' ' -f7- | awk '{gsub(/NA/,5); print}' |
awk 'BEGIN {FS=" ";OFS=""} {$1=$1; print}' |
paste -d' ' IDs_sons.txt - > Fimpsons.geno
echo 'IID Chip Call.........' > header
cat header Fimpsons.geno > $outref.geno
cat ref_upd.bim | awk '{print $2,$1,$4,NR}' > tmp
echo 'SNP_ID Chr Pos Chip1' > chipheader
cat chipheader tmp > $outref.snpinfo
rm geno.* IDs_sons.txt Fimpsons.geno chipheader header tmp
rm ref_upd.*
# Write the FImpute control file; '*' is a placeholder for '"' (fixed by the
# sed below) so the quotes survive the echo. With a pedigree, parentage
# checks and imputation of ungenotyped animals are switched on.
if [ -z "${Pedigree}" ]; then
echo "title=*population based imputation of ${finaloutfile}*;
genotype_file=*${outref}.geno*;
snp_info_file=*${outref}.snpinfo*;
output_folder=*$finaloutfile*;
save_genotype;
save_hap_lib /diplotype;
njob=4;" > $finaloutfile.ctr
sed -i 's/*/"/g' $finaloutfile.ctr
else
thresmm=$(awk 'END {print 250/NR}' ../${ref}.bim)
thresm=$(awk 'END {print 50/NR}' ../${ref}.bim)
echo "title=*population based imputation of ${finaloutfile}*;
genotype_file=*${outref}.geno*;
snp_info_file=*${outref}.snpinfo*;
ped_file=*../${Pedigree}*;
output_folder=*${finaloutfile}*;
parentage_test /ert_mm=$thresmm /ert_m=$thresm /find_match_cnflt /find_match_mp /find_match_ugp /remove_conflict;
add_ungen /min_fsize=5 /output_min_fsize=5 /output_min_call_rate=0.95 /save_sep;
save_genotype;
save_hap_lib /diplotype;
njob=4;" > $finaloutfile.ctr
sed -i 's/*/"/g' $finaloutfile.ctr
fi
echo " "
echo "data processing eneded, FImpute will start soon ............"
echo " "
#***** run FImpute *****#
./FImpute $finaloutfile.ctr
if [ ! -d $finaloutfile ]; then
echo "*** Imputation unsuccessful errors were detected ***"
echo " "
echo " "
exit
fi
if [ ! -f ${finaloutfile}/genotypes_imp.txt ]; then
echo "***** Imputation unsuccessful errors were detected *****"
if [ -f ${finaloutfile}/report.txt ]; then
cat ${finaloutfile}/report.txt
fi
echo " "
echo " "
exit
fi
echo "**********************************************************"
echo "****** Imputation finished *********"
echo "****** *********"
echo "**********************************************************"
# Convert FImpute output back to the requested format. The packed genotype
# string is split per character (awk FS="") and 0/1/2/5 mapped to allele
# pairs for PLINK, or kept as digits for the plain 'genotypes' format.
if [ $outformat = plink ]; then
echo "** Preparing imputed files into PLINK binary data format *"
nloci=$(awk 'END {print NR}' ../${ref}.bim)
nanim=$(awk 'END {print NR}' ../${ref}.fam)
if [ $nloci -gt 50000 ]; then
echo "This process might take some time to complete depending on the "
echo "1. number of markers and samples"
echo "2. computer processor speed"
elif [ $nanim -gt 1000 ]; then
echo "This process might take some time to complete depending on the "
echo "1. number of markers and samples"
echo "2. computer processor speed"
fi
#******** Extract the imputed data and make a PLINK file ***********#
cat ${finaloutfile}/genotypes_imp.txt | awk 'NR>1 {print $3}' |
awk 'BEGIN {FS="";OFS=" "} {$1=$1; print $0}' |
awk '{for (i=1;i<=NF;i++) { if($i==0) $i="1 1"; else if($i==1) $i="1 2"; else if($i==2) $i="2 2"; else if($i==5) $i="0 0"} print}' > geno
cat ${finaloutfile}/genotypes_imp.txt | awk 'NR>1 {print $1,$1,0,0,0,-9}' > ids.txt
paste -d' ' ids.txt geno > file.ped
cat $outref.snpinfo | awk 'NR>1 {print $2,$1,0,$3}' > file.map
rm ids.txt geno
if [ $Allelecode = 12 ]; then
./plink2 --silent --cow --nonfounders --file file --make-bed --out imp
./plink2 --silent --cow --nonfounders --bfile imp --make-bed --out ${finaloutfile}_imp
rm file.map file.ped imp.*
elif [ $Allelecode = AB ]; then
cat ../${ref}.bim | awk '{print $2,1,2,"A","B"}' > alleleupdate.txt
./plink2 --silent --cow --nonfounders --file file --update-alleles alleleupdate.txt --make-bed --out imp
./plink2 --silent --cow --nonfounders --bfile imp --make-bed --out ${finaloutfile}_imp
rm alleleupdate.txt file.map file.ped imp.*
fi
#******** Extract the imputed ungenotyped animals and make a PLINK file ***********#
if [ -f ${finaloutfile}/genotypes_imp_chip0.txt ]; then
cat ${finaloutfile}/genotypes_imp_chip0.txt | awk 'NR>1 {print $3}' |
awk 'BEGIN {FS="";OFS=" "} {$1=$1; print $0}' |
awk '{for (i=1;i<=NF;i++) { if($i==0) $i="1 1"; else if($i==1) $i="1 2"; else if($i==2) $i="2 2"; else if($i==5) $i="0 0"} print}' > geno
cat ${finaloutfile}/genotypes_imp_chip0.txt | awk 'NR>1 {print $1,$1,0,0,0,-9}' > ids.txt
paste -d' ' ids.txt geno > file.ped
cat $outref.snpinfo | awk 'NR>1 {print $2,$1,0,$3}' > file.map
rm ids.txt geno
if [ $Allelecode = 12 ]; then
./plink2 --silent --cow --nonfounders --file file --make-bed --out ungenoimp
./plink2 --silent --cow --nonfounders --bfile ungenoimp --make-bed --out ${finaloutfile}_ungenoimp
rm file.map file.ped ungenoimp.*
elif [ $Allelecode = AB ]; then
cat ../${ref}.bim | awk '{print $2,1,2,"A","B"}' > alleleupdate.txt
./plink2 --silent --cow --nonfounders --file file --update-alleles alleleupdate.txt --make-bed --out ungenoimp
./plink2 --silent --cow --nonfounders --bfile ungenoimp --make-bed --out ${finaloutfile}_ungenoimp
rm alleleupdate.txt file.map file.ped ungenoimp.*
fi
fi
elif [ $outformat = genotypes ]; then
echo "** Preparing imputed files into genotype file format *"
echo ' '
nloci=$(awk 'END {print NR}' ../${ref}.bim)
nanim=$(awk 'END {print NR}' ../${ref}.fam)
if [ $nloci -gt 50000 ]; then
echo "This process might take some time to complete depending on the "
echo "1. number of markers and samples"
echo "2. computer processor speed"
elif [ $nanim -gt 1000 ]; then
echo "This process might take some time to complete depending on the "
echo "1. number of markers and samples"
echo "2. computer processor speed"
fi
#******** Extract the imputed data ***********#
cat ${finaloutfile}/genotypes_imp.txt | awk 'NR>1 {print $3}' |
awk 'BEGIN {FS="";OFS=" "} {$1=$1; print $0}' > geno
cat ${finaloutfile}/genotypes_imp.txt | awk 'NR>1 {print $1}' > ids.txt
paste -d' ' ids.txt geno > ${finaloutfile}_imp.genotype
cat $outref.snpinfo | awk 'NR>1 {print $2,$1,0,$3}' > ${finaloutfile}_imp.map
rm ids.txt geno
#******** Extract the imputed ungenotyped animals and make a PLINK file ***********#
if [ -f ${finaloutfile}/genotypes_imp_chip0.txt ]; then
cat ${finaloutfile}/genotypes_imp_chip0.txt | awk 'NR>1 {print $3}' |
awk 'BEGIN {FS="";OFS=" "} {$1=$1; print $0}' > geno
cat ${finaloutfile}/genotypes_imp_chip0.txt | awk 'NR>1 {print $1}' > ids.txt
paste -d' ' ids.txt geno > ${finaloutfile}_ungenoimp.genotype
rm ids.txt geno
fi
fi
# Move results out of the temp folder and clean up.
rm *.snpinfo *.ctr *.geno
rm plink2 FImpute
cp -r * ../.
cd ..
rm -r $FOLDER
echo " "
echo " Imputation finished - files per chromosome are stored in the folder
interMS-summary${finaloutfile} "
echo " "
echo " Imputed genotypes for all chromsomes are merge and stored in the currect directory as "
if [ $outformat = plink ]; then
echo "
${finaloutfile}_imp.bed
${finaloutfile}_imp.bim
${finaloutfile}_imp.fam "
elif [ $outformat = genotypes ]; then
echo "
${finaloutfile}_imp.genotype
${finaloutfile}_imp.map "
fi
if [ -f ${finaloutfile}/genotypes_imp_chip0.txt ]; then
echo "
${finaloutfile}_ungenoimp"
fi
echo " "
echo " "
echo " "
echo "@***********************************************************************@"
echo "@ Report bugs to: solobaon@yahoo.com @"
echo "@ "$(date)" @"
echo "@***********************************************************************@"
| true
|
6344d0620d3b6326ea445391c00d56c1d2f537fe
|
Shell
|
faenrir/dots
|
/scripts/drawterm
|
UTF-8
| 245
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/sh
# spawn a new terminal window inside a drawn rectangle
set -e
# st font cell size in pixels (width/height per glyph) and the window border
# width; used to convert the selected pixel rectangle to a cell geometry.
wglyph=9
hglyph=18
border=2
# slop prints "WxH+X+Y"; translating 'x' and '+' to spaces yields 4 fields.
slop | tr 'x+' ' ' | {
read w h x y
# pixels -> terminal columns/rows, discounting the border.
w=$(( ( w - border ) / wglyph ))
h=$(( ( h - border ) / hglyph ))
exec st -g ${w}x${h}+${x}+${y} &
}
| true
|
74d50dc4100b20570a49c4d570b708768befbdbb
|
Shell
|
ahmedcs/FLASHE
|
/experiments/app/standalone_install.sh
|
UTF-8
| 2,142
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Prepare an Ubuntu host for a standalone FATE deployment: check sudo,
# install Anaconda if missing, raise system limits, switch /bin/sh to bash,
# and install/symlink the native libraries FATE's CentOS scripts expect.
# cannot even proceed with FATE if not having sudo privilege
echo "[INFO] Check sudo privilege..."
if ! groups | grep "\<sudo\>" &> /dev/null; then
echo "[FAILED] You need to have sudo priviledge for deploying FATE"
fi
# install anaconda if necessary
CONDA_DIR=$HOME/anaconda3
if [ ! -d $CONDA_DIR ]; then
echo "[INFO] Install Anaconda Package Manager..."
wget https://repo.anaconda.com/archive/Anaconda3-2020.11-Linux-x86_64.sh
bash Anaconda3-2020.11-Linux-x86_64.sh -b -p $CONDA_DIR
export PATH=$CONDA_DIR/bin:$PATH
rm Anaconda3-2020.11-Linux-x86_64.sh
conda init bash
fi
source ~/anaconda3/etc/profile.d/conda.sh
# These are explicitly required by FATE's cluster deployment
echo "[INFO] Make deployment directory and extend system limits..."
sudo mkdir -p /data/projects
sudo chown -R ubuntu:ubuntu /data/projects
# In-place edit (or append) of the nofile/nproc limits; idempotent on re-run.
sudo sed -i '/^\*\s\{1,\}soft\s\{1,\}nofile/{h;s/nofile\s\{1,\}.*/nofile 65536/};${x;/^$/{s//\* soft nofile 65536/;H};x}' /etc/security/limits.conf
sudo sed -i '/^\*\s\{1,\}hard\s\{1,\}nofile/{h;s/nofile\s\{1,\}.*/nofile 65536/};${x;/^$/{s//\* hard nofile 65536/;H};x}' /etc/security/limits.conf
sudo sed -i '/^\*\s\{1,\}soft\s\{1,\}nproc/{h;s/nproc\s\{1,\}.*/nproc unlimited/};${x;/^$/{s//\* soft nproc unlimited/;H};x}' /etc/security/limits.d/20-nproc.conf
# These are necessary to accommodate FATE's CentOS scripts
# when you are running atop Ubuntu
# make /bin/sh symlink to bash instead of dash
echo "[INFO] Adapt to Ubuntu..."
echo "dash dash/sh boolean false" | sudo debconf-set-selections
sudo dpkg-reconfigure -f noninteractive dash
sudo apt update
sudo apt-get install -y gcc g++ make openssl supervisor libgmp-dev libmpfr-dev libmpc-dev libaio1 libaio-dev numactl autoconf automake libtool libffi-dev libssl1.0.0 libssl-dev liblz4-1 liblz4-dev liblz4-1-dbg liblz4-tool zlib1g zlib1g-dbg zlib1g-dev libgflags-dev
# CentOS builds link against libssl.so.10 / libgflags.so.2 sonames; provide
# them as symlinks to the Ubuntu equivalents.
cd /usr/lib/x86_64-linux-gnu
if [ ! -f "libssl.so.10" ];then
sudo ln -s libssl.so.1.0.0 libssl.so.10
sudo ln -s libcrypto.so.1.0.0 libcrypto.so.10
fi
if [ ! -f "libgflags.so.2" ];then
sudo ln -s libgflags.so.2.2 libgflags.so.2
fi
echo "[SUCCEEDED] Finish"
| true
|
99e7a1b61ff5b95f50ca030ce9e5af10526d6723
|
Shell
|
jpgalmeida/bep-iot
|
/hlf_kafka/generateArtifacts.sh
|
UTF-8
| 2,692
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash +x
#
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
#set -e

# Generates all bootstrap artifacts for the Fabric network: org crypto
# material, the orderer genesis block, the channel creation transaction,
# and anchor-peer updates for Org1MSP/Org2MSP/Org3MSP.
# Usage: ./generateArtifacts.sh [channel-name]   (default: mychannel)
CHANNEL_NAME1=$1
: ${CHANNEL_NAME1:="mychannel"}
echo $CHANNEL_NAME1

# Make the locally-built Fabric tools visible and point configtxgen at
# the configtx.yaml in the current directory.
export PATH=$GOPATH/src/github.com/hyperledger/fabric/build/bin:${PWD}/../bin:${PWD}:$PATH
export FABRIC_CFG_PATH=${PWD}
echo

## Generates Org certs using cryptogen tool
function generateCerts (){
  ./bin/cryptogen generate --config=./crypto-config.yaml
  if [ "$?" -ne 0 ]; then
    echo "Failed to generate crypto material..."
    exit 1
  fi
}

## Generate orderer genesis block , channel configuration transaction and anchor peer update transactions
function generateChannelArtifacts() {
  echo "##########################################################"
  echo "######### Generating Orderer Genesis block ##############"
  echo "##########################################################"
  # Note: For some unknown reason (at least for now) the block file can't be
  # named orderer.genesis.block or the orderer will fail to launch!
  ./bin/configtxgen -profile TwoOrgsOrdererGenesis -outputBlock ./channel-artifacts/genesis.block
  # Abort on failure so later steps never run against missing artifacts
  # (consistent with the check in generateCerts above).
  if [ "$?" -ne 0 ]; then
    echo "Failed to generate orderer genesis block..."
    exit 1
  fi
  echo
  echo "#################################################################"
  echo "### Generating channel configuration transaction 'channel.tx' ###"
  echo "#################################################################"
  ./bin/configtxgen -profile TwoOrgsChannel -outputCreateChannelTx ./channel-artifacts/mychannel.tx -channelID $CHANNEL_NAME1
  if [ "$?" -ne 0 ]; then
    echo "Failed to generate channel configuration transaction..."
    exit 1
  fi
  echo
  echo "#################################################################"
  echo "#######    Generating anchor peer update for Org1MSP   ##########"
  echo "#################################################################"
  ./bin/configtxgen -profile TwoOrgsChannel -outputAnchorPeersUpdate ./channel-artifacts/Org1MSPanchors$CHANNEL_NAME1.tx -channelID $CHANNEL_NAME1 -asOrg Org1MSP
  if [ "$?" -ne 0 ]; then
    echo "Failed to generate anchor peer update for Org1MSP..."
    exit 1
  fi
  echo
  echo "#################################################################"
  echo "#######    Generating anchor peer update for Org2MSP   ##########"
  echo "#################################################################"
  ./bin/configtxgen -profile TwoOrgsChannel -outputAnchorPeersUpdate ./channel-artifacts/Org2MSPanchors$CHANNEL_NAME1.tx -channelID $CHANNEL_NAME1 -asOrg Org2MSP
  if [ "$?" -ne 0 ]; then
    echo "Failed to generate anchor peer update for Org2MSP..."
    exit 1
  fi
  echo
  echo
  echo "#################################################################"
  echo "#######    Generating anchor peer update for Org3MSP   ##########"
  echo "#################################################################"
  ./bin/configtxgen -profile TwoOrgsChannel -outputAnchorPeersUpdate ./channel-artifacts/Org3MSPanchors$CHANNEL_NAME1.tx -channelID $CHANNEL_NAME1 -asOrg Org3MSP
  if [ "$?" -ne 0 ]; then
    echo "Failed to generate anchor peer update for Org3MSP..."
    exit 1
  fi
  echo
}

generateCerts
generateChannelArtifacts
| true
|
f6d13c0d3ff8667ec8a5e83ffb6d76bcc74e49fe
|
Shell
|
brahimrafik/bluebank-atm-server
|
/atm-startup.sh
|
UTF-8
| 278
| 2.5625
| 3
|
[] |
no_license
|
#! /bin/bash
# ATM run server: start the nginx front end, then the Java ATM server
# from the single bluebank-atm-*.jar deployed under /tmp/ATM-SERVER/lib.
cd /tmp/ATM-SERVER/lib || { echo "cannot cd to /tmp/ATM-SERVER/lib" >&2; exit 1; }

# Locate the application jar via shell globbing instead of parsing `ls`
# output (which breaks on unusual filenames and hides glob failures).
jars=(bluebank-atm-*.jar)
if [ ! -e "${jars[0]}" ]; then
    echo "no bluebank-atm-*.jar found in /tmp/ATM-SERVER/lib" >&2
    exit 1
fi
runningJar=${jars[0]}

echo " -- launching ATM CLIENT (NGINX) : nginx"
nginx
echo " -- launching ATM SERVER : java -jar ${runningJar} org.bluebank.Application"
java -jar "${runningJar}" org.bluebank.Application
| true
|
d619d7d19fe3571765a53941081b40d1184f32b0
|
Shell
|
net3f/web3-assets-chart
|
/rclone-container/sync.sh
|
UTF-8
| 1,894
| 3.84375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Mirror assets from an rclone "drive" remote into the w3f/web3-assets
# git repository: files whose path matches $selector are copied into the
# repo, files removed on the remote are deleted from the repo, and the
# result is committed and pushed (push is skipped when $CI is set).

# Write a minimal rclone config for the remote from environment variables.
mkdir -p /root/.config/rclone/
echo [$DRIVE_NAME] > /root/.config/rclone/rclone.conf
echo type = $DRIVE_NAME >> /root/.config/rclone/rclone.conf
echo scope = $DRIVE_NAME.$DRIVE_SCOPE >> /root/.config/rclone/rclone.conf
echo root_folder_id = $ROOT_FOLDER_ID >> /root/.config/rclone/rclone.conf
echo token = $DRIVE_TOKEN >> /root/.config/rclone/rclone.conf
# Working locations and the path substring that marks a file as publishable.
assets_dir="${ASSETS_DIRECTORY:-/assets}"
repo_dir="${REPO_DIRECTORY:-/repo}"
selector="${PUBLISH_SELECTOR:-Public}"
mkdir -p $repo_dir
mkdir -p $assets_dir
# Clone the repo (or pull if a clone already exists), then sync the
# remote's contents into $assets_dir.
init_directory(){
if [ -d "$repo_dir/.git" ]; then
echo .git dir, procceding with pull
cd $repo_dir
git pull https://github.com/w3f/web3-assets.git
cd ..
else
echo NOT a .git dir, procceding with clone
git clone https://github.com/w3f/web3-assets.git $repo_dir
fi
echo Sync with drive
rclone sync --progress drive: $assets_dir
}
# Copy matching asset files into the repo tree and drop repo files whose
# asset counterpart no longer exists.
sync_files(){
echo Selecting files
# Split the find output on newlines only so paths containing spaces
# survive the for-loop word splitting.
# NOTE(review): OIFS is saved here but never restored afterwards.
OIFS="$IFS"
IFS=$'\n'
for path in $(find $assets_dir -type f | grep "$selector" )
do
printf "File to publish: %s\n" "$path"
# Recreate the asset's directory structure under the repo, then copy.
trg="$repo_dir/$(dirname "${path}")/"
mkdir -p "$trg"
cp "$path" "$trg"
done
echo Checking for deleted files
for path in $(find $repo_dir -type f | grep -v ".git" )
do
# Strip a literal leading "repo" to map the repo path back to the asset
# path — this only works when $repo_dir is the relative path "repo";
# TODO confirm against the deployment's REPO_DIRECTORY.
assetfile="${path##repo}"
echo Cheking for $path at: $assetfile
# NOTE(review): $assetfile and $path are unquoted below; paths with
# embedded whitespace could still misbehave here.
if [ -f $assetfile ]
then
echo $assetfile exists.
else
echo $assetfile removed, removing from repo $path
rm -f $path
fi
done
echo Removing empty directories
find $repo_dir -type d -empty -delete || true
}
# Commit everything in the repo and push with the bot token, unless
# running under CI ($CI set/non-empty suppresses the push).
sync_repo(){
echo Sync repo
cd $repo_dir
git config --global user.name "w3fbot"
git config --global user.email "devops@web3.foundation"
git add -A
git commit -m "Auto sync"
git status
if [ -z "$CI" ]; then
git push -q https://${GITHUB_BOT_TOKEN}@github.com/w3f/web3-assets.git
fi
}
init_directory
sync_files
sync_repo
| true
|
f2e4400a7636c3190ee359714ad41b25886db9b2
|
Shell
|
zhenjun85/myscript
|
/mymemo
|
UTF-8
| 1,158
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Tiny memo manager backed by plain text files in $memofolder.
#   mymemo        show a memo (interactive numbered menu)
#   mymemo -a     add a new memo (opens vim on a new file)
#   mymemo -d     delete a memo (with Y/N confirmation)
#   mymemo -h x   test option: just echoes its argument
# `myecho` is an external colour-echo helper — presumably another script
# in ~/bin; -y/-c/-bo/-r look like colour flags (TODO confirm).
memofolder=~/bin/data/memo/
# Prompt for a title, replace spaces with underscores so the filename is
# word-split safe, and edit the new memo in vim.
addmemo() {
myecho -y "=========Enter The Memo Title========="
read _title
title=`echo $_title |sed "s/ /_/g"`
cd $memofolder
vim $title
}
# Let the user pick a memo from a numbered menu and print its contents.
# NOTE(review): `select memo in \`ls\`` word-splits filenames; addmemo's
# underscore substitution avoids creating names this would break on.
showmemo() {
myecho -c "==========Select The Memo =========="
cd $memofolder
select memo in `ls`
do
clear
echo $memo
myecho -bo "-------------------------------------------"
cat $memo
myecho -bo "-------------------------------------------"
break
done
}
# Pick a memo, then ask Y/N before removing the file.
deletememo() {
cd $memofolder
select memo in `ls`
do
myecho -r "delete the memo $memo ???"
select res in "Y" "N"
do
if [ $res == "Y" ]
then
rm $memo
fi
break
done
break
done
}
# Option dispatch; each flag runs one action and exits.  `h:` means -h
# takes an argument (it only echoes it — marked as a test below).
while getopts adh: arg
do
case $arg in
a)
addmemo
exit
;;
d)
deletememo
exit
;;
h)
#just for test
echo "$OPTARG"
exit
;;
?)
exit
;;
esac
done
# Default action when no option is given: interactive memo viewer.
showmemo
| true
|
c0d10086a4ca1aa5e882acaa5ea93134fb6cc715
|
Shell
|
knochelh/deptools
|
/contrib/gerrit/update-change
|
UTF-8
| 1,009
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Usage: update-change change_id [ref]
#
# Push a new patchset for the given change_id.
# The new ref is pushed for the code review (default: HEAD).
#
# If the gerrit remote is defined, use it, otherwise
# push directly to the server defined in config.sh.
#
# User email is excluded from the reviewer list.
#
set -e
# change_id is mandatory: ${1?} aborts with an error if it is missing.
change=${1?}
ref=${2:-HEAD}
# Shared helpers (the forge_* functions) and site configuration;
# local.sh, when present, overrides config.sh defaults.
source `dirname $0`/common.sh
source `dirname $0`/config.sh
[ ! -f `dirname $0`/local.sh ] || source `dirname $0`/local.sh
# Forge reviewer options from reviewers list
# ($reviewers is expected from config.sh/local.sh — TODO confirm.)
reviewer_options=`forge_reviewer_options "$reviewers"`
# Forge actual remote if the first one does not exits
# ($remote/$protocol/$server/$project come from the sourced files.)
remote=`forge_remote_parameter $remote $protocol $server/$project`
# Update patchset
# The reviewer options ride along on receive-pack so Gerrit registers
# reviewers as part of the push that creates the new patchset.
echo "Updating patchset $change with ref $ref to remote $remote."
echo "Executing: git push --receive-pack=\"git receive-pack $reviewer_options\" $remote $ref:refs/changes/$change"
git push --receive-pack="git receive-pack $reviewer_options" $remote $ref:refs/changes/$change
| true
|
85a582a38cc7ddfdb5f305d16700365dee90335b
|
Shell
|
thomasrogerlux/fromwebtopdf
|
/fromwebtopdf.sh
|
UTF-8
| 1,733
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# Entry point: build the fromwebtopdf docker image and run it against a
# user-supplied URL list.  config.sh is expected to provide defaults
# ($default_parallel_runs, $docker_flags, colour codes, $fail_file and
# presumably $log_file — TODO confirm).
cd $(dirname $0)
source ./src/config.sh
# Flag defaults; overridden by command-line options in get_args.
rebuild=0
clean=0
log=0
parallel_runs=$default_parallel_runs
# Parse command-line options into the globals above.
function get_args {
while [[ $# -gt 0 ]]
do
arg=$1
case $arg in
-r|--rebuild)
rebuild=1
shift
;;
-c|--clean)
clean=1
shift
;;
-l|--log)
log=1
shift
;;
-p|--parallel)
parallel_runs=$2
shift
shift
;;
-i|--input)
list=$2
shift
shift
;;
-h|--help)
cat "./src/helper.txt"
exit
;;
*)
echo "Error: Unknow option \"$1\""
exit 1
;;
esac
done
}
# Apply the flags: wipe old logs on --clean, silence logging unless
# --log was given, and ensure the output directory exists.
# NOTE(review): when --log is given, log_file is never assigned here;
# unless config.sh sets it, `>> $log_file` below becomes an ambiguous
# redirect — verify against src/config.sh.
function init {
if [ $clean = 1 ]
then
rm -f log-*.txt
rm -f fail-*.txt
fi
if [ $log = 0 ]
then
log_file="/dev/null"
fi
if [ ! -d "./dist" ]
then
mkdir "./dist"
fi
}
# Abort unless --input named an existing file.
function check_list {
if [ -z $list ]
then
echo "Error: You have to specify a list"
exit 1
else
if [ ! -f $list ]
then
echo "Error: The specified list does not exist"
exit 1
fi
fi
}
# Build the docker image (from scratch on --rebuild), reporting OK/KO
# based on the build's exit status; build output goes to $log_file.
function build_docker_image {
echo -ne "[..] Build docker image (This may take a long time)\r"
if [ $rebuild = 1 ]
then
docker build --no-cache -t fromwebtopdf -f ./docker/Dockerfile . >> $log_file
else
docker build -t fromwebtopdf -f ./docker/Dockerfile . >> $log_file
fi
if [ $? = 0 ]
then
echo -e "[${GREEN}OK${NORMAL}]"
else
echo -e "[${RED}KO${NORMAL}]"
exit 1
fi
}
# Run the generator inside the container with the list, log/fail files
# and the parallelism level as arguments.
function launch_container {
docker run $docker_flags fromwebtopdf /mnt/src/generator.sh $list $log_file $fail_file $parallel_runs
}
function main {
get_args $@
init
check_list
build_docker_image
launch_container
}
main $@
| true
|
a8b5644e44d0ba904ee08fee660a68f8a33e3bc3
|
Shell
|
sKanoodle/ns-server
|
/client.sh
|
UTF-8
| 223
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Report this machine's current public IP to a remote server over TLS
# using a small line protocol (login/password, then a "change-ip"
# command — presumably a dynamic-IP registration service).
# Fill in SERVER_NAME, ENTRY and PW before use.
SERVER_NAME=''
SERVER_PORT='24654'
ENTRY=''
PW=''
# Discover the public IP via ipinfo.io's JSON API.
IP=$( curl -s 'ipinfo.io' | jq -r '.ip' )
# The trailing sleep keeps stdin (and thus the TLS connection) open long
# enough for the server to process the request before openssl exits.
(echo -e "login: ${ENTRY}\n${PW}\n\nchange-ip: ${IP}\n"; sleep 5) | openssl s_client -connect $SERVER_NAME:$SERVER_PORT
| true
|
50e7c47126c15d473e59fa0e9c0a79a5327ef6f6
|
Shell
|
brontosaurusrex/bucentaur
|
/.experiments/bin/wallpapers
|
UTF-8
| 366
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Cycle the desktop wallpaper once an hour and bounce wbar after each change.

# Apply a random wallpaper from ~/wallpapers via nitrogen.
pick_wallpaper() {
  nitrogen --set-zoom-fill --set-color=#456 --random "$HOME/wallpapers"
}

# If the named process is running, kill it (waiting for it to exit) and
# relaunch it in the background; otherwise just report that it wasn't up.
restart() {
  if ! pgrep -x "$1" > /dev/null; then
    echo "$1 wasn't running"
  else
    (
      echo "$1 running, restarting"
      killall -w "$1"
      "${1}" &
    ) &
  fi
}

# Run forever: new wallpaper, restart wbar, sleep an hour.
while :; do
  pick_wallpaper && restart wbar
  sleep 3600
done
| true
|
5dd7973bd721841601701d1d66a116bc7b21e103
|
Shell
|
aws-samples/aws-service-catalog-reference-architectures
|
/codepipeline/install.sh
|
UTF-8
| 3,895
| 3.5625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This script will setup the Automated pipeline, IAM Roles, and a ServiceCatalog Portfolio using the
# reference architecture as example products. This will create resources in your currently active CLI account
# across three regions using CloudFormation StackSets. You will be responsible for any costs resulting from the usage
# of this script.
# Caller account id; `tr -d '"'` strips the JSON quoting from the CLI output.
ACC=$(aws sts get-caller-identity --query 'Account' | tr -d '"')
# add child accounts as space delimited list.
# You will need to ensure StackSet IAM roles are correctly setup in each child account
childAcc=""
# Comma-separated variant (for template parameters) via pattern substitution.
childAccComma=${childAcc// /,}
allACC="$ACC $childAcc"
export AWS_DEFAULT_REGION=us-east-1
allregions="us-east-1 us-east-2 us-west-1"
LinkedRole1=""
S3RootURL="https://s3.amazonaws.com/aws-service-catalog-reference-architectures"
date
echo "Using Account:$ACC Region:$AWS_DEFAULT_REGION Child Accounts:$childAcc All Regions:$allregions"
# StackSet administration/execution roles from AWS's sample templates;
# the execution role trusts this (administrator) account.
echo "Creating the StackSet IAM roles"
aws cloudformation create-stack --region $AWS_DEFAULT_REGION --stack-name IAM-StackSetAdministrator --template-url https://s3.amazonaws.com/cloudformation-stackset-sample-templates-us-east-1/AWSCloudFormationStackSetAdministrationRole.yml --capabilities CAPABILITY_IAM CAPABILITY_NAMED_IAM
aws cloudformation create-stack --region $AWS_DEFAULT_REGION --stack-name IAM-StackSetExecution --parameters "[{\"ParameterKey\":\"AdministratorAccountId\",\"ParameterValue\":\"$ACC\"}]" --template-url https://s3.amazonaws.com/cloudformation-stackset-sample-templates-us-east-1/AWSCloudFormationStackSetExecutionRole.yml --capabilities CAPABILITY_IAM CAPABILITY_NAMED_IAM
# Block until both role stacks are fully created — later steps need them.
echo "waiting for stacks to complete..."
aws cloudformation wait stack-create-complete --stack-name IAM-StackSetAdministrator
aws cloudformation wait stack-create-complete --stack-name IAM-StackSetExecution
echo "creating the automation pipeline stack"
aws cloudformation create-stack --region $AWS_DEFAULT_REGION --stack-name SC-RA-IACPipeline --parameters "[{\"ParameterKey\":\"ChildAccountAccess\",\"ParameterValue\":\"$childAccComma\"}]" --template-url "$S3RootURL/codepipeline/sc-codepipeline-ra.json" --capabilities CAPABILITY_IAM CAPABILITY_NAMED_IAM CAPABILITY_AUTO_EXPAND
echo "creating the ServiceCatalog IAM roles StackSet"
aws cloudformation create-stack-set --stack-set-name SC-IAC-automated-IAMroles --template-url "$S3RootURL/iam/sc-demosetup-iam.json" --capabilities CAPABILITY_IAM CAPABILITY_NAMED_IAM CAPABILITY_AUTO_EXPAND
# Capture the operation id so we can poll it below.
SSROLEOPID=$(aws cloudformation create-stack-instances --stack-set-name SC-IAC-automated-IAMroles --regions $AWS_DEFAULT_REGION --accounts $allACC --operation-preferences FailureToleranceCount=0,MaxConcurrentCount=1 | jq '.OperationId' | tr -d '"')
# Poll every 10s until the stack-set operation reports SUCCEEDED.
# NOTE(review): a FAILED/STOPPED operation would loop forever here.
STATUS=""
until [ "$STATUS" = "SUCCEEDED" ]; do
STATUS=$(aws cloudformation describe-stack-set-operation --stack-set-name SC-IAC-automated-IAMroles --operation-id $SSROLEOPID | jq '.StackSetOperation.Status' | tr -d '"')
echo "waiting for IAMrole Stackset to complete. current status: $STATUS"
sleep 10
done
echo "creating the ServiceCatalog Portfolio StackSet"
aws cloudformation create-stack-set --stack-set-name SC-IAC-automated-portfolio --parameters "[{\"ParameterKey\":\"LinkedRole1\",\"ParameterValue\":\"$LinkedRole1\"},{\"ParameterKey\":\"LinkedRole2\",\"ParameterValue\":\"\"},{\"ParameterKey\":\"LaunchRoleName\",\"ParameterValue\":\"SCEC2LaunchRole\"},{\"ParameterKey\":\"RepoRootURL\",\"ParameterValue\":\"$S3RootURL/\"}]" --template-url "$S3RootURL/ec2/sc-portfolio-ec2demo.json" --capabilities CAPABILITY_IAM CAPABILITY_NAMED_IAM CAPABILITY_AUTO_EXPAND
aws cloudformation create-stack-instances --stack-set-name SC-IAC-automated-portfolio --regions $allregions --accounts $ACC --operation-preferences FailureToleranceCount=0,MaxConcurrentCount=3
date
echo "Complete.  See CloudFormation Stacks and StackSets Console in each region for more details: $allregions"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.