blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b23dbe96098b0cd9db505a6a985822d8f98d5ed4
|
Shell
|
petronny/aur3-mirror
|
/libqgit2-git/PKGBUILD
|
UTF-8
| 1,462
| 3.046875
| 3
|
[] |
no_license
|
pkgbase=libqgit2-git
pkgname=('libqgit2-qt4-git' 'libqgit2-qt5-git')
pkgver=0.22.1.r339.b4f0e31
pkgrel=1
pkgdesc='libgit2 bindings for Qt. (GIT Version)'
arch=('i686' 'x86_64')
url="https://projects.kde.org/projects/playground/libs/libqgit2"
license=('GPL2')
makedepends=('cmake' 'git' 'qt5-base' 'qt4' 'libgit2-git')
source=('git://anongit.kde.org/libqgit2.git')
sha1sums=('SKIP')
# Version string: <upstream version>.r<commit count>.<short hash>
pkgver() {
cd libqgit2
# Read LIBQGIT2_VERSION straight from the header; the previous
# `cat qgit2.h | grep ...` was a useless use of cat.
_ver="$(grep LIBQGIT2_VERSION qgit2.h | cut -d '"' -f2)"
echo "$(echo ${_ver} | tr ' ' .).r$(git rev-list --count HEAD).$(git rev-parse --short HEAD)"
}
# Out-of-tree build directories, one per Qt major version.
prepare() {
mkdir -p build-qt{4,5}
}
build() {
# Qt4 variant
cd "${srcdir}/build-qt4"
cmake ../libqgit2 \
-DQT4_BUILD=ON \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/usr \
-DLIBGIT2_TESTS=OFF
make
# Qt5 variant (cmake default, no -DQT4_BUILD)
cd "${srcdir}/build-qt5"
cmake ../libqgit2 \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/usr \
-DLIBGIT2_TESTS=OFF
make
}
package_libqgit2-qt4-git() {
pkgdesc='libgit2 bindings for Qt4. (GIT Version)'
depends=('qt4' 'libgit2-git')
conflicts=('libqgit2-qt4' 'libqgit2-qt5' 'libqgit2-common-git')
provides=('libqgit2-qt4')
make -C build-qt4 DESTDIR="${pkgdir}" install
}
package_libqgit2-qt5-git() {
pkgdesc='libgit2 bindings for Qt5. (GIT Version)'
depends=('qt5-base' 'libgit2-git')
conflicts=('libqgit2-qt5' 'libqgit2-qt4' 'libqgit2-common-git')
provides=('libqgit2-qt5')
make -C build-qt5 DESTDIR="${pkgdir}" install
}
| true
|
6db2411d3fa772c475d87011e36383a2d1c49725
|
Shell
|
ftnapps/qico
|
/stuff/ftn
|
UTF-8
| 580
| 3.609375
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#! /bin/sh
#
# Start FTN services.
#
# This is a script for RedHat's startup process.
#
# $Id: ftn,v 1.1.1.1 2004/12/29 21:23:34 mitry Exp $
# chkconfig: 345 86 14
# description: FTN services
#
# Pull in RedHat's init-script helpers (provides the `status` function).
. /etc/rc.d/init.d/functions

case "$1" in
  start)
	echo -n "Starting FTN services: "
	su news -c 'qico -d'
	echo qico
	touch /var/lock/subsys/fido
	;;
  stop)
	echo -n "Stopping FTN services: "
	qctl -q
	echo qico
	rm -f /var/lock/subsys/fido
	;;
  status)
	status qico
	;;
  restart|reload)
	qctl -R
	;;
  *)
	echo "Usage: $0 {start|stop|status|restart|reload}"
	exit 1
	;;
esac
exit 0
| true
|
36686f36c612e4456b818fa63121f83686db8539
|
Shell
|
duckinator/dotfiles
|
/.bash_aliases
|
UTF-8
| 1,240
| 2.984375
| 3
|
[] |
no_license
|
# Default Bash aliases.
source "$HOME/.bash_demo_aliases"
alias sysinfo='konsole --workdir ~ --hide-menubar -e neofetch --loop'
alias venv='if [ -n "$VIRTUAL_ENV" ]; then deactivate; else . venv/bin/activate; fi'
alias strstrip='python3 -c "import sys; print(sys.stdin.read().strip(), end=\"\")"'
alias copy='xclip -selection clipboard -i'
alias paste='xclip -selection clipboard -o'
alias scopy='strstrip | copy'
alias grep='grep --color=auto'
alias sshproxy='ssh -ND 9999'
alias drop-caches='echo 3 | sudo tee /proc/sys/vm/drop_caches'
alias flush-swap='sudo bash -c "swapoff -a; swapon -a"'
alias b='bundle'
alias bi='bundle install'
alias be='bundle exec'
# A desperate attempt to make fluid work with a dark system theme.
# The colors could be better, but it works.
alias fluid='fluid -fg "#000000" -bg "#cccccc" -bg2 "#ccccdd" -scheme plastic'
# Prefer neovim when installed (`command -v` is the portable check; `which`
# is an external tool that may be absent).
if command -v nvim &>/dev/null; then
alias vim=nvim
fi
# -p is the same as --indicator-style=slash on GNU coreutils' `ls`.
if [ -n "$DISABLE_FANCY_LS" ]; then
# Do nothing.
true
# Fixed: this condition was `elif $(pipeline); then`, which runs the
# pipeline's (empty) OUTPUT as a command and only works by accident of
# exit-status fallthrough; test the pipeline directly instead.
elif ls --version 2>/dev/null | grep -q GNU; then
alias ls='ls --color=auto --group-directories-first -p'
elif [ "$(uname)" = "FreeBSD" ]; then
alias ls='ls -p -F -G'
else
alias ls='ls -p'
fi
| true
|
73044c329cc19907813f22aa80350163bc81c3fa
|
Shell
|
harriish/anm
|
/evaluateLabs_a2.sh
|
UTF-8
| 1,852
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Grade a batch of ANM lab-2 submissions: for each student folder inside the
# given lab directory, stage the submission into /tmp/A2, run the checker,
# archive the run, and pause for manual review before the next student.
if [ "$1" == "-h" ]; then
echo "Evaluate ANM labs.";
echo "Usage:"
# NOTE(review): this prints the argument ("-h") — "$0" (the script name)
# was probably intended here.
echo "$1 <labfolder>"
echo "Labfolder contains the downloaded folder from Its learning."
exit
fi
myBasedir=$(pwd)
timestr=$(date +%Y-%m-%d_%H%M)
feedbackFile="$myBasedir/Feedback".$timestr."log"
echo "Feedback will be in $feedbackFile"
# NOTE(review): if $1 is a relative path, the `cd "$myRootDir"` inside the
# loop runs from /tmp/A2 and will fail — confirm callers pass an absolute path.
myRootDir=$1
cd "$myRootDir"
echo "Going into $myRootDir"
for student in *
do
echo "[EvalLab] Checking $student; in $myRootDir"
cd "$myRootDir"
# Recreate a clean staging area for this submission.
if [[ ! -e /tmp/A2 ]]; then
mkdir -p /tmp/A2;
fi
echo "[EvalLab] Cleaning /tmp/A2/"
rm -rf /tmp/A2
mkdir /tmp/A2
echo "[EvalLab] /tmp/A2/"
ls /tmp/A2/
echo "[EvalLab] Copying; cp -r $myRootDir/$student/* /tmp/A2"
cp -rv "$myRootDir/$student"/* /tmp/A2
cd /tmp/A2
echo "[EvalLab] content of /tmp/A2"
ls
## Remove txt||pdf from filename
rename 's/\.txt$//' *.txt
chmod a+x /tmp/A2/prober
dos2unix /tmp/A2/prober
echo "[EvalLab] content of /tmp/A2"
ls -la
# A prober left over from the previous student would interfere with SNMP.
echo "[EvalLab] Checking if any prober runs..interference with snmp."
if pidof -x "prober" >/dev/null; then
echo "[prober] Allready running"
echo " killing"
q=$(pidof -x "prober")
echo "Got this; $q <>"
pkill prober
else
echo " Did not detect any prober"
q=$(pidof -x "prober")
echo "Got this; $q <>"
fi
echo "[EvalLab] Executing test: $student"
$myBasedir/A2/checkA2.sh
ls
echo "student name:$student " > /tmp/A2/student.txt
# Preserve this student's staged run in a unique directory for later review.
studTmp=$(mktemp -d /tmp/evaluation-a2-XXXXXXXX)
echo "[EvalLab] Moving /tmp/A2 to $studTmp ."
mv /tmp/A2/* $studTmp
echo "********* $student **************************"
echo "********************DELIMITER****************"
read -p "Press enter to continue"
# echo "[EvalLab] Killing snmpd"
# sudo killall snmpd
done
| true
|
1f2f1f304b7342808a90a597372a9aa779195ddf
|
Shell
|
kwikwag/voicefilter
|
/utils/ffmpeg_convert_ext.sh
|
UTF-8
| 609
| 3.265625
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
# Bulk-convert every *.<source_ext> under the current directory to
# <target_ext> with ffmpeg, one job per CPU core, with a tqdm progress bar.
# Usage: ffmpeg_convert_ext.sh [source_ext=wav] [target_ext=flac]
# thanks to https://unix.stackexchange.com/questions/428018/how-to-convert-all-wav-files-in-subdirectories-to-flac
# and to https://unix.stackexchange.com/questions/291108/parallel-processing-using-xargs
source_ext="${1:-wav}"
target_ext="${2:-flac}"
# NOTE(review): the remaining args are never used after this shift —
# confirm whether "$@" was meant to be forwarded to ffmpeg.
shift 2
temp_dir=$(mktemp -d)
trap "rm -rf ${temp_dir}" EXIT
# NUL-delimited file list so paths with spaces/newlines survive xargs -0.
find -type f -iname "*.${source_ext}" -print0 > ${temp_dir}/filelist
# xargs hands each filename to the inline `sh -c` script as its $0.
# ${target_ext} sits OUTSIDE the single quotes, so the parent shell splices
# its value into the command text. Each finished job prints "." so tqdm can
# count completions against --total (the NUL-count of the file list).
< ${temp_dir}/filelist xargs -0 -n1 -P$(nproc) -I '{}' sh -c \
'ffmpeg -loglevel panic -n -i "${0}" "${0%.*}.'${target_ext}'"; echo .' {} \; | \
tqdm --total=$(< ${temp_dir}/filelist tr '\0' '\n' | wc -l) > /dev/null
| true
|
72aea2da115cd27f9e106cab6e511e6594b7466a
|
Shell
|
vinavfx/natron_utils
|
/install.sh
|
UTF-8
| 546
| 2.875
| 3
|
[] |
no_license
|
#! /usr/bin/sh
# Install the natron_utils plugin set: wipe and repopulate Natron's Plugins
# directory and the vinarender natron module directory, then patch the
# install path into the instance's init.py.
cd $(dirname "$0") # cd to the repo root (this script's own directory)
plugins="/usr/share/Natron/Plugins"
# /etc/vinarender holds the vinarender base path; its natron module
# directory receives a copy of ./vinarender below.
vinarender="$(cat /etc/vinarender)/modules/natron"
mkdir -p $plugins
rm -rf $plugins/*
mkdir -p $vinarender
rm -rf $vinarender/*
# copy the core into Natron's plugins folder
cp -rf ./core $plugins
cp -rf ./plugins $plugins
cp -rf ./testing $plugins
cp -rf ./vinarender/* $vinarender
cp ./utils/python/util.py $plugins
# substitute the install path for the {path} placeholder in init.py
init="$plugins/core/init.py"
sed -i "s|{path}|$plugins|g" $init
chmod 777 -R $plugins
| true
|
642148bc970ffb938901466e7edbf24edade085e
|
Shell
|
nskspb/ubuntu-hdl-tools-install
|
/bin/07_install_vivado.sh
|
UTF-8
| 670
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Download (or locate) the Xilinx Vivado web installer, prepare the target
# directory, run the installer, then link the newest settings64.sh into the
# profile location configured in config.bash.

CUR_DIR=${BASH_SOURCE%/*}
source "$CUR_DIR/config.bash"
source "$CUR_DIR/include.bash"

# download the web installer or find it in pkg folder
INSTALLER=$(check_and_download "${VIVADO_URL}" "${DOWNLOAD_DIR}" "${VIVADO_PKG_NAME}")

# create the target folder and hand ownership to the invoking user
CUR_USER=$(whoami)
sudo mkdir -p "$VIVADO_INSTALL_PATH"
sudo chown "$CUR_USER" "$VIVADO_INSTALL_PATH"

# run the web installer
chmod a+x "$INSTALLER"
echo "Please use $VIVADO_INSTALL_PATH for your installation"
"$INSTALLER"

# pick the most recent per-version settings64.sh and link it as the profile
SETTINGS_FILE=$(find /opt/xilinx/Vivado/ -regextype sed -regex ".*/Vivado/[a-f0-9\.]*/settings64.sh" | sort -r | head -n 1)
sudo ln -s "$SETTINGS_FILE" "$VIVADO_PROFILE"
| true
|
41030d8eb2457a9203f4dddbd65459dd7d901a29
|
Shell
|
pflanze/ml2json
|
/html/gendoc
|
UTF-8
| 168
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -eu

# Generate the ml2json HTML docs: invoke ./gen with the project's config,
# reading sources from the repo root and writing into html/out.
mydir=$(dirname "$0")

inbase="$mydir/.."
outbase="$mydir/out"
configpath="$mydir/gen-ml2json.pl"

"$mydir/gen" "$configpath" "$inbase" "$outbase"
| true
|
b1eb8f165726175f4bdd3804f1e24f76ff7ac3da
|
Shell
|
andcebrod/SCRIPTS-ISO
|
/menus_vairables_zenity/menuvariable_z_2.sh
|
UTF-8
| 408
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Show a zenity radiolist with every kernel module currently loaded and
# unload (rmmod) the one the user selects.
lsmod|cut -f1 -d' '>/tmp/modulos.txt
fija1="zenity --list --radiolist --column "Elige" --column "Modulos" "
var=""
# Read the module list and append each name to the zenity argument string.
exec</tmp/modulos.txt
read linea
while [ "$linea" ]
do
var="$var $linea "
read linea
done
exec<&-
# The assembled command is expanded unquoted on purpose: word splitting
# turns the string into the zenity argv.
m="$fija1 $var "
elegido=$($m)
if [ "$elegido" ]
then
rmmod $elegido
# Fixed: report removal only when something was actually selected;
# previously this message printed even when the dialog was cancelled.
echo "Se ha eliminado $elegido"
fi
| true
|
011b41e6ee6914482899b8a7d1d2e2f8fef628c5
|
Shell
|
XavierMoon/docker-nginx-php7
|
/start.sh
|
UTF-8
| 622
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: template nginx.conf from environment variables,
# write the hosts file, set the root password and start supervisord.
Nginx_Install_Dir=/usr/local/nginx
set -e
# Substitute the placeholders baked into nginx.conf with runtime values.
sed -i "s#API_URL#${API_URL}#g" /usr/local/nginx/conf/nginx.conf
sed -i "s#WEB_URL#${WEB_URL}#g" /usr/local/nginx/conf/nginx.conf
sed -i "s#BASE_DIR#${BASE_DIR}#g" /usr/local/nginx/conf/nginx.conf
#chmod -R 777 $BASE_DIR
# Fixed: `echo -e` is not portable under #!/bin/sh (dash prints the literal
# "-e"); printf produces the same two-line hosts file everywhere.
printf '127.0.0.1 %s \n 117.114.196.166 api.test.yundoukuaiji.com\n' "$API_URL" > /etc/hosts
__create_user() {
# Create a user to SSH into as.
SSH_USERPASS=root
# printf replaces the non-portable `echo -e "a\nb"` here as well.
printf '%s\n%s\n' "$SSH_USERPASS" "$SSH_USERPASS" | (passwd --stdin root)
echo ssh root password: $SSH_USERPASS
}
# Call all functions
__create_user
/usr/bin/supervisord -n -c /etc/supervisord.conf
| true
|
146c6e8534e702c7917a838cb6ee63e4f71d4d98
|
Shell
|
bopopescu/trex-emu
|
/scripts/b
|
UTF-8
| 473
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /usr/bin/env bash
# Configure the Go/CGO build environment for trex-emu against the bundled
# ZeroMQ, and make sure the libzmq.so.5 soname symlink exists.

export GOPATH="$(pwd)/.."
export PKG_CONFIG_PATH="$GOPATH/external_share/zmq/x86_64/"
export CGO_CFLAGS="-g -O2 -I$GOPATH/external_share/zmq/x86_64/include"
export CGO_LDFLAGS="-g -O2 -L$GOPATH/external_share/zmq/x86_64/"
export GOBIN="$GOPATH/bin"

# The bundled library ships only as libzmq.so; the runtime linker looks for
# the .so.5 soname, so create the link once if it is missing.
if [[ -f $PKG_CONFIG_PATH/libzmq.so && ! -f $PKG_CONFIG_PATH/libzmq.so.5 ]]; then
	ln -s "$PKG_CONFIG_PATH/libzmq.so" "$PKG_CONFIG_PATH/libzmq.so.5"
fi

export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$PKG_CONFIG_PATH"
| true
|
9cc01c11d8fb6507118318624ea540f051a11bf8
|
Shell
|
tyagi-iiitv/meld
|
/scripts/plots/plot_dir.sh
|
UTF-8
| 2,306
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Plots a directory with results from instrumentation.
#
# Usage: plot_dir.sh <dir> [prog-name] [cpu-count]
# Renders one PNG per data file (via the plot helper scripts that live next
# to this script) and assembles them into $DIR/index.html.
DIR="${1}"
PROG="${2}"
CPU="${3}"
HTML="$DIR/index.html"
# Absolute directory containing this script and the plot helpers.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
if [ -z "${DIR}" ]; then
echo "Usage: plot_dir.sh <directory with files>"
exit 1
fi
plot_part ()
{
# Render one data file to PNG with the given plot script and, on success,
# append an <h2>+<img> section for it to the report page.
# Globals: SCRIPT_DIR (read), HTML (appended).
# $1 data file, $2 plot script (.py or .sh), $3 section title.
FILE="${1}"
SCRIPT="${2}"
TITLE="${3}"
# Pick the interpreter from the plot script's extension.
if [[ $SCRIPT == *.py ]]; then
INTER="python"
else
INTER="bash"
fi
if [ -z "${SCRIPT}" ]; then
echo "Missing script."
exit 1
fi
if [ ! -f "${FILE}" ]; then
echo "Cannot find file ${FILE}."
exit 1
fi
# Re-plot only when the PNG is missing or older than its data file.
if [[ ! -f $FILE.png || $FILE -nt $FILE.png ]]; then
CMD="${INTER} $SCRIPT_DIR/${SCRIPT} ${FILE}"
echo -n "Processing ${FILE}..."
$CMD
if [ $? -eq 0 ]; then
echo "<h2>$TITLE</h2><img src=\"$(basename $FILE).png\" />" >> $HTML
echo "done."
else
echo "fail."
fi
fi
}
try_plot_part ()
{
	# Optional-file variant of plot_part: some instrumentation counters are
	# only present in certain runs, so a missing file is reported, not fatal.
	local data_file="${1}"
	local plot_script="${2}"
	local section_title="${3}"
	if [ -f "${data_file}" ]; then
		plot_part "${data_file}" "${plot_script}" "${section_title}"
	else
		echo "${data_file} does not exist"
	fi
}
# Emit the report skeleton, then one section per instrumentation counter.
echo "<html><head><title>$PROG - $CPU threads</title></head><body>" > $HTML
plot_part "${DIR}/data.state" "active_inactive.py" "State"
plot_part "${DIR}/data.bytes_used" "plot_quantity.sh" "Bytes Used"
plot_part "${DIR}/data.node_lock_ok" "plot_quantity.sh" "Node Locks (First Ok)"
plot_part "${DIR}/data.node_lock_fail" "plot_quantity.sh" "Node Locks (First Failed)"
plot_part "${DIR}/data.derived_facts" "plot_quantity.sh" "Derived Facts"
plot_part "${DIR}/data.consumed_facts" "plot_quantity.sh" "Consumed Facts"
plot_part "${DIR}/data.rules_run" "plot_quantity.sh" "Rules Run"
plot_part "${DIR}/data.sent_facts_same_thread" "plot_quantity.sh" "Sent Facts (in same thread)"
plot_part "${DIR}/data.sent_facts_other_thread" "plot_quantity.sh" "Sent Facts (to other thread)"
plot_part "${DIR}/data.sent_facts_other_thread_now" "plot_quantity.sh" "Sent Facts (to other thread now, indexing included)"
# These counters are optional — use the tolerant variant.
try_plot_part "${DIR}/data.stolen_nodes" "plot_quantity.sh" "Stolen Nodes"
try_plot_part "${DIR}/data.priority_nodes_thread" "plot_quantity.sh" "Set Priority (in same thread)"
try_plot_part "${DIR}/data.priority_nodes_others" "plot_quantity.sh" "Set Priority (in another thread)"
echo "</body></html>" >> $HTML
| true
|
8ba704af6eb680070a8844089b4f155c598d3d25
|
Shell
|
aussieboy88/skywire
|
/static/script/unix/start
|
UTF-8
| 890
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Rebuild the skywire node binary and print true/false depending on whether
# the binary actually changed (MD5 before vs after the rebuild below).
# TODO needs revision, see:
# https://github.com/skycoin/skywire/issues/161
# local vars
SKYWIRE_UNIX_SCRIPTS=/usr/local/skywire/go/src/github.com/skycoin/skywire/static/script
# check for the env vars
if [ ! -f /etc/default/skywire ] ; then
# does not exist, link it
ln -s ${SKYWIRE_UNIX_SCRIPTS}/skywire.defaults /etc/default/skywire
fi
# now load it (provides GOPATH, SKYWIRE_DIR, etc.)
. /etc/default/skywire
# node bin
NODE_EXEC_FILE=${GOPATH}/bin/node
# Print the MD5 digest line of the node binary, using whichever digest tool
# exists (`md5` on BSD/macOS, `md5sum` elsewhere). Reads $NODE_EXEC_FILE.
getMd5(){
	if hash md5 2>/dev/null; then
		# Fixed: the old `echo $(md5 ...)` uselessly re-word-split the tool's
		# output (and would glob on metacharacters); invoke the tool directly
		# with the path quoted. Callers only compare two getMd5 outputs for
		# equality, so the exact whitespace of the line does not matter.
		md5 "${NODE_EXEC_FILE}"
	else
		md5sum "${NODE_EXEC_FILE}"
	fi
}
# get md5 for actual (old) bin
OLDNODE=$(getMd5)
# install it: rebuild every cmd/ package into $GOPATH/bin
cd ${SKYWIRE_DIR}/cmd
go install ./...
# now we get the md5 of the "new" node
NEWNODE=$(getMd5)
# output in consecuence: "true" on stdout iff the binary changed
if [ "$OLDNODE" = "$NEWNODE" ]; then
echo false
else
echo true
fi
| true
|
a278b49403277b56287c868aa9a0885164326dd3
|
Shell
|
CoolCold/iptv-channels
|
/actions/update/helper/skip.sh
|
UTF-8
| 470
| 3.921875
| 4
|
[] |
no_license
|
# @param string $1 (step_number)
# @return string (0=true, 1=false)
#
# Echoes "0" when $1 appears in the colon-separated $SKIP list, else "1".
function _isSkippedStep() {
    local wanted="${1}"
    local candidate
    local result=1
    if [[ -n "${SKIP}" ]]; then
        local entries=()
        # Split $SKIP on ':' into an array, one entry per line.
        while IFS='' read -r candidate; do entries+=("${candidate}"); done < <(echo "${SKIP}" | tr ":" "\n")
        for candidate in "${entries[@]}"; do
            if [[ "${candidate}" == "${wanted}" ]]; then
                result=0
                break
            fi
        done
    fi
    echo "${result}"
}
| true
|
a3fc4d5acc2bf2fd689feb29941e82c021c4eeae
|
Shell
|
tijira-1999/CodinClub
|
/day1_8/day6/for/factors.sh
|
UTF-8
| 250
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Print every factor of a user-supplied number by scanning divisors up to
# sqrt(num); each divisor i also contributes its cofactor num/i.
read -p "Enter a number " num
echo -n "Factors of $num are "
i=1
while (( i*i <= $num ))
do
	if (( $num % i == 0 ))
	then
		if (( $num / i == i ))
		then
			# perfect square: print the root only once
			echo -n $i
		else
			echo -n "$i $(($num/i)) "
		fi
	fi
	(( i++ ))
done
echo " "
| true
|
cce605c6f0191031de3c6c790a1326344b84fbae
|
Shell
|
isolesty/pkgbuilder
|
/lib/utils
|
UTF-8
| 2,860
| 3.65625
| 4
|
[] |
no_license
|
#! /bin/bash
# Wrapper around quilt for Debian packaging: ensure a dpkg-oriented
# ~/.quiltrc-dpkg exists (written once below), then run quilt with it.
dquilt() {
local quiltrc=${HOME}/.quiltrc-dpkg
hasBin quilt || die EPACKAGE "Install quilt!!!"
if [[ ! -f $quiltrc ]] ; then
info "Write quilt configuration: $quiltrc"
# Quoted 'EOF' delimiter: the rc content below is written verbatim —
# nothing is expanded here; it locates the debian/ tree at quilt runtime.
cat <<'EOF' >>$quiltrc
d=. ; while [ ! -d $d/debian -a `readlink -e $d` != / ]; do d=$d/..; done
if [ -d $d/debian ] && [ -z $QUILT_PATCHES ]; then
# if in Debian packaging tree with unset $QUILT_PATCHES
QUILT_PATCHES="debian/patches"
QUILT_PATCH_OPTS="--reject-format=unified"
QUILT_DIFF_ARGS="-p ab --no-timestamps --no-index --color=auto"
QUILT_REFRESH_ARGS="-p ab --no-timestamps --no-index"
QUILT_COLORS="diff_hdr=1;32:diff_add=1;34:diff_rem=1;31:diff_hunk=1;33:diff_ctx=35:diff_cctx=33"
if ! [ -d $d/debian/patches ]; then mkdir $d/debian/patches; fi
fi
EOF
fi
# NOTE(review): $@ is unquoted, so arguments containing spaces are
# re-split before reaching quilt — confirm whether "$@" was intended.
quilt --quiltrc=${quiltrc} -f $@
}
hasPbuilderChroot() {
	# Succeed (0) when a usable pbuilder chroot exists for ${ARCH} under
	# ${WORKBASE}; die on a half-created chroot, otherwise fail (1).
	local pbuilderroot=${WORKBASE}/base.cow-${ARCH}
	# A working chroot always contains an executable /bin/ls.
	if [[ -x ${pbuilderroot}/bin/ls ]] ; then
		return 0
	fi
	# Directory present but no /bin/ls: a broken, partially-built chroot.
	if [[ -d ${pbuilderroot} ]] ; then
		warn "Incompleted pbuilder environment detected at ${pbuilderroot}"
		die "You should manually remove ${pbuilderroot} and run the script again"
	fi
	return 1
}
# taken from http://stackoverflow.com/questions/3183444/check-for-valid-link-url
checkValidURL() {
	# Return 0 when $1 contains an http(s)/ftp/file URL, 1 otherwise.
	# NB: the match is unanchored, so a URL anywhere in the string counts.
	regex='(https?|ftp|file)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]'
	[[ $1 =~ $regex ]] && return 0
	return 1
}
printBuildInfo() {
# Print a summary of what is about to be built: upstream package versions
# (via rmadison, for reference) plus the local build configuration.
# Globals read: package, changelog, dsc, ARCH, SVERSION, DEBFULLNAME,
# build_pkg, build_pkg_via_pbuilder.
local builder='None'
info "Wait a few seconds, I'll show your upstream package versions, for reference only"
rmadison $package
sleep 5
# Pick the builder label from whichever build flag is set.
[[ $build_pkg -eq 1 ]] && builder=dpkg-buildpackage
[[ $build_pkg_via_pbuilder -eq 1 ]] && builder=cowbuilder
info "Build script version: $SVERSION"
# Unquoted EOF: the variables below ARE expanded into the summary.
# NOTE(review): "maintianer" is a typo in the emitted text — left as-is
# here since this edit changes comments only.
cat <<EOF
We're building on architecture ${ARCH}:
package: ${package}
changelog: ${changelog}
dsc: ${dsc}
builder: ${builder}
maintianer: ${DEBFULLNAME}
EOF
}
urlDecode() {
	# Percent-decode $1 (e.g. "a%20b" -> "a b"); '+' also becomes a space.
	# Quirk preserved from the original: inputs WITHOUT any '%' escape are
	# passed through untouched, so a bare '+' is not translated then.
	# Fixed: $1 was expanded unquoted throughout, which word-split and
	# globbed the value (collapsing whitespace, expanding '*').
	if echo "$1" | grep -qs % ; then
		# sed turns %XX into \xXX, which `echo -e` then interprets.
		echo -e "$(echo "$1" | sed 's/+/ /g;s/%/\\x/g')"
	else
		echo "$1"
	fi
}
createWorkdir() {
# Create (if needed) the per-package working directory under WORKBASE and
# publish its path in the global `workdir`. Requires `package` to be set.
assert package
workdir=${WORKBASE}/${package}
createdir ${workdir} "work base"
}
# http://wiki.bash-hackers.org/howto/mutex
lockScript() {
# Take a mutex on /tmp/<package>-<version>-lock so that only one build of
# a given package+version can run at a time.
local lockfile=/tmp/${package}-${version}-lock
# noclobber makes the > redirection fail when the lock file already
# exists — that failure is the atomic "already locked" test.
if ( set -o noclobber; echo $$ > $lockfile ) 2>/dev/null ; then
info "Acquiring a mutex lock on $lockfile"
# Remove the lock on any exit path.
# NOTE(review): `reset` is presumably a project helper or terminal reset
# — confirm; `exit $?` propagates the rm status, not the script's.
trap "reset; rm -f $lockfile; exit $?" INT TERM EXIT
else
die "Someone is building ${package} ${version} via PID $(cat $lockfile)"
fi
}
unlockScript() {
	# Drop the mutex taken by lockScript and clear its exit traps.
	local lock="/tmp/${package}-${version}-lock"
	info "Releasing mutex lock on $lock"
	rm -f "$lock"
	trap - INT TERM EXIT
}
| true
|
dafaca514b8254bc7354ccb99cd45e22ed5cee4a
|
Shell
|
pmjordan/turandot
|
/scripts/nuke-namespace
|
UTF-8
| 377
| 2.796875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e

HERE=$(dirname "$(readlink --canonicalize "$BASH_SOURCE")")
. "$HERE/_env"

# Force-delete a namespace stuck in "Terminating" by clearing its finalizer
# list and POSTing it back via the /finalize subresource:
# https://medium.com/@clouddev.guru/how-to-fix-kubernetes-namespace-deleting-stuck-in-terminating-state-5ed75792647e

NAMESPACE=$1

# Fixed: the jq filter was unquoted, leaving `.spec.finalizers=[]` exposed to
# shell globbing/word handling; quote it so jq receives the filter verbatim.
kubectl get namespaces "$NAMESPACE" --output=json | \
jq '.spec.finalizers=[]' | \
kubectl replace --raw="/api/v1/namespaces/$NAMESPACE/finalize" -f -
| true
|
9dc5b255ea5263ddd937f0381ab4802f579761b0
|
Shell
|
pesoklp13/minecraft_bedrock
|
/stop-server.sh
|
UTF-8
| 572
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
##
## script for Shutdown Minecraft Bedrock server already running in tmux instance
## Created by Animator
##
tmuxName=minecraft-server
# Bail out when no tmux session with the expected name exists.
if ! tmux ls | grep -q "$tmuxName"; then
echo "No tmux with name $tmuxName is running. Exitting."
exit
fi
# Type a command into the server console inside the tmux session; the
# trailing carriage return ($(printf \\r)) presses Enter.
execute-command () {
local COMMAND=$1
if [[ $tmuxName != "" ]]; then
tmux send-keys -t $tmuxName "$COMMAND$(printf \\r)"
fi
}
# Warn connected players, give them a moment, then issue the stop command.
execute-command "tellraw @a {\"rawtext\":[{\"text\":\"§c[SERVER]§a Shutting down server...\"}]}"
sleep 2
execute-command stop
echo "Sending stop to minecraft server..."
| true
|
52d390b14f3e43b8c4fa3cd222221da5e5659490
|
Shell
|
Inkering/Arduino-Starter
|
/ccls-reg.sh
|
UTF-8
| 258
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate a .ccls file from an existing .clang_complete flags file so that
# ccls can reuse the project's compile flags.
# copy the flags file to a ccls file in the same directory
cp .clang_complete .ccls
# Prepend the driver name "clang" so ccls parses the rest as compile flags.
# Fixed: the old `echo "$(echo 'clang' | cat - .ccls)"` spawned a useless
# echo|cat pipeline; a single printf over the captured content is enough
# (the substitution is expanded before the redirection truncates the file).
printf 'clang\n%s\n' "$(cat .ccls)" > .ccls
# Done!
| true
|
4ff564707201ca5d8c303de7027b4eb4c7a33481
|
Shell
|
stevenflo/MindProx
|
/memory_profiler/champsim.sh
|
UTF-8
| 1,002
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
# Run one ChampSim simulation for a trace and capture its output under
# results_<N_SIM>M/.
if [ "$#" -lt 5 ]; then
echo "Illegal number of parameters"
# Fixed: the usage line previously showed [OPTION] before [TRACE DIR],
# but the code below reads TRACE_DIR as $5 and OPTION as $6.
echo "Usage: ./run_champsim.sh [BINARY] [N_WARM] [N_SIM] [TRACE] [TRACE_DIR] [OPTION]"
exit 1
fi
BINARY=${1}
N_WARM=${2}
N_SIM=${3}
TRACE=${4}
TRACE_DIR=${5}
OPTION=${6}
if [ ! -f "bin/$BINARY" ] ; then
echo "[ERROR] Cannot find a ChampSim binary: bin/$BINARY"
exit 1
fi
# Both instruction counts must be plain non-negative integers.
re='^[0-9]+$'
if ! [[ $N_WARM =~ $re ]] || [ -z $N_WARM ] ; then
echo "[ERROR]: Number of warmup instructions is NOT a number" >&2;
exit 1
fi
re='^[0-9]+$'
if ! [[ $N_SIM =~ $re ]] || [ -z $N_SIM ] ; then
echo "[ERROR]: Number of simulation instructions is NOT a number" >&2;
exit 1
fi
if [ ! -f "$TRACE_DIR$TRACE" ] ; then
echo "[ERROR] Cannot find a trace file: $TRACE_DIR$TRACE"
exit 1
fi
mkdir -p results_${N_SIM}M
# Counts are given in millions, hence the appended 000000.
(./bin/${BINARY} -warmup_instructions ${N_WARM}000000 -simulation_instructions ${N_SIM}000000 ${OPTION} -traces ${TRACE_DIR}${TRACE}) &> results_${N_SIM}M/${TRACE}-${BINARY}${OPTION}.txt
| true
|
cb03ad3d33684ef0b7fd5a78f2f29cef4627ba45
|
Shell
|
SyneticNL/service-ssh-agent
|
/scripts/release.sh
|
UTF-8
| 459
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Publish the CI-built image under the right Docker Hub tag.
# Pull-request builds publish nothing.
if [[ "${TRAVIS_PULL_REQUEST}" == "false" ]]; then
	case "${TRAVIS_BRANCH}" in
		develop) TAG="edge" ;;    # develop => edge
		master)  TAG="latest" ;;  # master  => latest
	esac
	# tags/v1.2.0 => 1.2 (drop the leading "v", keep "1.2")
	[[ "${TRAVIS_TAG}" != "" ]] && TAG="${TRAVIS_TAG:1:3}"
	if [[ "$TAG" != "" ]]; then
		docker login -u "${DOCKER_USER}" -p "${DOCKER_PASS}"
		docker tag ${REPO}:dev ${REPO}:${TAG}
		docker push ${REPO}:${TAG}
	fi
fi
| true
|
9a55df78afd40e933aed2e9a0390ff2ffab1912e
|
Shell
|
gakkistyle/comp9021
|
/Ass1/a1_sanity_check/test_a1.sh
|
UTF-8
| 1,201
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Sanity-check harness for assignment 1: run roman_arabic.py against the 36
# provided in/out test pairs and report per-test pass/fail plus a summary.
# First verify all fixture files exist before running anything.
for i in {0..35} ; do
if ! [ -e tests/test$i.in ] ; then
echo "You are missing the file tests/test$i.in. Please add it and start again."
exit 1
fi
if ! [ -e tests/test$i.out ] ; then
echo "You are missing the file tests/test$i.out. Please add it and start again."
exit 1
fi
done
num_tests_failed=0
for i in {0..35} ; do
# NOTE(review): mktemp's --tmpdir option is GNU-specific; this will not
# run under BSD/macOS mktemp as-is.
temp_file=$(mktemp --tmpdir=.)
python3 roman_arabic.py < tests/test$i.in > $temp_file
# Empty diff output means the test passed.
DIFF=$(diff tests/test$i.out $temp_file)
if [ "$DIFF" != "" ] ; then
printf "test $i failed (Required input is in tests/test$i.in and required output is in tests/test$i.out)\n########## Command to reproduce\n"
printf "python3 roman_arabic.py < tests/test$i.in\n"
printf "########## Expected output\n"
cat tests/test$i.out
printf "########## Your output\n"
cat $temp_file
((num_tests_failed++))
else
printf "test $i passed\n"
fi
printf "***********************************\n\n"
rm -f $temp_file
done
if [ $num_tests_failed -ne 0 ] ; then
echo "You have failed $num_tests_failed tests"
else
echo "All tests passed! Good job!"
fi
| true
|
0551c776835e4bf93265ed21cb3f85d3ef37a486
|
Shell
|
mv28jam/GIT-sh-legacy
|
/list.sh
|
UTF-8
| 162
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Print the basename of every entry under the configured repository tree
# (all path components come from ./sh/init.sh).
source ./sh/init.sh
for entry in "$target_path/$dev_path/$branch/$repo_path"/*
do
	basename "$entry"
done
| true
|
2095146917dace69518530dca71a2b5910b5ae04
|
Shell
|
revrepo/revsw-api
|
/utils/upgrade_mongodb/mongodb_upgrade.sh
|
UTF-8
| 1,504
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Upgrade a MongoDB dump across major versions by walking the data directory
# through successive official Docker images (2.6 -> 3.0 -> 3.2 -> 3.4),
# raising featureCompatibilityVersion, then re-dumping under mongo 3.6.3.
# Log commands to stdout
set -o xtrace
# Exit on error
set -o errexit
# Exit on use of unset variables
set -o nounset
# Exit and report on pipe failure
set -o pipefail
# NOTE(review): overriding the shell's own $PWD works but shadows the
# builtin variable — a differently-named variable would be safer.
export PWD=$(pwd)
# Start from a clean data dir seeded with the original dump.
rm -rf ${PWD}/data/db
mkdir -p ${PWD}/data/db/dump
cp -R ${PWD}/data/dump/ ${PWD}/data/db/dump
step=0
# Get major versions from https://hub.docker.com/r/library/mongo/tags/
for major_version in 2.6.12 3.0.14 3.2.11 3.4.1; do
sudo docker stop some-mongo ||:
sudo docker rm -f some-mongo ||:
sudo docker run --name some-mongo -p 27017:27017 -v ${PWD}/data/db:/data/db -d mongo:$major_version
# Poll until mongod accepts connections. `false` primes $? so the loop
# body runs at least once; errexit is suspended while polling.
set +o errexit
false; while [[ $? > 0 ]]; do
sleep 0.5
sudo docker exec -it some-mongo mongo --eval 'printjson((new Mongo()).getDBNames())' --verbose
done
set -o errexit
# Restore the dump only under the first (oldest) version; later versions
# upgrade the on-disk database files in place.
if (( $step == 0 )); then
sudo docker exec -it some-mongo mongorestore /data/db/dump --host localhost:27017 --verbose
sleep 1
fi
((step += 1))
done
# Finish with last mongodb version
sudo docker exec -it some-mongo mongo --eval 'db.adminCommand( { setFeatureCompatibilityVersion: "3.4" } )'
sudo docker stop some-mongo
sudo docker rm -f some-mongo
sleep 3
sudo docker run --name some-mongo -p 27017:27017 -v ${PWD}/data/db:/data/db -d mongo:3.6.3
# Make dump with mongo:3.6.3
sudo rm -rf ${PWD}/data/db/dump/*
sudo docker exec -it some-mongo bash -c 'cd /data/db; mongodump'
rm -rf ${PWD}/data/dump_3_6_3
cp -R ${PWD}/data/db/dump/ ${PWD}/data/dump_3_6_3
sleep 3
| true
|
b2a269334ac384b7d36afc7dcd90a9bbe9aff166
|
Shell
|
VirtusLab/git-machete
|
/ci/docker/ci-deploy.sh
|
UTF-8
| 666
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -e -o pipefail -u
# CI job: build the git-machete Docker image, smoke-test that the container
# reports the packaged version, then push to Docker Hub.
# Pushes happen only for master-branch builds and never for --dry-run.
if [[ ${1-} == "--dry-run" || ${CIRCLE_BRANCH-} != "master" ]]; then
do_push=false
else
do_push=true
fi
version=$(python3 setup.py --version)
docker build \
-t gitmachete/git-machete:$version \
-t gitmachete/git-machete:latest \
-f ci/docker/Dockerfile .
# Smoke test: the containerized binary must report the packaged version
# (set -e aborts the job when this [[ ]] fails).
[[ $(docker run gitmachete/git-machete:latest version) == "git-machete version $version" ]]
if [[ $do_push == true ]]; then
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
docker push gitmachete/git-machete:$version
docker push gitmachete/git-machete:latest
else
echo "Refraining from push since it's a dry run"
fi
| true
|
46b46c655717b370d42d35f2d0f48d51f12de86f
|
Shell
|
arun786/shellscript
|
/ShellScript/ToKnowTheHostname.sh
|
UTF-8
| 118
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Capture and print the machine's hostname twice, demonstrating that both
# variables end up holding the same value.
Hostname="$(hostname)"
printf 'Hostname is %s\n' "$Hostname"
HOSTNAME="$(hostname)"
printf 'Hostname is %s\n' "$HOSTNAME"
| true
|
4ddda24a8a7daec5bb193f5e261037d764a84341
|
Shell
|
BioLockJ-Dev-Team/sheepdog_testing_suite
|
/test/feature/defaultProps/checkProperty.sh
|
UTF-8
| 379
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Check that a BioLockJ MASTER properties file defines the expected property
# value, recording the comparison in ../output/results.txt.
#
# Args: $1 = MASTER properties file, $2 = property name, $3 = expected value
# Exit: 0 when the file contains the exact line "<name>=<value>", 1 otherwise
MASTER_PROP_FILE=$1
PROP_NAME=$2
PROP_VAL=$3
OUT="../output/results.txt"
CORRECT_LINE="${PROP_NAME}=${PROP_VAL}"
# Fixed: quote the filename — with a missing/empty $1 the unquoted form made
# grep fall back to reading stdin and hang.
FOUND_LINE=$(grep "${PROP_NAME}=" "$MASTER_PROP_FILE")
echo "Expected: $CORRECT_LINE" > $OUT
echo "Found: $FOUND_LINE" >> $OUT
# Fixed: quote both operands — unquoted, an empty or multi-word grep result
# broke the [ ] expression instead of reporting a mismatch.
if [ "$FOUND_LINE" == "$CORRECT_LINE" ]; then
echo "Good!" >> $OUT
exit 0
else
echo "That is not correct." >> $OUT
exit 1
fi
| true
|
df892b2796b49fa8586811c339ed7ec36f2cae3e
|
Shell
|
kafkaesqu3/misc-scripts
|
/Helpers/TunnelGuardian.sh
|
UTF-8
| 842
| 2.71875
| 3
|
[] |
no_license
|
# Watchdog: every 60s, check the five local SOCKS listeners (127.0.0.1:8081
# through :8085) and re-establish any SSH -D tunnel whose port has gone away.
# NOTE(review): "ip"/"IP" look like placeholders for the real jump host.
while true; do
# One netstat probe per expected listener; empty output means it is down.
p1=$(netstat -tlpn | grep 127.0.0.1:8081)
p2=$(netstat -tlpn | grep 127.0.0.1:8082)
p3=$(netstat -tlpn | grep 127.0.0.1:8083)
p4=$(netstat -tlpn | grep 127.0.0.1:8084)
p5=$(netstat -tlpn | grep 127.0.0.1:8085)
if [ -z "$p1" ] ; then
echo "Lost proxy 1 at $(date)"
# -N: no remote command, -f: background, -D: dynamic SOCKS forward.
ssh -i /home/david/.ssh/id_rsa -N -f -D 8081 root@ip
fi
if [ -z "$p2" ] ; then
echo "Lost proxy 2 at $(date)"
ssh -i /home/david/.ssh/id_rsa -N -f -D 8082 root@ip
fi
if [ -z "$p3" ] ; then
echo "Lost proxy 3 at $(date)"
ssh -i /home/david/.ssh/id_rsa -N -f -D 8083 root@ip
fi
if [ -z "$p4" ] ; then
echo "Lost proxy 4 at $(date)"
ssh -i /home/david/.ssh/id_rsa -N -f -D 8084 root@IP
fi
if [ -z "$p5" ] ; then
echo "Lost proxy 5 at $(date)"
ssh -i /home/david/.ssh/id_rsa -N -f -D 8085 root@IP
fi
sleep 60
done
| true
|
0f41627c6875685590023d44e42e9012d441929f
|
Shell
|
lineCode/c2go
|
/run-tests.sh
|
UTF-8
| 1,334
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Integration tests for c2go: for every tests/*.c file, compile & run it with
# clang, transpile & run it with c2go+Go, then compare exit codes, stdout and
# stderr between the two executables.
CLANG_BIN=${CLANG_BIN:-clang}
CLANG_VERSION=$($CLANG_BIN --version)
echo "CLANG_BIN=$CLANG_BIN"
echo "CLANG_VERSION=$CLANG_VERSION"
echo
function run_test {
export TEST=$1
echo $TEST
# Compile with clang.
$CLANG_BIN -lm $TEST
if [ $? != 0 ]; then
exit 1
fi
# Run the program in a subshell so that the "Abort trap: 6" message is not
# printed.
$(echo "7" | ./a.out some args 2> /tmp/1-stderr.txt 1> /tmp/1-stdout.txt)
C_EXIT_CODE=$?
mkdir -p build
./c2go $TEST > build/main.go
# NOTE(review): $? below reflects the whole `cd && go build && cd` chain,
# but when `go build` fails the shell is still inside build/, so the
# `cat build/main.go` path in the error branch resolves wrongly — confirm.
cd build && go build && cd ..
if [ $? != 0 ]; then
echo "=== out.go"
cat --number build/main.go
exit 1
fi
# Run the program in a subshell so that the "Abort trap: 6" message is not
# printed.
$(echo "7" | ./build/build some args 2> /tmp/2-stderr.txt 1> /tmp/2-stdout.txt)
GO_EXIT_CODE=$?
if [ $C_EXIT_CODE -ne $GO_EXIT_CODE ]; then
echo "ERROR: Received exit code $GO_EXIT_CODE from Go, but expected $C_EXIT_CODE."
exit 1
fi
# Compare the output of the stdout and stderr from C and Go.
diff /tmp/1-stderr.txt /tmp/2-stderr.txt
diff /tmp/1-stdout.txt /tmp/2-stdout.txt
}
# Before we begin, lets build c2go
go build
# Default to every tests/*.c file when no explicit tests were passed.
for TEST in ${@-$(find tests -name "*.c")}; do
run_test $TEST
done
| true
|
a9f20263c9bde92230f9b1ef4660d425c88dcbbf
|
Shell
|
suquark/IoT-Client
|
/scripts/hostname_modify.sh
|
UTF-8
| 243
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# For raspberrypi only: set the machine hostname to $1 and restart the
# services that cache it.
[ -z "$1" ] && echo "Need new hostname" && exit
# Rewrite the 127.0.1.1 self-entry in /etc/hosts to the new name.
sudo sed -ri "s/127\.0\.1\.1.*/127\.0\.1\.1\t$1/g" /etc/hosts
# Fixed: `sudo echo $1 > /etc/hostname` performed the redirection in the
# UNPRIVILEGED shell, so writing /etc/hostname failed for normal users;
# tee performs the write itself under sudo.
echo "$1" | sudo tee /etc/hostname > /dev/null
sudo /etc/init.d/hostname.sh
sudo service avahi-daemon restart
| true
|
01b648d0d7701fe7b87b36d917c130430bbee83f
|
Shell
|
ajishfrancis/AWS-Backup
|
/Ansible_MPC_Development/csv_to_inventory.sh
|
UTF-8
| 3,580
| 2.71875
| 3
|
[] |
no_license
|
# Translate serverdetails.csv into Ansible variables for EC2 creation: one
# "- { ... }" entry per CSV row is appended to the aws_ec2creation_role vars
# file, with the security-group field chosen from OS type (f2) and the three
# optional group columns f10/f11/f12.
# NOTE(review): "instnace_type" is a typo but is emitted verbatim as a YAML
# key — the consuming role must use the same spelling, so it cannot be fixed
# here alone. Also f8 is emitted as both volumes2 and vpc_id — confirm the
# column mapping.
input="/home/ec2-user/Ansible_MPC_Development/serverdetails.csv"
echo "variables: " >> /home/ec2-user/Ansible_MPC_Development/roles/aws_ec2creation_role/vars/main.yml
while IFS="," read -r f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 f16 f17 f18 f19 f20 f21 f22
do
#Condition 0: windows host, no extra security groups -> default windows SG
if [[ $f2 == "windows" && $f10 == '' && $f11 == '' && $f12 == '' ]]
then
echo " - { volumes : $f6, volumes1 : $f7, volumes2 : $f8, region : $f13, keypair : $f14, ec2_name : $f3, image : $f4, subnet_id : $f9 , vpc_id : $f8, instnace_type: $f5, group : sg-010fbaf4d03b4039d }" >> /home/ec2-user/Ansible_MPC_Development/roles/aws_ec2creation_role/vars/main.yml
fi
#Condition 1: linux host, no extra security groups -> default linux SG
if [[ $f2 == "linux" && $f10 == '' && $f11 == '' && $f12 == '' ]]
then
echo " - { volumes : $f6, volumes1 : $f7, volumes2 : $f8, region : $f13, keypair : $f14, ec2_name : $f3, image : $f4, subnet_id : $f9 , vpc_id : $f8, instnace_type : $f5, group : sg-045a90a80e61cedca }" >> /home/ec2-user/Ansible_MPC_Development/roles/aws_ec2creation_role/vars/main.yml
fi
#Condition 2: only f12 given
if [[ $f10 == '' && $f11 == '' && $f12 != '' ]]
then
echo " - { volumes : $f6, volumes1 : $f7, volumes2 : $f8, region : $f13, keypair : $f14, ec2_name : $f3, image : $f4, subnet_id : $f9 , vpc_id : $f8, instnace_type : $f5, group : ['$f12'] }" >> /home/ec2-user/Ansible_MPC_Development/roles/aws_ec2creation_role/vars/main.yml
fi
#Condition 3: only f11 given
if [[ $f10 == '' && $f11 != '' && $f12 == '' ]]
then
echo " - { volumes : $f6, volumes1 : $f7, volumes2 : $f8, region : $f13, keypair : $f14, ec2_name : $f3, image : $f4, subnet_id : $f9 , vpc_id : $f8, instnace_type : $f5, group : ['$f11'] }" >> /home/ec2-user/Ansible_MPC_Development/roles/aws_ec2creation_role/vars/main.yml
fi
#Condition 4: f11 and f12 given
if [[ $f10 == '' && $f11 != '' && $f12 != '' ]]
then
echo " - { volumes : $f6, volumes1 : $f7, volumes2 : $f8, region : $f13, keypair : $f14, ec2_name : $f3, image : $f4, subnet_id : $f9 , vpc_id : $f8, instnace_type : $f5, group : ['$f11','$f12'] }" >> /home/ec2-user/Ansible_MPC_Development/roles/aws_ec2creation_role/vars/main.yml
fi
#Condition 5: only f10 given
if [[ $f10 != '' && $f11 == '' && $f12 == '' ]]
then
echo " - { volumes : $f6, volumes1 : $f7, volumes2 : $f8, region : $f13, keypair : $f14, ec2_name : $f3, image : $f4, subnet_id : $f9 , vpc_id : $f8, instnace_type : $f5, group : ['$f10'] }" >> /home/ec2-user/Ansible_MPC_Development/roles/aws_ec2creation_role/vars/main.yml
fi
#Condition 6: f10 and f12 given
if [[ $f10 != '' && $f11 == '' && $f12 != '' ]]
then
echo " - { volumes : $f6, volumes1 : $f7, volumes2 : $f8, region : $f13, keypair : $f14, ec2_name : $f3, image : $f4, subnet_id : $f9 , vpc_id : $f8, instnace_type : $f5, group : ['$f10','$f12'] }" >> /home/ec2-user/Ansible_MPC_Development/roles/aws_ec2creation_role/vars/main.yml
fi
#Condition 7: f10 and f11 given
if [[ $f10 != '' && $f11 != '' && $f12 == '' ]]
then
echo " - { volumes : $f6, volumes1 : $f7, volumes2 : $f8, region : $f13, keypair : $f14, ec2_name : $f3, image : $f4, subnet_id : $f9 , vpc_id : $f8, instnace_type : $f5, group : ['$f10','$f11'] }" >> /home/ec2-user/Ansible_MPC_Development/roles/aws_ec2creation_role/vars/main.yml
fi
#Condition 8: all three groups given
if [[ $f10 != '' && $f11 != '' && $f12 != '' ]]
then
echo " - { volumes : $f6, volumes1 : $f7, volumes2 : $f8, region : $f13, keypair : $f14, ec2_name : $f3, image : $f4, subnet_id : $f9 , vpc_id : $f8, instnace_type : $f5, group : ['$f10','$f11','$f12'] }" >> /home/ec2-user/Ansible_MPC_Development/roles/aws_ec2creation_role/vars/main.yml
fi
done < "$input"
| true
|
200a559f1fed2087e76b2f3695de0d5509a2bbb7
|
Shell
|
FauxFaux/debian-control
|
/u/unicorn/unicorn_5.4.0-1+b1_amd64/prerm
|
UTF-8
| 252
| 3.125
| 3
|
[] |
no_license
|
#!/bin/sh
# Debian prerm maintainer script for unicorn: stop the running service
# before the package is removed or deconfigured.
set -e

action="$1"

if [ "$action" = "remove" ] || [ "$action" = "deconfigure" ]; then
  # Only act when the init script is actually installed and executable.
  if [ -x /etc/init.d/unicorn ]; then
    if [ -x "$(which invoke-rc.d)" ]; then
      # Prefer invoke-rc.d so local policy-rc.d restrictions are honoured.
      invoke-rc.d unicorn stop
    else
      /etc/init.d/unicorn stop
    fi
  fi
fi

exit 0
| true
|
bff3fa7f286ca1d59e7992d502ae5908cb28dcea
|
Shell
|
brisskit-uol/i2b2-admin-procedures-jl-128
|
/procedures/bin/apps/catissue/import.sh
|
UTF-8
| 6,164
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
#-----------------------------------------------------------------------------------------------
# Import catissue ontology and catissue pdo's into i2b2.
#
# Mandatory: the following environment variables must be set
#            I2B2_ADMIN_PROCS_HOME
# Optional : the I2B2_ADMIN_PROCS_WORKSPACE environment variable.
# The latter is an optional full path to a workspace area. If not set, defaults to a workspace
# within the procedures' home.
#
# USAGE: {script-file-name}.sh job-name
# Where:
#   job-name is a suitable tag that groups all jobs associated within the overall workflow
# Notes:
#   The job-name must be associated with the prerequisite run of the refine-metadata script.
#
#-----------------------------------------------------------------------------------------------

# Pull in print_message / exit_if_bad / print_usage plus the directory-name
# variables (PDO_DIRECTORY, REFINED_METADATA_*, JOB_LOG_NAME) used below.
source $I2B2_ADMIN_PROCS_HOME/bin/common/functions.sh
source $I2B2_ADMIN_PROCS_HOME/bin/common/setenv.sh

#=======================================================================
# First, some basic checks...
#=======================================================================
#
# Check on the usage...
if [ ! $# -eq 1 ]
then
	echo "Error! Incorrect number of arguments"
	echo ""
	print_usage
	exit 1
fi
#
# Retrieve the argument into its variable...
JOB_NAME=$1
#
# It is possible to set your own procedures workspace.
# But if it doesn't exist, we create one for you within the procedures home...
if [ -z $I2B2_ADMIN_PROCS_WORKSPACE ]
then
	I2B2_ADMIN_PROCS_WORKSPACE=$I2B2_ADMIN_PROCS_HOME/work
fi
#
# We use the log file for the job...
LOG_FILE=$I2B2_ADMIN_PROCS_WORKSPACE/$JOB_NAME/$JOB_LOG_NAME

#===========================================================================
# The real work is about to start.
#===========================================================================

#================================================================
# Remove, then create working directories...
#================================================================
print_message "Creating working directories..." $LOG_FILE
rm -Rf $I2B2_ADMIN_PROCS_WORKSPACE/$JOB_NAME/ >/dev/null 2>/dev/null
# BUG FIX: the original used plain mkdir here, which only creates the final
# path component -- but the job directory itself was just removed above, so
# the very first mkdir could never succeed. -p creates the whole chain.
mkdir -p $I2B2_ADMIN_PROCS_WORKSPACE/$JOB_NAME/$PDO_DIRECTORY
exit_if_bad $? "Failed to create $I2B2_ADMIN_PROCS_WORKSPACE/$JOB_NAME/$PDO_DIRECTORY"
mkdir -p $I2B2_ADMIN_PROCS_WORKSPACE/$JOB_NAME/$PDO_DIRECTORY/$METADATA_SQL_DIRECTORY
exit_if_bad $? "Failed to create $I2B2_ADMIN_PROCS_WORKSPACE/$JOB_NAME/$PDO_DIRECTORY/$METADATA_SQL_DIRECTORY"
mkdir -p $I2B2_ADMIN_PROCS_WORKSPACE/$JOB_NAME/$PDO_DIRECTORY/$REFINED_METADATA_CATISSUE_DIRECTORY
exit_if_bad $? "Failed to create $I2B2_ADMIN_PROCS_WORKSPACE/$JOB_NAME/$PDO_DIRECTORY/$REFINED_METADATA_CATISSUE_DIRECTORY"
mkdir -p $I2B2_ADMIN_PROCS_WORKSPACE/$JOB_NAME/$PDO_DIRECTORY/$REFINED_METADATA_ENUMS_CATISSUE_DIRECTORY
exit_if_bad $? "Failed to create $I2B2_ADMIN_PROCS_WORKSPACE/$JOB_NAME/$PDO_DIRECTORY/$REFINED_METADATA_ENUMS_CATISSUE_DIRECTORY"
print_message "Working directories created." $LOG_FILE

#==========================================================================
# Copy main refined metadata file into appropriate working directory
#==========================================================================
# NOTE(review): this destination omits $JOB_NAME and $PDO_DIRECTORY even
# though the matching directory created above is
# $JOB_NAME/$PDO_DIRECTORY/$REFINED_METADATA_CATISSUE_DIRECTORY -- confirm
# which path the downstream XSLT steps actually read before changing it.
cp $I2B2_ADMIN_PROCS_HOME/ontologies/catissue/ontology/$MAIN_REFINED_METADATA_CATISSUE_FILE_NAME \
   $I2B2_ADMIN_PROCS_WORKSPACE/$REFINED_METADATA_CATISSUE_DIRECTORY
exit_if_bad $? "Failed to copy $I2B2_ADMIN_PROCS_HOME/ontologies/catissue/ontology/$MAIN_REFINED_METADATA_CATISSUE_FILE_NAME"
print_message "Copied $MAIN_REFINED_METADATA_CATISSUE_FILE_NAME." $LOG_FILE

#==========================================================================
# Copy remotely produced enums into appropriate working directory
#==========================================================================
# NOTE(review): this uses REFINED_METADATA_CATISSUE_ENUMS_DIRECTORY while the
# directory created above used REFINED_METADATA_ENUMS_CATISSUE_DIRECTORY
# (words swapped). If one of the two is undefined in setenv.sh the files land
# one level up -- confirm against setenv.sh.
cp $I2B2_ADMIN_PROCS_HOME/remote-holding-area/catissue/ontology-enums/* \
   $I2B2_ADMIN_PROCS_WORKSPACE/$JOB_NAME/$REFINED_METADATA_CATISSUE_ENUMS_DIRECTORY
exit_if_bad $? "Failed to copy remotely produced enums into $I2B2_ADMIN_PROCS_WORKSPACE/$JOB_NAME/$REFINED_METADATA_CATISSUE_ENUMS_DIRECTORY"
print_message "Copied remotely produced enums into $I2B2_ADMIN_PROCS_WORKSPACE/$JOB_NAME/$REFINED_METADATA_CATISSUE_ENUMS_DIRECTORY" $LOG_FILE

#==========================================================================
# Copy remotely produced pdo's into appropriate working directory
#==========================================================================
cp $I2B2_ADMIN_PROCS_HOME/remote-holding-area/catissue/pdo/* \
   $I2B2_ADMIN_PROCS_WORKSPACE/$JOB_NAME/$PDO_DIRECTORY
exit_if_bad $? "Failed to copy remotely produced PDO's into $I2B2_ADMIN_PROCS_WORKSPACE/$JOB_NAME/$PDO_DIRECTORY"
print_message "Copied remotely produced PDO's into $I2B2_ADMIN_PROCS_WORKSPACE/$JOB_NAME/$PDO_DIRECTORY" $LOG_FILE

#==========================================================================
# Produce catissue main ontology sql commands...
#==========================================================================
$I2B2_ADMIN_PROCS_HOME/bin/apps/catissue/ontology-prep/6-xslt-refined-2ontcell-catissue.sh $JOB_NAME
$I2B2_ADMIN_PROCS_HOME/bin/apps/catissue/ontology-prep/7-xslt-refined-2ontdim-catissue.sh $JOB_NAME

#==========================================================================
# Produce catissue enumerations ontology sql commands...
#==========================================================================
$I2B2_ADMIN_PROCS_HOME/bin/apps/catissue/ontology-prep/8-xslt-refined-enum2ontcell-catissue.sh $JOB_NAME
$I2B2_ADMIN_PROCS_HOME/bin/apps/catissue/ontology-prep/9-xslt-refined-enum2ontdim-catissue.sh $JOB_NAME

#==========================================================================
# Upload the ontology data by executing the sql commands...
#==========================================================================
$I2B2_ADMIN_PROCS_HOME/bin/apps/catissue/meta-upload/metadata-upload-sql.sh $JOB_NAME

#==========================================================================
# Upload the PDO's...
#==========================================================================
#$I2B2_ADMIN_PROCS_HOME/bin/participant-upload/participant-upload-ws.sh $JOB_NAME
| true
|
1799cbf3e5893988e2f43516d60522d8063dab0e
|
Shell
|
wvengen/Rlgi
|
/resource/scripts/job_prologue_script
|
UTF-8
| 852
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/sh
# fetch all files from repository
# Reads the repository URL from the LGI_job_specifics XML file and, when one
# is configured, downloads every non-bookkeeping file from that repository
# into the current job directory.
repository=`xml -i LGI_job_specifics repository_url`
if [ "$repository" ]; then
# Since LGI provides no real xml utilities, we have to do some xml
# parsing ourselves. This is not something to be copied, don't try
# this at home!
# Don't download anything starting with "LGI" to avoid overwriting
# files that the resource daemon uses; ".LGI_*" files may be
# used by the project server and are not useful here.
# Download 'empty' file first to avoid LGI_filetransfer help message
# when no files are in the repository; this is just ignored.
# Pipeline: list repository -> split <file ...> entries one per line ->
# extract the name attribute -> drop LGI/.LGI files -> batch-download.
LGI_filetransfer -x -j . list "$repository" | \
xml repository_content | \
sed 's/\(<file\)/\n\1/g' | \
sed 's/^<file name="\(.*\)\">.*$/\1/p;d' | \
grep -v '^.\?LGI' | \
xargs LGI_filetransfer -j . download "$repository" ""
fi
exit 0
| true
|
02e28f297ef2ec03ec780db9b5c973ccf58bf9b6
|
Shell
|
UX-admin/RTPfoundation
|
/postinstall
|
UTF-8
| 1,443
| 2.59375
| 3
|
[] |
no_license
|
#!/sbin/sh
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the Common
# Development and Distribution License, Version 1.0 only (the
# "License").  You may not use this file except in compliance with
# the License.
#
# A copy of the Common Development and Distribution License is
# available at:
#
# https://github.com/UX-admin/rpmmacros/blob/master/LICENSE
#
# see the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL header in each
# file, and include the "LICENSE" file.
#
# If applicable, add the following below this CDDL header, with the
# fields enclosed by brackets "[]" replaced with your own
# identifying information: Portions Copyright [yyyy] [name of
# copyright owner]
#
# CDDL HEADER END
#
# Finalise the package installation: create the 64-bit library symlink
# under ${RTP_BASE}/lib, register it with the packaging database, and
# refresh the windex SMF service when installing into the live root.
PKG_INSTALL_ROOT="${PKG_INSTALL_ROOT:-/}"; export PKG_INSTALL_ROOT
BaseDir="${PKG_INSTALL_ROOT}/${RTP_BASE}"
# Resolve the platform's 64-bit ISA directory name (e.g. amd64 or sparcv9)
# by reading where /usr/lib/64 points.
ISA64=`ls -l "${PKG_INSTALL_ROOT}/usr/lib/64" | awk '{print $NF;}'`
if [ ! -h "${BaseDir}/lib/64" ]
then
cd "${BaseDir}/lib/" && ln -s "${ISA64}" "64"
fi
chown -h ${RTP_LOGIN}:${RTP_GROUP} "${BaseDir}/lib/64"
# Record the symlink with the package database so pkgrm can remove it,
# then finalise the installf session.
installf "${PKGINST}" "/${RTP_BASE}/lib/64"
installf -f "${PKGINST}"
case "${PKG_INSTALL_ROOT}"
in
""|/)
# Only touch SMF when installing into the running system (not an
# alternate root); the refresh is best-effort ('|| true').
svcs -H "${RTP_WINDEX_FMRI}:${RTP_WINDEX_INSTANCE}" > /dev/null 2>&1 && svcadm refresh "${RTP_WINDEX_FMRI}:${RTP_WINDEX_INSTANCE}" || true
;;
esac
| true
|
3420c17cb4212c8fecdb738baf94f5824826bdf0
|
Shell
|
Java4all/aws-infra.jenkins-scripts
|
/provision.sh
|
UTF-8
| 4,777
| 3.78125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -x
# NOTE(review): -x in the shebang is lost when the script is invoked as
# "bash provision.sh"; tracing only applies on direct execution.
env
# Describe the instance whose id is in the global INSTANCE and cache its
# private IP address in the global PRIVATEIP (column 2 of the
# PRIVATEIPADDRESS line of the ec2-describe-instances output).
getprivateip ()
{
DESCRIPTION=$(ec2-api-tools/bin/ec2-describe-instances ${INSTANCE})
PRIVATEIP=$(echo "$DESCRIPTION" | sed -n -e '/^PRIVATEIPADDRESS/p' | awk '{print $2}')
}
# Describe the instance in $INSTANCE and cache its state (e.g. pending,
# running) in the global STATE. The state is normally column 5 of the
# INSTANCE line; when that column holds the private DNS name (ip-*)
# instead, fall back to column 6.
getstate ()
{
DESCRIPTION=$(ec2-api-tools/bin/ec2-describe-instances ${INSTANCE})
STATE=$(echo "$DESCRIPTION" | sed -n -e '/^INSTANCE/p' | awk '{print $5}')
if [[ "$STATE" == ip-* ]]; then
STATE=$(echo "$DESCRIPTION" | sed -n -e '/^INSTANCE/p' | awk '{print $6}')
fi
}
# Poll the instance state (via getstate) every SLEEP_AMOUNT seconds until
# it reports "running" or MAX_TESTS polls have elapsed.
# NOTE(review): on timeout this returns normally and the script carries
# on without any error signal -- behaviour preserved from the original.
wait_state_running ()
{
echo "[$(date)] Waiting for running state ..."
MAX_TESTS=20
SLEEP_AMOUNT=10
OVER=0
TESTS=0
while [[ $OVER != 1 ]] && [[ $TESTS -le $MAX_TESTS ]]; do
getstate
echo "[$(date)] $STATE"
if [[ "$STATE" != "running" ]]; then
OVER=0
# Plain arithmetic assignment; the original's
# TESTS=$(echo $(( TESTS+=1 ))) spawned a useless echo subshell.
TESTS=$((TESTS + 1))
sleep $SLEEP_AMOUNT
else
OVER=1
fi
done
}
# Ping the instance's private IP (resolved via getprivateip) until it
# answers or MAX_TESTS attempts have been made.
# NOTE(review): like the other wait_* helpers, a timeout is silent.
wait_ping ()
{
echo "[$(date)] Waiting for ping ..."
MAX_TESTS=20
SLEEP_AMOUNT=10
OVER=0
TESTS=0
getprivateip
while [[ $OVER != 1 ]] && [[ $TESTS -le $MAX_TESTS ]]; do
ping -c1 $PRIVATEIP
if [[ $? == 0 ]]; then
OVER=1
else
# Plain arithmetic assignment instead of the original echo-subshell.
TESTS=$((TESTS + 1))
sleep $SLEEP_AMOUNT
fi
done
}
# Attempt a no-op ssh connection as root until it stops failing with the
# connection-level status 255, or MAX_TESTS attempts have been made.
# NOTE(review): any status other than 255 (including auth failures) is
# treated as "ssh is up" -- preserved from the original.
wait_ssh ()
{
MAX_TESTS=20
SLEEP_AMOUNT=10
OVER=0
TESTS=0
getprivateip
echo "[$(date)] Waiting for ssh ..."
while [[ $OVER != 1 ]] && [[ $TESTS -le $MAX_TESTS ]]; do
ssh -q -o StrictHostKeyChecking=no -i /var/jenkins_home/.ssh/aws.pem root@$PRIVATEIP exit
if [[ $? != 255 ]]; then
OVER=1
else
# Plain arithmetic assignment instead of the original echo-subshell.
TESTS=$((TESTS + 1))
sleep $SLEEP_AMOUNT
fi
done
}
# Main flow: trim the Jenkins choice parameters, launch the EC2 instance,
# wait until it is reachable, prepare the OS, then bootstrap it with Chef
# and apply the requested role.
# The sed 's/ .*//' calls keep only the text before the first space --
# presumably the Jenkins parameters carry a trailing description.
SECURITYGROUPS=$(echo $SECURITYGROUPS | sed 's/ .*//')
IMAGE=$(echo $IMAGE | sed 's/ .*//')
INSTANCE_TYPE=$(echo $INSTANCE_TYPE | sed 's/ .*//')
NEWROLE=$(echo $NEWROLE | sed 's/ .*//')
# NOTE(review): SECURITYGROUPS is trimmed a second time here (harmless,
# but redundant with the first line above).
SECURITYGROUPS=$(echo $SECURITYGROUPS | sed 's/ .*//')
SUBNET=$(echo $SUBNET | sed 's/ .*//')
TARGETENVIRONMENT=$(echo $TARGETENVIRONMENT | sed 's/ .*//')
# Launch the instance and capture its i-xxxx id from line 2 of the
# ec2-run-instances output.
export INSTANCE=$(ec2-api-tools/bin/ec2-run-instances $IMAGE --subnet $SUBNET --key $KEYNAME --group $SECURITYGROUPS --instance-type $INSTANCE_TYPE --block-device-mapping "/dev/sda=:${DISKSIZE}:true:gp2" | sed -n '2p' | awk '{print $2}')
if [[ -z "$INSTANCE" ]]; then
echo "Could not get an instance id."
exit 1
fi
# Persist the id so the companion teardown job can terminate the instance.
echo $INSTANCE > /var/jenkins_home/workspace/provision-teardown/INSTANCE
wait_state_running
wait_ping
wait_ssh
echo "[$(date)] $INSTANCE resizing disk."
ssh -i /var/jenkins_home/.ssh/aws.pem -o StrictHostKeyChecking=no -t -t root@$PRIVATEIP "resize2fs /dev/xvde"
echo "[$(date)] $INSTANCE update /etc/sysconfig/network"
ssh -i /var/jenkins_home/.ssh/aws.pem -o StrictHostKeyChecking=no -t -t root@$PRIVATEIP "sed -i 's/localhost.localdomain/${NEWHOSTNAME}/' /etc/sysconfig/network && cat /etc/sysconfig/network"
echo "[$(date)] $INSTANCE update /etc/sysconfig/clock"
ssh -i /var/jenkins_home/.ssh/aws.pem -o StrictHostKeyChecking=no -t -t root@$PRIVATEIP sed -i 's#ZONE=\"UTC\"#ZONE=\"America/Denver\"#' /etc/sysconfig/clock
ssh -i /var/jenkins_home/.ssh/aws.pem -o StrictHostKeyChecking=no -t -t root@$PRIVATEIP "sed -i 's#UTC=True#UTC=False#' /etc/sysconfig/clock && ln -sf /usr/share/zoneinfo/America/Denver /etc/localtime && cat /etc/sysconfig/clock"
echo "[$(date)] $INSTANCE create /etc/chef/encrypted_data_bag_secret"
ssh -i /var/jenkins_home/.ssh/aws.pem -o StrictHostKeyChecking=no -t -t root@$PRIVATEIP 'mkdir -p /etc/chef'
# NOTE(review): the Chef data-bag secret below is hard-coded in this script
# (and therefore committed with it); it should be moved to a credentials
# store and rotated.
ssh -i /var/jenkins_home/.ssh/aws.pem -o StrictHostKeyChecking=no -t -t root@$PRIVATEIP 'echo "0o4LcdbuNZmATnMoXeZDdZmgkCO9YiHsHsyNNtiMgJ94eSimiL74HuXWdGKXeVVGypEvaxy/JpmXdwydICDHH9qsIWbfgH0hTMqImU/2EBm8Bn4q8tlJ4WyxNl1cFtsG+vicyv6dQB5B9rlltECj+cvCIf0pjakqwVswMchm4iWqhipGVceyMQ8b7f8zIAwfQTCbfOxhQoAp5Gd89e5pT+qR2KpREtyDXSZflGsxd6MipTw+nj3WFrUfNrGh0wIBfM1r8q7x3uMRF6DgofwrYgNUuaAHh0Ky8SFrRTyYlLY37pS4QPI5kRjVI94jlK6SRkRxoHrVENiLYEswhLydAY8xg9u+kIpyqpg0fHakkqMb0FTIqpuuf4jG6VgzyPZg51Q6ZfRnQolnutk0Kth67riQAPAp+sRnxIFCDkbB5A696/GVdqBdWDzMeXVN1bWhF7lSxhzlHYVLzRIF1S6pLGduEjKRJFdxpFoYS31kGxwUoAPEACT/6QxGCvfTGo0wgMY+2e1QR2EZobqKlTqcNVpTNlaDP40wPtLf+Y+Rx62IDAp491plnYbOZH9kR3wdCqw/EVFipvvrXQ8BV0BbfRKn7RYDmaTlGZ6Xjfv1Pwnr2THZpHj+DbeOAU1W4ednjHlJhCqyt+Z4TZfY1pUOhECAnGQh5pddft6rydB+8mE=" > /etc/chef/encrypted_data_bag_secret'
ssh -i /var/jenkins_home/.ssh/aws.pem -o StrictHostKeyChecking=no -t -t root@$PRIVATEIP 'cat /etc/chef/encrypted_data_bag_secret'
echo "[$(date)] $INSTANCE update /etc/hosts"
ssh -i /var/jenkins_home/.ssh/aws.pem -o StrictHostKeyChecking=no -t -t root@$PRIVATEIP 'sed -i "2i/${PRIVATEIP} ${NEWHOSTNAME} ${NEWHOST}/" /etc/hosts'
echo "[$(date)] $INSTANCE yum update"
ssh -i /var/jenkins_home/.ssh/aws.pem -o StrictHostKeyChecking=no -t -t root@$PRIVATEIP 'yum update -y && reboot'
echo "[$(date)] $INSTANCE rebooting."
wait_state_running
wait_ping
wait_ssh
# Ensure chef-server has no stale node/client entries for this hostname.
echo "[$(date)] delete node from chef if it exists"
knife node $NEWHOSTNAME delete -y
echo "[$(date)] delete client from chef if it exists"
knife client $NEWHOSTNAME delete -y
echo "[$(date)] bootstrap the instance"
knife bootstrap $PRIVATEIP -u root -N $NEWHOSTNAME -E $TARGETENVIRONMENT -i /var/jenkins_home/.ssh/aws.pem --no-host-key-verify
echo "[$(date)] $INSTANCE update /etc/chef/client.rb"
ssh -i /var/jenkins_home/.ssh/aws.pem -o StrictHostKeyChecking=no -t -t root@$PRIVATEIP 'echo "ssl_verify_mode :verify_peer" >> /etc/chef/client.rb'
echo "[$(date)] $INSTANCE apply role."
knife node run_list add $NEWHOSTNAME "role[$NEWROLE]"
ssh -i /var/jenkins_home/.ssh/aws.pem -o StrictHostKeyChecking=no -t -t root@$PRIVATEIP 'chef-client -l debug'
|
1ee1a9f41baf539a3b4a294996da0173369f1139
|
Shell
|
mscharmann/ABC-pipeline
|
/collection_of_scripts/simulate.ddRAD_Dianthus.2018-08-29.sh
|
UTF-8
| 3,792
| 3
| 3
|
[] |
no_license
|
# simulate.sh
# This script executes the following pipeline:
# draw_priors | simulate_coalescent_samples | record_summary_statistics
# it is designed for an LSB batch submission system on a computer cluster, requiring the variable $LSB_JOBINDEX
# example use: bash simulate.2018-05-15.sh bpfile.txt spinput.txt test
# toolbase is the ABSOLUTE path to where you keep the scripts
toolbase=/cluster/project/gdc/people/schamath/Dianthus.2018-08/tools_used
# collect trailing arguments and validate
bpfile=$1
spinput=$2
scenario=$3
if [ -f ./${bpfile} ]
then
echo "bpfile found, proceeding."
else
echo "bpfile (1st trailing argument) not found, dying."
exit
fi
if [ -f ./${spinput} ]
then
echo "spinput file found, proceeding."
else
echo "spinput file (2nd trailing argument) not found, dying."
exit
fi
argfile=argfile.${scenario}.txt
if [ -f ./argfile.${scenario}.txt ]
then
echo "argfile.${scenario}.txt found, proceeding."
else
echo "argfile.${scenario}.txt not found, dying. Make an argfile.SCENARIO.txt and give SCENARIO as 3rd trailing argument."
exit
fi
# common msnsam string to all models (drop of parameters is achieved by setting them zero or equal to a redundant one)
# verbose of msnsam_model_cmd
v="-t tbs[theta] -r tbs[rec_rate] tbs[nsites] -I 2[npop] tbs[nsam1] tbs[nsam2] 0[symm_migration_matrix]
-en 0.0[present time] 1[popID] tbs[N1_recent]
-eg 0.0[present time] 1[popID] tbs[p1_growth_rate_during_recent_phase !unfree: constant OR exponential rate given by N1_recent and N1_ancient; controlled in argfile!]
-en 0.0[present time] 2[popID] tbs[N2_recent]
-eg 0.0[present time] 2[popID] tbs[p2_growth_rate_during_recent_phase !unfree: constant OR exponential rate given by N2_recent and N2_ancient; controlled in argfile!]
-en tbs[T_N1_ancient] 1[popID] tbs[N1_ancient]
-en tbs[T_N2_ancient] 2[popID] tbs[N2_ancient]
-em tbs[T_m12_start] 1[recipient_ID] 2[donor_ID] tbs[m12 !given by hyper-parameters m12_scale and m12_prop_mig!]
-em tbs[T_m12_stop] 1[recipient_ID] 2[donor_ID] 0[mig_rate]
-em tbs[T_m21_start] 2[recipient_ID] 1[donor_ID] tbs[m21 !given by hyper-parameters m21_scale and m21_prop_mig!]
-em tbs[T_m21_stop] 2[recipient_ID] 1[donor_ID] 0[mig_rate]
-ej tbs[T_merge_2_into_1] 2[popID] 1[popID]
-en tbs[T_merge_2_into_1] 1[popID] tbs[pop_size_of_anc12, constant]"
# the actual (non-verbose) msnsam_model_cmd: copy verbose version and then in TextWrangler grep-replace \[.*?\] with nothing:
msnsam_model_cmd="-t tbs -r tbs tbs -I 2 tbs tbs 0
-en 0.0 1 tbs
-eg 0.0 1 tbs
-en 0.0 2 tbs
-eg 0.0 2 tbs
-en tbs 1 tbs
-en tbs 2 tbs
-em tbs 1 2 tbs
-em tbs 1 2 0
-em tbs 2 1 tbs
-em tbs 2 1 0
-ej tbs 2 1
-en tbs 1 tbs"
# cluster environment: make python 2.7 available
module load python/2.7
# each LSB array task works in its own temp dir one level below the launch dir
ori_dir=..
#LSB_JOBINDEX=1
mkdir temp_${scenario}_${LSB_JOBINDEX}
cd temp_${scenario}_${LSB_JOBINDEX}
cp ${ori_dir}/${bpfile} ./
cp ${ori_dir}/${argfile} ./
cp ${ori_dir}/${spinput} ./
mv $spinput spinput.txt
# build msnsam_arg2 from the arg and spinput files (nloci * nreps); cannot be a tbs argument to msnsam!:
# nloci is line 2 of spinput.txt; nreps is read from the "nreps" line of the argfile.
nloci=$(head -2 spinput.txt | tail -1 )
# NOTE(review): awk's FS assignment in the pattern position only takes effect
# from the second input record onward; this works as long as the "nreps" line
# is not the first line of the argfile -- confirm.
nreps=$( cat $argfile | grep "nreps" | awk 'FS=" = " {print $3}' )
msnsam_arg2=$(( $nloci * $nreps ))
# make sure spinput also contains correct nreps from argfile: this is only relevant for correct output of the progress report (the simulations would run fine without)
head -n -3 spinput.txt > firstpart
tail -2 spinput.txt > lastpart
echo $nreps > middlepart
cat firstpart middlepart lastpart > spinput.txt
rm firstpart middlepart lastpart
# the pipeline itself
# priors -> msnsam coalescent simulations -> summary statistics
python $toolbase/draw_ms_priors.ddRAD_Dianthus.2pop.2018-08-29.py -bpfile $bpfile -argfile $argfile | $toolbase/msnsam/msnsam tbs $msnsam_arg2 $msnsam_model_cmd | python $toolbase/ms2stats.arbitrary_npop.counter.py | python $toolbase/ms2stats.arbitrary_npop.stats.py
| true
|
9384f22d8764fdbc21c5c3bb4479a2ab7ced3cea
|
Shell
|
cemvarol/AZ-303-Labs
|
/Lab-04/Lab-04-Resources.bash
|
UTF-8
| 2,672
| 2.640625
| 3
|
[] |
no_license
|
# Provision the AZ-303 Lab 04 environment: a resource group, VNet + NSG,
# a Basic load balancer and two Windows VMs behind it.
az provider register --namespace 'Microsoft.Insights'
# Derive the signed-in user's short name: lowercase the UPN, keep the part
# after '@', then drop the trailing 24 characters.
a=$(az ad signed-in-user show --query userPrincipalName)
A=$(echo "$a" | sed -e 's/\(.*\)/\L\1/')
B=${A:$(echo `expr index "$A" @`)}
# NOTE(review): C is computed but never used below -- presumably consumed
# by a later lab step; confirm.
C=${B:: -24}
RG=AZ-303Lab-04a
VNet=Lab04-VNet01
Nsg=L04NSG
NsgR=L04Rule1
L=eastus
LB=Lab04-1LB
BEndPool=Lab04-1BEpool
VM01=L04-VM01
VM02=L04-VM02
Nic01=$(echo "$VM01"VMNic)
Nic02=$(echo "$VM02"VMNic)
OS=Win2019DataCenter
VMSize=standard_B2ms
APA="10.205.0.0/16"
SNA01="10.205.1.0/24"
# NOTE(review): lab credentials are hard-coded here; acceptable for a
# throwaway lab, never for production.
user=QA
pass=1q2w3e4r5t6y*
az group create -n $RG -l $L
az network nsg create -g $RG -n $Nsg
az network nsg rule create -g $RG --nsg-name $Nsg -n $NsgR --priority 100 --destination-port-ranges "*" --direction Inbound
az network vnet create --resource-group $RG --name $VNet --address-prefixes $APA --subnet-name SNA01 --subnet-prefix $SNA01
export SUBNETID01=$(az network vnet subnet show --resource-group $RG --vnet-name $VNet --name SNA01 --query id -o tsv)
export SUBNETN01=$(az network vnet subnet show --resource-group $RG --vnet-name $VNet --name SNA01 --query name -o tsv)
az network vnet subnet update -g $RG --vnet-name $VNet -n $SUBNETN01 --network-security-group $Nsg
az vm availability-set create -n AVS01 -g $RG -l $L --platform-fault-domain-count 3 --platform-update-domain-count 2
#Load Balancer Create
az network lb create -g $RG -n $LB -l $L --sku Basic --public-ip-address Lab04-NLBpip --frontend-ip-name Lab04-1FEip --backend-pool-name $BEndPool
#Load Balancer Health Probe Create
az network lb probe create -g $RG --lb-name $LB --name Web --protocol tcp --port 80
#Load Balancer Rule Create
az network lb rule create -g $RG --lb-name $LB --name http --protocol tcp --frontend-port 80 --backend-port 80 --frontend-ip-name Lab04-1FEip --backend-pool-name $BEndPool --probe-name Web --disable-outbound-snat true
#Create NICs
az network nic create --resource-group $RG --name $Nic01 --vnet-name $VNet --subnet $SUBNETN01 --lb-name $LB --lb-address-pools $BEndPool --no-wait
az network nic create --resource-group $RG --name $Nic02 --vnet-name $VNet --subnet $SUBNETN01 --lb-name $LB --lb-address-pools $BEndPool
#VM Create
az vm create --resource-group $RG -n $VM01 -l $L --image $OS --admin-username $user --admin-password $pass --size $VMSize --nics $Nic01 --license-type Windows_Server --nsg "" --availability-set AVS01 --no-wait
az vm create --resource-group $RG -n $VM02 -l $L --image $OS --admin-username $user --admin-password $pass --size $VMSize --nics $Nic02 --license-type Windows_Server --nsg "" --availability-set AVS01
# Extract the load balancer's public IP from the quoted JSON string output
# (strip the leading and trailing double quotes).
z1=$(az network public-ip show -g $RG -n Lab04-NLBpip --query ipAddress)
z2=${z1:$(echo `expr index "$z1" '"'`)}
NLBip=${z2:: -1}
#
| true
|
98be434dcd6979503c6b8ece021f493bdb441466
|
Shell
|
Jrr1232/Fall2020_CS407_BashScripts
|
/Joaquin/mybash.sh
|
UTF-8
| 49
| 2.6875
| 3
|
[] |
no_license
|
# Print each command-line argument on its own line, prefixed "Argument: ".
main() {
  local arg
  # BUG FIX: the original iterated over unquoted $*, which re-splits
  # arguments on whitespace and expands globs; "$@" keeps each argument
  # intact.
  for arg in "$@"; do
    printf 'Argument: %s\n' "$arg"
  done
}

main "$@"
| true
|
49a9c9da40063f5a8b31922571db8db5e115c849
|
Shell
|
aseemsethi/monitor
|
/bin/logs
|
UTF-8
| 1,010
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Open xterm windows tailing the monitoring logs for a given module.
# Usage: logs <instance> <ssl|ssl_perf|http|bgp>
#   $1 - subdirectory under /var/monT holding the log files
#   $2 - which protocol's logs/stats windows to open
i=0
colors=( blue green yellow orange )
useColor=${colors[i]}
i=$(( $i+1 ))
i=$(( $i%${#colors[@]} ))
# NOTE(review): the rotation above always leaves i at 1 within a single
# run, so every xterm below is green and useColor is never read; the
# round-robin presumably only made sense with state persisted across
# invocations -- confirm before changing.
case $2 in
ssl)
echo "Log file for $1 $2"
xterm -bg ${colors[i]} -T "SSL FUNC $1 $2" -e "tail -f /var/monT/$1/ssl_logs"&
xterm -bg ${colors[i]} -T "SSL PERF $1 $2" -e "tail -f /var/monT/$1/ssl_stats"&
;;
ssl_perf)
echo "Log file for $1 $2"
xterm -bg ${colors[i]} -T "SSL FUNC $1 $2" -e "tail -f /var/monT/$1/ssl_perf_logs"&
xterm -bg ${colors[i]} -T "SSL PERF $1 $2" -e "tail -f /var/monT/$1/ssl_perf_stats"&
;;
http)
echo "Log file for $1 $2"
xterm -bg ${colors[i]} -T "HTTP $1 $2" -e "tail -f /var/monT/$1/http_logs"&
xterm -bg ${colors[i]} -T "HTTP STATS $1 $2" -e "tail -f /var/monT/$1/http_stats"&
;;
bgp)
echo "Log file for $1 $2"
xterm -bg ${colors[i]} -T "BGP $1 $2" -e "tail -f /var/monT/$1/bgp_logs"&
xterm -bg ${colors[i]} -T "BGP STATS $1 $2" -e "tail -f /var/monT/$1/bgp_stats"&
;;
*)
echo "Invalid input"
exit 1
esac
| true
|
1939edd0636ff595239432f9582d604ad536c0ec
|
Shell
|
AllenAn9410/LinuxTry
|
/test11/build.sh
|
UTF-8
| 2,499
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactively collect the project id/name and the db/mq/ap stack choices.
# Sets globals: proj_id, s_port, proj_name, db_type, mq_type, ap_type.
# Exits the script when id or name is left empty, or the id is out of range.
function get_input(){
  printf "\n -----${BLUE}EE Docker test${NC} ----- \nbegin prepare your docker env.[default value is the first item]\n"
  read -p "Project id:" proj_id
  if [ -z "$proj_id" ] ; then
    exit 0
  fi
  if [ "$proj_id" -lt 2000 ] || [ "$proj_id" -gt 4999 ] ; then
    echo "error id : ${proj_id},please check your regist info!"
    exit
  fi
  # Each project gets a block of 10 ports starting at id*10.
  s_port=$(( proj_id * 10 ))
  read -p "Project name:" proj_name
  if [ -z "$proj_name" ] ; then
    exit 0
  fi
  # Index 0 is a "none" placeholder so the 1-based menu answers map directly.
  lists=(none ora_test1 ora_test2 ora_test3)
  read -p "database type :(1-ora_test1, 2-ora_test2, 3-ora_test3 : ) " i
  if [ -z "$i" ] ; then
    i=1
  fi
  db_type=${lists[i]}
  lists=(none mq_test1 mq_test2 )
  read -p "mq : (1-mq_test1 , 2-mq_test2 ) " i
  if [ -z "$i" ]; then
    i=1
  fi
  mq_type=${lists[i]}
  lists=(none was_test1 was_test2 was_test3)
  read -p "ap/web server:(1-was_test1 , 2-was_test2 , 3-was_test3 : ) " i
  if [ -z "$i" ]; then
    i=1
  fi
  ap_type=${lists[i]}
  # BUG FIX: the original tested the literal strings ([ -z proj_id ]),
  # which are never empty, so this guard could never fire.
  if [ -z "$proj_id" ] || [ -z "$proj_name" ]; then
    echo "id and name cannot null!"
    exit
  fi
}
# Prepare the AP/web-server data folders and the per-project ports.
# Reads globals: proj_name, s_port, ap_type.
# Port layout: base+0 web, base+1 admin console, base+7 remote debug.
function apweb_ee() {
  echo "create folder: $proj_name"
  if [ ! -d "${proj_name}/data/EE_PARA" ]; then
    mkdir -p ${proj_name}/data/EE_PARA
    mkdir -p ${proj_name}/data/EE_Y
  fi
  let web_port=$s_port+0
  let admin_port=$s_port+1
  # BUG FIX: the original read "let debug_port-$s_port+7", which only
  # evaluated an expression and never assigned debug_port.
  let debug_port=$s_port+7
  case "${ap_type}" in
    was_test1)
      echo "was_test1";;
    was_test2)
      echo "was_test2";;
    was_test3)
      echo "was_test3";;
  esac
}
# Derive the database port (project base + 2) and report the chosen
# database flavour on stdout. Reads globals: s_port, db_type.
function db_ee() {
  db_port=$(( s_port + 2 ))
  case "${db_type}" in
    ora_test1) echo "ora_test1" ;;
    ora_test2) echo "ora_test2" ;;
    ora_test3) echo "ora_test3" ;;
  esac
}
# ---main---
# Drive the interactive setup: collect input, confirm, then create the
# project skeleton via apweb_ee / db_ee.
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m'
# BUG FIX: the original wrote COMMON=${pwd}, expanding an (empty) shell
# variable named "pwd" instead of running the pwd command.
COMMON=$(pwd)
if [ "$1" == "clean" ] ; then
  echo "clean folder"
  exit
fi
input_ok="no"
while [ "$input_ok" == "no" ] ; do
  get_input
  printf "\n\nproj:${RED}${proj_id}${NC}\n"
  printf " DB:${RED}$db_type${NC} AP:${RED}$ap_type${NC} MQ:${RED}${mq_type}${NC}\n\n"
  read -p "are you sure?[yes,no,quit]" input_ok
  # Re-prompt until the answer is one of yes/no/quit.
  while [ "${input_ok}" != "yes" ] && [ "${input_ok}" != "no" ] && [ "${input_ok}" != "quit" ] ; do
    read -p "are you sure?[yes,no,quit]" input_ok
  done
done
# BUG FIX: the original wrote [ !${input_ok} == "yes" ], which compares
# the literal string "!yes"/"!quit" with "yes" and therefore never fired,
# so answering "quit" did not abort.
if [ "${input_ok}" != "yes" ]; then
  exit 1
fi
# NOTE(review): this copies the compose template only when docker-compse.yml
# (sic) already exists; the intent was probably "! -f" (copy when missing).
# Preserved pending confirmation; the "compse" typo is kept so existing
# layouts keep working.
if [ -f ${proj_name}/docker-compse.yml ]; then
  mkdir -p ${proj_name}/data
  cp ${COMMON}/dc_00.yml ${proj_name}/docker-compse.yml
  cp ${COMMON}/README.md ${proj_name}/README.md
fi
apweb_ee
db_ee
| true
|
92af40651f0de2d48817139988aa082e10516b15
|
Shell
|
chimpwizards-wand/spell-model
|
/scripts/new-spell.sh
|
UTF-8
| 342
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/sh
# Parse command-line flags, then create the matching spell repository on
# GitHub via the gh CLI.
#
#   -h | --help   set help=true (currently only recorded)
#   -n | --name   spell name; exported as NAME and used in the repo name
#
# BUG FIXES vs. the original:
#  * the loop condition used the bash-only [[ ]] under #!/bin/sh, so on a
#    POSIX /bin/sh (e.g. dash) the parser silently never ran;
#  * the -h branch shifted twice (its own shift plus the shared one after
#    esac), accidentally swallowing the argument following -h.
while [ "$#" -gt 0 ]
do
key="$1"

case $key in
    -h|--help)
    help="true"
    ;;
    -n|--name)
    export NAME="$2"
    shift # past the flag's value; the shared shift below consumes the flag
    ;;
    --default)
    ;;
    *)
            # unknown option
    ;;
esac
shift # past argument or value
done

gh repo create chimpwizards-wand/spell-$NAME
|
4f3eb544c2343da2da269eec01b280797ee0fca0
|
Shell
|
davinkevin/PodcastScripts
|
/Cauet sur NRJ/Download/CauetDownload.sh
|
UTF-8
| 223
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Endlessly re-record the stream at URL $2 into dated mp3 files named after
# show title $1 under /home/kevin/Cauet/YYYY/MM/DD/.
# Usage: CauetDownload.sh <title> <stream-url>
# NOTE(review): runs forever by design (restarts wget whenever the stream
# drops); stop it with a signal.
while [ true ]; do
# Timestamp is recomputed per iteration so each capture gets its own file.
Date=`date --date now '+%Y-%m-%d - %H-%M-%S'`
wget $2 -O "/home/kevin/Cauet/$(date --date now '+%Y')/$(date --date now '+%m')/$(date --date now '+%d')/$1 - $Date.mp3" -a "/dev/null"
done
|
5a2bcedb4115d5e9449ff74b476b56f915064599
|
Shell
|
JakDar/magec
|
/data/pl/all/stage1.sh
|
UTF-8
| 912
| 3.484375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -euo pipefail
mkdir -p data/stage1/{sub,wiki,ppc}
# Normalise one input file ($1) into its stage1 counterpart ($2):
# punctuation normalisation -> strip unexpected characters -> drop blank
# lines -> map every digit to '1' (length-preserving).
stage1() {
../../tools/moses-scripts/scripts/tokenizer/normalize-punctuation.perl -l pl <"$1" |
sed -E 's/[^[:alnum:] ,\.\?\!]/ /Ig; s/ +/ /g' | # alphaa will alow for czech, arabic and so on
rg -v '^\s*$' |
sd '[0-9]' '1' >"$2" # replace all numbers to ones, keeping length
}
# exported so `sem` (GNU parallel) can invoke stage1 in its subshells
export -f stage1
# Fan the stage1 conversion out over every file under data/, at most 7 at a
# time via GNU parallel's sem, drawing a progress bar as jobs are queued.
files=$(find data -type f)
max_count=$(echo "$files" | wc -l)
BAR='########################################' # 40
EMPTY='                                        ' # 40
# BUG FIX: the original measured the bar with $(echo $BAR | wc -c), which
# also counts echo's trailing newline and yields 41 for a 40-character
# bar; ${#BAR} gives the true length.
bar_size=${#BAR}
count=0
for file in $files; do
# mirror the input path into the stage1 tree
replaced="$(echo "$file" | sed 's/\/in\//\/stage1\//')"
sem -j 7 stage1 "$file" "$replaced"
count=$((count + 1))
i=$((count * bar_size / max_count))
echo -ne "\r${BAR:0:$i}${EMPTY:$i:$bar_size} $count/$max_count" #bashisms
done
echo ""
echo "Waiting for last tasks"
sem --wait
echo "Done"
|
b5820830e5a3459f8d3301d1fee18ad51f017b34
|
Shell
|
23ua/dotfiles
|
/.zshrc
|
UTF-8
| 2,021
| 2.59375
| 3
|
[] |
no_license
|
# ~/.zshrc -- interactive zsh configuration built around oh-my-zsh.
# Path to your oh-my-zsh installation.
export ZSH=$HOME/.oh-my-zsh
# locale
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
# fix emacs-zsh integration
if [ -n "$INSIDE_EMACS" ]; then
export TERM=screen-256color
fi
# theme
# Use a plain prompt inside Emacs (agnoster's powerline glyphs render
# badly there); agnoster elsewhere.
if [ -n "$INSIDE_EMACS" ]; then
ZSH_THEME="pygmalion"
# ZSH_THEME="gentoo"
else
ZSH_THEME="agnoster"
fi
# default user for theme not to show user@host locally
DEFAULT_USER="the23ua"
# how often to auto-update (in days).
export UPDATE_ZSH_DAYS=13
# enable command auto-correction.
ENABLE_CORRECTION="false"
# change the command execution timestamp shown in the history command output.
HIST_STAMPS="yyyy-mm-dd"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
plugins=(git colorize docker sublime httpie vi-mode jira)
source $ZSH/oh-my-zsh.sh
# User configuration
export PATH="$HOME/.cargo/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin"
# export MANPATH="/usr/local/man:$MANPATH"
export EDITOR="vim"
# set Java home on OS X
#if [ $(uname) = "Darwin" ]; then
#  export JAVA_HOME=$(/usr/libexec/java_home)
#fi
# auto cd
setopt AUTO_CD
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# newer php; only for OS X
#if [ $(uname) = "Darwin" ]; then
#  export PATH="$(brew --prefix homebrew/php/php55)/bin:$PATH"
#fi
# Add path for Haskell/stack builds
export PATH="$PATH:$HOME/.local/bin"
# fix problem with molokai vim theme under tmux
if [ $(uname) = "Linux" ]; then
alias tmux="TERM=screen-256color-bce tmux"
fi
# fix ack perl package problem on arch linux
if [ $(uname) = "Linux" ]; then
export PATH="$PATH:/usr/bin/vendor_perl"
fi
alias erltags="~/.vim/bundle/vim-erlang-tags/bin/vim-erlang-tags.erl"
# source personal aliases when the file (or a symlink to it) exists
if [[ -f ~/.aliases || -h ~/.aliases ]]; then
. ~/.aliases
fi
alias py="python3"
alias pyserv="python3 -m http.server"
source $HOME/.zshenv
# NOTE(review): assumes the iTerm2 shell-integration script is installed;
# on machines without it this prints an error at shell startup.
source ~/.iterm2_shell_integration.zsh
|
5663d8addd9884964aedbfc399dff3251077683e
|
Shell
|
rvaughan/rosbot
|
/bash/handler_build
|
UTF-8
| 3,794
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash

# handler_build - builds PDF quotes and reports from XML files
#
# This script is part of the PenText framework
# https://pentext.org
#
# Copyright (C) 2016 Radically Open Security
# https://www.radicallyopensecurity.com
#
# Author(s): Peter Mosmans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.


VERSION=0.11
DOCBUILDER=/usr/local/bin/docbuilder.py
TEMPLOC=$(mktemp -d)

# These variables should be set environment-specific
[ -z $GITSERVER ] && GITSERVER=gitlab.local
[ -z $GITWEB ] && GITWEB=https://${GITSERVER}
[ -z $NAMESPACE ] && NAMESPACE=ros
BRANCH=master

# Read standard 'command line' variables:
#   $1 TARGET (quote|report), $2 REPO, $3 NAMESPACE or a -flag,
#   $4 BRANCH or a -flag, remaining args are passed through in PARMS.
[[ ! -z $1 ]] && TARGET=$1
[[ ! -z $2 ]] && REPO=$2

# Set the default PDF name based on the target name
TARGETPDF="target/$TARGET-latest.pdf"

# Reading positional parms is a bit ugly, shifting parms or getopt would be nicer
if [[ ! -z $3 ]]; then
    if [[ ! $3 == -* ]]; then
        NAMESPACE=$3
    else
        PARMS=$3
    fi
fi
if [[ ! -z $4 ]]; then
    # BUG FIX: the original tested $3 here, so the 4th argument was always
    # classified by the *3rd* argument's shape; test $4 instead.
    if [[ ! $4 == -* ]]; then
        BRANCH=$4
    else
        PARMS="$PARMS $4"
    fi
fi
if [[ $# -ge 5 ]]; then
    shift 4
    PARMS="$PARMS $@"
fi
trap cleanup EXIT QUIT
# Make sure that the temporary files are always removed
# Runs on any exit path; it clears its own traps first so the explicit
# exit below cannot re-trigger it.
cleanup() {
trap '' EXIT INT QUIT
[ -d $TEMPLOC ] && rm -rf $TEMPLOC &>/dev/null
exit
}
# As quote used to be called offer or even offer,
# this function retains backward compatibility - v0.1
# Falls back to the legacy "offerte" document name when a quote build
# finds no quote.xml in the current directory.
backwards_compatible() {
    if [ "$TARGET" = "quote" ] && [ ! -f "$TARGET.xml" ]; then
        TARGET="offerte"
    fi
}
# Clones repo using global (!) variables - v0.2
# Shallow-clones BRANCH of NAMESPACE/REPO from GITSERVER into TEMPLOC and
# leaves the shell cd'd into the checkout; aborts the script on failure.
clone_repo() {
pushd $TEMPLOC 1>/dev/null
git clone -b $BRANCH --depth=1 -q ssh://git@${GITSERVER}/${NAMESPACE}/${REPO}.git &>/dev/null
if [ ! -d $TEMPLOC/$REPO ]; then
echo "[-] could not clone repo ${NAMESPACE}/${REPO}"
exit 1
else
cd $REPO
fi
}
# Preflight checks using global (!) variables - v0.2
# Validates TARGET (quote|report) and REPO, and that docbuilder exists.
preflight_checks() {
    if ([[ $TARGET != "quote" ]] && [[ $TARGET != "report" ]]) || [ -z $REPO ]; then
        echo "Usage: build quote|report REPOSITORY [NAMESPACE [BRANCH] [-v]"
        # BUG FIX: the original's bare "exit" returned echo's status (0),
        # signalling success on a usage error.
        exit 1
    fi
    if [ ! -f $DOCBUILDER ]; then
        echo "[-] this script needs docbuilder.py ($DOCBUILDER)"
        # BUG FIX: the original only warned and carried on, deferring the
        # failure to the middle of the build; abort up front instead.
        exit 1
    fi
}
# Build the PDF with docbuilder: derive the output name from TARGET/REPO,
# run the XSLT transform, and verify the PDF actually appeared.
build() {
if [ ! -d source ]; then
echo "[-] missing necessary pentext framework files"
exit 1
fi
pushd source &>/dev/null
backwards_compatible
if ([[ $TARGET == "quote" ]] || [[ $TARGET == "offerte" ]]); then
# off-<name> repositories produce quote_<name>.pdf
TARGETPDF="target/quote_${REPO/off-/}.pdf"
elif [[ $TARGET == "report" ]]; then
# pen-<name> repositories produce report_<name>.pdf
TARGETPDF="target/report_${REPO/pen-/}.pdf"
fi
$DOCBUILDER -c -i $TARGET.xml -o ../$TARGETPDF -x ../xslt/generate_$TARGET.xsl $PARMS
if [[ $? -ne 0 ]]; then
echo "[-] Sorry, failed to parse $TARGET. Use \`build $TARGET $REPO $NAMESPACE $BRANCH -v\` for more information."
exit 1
fi
popd &>/dev/null
if [ ! -f $TARGETPDF ]; then
echo "[-] hmmm... failed to build PDF file (could not find $TARGETPDF)"
exit 1
fi
}
# Commit the generated PDF (plus optional waiver/exec-summary PDFs when
# present) and push it back; the &>/dev/null on the optional adds hides
# "no match" errors when those files were not produced.
add_to_repo() {
git add $TARGETPDF
git add target/waiver_?*.pdf &>/dev/null
git add target/execsummary.pdf &>/dev/null
git commit -q -m "$TARGETPDF proudly manufactured using ChatOps" &>/dev/null
git push -q >/dev/null
}
# Main flow: validate inputs, announce, clone the repo, build the PDF,
# then commit and push it back.
preflight_checks
echo "builder v$VERSION - Rocking your world, one build at a time..."
clone_repo
build
add_to_repo
echo "[+] listo! Check out $GITWEB/$NAMESPACE/$REPO/raw/$BRANCH/$TARGETPDF"
exit 0
| true
|
e90acae0aa6cd480d9afc1006e49cb514b8c8183
|
Shell
|
Mittemi/fog
|
/infrastructure/base-image/docker-entrypoint.sh
|
UTF-8
| 1,220
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: start filebeat for log shipping, run the main
# application, then linger briefly so the final logs still get shipped.

# Start the first process
cd /beat
filebeat -e &
status=$?
# NOTE(review): $? after "cmd &" reports only whether the job could be
# spawned (effectively always 0), not whether filebeat itself started --
# this check cannot catch a filebeat failure; a real check would need
# "wait" or a process probe.
if [ $status -ne 0 ]; then
  echo "Failed to start filebeat: $status"
  exit $status
fi

# Start the second process
/usr/share/run_app.sh
status=$?
# Give filebeat a minute to ship the application's last log lines.
echo "Application stopped, wait 60 seconds to ship some logs"
sleep 60
if [ $status -ne 0 ]; then
  echo "Failed to start application: $status"
  echo "Failed to start application" > /logs/container.log
  exit $status
fi

# Naive check runs checks once a minute to see if either of the processes exited.
# This illustrates part of the heavy lifting you need to do if you want to run
# more than one service in a container. The container will exit with an error
# if it detects that either of the processes has exited.
# Otherwise it will loop forever, waking up every 60 seconds

#while /bin/true; do
#  PROCESS_1_STATUS=$(ps aux |grep -q filebeat |grep -v grep)
#  PROCESS_2_STATUS=$(ps aux |grep -q java | grep -v grep)
#  # If the greps above find anything, they will exit with 0 status
#  # If they are not both 0, then something is wrong
#  if [ $PROCESS_1_STATUS -ne 0 -o $PROCESS_2_STATUS -ne 0 ]; then
#    echo "One of the processes has already exited."
#    exit -1
#  fi
#  sleep 15
#done
| true
|
33721188fd3c15dfef10dc5a53bdb0a594e30622
|
Shell
|
zhaofutao/install_python
|
/install_python.sh
|
UTF-8
| 886
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/sh
# Build and install Python 2.7.14 (plus setuptools 39.0.1 and pip 1.5.4)
# from source into a Python-2.7.14 prefix under the current directory.
# BUG FIX: shebang was `#!/bin/shell`, which is not a real interpreter.

# BUG FIX: `curl -O <file>` — -O takes no argument, so the filename was
# treated as a second URL; use -o to name the local output file.
curl -o Python-2.7.14.tgz "https://www.python.org/ftp/python/2.7.14/Python-2.7.14.tgz"
tar zxvf Python-2.7.14.tgz
pathname=`pwd`
cd Python-2.7.14 && ./configure -prefix=${pathname}/Python-2.7.14 --enable-optimizations && make -j && make install
cd -
# BUG FIX: wget treats a bare trailing filename as an extra URL; use -O to
# name the downloaded file.
wget -O setuptools-39.0.1.zip "https://pypi.python.org/packages/72/c2/c09362ab29338413ab687b47dab03bab4a792e2bbb727a1eb5e0a88e3b86/setuptools-39.0.1.zip#md5=75310b72ca0ab4e673bf7679f69d7a62"
unzip setuptools-39.0.1.zip
cd setuptools-39.0.1
${pathname}/Python-2.7.14/bin/python setup.py install
cd -
wget "https://pypi.python.org/packages/source/p/pip/pip-1.5.4.tar.gz#md5=834b2904f92d46aaa333267fb1c922bb" --no-check-certificate
tar zxvf pip-1.5.4.tar.gz
cd pip-1.5.4
${pathname}/Python-2.7.14/bin/python setup.py install
cd -
echo "export PYTHONPATH=${pathname}/Python-2.7.14"
echo "export PIP_HOME=${pathname}/pip-1.5.4"
| true
|
99dd2ea044db65134e47b1ac7270df3ccb7d1a04
|
Shell
|
ZhizhouTian/buildroot-busybox
|
/build.sh
|
UTF-8
| 7,582
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/sh
# Print command-line help for this build script to stdout.
# The heredoc body is user-facing output and must stay as-is.
usage()
{
cat <<EOM
Usage:
build [-a] [-j <N>]
build -m <module> [-j <N>]
build -p
build -c
build -h
-a
Build all components and output the ramdisk.img. (default)
-m <module>
Build the specified module and output to target dir
in <top>/buildroot/output/target. The module may be
the following names,
rootfs -- rootfs created from buildroot
ltp -- linux test project
utest -- customized testsuites
-j
Assign the paralleled build jobs number. (4 by default)
-p
Pack the ramdisk.img from the target dir.
-q
Pack the ramdisk.img into the boot.img to be sprdiskboot.img.
-c
Clean all the build objects and output
-h
Show this help
EOM
}
# check return error
# Abort the whole script with status 2 when the previous command failed,
# printing "Error: <args>" on stderr.  No-op when the last status was 0.
check_err()
{
    local status=$?
    if [ "$status" -eq 0 ]; then
        return 0
    fi
    echo "Error: $*" >&2
    exit 2
}
# Like check_err, but intended for argument errors: also prints a blank
# line (stdout) and the usage text before exiting with status 2.
check_usage()
{
    local status=$?
    [ "$status" -eq 0 ] && return 0
    echo
    echo "Error: $*" >&2
    usage
    exit 2
}
# Build the buildroot-based root filesystem.
# Globals (read): SRC_ROOTFS, MAKE, OUTPUTDIR, JOBS.
build_rootfs()
{
    echo "==== build_rootfs ===="
    echo $SRC_ROOTFS
    cd $SRC_ROOTFS
    check_err "$SRC_ROOTFS is not found!"
    if [ ! -e ./output/.config ]; then
        # BUG FIX: the original called check_err here, but $? was 0 (the
        # [ ] test had just *succeeded*), so nothing was printed and the
        # bare `exit` returned 0 — a missing .config failed silently.
        echo "Error: .config is missing!" >&2
        exit 2
    fi
    $MAKE O=$OUTPUTDIR -j $JOBS
    check_err "Failed to build rootfs!"
    echo "==== build_rootfs done! ===="
}
# Cross-compile and install the Linux Test Project suite.
# Globals (read): SRC_LTP, MAKE, OUTPUTDIR, JOBS, CROSS_COMPILE, INSTDIR.
build_ltp()
{
echo "==== build_ltp ===="
cd $SRC_LTP
check_err "$SRC_LTP is not found!"
# Generate autotools files only on a fresh checkout.
if [ ! -e ./configure ]; then
$MAKE O=$OUTPUTDIR autotools
fi
# Configure only once; config.mk acts as the stamp file.
if [ ! -e include/mk/config.mk ]; then
# e.g. CROSS_COMPILE=arm-linux-gnueabihf- -> platform=arm-linux
platform=$(echo ${CROSS_COMPILE%%-*})-linux
./configure \
CC=${CROSS_COMPILE}gcc \
AR=${CROSS_COMPILE}ar \
STRIP=${CROSS_COMPILE}strip \
RANLIB=${CROSS_COMPILE}ranlib \
--build=i686-pc-linux-gnu \
--target=$platform --host=$platform \
--prefix=$INSTDIR/opt/ltp
check_err "Failed to configure ltp!"
fi
$MAKE O=$OUTPUTDIR -j $JOBS
check_err "Failed to build ltp!"
$MAKE O=$OUTPUTDIR install
check_err "Failed to install ltp!"
echo "==== build_ltp done! ===="
}
# Install the in-house test suites; records the cross compiler into a
# utest.config consumed by the suite's own build.
build_utest()
{
echo "==== build_utest ===="
cd $SRC_UTEST
check_err "$SRC_UTEST is not found!"
if [ ! -d "$INSTDIR/../build" ]; then
mkdir -p $INSTDIR/../build
fi
echo "CROSS_COMPILE := $CROSS_COMPILE" > $INSTDIR/../build/utest.config
$MAKE O=$OUTPUTDIR install
check_err "Failed to install utest!"
echo "==== build_utest done! ===="
}
# Strip all executables in the target dir and pack it into $TOPDIR/$IMG
# via fakeroot (so file ownership inside the image is root).
pack_ramdisk()
{
echo "==== pack_ramdisk ===="
cp $TOPDIR/utils/adbd $INSTDIR/bin
cd $INSTDIR
check_err "$INSTDIR is not found!"
rm -rf THIS_IS_NOT_YOUR_ROOT_FILESYSTEM
# strip all binaries
# NOTE: `-perm /111` (any exec bit) is GNU find syntax.
bins=`find * -type f -perm /111`
for exe in $bins; do
${CROSS_COMPILE}strip $exe 2>/dev/null
done
fakeroot -- $TOPDIR/utils/mkrootfs.sh $TOPDIR/$IMG
check_err "Failed to pack the ramdisk !"
echo "$TOPDIR/$IMG created!"
echo "==== pack_ramdisk done ===="
}
# Unpack an Android boot.img, swap in our ramdisk, and repack it as
# sprdiskboot.img (keeping the original device tree image when present).
pack_sprdisk_2_bootimage()
{
if [ -f "$TOPDIR/$IMG" -a -f "$TOPDIR/boot.img" ]; then
mkdir $TOPDIR/out
cp -rf $TOPDIR/$IMG $TOPDIR/out
$TOPDIR/utils/unpackbootimg -i $TOPDIR/boot.img -o $TOPDIR/out
check_err "Failed to unpack the bootimage !"
mv $TOPDIR/out/$IMG $TOPDIR/out/boot.img-ramdisk.gz
if [ -f "$TOPDIR/out/boot.img-dt" ]; then
withdtimg="--dt $TOPDIR/out/boot.img-dt"
else
withdtimg=
fi
$TOPDIR/utils/mkbootimg --kernel $TOPDIR/out/boot.img-zImage --ramdisk $TOPDIR/out/boot.img-ramdisk.gz --cmdline "console=ttyS1,115200n8" --base 0x00000000 $withdtimg --output $TOPDIR/sprdiskboot.img
check_err "Failed to pack the bootimage !"
rm -rf $TOPDIR/out
echo "$TOPDIR/sprdiskboot.img created!"
else
echo "Be lack of img file."
fi
}
# Fetch boot.img from the Android build output, then repack it.
pack_sprdiskboot()
{
echo "==== pack_sprdiskboot ===="
cp -rf $ANDROID_PRODUCT_OUT/boot.img $TOPDIR
pack_sprdisk_2_bootimage
echo "==== pack_sprdisk done ===="
}
# Rebuild userdata.img so it contains the data/ tree (including sprdisk).
# NOTE(review): $sprdir is set by move_img_2_android (the caller) — this
# function depends on that call order.  The $mkfs variable is computed but
# make_ext4fs is invoked by bare name, and $ramdisksize is unused —
# presumably leftovers; confirm before relying on them.
repack_userdata_img()
{
userdatainfo=$ANDROID_PRODUCT_OUT/obj/PACKAGING/userdata_intermediates/userdata_image_info.txt
userdatasize=2000000000
ramdisksize=0
# Parse "userdata_size = N" from the image info file.
while read line
do
item=$(echo $line | (awk -F "=" '{print $1}') | tr -d ' ')
if [ $item = "userdata_size" ] ; then
userdatasize=$(echo $line | (awk -F "=" '{print $2}') | tr -d ' ')
userdatasize=$(expr $userdatasize + 0)
break
fi
done < $userdatainfo
mkfs=$ANDROID_HOST_OUT/bin/make_ext4fs
if [ -f $mkfs -a -d $sprdir -a -f "$ANDROID_PRODUCT_OUT/userdata.img" ]; then
make_ext4fs -s -T -1 -S $ANDROID_PRODUCT_OUT/root/file_contexts -l $userdatasize -a data $ANDROID_PRODUCT_OUT/userdata.img $ANDROID_PRODUCT_OUT/data
echo "==== repack_userdata done ===="
fi
}
# Copy the generated images into the Android product output tree and
# refresh userdata.img.
move_img_2_android()
{
sprdir=$ANDROID_PRODUCT_OUT/data/sprdisk
if [ ! -d $sprdir ]; then
mkdir -p $sprdir
fi
if [ -f $TOPDIR/$IMG ]; then
cp -rf $TOPDIR/$IMG $sprdir
else
echo "==== no ramdisk.img generated, not move to android. ===="
fi
if [ -f $TOPDIR/sprdiskboot.img ]; then
cp -rf $TOPDIR/sprdiskboot.img $ANDROID_PRODUCT_OUT
else
echo "==== no sprdiskboot.img generated, not move to android. ===="
fi
repack_userdata_img
}
# Pack sprdiskboot.img only when an Android build output and its boot.img
# both exist; otherwise explain which prerequisite is missing.
judge_sprdiskboot()
{
    if [ -z "${ANDROID_PRODUCT_OUT}" ]; then
        echo "==== no product generated, not pack sprdiskboot. ===="
        return
    fi
    if [ ! -f "$ANDROID_PRODUCT_OUT/boot.img" ]; then
        echo "==== no boot.img generated, not pack sprdiskboot. ===="
        return
    fi
    pack_sprdiskboot
    move_img_2_android
}
# Full pipeline: rootfs + LTP + utest, then pack and (optionally) produce
# the Android boot image.  Invoked via `build_$MODULE` when MODULE=all.
build_all()
{
build_rootfs
build_ltp
build_utest
pack_ramdisk
judge_sprdiskboot
}
# Remove all build objects, output directories and generated configs.
clean_objs()
{
echo "==== clean_objs ===="
cd $SRC_UTEST
$MAKE O=$OUTPUTDIR -s clean
cd $SRC_ROOTFS
$MAKE O=$OUTPUTDIR -s distclean
cd $SRC_LTP
$MAKE O=$OUTPUTDIR -s distclean
if [ -d "$OUTPUTDIR" ]; then
rm -rf $OUTPUTDIR
fi
if [ -f "$SRC_ROOTFS/.config" ]; then
rm -rf $SRC_ROOTFS/.config
fi
if [ -f "$SRC_ROOTFS/..config.tmp" ]; then
rm -rf $SRC_ROOTFS/..config.tmp
fi
echo "==== clean_objs done ===="
}
# ---- Global defaults and source-tree layout ----
MAKE=make
JOBS=4
MODULE=all
IMG=ramdisk.img
TOPDIR=$(dirname `readlink -f $0`)
SRC_ROOTFS=$TOPDIR/buildroot
SRC_LTP=$TOPDIR/ltp
SRC_UTEST=$TOPDIR/testsuites
# ---- Select toolchain from the architecture given as $1 ----
# NOTE(review): the x86/x86_64 and mips64 arms set variables and then
# `exit 0`, i.e. those architectures are effectively disabled — confirm
# whether that is intentional.
case $1 in
x86|x86_64)
TOOLCHAIN=$TOPDIR/toolchain/x86_64-linux-android-4.9
CROSS_COMPILE=x86_64-linux-android-
exit 0
;;
arm)
TOOLCHAIN=$TOPDIR/toolchain/linaro-arm-linux-gcc
CROSS_COMPILE=arm-linux-gnueabihf-
;;
arm64)
TOOLCHAIN=$TOPDIR/toolchain/aarch64-linux-gnu
CROSS_COMPILE=aarch64-linux-gnu-
;;
mips)
TOOLCHAIN=$TOPDIR/toolchain/mipsel-linux-android-4.8
CROSS_COMPILE=mipsel-linux-android-
;;
mips64)
TOOLCHAIN=$TOPDIR/toolchain/mips64el-linux-android-4.9
CROSS_COMPILE=mips64el-linux-android-
exit 0
;;
*)
echo "Can't find toolchain for unknown architecture: $1"
exit 0
;;
esac
SPRD_ARCH=$1
export PATH=$PATH:$TOOLCHAIN/bin
# ---- Choose install/output dirs: inside the Android build tree when an
# Android environment is sourced, otherwise under buildroot/output ----
if [ -d "$ANDROID_PRODUCT_OUT" ]; then
INSTDIR=$ANDROID_PRODUCT_OUT/sprdisk/target
export PATH=$PATH:$ANDROID_PRODUCT_OUT/sprdisk/host/usr/bin
OUTPUTDIR=$ANDROID_PRODUCT_OUT/sprdisk
else
INSTDIR=$SRC_ROOTFS/output/target
export PATH=$PATH:$SRC_ROOTFS/output/host/usr/bin
OUTPUTDIR=$SRC_ROOTFS/output
fi
# ---- Option parsing: options start at $2 ($1 was the architecture) ----
while [ -n "$2" ]; do
case "$2" in
-a)
MODULE=all
;;
-m)
test -n "$3"
check_usage "No module is specified!"
MODULE=$3
shift
;;
-j)
test -n "$3"
check_usage "No job number is specified!"
JOBS=$3
shift
;;
-p)
pack_ramdisk
judge_sprdiskboot
exit 0
;;
-q)
pack_sprdisk_2_bootimage
exit 0
;;
-c)
clean_objs
exit 0
;;
-h)
usage
exit 0
;;
*)
echo
echo "Unknown options: $2"
usage
exit 1
;;
esac
shift
done
if [ ! -d "$ANDROID_PRODUCT_OUT" ]; then
mkdir -p $INSTDIR
check_err "Failed to create $INSTDIR!"
fi
# Dispatch: build_all, build_rootfs, build_ltp or build_utest.
build_$MODULE
| true
|
b32bf2dd7a5d02732e8426c821ecd64186e5198f
|
Shell
|
github188/test
|
/performance/self-run/test-raid.sh
|
UTF-8
| 889
| 3.171875
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Endless RAID soak test: alternates sequential-write, sequential-read and
# mixed random I/O phases with xdd, logging each phase to a timestamped
# file under $result_dir.
#   usage: test-raid.sh <mddev> [timelimit-seconds]   (default 1800s/phase)
mddev=""
timelimit=1800
result_dir="/root/xdd-result"
[ -z "$1" ] && echo "usage $@ <mddev> [timelimit]" && exit 1
mddev="$1"
[ ! -z "$2" ] && timelimit="$2"
mkdir -p $result_dir
while :;
do
# sequential write for one phase
tmp="$result_dir/xdd-write-$(date +%Y%m%d-%H%M%S)"
xdd -op write -targets 1 $mddev -dio -datapattern random -reqsize 500000 -mbytes 16000000 -passes 1 -verbose -timelimit $timelimit > "$tmp" 2>&1
# sequential read for one phase
tmp="$result_dir/xdd-read-$(date +%Y%m%d-%H%M%S)"
xdd -op read -targets 1 $mddev -dio -datapattern random -reqsize 500000 -mbytes 16000000 -passes 1 -verbose -timelimit $timelimit > "$tmp" 2>&1
# mixed random read/write
tmp="$result_dir/xdd-random-$(date +%Y%m%d-%H%M%S)"
# BUG FIX: the redirection operator was missing here, so "$tmp" was passed
# to xdd as a stray argument and no log file was ever written.
xdd -rwratio 50 -targets 1 $mddev -dio -datapattern random -seek random -reqsize 100 -mbytes 1600000 -passes 1 -verbose -timelimit $timelimit > "$tmp" 2>&1
done
| true
|
abd439a551b3d4c6b519e39f044bb83bed45f721
|
Shell
|
ajt/dotfiles
|
/brew.sh
|
UTF-8
| 2,451
| 2.921875
| 3
|
[] |
no_license
|
# Install command-line tools using Homebrew
# Make sure we’re using the latest Homebrew
brew update
# Upgrade any already-installed formulae
brew upgrade
BREW_PREFIX=$(brew --prefix)
# Install GNU core utilities (those that come with OS X are outdated)
# Don’t forget to add `$(brew --prefix coreutils)/libexec/gnubin` to `$PATH`.
brew install coreutils
ln -s "${BREW_PREFIX}/bin/gsha256sum" "${BREW_PREFIX}/bin/sha256sum"
# Install some other useful utilities like `sponge`
brew install moreutils
# Install GNU `find`, `locate`, `updatedb`, and `xargs`, `g`-prefixed
brew install findutils
# Install GNU `sed`, overwriting the built-in `sed`
brew install gnu-sed
# Install Bash 4
# Note: don’t forget to add `/usr/local/bin/bash` to `/etc/shells` before running `chsh`.
brew install bash
brew install bash-completion
# Switch to using brew-installed bash as default shell
if ! fgrep -q "${BREW_PREFIX}/bin/bash" /etc/shells; then
echo "${BREW_PREFIX}/bin/bash" | sudo tee -a /etc/shells;
chsh -s "${BREW_PREFIX}/bin/bash";
fi;
# generic colouriser http://kassiopeia.juls.savba.sk/~garabik/software/grc/
brew install grc
brew install gnupg
# Install wget with IRI support
brew install wget
# Install more recent versions of some OS X tools
brew install vim
brew install grep
brew install screen
# brew install z # let me handle this
brew install entr
brew install hub
# mtr - ping & traceroute. best.
brew install mtr
# allow mtr to run without sudo
mtrlocation=$(brew info mtr | grep Cellar | sed -e 's/ (.*//') # e.g. `/Users/paulirish/.homebrew/Cellar/mtr/0.86`
sudo chmod 4755 $mtrlocation/sbin/mtr
sudo chown root $mtrlocation/sbin/mtr
# Install other useful binaries
brew install the_silver_searcher
brew install fzf
brew install ack
brew install git
brew install gs
brew install p7zip
brew install imagemagick
brew install node # This installs `npm` too using the recommended installation method
brew install pv
brew install rename
brew install tree
brew install zopfli
brew install ffmpeg
brew install terminal-notifier
brew install pidcat
brew install ncdu
# more
brew install python
brew install htop-osx
# brew install irssi # goodby sweet prince...
brew install jpeg
brew install knock
# (p7zip was already installed above — duplicate entry removed)
brew install sqlite
brew install ssh-copy-id
brew install tmux
brew install tmuxinator
brew install reattach-to-user-namespace
# Remove outdated versions from the cellar
brew cleanup
| true
|
aa96a84036f06d7821bb81af84642ac5a23c3d11
|
Shell
|
aidanharris/You-Dont-Know-JS
|
/makeEbook.sh
|
UTF-8
| 1,141
| 3.84375
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Build one EPUB per book directory under the given parent directory using
# pandoc, writing results into <parent>/epubs/.
# NOTE(review): the doubled quotes below (e.g. ""Processing "$CWD"..."")
# are no-ops, and building $CMD as a string executed via `bash -c` is
# fragile for paths containing quotes — left byte-identical because the
# escaping is intricate; confirm before restyling.
set -e
if [ -z "$1" ]
then
echo "Usage: "
echo " $0 parent directory of ebooks to build e.g:"
echo ' '"$0 " '"'"$PWD"'"'
exit 1
fi
printf '\n'
# One iteration per book directory; skip the output dir and the survey site.
for d in "$1"/*/
do
if [[ $(basename "$d") != "epubs" ]]
then
if [[ $(basename "$d") != "kickstarter-survey-site" ]]
then
CWD=""
# Locate the book's directory via its first non-README entry.
for f in "$1/$(basename "$d")"/*
do
if [[ "$(basename "$f")" != "README.md" ]]
then
if [[ "$CWD" == "" ]]
then
CWD=""$(dirname "$f")""
fi
fi
done
echo ""Processing "$CWD"...""
# Use cover.jpg as the EPUB cover when present.
if [ -f "$CWD"/cover.jpg ]
then
EPUBARTWORK=""--epub-cover-image=\'"$CWD"/cover.jpg\'""
else
echo "Cover is missing!"
echo "We tried to find: "
echo \'"$CWD"/cover.jpg\'
EPUBARTWORK=""
fi
# Assemble and run the pandoc command (title.txt + all markdown files).
CMD=$(echo ""cd \'$(dirname "$CWD")/$(basename "$CWD")\' \&\& pandoc -S -o \'$(dirname "$CWD")/epubs/$(basename "$CWD").epub\' \'"$CWD"/title.txt\' "$EPUBARTWORK" \'"$CWD"\'/*.md"")
bash -c "$CMD"
if [ -f "$(dirname "$CWD")/epubs/$(basename "$CWD").epub" ]
then
echo ""Markdown exported to \'$(dirname "$CWD")/epubs/$(basename "$CWD").epub\'""
printf '\n'
fi
fi
fi
done
| true
|
1e54ac9d8ac9c8b546ac259fdf1135d553901968
|
Shell
|
jildertmiedema/Vaprobash
|
/scripts/nodejs.sh
|
UTF-8
| 615
| 3.234375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Vagrant provisioner: install Node.js from the chris-lea PPA and relocate
# npm's global package prefix into the user's home directory.
echo ">>> Adding PPA's and Installing Node.js"
# Add repo for latest Node.js
sudo add-apt-repository -y ppa:chris-lea/node.js
# Update
sudo apt-get update
# Install node.js
sudo apt-get install -y nodejs
# Change where npm global packages location
npm config set prefix ~/npm
# Add new npm global packages location to PATH
printf "\n# Add new npm global packages location to PATH\n%s" 'export PATH=$PATH:~/npm/bin' >> ~/.bash_profile
# Add new npm root to NODE_PATH
printf "\n# Add the new npm root to NODE_PATH\n%s" 'export NODE_PATH=$NODE_PATH:~/npm/lib/node_modules' >> ~/.bash_profile
| true
|
17fdc68ba4cddac8b9c30b31ac14a28b8472eaf5
|
Shell
|
JohnOmernik/zeta
|
/1_remote_zetaprep.sh
|
UTF-8
| 2,463
| 4.15625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Step 1 of the Zeta cluster bring-up: discover the cluster's node IPs via
# the initial host, build the package tarball, and copy keys/scripts over.
# Requires cluster.conf to define IUSER, IHOST and PRVKEY.

# Change to the root dir
cd "$(dirname "$0")"
# Make sure you edit cluster.conf prior to running this.
. ./cluster.conf
##########################
# Get a node list from the connecting node, save it to nodes.list
# Run the Package Manager for a clean copy of packages
# Upload the private key to the node
# Upload the runcmd.sh, nodes.list, and cluster.conf, install_scripts.list files to the cluster
# Upload zeta_packages.tgz to the cluster
# Upload the numbered scripts to the cluster
# Provide instructions on the next step
##########################
SSHHOST="${IUSER}@${IHOST}"
# Since we use these a lot I short cut them into variables
SCPCMD="scp -i ${PRVKEY}"
SSHCMD="ssh -i ${PRVKEY} -t ${SSHHOST}"
#########################
# Get a list of IP addresses of local nodes
NODES=$($SSHCMD -o StrictHostKeyChecking=no "sudo maprcli node list -columns ip")
if [ "$NODES" == "" ]; then
echo "Did not get list of nodes from remote cluster"
echo "Result of NODES: $NODES"
echo "Cannot proceed without NODES"
exit 1
fi
rm -f nodes.list
touch nodes.list
# Keep only tokens that look like dotted-quad IP addresses.
for n in $NODES
do
g=$(echo $n|grep -E "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}")
if [ "$g" != "" ]; then
echo $g|cut -d"," -f1 >> ./nodes.list
fi
done
cat nodes.list
NODE_CNT=$(cat ./nodes.list|wc -l)
# A usable cluster needs at least 3 nodes.
if [ ! "$NODE_CNT" -gt 2 ]; then
# BUG FIX: the message claimed "not greater than 3" while the test above
# rejects counts <= 2, i.e. requires at least 3 nodes.
echo "Node Count must be at least 3"
echo "Node Count: $NODE_CNT"
exit 1
fi
##########################
# Build Packages
echo "Running the Packager to ensure we have the latest packages"
cd package_manager
./package_tgzs.sh
cd ..
#####################
# Copy private key
$SCPCMD ${PRVKEY} ${SSHHOST}:/home/${IUSER}/.ssh/id_rsa
# Copy next step scripts and helpers
$SCPCMD runcmd.sh ${SSHHOST}:/home/${IUSER}/
$SCPCMD nodes.list ${SSHHOST}:/home/${IUSER}/
$SCPCMD install_scripts.list ${SSHHOST}:/home/${IUSER}/
$SCPCMD cluster.conf ${SSHHOST}:/home/${IUSER}/
$SCPCMD zeta_packages.tgz ${SSHHOST}:/home/${IUSER}/
$SCPCMD 2_zeta_user_prep.sh ${SSHHOST}:/home/${IUSER}/
SCRIPTS=`cat ./install_scripts.list`
for SCRIPT in $SCRIPTS ; do
$SCPCMD $SCRIPT ${SSHHOST}:/home/${IUSER}/
done
$SSHCMD "chmod +x runcmd.sh"
$SSHCMD "chmod +x 2_zeta_user_prep.sh"
echo "Cluster Scripts have been prepped."
echo "Log into cluster node and execute user prep script"
echo ""
echo "Login to initial node:"
echo "> ssh -i ${PRVKEY} $SSHHOST"
echo ""
echo "Initiate next step:"
echo "> ./2_zeta_user_prep.sh"
| true
|
64d949d8edcfdbdf69abfb6de4146a0eb20de4ee
|
Shell
|
durongze/shellscript
|
/django/install_django.sh
|
UTF-8
| 4,162
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# NOTE(review): this file is a step-by-step Django 1.x + Apache/mod_wsgi
# setup walkthrough, NOT an executable script — the embedded Python and
# Apache-config snippets below would fail if bash actually ran this file.
#1.install
sudo python -m pip install "django<2"
#2.check
python
#>>> import django
#>>> django.VERSION
#(1, 6, 0, 'final', 0)
#>>>
#>>> django.get_version()
#'1.6.0'
#3. Create a new project
django-admin.py startproject http
# After creation the project has the following directory layout
#http
#├── manage.py
#└── http
#├── __init__.py
#├── settings.py
#├── urls.py
#└── wsgi.py
#4. Create a new application
cd http
python manage.py startapp httpmodule # httpmodule is the name of the app
#5. Edit the configuration
#views.py # main file of this project; all the request logic lives here
#urls.py # maps URLs to views
#settings.py # project settings
#wsgi.py # needed for the Apache deployment
#5.1 Edit /http/http/settings.py
# Add our newly defined app to INSTALLED_APPS in settings.py
# (Python snippet — paste into settings.py, do not run in bash)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'httpmodule',
)
#5.2 Edit /http/http/urls.py (Python snippet)
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from httpmodule import views as http_views
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'http.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^get/$', http_views.get, name='get'), # new "get" route, backed by views.py
url(r'^set/$', http_views.set, name='set'), # new "set" route, backed by views.py
)
# Once these routes are defined, the matching HTTP requests become:
#http://ip:port/get | http://ip:port/set
#5.3 Edit wsgi.py
# This file works together with Apache; install the wsgi support packages first
sudo apt-get install apache2-dev # install when apxs is missing
sudo pip install mod_wsgi
# If the install fails, build mod_wsgi from source; note that different
# Python versions may need different mod_wsgi versions
# Then edit http/http/wsgi.py (Python snippet)
import os
from os.path import join,dirname,abspath
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "http.settings")
PROJECT_DIR = dirname(dirname(abspath(__file__)))
import sys
sys.path.insert(0,PROJECT_DIR)
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
#5.4 Edit http/httpmodule/views.py (Python snippet)
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def set(request):
# To do
#example:
message = request.GET.get('a','error')
return HttpResponse(message)
def get(requset):
# To do
return HttpResponse(result)
# "To do" is where your own logic goes; you can also call your own python helpers.
#request: the incoming HTTP request object
# e.g. for the request http://ip:port/set?a=hello
# message receives the value "hello"
# HttpResponse(message) returns it; a browser should then display "hello"
#6. Configure httpd.conf
# Edit /etc/httpd/conf/httpd.conf # this is the config file for a yum install
# Append the following at the end of the config file (Apache snippet)
<VirtualHost *:8000>
ServerName example.com
ServerAlias example.com
ServerAdmin example@163.com
Alias /httpmodule /home/smart/web/http/httpmodule # change to your path
<Directory /home/smart/web/http/httpmodule> # change to your path
Options Indexes FollowSymLinks
Order deny,allow
Allow from all
</Directory>
WSGIScriptAlias / /home/smart/web/http/http/wsgi.py # change to your path
#WSGIDaemonProcess www.example.com python-path=/usr/local/lib/python2.6/site-packages
<Directory /home/smart/web/http/http> # change to your path
<Files wsgi.py>
Order deny,allow
Allow from all
</Files>
</Directory>
</VirtualHost>
#7. Then start httpd
service httpd start
#8. sudo python manage.py migrate
#9. sudo python manage.py runserver
| true
|
3cff079ba2ad860fac992259397451fe6d512f3c
|
Shell
|
pbuyankin/openshift
|
/resources/action_hooks/pre_build
|
UTF-8
| 1,507
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# OpenShift pre-build hook: make sure the Play framework version required
# by the app (VERSION from load_config) is installed under
# OPENSHIFT_DATA_DIR, downloading and unpacking it when absent and
# pruning any other installed play versions.
# debug
# set -x

#loads VERSION, DEPS_PARAMS, ID, LOG_FILE, PLAY_COMMAND="${OPENSHIFT_DATA_DIR}play-${VERSION}/play"
. ${OPENSHIFT_REPO_DIR}.openshift/action_hooks/load_config
rc=$?
if [[ $rc -ne 0 ]]; then
    # BUG FIX: the original ran `exit $?` after testing $?; by then $? was
    # the (successful) test's status, so a failed load_config exited 0.
    exit $rc
fi
PLAY_PATH="play-$VERSION/"
# use tmp file just in case it hangs up
PLAY_ARCH="${OPENSHIFT_TMP_DIR}play-$VERSION.zip"
cd "$OPENSHIFT_DATA_DIR"
if [[ -d $PLAY_PATH ]]; then
    echo "Play $VERSION already installed at `pwd`/$PLAY_PATH"
else
    echo "Installing play $VERSION"
    if [[ -f $PLAY_ARCH ]]; then
        rm $PLAY_ARCH
    fi
    PLAY_URL="http://download.playframework.org/releases/play-$VERSION.zip"
    curl -o $PLAY_ARCH $PLAY_URL
    if [[ ! -f $PLAY_ARCH ]]; then
        echo "Error downloading play $VERSION from $PLAY_URL"
        exit 1
    fi
    unzip "$PLAY_ARCH"
    rm $PLAY_ARCH
    PLAY_FULL_PATH="${OPENSHIFT_DATA_DIR}${PLAY_PATH}"
    if [[ ! -d $PLAY_FULL_PATH ]]; then
        echo "Error installing play $VERSION to $PLAY_FULL_PATH"
        exit 1
    fi
    #verify that the desired version installed successfully
    if [[ ! `${PLAY_FULL_PATH}play version | tail -1` = $VERSION ]]; then
        echo "Error installing play $VERSION to $PLAY_FULL_PATH"
        rm -fr $PLAY_FULL_PATH
        exit 1
    fi
    #cleanup, remove other play versions
    # BUG FIX: the original glob `[play-]*` is a *character class* matching
    # any name starting with p, l, a, y or '-', which could remove
    # unrelated directories; `play-*` matches only play installs.
    # (Also: `exit -1` above was replaced with the portable `exit 1`.)
    for old in play-*; do
        # the -d test also skips the literal pattern when nothing matches
        if [[ -d $old ]]; then
            if [[ $old = "play-$VERSION" ]]; then
                echo "Skipping current version version $old"
            else
                echo "Removing version $old"
                rm -fr $old
            fi
        fi
    done
fi
exit 0
| true
|
ba2172c690cb72d4aeacdf28ca99b2f20143b3b8
|
Shell
|
AOSXAP/reqless
|
/core/build.sh
|
UTF-8
| 504
| 2.75
| 3
|
[] |
no_license
|
# No argument: compile core.cpp against OpenSSL (adding Win32 libraries and
# local OpenSSL paths under MSYS/win32) and run the resulting binary.
# "code": count lines of git-tracked files ("code original" excludes
# package metadata and build output).
if [ "$1" == "" ]; then
if [ "$OSTYPE" == "win32" ] || [ "$OSTYPE" == "msys" ]; then
g++ core.cpp -o core.exe -I C:/OpenSSL/include -L C:/OpenSSL/lib -lssl -lcrypto -lws2_32 -lgdi32 && ./core.exe
else
g++ core.cpp -o core -lssl -lcrypto && ./core
fi
elif [ "$1" == "code" ]; then
if [ "$2" == "original" ]; then
git ls-files | grep -v -E "package.json|package-lock.json|node/build|.gitignore" | xargs wc -l
else
git ls-files | xargs wc -l
fi
fi
| true
|
bb0d510ab04e95416d57502457906d46b496dd10
|
Shell
|
LeeGeunHAHAHA/chamber_spo
|
/scripts/io/common
|
UTF-8
| 7,691
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Shared definitions for the I/O test groups: tool names, device/namespace
# selection and logging conventions.  Sourced by the per-group `run`
# scripts via group_init/run_test below.
NBIO=nbio
NVME_CLI=nvme
IOLOG_PREFIX=iolog-
SMARTLOG_PREFIX=smartlog-
ERRORLOG_PREFIX=errorlog-
IOENGINE=spdk
#IOENGINE=libaio
PCI_ADDRESS=$FQA_DEVICE # domain:bus:device:function
PATH_FQA_TEST_SCRIPT=~/fqa/trunk/fqa_test_script
PATH_GET_LOG=~/fqa/trunk/io/get_log
# Namespace id, overridable through NBIO_NSID (defaults to 1).
if [[ -z $NBIO_NSID ]]; then
NSID=1
else
NSID=$NBIO_NSID
fi
# spdk addresses the controller by PCI address; libaio goes through the
# kernel block device.
if [ $IOENGINE == "spdk" ]; then
CONTROLLER="$PCI_ADDRESS"
export FQA_SPDK=1
else
CONTROLLER="/dev/nvme0"
export FQA_SPDK=0
fi
#JOURNAL_DIR=$PWD/$(dirname ${BASH_SOURCE[0]})
#JOURNAL_FILE=$JOURNAL_DIR/nvme0n1.journal
# For native targets the data-compare journal lives in a SysV shared
# memory segment keyed off the namespace id.
if [[ $FQA_TARGET == "native" ]]; then
JOURNAL_SHM=$((NSID+830))
COMPARE=true
else
COMPARE=false
fi
# ANSI escape sequences and run-state flags.
AE_SAVE_CURSOR="\033[s"
AE_RESTORE_CURSOR="\033[u"
AE_CLEAR_EOL="\033[K"
LIST=0
CLEAN=0
ARG=
GROUP_NO=
LOGDIR=$PWD
CONFIGS=0
# default values
NAMESPACE_BSZ=4096
MDTS=$((32 * NAMESPACE_BSZ))
PASS="\033[32;1mpass\033[0m"
FAIL="\033[31;1mfail\033[0m"
# Run the "configuration" pseudo-test: honors the same skip/clean/list
# gating as run_test but currently only prints a pass line.
function run_config()
{
if skip_test; then
TEST_NO=$((TEST_NO + 1))
return 0
fi
if [ $CLEAN -eq 1 ]; then
TEST_NO=$((TEST_NO + 1))
return 0
fi
# Temporary-environment assignment: TITLE is set only for this call.
TITLE="configuration" print_title
if [ $LIST -eq 1 ]; then
TEST_NO=$((TEST_NO + 1))
return 0
fi
#format
printf "$PASS\n"
TEST_NO=$((TEST_NO + 1))
}
# Per-group initialization: derive the group number from the directory
# name, handle list/clean modes, then probe the NVMe namespace geometry.
# Globals set: GROUP_DIR, GROUP_NO, ARG, TEST_NO, LOGDIR, LIST, CLEAN,
# NAMESPACE_BSZ, NAMESPACE_SIZE, NAMESPACE_SIZE_GB, MDTS, TEST_TO_RUN.
function group_init()
{
    GROUP_DIR=${PWD##*/}
    # BUG FIX: was `GROUP_NO=(${GROUP_DIR//./ }[0])`, which builds an array
    # whose *last* word gets a literal "[0]" appended and only yielded the
    # right value by accident.  Take the leading dot-separated component
    # of the directory name (e.g. "03.io" -> "03") directly.
    GROUP_NO=${GROUP_DIR%%.*}
    ARG=$1
    if [ ! -n "${TEST_NO+1}" ]; then
        TEST_NO=0
        echo "$GROUP_NO. $GROUP_TITLE"
    fi
    if [ -n "${CURDIR+1}" ]; then
        LOGDIR=$CURDIR
    fi
    if [ $# -gt 0 ]; then
        if [ "$1" == "list" ]; then
            LIST=1
            return
        elif [ "$1" == "clean" ]; then
            CLEAN=1
            clean_logs
            return
        fi
    fi
    command -v $NVME_CLI >/dev/null # check if the nvme-cli is installed
    RET=$?
    if [ $RET -ne 0 ]; then
        echo "nvme-cli is not installed"
        exit $RET
    fi
    # BUG FIX: the pattern was quoted ('/dev/nvme*'), so [[ == ]] compared
    # it literally and the block-device existence check never ran.
    if [[ $CONTROLLER == /dev/nvme* ]]; then
        if [ ! -b "${CONTROLLER}n${NSID}" ]; then
            echo "namespace '${CONTROLLER}n${NSID}' not found"
            exit 1
        fi
    fi
    # Namespace geometry: in-use LBA size and total size in bytes.
    NAMESPACE_BSZ=`$NVME_CLI id-ns $CONTROLLER -n $NSID | awk '/in use/ { print $5 }' | awk -F ':' '{ print 2^$2 }'`
    NAMESPACE_SIZE=$((NAMESPACE_BSZ * `$NVME_CLI id-ns $CONTROLLER -n $NSID | awk '/nsze/ { print $3 }'`))
    NAMESPACE_SIZE_GB=$((NAMESPACE_SIZE/1024**3))
    #MDTS=$((`$NVME_CLI id-ctrl $CONTROLLER | awk '/mdts/ { print 2^$3*4096 }'`))
    MDTS=$((128 * 1024))
    if [ $# -ge 1 ]; then
        TEST_TO_RUN=$1
    fi
    LOGDIR="$LOGDIR/log"
    mkdir $LOGDIR &> /dev/null
}
# Expand $TITLE (which may itself contain shell expansions) into the
# global TITLE_STR, then print the numbered test caption.  In list mode
# (LIST=1) a plain newline-terminated line is printed; otherwise the
# caption is padded to 70 columns and left open for the pass/fail marker.
print_title()
{
    TITLE_STR=$(eval "echo -n $TITLE")
    local caption="$TITLE_PREFIX$TITLE_STR"
    if [ $LIST -ne 1 ]; then
        printf "  %02d. %-70s ... " $TEST_NO "$caption"
    else
        printf "  %02d. %s\n" $TEST_NO "$caption"
    fi
}
# Delete the per-run log directory beneath the current working directory.
clean_logs()
{
    local logpath="$PWD/log"
    rm -rf -- "$logpath"
}
# Background poller: after a 5s grace period, fetch NVMe log pages every
# 30 seconds with an increasing sequence counter.  Runs forever; the
# caller is expected to kill it (see run_nbio / run_test).
function get_log_page_bg()
{
cnt=0
sleep 5
while [ true ]
do
${PATH_FQA_TEST_SCRIPT}/get_log_page_set.sh $FQA_DEVICE $PATH_GET_LOG $cnt
cnt=$(echo ${cnt}+1 | bc)
sleep 30
done
}
# Run one nbio invocation: compute per-test log paths, export the nbio
# environment, start the log-page poller (except for groups 11/12), then
# translate nbio's exit status (1 = abort the run, >1 = test failure).
# Globals (read): MULTI_NS, PCI_ADDRESS, NSID, TEST_NO, LOGDIR, IOENGINE,
# CONTROLLER, JOURNAL_SHM, COMPARE, GROUP_NO, NBIO, PARAM_FILE.
function run_nbio()
{
# SUBTEST_NO is set by run_custom when several nbio calls share a test.
if [ -n "${SUBTEST_NO+1}" ]; then
LOGSUFFIX="-$SUBTEST_NO"
SUBTEST_NO=$((SUBTEST_NO + 1))
fi
# Single-namespace runs embed the nsid in the log name; multi-ns don't.
if [[ $MULTI_NS == "1" ]]; then
IOLOG_FILE="$LOGDIR/$IOLOG_PREFIX$PCI_ADDRESS"_"$TEST_NO$LOGSUFFIX"
SMARTLOG_FILE="$LOGDIR/$SMARTLOG_PREFIX$PCI_ADDRESS"_"$TEST_NO$LOGSUFFIX"
ERRORLOG_FILE="$LOGDIR/$ERRORLOG_PREFIX$PCI_ADDRESS"_"$TEST_NO$LOGSUFFIX"
else
IOLOG_FILE="$LOGDIR/$IOLOG_PREFIX$PCI_ADDRESS"_ns"$NSID"_"$TEST_NO$LOGSUFFIX"
SMARTLOG_FILE="$LOGDIR/$SMARTLOG_PREFIX$PCI_ADDRESS"_ns"$NSID"_"$TEST_NO$LOGSUFFIX"
ERRORLOG_FILE="$LOGDIR/$ERRORLOG_PREFIX$PCI_ADDRESS"_ns"$NSID"_"$TEST_NO$LOGSUFFIX"
fi
export IOENGINE=$IOENGINE
export IOLOG=$IOLOG_FILE
export SMARTLOG=$SMARTLOG_FILE
export ERRORLOG=$ERRORLOG_FILE
export CONTROLLER=$CONTROLLER
export NSID=$NSID
export JOURNAL_SHM=$JOURNAL_SHM
export COMPARE=$COMPARE
if [[ -z $AMOUNT ]]; then
export AMOUNT="time:10s"
fi
if [[ -z $AMOUNT_SCALE ]]; then
export AMOUNT_SCALE=1
fi
# Groups 11 and 12 are special-cased to run without the log poller.
if [ $GROUP_NO -ne 11 ] && [ $GROUP_NO -ne 12 ]; then
get_log_page_bg &
PID_TRACE=$!
fi
$NBIO --display testcase $PARAM_FILE $@
RET=$?
# nbio exit codes: 1 = abort whole run, >1 = test failed.
if [ $RET -eq 1 ]; then
printf "abort\n"
exit $RET
elif [ $RET -gt 1 ]; then
printf "$FAIL\n"
if [ -s $ERRORLOG_FILE ]; then
echo "check the log '$ERRORLOG_FILE' for details"
fi
exit $RET
fi
if [ ! -s $ERRORLOG_FILE ]; then
# remove empty error log file
rm -f $ERRORLOG_FILE
fi
if [ $GROUP_NO -ne 11 ] && [ $GROUP_NO -ne 12 ]; then
kill -9 $PID_TRACE
fi
}
# Decide whether the current test should be skipped.
# Returns 0 ("skip") when a single test was requested via TEST_TO_RUN and
# it is not this one; returns 1 ("run") when no filter is set or the
# requested number matches TEST_NO.
skip_test()
{
    # No filter configured: always run.
    [ -z "${TEST_TO_RUN+set}" ] && return 1
    [ "$TEST_TO_RUN" -eq "$TEST_NO" ] && return 1
    return 0
}
# Run one declared test: honor skip/clean/list gating, report status to
# the chamber (except for groups 11/12), run optional pre/post hooks
# around run_nbio, and print the pass marker.
# Globals (read): CLEAN, GROUP_NO, LIST, TITLE_PREFIX, TITLE_STR,
# PRETEST_EVENT, POSTTEST_EVENT, ERRORLOG_FILE; (written): TEST_NO.
function run_test()
{
if skip_test; then
TEST_NO=$((TEST_NO + 1))
return 0
fi
if [ $CLEAN -eq 1 ]; then
TEST_NO=$((TEST_NO + 1))
return 0
fi
# NOTE: TITLE_STR is the expansion produced by the previous print_title
# call, so this status line relies on print_title's global side effect.
if [ $GROUP_NO -ne 11 ] && [ $GROUP_NO -ne 12 ]; then
$PATH_FQA_TEST_SCRIPT/send_current_status.sh $GROUP_NO. / $TEST_NO. $TITLE_PREFIX$TITLE_STR
fi
print_title
if [ $LIST -eq 1 ]; then
TEST_NO=$((TEST_NO + 1))
return 0
fi
if [ -n "${PRETEST_EVENT+1}" ]; then
$PRETEST_EVENT
fi
run_nbio
if [ -n "${POSTTEST_EVENT+1}" ]; then
$POSTTEST_EVENT
fi
printf "$PASS\n"
if [ ! -s $ERRORLOG_FILE ]; then
# remove empty error log file
rm -f $ERRORLOG_FILE
fi
TEST_NO=$((TEST_NO + 1))
}
# Emit a timestamped diagnostic line on stderr ("Mon dd HH:MM:SS msg").
log()
{
    now=$(date '+%b %d %H:%M:%S')
    printf '%s %s\n' "$now" "$*" >&2
}
# Run an arbitrary command as a test: same skip/clean/list gating and
# pre/post hooks as run_test, but the payload is "$@" and its stderr is
# appended to a per-test log file.  SUBTEST_NO=1 lets the payload's
# run_nbio calls generate numbered sub-logs.
# Globals (read): CLEAN, LIST, LOGDIR, PRETEST_EVENT, POSTTEST_EVENT,
# ERRORLOG_FILE; (written): TEST_NO, SUBTEST_NO, TESTLOG_FILE.
function run_custom()
{
if skip_test; then
TEST_NO=$((TEST_NO + 1))
return 0
fi
if [ $CLEAN -eq 1 ]; then
TEST_NO=$((TEST_NO + 1))
return 0
fi
print_title
if [ $LIST -eq 1 ]; then
TEST_NO=$((TEST_NO + 1))
return 0
fi
if [ -n "${PRETEST_EVENT+1}" ]; then
$PRETEST_EVENT
fi
TESTLOG_FILE=$LOGDIR/testlog-$TEST_NO
SUBTEST_NO=1
print_title > $TESTLOG_FILE
$@ 2>> $TESTLOG_FILE
RET=$?
if [ $RET -ne 0 ]; then
printf "$FAIL\n"
if [ -s $ERRORLOG_FILE ]; then
echo "check the log '$ERRORLOG_FILE' for details"
fi
exit $RET
fi
if [ -n "${POSTTEST_EVENT+1}" ]; then
$POSTTEST_EVENT
fi
printf "$PASS\n"
if [ ! -s $ERRORLOG_FILE ]; then
# remove empty error log file
rm -f $ERRORLOG_FILE
fi
TEST_NO=$((TEST_NO + 1))
}
# Run each named group's "run" script inside that group's directory, then
# restore the original working directory.
# Globals: CURDIR (set), ARG (read, forwarded to each run script).
function run_groups()
{
    CURDIR=$PWD
    for group in "$@"
    do
        cd $CURDIR
        # BUG FIX: the loop body previously did `cd $1`, ignoring the loop
        # variable, so only the first group on the command line ever ran.
        cd "$group"
        # Wrapper function so `source run` gets a clean positional scope.
        x()
        {
            source run $ARG
        }
        x
    done
    cd $CURDIR
}
# Drop the data-compare journal's shared-memory segment (ignore absence).
function clean_journal()
{
#rm -f $JOURNAL_FILE
ipcrm -M $JOURNAL_SHM &> /dev/null
}
# Format the NVMe namespace and reset the compare journal.
# NOTE(review): $NAMESPACE is not defined anywhere in this file's visible
# scope — presumably set by the sourcing run script; confirm.
function format()
{
command -v $NVME_CLI >/dev/null # check if the nvme-cli is installed
RET=$?
if [ $RET -ne 0 ]; then
echo "nvme-cli is not installed"
exit $RET
fi
# nvme format $NAMESPACE $1 --lbaf=3 >/dev/null
$NVME_CLI format $NAMESPACE $1 >/dev/null
RET=$?
if [ $RET -ne 0 ]; then
echo "namespace format failure"
exit $RET
fi
clean_journal
}
# Flush filesystems and unload the NVMe kernel driver.
function shutdown_device()
{
sync
rmmod nvme
}
# Reload the NVMe driver and wait (up to 20s) for the namespace device
# node to reappear; exits 1 on timeout.
function initialize_device()
{
modprobe nvme retry_time=0
for ((sec=1; sec<=20; sec++))
do
if [ -b $NAMESPACE ]; then
return 0
fi
sleep 1
done
echo "namespace '$NAMESPACE' not found"
exit 1
}
# "Power on": re-enumerate the PCI bus and verify the device came back.
# (Real power control is still a TODO; this only rescans.)
function power_on()
{
# TODO: power on
# re-enumerate pci bus
echo "1" > /sys/bus/pci/rescan
if [ ! -d /sys/bus/pci/devices/$PCI_ADDRESS ]; then
echo "pci device '$PCI_ADDRESS' not found"
exit 1
fi
}
# "Power off": remove the device from the PCI bus (no real power control).
function power_off()
{
# TODO: power off
# remove the knowledge of the device
echo "1" > /sys/bus/pci/devices/$PCI_ADDRESS/remove
}
# Append a (name, handler-function) pair to the global configuration
# registry; CONFIGS tracks the number of registered entries.
function register_config()
{
    local idx=$CONFIGS
    CONFIG_NAME[$idx]="$1"
    CONFIG_FUNC[$idx]="$2"
    CONFIGS=$((idx + 1))
}
| true
|
27445e0c5e4620d0c2a0e7dab96369b1a4daa8f5
|
Shell
|
travis-deshotels/useful-scripts
|
/finddir.sh
|
UTF-8
| 764
| 3.96875
| 4
|
[
"MIT"
] |
permissive
|
#add this function to your bashrc and create an alias
# shellcheck disable=SC2010
# Case-insensitively match a directory under $PROJECT_DIR and cd into it;
# with multiple matches, present a numbered menu and cd into the choice.
# NOTE(review): PROJECT_DIR is an empty placeholder — it must be filled
# in, otherwise `ls -l ""` fails.  Parsing `ls -l` also breaks on names
# containing spaces; a glob-based rewrite would be more robust.
function finddir {
PROJECT_DIR=""
match_count=$(ls -l "$PROJECT_DIR" | grep '^d' | grep -ic "$1")
if [[ $match_count == 0 ]];then
echo "No match"
elif [[ $match_count == 1 ]];then
cd "$PROJECT_DIR/$(ls "$PROJECT_DIR" | grep -i "$1")" || exit
else
# Multiple matches: list them, read a 1-based selection, cd into it.
lines=$(ls -l "$PROJECT_DIR" | grep '^d' | grep -i "$1" | awk '{print $9}')
i=1
lines_array=()
while IFS= read -r line
do
echo $i. "$line"
lines_array[$i]="$line"
i=$((i+1))
done <<< "$lines"
echo -n "Select one: "
read -r choice
cd "$PROJECT_DIR/${lines_array[$choice]}" || exit
fi
}
| true
|
3ec7e4f64bce29f9fd32ad124ac035788d6e4a9b
|
Shell
|
randomlock/pytest_ec2
|
/pytest_ec2/pytest.sh
|
UTF-8
| 2,099
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Spin up the pytest EC2 instance, upload the backend branch, run the test
# suite remotely, then fetch the log and post a summary to Slack.
#   usage: pytest.sh <branch> <backend-path> <ec2-pem-key-path>
# SECURITY NOTE(review): a Slack webhook URL is hardcoded below (twice);
# treat it as a leaked secret — rotate it and read it from an environment
# variable instead.
set -e
if [[ -z "$1" ]] || [[ -z "$2" ]] || [[ -z "$3" ]] ; then
echo "Please specify test case branch, backend path and ec2 pem key path"
exit 1
fi
#if [[ $EUID == 0 ]]; then
# echo "Exiting from root"
# exit
#fi
branch=$1
backend_path=$2
ec2_pem_key_path=$3
echo "###########################################################################"
# Start (or reuse) the EC2 instance; its IP is written to /tmp.
python pytest_ec2.py start
ec2_ip=$(head -n 1 /tmp/ec2_instance_ip)
echo "###########################################################################"
echo "Uploading branch ${branch} to test case instance ${ec2_ip}"
# Sourced (not executed) so it can export backend_folder for us.
. upload_backend_for_unit_test.sh ${branch} ${ec2_ip} ${backend_path} ${ec2_pem_key_path}
echo "###########################################################################"
echo "Setting up test backend and test database"
ec2_user=ubuntu
echo "backend folder is ${backend_folder}"
ssh -t ${ec2_user}@${ec2_ip} "sudo /home/${ec2_user}/run_unit_test.sh ${backend_folder}"
ssh -t ${ec2_user}@${ec2_ip} "sudo /home/${ec2_user}/prepare_test_database.sh verifi verifi verifi 127.0.0.1"
echo "###########################################################################"
echo "running pytest"
today=`date +%Y%m%d_%H%M%S`
logfile=test-${today}.txt
curl -X POST -H 'Content-type: application/json' --data '{"text":"['$(echo "$USER")'] Started Pytest execution for branch '${branch}'"}' https://hooks.slack.com/services/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
ssh -t ${ec2_user}@${ec2_ip} "sudo /home/${ec2_user}/run_pytest.sh ${logfile}"
echo "\n\n\nCopy log file into local machine at /tmp"
scp ${ec2_user}@${ec2_ip}:/home/${ec2_user}/${logfile} /tmp/.
echo "###########################################################################"
echo "###########################################################################"
echo "uploading file to slack"
python pytest_slack.py /tmp/$logfile
curl -X POST -H 'Content-type: application/json' --data '{"text":"['$(echo "$USER")'] Pytest summary report for branch - '${branch}'"}' https://hooks.slack.com/services/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
echo "###########################################################################"
# stopping ec2 machine
#python pytest_ec2.py stop
| true
|
77e39590b85e5c284cad15728029efb79800df24
|
Shell
|
julp/ugrep
|
/test/ugrep.sh
|
UTF-8
| 5,057
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# charset: UTF-8
# Regression tests for ugrep: each case compares ugrep's output or exit code
# against GNU grep (or a known value) via the assert helpers sourced below.
# WARNING:
# * only file in UTF-8 *without BOM* should be compared (grep considers BOM when ugrep don't)
# * comparison must be about an UTF-8 file with LF line ending (grep considers CR when ugrep don't)
# * search string should be ASCII only when LC_ALL=C
declare -r TESTDIR=$(dirname $(readlink -f "${BASH_SOURCE}"))
declare -r DATADIR="${TESTDIR}/data"
. ${TESTDIR}/assert.sh.inc
# FILE is the data file whose encoding matches the system charset (fed to
# grep); UFILE is the mismatching one (fed to ugrep, which converts).
if [ "${UGREP_SYSTEM}" == 'UTF-8' ]; then
FILE="${DATADIR}/utf8_eleve.txt"
UFILE="${DATADIR}/iso88591_eleve.txt"
else
FILE="${DATADIR}/iso88591_eleve.txt"
UFILE="${DATADIR}/utf8_eleve.txt"
fi
# --- before/after context (-A/-B), with and without inverted match ---------
ARGS='--color=never -HnA 2 -B 3 after_context bin/ugrep.c'
assertOutputValueEx "-A 2 -B 3 (without -v)" "LC_ALL=C ./ugrep ${UGREP_OPTS} ${ARGS} 2>/dev/null" "grep ${ARGS}"
assertOutputValueEx "-A 2 -B 3 (with -v)" "LC_ALL=C ./ugrep ${UGREP_OPTS} -v ${ARGS} 2>/dev/null" "grep -v ${ARGS}"
#ARGS='--color=never -HnA 4 -B 6 after_context bin/ugrep.c'
#assertOutputValueEx "-A 4 -B 6 (without -v)" "LC_ALL=C ./ugrep ${UGREP_OPTS} ${ARGS} 2>/dev/null" "grep ${ARGS}"
ARGS='--color=never -HnA 6 -B 4 after_context bin/ugrep.c'
assertOutputValueEx "-A 6 -B 4 (without -v)" "LC_ALL=C ./ugrep ${UGREP_OPTS} ${ARGS} 2>/dev/null" "grep ${ARGS}"
ARGS='--color=never -HnA 4 -B 6 "^[^{}]*$" bin/ugrep.c'
assertOutputValueEx "-A 4 -B 6 (with -v)" "LC_ALL=C ./ugrep ${UGREP_OPTS} -v ${ARGS} 2>/dev/null" "grep -v ${ARGS}"
# --- line counting (-c/-vc) against grep on the matching-charset file ------
ARGS='élève'
assertOutputCommand "count matching lines (-c)" "./ugrep ${UGREP_OPTS} -c ${ARGS} ${UFILE} 2>/dev/null" "grep -c ${ARGS} ${FILE}" "-eq"
assertOutputCommand "count non-matching lines (-vc)" "./ugrep ${UGREP_OPTS} -vc ${ARGS} ${UFILE} 2>/dev/null" "grep -vc ${ARGS} ${FILE}" "-eq"
# --- grapheme vs codepoint matching units ----------------------------------
# Input contains decomposed é/è: one grapheme each, but two codepoints.
ARGS='-c e'
INPUT="echo -en \"${E_ACUTE_NFD}\nl\n${E_GRAVE_NFD}\nv\ne\""
assertOutputValue "grapheme consistent (-c)" "${INPUT} | ./ugrep ${UGREP_OPTS} --unit=grapheme ${ARGS} 2>/dev/null" 1 "-eq"
assertOutputValue "grapheme inconsistent (-c)" "${INPUT} | ./ugrep ${UGREP_OPTS} --unit=codepoint ${ARGS} 2>/dev/null" 3 "-eq"
assertOutputValue "grapheme consistent (-Ec)" "${INPUT} | ./ugrep ${UGREP_OPTS} --unit=grapheme -E ${ARGS} 2>/dev/null" 1 "-eq"
assertOutputValue "grapheme inconsistent (-Ec)" "${INPUT} | ./ugrep ${UGREP_OPTS} --unit=codepoint -E ${ARGS} 2>/dev/null" 3 "-eq"
# --- max-count (-m), alone and combined with -c / -v -----------------------
ARGS='--color=never -m 2 a'
INPUT="echo -en \"a\na\na\na\""
assertOutputCommand "max-count" "${INPUT} | ./ugrep ${UGREP_OPTS} ${ARGS} 2>/dev/null" "echo -en \"a\na\""
assertOutputValue "count + max-count" "${INPUT} | ./ugrep ${UGREP_OPTS} -c ${ARGS} 2>/dev/null" 2 "-eq"
ARGS='--color=never -vm 2 z'
assertOutputCommand "revert-match + max-count" "${INPUT} | ./ugrep ${UGREP_OPTS} ${ARGS} 2>/dev/null" "echo -en \"a\na\""
assertOutputValue "revert-match + count + max-count" "${INPUT} | ./ugrep -c ${UGREP_OPTS} ${ARGS} 2>/dev/null" 2 "-eq"
# --- Unicode normalization forms (--form) ----------------------------------
# U+1E69 (s with dot below and dot above) as precomposed NFC, canonical NFD,
# and a non-canonical combining-mark order; all three should match after
# normalization.
declare -r SDBDA_NFC=$'\xE1\xB9\xA9'
declare -r SDBDA_NFD=$'\x73\xCC\xA3\xCC\x87'
declare -r SDBDA_NONE=$'\x73\xCC\x87\xCC\xA3'
INPUT="echo -en \"${SDBDA_NONE}\n${SDBDA_NFC}\n${SDBDA_NFD}\n\""
assertOutputValue "NFC" "${INPUT} | ./ugrep ${UGREP_OPTS} --unit=grapheme --form=c -c '${SDBDA_NFD}' 2>/dev/null" 3 "-eq"
assertOutputValue "NFD" "${INPUT} | ./ugrep ${UGREP_OPTS} --unit=grapheme --form=d -c '${SDBDA_NFC}' 2>/dev/null" 3 "-eq"
# --- exit codes (-q) and file listing (-l/-L) ------------------------------
# ./ugrep ${UGREP_OPTS} -q élève test/utf8_eleve.txt 2>/dev/null
# assertTrue "[[ $? -eq 0 ]]"
# ./ugrep ${UGREP_OPTS} -q zzz test/utf8_eleve.txt 2>/dev/null
# assertTrue "[[ $? -eq 1 ]]"
# ./ugrep ${UGREP_OPTS} -q élève /unexistant 2>/dev/null
# assertTrue "[[ $? -gt 1 ]]"
# ./ugrep ${UGREP_OPTS} -q élève /unexistant test/utf8_eleve.txt 2>/dev/null
# assertTrue "[[ $? -eq 0 ]]"
assertExitValue "exit value with one or more lines selected" "./ugrep ${UGREP_OPTS} -q élève ${FILE} 2>/dev/null" 0
assertExitValue "exit value with no lines selected" "./ugrep ${UGREP_OPTS} -q zzz ${UFILE} 2>/dev/null" 1
assertExitValue "exit value with error and no more file" "./ugrep ${UGREP_OPTS} -q élève /unexistant 2>/dev/null" 1 "-gt"
ARGS='--color=never élève'
assertOutputCommand "file with match (-l)" "./ugrep ${UGREP_OPTS} -l ${ARGS} ${FILE} 2>/dev/null" "grep -l ${ARGS} ${FILE}"
assertOutputCommand "file without match (-L)" "./ugrep ${UGREP_OPTS} -L ${ARGS} ${FILE} 2>/dev/null" "grep -L ${ARGS} ${FILE}"
ARGS='--color=never zzz'
assertOutputCommand "file with match (-l)" "./ugrep ${UGREP_OPTS} -l ${ARGS} ${FILE} 2>/dev/null" "grep -l ${ARGS} ${FILE}"
assertOutputCommand "file without match (-L)" "./ugrep ${UGREP_OPTS} -L ${ARGS} ${FILE} 2>/dev/null" "grep -L ${ARGS} ${FILE}"
# --- empty pattern edge cases ----------------------------------------------
FILE='engine.h' # Others are too "particular"
ARGS="--color=never -nw ''"
assertOutputValueEx "empty pattern" "./ugrep ${UGREP_OPTS} -E ${ARGS} ${FILE} 2>/dev/null" "grep ${ARGS} ${FILE}"
assertOutputValueEx "empty pattern + word (-w)" "./ugrep ${UGREP_OPTS} -Ew ${ARGS} ${FILE} 2>/dev/null" "grep -w ${ARGS} ${FILE}"
assertOutputValueEx "empty pattern + whole line (-x)" "./ugrep ${UGREP_OPTS} -Ex ${ARGS} ${FILE} 2>/dev/null" "grep -x ${ARGS} ${FILE}"
exit $?
| true
|
e6dac3a60e5aa2a123ab28c8b72b66d3579b935c
|
Shell
|
qingen/wenet
|
/examples/openasr2021/s0/local/dump_wav.sh
|
UTF-8
| 1,374
| 3.71875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Dump "pipe-style" wav.scp entries (commands that write audio to stdout) to
# real audio files, cutting each utterance out with sox according to a
# Kaldi-style segments file.
#
# Usage: dump_wav.sh [--nj N] <in-wav.scp> <segments> <out-wav.scp> [logdir]
#   nj     : number of parallel slices (parsed by tools/parse_options.sh)
#   logdir : defaults to <data>/log next to the input scp
nj=1
. tools/parse_options.sh || exit 1;
inscp=$1
segments=$2
outscp=$3
data=$(dirname "${inscp}")
if [ $# -eq 4 ]; then
  logdir=$4
else
  logdir=${data}/log
fi
mkdir -p "${logdir}"
# BUG FIX: the original tested `[ ! -x $sox ]` unquoted (which never fires
# when sox is missing, because the test collapses to `[ ! -x ]`) and its
# error message referred to $sph2pipe, a variable this script never sets.
sox=$(command -v sox)
[ ! -x "$sox" ] && echo "Could not find the sox program on PATH" && exit 1;
# The command part of each scp line may contain spaces; encode them as '#'
# so segment.py sees one field, then restore them after segmentation.
paste -d " " <(cut -f 1 -d " " "$inscp") <(cut -f 2- -d " " "$inscp" | tr -t " " "#") \
  > "$data"/wav_ori.scp
tools/segment.py --segments "$segments" --input "$data"/wav_ori.scp --output "$data"/wav_segments.scp
sed -i 's/ /,/g' "$data"/wav_segments.scp
sed -i 's/#/ /g' "$data"/wav_segments.scp
rm -f "$logdir"/wav_*.slice
rm -f "$logdir"/*.log
# Split the segment list into nj roughly-equal slices, processed in parallel.
split --additional-suffix .slice -d -n l/$nj "$data"/wav_segments.scp "$logdir"/wav_
for slice in "$logdir"/wav_*.slice; do
  {
    name=$(basename -s .slice "$slice")
    mkdir -p "${data}/wavs/${name}"
    # Fields (comma-separated): utt-id, producer command, start, end.
    # sox trims [start, start+duration) out of the piped audio.
    cat "${slice}" | awk -F ',' -v sox="$sox" -v data="$(pwd)/$data/wavs/$name" \
      -v logdir="$logdir" -v name="$name" '{
        during=$4-$3
        cmd=$2 sox " - " data "/" $1 ".wav" " trim " $3 " " during;
        system(cmd)
        printf("%s %s/%s.wav\n", $1, data, $1);
      }' | \
      sort > "${data}/wavs_${name}.scp" || exit 1;
  } &
done
wait
cat "${data}"/wavs_*.scp > "$outscp"
rm "${data}"/wavs_*.scp
# Segment bookkeeping no longer matches the dumped wavs; drop and re-fix.
rm -f "$data"/{segments,wav_segments.scp,reco2file_and_channel,reco2dur}
tools/fix_data_dir.sh "$data"
| true
|
b02322302b071463282fe8cba186bfefce96bc0a
|
Shell
|
davep-github/dpw
|
/etc/cron.d.work/daily.home
|
UTF-8
| 1,530
| 2.6875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# $Id: daily,v 1.61 2007/08/28 08:20:12 davep Exp $
# Daily housekeeping cron job: cleans temp droppings, rotates personal log
# files and clears assorted application caches. Several historical steps are
# kept below commented out or marked SKIPPING.
#set -x
# crontab.rc provides the environment (PATH, helpers) for the commands below.
. $HOME/etc/crontab.rc
clean-droppings
# HUGE, plus some dirs are foolishly make with to high permissions
# I sure hope *I* didn't write the comment above this line.
# echo "Index ports:" # indexes are getting a bit large
# Also, not needed under linux, is done w/eupdate, used by
# index-ports $init_opt 2>&1
# @todo Add pkg/deb/etc index/db updates here.
#
# @todo check mod time on parent dir and don't rebuild if there are no changes.
# or make in temp area, cmp and copy only if different.
#
#echo "Make Robin's pic dir"
#mk-html-dir.pl /usr/local/www/data/daughter/pics > /usr/local/www/data/daughter/pics/dir.html 2>&1
#echo "Update GNU Cash Stock Prices"
#gnc-prices ~/finances/p2k 2>&1
# gnucash --add-price-quotes ~/finances/p2k
export ETC=$HOME/etc
# rotate personal log files...
echo "Rotate logs..."
dp-rotate-logs
echo '...done'
echo ""
#echo "Remake playlists..."
#mk-album-playlists.py /media/audio/music
# Reset pan's cache by overwriting its config with a pristine template.
echo 'Clear pan cache...'
cp -f ~/etc/Pan.template ~/.gnome/Pan
# and clear firefox's
#echo 'Clear ff cache...'
#clrff
echo 'Clear mplayer cache...'
clrmp
echo "******* SKIPPING: Index MH dirs:"
#index-mail $init_opt 2>&1
#echo "done."
#echo ""
echo "******* SKIPPING: Index notes:"
#index-notes $init_opt 2>&1
#echo "done."
#echo ""
echo "******* SKIPPING: Update locdb for music files:"
#update-mp3db
#echo "done."
#echo ""
echo ""
echo "daily done."
echo "==========================="
| true
|
06bcc073498f1840ac61fd552fe64f8872115c25
|
Shell
|
ggdream/scripts
|
/dart.sh
|
UTF-8
| 1,001
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install the Dart SDK on Ubuntu: download the release zip, unpack it under
# /usr/local, and register the pub mirrors + SDK bin dir in ~/.bashrc.
version=2.10.4
location=/usr/local
target=$HOME/.bashrc
wget -c "https://storage.googleapis.com/dart-archive/channels/stable/release/$version/sdk/dartsdk-linux-x64-release.zip"
sudo apt install unzip -y
sudo unzip -d "$location" dartsdk-linux-x64-release.zip
echo "export PUB_HOSTED_URL=https://pub.flutter-io.cn" >> "$target"
echo "export FLUTTER_STORAGE_BASE_URL=https://storage.flutter-io.cn" >> "$target"
echo "export PUB_CACHE=$location/dart-sdk/cache" >> "$target"
# BUG FIX: escape \$PATH so the login shell expands it when .bashrc is
# sourced; the original interpolated this script's PATH value verbatim,
# freezing a stale PATH into ~/.bashrc.
echo "export PATH=\$PATH:$location/dart-sdk/bin" >> "$target"
# Make dart available in *this* shell too (sourcing .bashrc is a no-op in
# many non-interactive shells), so the version check below works.
export PATH="$PATH:$location/dart-sdk/bin"
source "$HOME/.bashrc"
dart --version
# Alternative: install via Google's apt repository instead of the zip.
# sudo apt-get update
# sudo apt-get install apt-transport-https -y
# sudo sh -c 'wget -qO- https://dl.google.com/linux/linux_signing_key.pub | apt-key add -'
# sudo sh -c 'wget -qO- https://storage.flutter-io.cn/download.dartlang.org/linux/debian/dart_stable.list > /etc/apt/sources.list.d/dart_stable.list'
# sudo apt-get update && sudo apt-get install dart
# echo 'export PATH="$PATH:/usr/lib/dart/bin"' >> /etc/profile
| true
|
6dac09ef7c29cf4bfa6a0a75ba23f0196343b129
|
Shell
|
mcschmitz/notion2readme
|
/.githooks/pre-commit
|
UTF-8
| 2,813
| 4
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Pre-commit hook: rejects non-ASCII filenames, then auto-formats the staged
# Python files (autoflake/docformatter/isort/black) and lints them with
# flake8 using the eb7 styleguide config; a lint failure aborts the commit.
#
# An example hook script to verify what is about to be committed.
# Called by "git commit" with no arguments. The hook should
# exit with non-zero status after issuing an appropriate message if
# it wants to stop the commit.
#
if git rev-parse --verify HEAD >/dev/null 2>&1
then
against=HEAD
else
# Initial commit: diff against an empty tree object
against=4b825dc642cb6eb9a060e54bf8d69288fbee4904
fi
# If you want to allow non-ASCII filenames set this variable to true.
allownonascii=$(git config --bool hooks.allownonascii)
# Redirect output to stderr.
exec 1>&2
# Cross platform projects tend to avoid non-ASCII filenames; prevent
# them from being added to the repository. We exploit the fact that the
# printable range starts at the space character and ends with tilde.
if [ "$allownonascii" != "true" ] &&
# Note that the use of brackets around a tr range is ok here, (it's
# even required, for portability to Solaris 10's /usr/bin/tr), since
# the square bracket bytes happen to fall in the designated range.
test $(git diff --cached --name-only --diff-filter=A -z $against |
LC_ALL=C tr -d '[ -~]\0' | wc -c) != 0
then
cat <<\EOF
Error: Attempt to add a non-ASCII file name.
This can cause problems if you want to work with people on other platforms.
To be portable it is advisable to rename the file.
If you know what you are doing you can disable this check using:
git config hooks.allownonascii true
EOF
exit 1
fi
# Staged .py files, space-joined on one line.
# NOTE(review): this feeds *all* .py files under the repo to `git diff
# --cached --name-only` as pathspecs, letting git filter down to the staged
# ones — confirm it behaves for paths containing spaces.
pythonfiles=$(find . -type f -name "*.py" | xargs git diff --cached --name-only $against | awk '{print $1}' | paste -d' ' -s)
echo "Started pre-commit git hook"
echo "**********************BLACK**********************"
if [ -z "$pythonfiles" ]
then
echo "No changes in pythonfiles"
else
pip install -q autoflake docformatter black
autoflake -i --expand-star-imports --remove-all-unused-imports --ignore-init-module-imports --remove-duplicate-keys --remove-unused-variables $pythonfiles
docformatter -i --make-summary-multi-line --pre-summary-newline $pythonfiles
isort $pythonfiles
black $pythonfiles -l 79
# Only black's exit status is checked; the formatters above run best-effort.
ret=$?
if [ $ret -ne 0 ]; then
echo "Error committing changes: Please format (black) your code."
exit 1
fi
# Re-stage the files the formatters may have rewritten.
git add $pythonfiles
fi
echo "********************FLAKE8***********************"
if [ -z "$pythonfiles" ]
then
echo "No changes in pythonfiles"
else
pip install -q wemake-python-styleguide
# Fetch the shared styleguide config, lint against it, then remove it.
wget "https://raw.githubusercontent.com/ebot7/eb7-styleguide/master/settings/setup_ml_githooks.cfg"
echo $pythonfiles
flake8 $pythonfiles --config=setup_ml_githooks.cfg
ret=$?
rm setup_ml_githooks.cfg
if [ $ret -ne 0 ]; then
echo "Error committing changes: Please check syntax errors."
exit 1
fi
fi
echo "*************************************************"
echo "Ended pre-commit git hook"
| true
|
acf2a58ceedb0497fccba6e7a88f5190b57c02e2
|
Shell
|
provingground-moe/qserv
|
/admin/bin/dreplicate.sh
|
UTF-8
| 2,251
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/sh
# LSST Data Management System
# Copyright 2014 LSST Corporation.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
# Replicate directories to a number of servers. This is a temporary
# solution for data replication on small-scale cluster (or mosly
# for testing simple multi-node setup). This will likely be replaced
# with some cluster management-based solution as we prepare for
# actual deployment.
# @author Andy Salnikov, SLAC
set -e
# Print usage text on stdout.
usage() {
cat << EOD
Usage: `basename $0` [options] path host [host ...]
Available options:
-h this message
-d path use different location on remote hosts
-u name use different remote user name
Copies specified path which can be a single file or a directory to one
or more remote hosts. path argument must specify absolute path name (start
with slash). Directories are replicated recursively.
EOD
}
dest=''
user=''
# get the options
while getopts hd:u: c ; do
case $c in
h) usage ; exit 0 ;;
d) dest="$OPTARG" ;;
u) user="${OPTARG}@" ;;
\?) usage ; exit 2 ;;
esac
done
shift `expr $OPTIND - 1`
# Need at least the source path and one host.
if [ $# -lt 2 ] ; then
usage
exit 2
fi
path=$1
shift
hosts="$@"
case "$path" in
/*) ;;
*) echo "expect absolute path" ; exit 2 ;;
esac
# strip trailing slash
path=$(echo $path | sed 's%\(.*[^/]\)/*%\1%')
# Default destination directory: the source path's parent on the remote side.
test "$dest" || dest=$(dirname "$path")
# NB: --delete makes the remote copy an exact mirror — files absent locally
# are removed on the remote host.
for host in $hosts; do
echo "Syncronizing $path to remote $host:$dest"
rsync --rsh=ssh -a --delete "$path" "$user$host:$dest"
done
| true
|
bb899e5d43d89e04c552dff7f9a5531069f0619b
|
Shell
|
CSC466-hub/WizWaterTest
|
/linpack/vm-libvirt.sh
|
UTF-8
| 2,148
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh
# Run the Intel linpack benchmark inside a libvirt/KVM guest, pinned to one
# or two host CPU sockets via numactl, and append the results to a local log.
# run this on a Linux machine like arldcn24,28
if [ "$#" -ne 1 ]; then
echo "Usage: $0 numberOfSockets (specify as 1 or 2)"
exit 1
fi
# Map the socket count to CPU-pinning / memory-policy options and a vCPU
# count. NOTE(review): $numaopts and $numsmp are only consumed by the
# commented-out kvm invocation below; the virsh path relies on virsh.xml.
if [ "$1" -eq 1 ]; then
numaopts=" --physcpubind=0-7,16-23 --localalloc "
numsmp=16
echo "Running on one socket with numactl $numaopts"
elif [ "$1" -eq 2 ]; then
numaopts=" --physcpubind=0-31 --interleave=0,1 "
numsmp=32
echo "Running on two sockets with numactl $numaopts"
else
echo "Usage: $0 numberOfSockets (specify as 1 or 2)"
exit 1
fi
LIBDIR=../common/vm
SSHOPTS="-i ../common/id_rsa -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no -oConnectionAttempts=60"
VMIP=10.71.1.99
# prepare source disk images
make -C $LIBDIR
# create ephemeral overlay qcow image
# (we probably could have used -snapshot)
IMG=linpack.qcow
qemu-img create -f qcow2 -b $LIBDIR/ubuntu-13.10-server-cloudimg-amd64-disk1.img $IMG
# start the VM & bind port 2222 on the host to port 22 in the VM
#numactl $numaopts kvm -net nic -net user -hda $IMG -hdb $LIBDIR/seed.img -m 100G -smp $numsmp \
#    -nographic -redir :2222::22 >$IMG.log &
# clean out any old VM
sudo virsh destroy linpack
# start the VM
# -- WARNING WARNING WARNING virsh.xml is hardcoded to wmf's home directory --
sudo virsh create virsh.xml
# remove the overlay (qemu will keep it open as needed)
sleep 2
rm -f $IMG
# look at the topology
ssh $SSHOPTS spyre@$VMIP sudo apt-get install -y hwloc
ssh $SSHOPTS spyre@$VMIP lstopo --of console > results/vm.tuned.topo
# copy code in (we could use Ansible for this kind of thing, but...)
rsync -a -e "ssh $SSHOPTS" bin/ spyre@$VMIP:~
# annotate the log
mkdir -p results
log="results/vm.log"
now=`date`
echo "Running linpack, started at $now"
echo "--------------------------------------------------------------------------------" >> $log
echo "Running linpack, started at $now" >> $log
# run linpack
ssh $SSHOPTS spyre@$VMIP ./runme_xeon64 >> $log
# annotate the log
echo "" >> $log
echo -n "Experiment completed at "; date
# shut down the VM
ssh $SSHOPTS spyre@$VMIP sudo shutdown -h now
wait
echo Experiment completed
| true
|
9fa7dac3426dcd518551de15e1e538eb2c912825
|
Shell
|
Paulmicha/decoupled-prototype
|
/cwt/extensions/drupalwt/global.vars.sh
|
UTF-8
| 630
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
##
# Global (env) vars for the 'drupalwt' CWT extension.
#
# This file is used during "instance init" to generate the global environment
# variables specific to current project instance.
#
# 'global' is a CWT helper that registers an env var with its default value —
# see cwt/utilities/global.sh.
#
# @see u_instance_init() in cwt/instance/instance.inc.sh
# @see cwt/utilities/global.sh
# @see cwt/bootstrap.sh
#
# Make the automatic crontab setup for Drupal cron on local host during 'app
# install' opt-in (disabled unless the instance overrides it).
global DWT_USE_CRONTAB "[default]=false"
# [optional] Shorter generated make tasks names.
# @see u_instance_task_name() in cwt/instance/instance.inc.sh
global CWT_MAKE_TASKS_SHORTER "[append]='drupalwt/dwt'"
| true
|
8165b2479d0e3ad59283153aedddb8cfe7cf2383
|
Shell
|
iarhbahsir/todo-nlp
|
/ProcessTODO
|
UTF-8
| 2,733
| 4.25
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactively clone a GitHub repo, list its .java files, and dump one
# change-log file per commit (up to MAX_COMMITS, newest first) into a
# <repo>-Info/ directory for the TodoReader Java tool to mine TODOs from.
#
# NOTE(review): $userName/$repoName/$toolPath are interpolated unquoted
# throughout — inputs containing spaces or shell metacharacters would break;
# confirm the inputs are constrained before hardening.
#
# take input for desired username/repo
# git clone desired repo
# find all .java files in cloned directory
# output list of java files to a .txt file
# run TodoReader java tool on the file of results to:
# write all TODO tasks to a .txt file (name based on commit #, new file for every commit, new folder for every repo, create folder if doesn't exist)
cd
#echo started
#if [[ $1 == ""]]; then
echo "Enter username"
read userName
echo "Enter repo name"
read repoName
echo "Enter path to src file containing todoReader package"
read toolPath
#else
#	username=$1
#	repoName=$2
#	toolPath=$3
#fi
# "workspace/GitHub\ Data\ Collection/src" used as toolPath for testing
CLONED_DIR="Cloned"
mkdir ${CLONED_DIR}
cd ${CLONED_DIR}
pathInput=$(pwd)
# Start from a fresh clone every run.
if test -d $repoName; then
rm -rf $repoName
echo deleted
fi
git clone https://github.com/$userName/$repoName $repoName
# Check if we successfully cloned.
if [ ! -d $repoName ]; then
echo "There is not directory cloned $repoName"
exit 1
fi
# Create file listing all java files in repo
INFO_SUF="-Info"
JAVAFILE_SUF="-JavaFiles.txt"
if test -d $repoName$INFO_SUF; then
rm -rf $repoName$INFO_SUF
echo "info deleted"
fi
mkdir $repoName$INFO_SUF
cd $repoName
cur=$(pwd)
find -name "*.java" > $cur$INFO_SUF/$repoName$JAVAFILE_SUF
echo "created list of java files in directory $repoName$INFO_SUF"
# Write each change log to a new file in -Info directory
MAX_COMMITS=500
numCommits=$(git rev-list HEAD | wc -l)
if [ $numCommits -gt $MAX_COMMITS ]; then
numCommits=$MAX_COMMITS
fi
echo "Previous $numCommits commits read"
# One output file per commit: commit date, hash, then the diff against the
# previous commit. File index counts back from HEAD (newest = highest).
for((i=1; i<$numCommits; i++)); do
declare -i j=$i-1
declare -i fileNum=$numCommits-$i
file="${repoName}-Output-$fileNum.txt"
date=$(git log --date=iso-strict --pretty=format:"%cd" | sed -n "${i}p")
echo $date >> $file
git rev-list HEAD | sed -n "${i}p" >> ${file}
git diff HEAD~$i..HEAD~$j >> ${file}
mv $file ${cur}$INFO_SUF
done
# The oldest captured commit has no parent inside the window, so it gets
# full file snapshots (delimiter + filename + contents) instead of a diff.
file="${repoName}-Output-0.txt"
oldestCommit=$(git rev-list HEAD | sed -n "${numCommits}p")
date=$(git log --date=iso-strict --pretty=format:"%cd" | sed -n "${numCommits}p")
echo $date >> $file
git rev-list HEAD | sed -n "${numCommits}p" >> ${file}
while IFS='' read -r line || [[ -n "$line" ]]; do
# place delimiter then file name
echo "!D@E#L\$I%M^I&T*E(R)" >> $file
echo $line >> $file
git show $oldestCommit:$line >> $file
done < "$cur-Info/$repoName$JAVAFILE_SUF"
mv $file ${cur}-Info
echo "Setup done."
# Run TodoReader java tool
#cd
#cd "$toolPath"
#javac -d ../bin -classpath ../bin todoReader/TodoReader.java
#cd ..
#cd bin
#java todoReader/TodoReader "$pathInput" "$repoName"
echo "Process completed."
| true
|
9b9340e9fd4e3f63dff91ce66c10576e3d73d2ec
|
Shell
|
bboykov/dotfiles
|
/scripts/dotfiles-install-homebrew-packages
|
UTF-8
| 1,681
| 3.671875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'
readonly DOTFILES_HOME="${HOME}/dotfiles"
readonly DOTFILES_CONFIG="${DOTFILES_HOME}/config"
# shellcheck source=../lib/functions.bash
source "${DOTFILES_HOME}/lib/functions.bash"
set +e
command -v brew >/dev/null 2>&1
brew_installed=$?
set -e
if [[ ${brew_installed} -eq 0 ]] ; then
util::info_message "homebrew is already installed"
else
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
source "${HOME}/.bashrc"
fi
platform="$(util::detect_platform)"
case "${platform}" in
ubuntu)
default_homebrew_packages=(
gcc
shfmt
kubie
k9s
)
;;
wsl-ubuntu)
default_homebrew_packages=(
gcc
shfmt
)
;;
fedora)
default_homebrew_packages=(
shfmt
todo-txt
)
;;
macos)
default_homebrew_packages=(
bash-completion@2
gcc
shfmt
vim
wget
curl
todo-txt
k9s
kubie
pyenv
tfenv
the_silver_searcher
htop
shellcheck
)
;;
esac
if [[ -n $* ]]; then
homebrew_packages=("$@")
else
homebrew_packages=("${default_homebrew_packages[@]}")
fi
homebrew_packages_sring=$(printf "%s " "${homebrew_packages[@]}")
util::info_message "Installing packages: ${homebrew_packages_sring}"
if ((${SET_DEBUG:-0})); then
brew update
brew install "${homebrew_packages[@]}"
else
brew update >/dev/null 2>&1
brew install -q "${homebrew_packages[@]}" >/dev/null 2>&1
fi
# util::info_message "Reinstall vim."
# # brew remove vim
# brew cleanup
# brew install vim --with-python
util::info_message "Packages installed."
| true
|
1ebe5d9df1e9414ff0ba09266d68f1fb29bcf274
|
Shell
|
skoef/zfsync
|
/lib/common
|
UTF-8
| 1,889
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Shared helper library for zfsync: usage/help output, leveled logging
# (debug/warn/error/msg) and a PATH lookup helper.
#
# The logging helpers expect ZF_DEBUG and ZF_SILENT to be set to 0 or 1 by
# the including script before they are called.
#
# Include guard (unset-safe): sourcing this file twice is a no-op.
if [ -z "${LIB_COMMON_INCLUDED:-}" ]; then
LIB_COMMON_INCLUDED=1

# Print usage on stderr. With a non-empty message argument, print it as an
# error first and exit 1; without one, just show the help text.
zf_usage() {
	local msg=$1
	[ -n "${msg}" ] && echo "Error: ${msg}" >&2
	cat >&2 << EOT
Usage: zfsync [-dhiPq] [-U target_user] [-I identity_file] [-k retention]
[-K retention] -H target_host -T target_zfs
source_zfs
source_zfs is the filesystem that should be synced to a remote host
Options:
-d enable debugging (negates -q)
-h show you this help page
-H target_host host to send snapshots to [required]
-i send incremental stream between local and remote
snapshot instead of a stream package of all
intermediary snapshots
-I identity_file use this key for authentication
-k local_retention after synchronizing, apply local retention
-K remote_retention after synchronizing, apply remote retention
-P do not set user properties on remote end
-q be more quiet
-T target_zfs ZFS on target host to send snapshots to [required]
-U target_user alternative user to authenticate as on remote host
EOT
	[ -n "${msg}" ] && exit 1
}

# Timestamped debug line on stderr when ZF_DEBUG=1; always returns 0.
# BUG FIX: all printf helpers now use "$*" instead of "${@}" — with "${@}"
# printf recycled the format string per argument, so multi-word messages
# produced one prefixed line per word.
zf_debug() {
	[ "${ZF_DEBUG}" -eq 1 ] && \
		printf "[%s] Debug: %s\n" "$(date)" "$*" >&2
	return 0
}

# Timestamped warning on stderr; execution continues.
zf_warn() {
	printf "[%s] Warning: %s\n" "$(date)" "$*" >&2
}

# Timestamped error on stderr, then terminate the script with status 1.
zf_error() {
	printf "[%s] Error: %s\n" "$(date)" "$*" >&2
	exit 1
}

# Timestamped info line on stderr unless silenced (ZF_SILENT=1 suppresses it
# except when ZF_DEBUG=1); always returns 0.
zf_msg(){
	[ "${ZF_SILENT}" -eq 0 ] || [ "${ZF_DEBUG}" -eq 1 ] && \
		printf "[%s] %s\n" "$(date)" "$*" >&2
	return 0
}

# Return 0 if $1 resolves to an executable on PATH, 1 otherwise.
# Uses POSIX `command -v` (the original shelled out to `which`), and quotes
# the path so names containing spaces don't break the -x test.
zf_has_binary() {
	local bin=$1 path
	path=$(command -v "${bin}" 2>/dev/null)
	if [ -z "${path}" ] || [ ! -x "${path}" ]; then
		zf_debug "${bin} not found in PATH"
		return 1
	fi
	zf_debug "${bin} found in PATH"
}
fi
| true
|
736751887a22dfd865c8fcf67c8fe25013adb7d2
|
Shell
|
JJRcop/newSS13tools
|
/auto/mapmerge.sh
|
UTF-8
| 290
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Back up every .dmm map under <repo>/_maps (as <map>.backup), then run the
# repo's map merger with its interactive answers fed on stdin.
#
# Usage: mapmerge.sh <repo-dir>
if [ "$1" != "" ]; then
echo "Dir is $1"
else
echo "Positional parameter 1 is empty"
exit;
fi
# BUG FIX: walk the files NUL-delimited; the original word-split the output
# of $(find ...), which breaks on map paths containing spaces.
while IFS= read -r -d '' map; do
echo "$map"
cp -f -- "$map" "$map.backup"
done < <(find "$1/_maps" -type f -name '*.dmm' -print0)
# "0-3,5-122" then a blank line answer the merger's prompts.
echo -en "0-3,5-122\n\n" | python3 "$1/tools/mapmerge/mapmerger.py" "$1/_maps" 0;
| true
|
8511d465bcdcd4d394c466ba9bf171460d05f847
|
Shell
|
victorwang0526/wtglj-app
|
/travis/build-ios.sh
|
UTF-8
| 217
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash -v
set -e

# Build the Ionic app for iOS.
# The develop branch gets a plain (debug) build; every other branch gets a
# production release build.
ionic cordova platform add ios --nofetch

case "$TRAVIS_BRANCH" in
  develop)
    ionic cordova build ios
    ;;
  *)
    ionic cordova build ios --prod --release
    ;;
esac
| true
|
2eab0fc114d881ca338e870fa233c88bcaa6cbe2
|
Shell
|
vlttnv/Tophat
|
/run_consumers.sh
|
UTF-8
| 1,102
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# The script runs multiple consumers
# To run the script:
# chmod a+x producers.sh
# ./producers.sh balancer_addr balancer_port producer_id num_producer
# $1 = balancer_addr : IP address of the balancer
# $2 = balancer_port : Port number of the balancer
# $3 = producer_id : ID of the starting producer
# $4 = num_producer : Number of producers
# e.g. ./consumers.sh 138.251.206.64 5000 10 5
#
# Consumers will request data from producers 10, 11, 12, 13, 14.
# Each consumer will request data from only one producer.
# All requests will be directed to 138.251.206.64:5000
balancer_addr=$1
balancer_port=$2
producer_id=$3
num_producer=$4
min=$producer_id
max=$(($min + $num_producer - 1))
# Kill every spawned consumer process. The [p]ython trick stops grep from
# matching its own process.
kill_all()
{
kill $(ps aux | grep '[p]ython client/consumer.py' | awk '{print $2}')
}
# BUG FIX: the function definition and trap used to sit *after* the infinite
# loop below, so they were unreachable and Ctrl-C left consumers running.
# Install the handler before spawning anything, and exit once cleaned up.
trap "kill_all; exit 130" SIGINT
printf '\nRunning consumers - Request data from producers %s - %s.\n' "$min" "$max"
# Endlessly (re)spawn one background consumer per producer id.
while :
do
for i in $(seq $min $max)
do
printf 'Running consumer - Request data from producer %s.\n' "$i"
python client/consumer.py $balancer_addr $balancer_port $i &
sleep 0.1
done
done
wait
| true
|
37475aba1d4cffc06ea8ac918c99d61813e071bb
|
Shell
|
draschke/dotfiles
|
/scripts/cache
|
UTF-8
| 948
| 4.4375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# cache — memoize another command's stdout.
#
# Prefix any command with `cache` and its output is stored under /tmp and
# replayed on later invocations. Entries expire after $CACHE_MINS minutes
# (default 5), or the value given with -m/--mins, e.g. `cache -m 10 cmd...`.
# Cache files live in /tmp, prefixed with this script's name.
declare tmpdir=/tmp
declare prefix
prefix=$(basename "$0")

main() {
  local lifetime_mins=${CACHE_MINS:-5}
  local key entry

  # The cache key is the md5 of the *full* argument list as given, so any
  # -m/--mins option is part of the key.
  key=$(echo "$@" | md5sum | cut -d' ' -f 1)
  entry="${tmpdir}/${prefix}.${key}"

  # Optional leading -m/--mins N overrides the entry lifetime.
  if [[ "$1" == "-m" || "$1" == "--mins" ]]; then
    shift
    lifetime_mins=$1
    shift
  fi

  # Replay a fresh cache entry; otherwise run the command for real and
  # capture its output (tee shows it while writing the cache file).
  if [[ -f "$entry" ]] && [[ -z "$(find "$entry" -mmin +"$lifetime_mins")" ]]; then
    cat "$entry"
  else
    "$@" | tee "$entry"
  fi
}

main "$@"
| true
|
74d93a0a8304b3257acbea8091cca92b26a0ac2b
|
Shell
|
bellmit/pmc-repo
|
/FinPreMerge/bin/finRSIScan.sh
|
UTF-8
| 1,600
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/sh
# Pre-merge RowSetIterator (RSI) scan: runs RSIPremergeChecker over the
# transaction's files and appends a mail-ready report to ${destdir}.
# Expects debug, destdir, txn, txnFiles and ADE_VIEW_ROOT to be provided by
# the calling pre-merge driver.
#
# Default debug to 0 so an unset value no longer makes the test error out.
if [ "${debug:-0}" -eq 1 ];then
set -x
fi
tstart_rsi="$(date +%s)"
#txn=`ade pwv|grep VIEW_TXN_NAME |cut -d: -f2|tr -d [\ ]`
#destdir=$ADE_VIEW_ROOT/fusionapps
rsioutfileformail="$destdir"/"$txn"_rsi.out
errCd13=0
rsioutfile="RSI_issues.csv"
echo -e "" > "$rsioutfileformail"
echo -e "RowSetIterator Scan Output" >> "$rsioutfileformail"
echo -e "==================================================" >> "$rsioutfileformail"
#echo -e "Running RowSetIterator Scan for the transaction $txn" >> $rsioutfileformail
#echo -e "" >> $rsioutfileformail
# The checker writes its findings to RSI_issues.csv in the CWD.
java -classpath "$ADE_VIEW_ROOT/fatools/opensource/lib/JarJAuditFixer.jar:$ADE_VIEW_ROOT/fatools/opensource/lib/JarSpreadsheetHelper.jar:$ADE_VIEW_ROOT/fmwtools/BUILD_HOME/oracle_common/modules/oracle.xdk_11.1.0/xml.jar:$ADE_VIEW_ROOT/fmwtools/BUILD_HOME/oracle_common/modules/oracle.xdk_11.1.0/xmlparserv2.jar:$ADE_VIEW_ROOT/fmwtools/BUILD_HOME/modules/javax.mail_1.1.0.0_1-4-1.jar:$ADE_VIEW_ROOT/fmwtools/BUILD_HOME/wlserver_10.3/server/ext/jdbc/oracle/11g/ojdbc5.jar" oracle.apps.rup2scans.RSIPremergeChecker "$txnFiles" "$ADE_VIEW_ROOT"
errCd13=$?
# Any "FileName:" row in the CSV means the scan found issues.
# (Was: if `grep -q ...` — grep run via command substitution and its empty
# output executed as a command; call grep directly instead.)
if grep -q "FileName:" "$rsioutfile"
then
errCd13=1
fi
echo -e "" >> "$rsioutfileformail"
cat "$rsioutfile" >> "$rsioutfileformail"
echo -e "" >> "$rsioutfileformail"
tstop_rsi="$(date +%s)"
telapsed_rsi="$(expr $tstop_rsi - $tstart_rsi)"
echo -e "Completed for transaction $txn." >> "$rsioutfileformail"
echo -e "Elapsed time: $(date -d "1970-01-01 $telapsed_rsi sec" +%H:%M:%S)" >> "$rsioutfileformail"
echo -e "" >> "$rsioutfileformail"
exit $errCd13
| true
|
ea4d460be6c98d2a20576b39edcc228bdd5daa5a
|
Shell
|
dpb587/elasticsearch-archive-bosh-release
|
/packages/elasticsearch-archiver/packaging
|
UTF-8
| 550
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# BOSH packaging script for the elasticsearch-archiver package: builds jq and
# Node.js from source into $BOSH_INSTALL_TARGET, unpacks elasticdump and
# links its CLIs, and ships the Minio client (mc).
exec 2>&1
set -e
set -u
# Build jq into the package prefix.
tar -xzf jq/jq-*.tar.gz
( cd jq-*
./configure \
--prefix="$BOSH_INSTALL_TARGET" \
--disable-maintainer-mode
make
make install
)
# Build Node.js (runtime for elasticdump) into the same prefix.
tar -xzf nodejs/node-v*.tar.gz
( cd node-*
./configure --prefix="$BOSH_INSTALL_TARGET"
make
make install
)
# Unpack elasticdump under lib/ and expose its executables in bin/.
tar -xzf elasticdump/elasticdump-*.tar.gz --strip-components 1 -C "$BOSH_INSTALL_TARGET/lib"
ln -s $BOSH_INSTALL_TARGET/lib/node_modules/.bin/* "$BOSH_INSTALL_TARGET/bin"
# mc ships as a prebuilt binary; just copy it in.
cp mc/mc "$BOSH_INSTALL_TARGET/bin/mc"
chmod +x "$BOSH_INSTALL_TARGET/bin/mc"
| true
|
2c20cb2759e79bc72bcebd80133342777ddbdbcb
|
Shell
|
guardam/slap
|
/examples/zsh/complete
|
UTF-8
| 496
| 2.640625
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/usr/bin/env zsh
# Manual test driver for slap's zsh integration: locate complete.yml next to
# this script, parse "$@" against it, then dump every captured value.
# ${(%):-%N} expands to this script's path (zsh prompt-expansion idiom).
config="$(cargo r -q -- path -d "${(%):-%N}")/../complete.yml"
# The parser emits shell code that defines the _*_vals variables; eval it.
eval "$(cargo r -q -- parse zsh _ -- "$@" <"$config")"
# _success is set by the eval'd output only when parsing succeeded.
[[ -z "$_success" ]] && exit 1
printf '%s\n' \
"opt = '${_opt_vals[@]}'
pos = '$_pos_vals'
flag = '${_flag_vals[@]}'
mode = '$_mode_vals'
mvals = '$_mvals_vals'
minvals = '${_minvals_vals[@]}'
maxvals = '${_maxvals_vals[@]}'
subcommand -> '$_subcommand'
subcmd_scopt = '${_subcmd_scopt_vals[@]}'
subcmd_scpos1 = '$_subcmd_scpos1_vals'"
| true
|
9ae7b98f10a783f49b687fb6f3ea7026b8602288
|
Shell
|
KarmaGYZ/mesos_test
|
/run_mesos_test.sh
|
UTF-8
| 7,754
| 2.796875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Driver that runs Flink's end-to-end test suite against a Mesos setup.
# It exports the environment the upstream flink-end-to-end test scripts
# expect, then executes each test through run_test (defined in the
# sourced test-runner-common.sh).
# Common variable
MESOS_TEST_DIR="`dirname \"$0\"`" # relative
export MESOS_TEST_DIR="`( cd \"${MESOS_TEST_DIR}\" && pwd -P)`" # absolutized and normalized
# User specify configuration of mesos
export MESOS_MASTER="127.0.0.1:5050"
export REST_PORT="8077"
source "$(dirname "$0")"/config
# FLINK_HOME=/Users/yangze/Desktop/flink
export FLINK_DIR=${FLINK_HOME}/build-target
export TEST_DATA_DIR=${MESOS_TEST_DIR}/out
export END_TO_END_DIR=${FLINK_HOME}/flink-end-to-end-tests
export TEST_INFRA_DIR=${END_TO_END_DIR}/test-scripts
source "${MESOS_TEST_DIR}/test-runner-common.sh"
echo "flink-end-to-end-test directory: $END_TO_END_DIR"
echo "Flink distribution directory: $FLINK_DIR"
# run_test <description> <command> [skip_check_exceptions]
# (provided by test-runner-common.sh; the optional third argument is
# presumably a flag to skip log-exception checks — confirm there).
################################################################################
# Checkpointing tests
################################################################################
run_test "Resuming Savepoint (file, async, no parallelism change) end-to-end test" "$MESOS_TEST_DIR/test_resume_savepoint.sh 2 2 file true"
run_test "Resuming Savepoint (file, sync, no parallelism change) end-to-end test" "$MESOS_TEST_DIR/test_resume_savepoint.sh 2 2 file false"
run_test "Resuming Savepoint (file, async, scale up) end-to-end test" "$MESOS_TEST_DIR/test_resume_savepoint.sh 2 4 file true"
run_test "Resuming Savepoint (file, sync, scale up) end-to-end test" "$MESOS_TEST_DIR/test_resume_savepoint.sh 2 4 file false"
run_test "Resuming Savepoint (file, async, scale down) end-to-end test" "$MESOS_TEST_DIR/test_resume_savepoint.sh 4 2 file true"
run_test "Resuming Savepoint (file, sync, scale down) end-to-end test" "$MESOS_TEST_DIR/test_resume_savepoint.sh 4 2 file false"
run_test "Resuming Savepoint (rocks, no parallelism change, heap timers) end-to-end test" "$MESOS_TEST_DIR/test_resume_savepoint.sh 2 2 rocks false heap"
run_test "Resuming Savepoint (rocks, scale up, heap timers) end-to-end test" "$MESOS_TEST_DIR/test_resume_savepoint.sh 2 4 rocks false heap"
run_test "Resuming Savepoint (rocks, scale down, heap timers) end-to-end test" "$MESOS_TEST_DIR/test_resume_savepoint.sh 4 2 rocks false heap"
run_test "Resuming Savepoint (rocks, no parallelism change, rocks timers) end-to-end test" "$MESOS_TEST_DIR/test_resume_savepoint.sh 2 2 rocks false rocks"
run_test "Resuming Savepoint (rocks, scale up, rocks timers) end-to-end test" "$MESOS_TEST_DIR/test_resume_savepoint.sh 2 4 rocks false rocks"
run_test "Resuming Savepoint (rocks, scale down, rocks timers) end-to-end test" "$MESOS_TEST_DIR/test_resume_savepoint.sh 4 2 rocks false rocks"
run_test "Resuming Externalized Checkpoint (file, async, no parallelism change) end-to-end test" "$MESOS_TEST_DIR/test_resume_externalized_checkpoints.sh 2 2 file true true"
run_test "Resuming Externalized Checkpoint (file, sync, no parallelism change) end-to-end test" "$MESOS_TEST_DIR/test_resume_externalized_checkpoints.sh 2 2 file false true"
run_test "Resuming Externalized Checkpoint (file, async, scale up) end-to-end test" "$MESOS_TEST_DIR/test_resume_externalized_checkpoints.sh 2 4 file true true"
run_test "Resuming Externalized Checkpoint (file, sync, scale up) end-to-end test" "$MESOS_TEST_DIR/test_resume_externalized_checkpoints.sh 2 4 file false true"
run_test "Resuming Externalized Checkpoint (file, async, scale down) end-to-end test" "$MESOS_TEST_DIR/test_resume_externalized_checkpoints.sh 4 2 file true true"
run_test "Resuming Externalized Checkpoint (file, sync, scale down) end-to-end test" "$MESOS_TEST_DIR/test_resume_externalized_checkpoints.sh 4 2 file false true"
run_test "Resuming Externalized Checkpoint (rocks, non-incremental, no parallelism change) end-to-end test" "$MESOS_TEST_DIR/test_resume_externalized_checkpoints.sh 2 2 rocks true false"
run_test "Resuming Externalized Checkpoint (rocks, incremental, no parallelism change) end-to-end test" "$MESOS_TEST_DIR/test_resume_externalized_checkpoints.sh 2 2 rocks true true"
run_test "Resuming Externalized Checkpoint (rocks, non-incremental, scale up) end-to-end test" "$MESOS_TEST_DIR/test_resume_externalized_checkpoints.sh 2 4 rocks true false"
run_test "Resuming Externalized Checkpoint (rocks, incremental, scale up) end-to-end test" "$MESOS_TEST_DIR/test_resume_externalized_checkpoints.sh 2 4 rocks true true"
run_test "Resuming Externalized Checkpoint (rocks, non-incremental, scale down) end-to-end test" "$MESOS_TEST_DIR/test_resume_externalized_checkpoints.sh 4 2 rocks true false"
run_test "Resuming Externalized Checkpoint (rocks, incremental, scale down) end-to-end test" "$MESOS_TEST_DIR/test_resume_externalized_checkpoints.sh 4 2 rocks true true"
run_test "Resuming Externalized Checkpoint after terminal failure (file, async) end-to-end test" "$MESOS_TEST_DIR/test_resume_externalized_checkpoints.sh 2 2 file true false true"
run_test "Resuming Externalized Checkpoint after terminal failure (file, sync) end-to-end test" "$MESOS_TEST_DIR/test_resume_externalized_checkpoints.sh 2 2 file false false true"
run_test "Resuming Externalized Checkpoint after terminal failure (rocks, non-incremental) end-to-end test" "$MESOS_TEST_DIR/test_resume_externalized_checkpoints.sh 2 2 rocks true false true"
run_test "Resuming Externalized Checkpoint after terminal failure (rocks, incremental) end-to-end test" "$MESOS_TEST_DIR/test_resume_externalized_checkpoints.sh 2 2 rocks true true true"
################################################################################
# Miscellaneous
################################################################################
run_test "Flink CLI end-to-end test" "$MESOS_TEST_DIR/test_cli.sh"
run_test "Heavy deployment end-to-end test" "$MESOS_TEST_DIR/test_heavy_deployment.sh" "skip_check_exceptions"
run_test "Queryable state (rocksdb) end-to-end test" "$MESOS_TEST_DIR/test_queryable_state.sh rocksdb"
run_test "DataSet allround end-to-end test" "$MESOS_TEST_DIR/test_batch_allround.sh"
run_test "Batch SQL end-to-end test" "$MESOS_TEST_DIR/test_batch_sql.sh"
run_test "Streaming SQL end-to-end test (Old planner)" "$MESOS_TEST_DIR/test_streaming_sql.sh old" "skip_check_exceptions"
run_test "Streaming SQL end-to-end test (Blink planner)" "$MESOS_TEST_DIR/test_streaming_sql.sh blink" "skip_check_exceptions"
run_test "Stateful stream job upgrade end-to-end test" "$MESOS_TEST_DIR/test_stateful_stream_job_upgrade.sh 2 4"
run_test "Walkthrough Table Java nightly end-to-end test" "$MESOS_TEST_DIR/test_table_walkthroughs.sh java"
run_test "Walkthrough Table Scala nightly end-to-end test" "$MESOS_TEST_DIR/test_table_walkthroughs.sh scala"
run_test "Walkthrough DataStream Java nightly end-to-end test" "$MESOS_TEST_DIR/test_datastream_walkthroughs.sh java"
run_test "Walkthrough DataStream Scala nightly end-to-end test" "$MESOS_TEST_DIR/test_datastream_walkthroughs.sh scala"
run_test "State TTL Heap backend end-to-end test" "$MESOS_TEST_DIR/test_stream_state_ttl.sh file"
run_test "State TTL RocksDb backend end-to-end test" "$MESOS_TEST_DIR/test_stream_state_ttl.sh rocks"
run_test "ConnectedComponents iterations with high parallelism end-to-end test" "$MESOS_TEST_DIR/test_high_parallelism_iterations.sh 25"
run_test "State Migration end-to-end test from 1.6" "$MESOS_TEST_DIR/test_state_migration.sh"
run_test "State Evolution end-to-end test" "$MESOS_TEST_DIR/test_state_evolution.sh"
run_test "Wordcount end-to-end test" "$MESOS_TEST_DIR/test_batch_wordcount.sh file"
run_test "class loading end-to-end test" "$MESOS_TEST_DIR/test_streaming_classloader.sh"
run_test "Distributed cache end-to-end test" "$MESOS_TEST_DIR/test_streaming_distributed_cache_via_blob.sh"
# run_test "TPC-H end-to-end test (Blink planner)" "$END_TO_END_DIR/test-scripts/test_tpch.sh" TODO
# Reaching this point means no run_test aborted the script.
printf "\n[PASS] All tests passed\n"
exit 0
| true
|
dcec1b70d7d7d3d25d799828520c26e5afc5bcc0
|
Shell
|
timbertson/opam2nix-packages
|
/gup/.opam-repository.stamp.gup
|
UTF-8
| 426
| 3.21875
| 3
|
[] |
no_license
|
#!bash -eu
# gup build rule: write a deterministic identifier for the current state
# of the opam-repository checkout (HEAD plus any uncommitted changes)
# into the output stamp file ($1), so downstream targets rebuild only
# when the repository content actually changes.
cd opam-repository/packages
function get_commit() {
(
# `git stash create` makes a dangling commit capturing uncommitted
# changes without touching the working tree. All author/committer
# identity and date fields are pinned so the resulting hash is
# reproducible for identical content. An empty result means the tree
# is clean, in which case plain HEAD identifies the state.
commit="$(env \
GIT_AUTHOR_NAME="nobody" \
GIT_AUTHOR_EMAIL="nobody@example.org" \
GIT_AUTHOR_DATE='1970-01-01T00:00:00Z' \
GIT_COMMITTER_NAME="nobody" \
GIT_COMMITTER_EMAIL="nobody@example.org" \
GIT_COMMITTER_DATE='1970-01-01T00:00:00Z' \
git stash create || true)"
if [ -z "$commit" ]; then
git rev-parse HEAD
else
echo "$commit"
fi
)
}
# gup passes the temporary output file path as $1.
get_commit > "$1"
| true
|
6904ad988de18153100e2e6b81ba6365d4c65ae9
|
Shell
|
mmpyro/swarm-queen
|
/entrypoint.sh
|
UTF-8
| 422
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint for the swarm-queen orchestrator.
#
# Usage: entrypoint.sh <mode>
#   <mode> must be "apply" (provision) or "destroy" (tear down); it is
#   forwarded to main.py as --mode.
#
# Fix: the original compared strings with the arithmetic operator -eq
# ([[ $1 -eq 'apply' ]]). In arithmetic context non-numeric strings
# evaluate to 0, so the comparison was always true (0 -eq 0) and the
# "allowed value" error branch was unreachable. String equality (==)
# is used instead.
if [[ $# -lt 1 ]]
then
    echo 'mode argument required. apply | destroy'
    exit 1
elif [[ $1 == 'apply' || $1 == 'destroy' ]]
then
    # Run from the orchestrator source tree; abort if it is missing
    # rather than launching main.py from the wrong directory.
    cd /home/operations/orchestrator/src || exit 1
    python3 ./main.py --config /home/operations/config.json --terraform-dir /home/operations/terraform --ansible-dir /home/operations/ansible --mode "$1"
else
    echo 'allowed value for parameter is apply or destroy'
    exit 1
fi
| true
|
74227e864b3b3ea2e52a3bdfb9af2eed402c0989
|
Shell
|
jagumiel/Server_setup
|
/6_Test_PHP.sh
|
UTF-8
| 1,032
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# PHP smoke test: publishes a phpinfo() page under /var/www, opens it in
# the default browser, and asks the user (via dialog) whether it worked.
username="$(id -u -n)"
sudo adduser $username www-data # Add the current user to the www-data group
#sudo chown $username.users /var/www
#sudo chown -R www-data:www-data /var/www
sudo chmod -R g+rwx /var/www # Grant read/write/execute to the group
sudo chmod 777 -R /var/www/
#sudo cp testphp.php /var/www/
echo "<?php phpinfo();?>" > /var/www/testphp.php # Plain redirection fails without write permission, and prefixing sudo does not help (the redirection runs in the unprivileged shell) — hence the chmod 777 above
sensible-browser /var/www/testphp.php # Firefox would also work, but prefer the default browser, which is known to be installed.
sudo chmod 755 -R /var/www/ # Restore rwx for the owner and rx for everyone else.
# Ask the user to confirm; dialog --yesno returns 0=yes, 1=no, 255=ESC.
dialog --title "Test PHP" \
--yesno "Ha aparecido el archivo php en su navegador para descargar y tiene una linea codigo dentro?" 7 60
respuesta3=$?
case $respuesta3 in
0) dialog --infobox "Prueba satisfactoria. \n *Ha pasado el test." 8 34
sleep 3;;
1) dialog --infobox "Prueba insatisfactoria. *No ha superado el test." 8 34
sleep 3;;
255) echo "Tecla [ESC] pulsada.";;
esac
#etc.
| true
|
a3ece638228d67c56b5eca99fabd696e081d2973
|
Shell
|
sviret/cmsmib
|
/ProcessData/batch/data_rawextractor.sh
|
UTF-8
| 2,844
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
################################################
#
# data_rawextractor.sh
#
# Script invoked by runDQ_extraction_fillraw_fast.sh
# Extract the information for a given Fill part
#
# --> List of inputs:
#
# ${1}: the path leading to the datafiles
# ${2}: the fill number
# ${3}: the list of run in the fill
# ${4}: the global tag to be used
# ${5}: the main release directory
# ${6}: the storage directory
# ${7}: the first RAW file to process
# ${8}: the last RAW file to process
# ${9}: the job rank in the process
# ${10}: the total number of jobs
# ${11}: the type of run (collision of interfill)
#
# Author: Seb Viret <viret@in2p3.fr> (26/10/2011)
#
# More info on MIB monitoring:
#
# http://cms-beam-gas.web.cern.ch
#
#################################################
# First set some environment variables
#
CMSSW_PROJECT_SRC=${5}
STEP=ProcessData
YEAR=2012
TOP=$PWD
# Enter the CMSSW release area and set up its runtime environment.
cd $CMSSW_PROJECT_SRC
export SCRAM_ARCH=slc5_amd64_gcc462
eval `scramv1 runtime -sh`
cd $TOP
rootdir=${1}
fillnum=${2}
nfiles=${3}
# The run list arrives underscore-separated; convert it to one run
# number per line.
rm temp
echo "$nfiles" > temp
runlist=`cat temp | sed 's/_/\n/g;'`
echo $runlist
nfirst=${7}
nlast=${8}
# compteur counts every candidate file; compteur_real only those in the
# [nfirst, nlast] window that were actually added to the job config.
compteur=0
compteur_real=0
echo $nfirst,$nlast
rm *.root
cp $CMSSW_PROJECT_SRC/$STEP/test/BH_data_procraw_${11}_BASE.py BH_dummy.py
for f in $runlist
do
# Run numbers are split into the castor directory layout: first three
# digits / last three digits.
first=`echo $f | cut -b1-3`
last=`echo $f | cut -b4-6`
# Here we put the list of files into the python script
no_data=`nsls -l $rootdir$first/$last | wc -l`
if [ $no_data = 0 ]; then
echo 'Run '$f,' is empty, skip it'
continue
fi
for l in `nsls $rootdir$first/$last`
do
# Skip zero-size files (size column " 0 " in nsls -l output).
is_empty=`nsls -l $rootdir$first/$last/$l | grep ' 0 ' | wc -l`
if [ $is_empty = 1 ]; then
echo 'File ',$rootdir$first/$last/$l,' is empty, skip it'
compteur=$(( $compteur + 1))
continue
fi
echo $f,$compteur,$compteur_real
# Only files whose index falls in this job's window are appended to
# the python config (the INPUTFILENAME placeholder is re-expanded on
# every insertion so the list grows).
if (( $nfirst <= $compteur )) && (( $compteur <= $nlast )); then
compteur_real=$(( $compteur_real + 1))
fname="'rfio:$rootdir$first/$last/$l'"
sed "s%INPUTFILENAME%$fname,INPUTFILENAME%" -i BH_dummy.py
fi
compteur=$(( $compteur + 1))
done
done
# Drop the trailing placeholder and substitute the global tag.
sed "s%,INPUTFILENAME%%" -i BH_dummy.py
sed "s/MYGLOBALTAG/${4}/" -i BH_dummy.py
OUTPUT_NAME=MIB_data_result_${fillnum}_${9}_${10}.root
# Launch the job
cmsRun BH_dummy.py 2> out.txt
# Recover the data and check that there was no castor problem
# during the process
# NOTE(review): each successfully processed file is assumed to produce
# exactly 3 'rfio' lines in the cmsRun log — confirm for the CMSSW
# release in use.
nprocfiles=`grep 'rfio' out.txt | wc -l`
ntots=$((3*$compteur_real))
# If there is no error we store the file, otherwise we send an error email
if [ "$ntots" = "$nprocfiles" ]; then
xrdcp extracted.root root://castorcms/${6}/$OUTPUT_NAME
else
mutt -s '[MIB DQ]:Run '${2}_${9}_${10}' problematic: '$nprocfiles'/'$ntots cms.mib@cern.ch < /dev/null
fi
# Remove this job's submission script now that it has run.
rm $CMSSW_PROJECT_SRC/$STEP/batch/TMP_FILES/data_extrRAW_${11}_${fillnum}_${9}_E.sh
| true
|
9403d7856abb55934158617ec695c33877adb175
|
Shell
|
Phil-Friderici/puppet-module-pam
|
/tests/vagrant_test_all.sh
|
UTF-8
| 375
| 3.203125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# Exercise every VirtualBox machine defined in the Vagrantfile, one at a
# time: bring it up, verify ssh connectivity, then destroy it. Any
# failing command aborts the run (-e).
systems=$(vagrant status | grep virtualbox | awk '{print $1}')
echo -e "\nsystems to test:\n${systems}"
for machine in $systems; do
  echo -e "\n\n\n========= Running: vagrant up ${machine}"
  vagrant up "$machine"
  echo -e "\n\n\n========= Testing ssh to ${machine}"
  vagrant ssh "$machine" -c "exit"
  echo -e "\n\n\n========= Running: vagrant destroy -f ${machine}"
  vagrant destroy -f "$machine"
done
| true
|
b7212ca6cb12f139b2e2812a739735348d249996
|
Shell
|
cybergarage/uecho
|
/format
|
UTF-8
| 434
| 3.359375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Reformat all C/C++/Objective-C sources in the tree with the first
# available clang-format, after an optional clang-tidy --fix pass.
#
# Fix: if none of the candidate formatters was installed, $format stayed
# unset and the final xargs invoked a garbage command ("-i <file>");
# now the script fails fast with a clear error. `which` is replaced by
# the portable `command -v`.
formatters=(clang-format clang-format-7)
format=
for f in "${formatters[@]}"
do
    if command -v "${f}" &> /dev/null
    then
        format=$f
        break
    fi
done
if [ -z "$format" ]; then
    echo "error: none of the formatters found: ${formatters[*]}" >&2
    exit 1
fi
# Optional static-analysis fixups before formatting.
if command -v clang-tidy &> /dev/null; then
    find . -name "*.h" -or -name "*.c" -or -name "*.cpp" -or -name "*.m" | xargs -L 1 -Ifname clang-tidy --fix fname -- -Iinclude -Isrc
fi
find . -name "*.h" -or -name "*.c" -or -name "*.cpp" -or -name "*.m" | xargs -L 1 -Ifname $format -i fname
| true
|
200b3cf0efcc5ea14f52a18c4fa3bd1cde6b6479
|
Shell
|
thomastk/kunjumon
|
/src/kunjumonOracle
|
UTF-8
| 305
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Oracle-specific launcher for the kunjumon Nagios monitor.
#
# Arguments:
#   $1 - path to the monitor configuration file
#   $2 - name of the monitor to run
#
# Adds the Oracle instant-client libraries to LD_LIBRARY_PATH before
# delegating to kunjumon.pl.
#
# Fix: expansions are now quoted (SC2086) so paths and arguments that
# contain whitespace are passed through intact.
INSTALL_DIR=/usr/local/nagios/kunjumon
ORACLE_CLIENT_LIB_PATH=/usr/lib/oracle/11.2/client64/lib
CONFIG_FILE=$1
MONITOR_NAME=$2
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$ORACLE_CLIENT_LIB_PATH"
"$INSTALL_DIR/kunjumon.pl" "$CONFIG_FILE" "$MONITOR_NAME"
| true
|
58c810496df7447a912e802b7cdcbbfd9e684fae
|
Shell
|
aparna-arr/Allscripts
|
/old/other/Plots/MAnorm/MAnorm.sh
|
UTF-8
| 4,848
| 3.03125
| 3
|
[] |
no_license
|
# MAnorm pipeline: normalize two ChIP-seq samples using their common
# peaks (bedtools + R).
#
# Fix (StepI, second filter): the awk condition for peak2.bed read
# `$$1~/chr/ && 1 !="chrM"`. `$$1` is field indirection (field number
# taken from $1's value) and `1 != "chrM"` compares the number 1 with a
# string (always true), so chrM and malformed chromosome names were
# never filtered from sample 2. The condition now mirrors the peak1
# filter: `$1~/chr/ && $1 !="chrM"`.
if [ $# -ne 6 ]
then
echo "Usage: `basename $0` peak1.bed peak2.bed read1.bed read2.bed bp_shift_1 bp_shift_2"
exit
fi
echo "StepI: clean input"
# Keep well-formed chr* intervals; shift reads by the strand-aware
# bp_shift; everything rejected goes to the *_dump.bed files.
sed 's/\s$//g' $1 | awk 'BEGIN {OFS="\t"}
{if ($1~/chr/ && $1 !="chrM" && $1 !~/random/ && $3>$2 && $2>0 && $3>0)
print $1,$2,$3>"peak1.bed";
else
print $0 > "peak1_dump.bed"}'
sed 's/\s$//g' $2 | awk 'BEGIN {OFS="\t"}
{if ($1~/chr/ && $1 !="chrM" && $1 !~/random/ && $3>$2 && $2>0 && $3>0)
print $1,$2,$3>"peak2.bed";
else
print $0 > "peak2_dump.bed"}'
sed 's/\s$//g' $3 | awk -v var=$5 'BEGIN {OFS="\t"}
{if ($1~/chr/ && $1 !="chrM" && $4=="+" && $1 !~/random/ && $3>$2 && $2>0 && $3>0)
print $1,$2+var,$3+var>"read1.bed";
else if ($1~/chr/ && $1 !="chrM" && $4=="-" && $1 !~/random/ && $3>$2 && $2>var && $3>var)
print $1,$2-var,$3-var>"read1.bed";
else
print $0 > "read1_dump.bed"}'
sed 's/\s$//g' $4 | awk -v var=$6 'BEGIN {OFS="\t"}
{if ($1~/chr/ && $1 !="chrM" && $4=="+" && $1 !~/random/ && $3>$2 && $2>0 && $3>0)
print $1,$2+var,$3+var>"read2.bed";
else if ($1~/chr/ && $1 !="chrM" && $4=="-" && $1 !~/random/ && $3>$2 && $2>var && $3>var)
print $1,$2-var,$3-var>"read2.bed";
else
print $0 > "read2_dump.bed"}'
echo "StepII: classify common or unique peaks"
# -u: peaks overlapping the other sample (common); -v: no overlap (unique).
intersectBed -a peak1.bed -b peak2.bed -u | sort -k1,1 -k2,2n -k3,3n > common_peak1.bed
intersectBed -a peak2.bed -b peak1.bed -u | sort -k1,1 -k2,2n -k3,3n > common_peak2.bed
intersectBed -a peak1.bed -b peak2.bed -v | sort -k1,1 -k2,2n -k3,3n > unique_peak1.bed
intersectBed -a peak2.bed -b peak1.bed -v | sort -k1,1 -k2,2n -k3,3n > unique_peak2.bed
#cat common_peak1.bed common_peak2.bed | mergeBed -i - > common_peak.bed
cat common_peak1.bed common_peak2.bed > temp_common_peak.bed
mergeBed -i temp_common_peak.bed > common_peak.bed
echo "StepIII: count peak read"
if [ -f MAnorm.bed ];
then
rm MAnorm.bed
fi
# For each peak class, record its intervals in MAnorm.bed and the read
# coverage per peak in the *_count_* files consumed by MAnorm.r.
coverageBed -a read1.bed -b unique_peak1.bed | sort -k1,1 -k2,2n -k3,3n | awk 'BEGIN {OFS="\t"} {print $1,$2,$3,"unique_peak1" >> "MAnorm.bed"; print $4 > "unique_peak1_count_read1"}'
coverageBed -a read2.bed -b unique_peak1.bed | sort -k1,1 -k2,2n -k3,3n | awk '{print $4 > "unique_peak1_count_read2"}'
coverageBed -a read1.bed -b common_peak1.bed | sort -k1,1 -k2,2n -k3,3n | awk 'BEGIN {OFS="\t"} {print $1,$2,$3,"common_peak1" >> "MAnorm.bed";print $4 > "common_peak1_count_read1"}'
coverageBed -a read2.bed -b common_peak1.bed | sort -k1,1 -k2,2n -k3,3n | awk '{print $4 > "common_peak1_count_read2"}'
coverageBed -a read1.bed -b common_peak2.bed | sort -k1,1 -k2,2n -k3,3n | awk 'BEGIN {OFS="\t"} {print $1,$2,$3,"common_peak2" >> "MAnorm.bed";print $4 > "common_peak2_count_read1"}'
coverageBed -a read2.bed -b common_peak2.bed |sort -k1,1 -k2,2n -k3,3n | awk '{print $4 > "common_peak2_count_read2"}'
coverageBed -a read1.bed -b unique_peak2.bed | sort -k1,1 -k2,2n -k3,3n | awk 'BEGIN {OFS="\t"} {print $1,$2,$3,"unique_peak2">> "MAnorm.bed";print $4 > "unique_peak2_count_read1"}'
coverageBed -a read2.bed -b unique_peak2.bed | sort -k1,1 -k2,2n -k3,3n | awk '{print $4 > "unique_peak2_count_read2"}'
cat common_peak1_count_read1 common_peak2_count_read1 > common_peak_count_read1
cat common_peak1_count_read2 common_peak2_count_read2 > common_peak_count_read2
cat unique_peak1_count_read1 common_peak1_count_read1 common_peak2_count_read1 unique_peak2_count_read1 > peak_count_read1
cat unique_peak1_count_read2 common_peak1_count_read2 common_peak2_count_read2 unique_peak2_count_read2 > peak_count_read2
if [ -f MAnorm_merge.bed ];
then
rm MAnorm_merge.bed
fi
# Same bookkeeping for the merged-common-peak variant.
cat unique_peak1.bed | awk 'BEGIN {OFS="\t"} {print $1,$2,$3,"unique_peak1" >> "MAnorm_merge.bed"}'
coverageBed -a read1.bed -b common_peak.bed | sort -k1,1 -k2,2n -k3,3n | awk 'BEGIN {OFS="\t"} {print $1,$2,$3,"merged_common_peak" >> "MAnorm_merge.bed"; print $4 > "merge_common_read1"}'
coverageBed -a read2.bed -b common_peak.bed | sort -k1,1 -k2,2n -k3,3n | awk '{print $4 > "merge_common_read2"}'
cat unique_peak2.bed | awk 'BEGIN {OFS="\t"} {print $1,$2,$3,"unique_peak2" >> "MAnorm_merge.bed"}'
cat unique_peak1_count_read1 merge_common_read1 unique_peak2_count_read1 > merge_common_peak_count_read1
cat unique_peak1_count_read2 merge_common_read2 unique_peak2_count_read2 > merge_common_peak_count_read2
echo "SetpIV: normalize using common peaks"
#R --vanilla MAnorm.r >Rcommand.out
R CMD BATCH ./MAnorm.r Rcommand.out
awk 'BEGIN{OFS="\t"}{if($4~/1/) print $1,$2,$3,$7>"sample1_peaks.wig"}' MAnorm_result.xls
awk 'BEGIN{OFS="\t"}{if($4~/2/) print $1,$2,$3,$7>"sample2_peaks.wig"}' MAnorm_result.xls
# Clean up all intermediates.
rm temp_common_peak.bed
rm *count*
rm *read1*
rm *read2*
rm *peak1*
rm *peak2*
rm MAnorm.bed
rm MAnorm_merge.bed
rm common_peak.bed
|
051d49acfa005a7d01c029745d7217eba4e89a48
|
Shell
|
harakara51/MacroCenterWebsite
|
/mcwapp
|
UTF-8
| 578
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# chkconfig: 345 99 10
# description: Dictionary node.js app
#
# SysV-style init wrapper that starts/stops the MCW node application as
# the ec2-user account, appending output to $logFile.
# NOTE(review): appUser and PORT are defined but unused; start()
# hard-codes ec2-user.
appName=MCW
PORT=3005
appUser=ec2-user
nodeDir=/home/ec2-user/Node
wordsAppDir=$nodeDir/MCW
logDir=$wordsAppDir/var/log
logFile=$logDir/node_log
prog=MCWapp
start()
{
    echo "Starting $prog: "
    su - ec2-user -c "cd $wordsAppDir && npm start >>$logFile 2>&1 &"
    RETVAL=$?
}
stop()
{
    echo "Stopping $prog: "
    # Fix: the original pipeline grepped for "words" (left over from the
    # app this script was copied from — nothing here is named "words")
    # and used `cut -d' ' -f2`, which breaks on the multi-space columns
    # of `ps -ef`. Match this app's name instead, drop the grep process
    # itself, and let awk extract the PID column reliably.
    pid=$(ps -ef | grep "$appName" | grep node | grep -v grep | awk '{print $2}')
    kill $pid
    RETVAL=$?
}
case "$1" in
start) start ;;
stop) stop ;;
*) echo $"Usage: $0 {start|stop}"
RETVAL=2
esac
exit $RETVAL
| true
|
4fa25320b2f49b0fb963951bc90f2c5ba89a07e7
|
Shell
|
cr4r/proxy-terminal
|
/install
|
UTF-8
| 1,803
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Installer for the proxy-terminal helper: copies the "proxy" script to
# /bin, marks it executable, and launches it. Must be run as root.
# NOTE(review): red/gren/yellow are assigned but never used below.
red=$(tput setaf 1)
gren=$(tput setaf 2)
yellow=$(tput setaf 3)
# msg: colored-output helper. $1 selects the style (see the case at the
# bottom), $2 is the text. The color palette can be remapped through
# /etc/new-adm-color (one digit per line, filling the COLOR slots in
# order); without that file, built-in defaults are used.
msg() {
local colors="/etc/new-adm-color"
if [[ ! -e $colors ]]; then
COLOR[0]='\033[1;37m' #BRAN='\033[1;37m'
COLOR[1]='\e[31m' #VERMELHO='\e[31m'
COLOR[2]='\e[32m' #VERDE='\e[32m'
COLOR[3]='\e[33m' #AMARELO='\e[33m'
COLOR[4]='\e[34m' #AZUL='\e[34m'
COLOR[5]='\e[35m' #MAGENTA='\e[35m'
COLOR[6]='\033[1;36m' #MAG='\033[1;36m'
else
local COL=0
for number in $(cat $colors); do
case $number in
1) COLOR[$COL]='\033[1;37m' ;;
2) COLOR[$COL]='\e[31m' ;;
3) COLOR[$COL]='\e[32m' ;;
4) COLOR[$COL]='\e[33m' ;;
5) COLOR[$COL]='\e[34m' ;;
6) COLOR[$COL]='\e[35m' ;;
7) COLOR[$COL]='\033[1;36m' ;;
esac
let COL++
done
fi
NEGRITO='\e[1m'
SEMCOR='\e[0m'
# Style selector: -verm prefixes "[!]", -bar/-bar2 print a ruler,
# -ne suppresses the trailing newline; the rest differ only in color.
case $1 in
-ne) cor="${COLOR[1]}${NEGRITO}" && echo -ne "${cor}${2}${SEMCOR}" ;;
-ama) cor="${COLOR[3]}${NEGRITO}" && echo -e "${cor}${2}${SEMCOR}" ;;
-verm) cor="${COLOR[3]}${NEGRITO}[!] ${COLOR[1]}" && echo -e "${cor}${2}${SEMCOR}" ;;
-verm2) cor="${COLOR[1]}${NEGRITO}" && echo -e "${cor}${2}${SEMCOR}" ;;
-azu) cor="${COLOR[6]}${NEGRITO}" && echo -e "${cor}${2}${SEMCOR}" ;;
-verd) cor="${COLOR[2]}${NEGRITO}" && echo -e "${cor}${2}${SEMCOR}" ;;
-bra) cor="${COLOR[0]}${NEGRITO}" && echo -e "${cor}${2}${SEMCOR}" ;;
"-bar2" | "-bar") cor="${COLOR[4]}======================================================" && echo -e "${SEMCOR}${cor}${SEMCOR}" ;;
esac
}
# Refuse to run without root privileges (the copy into /bin needs them).
if [[ "${EUID}" -ne 0 ]]; then
msg -verm "Kamu harus jalankan script ini mode root"
msg -verm "Ketik $(msg -bra "sudo ./install") "
exit 1
fi
sudo cp proxy /bin/proxy && sudo chmod 777 /bin/proxy
msg -azu "============== Instalasi Selesai! ============== "
proxy
| true
|
7713715d75235abf37cd23b684635b96a77a3b93
|
Shell
|
Decentrify/Deployment
|
/gvod/gvod/bin/start.sh
|
UTF-8
| 444
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Start the gvod web-service jar in the background and record its PID.
#
# Arguments:
#   $1 - home directory (contains etc/ configs and var/ for logs)
#   $2 - directory containing gvod_ws.jar
echo "$0 $@"
if [ $# -ne 2 ];
then echo "illegal number of parameters - expected 2: homeDir jarDir"
 exit 1
fi
HOME_DIR=$1
JAR_DIR=$2
JAR_PATH=${JAR_DIR}/gvod_ws.jar
APP_CONF=${HOME_DIR}/etc/gvod.conf
WS_CONF=${HOME_DIR}/etc/gvod_ws.yml
IPV4="-Djava.net.preferIPv4Stack=true"
nohup java $IPV4 -Dconfig.file=$APP_CONF -jar $JAR_PATH server ${WS_CONF} &> ${HOME_DIR}/var/gvod_nohup.out &
# Fix: $! (the PID of the backgrounded java process) can be written
# directly; the original wrapped it in a needless `eval 'echo $!'`.
echo $! > ${HOME_DIR}/gvod.pid
| true
|
0868709f6dad3135b52aee7824f60bd44eb70395
|
Shell
|
povilasb/unix-configs
|
/alacritty/install.sh
|
UTF-8
| 172
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Install an alacritty configuration into ~/.config/alacritty.
#
# Usage: install.sh [variant]
#   With no argument, installs alacritty.yml; with a variant name,
#   installs alacritty.<variant>.yml.
#
# Fixes: the original used the bash-only [[ ]] test under #!/bin/sh
# (a syntax error on dash and other POSIX shells) and interpolated
# "$@" into the file name, which misbehaves with multiple arguments;
# use a POSIX test on $# and take the single variant from $1.
if [ $# -eq 0 ] ; then
	src=alacritty.yml
else
	src="alacritty.$1.yml"
fi
mkdir -p ~/.config/alacritty
install -m 644 "$src" ~/.config/alacritty/alacritty.yml
| true
|
a64ad9b2c65a04319ff4d3d2db9314c53f026ef4
|
Shell
|
rundeck/rundeck
|
/test/api/test-scm-plugins-list.sh
|
UTF-8
| 2,759
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#/ Purpose:
#/ Test the scm plugins list endpoints
#/
# Each test_* function below hits
#   /project/<project>/scm/<integration>/plugins
# and asserts on the XML or JSON response. The helpers (test_begin,
# api_request, assert_xml_value, assert_json_value, create_project,
# remove_project, test_succeed) come from include_scm_test.sh.
DIR=$(cd `dirname $0` && pwd)
export API_XML_NO_WRAPPER=1
source $DIR/include_scm_test.sh
project="testscmlist"
# Happy path, XML: one unconfigured, disabled git-<integration> plugin.
test_plugins_list_xml(){
local intname=$1
ENDPOINT="${APIURL}/project/$project/scm/$intname/plugins"
test_begin "XML Response"
ACCEPT=application/xml
api_request $ENDPOINT $DIR/curl.out
$SHELL $SRC_DIR/api-test-success.sh $DIR/curl.out || exit 2
#Check projects list
assert_xml_value $intname '/scmPluginList/integration' $DIR/curl.out
assert_xml_value '1' 'count(/scmPluginList/plugins/scmPluginDescription)' $DIR/curl.out
assert_xml_value "git-$intname" '/scmPluginList/plugins/scmPluginDescription/type' $DIR/curl.out
assert_xml_value "false" '/scmPluginList/plugins/scmPluginDescription/configured' $DIR/curl.out
assert_xml_value "false" '/scmPluginList/plugins/scmPluginDescription/enabled' $DIR/curl.out
test_succeed
}
#/ expect invalid integration name
test_plugins_list_xml_failure(){
local intname=$1
ENDPOINT="${APIURL}/project/$project/scm/$intname/plugins"
test_begin "XML Response/Invalid integration"
ACCEPT=application/xml
EXPECT_STATUS=400
api_request $ENDPOINT $DIR/curl.out
$SHELL $SRC_DIR/api-test-error.sh $DIR/curl.out \
"Invalid API Request: the value \"$intname\" for parameter \"integration\" was invalid. It must be in the list: [export, import]" || exit 2
test_succeed
}
# Happy path, JSON: mirrors test_plugins_list_xml.
test_plugins_list_json(){
local intname=$1
ENDPOINT="${APIURL}/project/$project/scm/$intname/plugins"
test_begin "JSON response"
ACCEPT=application/json
api_request $ENDPOINT $DIR/curl.out
#Check projects list
assert_json_value $intname '.integration' $DIR/curl.out
assert_json_value '1' '.plugins | length' $DIR/curl.out
assert_json_value "git-$intname" '.plugins[0].type' $DIR/curl.out
assert_json_value "false" '.plugins[0].configured' $DIR/curl.out
assert_json_value "false" '.plugins[0].enabled' $DIR/curl.out
test_succeed
}
# Error path, JSON: invalid integration name yields a 400 with message.
test_plugins_list_json_failure(){
local intname=$1
ENDPOINT="${APIURL}/project/$project/scm/$intname/plugins"
test_begin "JSON response/Invalid integration"
ACCEPT=application/json
EXPECT_STATUS=400
api_request $ENDPOINT $DIR/curl.out
#Check projects list
assert_json_value 'true' '.error' $DIR/curl.out
assert_json_value "Invalid API Request: the value \"$intname\" for parameter \"integration\" was invalid. It must be in the list: [export, import]" \
'.message' $DIR/curl.out
test_succeed
}
# Entry point: create a scratch project, run XML and JSON variants for
# both integrations plus the invalid-integration cases, then clean up.
main(){
create_project $project
test_plugins_list_xml 'import'
test_plugins_list_xml 'export'
test_plugins_list_xml_failure 'invalid'
test_plugins_list_json 'import'
test_plugins_list_json 'export'
test_plugins_list_json_failure 'invalid'
remove_project $project
}
main
| true
|
10b72f2e1c3def9b0caf6fd9046ee96cd901f452
|
Shell
|
redhat-nfvpe/openstack-lab
|
/manager/get-mac-address
|
UTF-8
| 495
| 3.375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Print the MAC address of a libvirt guest's interface on a given network.
#
# Arguments:
#   $1 - guest name (run through the c() naming helper from utils/functions)
#   $2 - network name; use "_" instead of "-" because the lookup is done
#        through shell variable indirection below.
set -e
# Note: use "_" instead of "-" for NETWORK
NAME=$1
NETWORK=$2
HERE=$(dirname "$(readlink -f "$0")")
ROOT=$(readlink -f "$HERE/..")
. "$ROOT/configuration/environment"
. "$ROOT/utils/functions"
C_NAME=$(c "$NAME")
# Run virsh on the manager host over ssh: strip the domiflist table
# header (tail) and trailing blank line (head), then emit one
# "network=mac" assignment per interface (awk), replacing "-" with "_"
# so each left-hand side is a valid shell identifier.
MAC_ADDRESSES=$("$ROOT/ssh" manager-root "$(cat <<- EOT
set -e
virsh domiflist $C_NAME 2> /dev/null |
tail --lines=+3 |
head --lines=-1 |
awk '{ split(\$5,a,"/"); print \$3 "=" a[1] }' |
sed 's/\-/_/g'
EOT
)")
# Import the assignments into this shell and print the MAC for the
# requested network via indirect expansion.
eval "$MAC_ADDRESSES"
echo "${!NETWORK}"
| true
|
01988cffb8bf45f31f3d01e3bc24233052bd7d19
|
Shell
|
Shiba-Chain/sample-ci
|
/docker/go/entrypoint.sh
|
UTF-8
| 88
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Container entrypoint: with no arguments, run the Go server; otherwise
# exec the supplied command verbatim (docker-run override pattern).
if (( $# == 0 )); then
  exec go run server.go
else
  exec "$@"
fi
| true
|
808b13c57e12d8579b779d470d28b006aa5c9e63
|
Shell
|
optionalg/quickstart-couchbase
|
/scripts/format.sh
|
UTF-8
| 370
| 3.5625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Format the Couchbase data disk, register it in /etc/fstab, mount it,
# and hand ownership to the couchbase user. Must run as root.
# WARNING: destructive — mkfs wipes ${DEVICE}.
#
# Fixes: added strict mode so a failed mkfs no longer falls through to
# appending an fstab entry for an unformatted disk; mkdir -p so a rerun
# does not fail on the existing mount point; quoted expansions (SC2086).
set -euo pipefail
DEVICE=/dev/sdk
MOUNTPOINT=/mnt/datadisk
echo "Creating the filesystem."
mkfs -t ext4 "${DEVICE}"
echo "Updating fstab"
# LINE holds literal \t sequences; echo -e expands them to real tabs.
LINE="${DEVICE}\t${MOUNTPOINT}\text4\tdefaults,nofail\t0\t2"
echo -e "${LINE}" >> /etc/fstab
echo "Mounting the disk"
mkdir -p "${MOUNTPOINT}"
mount -a
echo "Changing permissions"
chown couchbase "${MOUNTPOINT}"
chgrp couchbase "${MOUNTPOINT}"
| true
|
a6f8cb2dc4d88f72b99756ebc7946291b3e27b65
|
Shell
|
gitluin/sbar
|
/network.sh
|
UTF-8
| 436
| 2.875
| 3
|
[] |
no_license
|
#!/bin/sh
# Status-bar widget: report the wlp2s0 Wi-Fi SSID (or "down") to slot 7
# of the status bar via sbar_update.sh.
# NOTE(review): $'\n' and the <<< here-strings below are bash/ksh
# features, yet the shebang is /bin/sh — this only works where sh is
# bash; confirm on the target system.
NETSTATE="$(cat "/sys/class/net/wlp2s0/operstate")"
if [ $NETSTATE = "up" ]; then
# bssid comes first, fuhgettaboudit
# grep keeps both the "bssid=" and "ssid=" lines; the newline-delimited
# cut then selects the second ("ssid=") line, and the '=' cut keeps its
# value.
WPASTR="$(sudo wpa_cli -i wlp2s0 status | grep ssid)"
NETNAME="$(cut -d$'\n' -f2 <<<$WPASTR)"
NETNAME="$(cut -d'=' -f2 <<<"$NETNAME")"
# A space in the SSID would split sbar's fields; replace the first one
# (sed without /g replaces only the first occurrence).
NETNAME="$(echo "$NETNAME" | sed 's/ /_/')"
else
NETNAME="down"
fi
#"VOL: $VOL | o $BRIGHT% | $NETNAME | $BATSYM $BAT%"
/usr/local/bin/sbar_update.sh "$NETNAME" 7
| true
|
35875a29f48d098bb2e27b3f2f0e53248312c149
|
Shell
|
Cyberdeep/turbinia
|
/docker/third_party/ubuntu-essential/build_ubuntu_essential.sh
|
UTF-8
| 1,097
| 3.0625
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Rebuild textlab/ubuntu-essential: an Ubuntu 14.04 image stripped down
# to essential packages only, flattened to a single layer via
# export/import.
docker rm ubuntu-essential-multilayer 2>/dev/null
set -ve
# Stage 1: build a (multi-layer) image where everything non-essential is
# purged: clear all package selections, re-select apt plus whatever dpkg
# still reports installed, and dselect-upgrade away the rest.
docker build -t textlab/ubuntu-essential-multilayer - <<'EOF'
FROM ubuntu:14.04
# Make an exception for apt: it gets deselected, even though it probably shouldn't.
RUN dpkg --clear-selections && echo apt install |dpkg --set-selections && \
SUDO_FORCE_REMOVE=yes DEBIAN_FRONTEND=noninteractive apt-get --purge -y dselect-upgrade && \
dpkg-query -Wf '${db:Status-Abbrev}\t${binary:Package}\n' |grep '^.i' |awk -F'\t' '{print $2 " install"}' |dpkg --set-selections && \
rm -r /var/cache/apt /var/lib/apt/lists
EOF
# Stage 2: tar up the container filesystem (minus container-managed
# files) and re-import it, which squashes all layers into one.
TMP_FILE="`mktemp -t ubuntu-essential-XXXXXXX.tar.gz`"
docker run --rm -i textlab/ubuntu-essential-multilayer tar zpc --exclude=/etc/hostname \
--exclude=/etc/resolv.conf --exclude=/etc/hosts --one-file-system / >"$TMP_FILE"
docker rmi textlab/ubuntu-essential-multilayer
docker import - textlab/ubuntu-essential-nocmd <"$TMP_FILE"
# Stage 3: `docker import` drops image metadata such as CMD; restore a
# default CMD with a trivial build, then clean up.
docker build -t textlab/ubuntu-essential - <<'EOF'
FROM textlab/ubuntu-essential-nocmd
CMD ["/bin/bash"]
EOF
docker rmi textlab/ubuntu-essential-nocmd
rm -f "$TMP_FILE"
| true
|
cb18f7b13251a51df7d796874f92e85d65a9f1df
|
Shell
|
rafael2k/pinephone-modem-config
|
/etc/gpsd/device-hook
|
UTF-8
| 199
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
# gpsd device hook for the PinePhone's Quectel EG25 modem: switch the
# GNSS engine on/off via AT commands when gpsd (de)activates the device.
# gpsd passes the device as $1 and the action as $2.
action="$2"
case "$action" in
	ACTIVATE)
		# Power up the GNSS engine.
		printf 'AT+QGPS=1\n' | atinout - /dev/EG25.AT -
		;;
	DEACTIVATE)
		# Shut the GNSS engine down.
		printf 'AT+QGPSEND\n' | atinout - /dev/EG25.AT -
		;;
	*)
		echo "Unhandled argument: $action"
		exit 1
		;;
esac
| true
|
1ff55f260ab639a547abce5ad2037dc18c220168
|
Shell
|
circuitbomb/eve-skylizer
|
/bin/import-staticdata.sh
|
UTF-8
| 8,901
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Script to import latest EVE static data from Fuzzworks and import it to a mysql database
# It's part of skŸlizer but it might be usefull for other applications too.
#
# @author: chrRtg
# contact EVE ingame "Rtg Quack"
# @see: https://github.com/chrRtg/eve-skylizer
#
# some basic code from https://github.com/MagePsycho/mysql-user-db-creator-bash-script
export LC_CTYPE=C
export LANG=C
BIN_MYSQL=$(which mysql)
####################################
# application VARIABLES
####################################
# name, user and password input also via commandline parameters
DB_HOST='localhost'
DB_NAME=
DB_USER=
DB_PASS=
TMP_DIR='../data'
# which tables to import
FUZZWORK="
invCategories
invGroups
invMarketGroups
invTypes
invTypeMaterials
mapDenormalize
mapLocationWormholeClasses
mapSolarSystemJumps
trnTranslations
"
# SQL to be executed after a successful import
POSTSQL="
SET FOREIGN_KEY_CHECKS = 0;
ALTER TABLE invCategories ADD INDEX idx_cat_name (categoryName ASC);
ALTER TABLE invGroups ADD INDEX idx_ig_cid (categoryID ASC), ADD INDEX idx_ig_gname (groupName ASC);
ALTER TABLE invMarketGroups ADD INDEX idx_mg_parent (parentGroupID ASC), ADD INDEX idx_mg_name (marketGroupName ASC);
ALTER TABLE invTypes ADD INDEX idx_it_group (groupID ASC), ADD INDEX idx_it_name (typeName ASC),ADD INDEX idx_it_mgroup (marketGroupID ASC);
ALTER TABLE invTypeMaterials ADD INDEX idx_typeid (typeID ASC), ADD INDEX idx_mtypeid (materialTypeID ASC);
ALTER TABLE trnTranslations ADD INDEX idx_tt_tcid (tcID ASC), ADD INDEX idx_tt_kid (keyID ASC), ADD INDEX idx_tt_text (text(20) ASC), ADD INDEX idx_tt_lang (languageID ASC);
ALTER TABLE mapDenormalize ADD INDEX md_IX_name (itemName(15) ASC), ADD INDEX md_IX_groupid (groupID ASC), ADD INDEX md_IX_typeid (typeID ASC);
ALTER TABLE mapSolarSystemJumps ADD INDEX IDX_fromSolar (fromSolarSystemID ASC), ADD INDEX IDX_toSolar (toSolarSystemID ASC);
SET FOREIGN_KEY_CHECKS = 1;
"
####################################
# core VARIABLES
####################################
_lastimport=0
_currimport=0
_requireupdate=0
_forceupdate=0
_bold=$(tput bold)
_underline=$(tput sgr 0 1)
_reset=$(tput sgr0)
_purple=$(tput setaf 171)
_red=$(tput setaf 1)
_green=$(tput setaf 76)
_tan=$(tput setaf 3)
_blue=$(tput setaf 38)
#
# HEADERS & LOGGING
#
function _debug()
{
[ "$DEBUG" -eq 1 ] && $@
}
function _header()
{
printf "\n${_bold}${_purple}========== %s ==========${_reset}\n" "$@"
}
function _arrow()
{
printf "➜ $@\n"
}
function _success()
{
printf "${_green}✔ %s${_reset}\n" "$@"
}
function _error() {
printf "${_red}✖ %s${_reset}\n" "$@"
}
function _warning()
{
printf "${_tan}➜ %s${_reset}\n" "$@"
}
function _underline()
{
printf "${_underline}${_bold}%s${_reset}\n" "$@"
}
function _bold()
{
printf "${_bold}%s${_reset}\n" "$@"
}
function _note()
{
printf "${_underline}${_bold}${_blue}Note:${_reset} ${_blue}%s${_reset}\n" "$@"
}
function _die()
{
_error "$@"
_cleanUpOnExit
exit 1
}
function _safeExit()
{
_cleanUpOnExit
exit 0
}
function _executeCommand()
{
eval $1;
ERR=$?
if [ $ERR -ne 0 ] ; then
# I've got an error
if [[ -z $2 ]]; then
_die "The command '$1' failed with exit code $ERR"
else
_die "$2 with exit code $ERR"
fi
fi
}
# Print the command-line usage text and terminate via _safeExit.
function _printUsage()
{
    local script
    script=$(basename "$0")
    cat <<USAGE
${script} [OPTION]...
Import EVE static files from fuzzwork to MySql.
Options:
-h, --host MySQL Host [default 'localhost']
-d, --database MySQL Database
-u, --user MySQL User
-p, --pass MySQL Password (If empty, script will ask you)
-f, --force Force update regardless if you have the most current data
-h, --help Display this help and exit
Examples:
${script} --help
${script} [--host="<host-name>"] --database="<db-name>" [--user="<db-user>"] [--pass="<user-password>"]
USAGE
    _safeExit
}
# Parse "--opt=value" style command-line options into the DB_* globals
# and the _forceupdate flag, prompt for the password when it was not
# supplied, then enforce that password, database name and user are set.
# NOTE(review): bare "-h" matches the help arm below, never --host; the
# host can only be set via "-h=<host>" or "--host=<host>", although the
# usage text lists "-h" for both.
function processArgs()
{
# Parse Arguments
for arg in "$@"
do
case $arg in
-h=*|--host=*)
DB_HOST="${arg#*=}"
;;
-d=*|--database=*)
DB_NAME="${arg#*=}"
;;
-u=*|--user=*)
DB_USER="${arg#*=}"
;;
-p=*|--pass=*)
DB_PASS="${arg#*=}"
;;
-f|--force)
_forceupdate=1
;;
-h|--help)
_printUsage
;;
*)
# any unrecognised argument warns and shows usage (which exits the script)
_warning "Unknown parameter '$arg'"
_printUsage
;;
esac
done
if [[ -z $DB_PASS ]]; then
_warning "please enter your database password:"
# -s: do not echo the typed password
# NOTE(review): read without -r interprets backslashes in the password
read -s DB_PASS
fi
[[ -z $DB_PASS ]] && _error "The password cannot be empty." && exit 1
[[ -z $DB_NAME ]] && _error "Database name cannot be empty." && exit 1
[[ -z $DB_USER ]] && _error "Database user is required." && exit 1
}
# Create the working directory "$TMP_DIR/fuzzwork_import" used for the
# downloaded dump files; no-op when it already exists.
# Fixed: the path is now quoted (both in the test and inside the command
# string handed to eval) so a TMP_DIR containing spaces works.
function createTmpDir()
{
    if [ ! -d "$TMP_DIR/fuzzwork_import" ]; then
        _executeCommand "mkdir \"$TMP_DIR/fuzzwork_import\"" "can not create directory in '$TMP_DIR'"
    fi
}
# Remove the temporary download directory, if present.  Called from both
# _die and _safeExit so every exit path cleans up.
# Fixed: the path is now quoted (spaces in TMP_DIR are safe) and the
# ":?" expansion aborts rather than ever running rm against
# "/fuzzwork_import" should TMP_DIR be empty.
function _cleanUpOnExit()
{
    if [ -d "$TMP_DIR/fuzzwork_import" ]; then
        rm -rf -- "${TMP_DIR:?}/fuzzwork_import"
    fi
}
# Abort the script unless a MySQL client binary was located (BIN_MYSQL
# is expected to be set earlier in the file).
function checkMysql
{
    [[ -n ${BIN_MYSQL:-} ]] || _die "no MySQL binary found"
}
# Verify the MySQL credentials by running a trivial query
# ("select version();"); _executeCommand aborts the whole script with
# the given message when the connection fails.
# NOTE(review): the password appears on the mysql command line and is
# visible in `ps` output — consider a --defaults-extra-file instead.
function checkMysqlConnection()
{
_executeCommand "$BIN_MYSQL --user=\"$DB_USER\" --password=\"$DB_PASS\" --host=\"$DB_HOST\" -e 'select version();'" "can not connect to mysql with the given credentials"
}
# Verify the target schema exists and is usable by issuing
# "use $DB_NAME" against it; aborts with a diagnostic otherwise.
function checkMysqlDB()
{
_executeCommand "$BIN_MYSQL --user=\"$DB_USER\" --password=\"$DB_PASS\" --host=\"$DB_HOST\" $DB_NAME -e 'use $DB_NAME;'" "can not find MySQL database '$DB_NAME' "
}
# Print the final success banner after a completed import.
function printSuccessMessage()
{
    local line
    for line in \
        "congrats, your EVE static data is now up to date" \
        "fly safe capsuleer"
    do
        _success "$line"
    done
}
# Compare the remote dump's Last-Modified timestamp against the one we
# stored after the previous import.  Sets _requireupdate=1 when fuzzwork
# has newer data or when the user passed -f/--force.
# Fixed: the wget pipeline used to run via command substitution *inside*
# the argument string handed to _executeCommand, so it executed before
# _executeCommand was even called — its failure could never be detected
# and the error message was dead code.  The pipeline now runs directly
# and an empty result aborts the script.  (The former "export" was
# dropped: _currimport is only read by this shell, never by children.)
function checkUpdate()
{
    # Last-Modified of a small file on fuzzwork, converted to epoch
    # seconds; "cut -c 18-" strips the header name prefix from the
    # "  Last-Modified: ..." line.
    _currimport=$(wget --server-response --spider https://www.fuzzwork.co.uk/dump/latest/invCategories.sql.bz2 2>&1 \
        | grep -i Last-Modified | cut -c 18- | date -f - +%s)
    if [[ -z $_currimport ]]; then
        _die "can not fetch date of last update"
    fi
    if [ -s "$TMP_DIR/fuzzwork_last.txt" ]; then
        _lastimport=$(< "$TMP_DIR/fuzzwork_last.txt")
    fi
    if [ "$_currimport" -gt "$_lastimport" ]; then
        _requireupdate=1
        _warning "Fuzzwork has newer files available"
    elif [ "$_forceupdate" -eq 1 ]; then
        _requireupdate=1
        _warning "FORCE update mode"
    fi
}
# Download the latest .sql.bz2 dump of every table named in $FUZZWORK
# into "$TMP_DIR/fuzzwork_import", aborting on the first failure.
# NOTE(review): $FUZZWORK is presumably a whitespace-separated list of
# table names defined earlier in the script (not visible in this chunk)
# — the unquoted expansion in the for-loop relies on that word
# splitting; confirm against the top of the file.
function wgetFuzzworks()
{
_warning "download EVE tables from fuzzwork..."
for TABLE in $FUZZWORK
do
_executeCommand "wget -nv -O $TMP_DIR/fuzzwork_import/$TABLE.sql.bz2 https://www.fuzzwork.co.uk/dump/latest/$TABLE.sql.bz2" "failed to download EVE data files from fuzzworks"
done
}
# Stream every downloaded *.bz2 dump straight into MySQL
# (bzcat | mysql), aborting via _executeCommand on the first failed
# import and reporting each successfully loaded file.
function importFuzzworks()
{
_warning "Import fuzzwork data to MySQL..."
for filename in $TMP_DIR/fuzzwork_import/*.bz2; do
# bzcat decompresses to stdout; the SQL is piped directly into the target DB
_executeCommand "bzcat $filename | $BIN_MYSQL --user=\"$DB_USER\" --password=\"$DB_PASS\" --host=\"$DB_HOST\" $DB_NAME" "failed to import files from fuzzworks to db '$DB_NAME' "
_success " '$filename' imported"
done
}
# Apply the post-import SQL held in $POSTSQL (index creation etc.,
# assembled earlier in the file) to the freshly imported database.
# Fixed: the failure message was copy-pasted from checkMysqlDB and
# wrongly claimed the database could not be found; it now describes the
# actual failure.
function postFixDatabase()
{
    _warning "postFixDatabase..."
    _executeCommand "$BIN_MYSQL --user=\"$DB_USER\" --password=\"$DB_PASS\" --host=\"$DB_HOST\" $DB_NAME --execute='$POSTSQL'" "failed to apply post-import SQL to database '$DB_NAME' "
}
################################################################################
# Main
################################################################################
# Orchestrate the whole import:
#   1. sanity-check the environment (mysql binary, credentials, database)
#   2. compare the remote dump timestamp against the locally stored one
#   3. download, import and post-process the data only when needed
#   4. record the remote timestamp for the next run
# Exits via _printUsage when called with no arguments at all.
function main()
{
# always operate relative to the script's own directory
cd "$(dirname "$0")"
[[ $# -lt 1 ]] && _printUsage
# MySQL binay existing / MySQL installed?
checkMysql
_success "MySQL binary found"
# read commandline
processArgs "$@"
checkMysqlConnection
_success "MySQL connection established"
checkMysqlDB
_success "MySQL database is existing"
createTmpDir
_success "temporary directory created"
# sets _requireupdate=1 when fuzzwork has newer data (or --force was given)
checkUpdate
if [ $_requireupdate -eq 0 ]; then
_warning "no update neccessary, you already have the most current data"
_safeExit
fi
wgetFuzzworks
_success "data from fuzzworks successful downloaded"
importFuzzworks
_success "data imported to your MySql database"
postFixDatabase
_success "MySQL POSTSQL database post processed"
printSuccessMessage
# store last modification date of fuzzwork data to file for later checks
echo "$_currimport" > "$TMP_DIR/fuzzwork_last.txt"
_safeExit
}
main "$@"
| true
|
8e24cbe34e8c26c0ff424045c5d2dddc020ce81b
|
Shell
|
IBMa/equal-access
|
/travis/publish/karma-achecker.sh
|
UTF-8
| 439
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Publish karma-accessibility-checker to npm when the Travis tag looks
# like "<npm-version>#<extension-version>"; a no-op otherwise.
# Fixed: an unchecked `cd` could have let npm publish run from the wrong
# directory, and $NPM_VERSION is now quoted.
if [ -n "$TRAVIS_TAG" ]; then
  if [[ $TRAVIS_TAG =~ ([^ ]+)\#([^ ]+) ]]; then
    cd ./karma-accessibility-checker/src || exit 1
    # npm auth token for publishing, supplied via the NPM_TOKEN env var
    echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >.npmrc
    NPM_VERSION="${BASH_REMATCH[1]}"
    EXT_VERSION="${BASH_REMATCH[2]}"
    echo "Deploy karma-accessibility-checker version $NPM_VERSION..."
    npm --no-git-tag-version version "$NPM_VERSION"
    npm publish
  fi
fi
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.