blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
957a38849beee787f493362bdcac43d410cf01c7
|
Shell
|
rust-random/book
|
/tests/generate.sh
|
UTF-8
| 303
| 2.875
| 3
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# Generate a Rust crate that runs every markdown file in ../src as doctests.
# For each ../src/NAME.md this creates src/NAME.rs containing a doctest!()
# invocation and registers the module in src/lib.rs.
mkdir -p src

# Crate root: pull in the doc_comment macro used by the generated modules.
cat << EOF > src/lib.rs
#![allow(non_snake_case)]
#[macro_use]
extern crate doc_comment;
EOF

for doc in ../src/*.md
do
    # Derive a valid Rust module name: strip the .md suffix and replace
    # '.' / '-' (illegal in identifiers) with '_'.
    NAME=$(basename "$doc" .md)
    NAME=${NAME//./_}
    NAME=${NAME//-/_}
    # BUG FIX: quote the redirection target. The unquoted 'src/$NAME.rs'
    # was subject to glob expansion, which could clobber src/lib.rs.
    # printf also replaces the non-portable 'echo -e' + octal \041 ('!').
    printf 'doctest!("../%s");\n' "$doc" > "src/$NAME.rs"
    echo "mod $NAME;" >> src/lib.rs
done
| true
|
37cdbd9cee261d076cb99fcdc7a458aac8a7a989
|
Shell
|
amusecode/amuse
|
/src/amuse/community/ph4/src/test_nstab.sh
|
UTF-8
| 443
| 3.4375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash
# Check that the Fortran and C versions of nstab actually return the
# same results! Run n random tests.

# Number of iterations; the first CLI argument overrides the default.
n=100
if (( $# > 0 )); then n=$1 ; fi

make -f Makefile.ph4 test_nstab

i=0
bad=0
while (( i < n )); do
    i=$((i+1))
    # One random parameter set, fed to both implementations.
    # $s is intentionally left unquoted below: it expands to several
    # whitespace-separated arguments produced by random_nstab.
    s=$(./random_nstab)
    f=$(ftest_nstab $s)
    c=$(ctest_nstab $s)
    if [ "$f" != "$c" ]; then
        bad=$((bad+1))
        # BUG FIX: reuse the captured results instead of invoking
        # ftest_nstab/ctest_nstab a second time just to print them.
        echo $s "$f" "$c"
    fi
done
echo $bad disagreement\(s\) found
| true
|
f8e3685c405e08bcb47c039c9552d421dec2e868
|
Shell
|
mdjnewman/dotfiles
|
/bin/gnucash-price-quotes.sh
|
UTF-8
| 600
| 3.25
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/usr/local/bin/bash
# Fetch online price quotes for a GnuCash file via gnucash-cli.
# Prints output only when something went wrong, so it stays quiet (and is
# cron/launchd-friendly) on success.  Paths are hard-coded for one machine.
export PATH=/usr/local/bin:$PATH
# Not strictly, but my .bash_profile does require it
if ((BASH_VERSINFO[0] < 4)); then echo "Sorry, you need at least bash-4.0 to run this script." >&2; exit 1; fi
export HOME=/Users/mnewman
. "$HOME/.bash_profile" > /dev/null
# Capture the combined stdout+stderr of the quote update.
OUT="$(/Applications/Gnucash.app/Contents/MacOS/gnucash-cli --debug --quotes get '/Volumes/GoogleDrive-110379146933208071142/My Drive/Money/GNUCash Files/Money.gnucash' 2>&1)"
# $? is the exit status of the command substitution above; it must be read
# immediately, before any other command overwrites it.
EXIT_CODE=$?
# Report on a non-zero exit OR when the (possibly zero-exit) output mentions ERROR.
if [[ $EXIT_CODE != 0 || "$OUT" == *"ERROR"* ]]; then
echo Exit code is $EXIT_CODE
echo "$OUT"
fi
exit $EXIT_CODE
| true
|
596b97f00706577125707b42cdabae7d6e4b4bec
|
Shell
|
yuuki/mackerel2route53
|
/script/envfile
|
UTF-8
| 408
| 3.8125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e -o pipefail
# - Usage:
#     envfile ENV_FILE_PATH CMD [ARGS...]
#
# - ENV_FILE format
#     $ cat /etc/default/aws
#     AWS_ACCESS_KEY_ID='xxxxxx'
#     AWS_SECRET_ACCESS_KEY='yyyyyy'
#     AWS_REGION=ap-northeast-1
#
# Exports every KEY=VALUE line of ENV_FILE_PATH into the environment, then
# replaces this process with CMD.

# BUG FIX: the command used to be flattened into one string
# (cmd="${@:2:($#-1)}") and exec'd unquoted, which re-split any argument
# containing whitespace.  Keep the command as positional parameters instead.
if [ "$#" -lt 2 ]; then
    echo 'CMD required' >&2
    exit 1
fi
env_file_path="$1"
shift

# xargs strips the shell quoting around values (VAR='x' -> VAR=x).
# NOTE(review): word-splitting here still breaks values that themselves
# contain spaces; acceptable for the simple KEY=VALUE files shown above.
for kv in $(grep -v '^#' "${env_file_path}" | xargs); do
    export "$kv"
done

exec "$@"
| true
|
6caccd7e60ca8a8aa0352a28dc49126de699e5b8
|
Shell
|
hjanime/Xert_paper
|
/NGS_downstream/master/master_DiffEnrichment.sh
|
UTF-8
| 3,730
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Master script for differential binding analysis using Diffbind and diffReps
# Pipeline: peak preparation -> DiffBind (R) -> UCSC tracks -> diffReps ->
# diffReps consensus -> combined diffReps UCSC tracks.
cnt_dir=''
atac_dir=''
cnt_peaks=''
atac_peaks=''
work_dir=$(pwd)'/'
path=''
# Print usage for every supported option.
help() {
echo "Performs differential binding analysis for CUT&Tag and ATAC-seq."
echo
echo "Syntax: ./master_diffEnrichment.sh [-a|b|c|d|p|t|h]"
echo "options:"
echo "a	Provide directory containing merged peak files (ATAC-seq). [mandatory]"
echo "b	Provide directory containing BAM files (CUT&Tag. [mandatory]"
echo "d	Provides working directory (Standard is current directory)."
echo "c	Provide directory containing merged peak files (CUT&Tag). [mandatory]"
echo "h	Prints this help."
echo "p	Provide path to /Xert_paper/NGS_downstream/. [mandatory]"
echo "t	Provide directory containing BAM files (ATAC-seq). [mandatory]"
echo
}
# Consume one "flag value" pair ($1 = flag, $2 = value); -h exits immediately.
parse_args() {
case "$1" in
-a)
atac_peaks="$2"
;;
-b)
cnt_dir="$2"
;;
-c)
cnt_peaks="$2"
;;
-d)
work_dir="$2"
;;
-p)
path="$2"
;;
-t)
atac_dir="$2"
;;
-h)
help
exit 0
;;
*)
echo "Unknown or badly placed parameter '$1'." 1>&2
exit 1
;;
esac
}
# NOTE(review): arguments are consumed strictly in pairs; a flag given
# without a value (other than a lone -h, which exits inside parse_args)
# mis-aligns all remaining flag/value pairs.
while [[ "$#" -ge 1 ]]; do
parse_args "$1" "$2"
shift; shift
done
# Every mandatory option must have been supplied.
if [[ $path == '' ]]
then
echo -e "Please provide the path to /Xert_paper/NGS_downstream/ with -p"
exit 1
fi
if [[ $cnt_dir == '' ]]
then
echo -e "Please provide the path to a directory containing CUT&Tag BAM files with -b"
exit 1
fi
if [[ $atac_dir == '' ]]
then
echo -e "Please provide the path to a directory containing ATAC-seq BAM files with -t"
exit 1
fi
if [[ $cnt_peaks == '' ]]
then
echo -e "Please provide the path to a directory containing CUT&Tag peak files with -c"
exit 1
fi
if [[ $atac_peaks == '' ]]
then
echo -e "Please provide the path to a directory containing ATAC-seq peak files with -a"
exit 1
fi
# Normalise every user-supplied path to an absolute path with a trailing '/'.
atac_peaks=$(realpath $atac_peaks)'/'
cnt_peaks=$(realpath $cnt_peaks)'/'
atac_dir=$(realpath $atac_dir)'/'
cnt_dir=$(realpath $cnt_dir)'/'
work_dir=$(realpath $work_dir)'/'
path=$(realpath $path)'/'
mkdir -p ${work_dir}noX_peaks
noX_dir=${work_dir}noX_peaks'/'
# Prepares BED files without X chromosomes for Diffbind analysis
echo -e "Prepares BED files for DiffBind"
${path}scripts/diffbind.sh $atac_peaks $cnt_peaks $noX_dir
mkdir -p ${work_dir}raw_DiffBind
rawDiffbind_dir=${work_dir}raw_DiffBind'/'
# Performs Diffbind analysis in R
echo -e "Performs Diffbind analysis"
Rscript ${path}scripts/diffbind.R $noX_dir $atac_dir $cnt_dir $rawDiffbind_dir
mkdir -p ${work_dir}final_DiffBind
DiffBind_dir=${work_dir}final_DiffBind'/'
# Prepares BED files for visualization with UCSC
echo -e "Prepares BED tracks for UCSC"
${path}scripts/combine_diffbind.sh $rawDiffbind_dir $DiffBind_dir
chrom_sizes=${path}files/mm10_chrom_sizes.txt
mkdir -p ${work_dir}raw_diffreps
raw_diffreps=${work_dir}raw_diffreps'/'
# Runs diffreps to call differential regions between H3K9me3 samples
echo -e "Runs diffReps for H3K9me3"
${path}scripts/diffreps.sh $cnt_dir $raw_diffreps $chrom_sizes
# Runs diffreps to call consensus regions between H3K9me3 samples
echo -e "Calls consensus peaks between H3K9me3 conditions"
${path}scripts/diffreps_consensus.sh $raw_diffreps $chrom_sizes
mkdir -p ${work_dir}final_diffreps
diffreps_dir=${work_dir}final_diffreps'/'
# Combines consensus and differential peaks from diffreps
echo -e "Prepares diffReps BED tracks for UCSC"
${path}scripts/combine_diffreps.sh $raw_diffreps $diffreps_dir
| true
|
ae4fd5b2a6555584ae512c3ca8ef267ab5ad4a12
|
Shell
|
jeaye/safepaste
|
/tool/clean-expired
|
UTF-8
| 337
| 3.65625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -eu

# Delete expired pastes: every file under the given paste directory that is
# at least a minute old and is not a "burn" or "disable" marker.

# Directory of this script; kept from the original although nothing below uses it.
dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

function usage
{
  echo "usage: $0 <paste directory>"
  exit 1
}

[ ! $# -eq 1 ] && usage

# BUG FIX: iterate find's output NUL-delimited instead of word-splitting
# $(find ...), so paths containing whitespace survive intact; grep -z
# filters the NUL-delimited stream (grep -E replaces deprecated egrep).
while IFS= read -r -d '' paste; do
  # Only remove if it's writeable
  [ -w "$paste" ] && rm -fv "$paste" "$paste.burn"
done < <(find "$1" -mmin +0 -type f -print0 | grep -zEv 'burn|disable')
| true
|
0bee5c8ad5b40326623a948be56044589c625e48
|
Shell
|
peterfpeterson/dotfiles
|
/bin/watchman.sh
|
UTF-8
| 3,115
| 4.25
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# NOTE(review): declared as /bin/sh but relies on bashisms below ("==" in
# [ ], "${1/pattern}" substitution) -- it only works where sh is bash.
######################################################################
# Script to run [watchman](https://facebook.github.io/watchman/)
######################################################################
# Print usage plus an example trigger configuration file.
help () {
echo "usage: watchman.sh <command> [texfile]"
echo ""
echo "help	Show this help"
# NOTE(review): the "start" line below repeats the "stop" description --
# looks like a copy-paste slip; the start command actually creates the
# trigger and starts the server.
echo "start	Delete the triggers and shutdown the watchman server"
echo "stop	Delete the triggers and shutdown the watchman server"
echo "status	List the triggers in this directory"
echo ""
echo "Example configuration file:"
echo "	[\"trigger\", \"/home/username/hotscience\","
echo "	{"
echo "	\"name\": \"latex\","
echo "	\"append_files\": true,"
echo "	\"expression\": ["
echo "	\"anyof\","
echo "	[\"match\", \"*.tex\", \"wholename\"]"
echo "	],"
echo "	\"command\": [\"pdflatex\", \"awesome_paper.tex\"]"
echo "	}"
echo "	]"
}
# Extract the trigger name (element [2].name) from the JSON config file.
triggername () {
CONFIGFILE=${1}
jq -r -M .[2].name "${CONFIGFILE}"
}
# Echo the names of triggers registered for the current directory.
# NOTE(review): "${1/triggers}" deletes the *literal* text "triggers" from
# $1 (probably "${1/$triggers}" was intended), and the callers below only
# test the echoed output -- never the return code -- so in practice this
# answers "does any trigger exist", not "is this trigger running"; confirm
# before relying on it.
triggerrunning() {
triggers=$(watchman trigger-list "$(pwd)" | jq -r -M .triggers[].name)
echo "$triggers"
if [ "${1/triggers}" == "${1}" ]; then
return 0
else
return 1
fi
}
########## check for dependencies
if [ ! "$(command -v watchman)" ]; then
echo "failed to find watchman https://facebook.github.io/watchman/"
exit 255
fi
if [ ! "$(command -v jq)" ]; then
echo "failed to find jq https://stedolan.github.io/jq/"
exit 255
fi
########## exit early on help
if [ $# -eq 0 ]; then
help
exit 255
fi
if [ "$1" = "help" ]; then
help
exit 0
fi
########## determine configuration file
if [ $# -eq 2 ]; then
CONFIGFILE="$2"
else
CONFIGFILE=watchman.json
fi
if [ ! -f ${CONFIGFILE} ]; then
echo "error: Could not open file ${CONFIGFILE}: No such file or directory"
exit 255
fi
# Intended to validate the JSON before use.
# NOTE(review): the command substitution redirects ALL output to /dev/null,
# so the test string is always empty and this branch can never fire --
# invalid JSON is not actually rejected here.
if [ "$(jq -e . "${CONFIGFILE}" > /dev/null 2>&1)" ]; then
jq -e . ${CONFIGFILE}
exit 255
fi
########## switch to the specified directory
# The watched directory is element [1] of the config array.
DIR=$(jq -r -M .[1] "${CONFIGFILE}")
if [ ! -d "${DIR}" ]; then
echo "error: invalid directory ${DIR}"
exit 255
fi
cd "${DIR}" || exit
DIR=$(pwd)
########## do the actual work
trigger=$(triggername ${CONFIGFILE})
case "$1" in
# "help" is dealt with above
start)
# startup watchman
if [ "$(triggerrunning "${trigger}")" ]; then
echo "\"${trigger}\" already running"
else
echo "start watching ${DIR}"
watchman -o "${DIR}/watchman.log" -j < "${CONFIGFILE}"
fi
;;
stop)
# delete the trigger and shutdown watchman
if [ "$(triggerrunning "${trigger}")" ]; then
echo "stop watching ${DIR}"
watchman trigger-del "${DIR}" "${trigger}"
watchman shutdown-server
else
echo "\"${trigger}\" not running"
fi
;;
status)
# list all of the wacky triggers
watchman trigger-list "${DIR}"
;;
*)
echo "unknown command \"$1\""
exit 255
;;
esac
exit
| true
|
96d45c4a07608491c32843b1498be03f7930b26d
|
Shell
|
petronny/aur3-mirror
|
/crack/PKGBUILD
|
UTF-8
| 946
| 2.640625
| 3
|
[] |
no_license
|
# Contributor: Lex Black <autumn-wind at web dot de>
# Contributor: Arno Rehn <arno@arnorehn.de>

# Arch Linux PKGBUILD for the "crack" LLVM-based scripting language.
pkgname=crack
pkgver=0.10
pkgrel=1
pkgdesc="LLVM based scripting language"
arch=(i686 x86_64)
url="http://code.google.com/p/crack-language/"
license=('MPL')
depends=('llvm-crack')
makedepends=('mesa' 'pcre' 'gtk2' 'sdl' 'libxs')
# BUG FIX: 'gkt2' -> 'gtk2' so pacman can resolve the optional dependency.
optdepends=('mesa: OpenGL bindings' 'pcre: bindings' 'gtk2: bindings' 'sdl: bindings' 'libxs: bindings')
source=(http://crack-lang.org/downloads/$pkgname-$pkgver.tar.gz)
sha1sums=('41ae317f7b3047c4c51e94e3b5d42b469942057d')

# Strip -Wl,--as-needed: lazy linking breaks this build.
export LDFLAGS="${LDFLAGS//-Wl,--as-needed}"

prepare() {
  cd "$srcdir/$pkgname-$pkgver"
  # Make installed symlinks relative instead of pointing through $DESTDIR.
  sed -i "s;ln -sf \$(DESTDIR)\$(bindir)/;ln -sf ;" Makefile.am
  sed -i "s;ln -sf \$(DESTDIR)\$(bindir)/;ln -sf ;" Makefile.in
}

build() {
  cd "$srcdir/$pkgname-$pkgver"
  ./configure --prefix=/usr
  make
}

package() {
  cd "$srcdir/$pkgname-$pkgver"
  make DESTDIR="$pkgdir/" install
}
# vim:set ts=2 sw=2 et:
| true
|
41c197bb2cedf60980dfee0857fc7621db24b229
|
Shell
|
danielwii/benkyo
|
/bin/update_assets.sh
|
UTF-8
| 409
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
set -ex

# Refresh vendored front-end assets (bulma + font-awesome) from node_modules
# into ui/static/.

echo '[x] update assets...'

# Install/refresh node_modules first when yarn is available.
# FIX: also silence stdout -- 'type' prints the resolved path on stdout,
# which was leaking into the script's output; only the exit code matters.
if type yarn >/dev/null 2>&1; then
  yarn
fi

# Rebuild the target directories from scratch.
rm -rf ui/static/libs ui/static/fonts
mkdir -p ui/static/libs ui/static/fonts

cp -f node_modules/bulma/css/bulma.css ui/static/libs/bulma.css
cp -f node_modules/font-awesome/css/font-awesome.css ui/static/libs/font-awesome.css
cp -rf node_modules/font-awesome/fonts/* ui/static/fonts/

echo '[x] ^_^ done!'
| true
|
0c3d8bc1f029ace213cb4b2c0355d5d9c5db6671
|
Shell
|
Ellian-aragao/IFB-EDA
|
/2_Listas/compileLibrary.sh
|
UTF-8
| 2,191
| 4.03125
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Build helper for the linked-list exercises: compiles the LinkedList
# library, links it with the chosen exercise, runs the binary (optionally
# under valgrind) and cleans up.
# NOTE(review): library paths are hard-coded to one user's home directory.
# Compile the LinkedList library and its test interface to object files.
compile_library() {
echo 'Compilando biblioteca LinkedList'
gcc -c $compileFlags /home/ellian/code/faculdade/ed1/2_Listas/LinkedList/linkedList.c /home/ellian/code/faculdade/ed1/2_Listas/LinkedList/testInterface/testLinkedListInterface.c
}
# Build the final binary from the exercise's C sources plus the library
# object files, then delete the intermediate .o files.
compile_exercicio() {
echo 'Compilando binário final: '$path/$pathPrograma
gcc $compileFlags $(ls $path/$pathPrograma/*.c *.o)
rm $(ls *.o)
}
# Banner printed when no a.out binary was produced.
echo_binario_final_fail() {
echo -e '\n*****************************'
echo '! Binário final inexistente !'
echo "*****************************"
}
# Select how to run the binary based on the debug argument.
type_execution_binary() {
# 1: simple valgrind run in the terminal
# 2: full valgrind report written to a separate txt file
# default: run without instrumentation
case $debugOption in
1)
valgrind \
./a.out
;;
2)
valgrind -s \
--leak-check=full \
--show-leak-kinds=all \
--track-origins=yes \
--verbose \
--log-file=valgrind-out.txt \
./a.out
;;
*)
./a.out
;;
esac
}
# Run the default a.out produced by gcc, then delete it unless $3 was given.
execute_binary() {
# Only run when the binary exists and is non-empty.
if [ -s "a.out" ]; then
echo -e 'Executando exercício\n--------------------\n'
type_execution_binary
if [ -z $dontDeletBinary ]; then
rm a.out
fi
else
echo_binario_final_fail
fi
}
# Drive the whole build according to the path given as first argument.
fluxo_execucao() {
# Bail out when no path was given or it does not exist.
if [ -z $1 ]; then
echo 'não foi enviado path'
exit
elif [ ! -d $1 ]; then
echo 'diretório enviado não existe'
exit
# If the target is not the LinkedList library itself, build the library first.
elif [ "$1" != "LinkedList" ] && [ "$1" != "LinkedList/" ]; then
compile_library
# Otherwise compile only the test-interface API.
else
gcc -c $compileFlags testInterface/testLinkedListInterface.c
fi
compile_exercicio
execute_binary
}
# Globals consumed by the functions above.
compileFlags='-g -W -Wall -Wextra -Wshadow -Werror'
path=~/code/faculdade/ed1/2_Listas
pathPrograma=$1
debugOption=$2
dontDeletBinary=$3      # any non-empty value keeps a.out after the run
cd $path
fluxo_execucao $pathPrograma $debugOption
| true
|
c8cf2c994a80fafa556b3fa3b507a29769840ef7
|
Shell
|
erezhuri/erez-py
|
/bash_examples/Telnet_Basic_Test.sh
|
UTF-8
| 8,116
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# FIX: changed from #!/bin/sh -- this script relies on bash-only features
# ($RANDOM, arrays, [[ ]], ${var/pattern}) and breaks under a POSIX sh.
#
# Subroutines to test the FIXation environment over telnet sessions.
# Two modes exist: (1) random tests, (2) fixed tests tied to specific Jira
# items.  What is tested is controlled by editing the item arrays right
# before each command is executed.  Verification routines (in any language)
# can be run right after each telnet command completes; since the telnet
# session terminates immediately after execution, you may want to redirect
# each command's output to a file and post-process it with a tool of your
# choice.

## show all disconnected lps
# Dump the gateway status ("fs") into status_all.tln; if any argument is
# given, also print the names of LPs that are not connected.
status_all ()
{
echo "(echo "clear"; echo "fs"; sleep 1;) | telnet $HOST $PORT > status_all.tln"
(echo "clear"; echo "fs"; sleep 1;) | telnet $HOST $PORT > status_all.tln
if [[ $1 ]];then
echo "non-connected LP's:"
cat status_all.tln | grep "not-connected" | sort -u | awk ' { print $1 }'
fi
}
#show all disconnected lp from the list
# Refresh status_list.tln from the gateway; with any argument, print the
# not-connected LPs restricted to those named in $LP_LIST.
status_lp ()
{
#LP_FILE=$1;
#echo $LP_FILE;
sleep 2;
(echo "clear"; echo "fs"; sleep 5;) | telnet $HOST $PORT > status_list.tln
if [[ $1 ]];then
echo "non-connected LP's:"
for i in $LP_LIST
do
grep $i status_list.tln | grep "not-connected" | sort -u | awk ' { print $1 }'
done
fi
}
## run info on all active LPs from the list
info_lp()
{
# Refresh status_list.tln first.
status_lp
for i in $LP_LIST
do
# BUG FIX: the original read "lp=grep -v ..." without the $(...) command
# substitution, so the shell tried to execute "-v" as a command instead of
# capturing the connected-LP list.
# NOTE(review): the value does not depend on "$i", so every iteration
# computes the same list -- presumably a per-LP filter was intended;
# left as in the source to avoid changing observable intent.
lp=$(grep -v "not-connected" status_list.tln | sort -u | awk ' { print $1 }')
(echo "clear"; echo "info $lp"; sleep 2;) | telnet $HOST $PORT
done
}
## randomly find an active lp and provide info for it
info_rand()
{
status_all
# Count candidate LP rows: lines that do not start with a letter and do
# not contain the '>' prompt.
alllps=`cat status_all.tln|egrep -v "^[a-z]|^[A-Z]|>" |awk ' { print $1 }'|wc -l`
# Pick a random 1-based row index...
rand=`echo $(( ( RANDOM % $alllps ) + 1 ))`
# ...and map it back to the LP name on that row (grep -n prefixes "N:").
lp=` cat status_all.tln|egrep -v "^[a-z]|^[A-Z]|>" |awk ' { print $1 }' |grep . -n |egrep "^$rand:"|awk -F\: ' { print $2} '`
(echo "clear"; echo "info $lp"; sleep 2;) | telnet $HOST $PORT
#verify_lpinfo
}
## randomly display top of book
# Query "afr" for one of three hard-coded ccy pairs picked at random and
# print the bid lines that contain no digits (i.e. empty quotes).
topOfBook_rand()
{
curr[1]='eurusd';
curr[2]='eurgbp';
curr[3]='eurjpy';
#curr[4]='aud';
rand=`echo $(( ( RANDOM % 3 ) + 1 ))`
echo $rand;
#output lines with "empties"
(echo "clear"; echo "afr ${curr[$rand]}"; sleep 3;) | telnet $HOST $PORT | grep bid | egrep -v "[0-9]"
#second way using awk
# cat book.tln | grep bid | awk '{ if (NF < 6) print }'
}
# show top of book for ccp's from the list
# For every ccy pair listed in the $CCP file, print bid lines with no
# digits -- i.e. pairs whose top of book is empty.
topOfBook_ccp()
{
for i in `cat $CCP` ;do
#output lines with "empties"
(echo "clear"; echo "afr $i"; sleep 3;) | telnet $HOST $PORT | grep bid | egrep -v "[0-9]" # > book.tln
#grep bid book.tln
done
#second way using awk
# cat book | grep bid | awk '{ if (NF < 6) print }'
}
## randomly display full book
# Dump the full book for a random ccy pair and flag it as "switched" when
# the min/max price ratio (column 5) drops below 98%.
books_rand()
{
curr[1]='eurusd';
curr[2]='eurgbp';
curr[3]='eurjpy';
#curr[4]='aud';
rand=`echo $(( ( RANDOM % 3 ) + 1 ))`
echo $rand;
(echo "clear"; echo "afr ${curr[$rand]}"; sleep 5;) | telnet $HOST $PORT> fullbook.tln
# NOTE(review): plain 'sort' orders prices lexicographically, not
# numerically -- 'sort -n' is probably what was meant; confirm.
max=`cat fullbook.tln|awk '{print $5}'|sort|egrep "[0-9]" | tail -1`;
echo $max;
min=`cat fullbook.tln|awk '{print $5}'|sort|egrep "[0-9]" | head -1`;
echo $min;
# Integer percentage of min relative to max.
let "d=$min*100/$max";
echo $d;
if [ "$d" -lt "98" ]; then
echo "switched values in FB: ";
cat fullbook.tln;
# cat fullbook.tln | awk '{if(NF==2){str=$1"_"$2};if($4>555 && NF>2){print str ": " $0}}'
fi
#verify_full_book
}
## display full book according to ccp
# For every ccy pair in $CCP: dump the full book, report zero-volume entries
# (column 7) and flag "switched" books when min/max of column 5 drops
# below 96%.
verify_switched_ccy()
{
#ccp=$1;
for i in `cat $CCP` ;do
(echo "clear"; echo "afr ^$i"; sleep 5;) | telnet $HOST $PORT > fullbook.tln
#cat fullbook.tln
echo $i
# NOTE(review): awk is given fullbook.tln as a file operand, so the piped
# 'cat' output is ignored -- the file is simply read once (harmless quirk).
zeroVol=`cat fullbook.tln|awk '{print $7}' fullbook.tln|sort -u|egrep "[0-9]" |head -1 `;
#echo `cat fullbook.tln|awk '{print $7}' fullbook.tln|sort -u|egrep "[0-9]" |head -1 `
if [[ $zeroVol == '0' ]];then
echo we have zero volum
cat fullbook.tln
fi
# NOTE(review): plain 'sort' compares prices as strings; 'sort -n' is
# probably intended here as well.
max=`cat fullbook.tln|awk '{print $5}' fullbook.tln|sort|egrep "[0-9]" | tail -1`;
echo max $max;
min=`awk '{print $5}' fullbook.tln|sort|egrep "[0-9]" | head -1`;
echo min $min;
let "d=$min*100/$max";
echo $d;
if [ "$d" -lt "96" ]; then
echo "switched values in FB: ";
# cat fullbook.tln;
# cat fullbook.tln | awk '{if(NF==2){str=$1"_"$2};if($4>555 && NF>2){print str ": " $0}}'
fi
done
#verify_full_book
}
buildTrades()
{
#VOL="1m"
ALL_TIF=( ioc day fok )
ALL_OT=( pq limit market )
for lp in $LP_LIST;do
for TIF in "${ALL_TIF[@]}";do
for OT in "${ALL_OT[@]}";do
for ccy in `cat $CCP`;do
for VOL in 500000 1m 3m 5m 10m;do
echo "buy $lp $ccy $VOL $OT $TIF" >> trade_orders.tln
echo "sell $lp $ccy $VOL $OT $TIF" >> trade_orders.tln
echo "buyv $lp $ccy $VOL $OT $TIF" >> trade_orders.tln
echo "sellv $lp $ccy $VOL $OT $TIF" >> trade_orders.tln
done
done
done
done
done
echo Done building comands
}
#### Random trading ####
# Load every prepared order into a 1-based array, then loop forever sending
# a randomly chosen order roughly once a minute.  Never returns.
trade_rand()
{
range="`cat trade_orders.tln|wc -l`"
j=1
while read line ; do
order[$j]=$line
#echo "criating-$j - $line to: ${order[$j]}"
((j++))
done < trade_orders.tln
#for i in "`cat trade_orders.tln`";do
# order+=($i)
# echo "criating-$j - $i to: ${order[$j]}"
# ((j++))
#done
#echo "range: $range , len: ${#order[@]}"
#rand=$(( ( RANDOM % ${#order[@]} ) + 1 ))
#echo "$rand"
#echo "running: ${order[$rand]}"
while true;do
rand=$(( ( RANDOM % $range ) + 1 ))
echo "running - $rand: ${order[$rand]}"
(echo "clear"; echo "${order[$rand]}"; sleep 60;) | telnet $HOST $PORT | grep -v "logged-in"
done
}
#### Serial trading ####
# Replay every prepared order in file order, one telnet session per order.
trade()
{
# BUG FIX: the original iterated with  for i in "`cat trade_orders.tln`"
# -- the quoting yields a SINGLE loop item containing the whole file, so
# only one telnet session was opened with all orders mashed together.
# Read the file line by line so each order is sent on its own.
while IFS= read -r order; do
echo "$order"
(echo "clear"; echo "$order"; sleep 60;) | telnet $HOST $PORT | grep -v "logged-in"
sleep 60
done < trade_orders.tln
}
#################### Parse parameters #########################
# Print CLI usage.  The here-doc body is program output -- keep verbatim.
usage()
{
cat << EOF
usage: $0 options
This script run FIXation/FTT with SimFix
OPTIONS:
-h Show this message
-l List of LPs
-c ccy pais list
-i ip of host (default localhost)
-p port (default 60000)
-r run random tradeing
-v Verbose
EOF
# Turn this on if you run the FTT binary (does not work with -m).
}
# Timestamp of this run (informational).
RUN_TIME=`date +%Y%m%d_%H%M%S`
# Option defaults, filled in by getopts below.
LP_FILE=
CCP=
HOST=
PORT=
RAND=
VERBOSE=
# BUG FIX: the getopts option string was wrapped in typographic quotes
# (“...”), which silently added the curly-quote characters to the accepted
# option set; use a plain single-quoted string.
while getopts 'hl:c:i:p:rv' OPTION
do
# Reject a flag whose "argument" is actually the next option (e.g. "-l -c"),
# which getopts would otherwise silently consume as the value.
if [[ $OPTARG == -* ]]; then
if [[ -z $TERM ]];then echo "$0: ---- option requires an argument -- $OPTION"
else echo "$(tput setaf 1)$0: ---- option requires an argument -- $OPTION$(tput sgr 0)";fi
usage
exit 1
else
case $OPTION in
h)
usage
exit 1
;;
l)
LP_FILE=$OPTARG
;;
c)
CCP=$OPTARG
;;
i)
HOST=$OPTARG
;;
p)
PORT=$OPTARG
;;
r)
RAND=1
;;
v)
VERBOSE=1
;;
?)
usage
exit
;;
esac
fi
done
# The ccy-pair list is mandatory; host/port default to localhost:60000.
if [[ -z $CCP ]]; then
usage
exit 1
fi
if [[ -z $HOST ]];then
HOST="localhost"
fi
if [[ -z $PORT ]];then
PORT=60000
fi
# Without -l, derive the LP list from the currently connected gateways.
if [[ -z $LP_FILE ]];then
status_all
LP_LIST=`grep -v "not-connected" status_all.tln | sort -u | awk ' { print $1 }'`
echo "using default LP list $LP_LIST"
else
LP_LIST="`cat $LP_FILE`"
fi
#==== End of Parse parameters ====#
################################### MAIN #############################
# Drop captures from previous runs, then run the test battery.
rm -f *.tln
echo run LP status
status_lp 1;
echo run show TOB
topOfBook_ccp;
echo run show full book
verify_switched_ccy
buildTrades
if [[ $RAND ]];then
echo run random trades
trade_rand
else
echo run serial trades
trade
fi
| true
|
673435fbaa0fafff653f16bb8e184a078e6a247f
|
Shell
|
DheerajJoshi/cloud-platform-how-out-of-date-are-we
|
/updater-image/update.sh
|
UTF-8
| 1,591
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# FIX: was #!/bin/sh, but the script uses [[ ]] (a bashism); under dash the
# conditionals below are syntax errors, so declare bash explicitly.
#
# Collect "how out of date are we" data (helm releases, terraform modules,
# documentation pages, repositories) and POST each payload to ${DATA_URL}
# using the API key from the environment or from a kubernetes secret.

NAMESPACE="how-out-of-date-are-we"
API_KEY_SECRET="how-out-of-date-are-we-api-key"

main() {
  set_api_key
  helm_releases
  terraform_modules
  documentation
  repositories
}

# Fetch the kubeconfig from S3 and switch to the target cluster context.
# Exports KUBE_CONTEXT as a marker that this function has already run.
# NOTE(review): assumes KUBECONFIG already points at /tmp/kubeconfig -- the
# variable is not exported here; confirm against the container image.
set_kube_context() {
  aws s3 cp "s3://${KUBECONFIG_S3_BUCKET}/${KUBECONFIG_S3_KEY}" /tmp/kubeconfig
  kubectl config use-context "${KUBE_CLUSTER}"
  export KUBE_CONTEXT="${KUBE_CLUSTER}" # So that we can tell if this function has been called
}

# Fetch the API key from a kubernetes secret in the live-1 cluster, if no API_KEY environment variable is set.
# This allows us to bypass the kubernetes secret lookup in development, by setting an API_KEY env. var.
set_api_key() {
  if [[ -z "${API_KEY}" ]]; then
    echo "Fetching API_KEY from kubernetes secret"
    set_kube_context
    # Split assignment from export so the kubectl pipeline's exit status
    # is not masked by 'export'.
    API_KEY=$(kubectl -n "${NAMESPACE}" get secrets "${API_KEY_SECRET}" -o jsonpath='{.data.token}' | base64 -d)
    export API_KEY
  fi
}

# Report installed helm chart versions (needs a kube context).
helm_releases() {
  if [[ -z "${KUBE_CONTEXT}" ]]; then
    set_kube_context
  fi
  helm repo update
  curl -H "X-API-KEY: ${API_KEY}" -d "$(/app/helm-releases.rb)" "${DATA_URL}/helm_whatup"
}

# Report terraform module versions used across the environments repo.
terraform_modules() {
  git clone --depth 1 https://github.com/ministryofjustice/cloud-platform-environments.git
  (
    cd cloud-platform-environments
    curl -H "X-API-KEY: ${API_KEY}" -d "$(/app/module-versions.rb)" "${DATA_URL}/terraform_modules"
  )
}

# Report documentation pages that are due for review.
documentation() {
  curl -H "X-API-KEY: ${API_KEY}" -d "$(/app/documentation-pages-to-review.rb)" "${DATA_URL}/documentation"
}

# Report repository-standards compliance.
repositories() {
  curl -H "X-API-KEY: ${API_KEY}" -d "$(cloud-platform-repository-checker)" "${DATA_URL}/repositories"
}

main
| true
|
cd45fbdd1e872a8f8dc392ae1684098d73a3bb43
|
Shell
|
benbacon/terraform-jitsi
|
/publicnetwork/install_jitsi.tpl
|
UTF-8
| 2,662
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Terraform template (*.tpl) rendered into a provisioning/user-data script
# that installs a Jitsi Meet server on Debian/Ubuntu.
# NOTE: ${domain}, ${certificate}, ${key}, ${jitsi_user_name} and
# ${jitsi_password} are Terraform template variables, substituted BEFORE
# the script runs -- they are not shell variables. $HOSTNAME and friends are.
set -e
export DEBIAN_FRONTEND=noninteractive
export HOSTNAME="${domain}"
# Install the provided TLS certificate/key pair for the site.
echo "${certificate}" > /etc/ssl/$HOSTNAME.crt
echo "${key}" > /etc/ssl/$HOSTNAME.key
chmod 644 /etc/ssl/$HOSTNAME.*
# Use Cloudflare DNS resolvers.
echo -e "nameserver 1.1.1.1\nnameserver 1.0.0.1" >> /etc/resolv.conf
# Harden SSH
sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin no/g; s/#PasswordAuthentication yes/PasswordAuthentication no/g' /etc/ssh/sshd_config
systemctl reload ssh
# Disable ipv6
sysctl -w net.ipv6.conf.all.disable_ipv6=1
sysctl -w net.ipv6.conf.default.disable_ipv6=1
# Set hostname
hostnamectl set-hostname $HOSTNAME
echo -e "127.0.0.1 localhost $HOSTNAME" >> /etc/hosts
apt-get -y update
apt-get -y upgrade
apt-get -y install apt-transport-https
# Install Java if Debian 9 or earlier
os_version=$(lsb_release -r | cut -f2 | awk -F '.' '{ print $1 }')
if [[ $os_version -le 9 ]]; then
apt-get -y install openjdk-8-jre-headless
echo "JAVA_HOME=$(readlink -f /usr/bin/java | sed "s:bin/java::")" | sudo tee -a /etc/profile
source /etc/profile
fi
# Install Nginx
apt-get -y install nginx
systemctl start nginx.service
systemctl enable nginx.service
# Add Jitsi to sources
apt-get -y install gnupg
wget -qO - https://download.jitsi.org/jitsi-key.gpg.key | sudo apt-key add -
sh -c "echo 'deb https://download.jitsi.org stable/' > /etc/apt/sources.list.d/jitsi-stable.list"
apt-get -y update
# Raise systemd file/process limits for the videobridge.
echo -e "DefaultLimitNOFILE=65000\nDefaultLimitNPROC=65000\nDefaultTasksMax=65000" >> /etc/systemd/system.conf
systemctl daemon-reload
# Configure Jitsi install
echo "jitsi-videobridge jitsi-videobridge/jvb-hostname string $HOSTNAME" | debconf-set-selections
echo "jitsi-meet jitsi-meet/cert-choice select I want to use my own certificate" | debconf-set-selections
# Install Jitsi
apt-get -y install jitsi-meet
# Increase default screen sharing frame rate
sed -i "s| // desktopSharingFrameRate: {| desktopSharingFrameRate: {\n min: 15,\n max: 30\n },|g" /etc/jitsi/meet/$HOSTNAME-config.js
# Configure jicofo
# Require authentication on the main domain and add an anonymous guest
# virtual host (secure-domain setup).
sed -i 's/authentication = "anonymous"/authentication = "internal_plain"/g' /etc/prosody/conf.avail/$HOSTNAME.cfg.lua
echo -e "\nVirtualHost \"guest.$HOSTNAME\"\n authentication = \"anonymous\"\n c2s_require_encryption = false" >> /etc/prosody/conf.avail/$HOSTNAME.cfg.lua
sed -i "s|// anonymousdomain: 'guest.example.com',|anonymousdomain: \'guest.$HOSTNAME\',|g" /etc/jitsi/meet/$HOSTNAME-config.js
echo "org.jitsi.jicofo.auth.URL=XMPP:$HOSTNAME" >> /etc/jitsi/jicofo/sip-communicator.properties
systemctl restart prosody jicofo jitsi-videobridge2
# Register the moderator account (user/password substituted by Terraform).
prosodyctl register "${jitsi_user_name}" $HOSTNAME '${jitsi_password}'
| true
|
3c6f096fb4a1738d561075ff3f2046bc88d9b8e8
|
Shell
|
DavidCohen17/ImpalaToGo
|
/bin/gen-cache-dataset.sh
|
UTF-8
| 191
| 2.828125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Generate a dataset of random files (for cache testing).
# $1 output path
# $2 number of files
# $3 size in Kilobytes
for i in $(seq 1 "$2");
do
	# 1 KiB blocks from /dev/urandom; paths quoted so an output directory
	# containing spaces works (the original unquoted $1/$3 broke on those).
	dd if=/dev/urandom of="$1/output$i.dat" bs=1024 count="$3"
done
| true
|
450529da696b5495986eb1289ef349b724e8ec2c
|
Shell
|
acg/trigger
|
/package/docs
|
UTF-8
| 275
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
# Build the package documentation: thin wrapper around package/build.
# Helper conventions (djb-style): shout prints a prefixed message on stderr,
# barf aborts with exit code 111, safe runs a command and barfs on failure.
shout() { echo "package/docs: $*" >&2; }
barf() { shout "fatal: $*"; exit 111; }
safe() { "$@" || barf "cannot $*"; }  # NOTE(review): defined but unused here
#### MAIN
umask 022
test -d package || barf "no package directory"
# ${1+"$@"} forwards the arguments portably on ancient shells (expands to
# nothing when no arguments were given).
exec package/build -c compile-doc -s doc -d html -b make ${1+"$@"}
# Unreachable unless exec itself fails; 111 signals a hard failure.
exit 111
| true
|
bcc5afd785703c56aa30a4b7b1dd36b033210b0b
|
Shell
|
RobinAlgayres/beer
|
/recipes/zrc2019/decode_beer.sh
|
UTF-8
| 2,273
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Acoustic Unit Discovery pipeline for ZeroSpeech 2019: prepare data,
# extract MFCC features, build a dataset object, run AUD (HMM) and convert
# the transcription to one-hot embeddings.  Requires the 'beer' conda env.
#######################################################################
## SETUP
if [ $# -ne 3 ]; then
echo "usage: $0 <wav_dir> <embedding_dir> <clean>"
exit 1
fi
# Directory structure
datadir=data_run
feadir=features_run
expdir=exp
wav=$1
embedding_dir=$2
# Data
db=zrc2019
dataset=testing
# Transcription file is named after the wav directory.
transcription=$(basename $1).txt
# Features
feaname=mfcc
# AUD training
# The number of epochs probably needs to be tuned to the final data.
epochs=10
# These parameters are ignored if you do parallel training. More
# precisely, the learning rate will be set to 1 and the batch
# size to the number of utterances in the training data.
lrate=0.1
batch_size=400
#######################################################################
# Optional cleanup of previous outputs when the third argument is 'clean'.
# NOTE(review): 'rm -r' / 'rm' without -f print errors when the paths do
# not exist yet; there is no 'set -e', so the script continues regardless.
if [ $3 = 'clean' ]; then
rm -r $datadir $feadir
rm $expdir/$db/aud/$transcription
rm $expdir/$db/datasets/$dataset.pkl
fi
# Activate the 'beer' conda environment.
source activate beer
mkdir -p $datadir $expdir $feadir
echo "--> Preparing data for the $db database"
local/$db/prepare_data.sh $datadir/$db $wav $dataset || exit 1
echo "--> Extracting features for the $db database"
steps/extract_features.sh conf/${feaname}.yml $datadir/$db/$dataset \
$feadir/$db/$dataset || exit 1
# Create a "dataset". This "dataset" is just an object
# associating the features with their utterance id and some
# other meta-data (e.g. global mean, variance, ...).
echo "--> Creating dataset(s) for $db database"
steps/create_dataset.sh $datadir/$db/$dataset \
$feadir/$db/$dataset/${feaname}.npz \
$expdir/$db/datasets/${dataset}.pkl
echo "--> Acoustic Unit Discovery on $db database"
steps/aud.sh conf/hmm.yml $expdir/$db/datasets/${dataset}.pkl \
$epochs $lrate $batch_size $expdir/$db/aud $transcription
python beer_to_onehot.py $expdir/$db/aud/$transcription $embedding_dir
# Parallel training. Much faster (and more accurate). This is the
# recommended training way. However, you need to have Sun Grid Engine
# like (i.e. qsub command) to run it. If you have a different
# enviroment please see utils/parallel/sge/* to see how to adapt
# this recipe to you system.
#steps/aud_parallel.sh conf/hmm.yml \
# data/$db/train/uttids \
# $expdir/$db/datasets/${dataset}.pkl \
# $epochs $expdir/$db/aud
|
badeb6fa4ecd5af82d0f7189be728482c8a9869e
|
Shell
|
guduhanyan/QRSdetection
|
/perf_test.sh
|
UTF-8
| 346
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the QRS-detection sources and test harness, run the performance
# test NUM_TESTS times, and aggregate the per-run logs into one report.

NUM_TESTS=10

# Rebuild the library under src/ and the test binary under test/.
cd ./src
make clean
make
cd ../test
make clean
make

# Start from an empty report.
# FIX: 'rm -f' -- a plain 'rm' printed an error on the very first run,
# when perf_report.txt does not exist yet.
rm -f perf_report.txt
touch perf_report.txt

export OMP_NUM_THREADS=4

for (( i=1; i<=NUM_TESTS; i++ ))
do
	echo "Test no. $i"
	make run
	# Each run appends its log to the aggregate report.
	cat qrs_log.txt >> perf_report.txt
done

cd ..
./perf_test_report.py -b ./test/perf_report.txt -t QRS_report -n "${NUM_TESTS}"
| true
|
61a5e2a6d3b5d1b47d5cad74f0207df5a72ab515
|
Shell
|
haobtc/blockstore
|
/bin/check_tail_cnt.sh
|
UTF-8
| 487
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Verify transaction counts for the ~300 blocks below the chain tip of the
# given network, and re-fetch any block whose count mismatches.
netname=$1
# Sourced (not executed) so it can set environment variables in this shell.
. setup-env.sh
tip_height=`python bin/get_tip_height.py $netname`
# Start checking 200 blocks below the tip.
start_height=`expr $tip_height - 200`
# Collect the mismatching block heights (3rd field of 'cnt mismatch' lines).
python bin/check_cnt_txes.py $netname $start_height 300 | grep 'cnt mismatch' | awk '{print $3}' | tee /tmp/mismatch.txt
cnt=`wc -l /tmp/mismatch.txt|awk '{print $1}'`
# Status line for monitoring; note '$cnt>' still expands $cnt before the
# '>' is taken as the redirection operator, so the count is in the output.
echo `date` checking $start_height $cnt>/tmp/st.txt
if [ $cnt -gt 0 ]; then
cd bsquery
# Re-fetch each mismatching block through the node fetcher.
for bh in $(cat /tmp/mismatch.txt); do node start.js -s fetch -c $netname -b $bh; done
fi
| true
|
de3c9fb1510f83c4ccd489c41058286935a0236f
|
Shell
|
Xceptance/XLT-Packer
|
/scripts/common/xlt-home/start-xlt.sh
|
UTF-8
| 2,533
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Unpack an XLT load-test distribution archive into a working directory and
# (re)start the XLT agent controller from it.
# What is my name?
SCRIPTNAME=`basename $0`
# Some defaults
# NOTE(review): AGENT_PREFIX, DOWNLOAD_ARCHIVE and START_PORT are never
# used below -- the sed near the end hard-codes port 8500; confirm before
# changing the port via this variable.
AGENT_PREFIX=ac
DOWNLOAD_ARCHIVE=xlt.zip
START_PORT=8500
PASSWORD_FILE=/home/xlt/.acpass
ARCHIVE_LOCATION="${1}"
TARGET_DIR="${2}"
AGENT_DIR="$TARGET_DIR/ac"
# Print command-line help.
function help() {
echo "Start XLT."
echo " xlt-archive-file : Archive containing the XLT version to start"
echo " target-dir : Where to extract the archive to? This will be the working directory for XLT."
echo "Usage: $SCRIPTNAME <xlt-archive-file> <target-dir>"
}
# Print a fatal message to stderr and terminate.
function err() {
>&2 echo "FATAL: $1"
exit 1
}
# Scan the arguments for a --help flag before anything destructive happens.
for arg in $@ ; do
case $arg in
--help)
help
exit 0
;;
esac
done
# Check for at least two input parameters, stop otherwise
if [ $# -lt 2 ]; then
help
echo ""
err "Incorrect command line. Please specify the XLT archive and installation directory."
fi
# Check existience of archive to unpack
if [ ! -e $ARCHIVE_LOCATION ]; then
err "$ARCHIVE_LOCATION does not exist. Aborting."
fi
# check target
if [ ! -d $TARGET_DIR ]; then
err "Target dir '$TARGET_DIR' does not exist. Aborting."
fi
# can we write? (the probe file is swept away by the cleanup rm just below)
touch $TARGET_DIR/test
if [ "$?" -ne "0" ]; then
err "Target dir '$TARGET_DIR' is not writable. Aborting."
fi
echo "Cleaning target dir '$TARGET_DIR'"
rm -rf $TARGET_DIR/*
# Unzipping
# Skip docs/samples/tools/etc and the Windows .cmd launchers.
echo "Unzipping XLT archive..."
unzip $ARCHIVE_LOCATION \
-x "*/doc/*" "*/samples/*" "*/tools/*" "*/etc/*" "*/bin/*.cmd" \
-d $TARGET_DIR
# Removing temp if existing
if [ -d $AGENT_DIR ]; then
echo "Removing old agent dir $AGENT_DIR ..."
rm -rf $AGENT_DIR
fi
# Renaming install dir
echo "Renaming XLT dir..."
mv $TARGET_DIR/xlt-* $AGENT_DIR
# Setting rights
echo "Setting execution rights..."
chmod a+x $AGENT_DIR/bin/*.sh
# Agent-controller password, read from $PASSWORD_FILE when present.
PASSWORD=""
if [ -f $PASSWORD_FILE ]; then
PASSWORD=$(< $PASSWORD_FILE)
fi
# Configure port and password
sed -i 's/com.xceptance.xlt.agentcontroller.port =.*/com.xceptance.xlt.agentcontroller.port = 8500/g' $AGENT_DIR/config/agentcontroller.properties
if [ -n "$PASSWORD" ]; then
sed -i 's/^com.xceptance.xlt.agentcontroller.password = .*$/com.xceptance.xlt.agentcontroller.password = '"$PASSWORD"'/g' $AGENT_DIR/config/agentcontroller.properties
else
# No password configured: comment the property out.
sed -i 's/^\(com.xceptance.xlt.agentcontroller.password =.*\)$/#\1/g' $AGENT_DIR/config/agentcontroller.properties
fi
# Start agentcontroller
# NOTE(review): this kills EVERY java process owned by the current user,
# not just a previous agent controller.
CURRENT_USER=`whoami`
echo "Kill current java processes (if any)"
killall -9 -u $CURRENT_USER java
echo "Starting XLT Agent Controller"
$AGENT_DIR/bin/agentcontroller.sh&
exit 0
| true
|
9bdafb4ade6c6274f662ac1cd76ff0004785a442
|
Shell
|
bellmit/SVNrepo
|
/Invessence/modules/emailer/src/main/sh/stop_email.sh
|
UTF-8
| 271
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/sh
# Stop the Invessence emailer service by killing every matching java process.
LOG=/inv/log
JAVA_HOME=/usr/bin
LIB_HOME=/inv/services/lib
LOG_HOME=/inv/log
export CLASSPATH=${CLASSPATH}:${LIB_HOME}
# BUG FIX: the original looped over a *quoted* command substitution
# ("`ps ...`"), so the whole multi-line ps output was a single loop item
# and only the first PID was ever extracted and killed.  Iterate over the
# extracted PIDs instead so every matching process is terminated.
# NOTE(review): SIGKILL as first resort gives the JVM no chance to clean
# up -- consider a plain TERM first.
for id in `ps -eaf | grep -i emailer | grep -v grep | gawk -F' ' '{print $2}'`
do
   echo kill $id
   kill -9 $id
done
| true
|
7791cd0f29fe4fb3fbbdb25879d82fac99118cb8
|
Shell
|
gadams999/iot-static-ip-endpoints
|
/source/run-all-tests.sh
|
UTF-8
| 1,628
| 3.109375
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is located at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
#
#
# This script runs all tests for the root CDK project, as well as any microservices, Lambda functions, or dependency
# source code packages. These include unit tests, integration tests, and snapshot tests.
#
# The if/then blocks are for error handling. They will cause the script to stop executing if an error is thrown from the
# node process running the test case(s). Removing them or not using them for additional calls with result in the
# script continuing to execute despite an error being thrown.

# Save the current working directory
source_dir=$PWD

# Install
npm install

# License checks
./license-report.sh
# BUGFIX: compare against 0 instead of "= 1" so every non-zero exit code
# (not just exactly 1) aborts the run.
if [ "$?" != "0" ]; then
	echo "(source/run-all-tests.sh) ERROR: there is likely output above." 1>&2
	exit 1
fi

# Test the CDK project
npm run build
npm run lint
# NOTE(review): only the exit status of the last command (the test run) is
# checked here; build/lint failures are assumed to surface through it.
npm run test -- -u
if [ "$?" != "0" ]; then
	echo "(source/run-all-tests.sh) ERROR: there is likely output above." 1>&2
	exit 1
fi

# lambda python tests
chmod +x ./run-lambda-tests.sh
./run-lambda-tests.sh

# Return to the source/ level
cd $source_dir
| true
|
8d84865feb5859a106d90a75ee3cc7a4aecbbe8a
|
Shell
|
Dhilibanachilles/Arithmetic-Computation
|
/arithmeticComputationq8.sh
|
UTF-8
| 617
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash -x
# Read three integers, evaluate four mixed-precedence arithmetic
# expressions, print the results, then print them sorted descending.
read -p "Enter the vlaue of a :: " a
read -p "Enter the vlaue of b :: " b
read -p "Enter the vlaue of c :: " c

# The four expressions exercise operator precedence (* and / and % bind
# tighter than + in shell arithmetic).
declare -A results
results[one]=$(( a + b * c ))
results[two]=$(( a * b + c ))
results[three]=$(( c + a / b ))
results[four]=$(( a % b * c ))

# Collect the outcomes into the indexed array `a` for sorting.
i=0
a[i++]=${results[one]}
a[i++]=${results[two]}
a[i++]=${results[three]}
a[i++]=${results[four]}
echo "outcome" ${a[@]}

# Bubble sort, largest first.
for (( pass=0; pass<4; pass++ ))
do
  for (( k=0; k<4-pass-1; k++ ))
  do
    if [[ ${a[k]} -lt ${a[$((k+1))]} ]]
    then
      swap=${a[k]}
      a[k]=${a[$((k+1))]}
      a[k+1]=$swap
    fi
  done
done
echo "Descending Order" ${a[@]}
| true
|
b57165f98e73f1d92c37eb23e5e1e4ac02e7576a
|
Shell
|
pixelastic/oroshi
|
/config/zsh/aliases/jump.zsh
|
UTF-8
| 228
| 2.828125
| 3
|
[] |
no_license
|
# Mark / Jump helpers (source: https://jeroenjanssens.com/navigate/).
# Marks live as symlinks under $MARKPATH; `j <mark>` jumps to one.
export MARKPATH=$HOME/.marks
alias m='mark'
alias mR='unmark'
alias ml="ls $MARKPATH"
# Jump to a mark; resolve the symlink (-P) and report unknown marks.
function j {
  if ! cd -P "${MARKPATH}/$1" 2>/dev/null; then
    echo "No such mark: $1"
  fi
}
| true
|
84e865c1abc379f86f071f1e0905686cf0c1b8e8
|
Shell
|
RussBabb/rocket2020
|
/setup/conky/grid
|
UTF-8
| 143
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the Maidenhead grid square reported by the conky helper.
# The helper returns a "JJ00..." locator when there is no GPS fix,
# so any JJ00 result is reported as "NO GPS".
GRID=$($HOME/bin/conky/get-grid)
if echo $GRID | grep -qi JJ00; then
	echo "NO GPS"
else
	echo $GRID
fi
| true
|
1a66534a62e7df8d56b9ef72a26fa217e9222974
|
Shell
|
sushantmimani/reverse_proxy
|
/run.sh
|
UTF-8
| 309
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run the unit tests first; only start the compose stack when they pass.
# Ctrl-C tears the stack down and removes its volumes.

cleanup_on_interrupt()
{
    echo "Bring down service and freeing up associated volumes"
    docker-compose down -v
}

trap cleanup_on_interrupt SIGINT

if pip install --user -r requirements1.txt && python unit_tests.py ; then
    docker-compose up
else
    echo "Tests failed"
fi
| true
|
be690d8dfac63f3e15adc84fb72bd083114ca49f
|
Shell
|
yejunhai/helloworld
|
/CPU内存利用率.sh
|
UTF-8
| 172
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Collect CPU and memory utilisation percentages into shell variables.
# NOTE(review): nothing is printed - this snippet only assigns $cpu and
# $mem, so it is presumably meant to be sourced or extended. Confirm.
# CPU utilisation: $cpu (100 minus the idle column of `top`)
cpu=`top -n 1|awk '/%Cpu/{printf("%.2f%\n",(100-$8))}'`
# Memory utilisation: $mem ((total - free) / total)
mem=`free|awk '/Mem:/{printf("%.2f%\n",($2-$4)/$2*100)}'`
| true
|
b189a9454b18e29610d8a4df7c0d99bb2263c82e
|
Shell
|
AppSecAI-TEST/lightfish
|
/.travisci/testtravis
|
UTF-8
| 250
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env sh
# Replay the Travis build locally: for every build stage, run the usual
# lifecycle actions through the .travisci/script helper.
for BUILDSTAGE in unittest integrationtest systemtest
do
  export BUILDSTAGE
  for step in install before_script test after_script
  do
    echo Running $BUILDSTAGE / $step
    .travisci/script $step
  done
done
| true
|
e1945d583fa21f99d03eb2be9910d13a6edd3356
|
Shell
|
kokorinosoba/dotfiles
|
/etc/init/setup/terminal-solarized.sh
|
UTF-8
| 247
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install the Solarized color scheme into the default macOS Terminal.
# BUGFIX: if TMPDIR was unset, "${TMPDIR%/}" expanded to the empty string
# and the clone targeted "/Solarized". Fall back to /tmp, then strip any
# trailing slash.
TMPDIR=${TMPDIR:-/tmp}
readonly TMPDIR=${TMPDIR%/}
git clone https://github.com/tomislav/osx-terminal.app-colors-solarized.git "$TMPDIR/Solarized" &&
open "$TMPDIR/Solarized/Solarized Dark.terminal"
| true
|
f587fc09880e24d389a321bfd6a0ea360df804f9
|
Shell
|
atweiden/voidpkgs
|
/srcpkgs/jitterentropy-rngd/template
|
UTF-8
| 701
| 2.625
| 3
|
[
"Unlicense",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
# Build template for the 'jitterentropy-rngd' Void Linux package (xbps-src).
maintainer="nox"
pkgname="jitterentropy-rngd"
version=1.2.8
revision=1
short_desc="Jitter RNG Daemon"
homepage="https://www.chronox.de/jent.html"
license="BSD-3-Clause, GPL-2.0-only"
distfiles="https://www.chronox.de/jent/$pkgname-$version.tar.xz"
checksum="c4480c67d888fd9c6c9c3bcf06c785578ad81984ec978da3c33d16aa019664fb"
build_style="gnu-makefile"
make_use_env="yes"
# NOTE(review): -O0 looks deliberate - jitter-entropy sources are typically
# built unoptimized so the measured timing noise survives; confirm against
# upstream docs before changing.
CFLAGS="-O0"

# Patch the Makefile before the gnu-makefile build runs.
pre_configure() {
	# fix sbin and disable systemd service installation
	sed \
		-i \
		-e 's/sbin/bin/g' \
		-e '/jitterentropy\.service\ /d' \
		Makefile
}

# Register the license texts shipped with the source tarball.
post_install() {
	vlicense COPYING
	vlicense COPYING.bsd
	vlicense COPYING.gplv2
}

# vim: set filetype=sh foldmethod=marker foldlevel=0 nowrap:
| true
|
92692832ba3d4ffb2d7dfd3f5cc48c5fdfeabe49
|
Shell
|
shahkamran/aws-cloudwatch-custom-monitor-scripts
|
/aws_cloudwatch_publish.sh
|
UTF-8
| 2,477
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Collect MySQL and OS metrics on this EC2 instance and publish them to
# CloudWatch under the "Custom" namespace, dimensioned by instance ID.
#
# SQL Metrics (each value comes from the local mysqlmon.sh helper)
max_connections=$(/usr/local/bin/mysqlmon.sh max_connections)
max_used_connections=$(/usr/local/bin/mysqlmon.sh max_used_connections)
threads_connected=$(/usr/local/bin/mysqlmon.sh threads_connected)
connections=$(/usr/local/bin/mysqlmon.sh connections)
#
# OS Metrics
Memory_Utilisation=$(free -m | awk 'NR==2{printf "%.2f\t", $3*100/$2 }')
# NOTE(review): "grep 80" / "grep 443" match those digits anywhere in the
# netstat line (addresses, ports, counters), not only the port column -
# these counts are approximate.
TCP_Conn=$(netstat -an | wc -l)
TCP_Conn_http=$(netstat -an | grep 80 | wc -l)
TCP_Conn_https=$(netstat -an | grep 443 | wc -l)
Users=$(uptime |awk '{ print $6 }')
IO_Wait=$(iostat | awk 'NR==4 {print $5}')
# NOTE(review): if several /dev/nvme filesystems are mounted, df yields
# multiple values here - single-disk assumption, TODO confirm.
Disk_Utilisation=$(df -h | grep /dev/nvme| awk '{print $5}' | grep -o '[0-9]*')
#
# Timestamp (UTC, zeroed seconds)
cdate=$(date -u +%Y-%m-%dT%H:%M:00.000Z)
#
# Instance ID from the EC2 instance metadata service
instanceid=$(curl http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null)
#
# Publish Metrics
aws cloudwatch put-metric-data --metric-name MySQLMaxConnections --namespace "Custom" --dimensions="Instance=$instanceid" --value $max_connections --timestamp $cdate
aws cloudwatch put-metric-data --metric-name MySQLMaxUsedConnections --namespace "Custom" --dimensions="Instance=$instanceid" --value $max_used_connections --timestamp $cdate
aws cloudwatch put-metric-data --metric-name MySQLThreadsConnected --namespace "Custom" --dimensions="Instance=$instanceid" --value $threads_connected --timestamp $cdate
aws cloudwatch put-metric-data --metric-name MySQLConnections --namespace "Custom" --dimensions="Instance=$instanceid" --value $connections --timestamp $cdate
aws cloudwatch put-metric-data --metric-name Memory_Utilisation --dimensions Instance=$instanceid --namespace "Custom" --value $Memory_Utilisation
aws cloudwatch put-metric-data --metric-name TCP_Connections --dimensions Instance=$instanceid --namespace "Custom" --value $TCP_Conn
aws cloudwatch put-metric-data --metric-name TCP_Conn_http --dimensions Instance=$instanceid --namespace "Custom" --value $TCP_Conn_http
aws cloudwatch put-metric-data --metric-name TCP_Conn_https --dimensions Instance=$instanceid --namespace "Custom" --value $TCP_Conn_https
aws cloudwatch put-metric-data --metric-name No_of_users --dimensions Instance=$instanceid --namespace "Custom" --value $Users
aws cloudwatch put-metric-data --metric-name IO_Wait --dimensions Instance=$instanceid --namespace "Custom" --value $IO_Wait
aws cloudwatch put-metric-data --metric-name Disk_Utilisation --dimensions Instance=$instanceid --namespace "Custom" --unit Percent --value $Disk_Utilisation
#
#
| true
|
e813589872cae3036cf713e6df70f6e1dc2147e5
|
Shell
|
mstange22/Exercism
|
/bash/resistor-color-trio/resistor_color_trio.sh
|
UTF-8
| 999
| 4.03125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Resistor color trio: translate three color bands into a resistance label.
declare -A number_map=(
  [black]=0 [brown]=1 [red]=2 [orange]=3 [yellow]=4
  [green]=5 [blue]=6 [violet]=7 [grey]=8 [white]=9
)

# main BAND1 BAND2 BAND3
# BAND1/BAND2 are the significant digits, BAND3 the number of trailing
# zeroes. Prints e.g. "2200 ohms" / "67 megaohms", or "invalid color"
# (exit 1) for an unknown band.
main () {
  local sum="" band
  # First two bands contribute one digit each.
  for band in $1 $2; do
    [[ ${number_map[$band]} ]] || { echo "invalid color"; exit 1; }
    sum+=${number_map[$band]}
  done
  # Third band must also be a known color.
  [[ ${number_map[$3]} ]] || { echo "invalid color"; exit 1; }
  # Append the multiplier zeroes.
  local zeroes=${number_map[$3]} k
  for (( k=0; k<zeroes; k++ )); do
    sum+=0
  done
  # A leading black band is a leading zero - drop it.
  if [[ $1 == "black" ]]; then
    sum=${sum:1}
  fi
  # Collapse trailing zero groups into metric prefixes.
  if (( ${#sum} > 8 )) && [[ ${sum: -9} == "000000000" ]]; then
    sum="${sum::-9} gigaohms"
  elif (( ${#sum} > 5 )) && [[ ${sum: -6} == "000000" ]]; then
    sum="${sum::-6} megaohms"
  elif (( ${#sum} > 2 )) && [[ ${sum: -3} == "000" ]]; then
    sum="${sum::-3} kiloohms"
  else
    sum="$sum ohms"
  fi
  echo $sum
}
# Entry point: forward all command-line arguments to main.
main "$@"
| true
|
bf22ccf6cfb67c1df53531602865ee9614cb9785
|
Shell
|
lewis8879456/astralgate
|
/packages/net/astral/files/lib/keeper-utils.sh
|
UTF-8
| 1,002
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/sh
# AstralGate "keeper" helper: generate an rsyncd configuration from UCI
# settings. config_load/config_get/config_foreach are presumably provided
# by the OpenWrt functions.sh sourced below - confirm.
[ -e /etc/functions.sh ] && . /etc/functions.sh || . ./functions.sh

# Write /tmp/astral-keeper-rsyncd.conf and its secrets file for one
# "keeper" config section ($1). Reads the enabled/secret/storage options.
# NOTE(review): "enabled" is read but never used here.
keeper_generate_config() {
	local enabled
	local secret
	local storage

	config_get enabled $1 enabled
	config_get secret $1 secret
	config_get storage $1 storage

	echo "uid = root
gid = root
use chroot = yes
pid file = /var/run/rsyncd.pid
log file = /dev/null
[hosts]
comment = AstralGate hosts
auth users = astralgate
secrets file = /tmp/astral-keeper-rsyncd.secrets
read only = no" > /tmp/astral-keeper-rsyncd.conf

	echo "path = $storage" >> /tmp/astral-keeper-rsyncd.conf
	echo "astralgate:$secret" > /tmp/astral-keeper-rsyncd.secrets
	# Keep the secrets file private (rsyncd presumably rejects readable
	# secrets files via strict modes - confirm).
	chmod 700 /tmp/astral-keeper-rsyncd.secrets
	mkdir -p $storage
}

# Regenerate the rsyncd config for every "keeper" section of /etc/config/astral.
generate_config() {
	config_load astral
	config_foreach keeper_generate_config keeper
}

case "$1" in
	generate_config)
		generate_config
	;;
esac
| true
|
dbdf6bcd45a9b8614602eb0d6b7c210c169e0683
|
Shell
|
DavidHerel/OSY
|
/hw1/hw1.sh
|
UTF-8
| 1,645
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# hw1.sh - read stdin; every line starting with "PATH " names a filesystem
# entry to classify (LINK / FILE / DIR on stdout, ERROR on stderr).
# Options: -h prints usage, -z tars all listed regular files into output.tgz.
# Exit status: 1 if any path was invalid, 2 on bad options, 0 otherwise.

#declaring functions

# Classify the path held in the global $input. ${input:5} strips the
# leading "PATH " prefix. Regular files are also collected in file_array
# for the optional -z archive; unknown paths set the global error flag.
write_file(){
	if [ -L "${input:5}" ]; #symbolic link?
	then
		rdl=$(readlink ${input:5})
		echo "LINK '${input:5}' '${rdl}'"
	elif [ -f "${input:5}" ]; #regular file?
	then
		number_lines=$(wc -l < "${input:5}")
		first_line=$(head -n 1 "${input:5}")
		file_array+=("${input:5}")
		echo "FILE '${input:5}' ${number_lines} '${first_line}'"
	elif [ -d "${input:5}" ]; #directory?
	then
		echo "DIR '${input:5}'"
	else #none of the above: report and remember the error
		error=1
		echo "ERROR '${input:5}'" >&2
	fi
}

declare -a file_array
make_file=0 #false
error=0 #false

#waiting for arguments
while getopts ":hz" opt;
do
	case $opt in
		h) echo "This script reads an input"
		   echo "If your line starts with PATH /home/.., script will take words after PATH as a path to a file/dir/symlink"
		   echo "You can write two arguments h and z"
		   echo "Argument h will show you this manual"
		   echo "Argument z will zip files you written after PATH"
		   exit 0
		   ;;
		z) make_file=1
		   ;;
		?) exit 2
		   ;;
	esac
done
shift $(($OPTIND - 1))

#reading an input
while read input
do
	if [ "${input:0:5}" = "PATH " ];
	then
		#print things that needs to be print
		write_file
	fi
done

#finishing the job: archive collected files if -z was given
if [ "$make_file" = 1 ];
then
	tar czf output.tgz "${file_array[@]}"
fi
if [ "$error" = 1 ];
then
	exit 1
elif [ "$error" = 0 ];
then
	exit 0
fi
| true
|
b482a16797fb1317633f2555031b7705dc9022ca
|
Shell
|
RakhithJK/research-automation
|
/dumpbootargs
|
UTF-8
| 1,034
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Dump the boot arguments recognized by a kernelcache using jtool2.
# Produces (and opens) bootargs.txt.

INFILE="$1"
COMPANION=$(ls | grep "$1.ARM64")
COMPANION="$COMPANION"

if ! [ -f "$COMPANION" ]; then
  if ! [ -f "$INFILE" ]; then
    echo "Usage: $0 [kernelcache]"
    exit 1;
  else
    echo "Analyzing kernel..."
    jtool2 --analyze "$INFILE" > /dev/null
    COMPANION=$(ls | grep "$1.ARM64")
    COMPANION="$COMPANION"
    if ! [ -f "$COMPANION" ]; then
      echo "Usage: $0 [kernelcache]"
      exit 1;
    fi
  fi
  # BUGFIX: the original had an unconditional "exit 1" here, so the script
  # aborted even after a successful analysis pass.
fi

echo "Retrieving offset for PE_parse_boot_argn_internal..."
PE_PARSE_BOOT_ARGN_INTERNAL=$(cat "$COMPANION" | sed -e 's/|/ /g' | grep "PE_parse_boot_argn_internal" | awk '{print $1}' | sed -e 's/0x//g')

echo "Retrieving list of boot arguments..."
# BUGFIX: disassemble the file given on the command line instead of the
# hard-coded "kernelcache.release.n71.decompressed".
TMP=$(jtool2 -d "$INFILE" 2> /dev/null | grep -e "_func_$PE_PARSE_BOOT_ARGN_INTERNAL" -e "PE_parse_boot_argn")
TMP=$(echo "$TMP" | grep "(" | tr -d '\t' | sed -e 's/(/ /g' -e 's/,/ /g')
TMP=$(echo "$TMP" | awk '{print $2}' | tr -d '"')
echo "$TMP" | sort -u > bootargs.txt
echo "Successfully retrieved bootargs"
open "bootargs.txt"
| true
|
c3bbafc95e20f2363bb51c97cb6e1f3b7cb156b7
|
Shell
|
jhliberty/AAF
|
/build/tools/FilterLog_i686Linux
|
UTF-8
| 2,024
| 2.90625
| 3
|
[] |
no_license
|
#! /bin/bash
###############################################################################
#
# $Id: FilterLog_i686Linux,v 1.4 2009/06/01 11:46:49 stuart_hc Exp $ $Name: V116 $
#
# The contents of this file are subject to the AAF SDK Public Source
# License Agreement Version 2.0 (the "License"); You may not use this
# file except in compliance with the License. The License is available
# in AAFSDKPSL.TXT, or you may obtain a copy of the License from the
# Advanced Media Workflow Association, Inc., or its successor.
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License. Refer to Section 3.3 of the License for proper use
# of this Exhibit.
#
# WARNING: Please contact the Advanced Media Workflow Association,
# Inc., for more information about any additional licenses to
# intellectual property covering the AAF Standard that may be required
# to create and distribute AAF compliant products.
# (http://www.amwa.tv/policies).
#
# Copyright Notices:
# The Original Code of this file is Copyright 1998-2009, licensor of the
# Advanced Media Workflow Association. All rights reserved.
#
###############################################################################

# This script simply filters known good messages from a Linux "make
# everything" log. Any text that makes it through the filter is
# considered an error.
#
# NOTE(review): StripEscapedReturns is an external helper expected on PATH.
# The make line below ends in a bare "|" (no backslash) - legal, but
# inconsistent with the surrounding "| \" continuation style.
StripEscapedReturns | \
grep -v ^g++ | \
grep -v ^ar | \
grep -v Generating | \
grep -v Entering | \
grep -v Leaving | \
grep -v Building | \
grep -v ^cp | \
grep -v ^cd | \
grep -v "OMF library not available for i686Linux" | \
grep -v "Nothing to be done" | \
grep -v Skipping | \
grep -v "updating timestamp" | \
grep -v "did not change" | \
grep -v update.ksh | \
grep -v "make -f unixaafsdk.mak CFG=Debug" |
grep -v ^real | \
grep -v ^user | \
grep -v ^sys
| true
|
b6e16aa6563713d9635d4101db2b1c832e2601b5
|
Shell
|
ChrisHarte/dotfiles
|
/.zshrc
|
UTF-8
| 2,732
| 2.75
| 3
|
[] |
no_license
|
# ~/.zshrc - interactive zsh configuration built around oh-my-zsh.
# Oh my zsh integration
ZSH=$HOME/.oh-my-zsh
# Plugins
plugins=(git rbenv)
#ZSH_THEME="agnoster-light"
ZSH_THEME="robbyrussell"
# Completion dots
COMPLETION_WAITING_DOTS="true"
# Disable autocorrect on enter
unsetopt correct_all
# Now load oh-my-zsh
source $ZSH/oh-my-zsh.sh
# Show completion on first TAB
setopt menucomplete
# Load completions for Ruby, Git, etc.
autoload compinit
compinit
# Path
PATH=/usr/local/bin:/usr/local/lib/node:/usr/local/sbin:/usr/local/var:/usr/local/share/npm/bin:/usr/local/share/npm/bin:$HOME/.dotfiles/bin:$PATH
# MacPorts
PATH=/opt/local/bin:/opt/local/sbin:$PATH
# NodeJS
NODE_PATH=/usr/local/lib/node_modules
# Heroku
PATH=/usr/local/heroku/bin:$PATH
# RBENV
export PATH="$HOME/.rbenv/bin:$PATH"
eval "$(rbenv init -)"
# Unbreak broken, non-colored terminal
export TERM='xterm-256color'
export LSCOLORS="ExGxBxDxCxEgEdxbxgxcxd"
# NOTE(review): GREP_OPTIONS is deprecated in modern GNU grep and prints a
# warning there - consider a grep alias instead; confirm target platform.
export GREP_OPTIONS="--color"
# Show contents of directory after cd-ing into it
chpwd() {
	ls -lrthG
}
# Save history
export HISTSIZE=10000
export HISTFILE=~/.zsh_history
export SAVEHIST=$HISTSIZE
export VISUAL=vim
export EDITOR=vim
# Disable flow control commands (keeps C-s from freezing everything)
stty start undef
stty stop undef
# Sudo support for rbenv: resolve the rbenv shim before handing it to sudo
function rbenvsudo(){
	executable=$1
	shift 1
	sudo $(rbenv which $executable) $*
}
# tmux
alias tmux='TERM=xterm-256color tmux -2'
alias tmuxinator='TERM=xterm-256color tmuxinator'
alias mux='TERM=xterm-256color mux'
# tmuxinator
[[ -s "$HOME/.tmuxinator/scripts/tmuxinator" ]] && source "$HOME/.tmuxinator/scripts/tmuxinator"
# full 256 colors in terminal (run "tput colors" to check)
export TERM=xterm-256color
# sub - https://github.com/37signals/sub
# NOTE(review): "$HOME/.91/bin/91" looks like a sub-generated tool named
# "91"; verify the path is intended.
eval "$($HOME/.91/bin/91 init -)"
# in terminal vim we want access to the Ctrl+ combinations
alias vim="stty stop '' -ixoff ; vim"
# `Frozing' tty, so after any command terminal settings will be restored
ttyctl -f
# VI-mode
# http://www.techrepublic.com/blog/opensource/using-vi-key-bindings-in-bash-and-zsh/193
bindkey -v
# 10ms for key sequences
KEYTIMEOUT=1
# Use dotfiles version of tig on OSX (tig broken in macports and brew doesnt play with both)
if [[ -f /etc/zshenv && $(uname) == Darwin ]]; then
	alias tig=tig-osx
fi
source $HOME/.dotfiles/zsh/aliases.sh
source $HOME/.dotfiles/zsh/functions.sh
# Shaves about 0.5s off Rails boot time (when using perf patch). Taken from https://gist.github.com/1688857
export RUBY_HEAP_MIN_SLOTS=1000000
export RUBY_HEAP_SLOTS_INCREMENT=1000000
export RUBY_HEAP_SLOTS_GROWTH_FACTOR=1
export RUBY_GC_MALLOC_LIMIT=1000000000
export RUBY_HEAP_FREE_MIN=500000
# disable oh-my-zsh update prompts, upgrade with upgrade_oh_my_zsh
DISABLE_UPDATE_PROMPT=true
DISABLE_AUTO_UPDATE=true
| true
|
bf7ca2cfc96a3ed9774eceaca40a50622830ecbe
|
Shell
|
gobattle/oh-my-zsh
|
/custom/example.zsh
|
UTF-8
| 439
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
# Add yourself some shortcuts to projects you often work on
# Example:
#
# brainstormr=/Users/robbyrussell/Projects/development/planetargon/brainstormr

# Keep SSH agent forwarding working across tmux detach/attach: point a
# stable symlink at whatever socket the current session provides, and make
# future processes use the symlink.
if [[ -n "$SSH_AUTH_SOCK" && "$SSH_AUTH_SOCK" != "$HOME/.ssh/agent_sock" ]]; then
  unlink "$HOME/.ssh/agent_sock" 2>/dev/null
  ln -s "$SSH_AUTH_SOCK" "$HOME/.ssh/agent_sock"
  export SSH_AUTH_SOCK="$HOME/.ssh/agent_sock"
fi
| true
|
73b2860f6ce55369b301e96710955b327b72e91e
|
Shell
|
isahakukamil/reuben_scripts
|
/sec_module2/x11.sh
|
UTF-8
| 1,275
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#---MOP WORK PACKAGE TAG: WP022BISV3---
#---DESC:
#---SETTING X11 PROTOCOL FORWARDING---
#---SCRIPT BY REUBEN A. BOAKYE
#SIGNUM: EBOAREU---
#---All scripts run are logged into a single log file called "script.log"
#with file path /var/log/script.log---

file=/etc/ssh/sshd_config

if test -f "$file"; then
	timeStamp=$(date '+%m_%y_%H_%M_%S')
	fExtension1=".log"
	backupFile=/etc/ssh/sshd_x11_$timeStamp
	logName="script"
	logFile=/var/log/$logName$fExtension1

	# Back up the configuration file before editing it.
	sudo cp $file $backupFile

	# BUGFIX: sshd_config ships the keyword as "X11Forwarding" (capital X);
	# the original case-sensitive pattern "x11Forwarding" never matched it.
	sed -i "s/.*[Xx]11Forwarding.*/X11Forwarding yes/g" $file
	STATUS=`echo "$?"`

	# Ensure the log file exists, then record the outcome once (the two
	# near-identical branches of the original are consolidated here).
	# NOTE(review): "sudo echo ... >> file" redirects as the invoking user,
	# not root - the script presumably runs as root already; confirm.
	[ -f "$logFile" ] || sudo touch $logFile
	if [ "$STATUS" -eq 0 ]; then
		sudo echo "$USER $timeStamp Exit_Status: Successfully Executed" >> $logFile
	else
		sudo echo "$USER $timeStamp Exit_Status: Failed to Execute." >> $logFile
		# BUGFIX: the original restored "$backupfile" (wrong case, always
		# empty); restore from the backup actually taken above.
		sudo mv $backupFile $file
	fi
	sudo service sshd restart
else
	echo "***THE CONFIGURATION FILE DOES NOT EXIST!***"
fi
| true
|
4f46080a7895be318b4bb10bdc19dee2852d2eff
|
Shell
|
abhimanyuZ/WiFiAutoLogin
|
/WifiLogin.sh
|
UTF-8
| 276
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Log in to the Ruckus captive portal and report whether it worked.
# The portal returns a page containing "Sign Out" once the login succeeds.
response=$(curl -k -s --data "RequestType=Login&UE-Username=Your_Username&UE-Password=Your_Password" https://scg.ruckuswireless.com:9998/SubscriberPortal/login)
if echo "$response" | grep -q 'Sign Out'
then
	echo "WiFi logged in!"
else
	echo "Not connected to WiFi!"
fi
| true
|
da2004a4fd253ff00beb646eb810f4d6a23812da
|
Shell
|
SeltmannSoftware/BrickPiES
|
/brickpies.sh
|
UTF-8
| 3,893
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# brickpies <disable|enable|remove> <led|power|reset>...
# Toggle BrickPi extras by editing /boot/config.txt (power LED via TxD,
# gpio-shutdown power button) or installing the gpio_soft_reset service.
#
# BUGFIX: every in-place edit used "sed -n -i ...". With -n (auto-print
# suppressed) and no "p" command, sed writes NOTHING back, truncating
# /boot/config.txt to an empty file. All edits now use plain "sed -i".
operation=$1
shift
for var in "$@"
do
  case "$var" in
    led)
      case "$operation" in
        disable)
          echo "Disabling power LED...editing /boot/config.txt"
          if
            grep -q '#enable_uart=1' /boot/config.txt;
          then
            echo 'Power LED support already disabled!';
          else
            if
              grep -q 'enable_uart=1' /boot/config.txt;
            then
              sed -i 's/enable_uart=1/#enable_uart=1/' /boot/config.txt
            else
              echo "Disable failed...power LED support not installed"
            fi
          fi
          ;;
        enable)
          echo "Enabling power LED...editing /boot/config.txt. Reboot required."
          if
            grep -q '#enable_uart=1' /boot/config.txt;
          then
            sed -i 's/#enable_uart=1/enable_uart=1/' /boot/config.txt
          else
            if
              grep -q 'enable_uart=1' /boot/config.txt;
            then
              echo 'Power LED support already installed!';
            else
              # BUGFIX: bash echo does not expand \n without -e; use printf
              # (matching the power section below).
              printf '\n# Power LED (via TxD)\nenable_uart=1\n' >> /boot/config.txt;
            fi
          fi
          ;;
        *)
          echo "Usage: brickpies disable|enable led|power|reset"
          exit 1
      esac
      ;;
    power)
      case "$operation" in
        disable)
          echo "Disabling gpio power button...editing /boot/config.txt"
          if
            grep -q '#dtoverlay=gpio-shutdown' /boot/config.txt;
          then
            echo 'GPIO power support already disabled!';
          else
            if
              grep -q 'dtoverlay=gpio-shutdown' /boot/config.txt;
            then
              sed -i 's/dtoverlay=gpio-shutdown/#dtoverlay=gpio-shutdown/' /boot/config.txt
            else
              echo "Disable failed...GPIO power support not enabled"
            fi
          fi
          ;;
        enable)
          echo "Enabling POWER button...editing /boot/config.txt. Reboot required."
          if
            grep -q '#dtoverlay=gpio-shutdown' /boot/config.txt;
          then
            sed -i 's/#dtoverlay=gpio-shutdown/dtoverlay=gpio-shutdown/' /boot/config.txt
          else
            if
              grep -q 'dtoverlay=gpio-shutdown' /boot/config.txt;
            then
              # NOTE(review): message says "Power LED" but this is the power
              # button path - looks like a copy/paste slip; confirm intent.
              echo 'Power LED support already installed!';
            else
              printf '\n# Power off (via gpio 3)\ndtoverlay=gpio-shutdown\n' >> /boot/config.txt;
            fi
          fi
          ;;
        *)
          echo "Usage: brickpies disable|enable led|power|reset"
          exit 1
      esac
      ;;
    reset)
      case "$operation" in
        disable)
          echo "Disabling reset..."
          #Disable - add sudo?
          update-rc.d gpio_soft_reset disable
          /etc/init.d/gpio_soft_reset stop
          ;;
        enable)
          echo "Enabling reset..."
          # Enable
          # Install if needed
          if [ ! -e /etc/init.d/gpio_soft_reset ]
          then
            cp gpio_soft_reset /etc/init.d
            chmod +x /etc/init.d/gpio_soft_reset
          fi
          if [ ! -e /usr/local/bin/soft_reset_listener.py ]
          then
            cp soft_reset_listener.py /usr/local/bin
            chmod +x /usr/local/bin/soft_reset_listener.py
          fi
          # Now enable - add sudo?
          update-rc.d gpio_soft_reset defaults
          /etc/init.d/gpio_soft_reset start
          ;;
        remove)
          #Remove
          echo "Removing reset..."
          update-rc.d gpio_soft_reset disable
          /etc/init.d/gpio_soft_reset stop
          if [ -e /etc/init.d/gpio_soft_reset ]
          then
            rm /etc/init.d/gpio_soft_reset
          fi
          if [ -e /usr/local/bin/soft_reset_listener.py ]
          then
            rm /usr/local/bin/soft_reset_listener.py
          fi
          ;;
        *)
          echo "Usage: brickpies disable|enable led|power|reset"
          exit 1
      esac
      ;;
    *)
      echo "Usage: brickpies disable|enable led|power|reset"
      exit 1
  esac
done
| true
|
42bbe202f0f32095e5a24b22944de53e7c240f89
|
Shell
|
zap0xfce2/Scripts
|
/Sicherung.sh
|
UTF-8
| 2,027
| 3.609375
| 4
|
[] |
no_license
|
#! /bin/sh
#
# Backup script; install with crontab -e:
# 25 23 * * * /root/Sicherung.sh
#
##########################
## Konfiguration Anfang ##
##########################

# Directory to back up
quelle=/daten

# Device to mount as the backup medium
externehdd=/dev/sdx1

# Admin mail address
adminmail=sender@youdomain.com

# Sender mail address
sendermail=absender@yourdomain.com

# Send mail after a successful backup?
sendonokay=false

# Send mail on errors?
sendonfail=true

# Extra mount options; leave empty for a local backup
# (intentionally unquoted below so an empty value expands to nothing)
mountparameter=""

# Unmount the medium at the end?
unmountonend=true

# Date format used for log file names
tag=$(date +"%Y_%m_%d")

# Mount point / copy target (created if missing)
ziel=/media/sicherung

##########################
### Konfiguration Ende ###
##########################

### script start ###

# Create the target mount point
mkdir -p $ziel

# Mount the disk at the target
/bin/mount $mountparameter $externehdd $ziel

# BUGFIX: the original tested the literal string
# [ "cat /proc/mounts | grep $ziel" ], which is non-empty and therefore
# ALWAYS true. Actually check /proc/mounts for the mount point.
if grep -q "$ziel" /proc/mounts
then
	# Write the start date to the log file
	/bin/date > /var/log/$tag-sicherung.log
	# Mirror the source and log the result
	#/bin/cp -uRv $quelle/* $ziel >> /var/log/$tag-sicherung.log
	/usr/bin/rsync -av --delete $quelle $ziel >> /var/log/$tag-sicherung.log
	# Write the end date to the log file
	/bin/date >> /var/log/$tag-sicherung.log
	if $sendonokay;
	then
		# Send the success mail
		/bin/date | /usr/bin/mail -s "Starte die Sicherung des Servers: " $(hostname) -r $sendermail $adminmail
	fi
# If mounting failed
else
	# Log the error
	echo "Fehler beim Mounten :-(" >> /var/log/$tag-sicherung.log
	if $sendonfail;
	then
		# Send the failure mail
		/bin/date | /usr/bin/mail -s "Fehler beim Mounten auf Server: " $(hostname) -r $sendermail $adminmail
	fi
fi

if $unmountonend;
then
	# Unmount the target
	/bin/umount $ziel
fi
### script end
| true
|
5fd1b0e8e23b0526976b714e1fdeac8cd04f9c56
|
Shell
|
JasonGross/coq-debian-build-scripts
|
/ocaml-stuff/03-build-debian-only-source.sh
|
UTF-8
| 326
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Build Debian source-only packages (-S) for every package listed in
# DSCS/DEBIAN_DSCS. versions.sh is presumed to define those lists plus
# the to_folder_name and extra_debuild_args_for helpers - confirm.
set -ex
. versions.sh
for i in ${DSCS} ${DEBIAN_DSCS}; do
	FOLDER="$(to_folder_name "$i")"
	pushd "debian-sources/$FOLDER" || exit $?
	EXTRA_ARGS="$(extra_debuild_args_for "$i")"
	# --prepend-path exposes jbuilder to the build; -d skips
	# build-dependency checks; -S builds a source-only package.
	debuild --prepend-path "$(dirname "$(which jbuilder)")" -d ${EXTRA_ARGS} -S || exit $? # -us -uc
	popd
done
| true
|
8d843b622254bc64e5c44285926769701e561c5b
|
Shell
|
eyhl/issm
|
/trunk/externalpackages/doxygen/install.sh
|
UTF-8
| 343
| 3.140625
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
set -eu

# Clean any previous checkout/installation, then start fresh.
rm -rf install src
mkdir install

# Fetch the latest doxygen sources.
svn co https://svn.code.sf.net/p/doxygen/code/trunk src

# Configure inside the checkout.
cd src && ./configure --prefix "$ISSM_DIR/externalpackages/doxygen/install"

# Build, in parallel when a job count was passed as $1.
jobs_flag=""
if [ $# -gt 0 ]; then
	jobs_flag="-j $1"
fi
make $jobs_flag

# Install the binary and the documentation.
make install
make install_docs
| true
|
e254490fce76a6dd795e05b6920138eef273a3ff
|
Shell
|
pedrohenriquepires/dotfiles
|
/hyper/install.sh
|
UTF-8
| 243
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the Hyper terminal configuration: copy the repo's .hyper.js into
# $HOME unless a symlink is already managing it.

DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source=$DIR/.hyper.js
target=$HOME/.hyper.js

if [ ! -L "$target" ]; then
  echo "Copying $source to $target"
  # "yes | cp -rf" was redundant - cp -f never prompts here - and the
  # unquoted paths would break on spaces.
  cp -rf "$source" "$target"
fi

echo "Hyper setup successful."
| true
|
726d9a2e5a4aa5c43bc14be9023c833df3739d45
|
Shell
|
BowenLi1994/Interactive-Graph-Search
|
/experiment/script/wide1000.sh
|
UTF-8
| 561
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the IGS "sequence" experiments on the combine1000 (wide) dataset.
# All runs are started in the background; results land in $OUT_PATH.
COMMAND=../sequence
METHOD1=outdegree
METHOD2=baseline
METHOD3=comprehensive
METHOD4=compared

DATASET=combine1000
OUT_PATH=/home/bowen/igs_result/wide1000/

# One background run each for the non-threshold methods.
$COMMAND $METHOD2 $DATASET > ${OUT_PATH}${DATASET}_${METHOD2}.result &
$COMMAND $METHOD3 $DATASET 1 > ${OUT_PATH}${DATASET}_${METHOD3}.result &
$COMMAND $METHOD4 $DATASET > ${OUT_PATH}${DATASET}_${METHOD4}.result &

# Sweep the outdegree method over thresholds 100..1000.
for THRESHOLD in 100 200 300 400 500 600 700 800 900 1000
do
	$COMMAND $METHOD1 $DATASET $THRESHOLD > ${OUT_PATH}${DATASET}_${METHOD1}${THRESHOLD}.result &
done
# NOTE(review): the script exits while jobs may still be running; add a
# trailing "wait" if completion must be guaranteed - TODO confirm.
| true
|
f1d1dad4b995238766c0065ecd7d70c820a02a20
|
Shell
|
ros4hri/ros4hri
|
/deploy_pipeline.sh
|
UTF-8
| 827
| 3.515625
| 4
|
[] |
no_license
|
#! /bin/bash
# Build and install the ros4hri packages into a fresh prefix, then source
# the resulting environment so later packages can find earlier ones.
set -e # exit immediately when any command fails
set -o errexit

INSTALL_PREFIX=`pwd`/ros4hri-dev

# Start from a clean install prefix.
if [ -d "$INSTALL_PREFIX" ]; then
	rm -rf $INSTALL_PREFIX
fi
mkdir -p $INSTALL_PREFIX

# Fresh working directory for the source checkouts.
if [ -d "tmp-dev" ]; then
	rm -rf tmp-dev
fi
mkdir -p tmp-dev && cd tmp-dev

### 1. Clone & compile nodes
for PKG_NAME in hri_msgs face_detection face_recognition skeleton_tracker;
do
	echo "%%%%%%%%%%%%%%%%%%%%%%% INSTALLING $PKG_NAME %%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
	git clone https://github.com/ros4hri/$PKG_NAME.git
	cd $PKG_NAME && mkdir build && cd build
	cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=$INSTALL_PREFIX ..
	make
	make install
	cd ../..
	# Re-source after each install so the next package sees this one.
	source $INSTALL_PREFIX/setup.sh
done

### 2. Test pipeline
echo "%%%%%%%%%%%%%%%%%%%%%%% TESTING PIPELINE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
| true
|
24a497cd2dccbecfea99ab2d56d29a67ec83a03d
|
Shell
|
agoessling/defaults
|
/setup_linux.sh
|
UTF-8
| 2,426
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash

# Exit on errors, undefined variables, and pipe failures.
set -euo pipefail

source "$(dirname "$0")/terminal_config.sh"

# install apt packages.
# BUGFIX: the list previously ended with a trailing backslash, which made
# the following comment line a continuation of the apt-get command.
sudo apt-get update
sudo apt-get -y install \
  git \
  pip \
  tmux \
  dconf-cli \
  uuid-runtime \
  npm \
  python3-venv

# Install tmux configuration.
cp tmux/.tmux.conf ~/
mkdir -p ~/.tmux
cp tmux/tmux-colorscheme.conf ~/.tmux/
if [ ! -d ~/.tmux/plugins/tpm ]; then
  git clone --depth=1 https://github.com/tmux-plugins/tpm ~/.tmux/plugins/tpm
fi

# Install Chrome.
if ! dpkg -s google-chrome-stable | grep -q 'Status: install ok installed'; then
  wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
  sudo dpkg -i google-chrome-stable_current_amd64.deb
  sudo apt-get -y -f install
  rm google-chrome-stable_current_amd64.deb
fi

# Swap Caps-Lock with Escape.
if ! grep -q 'setxkbmap -option caps:escape' ~/.profile; then
  echo "" >> ~/.profile
  echo "# Make Caps-Lock a second Escape." >> ~/.profile
  echo "setxkbmap -option caps:escape" >> ~/.profile
fi

# Install recent NeoVim
if ! command -v nvim &> /dev/null; then
  sudo wget -q --show-progress -O /usr/local/bin/nvim https://github.com/neovim/neovim/releases/download/stable/nvim.appimage
  sudo chmod +x /usr/local/bin/nvim
fi

# Install NVIM configuration.
if [ ! -d ~/.config/nvim ]; then
  git clone git@github.com:agoessling/nvim_config.git ~/.config/nvim
else
  # BUGFIX: the pull previously ran in the current working directory, not
  # in the config repo.
  git -C ~/.config/nvim pull origin master
fi

# Download patched fonts.
mkdir -p ~/.local/share/fonts
wget -nc -q --show-progress -P ~/.local/share/fonts https://github.com/ryanoasis/nerd-fonts/raw/master/patched-fonts/Hack/Regular/HackNerdFont-Regular.ttf
wget -nc -q --show-progress -P ~/.local/share/fonts https://github.com/ryanoasis/nerd-fonts/raw/master/patched-fonts/Hack/Bold/HackNerdFont-Bold.ttf
wget -nc -q --show-progress -P ~/.local/share/fonts https://github.com/ryanoasis/nerd-fonts/raw/master/patched-fonts/Hack/Italic/HackNerdFont-Italic.ttf
wget -nc -q --show-progress -P ~/.local/share/fonts https://github.com/ryanoasis/nerd-fonts/raw/master/patched-fonts/Hack/BoldItalic/HackNerdFont-BoldItalic.ttf

# Setup terminal colorscheme
setup_gruvbox_colors

# Change default font for gnome terminal
# NOTE(review): $uuid is presumably set by terminal_config.sh; with
# "set -u" this line fails if it is not - TODO confirm.
set_font "$uuid" "Hack Nerd Font 10"

# Configure Bash
cp -i .bash_aliases ~/
if ! grep -q 'Custom bashrc additions' ~/.bashrc; then
  cat .bashrc >> ~/.bashrc
fi

# Configure Git
cp -i .gitconfig ~/
| true
|
610090f5b911f8ce047fcb07679065f3d1111fc3
|
Shell
|
edgarxue/shell
|
/zabbix_config_backup.sh
|
UTF-8
| 1,588
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# for : back up zabbix config
# by: edgar
#
#================================================================
source /etc/bashrc
source /etc/profile
#================================================================
# config mysql basic message
MySQL_USER=
MySQL_PASSWORD=
MySQL_HOST=
MySQL_PORT=3306
MySQL_DUMP_PATH=/data/zabbix_config_backup
MySQL_DATABASE_NAME=zabbix
DATE=$(date '+%Y-%m-%d')
TIME=$(date '+%H:%M:%S')
#================================================================
# Make sure backup dir exists
[ -d ${MySQL_DUMP_PATH} ] || mkdir ${MySQL_DUMP_PATH}
cd ${MySQL_DUMP_PATH}
[ -d logs ] || mkdir logs
[ -d ${DATE} ] || mkdir ${DATE}
cd ${DATE}
#================================================================
# Begin work: dump every config table, skipping the large history/event
# tables that are not configuration.
TABLE_NAME_ALL=$(mysql -u${MySQL_USER} -p${MySQL_PASSWORD} -P${MySQL_PORT} -h${MySQL_HOST} ${MySQL_DATABASE_NAME} -e "show tables"|egrep -v "(Tables_in_zabbix|history*|trends*|acknowledges|alerts|auditlog|events|service_alarms)")
# BUGFIX: the original checked "$?" after the loop, which only reflected
# the last "sleep" (and the second check even tested the exit status of
# the first "[" test). Track the dump status explicitly instead.
DUMP_STATUS=0
for TABLE_NAME in ${TABLE_NAME_ALL}
do
	mysqldump -u${MySQL_USER} -p${MySQL_PASSWORD} -P${MySQL_PORT} -h${MySQL_HOST} ${MySQL_DATABASE_NAME} ${TABLE_NAME} >${TABLE_NAME}.sql || DUMP_STATUS=1
	sleep 1
done
if [ "${DUMP_STATUS}" -eq 0 ]; then
	echo "${DATE} ${TIME} : Backup zabbix succeed" >> ${MySQL_DUMP_PATH}/logs/ZabbixMysqlDump.log
else
	echo "${DATE} ${TIME} : Backup zabbix not succeed" >> ${MySQL_DUMP_PATH}/logs/ZabbixMysqlDump.log
fi
#================================================================
# rm back file more 7 days
cd ${MySQL_DUMP_PATH}/
rm -rf $(date +%Y-%m-%d --date='7 days ago')
exit 0
| true
|
0fd37259638e33fd1b73d4ef8d031f0ac6d2ffb4
|
Shell
|
fherbine/sysAdmin42
|
/init/scripts/04
|
UTF-8
| 419
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Watch /etc/crontab for changes: keep a reference md5 in /var/log/mdchk,
# mail root when it differs, and (on first run) install a daily cron job
# that re-runs this script.
if [ -e "/var/log/mdchk" ]
then
md5sum /etc/crontab > /tmp/mdchk
diff /tmp/mdchk /var/log/mdchk > /tmp/mddf
df="$(cat /tmp/mddf)"
if [ -z "$df" ]
then
echo "no changes"
else
echo "The file /etc/crontab has been updated." | mail -s crontab_updt root
fi
md5sum /etc/crontab > /var/log/mdchk
else
chmod 755 "$PWD/04"
# Bug fixes vs. the original:
#  - piping into bare `crontab` *replaces* the whole user crontab; append
#    the existing entries first and use the explicit `-` stdin form;
#  - a user crontab entry has 5 time fields and NO user column, so the
#    "root" field made the original entry invalid.
(crontab -l 2>/dev/null; echo "0 0 * * * $PWD/04") | crontab -
md5sum /etc/crontab > /var/log/mdchk
fi
| true
|
e786cef1594023ab7f084404c86551c14e4ed9ae
|
Shell
|
kramse/kramse-labs
|
/web-security/programs/menu.sh
|
UTF-8
| 877
| 2.890625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Restricted "research gateway" shell: offers only a fixed menu of network
# diagnostic commands and loops until "exit"/"quit".  Historic SysV-style
# script (/usr/ucb, /v/bin paths) -- presumably a SunOS-era host, TODO
# confirm before reuse.
stty erase "ˆH" kill "ˆU" intr "ˆC" tabs
# NOTE(review): the "ˆH"/"ˆU"/"ˆC" arguments look like mis-encoded
# two-character sequences rather than real control characters -- verify
# the file's encoding before deploying.
echo stty erase "ˆH" kill "ˆU" intr "ˆC" tabs
/usr/ucb/uptime
echo
# Ignore SIGINT while the banner is printed so ^C cannot kill the gateway.
trap "" 2
cat /v/adm/motd
echo "Enter help for command list."
while true
do
# Inside the loop, ^C re-runs the prompt instead of exiting the shell.
trap continue 2
# "\c" suppresses the trailing newline (SysV echo semantics).
echo "research gateway> \c"
if read line
then
case "$line" in
"") continue;;
esac
# Word-split the input: $1 becomes the command, $2... its arguments.
set -- $line
case "$1" in
help) echo " ping <destination>"
echo " traceroute <destination>"
echo " dig <parameters>"
echo " telnet <destination> [port]"
echo " finger <destination>"
echo;;
ping) shift
/bin/ping $*;;
traceroute)
shift
/v/bin/traceroute $*;;
dig) shift
/v/bin/dig $*;;
telnet) shift
/usr/ucb/telnet $*;;
finger) shift
/usr/ucb/finger $*;;
exit) break;;
quit) break;;
*) echo "Unknown command - $line."
echo "Enter help for command list";;
esac
else
# read failed: EOF (^D) -- leave the menu loop.
break
fi
done
exit
| true
|
f3c65b92990c8840a80530901be093ec3b1eb82c
|
Shell
|
yujiny97/Mushroom_classification
|
/0714(57data)newversion/cnt.sh
|
UTF-8
| 119
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print every sub-directory of ./train/ (depth 1-2) together with the
# number of regular files it contains, as "<dir>, <count>".
# NUL-delimited find output survives paths with spaces/newlines, unlike
# the original `for x in $(find ...)` word-splitting loop.
find ./train/ -maxdepth 2 -mindepth 1 -type d -print0 |
while IFS= read -r -d '' dir
do
echo "$dir," $(find "$dir" -type f | wc -l)
done
| true
|
f6929cc4b9b503f9a3781a7b2707cef0f7e309e8
|
Shell
|
marcelloc/Unofficial-pfSense-packages
|
/pkg-e2guardian5/files/usr/local/pkg/e2guardian_rc.template
|
UTF-8
| 1,266
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/sh
# $FreeBSD: ports/www/e2guardian-devel/files/e2guardian.in,v 1.6 2012/01/14 08:57:12 dougb Exp $
# FreeBSD rc.d template that starts the e2guardian content filter
# (pfSense package).
# PROVIDE: e2guardian
# REQUIRE: NETWORKING SERVERS squid
# KEYWORD: shutdown
# Define these e2guardian_* variables in one of these files:
# /etc/rc.conf
# /etc/rc.conf.local
# /etc/rc.conf.d/e2guardian
#
# DO NOT CHANGE THESE DEFAULT VALUES HERE
#
# e2guardian_enable="YES"
# e2guardian_flags="<set as needed>"
# Raise kernel limits: e2guardian is heavily threaded and holds many open
# sockets/descriptors under load.
sysctl kern.ipc.somaxconn=16384
sysctl kern.maxfiles=131072
sysctl kern.maxfilesperproc=104856
sysctl kern.threads.max_threads_per_proc=20480
# Clear a stale "dirty shutdown" marker, if any.
if [ -f /var/run/e2guardian.dirty ]; then
rm -f /var/run/e2guardian.dirty
fi
e2guardian_enable=${e2guardian_enable:-"YES"}
e2guardian_pidfile=${e2guardian_pidfile:-"/var/run/e2guardian.pid"}
. /etc/rc.subr
name="e2guardian"
rcvar=e2guardian_enable
command="/usr/local/sbin/${name}"
load_rc_config $name
pidfile="${e2guardian_pidfile}"
#hack to get e2guardian working on 2.3.x
if [ -f /usr/local/sbin/libc.so.7 ];then
cp /usr/local/sbin/libc.so.7 /usr/local/lib
fi
if [ ! -x $command ];then
/bin/chmod +x $command
fi
run_rc_command "$1"
#hack to get e2guardian working on 2.3.x
# The temporary libc copy is removed again after the daemon has started.
if [ -f /usr/local/lib/libc.so.7 ];then
sleep 3
rm -f /usr/local/lib/libc.so.7
fi
| true
|
c6c6b3a780e6308d6bdf448911eb22bf38f3e8c8
|
Shell
|
Ting007/xml-security
|
/versions.alt/orig/v2/xml-security/depends.sh
|
UTF-8
| 505
| 2.625
| 3
|
[
"Apache-2.0",
"BSD-2-Clause",
"Apache-1.1"
] |
permissive
|
#!/usr/local/bin/bash
# Compute the transitive class dependencies for the test(s) named in $1.
# Runs the selective JUnit runner to list the classes touched by the tests,
# then expands that list recursively with BCEL's `listclass`, excluding
# JDK classes.
CLASSPATH=/nfs/spectre/u5/aristot/subjects/lib:./build/classes
# Add every shared jar to the classpath.
for i in ../../common/libs/*.jar
do
CLASSPATH=$CLASSPATH:$i
done
export CLASSPATH
# sed strips the "NNN: " counter prefix and the trailing ".methodName",
# leaving bare class names, which are de-duplicated before expansion.
java junit.textui.SelectiveTestRunner -names ${1} | sed -e 's/^[0-9]*:[ ]*//' -e 's/\.[^.]*$//' | sort -u |
(
# Collect all class names into one whitespace-separated argument list;
# DLIST is intentionally left unquoted below so it word-splits into
# individual arguments for listclass.
while read LINE
do
DLIST="${DLIST} ${LINE}"
done
java -mx128m -cp ${CLASSPATH}:/nfs/spectre/u5/aristot/subjects/lib/BCEL listclass -nocontents -dependencies -recurse ${DLIST} -exclude java. javax. sun.
) |
sort -u
| true
|
b5528a4a616ac5b64f5e82a859f4941f9b7416f8
|
Shell
|
yahoopete/redirector
|
/tools/full_tests.sh
|
UTF-8
| 868
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Build one combined mappings CSV from every site's mapping file plus the
# subset test fixtures, then run coverage, static-asset and mapping tests.
set -e
. tools/env
status "DEPLOY_TO=$DEPLOY_TO"
mappings="dist/full_tests_mappings.csv"
status "Combining all known mappings into $mappings ..."
mkdir -p dist
{
{
# data/sites.csv is comma-separated: skip its header row, then emit each
# listed site's mapping file.
IFS=,
read titles
while read site rest
do
cat data/mappings/$site.csv
done
} < data/sites.csv
# contains many issues so commented out, pending review ..
# cat data/tests/full/*.csv
cat data/tests/subsets/*.csv
# Drop the per-file header rows, de-duplicate, and prepend one header.
} | egrep -v '^Old Url' | sort -u | (
echo "Old Url,New Url,Status,Suggested Link,Archive Link"
cat
) > $mappings
status "Checking test coverage ..."
tools/test_coverage.sh --name "$mappings" --sites data/sites.csv $mappings
status "Testing static assets ..."
tools/test_static_assets.sh --sites data/sites.csv
status "Testing $mappings ..."
prove -l tools/test_mappings.pl :: $@ $mappings
| true
|
5994fe8819b3f3c50ec4f1cd72fcbaf3e8e0195a
|
Shell
|
gnowledge/gstudio-docker
|
/scripts/code-update.sh
|
UTF-8
| 2,845
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
#This script calls various scripts inside the gstudio container for the updation of codes
# Following variables are used to store the color codes for displaying the content on terminal
# (only cyan/reset are actually referenced below; the rest are kept for
# convenience in future edits)
black="\033[0;90m" ;
red="\033[0;91m" ;
green="\033[0;92m" ;
brown="\033[0;93m" ;
blue="\033[0;94m" ;
purple="\033[0;95m" ;
cyan="\033[0;96m" ;
grey="\033[0;97m" ;
white="\033[0;98m" ;
reset="\033[0m" ;
#for filename
#patch=$(basename $(tar -tf /mnt/patch-*.tar.gz | head -n 1));
#update_patch="${filename%.*.*}";
#patch="patch-7a6c2ac-r5-20190221";
#patch="patch-26eaf18-r5-20190320";
# Hard-coded patch id; must match the directory under /mnt/update-patch-r6/.
patch="update-patch-c0463c5-r6-20190718";
#code to run the script named git-offline-code-update.sh inside the container started
echo -e "\n${cyan}copying updated patch from /mnt/${patch} to /home/docker/code/ in gstudio container ${reset}";
sudo rsync -avPhz /mnt/update-patch-r6/${patch} /home/core/code/ ;
echo -e "\n${cyan}Updating offline patch ${reset}";
docker exec -it gstudio /bin/sh -c "/bin/bash /home/docker/code/${patch}/code-updates/git-offline-update.sh";
#code to run the script named git-offline-code-update.sh inside the container started
#code to copy user-csvs of sp99, sp100 and cc inside the container started
val="cc";
echo -e "\n${cyan} Copying the sp99, sp100 and cc user csvs to the user-csvs folder inside the container ${reset}";
sudo rsync -avPhz /home/core/user-csvs/sp/sp99_users.csv /home/core/code/user-csvs/; #copying sp99 user csvs
sudo rsync -avPhz /home/core/user-csvs/sp/sp100_users.csv /home/core/code/user-csvs/; #copying sp100 user csvs
sudo rsync -avPhz /home/core/user-csvs/${val}/cc_users.csv /home/core/code/user-csvs/; #copying cc user csvs
#code to copy user-csvs of sp99, sp100 and cc inside the container ended
# Code To change the permissions of user-csvs folder
echo -e "\n${cyan} Changing the permissions of /home/core/user-csvs folder"
sudo chown root:root /home/core/user-csvs ;
sudo chmod +xr /home/core/user-csvs ;
#code to run the script named python-files-exec.sh inside the container started
echo -e "\n${cyan}Executing the python files ${reset}";
docker exec -it gstudio /bin/sh -c "/bin/bash /home/docker/code/${patch}/code-updates/python-files-exec.sh";
#code to run the script named python-files-exec.sh inside the container ended
#code to copy backup-old-server-data.sh and Execute-get_all_users_activity_timestamp_csvs.sh to /home/core
echo -e "\n${cyan}Copying the scripts for old server data backup and getting all user activity timestamp csvs to /home/core";
sudo rsync -avPhz /home/core/code/scripts/backup-old-server-data.sh /home/core/ ;
sudo rsync -avPhz /home/core/code/scripts/Execute-get_all_users_activity_timestamp_csvs.sh /home/core/ ;
sudo rsync -avPhz /mnt/update-patch-r6/${patch}/code-updates/execute-ActivityTimestamp-process.sh /home/core/ ;
| true
|
2bc80218e949df2a0bfe92218b6e8941095315f6
|
Shell
|
cloudfoundry-attic/mega-ci
|
/scripts/ci/run-consats/task
|
UTF-8
| 4,192
| 3.25
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -exu
export ROOT="${PWD}"
export CONSUL_RELEASE_VERSION="99999+dev.$(date +%s)"
export STEMCELL_VERSION="$(cat ${ROOT}/stemcell/version)"
export TURBULENCE_RELEASE_VERSION="$(cat ${ROOT}/turbulence-release/version)"
export BOSH_AWS_CPI_RELEASE_VERSION="$(cat ${ROOT}/bosh-aws-cpi-release/version)"
export LATEST_CONSUL_RELEASE_VERSION="$(cat ${ROOT}/latest-consul-release/version)"
# Orchestrate a full Consul acceptance-test run against the BOSH director:
# upload stemcells/releases, generate the manifest, pre-compile packages,
# deploy, run the acceptance-tests errand, then clean up leftovers.
# Globals: BOSH_DIRECTOR (read), WINDOWS_CLIENTS (run as a true/false
# command -- set by the CI pipeline), ROOT; exports BOSH_DIRECTOR_UUID.
function main() {
bosh target "${BOSH_DIRECTOR}"
export BOSH_DIRECTOR_UUID="$(bosh -t "${BOSH_DIRECTOR}" status --uuid)"
upload_stemcell
if $WINDOWS_CLIENTS; then
upload_windows_stemcell
fi
upload_releases
generate_manifest
force_compilation
bosh -t "${BOSH_DIRECTOR}" deployment "${ROOT}/consats.yml"
bosh -t "${BOSH_DIRECTOR}" -n deploy
bosh -t "${BOSH_DIRECTOR}" run errand acceptance-tests
# Remove leftover GUID-suffixed deployments created by the test run.
cleanup_deployment "consul"
cleanup_deployment "turbulence-consul"
bosh -t "${BOSH_DIRECTOR}" -n cleanup
}
# Upload the BOSH stemcell tarball from ${ROOT}/stemcell to the director,
# skipping the upload when that version is already present.
function upload_stemcell() {
(
cd "${ROOT}/stemcell"
bosh -t "${BOSH_DIRECTOR}" upload stemcell stemcell.tgz --skip-if-exists
)
}
# Upload the light Windows (2012R2) AWS stemcell to the director, skipping
# the upload when that version is already present.
function upload_windows_stemcell() {
(
cd "${ROOT}/windows-stemcell"
bosh -t "${BOSH_DIRECTOR}" upload stemcell light-bosh-stemcell-*-aws-xen-hvm-windows2012R2-go_agent.tgz --skip-if-exists
)
}
# Upload every release the deployment needs: turbulence and the AWS CPI
# from pre-built tarballs (skipped if already present), a dev release of
# consul cut from the local working tree, and the latest published consul
# release for upgrade testing.
function upload_releases() {
pushd "${ROOT}/turbulence-release" > /dev/null
bosh -t "${BOSH_DIRECTOR}" upload release release.tgz --skip-if-exists
popd > /dev/null
pushd "${ROOT}/bosh-aws-cpi-release" > /dev/null
bosh -t "${BOSH_DIRECTOR}" upload release release.tgz --skip-if-exists
popd > /dev/null
pushd "${ROOT}/consul-release" > /dev/null
# Dev release: always created fresh with the timestamped version, so no
# --skip-if-exists here.
bosh -t "${BOSH_DIRECTOR}" -n create release --force --version "${CONSUL_RELEASE_VERSION}"
bosh -t "${BOSH_DIRECTOR}" upload release
popd > /dev/null
pushd "${ROOT}/latest-consul-release" > /dev/null
bosh -t "${BOSH_DIRECTOR}" upload release release.tgz --skip-if-exists
popd > /dev/null
}
# Deploy a throwaway "compilation" deployment so all release packages are
# compiled and exported against the target stemcell, then delete it.
# This keeps the real test deploys fast.
function force_compilation() {
pushd /tmp > /dev/null
# Render the compilation manifest fixture by substituting the placeholder
# tokens with the concrete director UUID and versions.
sed -e "s/REPLACE_ME_DIRECTOR_UUID/${BOSH_DIRECTOR_UUID}/g" \
-e "s/CONSUL_RELEASE_VERSION/${CONSUL_RELEASE_VERSION}/g" \
-e "s/TURBULENCE_RELEASE_VERSION/${TURBULENCE_RELEASE_VERSION}/g" \
-e "s/CPI_RELEASE_VERSION/${BOSH_AWS_CPI_RELEASE_VERSION}/g" \
-e "s/STEMCELL_VERSION/${STEMCELL_VERSION}/g" \
"${ROOT}/mega-ci/scripts/ci/run-consats/fixtures/consul_compilation.yml" > "consul_compilation.yml"
bosh -t "${BOSH_DIRECTOR}" -d "consul_compilation.yml" -n deploy
bosh -t "${BOSH_DIRECTOR}" -d "consul_compilation.yml" export release "consul/${CONSUL_RELEASE_VERSION}" "ubuntu-trusty/${STEMCELL_VERSION}"
bosh -t "${BOSH_DIRECTOR}" -d "consul_compilation.yml" export release "turbulence/${TURBULENCE_RELEASE_VERSION}" "ubuntu-trusty/${STEMCELL_VERSION}"
bosh -t "${BOSH_DIRECTOR}" -d "consul_compilation.yml" export release "bosh-aws-cpi/${BOSH_AWS_CPI_RELEASE_VERSION}" "ubuntu-trusty/${STEMCELL_VERSION}"
bosh -t "${BOSH_DIRECTOR}" -d "consul_compilation.yml" -n delete deployment compilation
popd > /dev/null
}
# Render the CONSATS deployment manifest into ${ROOT}/consats.yml using the
# Go generator from mega-ci (symlinked into GOPATH so `go run` can resolve
# its package imports).
function generate_manifest() {
mkdir -p "${ROOT}/consul-release/aws"
mkdir -p "${GOPATH}/src/github.com/cloudfoundry"
pushd "${GOPATH}/src/github.com/cloudfoundry" > /dev/null
ln -s "${ROOT}/mega-ci"
pushd "${GOPATH}/src/github.com/cloudfoundry/mega-ci" > /dev/null
go run "./scripts/ci/run-consats/generate_manifest.go" \
"${ROOT}/consul-release/manifests/aws/consats.yml" \
> "${ROOT}/consats.yml"
popd > /dev/null
popd > /dev/null
}
# Delete every deployment named "<prefix>-<UUID>" and assert none remain.
# Arguments: $1 - deployment name prefix (e.g. "consul").
function cleanup_deployment() {
local deployment
# BRE pattern matching "<prefix>-" followed by a hyphenated GUID.
deployment="${1}-[A-Za-z0-9]\{8\}-[A-Za-z0-9]\{4\}-[A-Za-z0-9]\{4\}-[A-Za-z0-9]\{4\}-[A-Za-z0-9]\{12\}"
for i in $(bosh -t "${BOSH_DIRECTOR}" deployments | grep -o "${deployment}" | uniq); do
bosh -t "${BOSH_DIRECTOR}" -n delete deployment $i
done
# Fails (non-zero, fatal under the script's -e) if any matching
# deployment survived the deletes above.
test -z "$(bosh -t "${BOSH_DIRECTOR}" deployments | grep "${deployment}")"
}
# EXIT-trap handler: best-effort removal of the consats deployment and the
# dev consul release.  Errors are tolerated (set +e) so that cleanup never
# masks the script's real exit status.
function teardown() {
set +e
bosh -t "${BOSH_DIRECTOR}" -n delete deployment consats
bosh -t "${BOSH_DIRECTOR}" -n delete release consul
set -e
}
trap teardown EXIT
main
| true
|
8cda373b60b812718766823248991e421acabb11
|
Shell
|
demokratikollen/demokratikollen
|
/provision/windows.sh
|
UTF-8
| 1,617
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Windows shell provisioner for Ansible playbooks, based on KSid's
# windows-vagrant-ansible: https://github.com/KSid/windows-vagrant-ansible
#
# @todo - Allow proxy configuration to be passed in via Vagrantfile config.
#
# @see README.md
# @author Jeff Geerling, 2014
# @version 1.0
#
# Uncomment if behind a proxy server.
# export {http,https,ftp}_proxy='http://username:password@proxy-host:80'
# Playbook path, relative to /vagrant inside the VM.
ANSIBLE_PLAYBOOK=$1
if [ ! -f "/vagrant/$ANSIBLE_PLAYBOOK" ]; then
  echo "Cannot find Ansible playbook."
  exit 1
fi
# Install Ansible and its dependencies if it's not installed already.
if [ ! -f /usr/local/bin/ansible ]; then
  echo "Installing Ansible dependencies."
  sudo apt-get update
  sudo apt-get -y install python python-dev
  echo "Installing pip via easy_install."
  wget http://peak.telecommunity.com/dist/ez_setup.py
  sudo python ez_setup.py && rm -f ez_setup.py
  sudo easy_install pip
  # Make sure setuptools are installed correctly.
  sudo pip install setuptools --no-use-wheel --upgrade
  echo "Installing required python modules."
  sudo pip install paramiko pyyaml jinja2 markupsafe
  echo "Installing Ansible."
  sudo pip install ansible==1.7
fi
# Seed the inventory so only the [develop] group is provisioned locally.
# Bug fix: `sudo echo ... >> file` performs the redirection as the calling
# (unprivileged) user, so sudo was a no-op; /tmp is world-writable, so a
# plain grouped redirection is both correct and simpler.
if [ ! -f /tmp/ansible_hosts ] ; then
  {
    echo "localhost ansible_connection=local"
    echo "[develop]"
    echo "localhost"
  } > /tmp/ansible_hosts
fi
echo "Running Ansible provisioner defined in Vagrantfile."
ansible-playbook "/vagrant/${ANSIBLE_PLAYBOOK}" -i /tmp/ansible_hosts --extra-vars "is_windows=true"
| true
|
38101bee1ae72a148f14de2a19e7ace80a36aa89
|
Shell
|
glamorous/dotfiles
|
/bootstrap.sh
|
UTF-8
| 2,089
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Run one provisioning step after interactive confirmation.
# Arguments: $1 - step number (display only)
#            $2 - step description (display only)
#            $3 - script filename under resources/steps/ to source
# Relies on helpers (show_header, print_step, ask_for_confirmation,
# answer_is_yes, ...) sourced from resources/utils*.sh by main().
execute_step() {
show_header
print_step $1 "$2"
print_with_newline
ask_for_confirmation "Are you sure you want to execute the script?"
if answer_is_yes; then
print_after_newline "" "print_with_newline"
# Sourcing (not executing) lets step scripts reuse the loaded helpers.
source "$MAIN_DIR/resources/steps/$3"
else
print_after_newline "We will skip this step like you asked…" "print_info"
fi;
ask_to_continue
}
# Entry point: load helper libraries, ask for explicit consent, then run
# the four provisioning steps in order and offer a reboot.
main() {
# Ensure that the following actions
# are made relative to this file's path.
cd "$(dirname "${BASH_SOURCE[0]}")" \
|| exit 1
source resources/utils.sh
source resources/utils-macos.sh
show_header
print_with_newline "So you want to set up your Mac? Good, \033[1mbootstrap.sh\033[0m will help you out with that."
print_with_newline "Beware though… This will alter many of your settings…"
ask_for_sudo
if cmd_exists "git"; then
# @TODO: Ask and download latests updates from repo
print_after_newline "You should use the latest version of this repository." "print_warning"
fi
# NOTE(review): $REPLY is presumably populated by the ask_for_input
# helper -- confirm in resources/utils.sh.
print_after_newline "If you're really sure you want to continue, enter “yes sir!” to continue" "ask_for_input"
if [[ $REPLY != "yes sir!" ]]; then
print_after_newline "No worries, I'll stop here… Ciao! 👋" "print_in_yellow"
print_with_newline
exit 0
fi;
print_after_newline "OK, you asked for it… Let's go!" "print_in_green"
print_with_newline
ask_to_continue
execute_step "1" "OSX setup (computer name, Apple ID…)" "macos-setup.sh"
execute_step "2" "Essentials (.dotfiles, brew, xcode, ssh, git…)" "essentials.sh"
execute_step "3" "Development (Composer packages, PHP-settings…)" "development.sh"
execute_step "4" "OSX settings (App preferences)" "macos-app-settings.sh"
ask_for_reboot
print_after_newline "\033[32mYay, we're all done here! 🎉\nEnjoy your configured computer! 😊\033[0m" "print_with_newline"
}
# Resolve the directory containing this script so helper resources load
# regardless of the caller's working directory.
MAIN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Forward all CLI arguments, quoted.  The original passed only $1 and left
# it unquoted; main currently takes no parameters, so this is backward
# compatible while being safe for arguments containing spaces.
main "$@"
exit;
| true
|
ebfe6779b24e4dfdb69fddcbcf793b1310f5756b
|
Shell
|
OlBiMaCooJam/heaven-games
|
/deploy.sh
|
UTF-8
| 1,060
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Blue/green deployment driver via docker-compose: the DB stack is started
# once if missing; then whichever app stack (blue/green) is currently down
# is brought up, and after a warm-up pause the other one is torn down.
DOCKER_APP_NAME=heavenApp
DOCKER_DB_NAME=mydb
#DOCKER_NGINX_NAME=heavenNginx
# Non-empty when the corresponding stack has at least one "Up" container.
EXIST_BLUE=$(docker-compose -p ${DOCKER_APP_NAME}-blue -f docker-compose.blue.yml ps | grep Up)
EXIST_DB=$(docker-compose -p ${DOCKER_DB_NAME} -f docker-compose.db.yml ps | grep Up)
#EXIST_NGINX=$(docker-compose -p ${DOCKER_NGINX_NAME} -f docker-compose.db.yml ps | grep Up)
if [ -z "$EXIST_DB" ]; then
echo "DB setting"
docker-compose -p ${DOCKER_DB_NAME} -f docker-compose.db.yml up -d
fi
#if [ -z "$EXIST_NGINX" ]; then
# echo "NGINX setting"
# docker-compose -p ${DOCKER_NGINX_NAME} -f docker-compose.nginx.yml up -d
#fi
if [ -z "$EXIST_BLUE" ]; then
echo "blue up"
docker-compose -p ${DOCKER_APP_NAME}-blue -f docker-compose.blue.yml up -d
# NOTE(review): a fixed 10s sleep assumes the new stack is healthy by
# then -- a real health check would be more robust.
sleep 10
docker-compose -p ${DOCKER_APP_NAME}-green -f docker-compose.green.yml down
else
echo "green up"
docker-compose -p ${DOCKER_APP_NAME}-green -f docker-compose.green.yml up -d
sleep 10
docker-compose -p ${DOCKER_APP_NAME}-blue -f docker-compose.blue.yml down
fi
| true
|
56de26d0f62f14a20b452ec0cfb3a4c0b30a0d6a
|
Shell
|
c10b10/sh
|
/[www]sitedown
|
UTF-8
| 564
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Tear down a site: disable its Apache vhost, delete its document root,
# drop its MySQL database, and remove its /etc/hosts entry.
dname=
while [[ $dname = "" ]]; do
echo -n "Enter site name (domain.com): "
read -r dname
done
# Security/robustness fix: the name is interpolated into rm -rf paths, a
# MySQL statement and a sed expression, so refuse anything that is not a
# plain host name (letters, digits, dots, hyphens).
if ! [[ $dname =~ ^[A-Za-z0-9.-]+$ ]]; then
echo "Invalid site name: $dname" >&2
exit 1
fi
if [ -x "/var/www/$dname" ]; then
# Disable site
a2dissite "$dname"
service apache2 restart
# Remove files
rm -rf -- "/var/www/$dname"
# Remove database (dots are not valid in database names)
dbname=${dname//./_}
mysql -u root -e "drop database $dbname;"
# Remove available-site
rm -rf -- "/etc/apache2/sites-available/$dname"
# Remove hosts entry (escape dots so sed matches them literally)
ename=$(echo "$dname" | sed 's/\./\\./g')
sed -i_bak -e "/$ename/d" /etc/hosts
fi
| true
|
bcfb9cd60160076651095a4a65ffcca4642b46ce
|
Shell
|
akinozgen/bin
|
/openbsd_bin/vol
|
UTF-8
| 669
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# http://github.com/mitchweaver/bin
#
# simple OpenBSD volume wrapper
#
# mixerctl's outputs.master is on a 0-255 scale; the UI here works in 0-100%.
usage() { >&2 echo 'usage: vol [-i] [-d] [-s]' ; exit 1 ; }
# inc/dec pass the raw step straight through to mixerctl (0-255 scale).
inc() { mixerctl -q outputs.master=+"$1" ; }
dec() { mixerctl -q outputs.master=-"$1" ; }
# get: convert the raw 0-255 reading to a percentage.
get() { printf '%s%%\n' $(( $(mixerctl -n outputs.master) * 100 / 255 )) ; }
_set() {
# `set 100` / `set 0` rewrite the positional parameters, i.e. they clamp
# $1 into the 0-100 range -- an intentional (if surprising) reuse of the
# `set` builtin rather than a recursive call.
if [ "$1" -gt 100 ] ; then
set 100
elif [ "$1" -lt 0 ] ; then
set 0
fi
# Scale percent -> 0-255 via bc, then truncate the decimal part with
# ${1%%.*}; clamp once more in case of rounding overflow.
set -- $(echo "$1 * 2.55" | bc)
[ "${1%%.*}" -gt 255 ] && set 255
mixerctl outputs.master=${1%%.*} >/dev/null
}
case "$1" in
inc|-i) inc "$2" ;;
dec|-d) dec "$2" ;;
set|-s) _set "$2" ;;
-h) usage ;;
*) get
esac
| true
|
d23b04c0064ae998659727b0ec5a78e4fcd8899f
|
Shell
|
srichavalimsft/benchmarks
|
/scripts/aeron/remote-cluster-benchmarks
|
UTF-8
| 12,018
| 3.34375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
##
## Copyright 2015-2021 Real Logic Limited.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## https://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
if [ -z "${MESSAGE_RATE}" ];
then
MESSAGE_RATE=(50000)
fi
if [ -z "${MESSAGE_LENGTH}" ]
then
export MESSAGE_LENGTH=(32 224 1344)
fi
source "${DIR}/remote-benchmarks-helper"
required_vars=(
"CLIENT_BENCHMARKS_PATH"
"CLIENT_JAVA_HOME"
"CLIENT_DRIVER_NUMA_NODE"
"CLIENT_LOAD_TEST_RIG_NUMA_NODE"
"CLIENT_INGRESS_CHANNEL"
"CLIENT_INGRESS_ENDPOINTS"
"CLIENT_EGRESS_CHANNEL"
"CLUSTER_MEMBERS"
"CLUSTER_SIZE"
"_JAVA_HOME"
"_DRIVER_NUMA_NODE"
"_NUMA_NODE"
"_CLUSTER_DIR"
"_CLUSTER_CONSENSUS_CHANNEL"
"_CLUSTER_INGRESS_CHANNEL"
"_CLUSTER_LOG_CHANNEL"
"_CLUSTER_REPLICATION_CHANNEL"
"_ARCHIVE_DIR"
"_ARCHIVE_CONTROL_CHANNEL")
for var in "${required_vars[@]}"; do
if [[ ${var} == _* ]]
then
for ((n = 0; n < CLUSTER_SIZE; n++))
do
node_var="NODE${n}${var}"
if [[ -z "${!node_var+''}" ]]; then
echo "env var '${node_var}' must be set"
required_var_missing=true
fi
done
else
if [[ -z "${!var+''}" ]]
then
echo "env var '${var}' must be set"
required_var_missing=true
fi
fi
done
if [ -n "${required_var_missing}" ]
then
exit 1
fi
enable_java_driver=1
no_java_driver=0
enable_c_driver=1
no_c_driver=0
enable_onload=1
enable_ef_vi=1
enable_ats=1
file_sync_levels=(0 2)
results_file="aeron-cluster-results.zip"
while [[ $# -gt 0 ]]
do
case "${1}" in
--disable-c-driver)
enable_c_driver=0
shift
;;
--no-c-driver)
no_c_driver=1
shift
;;
--disable-java-driver)
enable_java_driver=0
shift
;;
--no-java-driver)
no_java_driver=1
shift
;;
--no-onload)
enable_onload=0
shift
;;
--no-ef_vi)
enable_ef_vi=0
shift
;;
--no-ats)
enable_ats=0
shift
;;
--file-sync-level)
IFS=','
read -ra file_sync_levels <<<"${2}"
unset IFS
shift
shift
;;
--results-file)
results_file="${2}"
shift
shift
;;
-h | --help)
echo "${0} [--no-c-driver] [--no-java-driver] [--no-onload] [--no-ef_vi] [--no-ats] [--file-sync-level \"\${file-sync-level-csv}\"]"
exit
;;
*)
echo "Invalid parameter. Use --help to get a list of supported parameters."
exit 1
;;
esac
done
if [ "${enable_java_driver}" -ne 1 ] && [ "${enable_c_driver}" -ne 1 ]
then
echo "At least C or Java driver must be enabled!"
exit 1
fi
# Build (and print to stdout) the remote shell command that (re)starts a
# single Aeron cluster node.
# Globals:   CONNECTION_TIMEOUT, CLUSTER_MEMBERS (read); the NODE<id>_*
#            environment variables are dereferenced indirectly via ${!name}.
# Arguments: $1 - Java main class of the cluster node (killed if running)
#            $2 - command that starts the media driver on the server
#            $3 - archive file sync level (fsync)
#            $4 - numeric node id, selects the NODE<id>_* variable set
# Outputs:   the composed command string on stdout.
function start_server()
{
local cluster_node_class_name=${1}
local server_driver=${2}
local fsync=${3}
local node_id=${4}
local benchmarks_path=NODE${node_id}_BENCHMARKS_PATH
local java_home=NODE${node_id}_JAVA_HOME
local cluster_dir=NODE${node_id}_CLUSTER_DIR
local cluster_consensus_channel=NODE${node_id}_CLUSTER_CONSENSUS_CHANNEL
local cluster_ingress_channel=NODE${node_id}_CLUSTER_INGRESS_CHANNEL
local cluster_log_channel=NODE${node_id}_CLUSTER_LOG_CHANNEL
local cluster_replication_channel=NODE${node_id}_CLUSTER_REPLICATION_CHANNEL
local archive_dir=NODE${node_id}_ARCHIVE_DIR
local archive_control_channel=NODE${node_id}_ARCHIVE_CONTROL_CHANNEL
local numa_node=NODE${node_id}_NUMA_NODE
# Bug fix: the directories to wipe must be the *values* of the NODE<id>_
# variables (${!cluster_dir} / ${!archive_dir}), not the literal variable
# names -- the original deleted a relative path named e.g.
# "NODE0_CLUSTER_DIR" and left stale cluster/archive state behind
# (compare the -Daeron.cluster.dir=${!cluster_dir} line below).
echo "
export JAVA_HOME=\"${!java_home}\" \
; $(kill_java_process "${cluster_node_class_name}") \
; rm -rf \"${!cluster_dir}\" \
; rm -rf \"${!archive_dir}\" \
; ${server_driver} \
&& export JVM_OPTS=\"\
-Duk.co.real_logic.benchmarks.aeron.remote.connection.timeout=${CONNECTION_TIMEOUT} \
-Daeron.cluster.dir=${!cluster_dir} \
-Daeron.cluster.idle.strategy=noop \
-Daeron.cluster.members=${CLUSTER_MEMBERS} \
-Daeron.cluster.member.id=${node_id} \
-Daeron.cluster.consensus.channel=${!cluster_consensus_channel} \
-Daeron.cluster.ingress.channel=${!cluster_ingress_channel} \
-Daeron.cluster.log.channel=${!cluster_log_channel} \
-Daeron.cluster.replication.channel=${!cluster_replication_channel} \
-Daeron.archive.dir=${!archive_dir} \
-Daeron.archive.control.channel=${!archive_control_channel} \
-Daeron.archive.file.sync.level=${fsync} \
-Daeron.archive.catalog.file.sync.level=${fsync} \
-Daeron.archive.recording.events.enabled=false\" \
&& numactl -N ${!numa_node} -m ${!numa_node} ${!benchmarks_path}/scripts/aeron/cluster-node"
}
commands=()
scenarios=()
scripts_path="benchmarks_path_var/scripts/aeron"
onload="onload --profile=latency --force-profiles "
if [ "${enable_java_driver}" -eq 1 ]
then
if [ "${no_java_driver}" -eq 0 ]
then
scenarios=("java")
commands+=("$(start_media_driver "${scripts_path}/media-driver" "" "driver_numa_node_var")")
fi
if [ "${enable_onload}" -eq 1 ]
then
scenarios+=("java-onload")
commands+=("$(start_media_driver "${scripts_path}/media-driver" "${onload}" "driver_numa_node_var")")
fi
fi
if [ "${enable_c_driver}" -eq 1 ]
then
create_benchmark_props="touch ${scripts_path}/benchmark.properties"
if [ "${no_c_driver}" -eq 0 ]
then
scenarios+=("c")
commands+=("${create_benchmark_props}; $(start_media_driver "${scripts_path}/c-media-driver" "" "driver_numa_node_var")")
fi
if [ "${enable_onload}" -eq 1 ]
then
scenarios+=("c-onload")
commands+=("${create_benchmark_props}; $(start_media_driver "${scripts_path}/c-media-driver" "${onload}" "driver_numa_node_var")")
fi
if [ "${enable_ef_vi}" -eq 1 ]
then
scenarios+=("c-ef_vi")
commands+=("${create_benchmark_props}; \
export AERON_DRIVER_DYNAMIC_LIBRARIES=\"${scripts_path}/libaeron_ef_vi.so\" \
AERON_UDP_CHANNEL_TRANSPORT_BINDINGS_MEDIA=\"aeron_udp_channel_transport_ef_vi_bindings\" \
AERON_EF_VI_CONF_FILE=\"${scripts_path}/ef_vi.conf\" \
&& $(start_media_driver "${scripts_path}/c-media-driver" "" "driver_numa_node_var")")
fi
if [ "${enable_ats}" -eq 1 ]
then
scenarios+=("c-ats")
commands+=("${create_benchmark_props}; \
export AERON_TRANSPORT_SECURITY_CONF_DIR=\"${scripts_path}\" \
AERON_TRANSPORT_SECURITY_CONF_FILE=ats.conf \
AERON_UDP_CHANNEL_OUTGOING_INTERCEPTORS=\"aeron_transport_security_channel_interceptor_load\" \
AERON_UDP_CHANNEL_INCOMING_INTERCEPTORS=\"aeron_transport_security_channel_interceptor_load\" \
AERON_DRIVER_DYNAMIC_LIBRARIES=\"${scripts_path}/libaeron_transport_security.so\" \
&& $(start_media_driver "${scripts_path}/c-media-driver" "" "driver_numa_node_var")")
if [ "${enable_onload}" -eq 1 ]
then
scenarios+=("c-ats-onload")
commands+=("${create_benchmark_props}; \
export AERON_TRANSPORT_SECURITY_CONF_DIR=\"${scripts_path}\" \
AERON_TRANSPORT_SECURITY_CONF_FILE=ats.conf \
AERON_UDP_CHANNEL_OUTGOING_INTERCEPTORS=\"aeron_transport_security_channel_interceptor_load\" \
AERON_UDP_CHANNEL_INCOMING_INTERCEPTORS=\"aeron_transport_security_channel_interceptor_load\" \
AERON_DRIVER_DYNAMIC_LIBRARIES=\"${scripts_path}/libaeron_transport_security.so\" \
&& $(start_media_driver "${scripts_path}/c-media-driver" "${onload}" "driver_numa_node_var")")
fi
if [ "${enable_ef_vi}" -eq 1 ]
then
scenarios+=("c-ats-ef_vi")
commands+=("${create_benchmark_props}; \
export AERON_TRANSPORT_SECURITY_CONF_DIR=\"${scripts_path}\" \
AERON_TRANSPORT_SECURITY_CONF_FILE=ats.conf \
AERON_UDP_CHANNEL_OUTGOING_INTERCEPTORS=\"aeron_transport_security_channel_interceptor_load\" \
AERON_UDP_CHANNEL_INCOMING_INTERCEPTORS=\"aeron_transport_security_channel_interceptor_load\" \
AERON_DRIVER_DYNAMIC_LIBRARIES=\"${scripts_path}/libaeron_transport_security.so\",\"${scripts_path}/libaeron_ef_vi.so\" \
AERON_UDP_CHANNEL_TRANSPORT_BINDINGS_MEDIA=\"aeron_udp_channel_transport_ef_vi_bindings\" \
AERON_EF_VI_CONF_FILE=\"${scripts_path}/ef_vi.conf\" \
&& $(start_media_driver "${scripts_path}/c-media-driver" "" "driver_numa_node_var")")
fi
fi
fi
for index in "${!scenarios[@]}"
do
scenario="${scenarios[index]}"
client_driver="${commands[index]//benchmarks_path_var/${CLIENT_BENCHMARKS_PATH}}"
client_driver="${client_driver//driver_numa_node_var/${CLIENT_DRIVER_NUMA_NODE}}"
for fsync in "${file_sync_levels[@]}"
do
test="cluster-${scenario}-fsync-${fsync}"
echo -e "\n Testing scenario: '${test}'\n"
client_class_name="uk.co.real_logic.benchmarks.remote.LoadTestRig"
cluster_node_class_name="uk.co.real_logic.benchmarks.aeron.remote.ClusterNode"
client_ingress_endpoints=''
if [ -n "${CLIENT_INGRESS_ENDPOINTS}" ]; then
client_ingress_endpoints="-Daeron.cluster.ingress.endpoints=${CLIENT_INGRESS_ENDPOINTS}"
fi
for messageRate in "${MESSAGE_RATE[@]}"
do
for burstSize in "${BURST_SIZE[@]}"
do
for messageLength in "${MESSAGE_LENGTH[@]}"
do
for ((r = 1; r <= RUNS; r++))
do
echo -e '\n### Benchmark run #'"${r}"' ...\n'
start_client="export JVM_OPTS=\"\
-Duk.co.real_logic.benchmarks.aeron.remote.connection.timeout=${CONNECTION_TIMEOUT}\
-Duk.co.real_logic.benchmarks.remote.iterations=${ITERATIONS}\
-Duk.co.real_logic.benchmarks.remote.message.rate=${messageRate# }\
-Duk.co.real_logic.benchmarks.remote.batch.size=${burstSize# }\
-Duk.co.real_logic.benchmarks.remote.message.length=${messageLength# }\
-Duk.co.real_logic.benchmarks.remote.output.file=${test}\
-Duk.co.real_logic.benchmarks.remote.output.directory=${CLIENT_BENCHMARKS_PATH}/scripts/results\
-Daeron.cluster.ingress.channel=${CLIENT_INGRESS_CHANNEL}\
${client_ingress_endpoints}\
-Daeron.cluster.egress.channel=${CLIENT_EGRESS_CHANNEL}\
-Daeron.cluster.message.timeout=300000000000\"\
&& export JAVA_HOME=\"${CLIENT_JAVA_HOME}\"\
; $(kill_java_process "${client_class_name}")\
; ${client_driver}\
&& numactl -N ${CLIENT_LOAD_TEST_RIG_NUMA_NODE} -m ${CLIENT_LOAD_TEST_RIG_NUMA_NODE} ${CLIENT_BENCHMARKS_PATH}/scripts/aeron/cluster-client \
; kill -9 \${driver_pid}; wait"
for ((n = 0; n < CLUSTER_SIZE; n++))
do
echo -e "\nStarting node ${n}..."
ssh_node=SSH_CLUSTER_NODE${n}
server_benchmarks_path=NODE${n}_BENCHMARKS_PATH
server_driver="${commands[index]//benchmarks_path_var/${!server_benchmarks_path}}"
server_driver_numa_node=NODE${n}_DRIVER_NUMA_NODE
server_driver="${server_driver//driver_numa_node_var/${!server_driver_numa_node}}"
execute_remote_command "${!ssh_node}" "($(start_server "${cluster_node_class_name}" "${server_driver}" "${fsync}" "${n}") &) > /tmp/benchmarks-cluster-node-${n}.log 2>&1 && exit"
done
echo -e "\nRunning benchmark..."
execute_remote_command "${SSH_CLIENT_NODE}" "${start_client} && exit"
for ((n = 0; n < CLUSTER_SIZE; n++))
do
echo -e "\nStopping node ${n}..."
ssh_node=SSH_CLUSTER_NODE${n}
execute_remote_command "${!ssh_node}" "$(kill_java_process "${cluster_node_class_name}"); $(stop_media_driver) && exit"
done
done
done
done
done
done
done
download_results "${results_file}" "${CLIENT_BENCHMARKS_PATH}/scripts/results" "${DIR}/.."
| true
|
c78be3372f36a8dac229e5d7a1615431734f685e
|
Shell
|
ralucah/cachepj
|
/scripts/populate.sh
|
UTF-8
| 613
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Fill the Apache docroot with random binary test payloads:
#   populate_full   -> one file of (chunk * K) MB per chunk size
#   populate_chunks -> one file of (chunk) MB per chunk size
###################################
K=6
CHUNKS=(8 16 32)
HTDOCS="/usr/local/apache2/htdocs/"
SUDO="sudo"
###################################
# Create one K-chunk file per chunk size (48/96/192 MB with the defaults),
# named after its size in MB.
function populate_full {
for C in "${CHUNKS[@]}"
do
FULL=$(( C * K ))
echo "Full: $FULL MB"
$SUDO dd if=/dev/urandom of="$HTDOCS/$FULL" bs="${C}M" count="$K"
done
}
# Create one file per chunk size, sized and named <chunk> MB.
function populate_chunks {
for C in "${CHUNKS[@]}"
do
echo "$C MB"
$SUDO dd if=/dev/urandom of="$HTDOCS/${C}" bs=1M count="$C"
done
}
###################################
# ${HTDOCS:?} aborts the script instead of expanding to "/*" if the
# variable is ever unset or empty -- guards the recursive delete.
$SUDO rm -rf -- "${HTDOCS:?}"/*
populate_full
populate_chunks
ls -lh "$HTDOCS"
| true
|
8889ef11011847d0f4f35f00f985ae6bdbdd5790
|
Shell
|
wor/abs-repo
|
/aggregate/PKGBUILD
|
UTF-8
| 774
| 2.734375
| 3
|
[] |
no_license
|
# Maintainer: josephgbr <rafael.f.f1@gmail.com>
# Contributor: Mark Smith <markzzzsmith@yahoo.com.au>
# Contributor: Esa Määttä <wor@iki.fi>
pkgname=aggregate
pkgver=1.0.2
pkgrel=3
pkgdesc="A tool for aggregating IPv4 CIDR networks"
arch=('i686' 'x86_64')
url="http://www.vergenet.net/linux/aggregate/"
# license is an array field in PKGBUILDs (was a plain string).
license=('GPL')
# The previous duplicate depends=('popt') assignment was dead code --
# it was immediately overwritten by the line below.
depends=('glibc' 'popt')
source=("${url}/download/$pkgname-$pkgver.tar.gz"
        "no-vanessa-logger.patch")
md5sums=('ca4401a4bdfaa7710fb5c5af98f00b3b'
         '2f416c5bef4048f2b43f0063152c3b0b')
build() {
  cd "$pkgname-$pkgver"
  patch -i ../no-vanessa-logger.patch
  # Needed because the no-vanessa-logger patch modifies configure.in.
  autoreconf -i
  ./configure --prefix=/usr --mandir=/usr/share/man
  make
}
package() {
  cd "$pkgname-$pkgver"
  make DESTDIR="$pkgdir" install
}
| true
|
6c73b7886d72d7dcc2cc940901147f099f7ae5fe
|
Shell
|
pj0616/ctakes-docker
|
/bin/runReaderContainer.sh
|
UTF-8
| 296
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Launch the i2b2-reader container in the background.  It requires an
# env_file.txt in the current directory and mounts ./shared at /shared.
if [ ! -f env_file.txt ]; then
  echo "ERROR: File env_file.txt not found."
  echo "       This script (" $0 ") requires an env_file.txt file."
  exit 1
fi
# Use $(...) instead of backticks and quote the volume argument so a
# working directory containing spaces does not break the bind mount.
shared_dir="$(pwd)/shared"
docker run --name i2b2-reader --env-file env_file.txt -v "$shared_dir:/shared" -d i2b2-reader
| true
|
dba91ea49393279b7f717be6fd90568dc9b2474f
|
Shell
|
Gilwyad/mailnesia.com
|
/tools/psql-create-tables.sh
|
UTF-8
| 2,864
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# This script can be run initially to create all tables and necessary relations.
#arguments to psql
psqlArgs="--tuples-only --no-psqlrc --username=mailnesia --quiet";
# Partitioning is used, the key being the id because that's the only
# value that needs to be unique in the whole table across the partitions.
# "emails" is the "master" table from which all of the partitions inherit.
# This contains no data so no indexes are required. The creation of
# partitions and modification of the insert trigger is handled by the
# utility `psql-partition-update.sh`.
# The purpose of partitioning is to make it easy to discard old data.
# Instead of `DELETE FROM emails WHERE ( EXTRACT(EPOCH FROM
# current_timestamp - arrival_date) / 3600)::INT > ?;`, it's as simple
# as `DROP TABLE emails_5;`. The latter causes almost no disk activity
# compared to the former, which can run for minutes, and cause
# performance issues.
# function to create the emails table
createEmailsTable()
{
echo "create emails table"
echo "
CREATE TABLE IF NOT EXISTS emails (
id SERIAL PRIMARY KEY,
arrival_date timestamp without time zone NOT NULL default CURRENT_TIMESTAMP,
email_date varchar(31) default NULL,
email_from varchar(100) default NULL,
email_to varchar(100) default NULL,
email_subject varchar(200) default NULL,
mailbox varchar(30) NOT NULL,
email bytea
);
"|psql $psqlArgs
}
# function to create the mailbox alias table
# This table holds the alias names for mailboxes.
createMailboxAliasTable()
{
echo "create mailbox alias table"
echo "
CREATE TABLE IF NOT EXISTS mailbox_alias (
mailbox varchar(30) NOT NULL,
alias varchar(30) NOT NULL UNIQUE
);
ALTER TABLE mailbox_alias ADD CONSTRAINT lowercase_only CHECK (LOWER(alias) = alias);
"|psql $psqlArgs
}
# function to create the email per day statistics table
# This table is for statistics only, contains the number of emails
# received each day and the combined size of them.
createEmailPerDayTable()
{
echo "create email per day statistics table"
echo "
CREATE TABLE IF NOT EXISTS emailperday (
day date default current_date UNIQUE,
email integer DEFAULT 0,
bandwidth integer DEFAULT 0
);
"|psql $psqlArgs
}
# function to start partitioning the emails table
# insert_emails_trigger is the insert trigger that calls the trigger function defined in
# psql-partition-update.sh to redirect all writes to the latest partition.
startPartitioning()
{
echo "running psql-partition-update.sh"
/bin/bash psql-partition-update.sh
echo "starting partitioning the emails table"
echo "
CREATE TRIGGER insert_emails_trigger
BEFORE INSERT ON emails
FOR EACH ROW EXECUTE PROCEDURE emails_insert_trigger();
"|psql $psqlArgs
}
createEmailsTable
createMailboxAliasTable
createEmailPerDayTable
startPartitioning
| true
|
79a6c2d2a13b1859a4c713e00a170d43cbda6584
|
Shell
|
j0ma/morph-seg
|
/src/segment-with-categories.sh
|
UTF-8
| 4,198
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
set -euo pipefail
# Created by argbash-init v2.8.1
# ARG_OPTIONAL_SINGLE([input])
# ARG_OPTIONAL_SINGLE([output])
# ARG_OPTIONAL_SINGLE([model-binary])
# ARG_OPTIONAL_SINGLE([model-type])
# ARG_HELP([<The general help message of my script>])
# ARGBASH_GO()
# needed because of Argbash --> m4_ignore([
### START OF CODE GENERATED BY Argbash v2.8.1 one line above ###
# Argbash is a bash code generator used to get arguments parsing right.
# Argbash is FREE SOFTWARE, see https://argbash.io for more info
die() {
local _ret=$2
test -n "$_ret" || _ret=1
test "$_PRINT_HELP" = yes && print_help >&2
echo "$1" >&2
exit ${_ret}
}
begins_with_short_option() {
local first_option all_short_options='h'
first_option="${1:0:1}"
test "$all_short_options" = "${all_short_options/$first_option/}" && return 1 || return 0
}
# THE DEFAULTS INITIALIZATION - OPTIONALS
_arg_input=
_arg_output=
_arg_model_binary=
_arg_model_type=
print_help() {
printf '%s\n' "Script to segment FlatCat/LMVR output for MC2010 evaluation"
printf 'Usage: %s [--input <arg>] [--output <arg>] [--model-binary <arg>] [--model-type <arg>] [-h|--help]\n' "$0"
printf '\t%s\n' "-h, --help: Prints help"
}
parse_commandline() {
while test $# -gt 0; do
_key="$1"
case "$_key" in
--input)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_input="$2"
shift
;;
--input=*)
_arg_input="${_key##--input=}"
;;
--output)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_output="$2"
shift
;;
--output=*)
_arg_output="${_key##--output=}"
;;
--model-binary)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_model_binary="$2"
shift
;;
--model-binary=*)
_arg_model_binary="${_key##--model-binary=}"
;;
--model-type)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
_arg_model_type="$2"
shift
;;
--model-type=*)
_arg_model_type="${_key##--model-type=}"
;;
-h | --help)
print_help
exit 0
;;
-h*)
print_help
exit 0
;;
*)
_PRINT_HELP=yes die "FATAL ERROR: Got an unexpected argument '$1'" 1
;;
esac
shift
done
}
parse_commandline "$@"
# OTHER STUFF GENERATED BY Argbash
### END OF CODE GENERATED BY Argbash (sortof) ### ])
# [ <-- needed because of Argbash
validate() {
[ -z "$1" ] && print_help && exit 1
}
segment_lmvr() {
# make sure we're actually running 2.7
if [ -z "$(python -c "import sys; print(sys.version)" | grep -E "^2\.7")" ]; then
echo "Need to be running Python 2.7 for LMVR!"
exit 1
fi
INPUT_FILE=$1
MODEL_BINARY=$2
OUTPUT_FILE=$3
echo "Segmenting with LMVR..."
lmvr-segment \
"${MODEL_BINARY}" \
--output-categories \
-o "${OUTPUT_FILE}" \
"${INPUT_FILE}"
}
segment_flatcat() {
INPUT_FILE=$1
MODEL_BINARY=$2
OUTPUT_FILE=$3
echo "Segmenting with Flatcat..."
flatcat-segment \
"${MODEL_BINARY}" \
--output-categories \
-o "${OUTPUT_FILE}" \
"${INPUT_FILE}"
}
printf 'Value of --%s: %s\n' 'input' "$_arg_input"
printf 'Value of --%s: %s\n' 'output' "$_arg_output"
printf 'Value of --%s: %s\n' 'model-binary' "$_arg_model_binary"
printf 'Value of --%s: %s\n' 'model-type' "$_arg_model_type"
if [ "$_arg_model_type" = "flatcat" ]; then
segment_flatcat \
"${_arg_input}" \
"${_arg_model_binary}" \
"${_arg_output}"
elif [ "$_arg_model_type" = "lmvr" ]; then
segment_lmvr \
"${_arg_input}" \
"${_arg_model_binary}" \
"${_arg_output}"
else
echo "Invalid model type! Only \"flatcat\" and \"lmvr\" are supported!"
fi
# ] <-- needed because of Argbash
| true
|
d9b66eac85d4fb2fc8f9a08dafe863a725061c6d
|
Shell
|
pranav/cs5700
|
/Project3/ex2/Makefileex2.sh
|
UTF-8
| 334
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/sh
VARIANT=$1
for file in $(ls $VARIANT); do
latency=$(echo $(./latency.py $VARIANT/$file) | sed 's/ /,/g')
droprate=$(echo $(./droprate.py $VARIANT/$file) | sed 's/ /,/g')
throughput=$(echo $(./throughput.py $VARIANT/$file) | sed 's/ /,/g')
echo $file ',' $latency ',' $droprate ',' $throughput | sed 's/ //g'
done
| true
|
6c08a182b2daed9cde604bbb5ff01a7f3e198baf
|
Shell
|
doc22940/ghindex
|
/00_create_watch_stream.sh
|
UTF-8
| 590
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script will create stream of repository_url -> watcher events
# You will need to replace project id and destination table with your own:
source ./scripts_config
echo "Gathering watchers. Data will be saved to $WATCHERS_TABLE"
# This should yield approximately 19M+ records
bq --project_id $PROJECT_ID \
query --batch \
--allow_large_results \
--destination_table $WATCHERS_TABLE \
--replace \
"SELECT actor.login, repo.name
FROM
(TABLE_DATE_RANGE([githubarchive:day.],
TIMESTAMP('2015-01-01'),
TIMESTAMP('2016-01-04')
))
Where type = 'WatchEvent'"
| true
|
9ccffce2da7671131201652868ac7bceda04dfff
|
Shell
|
kristersm/DMI
|
/shell_operators.sh
|
UTF-8
| 1,645
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
#4. piemers - izteiksmes (+,-,*,/) ar argumentiem
a=$1
b=$2
echo "---- piemers Nr.4 ----"
val41=`expr $a + $b`
echo "$a + $b = " $val41
val42=`expr $a - $b`
echo "$a - $b = " $val42
val43=`expr $a \* $b`
echo "$a * $b = " $val43
val44=`expr $a / $b`
echo "$a / $b = " $val44
val45=`expr $a % $b`
echo "$a % $b = " $val45
echo "-------------------------"
#3. piemers - izteiksmes (+,-,*,/) ar mainigiem
a=10
b=40
echo "---- piemers Nr.3 ----"
val31=`expr $a + $b`
echo "$a + $b = " $val31
val32=`expr $a - $b`
echo "$a - $b = " $val32
val33=`expr $a \* $b`
echo "$a * $b = " $val33
val34=`expr $a / $b`
echo "$a / $b = " $val34
val35=`expr $a % $b`
echo "$a % $b = " $val35
echo "-------------------------"
#2. piemers - izteiksmes (+,-,*,/) ar konstantem
echo "---- piemers Nr.2 ----"
val21=`expr 2 + 3`
echo "2+ 3 = " $val21
val22=`expr 2 - 3`
echo "2 - 3 = " $val22
val23=`expr 2 \* 3`
echo "2 * 3 = " $val23
val24=`expr 2 / 3`
echo "2 / 3 = " $val24
val25=`expr 2 % 3`
echo "2 % 3 = " $val25
echo "-------------------------"
#1. piemers - pareizs izteiksmes pieraksts
echo "------ Piemers Nr.1 ------"
val11='expr 2+2'
echo "Parasti apostrofi bez atstarpem " $val11
val12='expr2+2'
echo "Parassti apostrofi ar atstarpem " $val12
val13=`expr 2+2`
echo "Neparasti apostrofi bez atstarpem " $val13
val14=`expr 2 + 2`
echo "Neparasti apostrofi ar atstarpem " $val14
echo "---------"
#!/bin/sh
#val=`expr 2 + 2`
#echo "Total value : $val"
#!/bin/sh
#val=`expr 2 - 2`
#echo "Total value : $val"
#!/bin/sh
#val=`expr 2 \* 2`
#echo "Total value : $val"
#!/bin/sh
#val=`expr 2 / 2`
#echo "Total value : $val"
| true
|
d574e285bd9e217a4f243fd671fcfaffd3826138
|
Shell
|
CBIIT/HPC_DME_APIs
|
/src/hpc-server/hpc-ws-rs-test/src/test/dice/test-register/test-collection-registration/test-register-project-invalid-restricted-attribute/runme
|
UTF-8
| 744
| 3.046875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Readme.txt
# @author: George Zaki
#
# Copyright Leidos Biomedical Research, Inc
#
# Distributed under the OSI-approved BSD 3-Clause License.
# See http://ncip.github.com/HPC/LICENSE.txt for details.
#
# Script to run this test
# Usage: runme [no arguments]
#import common functions
source ../../../utils/functions
sleep 1
NAME=`date +"%b-%d-%G-%H-%M-%S"`
USERID=`get_username ../../../utils/config`
DESTINATION=$(get_basefolder)/test-${NAME}
curl_register input.json "$DESTINATION" collection
EXPECTED_MSG="Invalid Metadata Value: collection_type = Projectxxx"
get_http_code $RESPONSE_HEADER >&2
get_json_value $RESPONSE_MSG errorType >&2
get_json_value $RESPONSE_MSG message | sed "s/^$EXPECTED_MSG.*/$EXPECTED_MSG/" >&2
| true
|
0e81809633492a2abcae53a6029f88cd205cff7e
|
Shell
|
ruadapt/gpuheap
|
/local_test/merge_path/test.sh
|
UTF-8
| 586
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
i=0
totalcases=26
for ((batchSize=32;batchSize<=2048;batchSize*=2));do
max_blockSize=$(($batchSize<=1024 ? $batchSize:1024))
for ((blockSize=32;blockSize<=$max_blockSize;blockSize*=2));do
printf "\r[%d/%d]" $i $totalcases
RES=`./merge_path_test $batchSize $blockSize | tail -1 | cut -d' ' -f1`
if [ "$RES" != "Success" ]; then
echo $batchSize $blockSize $RES
./merge_path_test $batchSize $blockSize
exit 1
fi
i=$((i+1))
done
done
printf "\rAll $totalcases testcases passed\nSuccess\n"
| true
|
2fa0c477040e13161d28a15f9dc7a57b52ab5403
|
Shell
|
loikg/dotfiles
|
/.bin/yt2wanie
|
UTF-8
| 237
| 2.796875
| 3
|
[] |
no_license
|
#! /bin/sh
#
# Download a video from youtube in mp3
# upload it to google drive
#
cd /tmp
youtube-dl -x --audio-format mp3 --audio-quality 0 \
-o '%(title)s.%(ext)s' \
--exec 'dropbox_uploader.sh -p upload {} /Wanie && rm -f {}' $1
| true
|
89758093d5e3b308ce4ff938ce8a41de917ed006
|
Shell
|
kmonticolo/ansible-role-pacemaker
|
/pacemaker/files/crm_mon_stats.sh
|
UTF-8
| 1,796
| 3.28125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# nagios returncodes:
STATE_OK=0
STATE_WARNING=1
STATE_CRITICAL=2
STATE_UNKNOWN=3
while getopts “i:” OPTION; do
case $OPTION in
i)
item=$OPTARG
;;
?)
echo "UNKNOWN: Unknown argument: $1"
exit $ST_UK
;;
esac
done
# MANDATORY args
if [ -z "$item" ]; then
echo "UNKNOWN: missing argument -i"
exit $ST_UK
fi
# check for info and set $STATUS
out=$(crm_mon --one-shot --as-xml 2>/dev/null)
STATUS=$?
p_import="import xml.etree.cElementTree as et,sys,io;tree=et.ElementTree();"
p_tree="tree.parse(io.BytesIO(b'''$out'''));"
if [ ${STATUS} -eq 0 ]
then
if [ "$item" == "last_update" ]; then
p_item="obj=tree.getroot(); print obj[0][0].get('time');"
elif [ "$item" == "nodes_configured" ]; then
p_item="obj=tree.getroot(); print obj[0][4].get('number');"
elif [ "$item" == "resources_configured" ]; then
p_item="obj=tree.getroot(); print obj[0][5].get('number'); "
elif [ "$item" == "nodes_online" ]; then
# p_item="obj=tree.getroot(); print obj[1][0].get('online'); print obj[1][1].get('online'); print(\"\",obj[1][2].get('online')); "
p_item="obj=tree.getroot(); print ' '.join([f.get('online') for f in obj.findall('nodes/node')])"
elif [ "$item" == "nodes_resources_running" ]; then
p_item="obj=tree.getroot(); print ' '.join([f.get('resources_running') for f in obj.findall('nodes/node')])"
else
echo "UNKNOWN: wrong item: $item"
exit ${STATE_UNKNOWN}
fi
p_out=$(python -c "$p_import $p_tree $p_item") # 2>/dev/null)
if [ $? -eq 0 ]
then
echo "$p_out"
else
echo "UNKNOWN: python returncode $?"
exit ${STATE_UNKNOWN}
fi
else
echo "UNKNOWN: returncode ${STATUS}"
exit ${STATE_UNKNOWN}
fi
| true
|
b17312e618bfbd2ce36b68289c9c2a6d2366641a
|
Shell
|
thenightex/gitlab-ci-testing
|
/7-gitlab-docker-prod/runner-and-docker-host.sh
|
UTF-8
| 844
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Setup Gitlab Runner, version 1
# INSTALL RUNNER
curl -L https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh | sudo bash
apt install gitlab-runner
# INSTALL DOCKER-COMPOSE
curl -L "https://github.com/docker/compose/releases/download/1.25.3/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
# INSTALL DOCKER
apt install containerd docker.io
docker version
usermod -aG docker gitlab-runner
gitlab-runner register \
--detach \
--restart always \
--non-interactive \
--url "https://gitlab.url.com/" \
--registration-token "REGISTRATION_TOKEN" \
--executor "shell" \
--name gitlab-runner-uschi \
--description "uschi-prod" \
--tag-list "uschi,shell,prod,docker-daemon" \
--run-untagged="false" \
--locked="false" \
--access-level="not_protected"
| true
|
180b6ae6009c4056432e21ff2a6fe42f585bd3a6
|
Shell
|
mjipeo/ops
|
/install_dotfiles
|
UTF-8
| 2,322
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
YUM=/usr/bin/yum
HOME_DIR=/home/rich
BOOTSTRAP_DIR=/tmp/bootstrap
PROD_DIR=$HOME_DIR/data/docs/fiveguys/prod
DEV_DIR=$HOME_DIR/data/docs/fiveguys/dev-mjipeo
LOG_DIR=$HOME_DIR/data/logs/fiveguys
RPM=/bin/rpm
YUM=/usr/bin/yum
MAKE=/usr/bin/make
CHKCONFIG=/sbin/chkconfig
SERVICE=/sbin/service
PYTHON=/usr/local/bin/python2.7
EASY_INSTALL=/usr/local/bin/easy_install-2.7
PIP=/usr/local/bin/pip-2.7
SUPERVISORCTL=/usr/local/bin/supervisorctl
NPM=/usr/local/bin/npm
sudo $YUM install -y zsh git
cd ~
git clone https://github.com/robbyrussell/oh-my-zsh ~/.oh-my-zsh
git clone https://github.com/mjipeo/dotfiles.git ~/.dotfiles
exit;
sudo $YUM install -y zsh git
# Install Python 2.7
cd /tmp
sudo $YUM groupinstall -y "Development tools"
sudo $YUM install -y zlib-devel bzip2-devel openssl-devel ncurses-devel sqlite-devel readline-devel tk-devel
wget http://python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2 --no-check-certificate
tar xf Python-2.7.3.tar.bz2
cd Python-2.7.3
./configure --prefix=/usr/local
make && sudo $MAKE altinstall
# Install Distribute & PIP
cd /tmp
wget http://pypi.python.org/packages/source/d/distribute/distribute-0.6.35.tar.gz --no-check-certificate
tar xf distribute-0.6.35.tar.gz
cd distribute-0.6.35
sudo $PYTHON setup.py install
sudo $EASY_INSTALL pip
# Install Virtualenv
cd /tmp
sudo $PIP install virtualenv
sudo $PIP install virtualenvwrapper
export WORKON_HOME=$HOME_DIR/.virtualenvs
export VIRTUALENVWRAPPER_PYTHON=/usr/local/bin/python2.7
export VIRTUALENVWRAPPER_VIRTUALENV=/usr/local/bin/virtualenv-2.7
source /usr/local/bin/virtualenvwrapper.sh
mkvirtualenv --distribute --no-site-package fiveguys
workon fiveguys
# (Master Only) Recompile VIM with ruby support
sudo $YUM install -y hg
cd /tmp
hg clone https://vim.googlecode.com/hg/ vim
cd vim
./configure --enable-pythoninterp --enable-rubyinterp
make
sudo $MAKE install
# (Master Only) Install Node.js & NPM
cd /tmp
wget http://nodejs.org/dist/v0.10.21/node-v0.10.21.tar.gz
tar xfz node-v0.10.21.tar.gz
cd node-v0.10.21
./configure --prefix=/usr/local
make && sudo $MAKE install
# Install Supervisor
# ------------------
sudo $EASY_INSTALL supervisor
sudo cp $BOOTSTRAP_DIR/supervisord.conf /etc/
sudo cp $BOOTSTRAP_DIR/supervisord /etc/init.d/
sudo $CHKCONFIG supervisord on
sudo $SERVICE supervisord start
| true
|
5905f11906cc7411bd616af94e0712113cfde2d3
|
Shell
|
sandeepraonayini/shell-scripts
|
/trap.sh
|
UTF-8
| 334
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
#This file is to show signals are can trap
clear
trap 'echo " .please use Q to exit"' SIGINT SIGTERM SIGTSTP
while [ "$CHOICE" != "Q" ] && [ "$CHOICE" != "q" ]; do
echo "MAINMENU"
echo "1)choice one"
echo "2)cjoice two"
echo "3)choice three"
echo "4) QUIT/EXIT"
echo ""
read CHOICE
clear
done
| true
|
5690fb35fcea27aaee343536b76379985dca4777
|
Shell
|
NIDObr/FatDefrag
|
/NidoDefrag
|
UTF-8
| 1,406
| 3.875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
#------------------------------------------------------------------
# Author: nido
# More: < https://github.com/NIDObr >
# Version: 0.1.3-beta 12/07/2020
# Tested on:
# GNU bash, versão 4.4.20(1)-release (x86_64-pc-linux-gnu)
# Info:
# Defragments FAT systems.
# POSIX compatible
# License: BSD-3-Clause License
#------------------------------------------------------------------
#---------------------------------VARS-----------------------------
dir_mnt="/media/nido_defrage"
dir_tmp="/home/defrags2"
#---------------------------------HEADER---------------------------
[ "$USER" = 'root' ] || { # ROOT?
printf '%s\n' "Error run as ROOT!"
exit 2
}
[ -z "$1" ] || { #
printf '%s\n' "Error especify device!"
}
[ -e "$1" ] || {
printf '%s\n' "Specfied target \"""${1}""\", does not exit."
exit 3
}
if [ ! -e "${dir_mnt}" ]&&[ ! -e "${dir_tmp}" ];then
printf '%s\n' "Directories do not exist, creating..."
mkdir "${dir_mnt}" "${dir_tmp}" && printf '%s\n' "Successfully created!"
fi
if grep -q "${1}" /proc/mounts;then
dir_mnt=$(grep "${1}" /proc/mounts | awk -F " " '{print $2}')
else
mount "${1}" "${dir_mnt}" || exit 4
fi
mv "${dir_mnt}"/* "${dir_tmp}" || exit 5
umount "${dir_mnt}" || exit 6
mkfs.vfat "${1}" || exit 7
mount "${1}" "${dir_mnt}" || exit 8
mv "${dir_tmp}"/* "${dir_mnt}/" || exit 9
umount "${dir_mnt}" || exit '10'
printf '%s\n' "Successfully defragmented!"
exit 0
| true
|
8833ad12e853eda7f539b8ca309510993ca1744d
|
Shell
|
devfsc/electrum-PAC
|
/contrib/build-wine/prepare-x11.sh
|
UTF-8
| 932
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Please update these links carefully, some versions won't work under Wine
MINGW_SETUP_URL=https://ayera.dl.sourceforge.net/project/mingw/Installer/mingw-get-setup.exe
X11_HASH_URL=https://github.com/akhavr/x11_hash/archive/1.4.tar.gz
## These settings probably don't need change
export WINEPREFIX=/opt/wine64
#export WINEARCH='win32'
PYHOME=c:/python27
PYTHON="wine $PYHOME/python.exe -OO -B"
MINGW="wine c:/MinGW/bin/mingw-get.exe"
# Let's begin!
cd `dirname $0`
set -e
wine 'wineboot'
cd tmp
# downoad mingw-get-setup.exe
wget -q -O mingw-get-setup.exe "$MINGW_SETUP_URL"
wine mingw-get-setup.exe
echo "add c:\MinGW\bin to PATH using regedit in HKEY_CURRENT_USER/Environment"
regedit
$MINGW install gcc
$MINGW install mingw-utils
$MINGW install mingw32-libz
# Install x11 hash
wget -O x11_hash.tar.gz "$X11_HASH_URL"
tar -xvf x11_hash.tar.gz
cd x11_hash-1.4
$PYTHON setup.py build --compile=mingw32 install
| true
|
33a1126fa6381ec13ae48c25e6f32b389f0fb5d3
|
Shell
|
vallisneria/hknu2-system-programming
|
/Chapter7/4_모든_파일_처리/1_listdir.bash
|
UTF-8
| 515
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# 7장 68페이지
# 모든 파일 처리 문제 1
#
# 명령줄 인수로 주어진 디렉터리 내의 모든 서브디렉터리를 리스트하는
# bash script를 작성하라. 명령줄 인수가 없으면 현재 디렉터리를 대상으로 한다.
if [ $# -eq 0 ]
then
~/Documents/190428_bash_script/listdir.bash .
elif [ -d $1 ]
then
cd $1
for file in *
do
if [ -d "$file" ]
then
echo "$file"
~/Documents/190428_bash_script/listdir.bash $file
fi
done
fi
| true
|
54e413b1b01da5a1423535a5137154898c5e3cda
|
Shell
|
edpzjh/jbosgi
|
/distribution/installer/src/main/resources/runtime/bin/run.conf
|
UTF-8
| 1,264
| 2.953125
| 3
|
[] |
no_license
|
## -*- shell-script -*- ######################################################
## ##
## JBossOSGi Bootstrap Script Configuration ##
## ##
##############################################################################
###
#
# This file is optional; it may be removed if not needed.
#
#
# Specify the location of the Java home directory. If set then $JAVA will
# be defined to $JAVA_HOME/bin/java, else $JAVA will be "java".
#
#JAVA_HOME="/opt/java/jdk"
#
# Specify the exact Java VM executable to use.
#
#JAVA=""
#
# Specify options to pass to the Java VM.
#
if [ "x$JAVA_OPTS" = "x" ]; then
JAVA_OPTS="-Xms128m -Xmx512m -XX:MaxPermSize=256m -Dsun.rmi.dgc.client.gcInterval=3600000 -Dsun.rmi.dgc.server.gcInterval=3600000"
fi
# VFS leak detection
JAVA_OPTS="$JAVA_OPTS -Djboss.vfs.leakDebugging=true"
# Sample JPDA settings for remote socket debuging
#JAVA_OPTS="$JAVA_OPTS -Xrunjdwp:transport=dt_socket,address=8787,server=y,suspend=n"
# Sample JPDA settings for shared memory debugging
#JAVA_OPTS="$JAVA_OPTS -Xrunjdwp:transport=dt_shmem,server=y,suspend=n,address=jboss"
| true
|
8a773b77512820643c164d71c83c40855bcbc4a9
|
Shell
|
ccozkan/bash-scripts
|
/themenuthatrulesthemall.sh
|
UTF-8
| 1,393
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
##scriptin calismasi icin zenity gerek.
file=$(zenity --width=720 --height=640 --list --title "the menu that rules them all" --text "Open..." --column File "yutup" "unutulmaz filmler" "kiybord" "clementine" "File Manager" "pavu" "lxrandr" "terminal" "2 saate kapan" "3 saate kapan" "hemen kapan amk" "alarm kuracam" "mausmenu" "kapan ne bileyim")
if [ "$file" = "yutup" ]; then
chromium www.youtube.com
elif [ "$file" = "unutulmaz filmler" ]; then
chromium www.unutulmazfilmler.com
elif [ "$file" = "kiybord" ]; then
xvkbd
elif [ "$file" = "clementine" ]; then
clementine
elif [ "$file" = "File Manager" ]; then
pcmanfm
elif [ "$file" = "pavu" ]; then
pavucontrol
elif [ "$file" = "lxrandr" ]; then
lxrandr
elif [ "$file" = "terminal" ]; then
terminator
elif [ "$file" = "2* saate kapan" ]; then
sleep 72
cd /home/cc/Scripts/kullanilan/
./bilgkapa
elif [ "$file" = "3 saate kapan" ]; then
sleep 10800
shutdown -h now
elif [ "$file" = "hemen kapan amk" ]; then
shutdown -h now
elif [ "$file" = "alarm kuracam" ]; then
alarm-clock-applet
elif [ "$file" = "kapan ne bileyim" ]; then
killall ~/Scripts/sacmasapan/themenuthatrulesthemall.sh
elif [ "$file" = "mausmenu" ]; then
~/Scripts/sacmasapan/themenuthatrulesthemall.sh
else
~/Scripts/sacmasapan/themenuthatrulesthemall.sh
fi
| true
|
aaaaf3cfb1cc667036548931b2e142db043acc98
|
Shell
|
aino/wfinstall
|
/install_redis
|
UTF-8
| 298
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
PREFIX=$HOME/opt/redis
VERSION=2.6.2
NAME=redis-$VERSION
cd $HOME/tmp
if [ ! -e $NAME.tar.gz ]; then
wget http://redis.googlecode.com/files/$NAME.tar.gz
fi
tar xzf $NAME.tar.gz
cd $NAME
make
make PREFIX=$PREFIX install
ln -s $PREFIX/bin/redis-server ~/bin/
mkdir $HOME/var/redis
| true
|
e55ac6556bc4338f2b557bc4f6839aaf78e72c51
|
Shell
|
jvant/NAMD_Equilibration
|
/usefull_scripts/remove_files_from_all_dirs.sh
|
UTF-8
| 242
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
echo "What would you like to remove? Remember to be careful what you type!!!"
read files
var=$(ls ../put_pdbs_here)
for i in $var
do
cd ../systems/${i:0:4}/
pwd
echo ${i:0:4}
echo " "
rm $files
cd -
done
| true
|
1d981186f0e9394f7397307718cfa0a815d41c7c
|
Shell
|
rustymyers/AppleDefaultScreensaver
|
/LaunchAgent.pkg/Contents/Resources/PAYLOAD/screensaver
|
UTF-8
| 2,740
| 3.125
| 3
|
[] |
no_license
|
#!/bin/sh
#* screensaverset
#+ chris.gerke@gmail.com
#+
#+ Description: Default Screensaver.
#+
#+ Version: 1.0
#+
#+ History:
#+ 1.0: Script.
#+
#+ TODO:
#+ * Add error checking?
ME=$0
SCRIPT_DIR="$1/Contents/Resources"
TARGET_DIR="$3"
#+ // fix
if [ -z "${TARGET_DIR}" ] || [ "${TARGET_DIR}" = "/" ]; then
TARGET_DIR=""
fi
#+ UUID
if [[ `ioreg -rd1 -c IOPlatformExpertDevice | /usr/bin/grep -i "UUID" | cut -c27-50` == "00000000-0000-1000-8000-" ]]; then
UUID=`ioreg -rd1 -c IOPlatformExpertDevice | /usr/bin/grep -i "UUID" | cut -c51-62 | awk {'print tolower()'}`
elif [[ `ioreg -rd1 -c IOPlatformExpertDevice | /usr/bin/grep -i "UUID" | cut -c27-50` != "00000000-0000-1000-8000-" ]]; then
UUID=`ioreg -rd1 -c IOPlatformExpertDevice | /usr/bin/grep -i "UUID" | cut -c27-62`
fi
#+ ByHost
sudo /bin/mkdir -p "${HOME}/Library/Preferences/ByHost"
#+ com.apple.screensaver.plist
sudo /usr/bin/defaults write "${HOME}/Library/Preferences/com.apple.screensaver" "askForPassword" -int "1"
sudo /usr/bin/defaults write "${HOME}/Library/Preferences/com.apple.screensaver" "askForPasswordDelay" -int "5"
#+ com.apple.screensaver.${UUID}.plist
sudo /usr/bin/defaults write "${HOME}/Library/Preferences/ByHost/com.apple.screensaver.${UUID}" "CleanExit" -string "YES"
sudo /usr/libexec/PlistBuddy -c 'Add :CleanExit string YES' "${HOME}/Library/Preferences/ByHost/com.apple.screensaver.${UUID}.plist"
sudo /usr/libexec/PlistBuddy -c 'Add :idleTime integer 900' "${HOME}/Library/Preferences/ByHost/com.apple.screensaver.${UUID}.plist"
sudo /usr/libexec/PlistBuddy -c 'Add :moduleDict dict' "${HOME}/Library/Preferences/ByHost/com.apple.screensaver.${UUID}.plist"
sudo /usr/libexec/PlistBuddy -c 'Add :moduleDict:iLifeMediaGroupType integer 0' "${HOME}/Library/Preferences/ByHost/com.apple.screensaver.${UUID}.plist"
sudo /usr/libexec/PlistBuddy -c "Add :moduleDict:moduleName string Default" "${HOME}/Library/Preferences/ByHost/com.apple.screensaver.${UUID}.plist"
sudo /usr/libexec/PlistBuddy -c "Add :moduleDict:path string ${TARGET_DIR}/Library/Screen Savers/Default.slideSaver" "${HOME}/Library/Preferences/ByHost/com.apple.screensaver.${UUID}.plist"
sudo /usr/libexec/PlistBuddy -c 'Add :moduleDict:type integer 4' "${HOME}/Library/Preferences/ByHost/com.apple.screensaver.${UUID}.plist"
sudo /usr/libexec/PlistBuddy -c 'Add :DisplayType string Photos' "${HOME}/Library/Preferences/ByHost/com.apple.screensaver.Default.${UUID}.plist"
#+ Lockfile
sudo /bin/rm -Rf "${HOME}/Library/Preferences/com.apple.screensaver.plist.lockfile"
sudo /bin/rm -Rf "${HOME}/Library/Preferences/ByHost/com.apple.screensaver.${UUID}.plist.lockfile"
sudo /bin/rm -Rf "${HOME}/Library/Preferences/ByHost/com.apple.screensaver.Default.${UUID}.plist.lockfile"
exit 0
| true
|
9c7f5131ec780f4c2a88bd797726fd3efa6ed2a6
|
Shell
|
waagsociety/makingradio_backend
|
/ansible/deploy_app.sh
|
UTF-8
| 905
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
MY_USER=${1}
GIT_DIR="${2}"
DEST_DIR="${3}"
REPO="${4}"
APP_DIR=${GIT_DIR}/app
if [ ! -d "${GIT_DIR}" ]
then
sudo su $MY_USER -c "git clone ${REPO} ${GIT_DIR}"
elif sudo su $MY_USER -c "git -C ${GIT_DIR} remote -v update" 2>&1 | grep master | grep 'origin/master' | grep 'up to date' >/dev/null
then
echo "Code not changed"
exit 0
fi
cd ${GIT_DIR};
if ! sudo su $MY_USER -c "git pull"
then
echo "ERROR pulling"
exit 1
fi
cd ${APP_DIR}
if ! sudo su $MY_USER -c "npm prune && npm install"
then
echo "Error running npm install"
exit 1
fi
sudo su $MY_USER -c "rm -rf ./build"
if ! sudo su $MY_USER -c "npm run build"
then
echo "Error running npm build"
exit 1
fi
if [ -d "${DEST_DIR}" ]
then
sudo rm -rf ${DEST_DIR}
fi
sudo mkdir ${DEST_DIR}
sudo cp -r ${APP_DIR}/build/* ${DEST_DIR}
sudo chown -R www-data:www-data ${DEST_DIR}
sudo chmod -R ug-w,o-rwx ${DEST_DIR}
| true
|
c5946bbcc8fd80f92ad57ee915606c7fd6e0686d
|
Shell
|
zhongtouwang2019/alphaBetaLab
|
/examples/ww3/regularMesh/wwiiiRunCFSR/qsubSync.sh
|
UTF-8
| 682
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#script to submit to a pbs cluster a job and wait for its conclusion
cmd=$1;
sleepingTime=60; # 1 minutes
date;
echo "submitting command "$cmd;
jobId=`qsub $cmd`;
err=$?;
if [ $err != 0 ]
then
echo "error submitting the job. Quitting";
exit;
fi
echo 'job id: '$jobId
echo;
while true;
do
qout=`qstat $jobId`;
err=$?;
if [ $err == 0 ]
then
dt=`date`;
#echo -en "\e[1A";
#echo -e "\e[0K\r"$dt': the job is still running. sleeping for '$sleepingTime' seconds';
echo -ne "\r"$dt': the job is still running. sleeping for '$sleepingTime' seconds';
sleep $sleepingTime;
else
echo
echo "job finished. Quitting"
break;
fi
done
| true
|
128f39114d5de0f2bbcc147a6b3a4c3110dea6b6
|
Shell
|
hantmac/Install_or_Update_Go_Automatically
|
/installOrUpdateGo.sh
|
UTF-8
| 595
| 3.40625
| 3
|
[] |
no_license
|
# !/bin/bash
if [ -z "$1" ]; then
echo "usage: ./install.sh go-package.tar.gz"
exit
fi
if [ -d "/usr/local/go" ]; then
echo "Uninstalling old go version..."
sudo rm -rf /usr/local/go
echo "updating..."
sudo tar -C /usr/local -xzf $1
else
echo "Installing..."
sudo tar -C /usr/local -xzf $1
echo export GOPATH=/go >> /etc/profile
echo export GOROOT=/usr/local/go >> /etc/profile
echo export PATH=$PATH:$GOROOT/bin:$GOPATH/bin1 >> /etc/profile
fi
source /etc/profile
echo "go version:"
go version
rm -rf $1
echo "Done"
| true
|
df842c1d2be657276de39e909820e0c2d97912ac
|
Shell
|
CHENNA-REDDY12/docker-rabbitmq
|
/scripts/write-rabbitmq-config.sh
|
UTF-8
| 978
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
#
RABBITMQ_PORT=${RABBITMQ_PORT:-5672}
INET_DIST_LISTEN_MIN=${INET_DIST_LISTEN_MIN:-55950}
INET_DIST_LISTEN_MAX=${INET_DIST_LISTEN_MAX:-55954}
# For reference: https://www.rabbitmq.com/configure.html
cat > /etc/rabbitmq/rabbitmq.config <<EOF
[
{rabbit, [{default_user, <<"$RABBITMQ_USER">>},
{default_pass, <<"$RABBITMQ_PASS">>},
{default_permissions, [<<".*">>, <<".*">>, <<".*">>]},
{tcp_listeners, [${RABBITMQ_PORT}]},
{reverse_dns_lookups, true},
{cluster_partition_handling, pause_minority},
{log_levels, [
{connection, info},
{mirroring, info},
{federation, info}
]},
{loopback_users, []}
]},
{kernel, [{inet_dist_listen_min, $INET_DIST_LISTEN_MIN},
{inet_dist_listen_max, $INET_DIST_LISTEN_MAX}
]}
].
EOF
exit 0
| true
|
ec26bf1ac82fbcaeed616bb338c8b8a74ea81af2
|
Shell
|
Sadashiv/interview_questions
|
/shellscript/while.sh
|
UTF-8
| 405
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
#while.sh Show the use of while loop
set -x
answer=y
while [ "$answer" = "y" ]
do
echo "Enter the code and description" >/dev/tty
read code description
echo "$code | $description" >> newlist
echo "Enter any mode (y/n):\c" >/dev/tty
read anymore
case $anymore in
y*|Y*) answer=y;;
# sleep 60
n*|N*) answer=n;;
*) answer=y;;
esac
done
| true
|
4e54dc4d5ef55ce18d8ee1e32e4b9ea5a260b7c5
|
Shell
|
maxeasy2/repository
|
/nginx/docker.run
|
UTF-8
| 470
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
NGINX_VOLUME_PATH=/develop/docker
IMAGE_NAME=firesh/nginx-lua
#IMAGE_NAME=nginx:alpine
#IMAGE_NAME=nginx:latest
ALPINE_BASH_INSTALL=${1:-N}
docker run -d --name nginx --rm -p 80:80 \
-v ${NGINX_VOLUME_PATH}/nginx/resource:/usr/share/nginx \
-v ${NGINX_VOLUME_PATH}/nginx/conf.d:/etc/nginx/conf.d \
-v ${NGINX_VOLUME_PATH}/nginx/logs:/var/log/nginx \
${IMAGE_NAME}
if [ $ALPINE_BASH_INSTALL == 'Y' ]; then
docker exec -it nginx apk add --no-cache bash
fi
| true
|
0b3876e227f57554dcd1750851a48c2422a08ab9
|
Shell
|
aep/vag_reverse_engineering
|
/emulator/binrelease.sh
|
UTF-8
| 1,446
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh
THIS=$(dirname $(readlink -f $0))
set -ex
VER=0.8.5
APP=emulator
rm -rf target/binrelease
mkdir -p target/binrelease
b(){
staticpie=$1
name=$2
rusttarget=$3
gcctarget=$4
export PATH=$PATH:/opt/toolchains/$gcctarget/bin/
export TARGET=${gcctarget}
export TARGET_CC="/opt/toolchains/$gcctarget/bin/${gcctarget}-gcc"
export TARGET_AR="/opt/toolchains/$gcctarget/bin/${gcctarget}-ar"
if $staticpie
then
export RUSTFLAGS="$RUSTFLAGS -C linker=rust-musl-cc -C link-arg=-static -C link-arg=-pie"
else
export RUSTFLAGS="$RUSTFLAGS -C linker=/opt/toolchains/$gcctarget/bin/${gcctarget}-gcc"
fi
cargo +nightly build --target $rusttarget --release
cd $THIS
cp target/$rusttarget/release/$APP target/binrelease/$APP-$VER-$name
#/opt/toolchains/$gcctarget/bin/$gcctarget-strip target/binrelease/$APP-$VER-$name
}
#b staticpie name rust-target gcc-target
#b false mips-linux-musleabi mips-unknown-linux-musl mips-linux-musleabi
b false arm-linux-gnueabihf arm-unknown-linux-gnueabihf arm-linux-gnueabihf
#b false arm-linux-androideabi armv7-linux-androideabi arm-linux-androideabi
#b false mipsel-linux-musleabi mipsel-unknown-linux-musl mipsel-linux-musleabi
#b true x86_64-linux x86_64-unknown-linux-musl x86_64-linux-musl
| true
|
cecc4dfe25bb763f7b0f68868e6e7cc9afa2b08e
|
Shell
|
ufcg-lsd/saps-scripts
|
/input-downloader/ufcg-default/get-station-data.sh
|
UTF-8
| 1,302
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
IMAGE_NAME=$1
IMAGES_DIR_PATH=$2
IMAGE_MTL_PATH=$IMAGES_DIR_PATH/$IMAGE_NAME"_MTL.txt"
IMAGE_STATION_FILE_PATH=$IMAGES_DIR_PATH/$IMAGE_NAME"_station.csv"
# Global variables
SANDBOX=$(pwd)
SEBAL_DIR_PATH=$SANDBOX/SEBAL
CONF_FILE=sebal.conf
LIBRARY_PATH=/usr/local/lib
BOUNDING_BOX_PATH=example/boundingbox_vertices
LOG4J_PATH=$SEBAL_DIR_PATH/log4j.properties
# This function calls a java code to prepare a station file of a given image
function getStationData {
cd $SEBAL_DIR_PATH
echo "Pre Process Parameters: $IMAGE_NAME $IMAGES_DIR_PATH/ $IMAGE_MTL_PATH $IMAGES_DIR_PATH/ 0 0 9000 9000 1 1 $SEBAL_DIR_PATH/$BOUNDING_BOX_PATH $SEBAL_DIR_PATH/$CONF_FILE"
java -Xdebug -Xrunjdwp:server=y,transport=dt_socket,address=4000,suspend=n -Dlog4j.configuration=file:$LOG4J_PATH -Djava.library.path=$LIBRARY_PATH -cp target/SEBAL-0.0.1-SNAPSHOT.jar:target/lib/* org.fogbowcloud.sebal.PreProcessMain $IMAGE_NAME $IMAGES_DIR_PATH/ $IMAGE_MTL_PATH $IMAGES_DIR_PATH/ 0 0 9000 9000 1 1 $SEBAL_DIR_PATH/$BOUNDING_BOX_PATH $SEBAL_DIR_PATH/$CONF_FILE
mv $IMAGES_DIR_PATH/$IMAGE_NAME/$IMAGE_NAME"_station.csv" $IMAGES_DIR_PATH
cd $IMAGES_DIR_PATH
rm -r $IMAGE_NAME
cd $SEBAL_DIR_PATH
chmod 777 $IMAGE_STATION_FILE_PATH
echo -e "\n" >> $IMAGE_STATION_FILE_PATH
cd ..
}
getStationData
| true
|
8d223d3545928c4764038ac37ab9f128a2aea711
|
Shell
|
jonesry/coreos-offline-installer
|
/src/installer
|
UTF-8
| 4,157
| 4.375
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/bash
# Getting list of all hard disks
ALLDEVICES="$(ls -1 /dev/sd?)"
DEVICES=""
INSTALLATIONDEVICE=""
CLOUDCONFIGFILE="cloud-config.yml"
IGNITIONFILE="coreos-install.json"
clear
echo "Please choose the device for CoreOS installation."
select choice in ${ALLDEVICES} cancel
do
case "${choice}" in
cancel) echo "The installation of coreos is canceled."; exit 0 ;;
"") echo "Invalid selection" ;;
*) clear
echo "You choose ${choice} for installation."
echo ""
INSTALLATIONDEVICE=$choice
break ;;
esac
done
# remove the installation device from the list
# to avod that it reformat by mistake in the last step ;-)
for device in ${ALLDEVICES}
do
if [[ "${device}" != "${INSTALLATIONDEVICE}" ]]; then
DEVICES="${DEVICES} ${device}"
fi
done
echo "Do you want to use a cloud config or an ignition file?"
echo -e "Type 'c' for cloud config or 'i' for ignition. \c"
read answer
if [[ "${answer}" = "C" || "${answer}" = "c" ]] ; then
echo -e "\nDo you want to use your own cloud config file?"
echo "If you let it blank or choose no the default cloud-config file will be used."
echo -e "(y/N) > \c"
read answer
if [[ "${answer}" = "y" || "${answer}" = "Y" || "${answer}" = "yes" || "${answer}" = "Yes" ]] ; then
echo -e "\nPlease enter the full path to youre file."
echo -n "> "
read CLOUDCONFIGFILE
fi
echo "Begin installation."
./coreos-offline-install -d "${INSTALLATIONDEVICE}" -c "${CLOUDCONFIGFILE}"
elif [[ "${answer}" = "I" || "${answer}" = "i" ]]; then
echo -e "\nDo you want to use your own ignition file? "
echo "If you let it blank or choose 'no' the default ignition file will be used."
echo -e "(y/N) > \c"
read answer
if [[ "${answer}" = "y" || "${answer}" = "Y" || "${answer}" = "yes" || "${answer}" = "Yes" ]] ; then
echo -e "\n Please enter the full path to youre file."
echo -n "> "
read IGNITIONFILE
fi
echo "Begin installation."
./coreos-offline-install -d "${INSTALLATIONDEVICE}" -i "${IGNITIONFILE}"
else
echo "You didn't choose a configuration file for the coreos installation."
echo "Because it is recomented to use one we abort here."
echo "All changes on the hard drives will be persist."
echo "To install coreos use the script coreos-offline-install."
exit 1
fi
# Adding files
echo -e "\n\nType enter to continue.\c"
read nothing
clear
echo "It is possible to add additional files to your coreos installation."
echo "You can specify a directory and all the files in it"
echo "will be copied in the /app directory on the coreos root partition."
echo -e "Do you want to add additional files (y/N)? \c"
read answer
if [[ "${answer}" = "y" || "${answer}" = "Y" || "${answer}" = "yes" || "${answer}" = "Yes" ]] ; then
echo "Please specify your source directory (like /media/setup/app_install):"
echo -e "> \c"
read directory
if [[ -n "${directory}" && "${directory}" == /* ]] ; then
./addfiles -d "${INSTALLATIONDEVICE}" -a "${directory}"
else
echo "The path \"${directory}\" didn't start with a / or you let it empty."
echo "No files will copied to the hard disk."
fi
fi
# Foramt additional hard drives
echo -e "\n\nType enter to continue.\c"
read nothing
clear
echo "You can use this script to format additional devices."
echo "The script delete all partition indormation from the device"
echo "and create a new partition with the full size of the device."
echo "The partition will be format with an ext4 filesystem."
echo "All datas on these will be lost!"
echo -e "Do you want to format additional devices (y/N)? \c"
read answer
if [[ "${answer}" = "y" || "${answer}" = "Y" || "${answer}" = "yes" || "${answer}" = "Yes" ]] ; then
newdevice="yes"
while [ "${newdevice}" == "yes" ]
do
./formating-additional-disks "${DEVICES}"
echo "Do you want to format additional devices?"
select choice in yes no
do
case "${choice}" in
yes) newdevice="yes"; clear; break ;;
no) newdevice="no"; break ;;
"") echo "Invalid selection" ;;
esac
done
done
fi
echo "Your coreos istallation is ready for use."
| true
|
6f357a48a7cfaa994126211ba5c334b625cff4aa
|
Shell
|
ocaml/ocaml-ci-scripts
|
/.travis-opam.sh
|
UTF-8
| 1,029
| 2.5625
| 3
|
[
"ISC"
] |
permissive
|
( set +x; echo -en "travis_fold:start:prepare.ci\r"; ) 2>/dev/null
# If a fork of these scripts is specified, use that GitHub user instead
fork_user=${FORK_USER:-ocaml}
# If a branch of these scripts is specified, use that branch instead of 'master'
fork_branch=${FORK_BRANCH:-master}
### Bootstrap
set -uex
get() {
wget https://raw.githubusercontent.com/${fork_user}/ocaml-ci-scripts/${fork_branch}/$@
}
test "$TRAVIS_REPO_SLUG" = "ocaml/ocaml-ci-scripts" || \
get .travis-ocaml.sh
sh .travis-ocaml.sh
source .travis-ocaml.env
export OPAMYES=1
eval $(opam config env)
opam depext -y conf-m4
if [ "$TRAVIS_REPO_SLUG" = "ocaml/ocaml-ci-scripts" ] ; then
opam pin add travis-opam --kind=path .
else
opam pin add travis-opam https://github.com/${fork_user}/ocaml-ci-scripts.git#${fork_branch}
fi
cp ~/.opam/$(opam switch show)/bin/ci-opam ~/
opam remove -a travis-opam
mv ~/ci-opam ~/.opam/$(opam switch show)/bin/ci-opam
( set +x; echo -en "travis_fold:end:prepare.ci\r" ) 2>/dev/null
opam config exec -- ci-opam
| true
|
feccbd5b2accf1fe9b27cce9f2b98e27b0ad68fc
|
Shell
|
mano7onam/kotlin-native
|
/samples/gtk/build.sh
|
UTF-8
| 968
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}" )" && pwd )
source "$DIR/../konan.sh"
IPREFIX_macbook=-I/opt/local/include
IPREFIX_linux=-I/usr/include
if [ x$TARGET == x ]; then
case "$OSTYPE" in
darwin*) TARGET=macbook ;;
linux*) TARGET=linux ;;
*) echo "unknown: $OSTYPE" && exit 1;;
esac
fi
var=IPREFIX_${TARGET}
IPREFIX="${!var}"
mkdir -p $DIR/build/c_interop/
mkdir -p $DIR/build/bin/
echo "Generating GTK stubs, may take few mins depending on the hardware..."
cinterop -J-Xmx8g -compilerOpts "$IPREFIX/atk-1.0 $IPREFIX/gdk-pixbuf-2.0 $IPREFIX/cairo $IPREFIX/pango-1.0 \
-I/opt/local/lib/glib-2.0/include $IPREFIX/gtk-3.0 $IPREFIX/glib-2.0" \
-def $DIR/src/main/c_interop/gtk3.def -target $TARGET -o $DIR/build/c_interop/gtk3 || exit 1
konanc -target $TARGET $DIR/src/main/kotlin -library $DIR/build/c_interop/gtk3 \
-o $DIR/build/bin/Gtk3Demo || exit 1
echo "Artifact path is $DIR/build/bin/Gtk3Demo.kexe"
| true
|
b617da964219b90b0b9bfa228c2e4cf9c64aa736
|
Shell
|
dabapps/django-db-queue-exports
|
/publish
|
UTF-8
| 512
| 3.234375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
set -e
rm -rf dist/
env/bin/pip install --upgrade setuptools wheel
env/bin/python setup.py sdist bdist_wheel
echo "Package built for distribution successfully..."
read -p "Do you wish to publish the package to the Python Package Index? (If no, the package will be published to the TEST PyPi) - or press ctrl+C to cancel " yn
env/bin/pip install twine
case $yn in
[Yy]* ) env/bin/twine upload dist/*;;
* ) env/bin/twine upload --repository-url https://test.pypi.org/legacy/ dist/*;;
esac
| true
|
758448b61f63ffd0e7d2237704c5bdecaea3791c
|
Shell
|
magodo/docker_practice
|
/postgresql/scripts/ha-pitr-archive-external/witness_main.sh
|
UTF-8
| 11,517
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
#########################################################################
# Author: Zhaoting Weng
# Created Time: Thu 09 Aug 2018 08:26:13 PM CST
# Description:
#########################################################################
MYDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"&& pwd)"
MYNAME="$(basename "${BASH_SOURCE[0]}")"
# shellcheck disable=SC1090
. "$MYDIR"/../common.sh
# shellcheck disable=SC1090
. "$MYDIR"/../config.sh
#########################################################################
# action: start
#########################################################################
usage_start() {
cat << EOF
Usage: start [option] [primary_container] [standby_container]
Options:
-h, --help
-i, --init setup primary and standby before start
-s, --sync use sync replication instead of async
EOF
}
do_start() {
local sync_opt="--async"
while :; do
case $1 in
-h|--help)
usage_start
exit 0
;;
-i|--init)
local init=1
;;
-s|--sync)
sync_opt="--sync"
;;
--)
shift
break
;;
*)
break
;;
esac
shift
done
local primary=$1
local standby=$2
[[ -z $primary ]] && die "missing param: primary"
[[ -z $standby ]] && die "missing param: standby"
if [[ $init = 1 ]]; then
primary_host=$(docker exec $primary hostname)
standby_host=$(docker exec $standby hostname)
docker exec ha_p0_1 bash -c "$(cat << EOF
mkdir -p "$BASEBACKUP_DIR"
mkdir -p "$ARCHIVE_DIR"
chown -R postgres:postgres "$BACKUP_ROOT"
EOF
)"
docker exec $primary "$SCRIPT_ROOT"/ha/main.sh setup -r primary -p $standby_host ${sync_opt}
docker exec $primary "$SCRIPT_ROOT"/ha/main.sh start -w
# setup standby needs a running primary (for basebackup)
docker exec $standby "$SCRIPT_ROOT"/ha/main.sh setup -r standby -p $primary_host
# here we doesn't wait because the semantic of "wait" in pg_ctl(v9.6) means that the server could accept connection,
# which is not the case for the warm standby.
docker exec $standby "$SCRIPT_ROOT"/ha/main.sh start
# do a initial basebackup, so that we can do pitr from beginning
do_basebackup "$primary"
else
docker exec $primary "$SCRIPT_ROOT"/ha/main.sh start
docker exec $standby "$SCRIPT_ROOT"/ha/main.sh start
fi
}
#########################################################################
# action: failover
#########################################################################
usage_failover() {
cat << EOF
Usage: failover [option] [primary_container] [standby_container]
Description: configure network so that VIP is bound to standby, then promote standby as primary.
Options:
-h, --help
-p, --project docker-compose project
EOF
}
do_failover() {
local project
while :; do
case $1 in
-h|--help)
usage_failover
exit 0
;;
-p|--project)
project=$2
shift
;;
--project=?*)
project=${1#*=}
;;
--)
shift
break
;;
*)
break
;;
esac
shift
done
local primary=$1
local standby=$2
[[ -z $project ]] && die "missing param: project"
[[ -z $primary ]] && die "missing param: primary"
[[ -z $standby ]] && die "missing param: standby"
docker network disconnect ${project}_external_net "$primary"
docker network connect --ip "$VIP" ${project}_external_net "$standby"
docker exec "$standby" "$SCRIPT_ROOT"/ha/main.sh promote
}
#########################################################################
# action: failback
#########################################################################
usage_failback() {
cat << EOF
Usage: failback [option] [failbackup_container]
Options:
-h, --help
EOF
}
do_failback() {
local project
while :; do
case $1 in
-h|--help)
usage_failback
exit 0
;;
--)
shift
break
;;
*)
break
;;
esac
shift
done
local failback_container=$1
[[ -z $failback_container ]] && die "missing param: failback_container"
docker exec "$failback_container" "$SCRIPT_ROOT"/ha/main.sh rewind
}
#########################################################################
# action: sync_switch
#########################################################################
usage_sync_switch() {
cat << EOF
Usage: sync_switch [option] [primary_container] [sync|async]
Description: switch replication mode between sync and async on primary.
Options:
-h, --help
EOF
}
do_sync_switch() {
while :; do
case $1 in
-h|--help)
usage_sync_switch
exit 0
;;
--)
shift
break
;;
*)
break
;;
esac
shift
done
local primary=$1
local mode=$2
[[ -z $primary ]] && die "missing param: primary_container"
[[ -z $mode ]] && die "missing param: repl_mode"
docker exec "$primary" "$SCRIPT_ROOT"/ha/main.sh sync_switch $mode
}
#########################################################################
# basebackup
#########################################################################
usage_basebackup() {
cat << EOF
Usage: basebackup [option] [primary_container]
Description: make a basebackup on primary cluster
Options:
-h, --help
EOF
}
do_basebackup() {
while :; do
case $1 in
-h|--help)
usage_basebackup
exit 1
;;
--)
shift
break
;;
*)
break
;;
esac
shift
done
local primary=$1
docker exec "$primary" "$SCRIPT_ROOT"/ha/main.sh basebackup
}
#########################################################################
# recover
#########################################################################
usage_recover() {
cat << EOF
Usage: recover [option] [-t datetime | -p recover_point] [primary_container] [standby_container]
Description: recover to a specified datetime or recovery point (created beforehead)
Options:
-h, --help
-t datetime recover to datetime specified
-p recover_point recover to recovery point specified (which is created before head)
EOF
}
do_recover() {
while :; do
case $1 in
-h|--help)
usage_recover
exit 1
;;
--)
shift
break
;;
-t)
shift
point_options=("-t" "$1")
recovery_datetime=$1
;;
-p)
shift
point_options=("-p" "$1")
recovery_point=$1
;;
*)
break
;;
esac
shift
done
local primary=$1
local standby=$2
if [[ -z "$recovery_datetime" ]] && [[ -z "$recovery_point" ]]; then
die "missing paramter: -t / -p"
fi
# do pitr for both underlying db
info "find nearest basebackup..."
this_basebackup_dir="$(docker exec ha_p0_1 "$SCRIPT_ROOT"/ha/main.sh nearest_basebackup "${point_options[@]}")" || die "can't find any basebackup earliear than specified recover time/point: ${point_options[*]}"
info "nearest basebackup is: $this_basebackup_dir"
info "recover for primary db"
docker exec "$primary" "$SCRIPT_ROOT"/ha/main.sh recover "${point_options[@]}" "$this_basebackup_dir" || die "failed to recover for primary"
info "remake standby"
primary_host=$(docker exec "$primary" hostname)
# setup standby needs a running primary (for basebackup)
docker exec $standby "$SCRIPT_ROOT"/ha/main.sh stop
docker exec $standby "$SCRIPT_ROOT"/ha/main.sh setup -r standby -p "$primary_host"
docker exec $standby "$SCRIPT_ROOT"/ha/main.sh start
}
#########################################################################
# create_recovery_point
#########################################################################
usage_create_recovery_point() {
cat << EOF
Usage: create_recovery_point [option] [point_name]
Description: create a recovery point (to be used by pitr)
Options:
-h, --help
EOF
}
do_create_recovery_point() {
while :; do
case $1 in
-h|--help)
usage_create_recovery_point
exit 1
;;
--)
shift
break
;;
*)
break
;;
esac
shift
done
local name=$1
psql -d "postgresql://$SUPER_USER:$SUPER_PASSWD@$VIP" -c "select pg_create_restore_point('$name')" || die "failed to create restore point"
# insert a mapping from name -> timestamp
# this is because when recovering by restore point, we still need the timestamp to find the nearest baseabckup
docker exec ha_p0_1 bash -c "echo $name,$(date +%s) >> $RUNTIME_INFO_RECOVERY_POINT_MAP_FILE"
}
#########################################################################
# main
#########################################################################
usage() {
cat << EOF
Usage: ./${MYNAME} [option] [action]
Options:
-h, --help
Actions:
start start primary and standby
failover remove primary from current network and promote current standby as new primary
failback revoke previous primary as standby following new primary
sync_switch switch replication mode between sync and async
basebackup do basebackup
recover point-in-time recovery
create_recovery_point create a recovery point (used to do pitr later)
EOF
}
main() {
while :; do
case $1 in
-h|--help)
usage
exit 0
;;
--)
shift
break
;;
*)
break
;;
esac
shift
done
local action="$1"
shift
case $action in
"start")
do_start "$@"
;;
"failover")
do_failover "$@"
;;
"failback")
do_failback "$@"
;;
"sync_switch")
do_sync_switch "$@"
;;
"basebackup")
do_basebackup "$@"
;;
"recover")
do_recover "$@"
;;
"create_recovery_point")
do_create_recovery_point "$@"
;;
*)
die "Unknwon action: $action!"
;;
esac
exit 0
}
main "$@"
| true
|
14ba8cd31f400cc8d082592e47726276ca3bba8a
|
Shell
|
restartus/global-lmic-reports-orderly
|
/scripts/publish_meffs
|
UTF-8
| 924
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
DOCS_DIR=gh-meffs
VERSION=$(git rev-parse --short HEAD)
REMOTE_URL=git@github.com:mrc-ide/global-lmic-meffs.git
TODAY=$(date "+%Y-%m-%d")
DATE=${1:-$TODAY}
export GIT_SSH_COMMAND="ssh -i ../.ssh/meffs/id_rsa"
git -C ${DOCS_DIR} config user.email "oj.watson@hotmail.co.uk"
git -C ${DOCS_DIR} config user.name "OJWatson"
if [ ! -d "${DOCS_DIR}/.git" ]; then
git init ${DOCS_DIR}
echo "init again"
git -C ${DOCS_DIR} add .
git -C ${DOCS_DIR} commit --no-verify -m "Update server results for ${DATE} at version ${VERSION} "
git -C ${DOCS_DIR} remote add origin ${REMOTE_URL}
git -C ${DOCS_DIR} push --force -u origin master
else
git -C ${DOCS_DIR} pull
echo "pull"
git -C ${DOCS_DIR} add .
git -C ${DOCS_DIR} commit --no-verify -m "Update server results for ${DATE} at version ${VERSION} "
#git -C ${DOCS_DIR} remote add origin ${REMOTE_URL}
git -C ${DOCS_DIR} push
fi
| true
|
616b6df44ab9e6a01fa56cf3fa37432bdde746ac
|
Shell
|
MichaelDBrown/pcf-concourse-pipelines
|
/tasks/config-healthwatch/task.sh
|
UTF-8
| 3,068
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
if [[ $DEBUG == true ]]; then
set -ex
else
set -e
fi
chmod +x om-cli/om-linux
OM_CMD=./om-cli/om-linux
chmod +x ./jq/jq-linux64
JQ_CMD=./jq/jq-linux64
PRODUCT_PROPERTIES=$(
echo "{}" |
$JQ_CMD -n \
--arg opsmanager_url "$OPS_MGR_HOST" \
--argjson mysql_skip_name_resolve "$MYSQL_SKIP_NAME_RESOLVE" \
--arg foundation_name "$HEALTHWATCH_FOUNDATION_NAME" \
--argjson ingestor_instances "$HEALTHWATCH_FORWARDER_INGESTOR_INSTANCES" \
--argjson loader_instances "$HEALTHWATCH_FORWARDER_LOADER_INSTANCES" \
--argjson canary_instances "$HEALTHWATCH_FORWARDER_CANARY_INSTANCES" \
--argjson bosh_health_instances "$HEALTHWATCH_FORWARDER_BOSH_HEALTH_INSTANCES" \
--argjson bosh_tasks_instances "$HEALTHWATCH_FORWARDER_BOSH_TASKS_INSTANCES" \
--argjson cli_instances "$HEALTHWATCH_FORWARDER_CLI_INSTANCES" \
--argjson opsman_instances "$HEALTHWATCH_FORWARDER_OPSMAN_INSTANCES" \
--arg health_check_az "$HEALTHWATCH_FORWARDER_HEALTHCHECK_AZ" \
'
. +
{
".mysql.skip_name_resolve": {
"value": $mysql_skip_name_resolve
},
".healthwatch-forwarder.ingestor_instance_count": {
"value": $ingestor_instances
},
".healthwatch-forwarder.loader_instance_count": {
"value": $loader_instances
},
".healthwatch-forwarder.canary_instance_count": {
"value": $canary_instances
},
".healthwatch-forwarder.boshhealth_instance_count": {
"value": $bosh_health_instances
},
".healthwatch-forwarder.boshtasks_instance_count": {
"value": $bosh_tasks_instances
},
".healthwatch-forwarder.cli_instance_count": {
"value": $cli_instances
},
".healthwatch-forwarder.opsman_instance_count": {
"value": $opsman_instances
},
".healthwatch-forwarder.health_check_az": {
"value": $health_check_az
},
".healthwatch-forwarder.opsmanager_url": {
"value": $opsmanager_url
}
} +
if $foundation_name != "" then
{
".healthwatch-forwarder.foundation_name": {
"value": $foundation_name
}
}
else .
end
'
)
PRODUCT_NETWORK=$(
echo "{}" |
$JQ_CMD -n \
--arg singleton_jobs_az "$SINGLETON_JOBS_AZ" \
--arg other_azs "$OTHER_AZS" \
--arg network_name "$NETWORK_NAME" \
--arg service_network_name "$SERVICE_NETWORK_NAME" \
'. +
{
"singleton_availability_zone": {
"name": $singleton_jobs_az
},
"other_availability_zones": ($other_azs | split(",") | map({name: .})),
"network": {
"name": $network_name
},
"service_network": {
"name": $services_network_name
}
}
'
)
# network must be configured first, so make two separate om calls
$OM_CMD -t https://$OPS_MGR_HOST -u $OPS_MGR_USR -p $OPS_MGR_PWD -k configure-product -n $PRODUCT_IDENTIFIER -pn "$PRODUCT_NETWORK"
$OM_CMD -t https://$OPS_MGR_HOST -u $OPS_MGR_USR -p $OPS_MGR_PWD -k configure-product -n $PRODUCT_IDENTIFIER -p "$PRODUCT_PROPERTIES"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.