blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
43633f70dcbf65d33d688676a32cf9ee20e8d2e5
|
Shell
|
forgottenswitch/concurrent.sh
|
/peach.sh
|
UTF-8
| 484
| 3.203125
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
# Parallel-each
# (e.g. the same as `make -jN`)
#
# Demo driver for concurrent.sh: runs eight jobs, at most $peach_n_max at a
# time, then reports the recorded exit status of the first two jobs.
. ./concurrent.sh
job_prepare_tempdir
#
# Main
#
# work_n <name>: sample job that logs begin/end around a 1-second sleep and
# records exit status 0 through the framework.
# NOTE(review): `local` is not POSIX sh, though dash/ash/bash all support it.
work_n() {
local name="$1" ; shift
echo "job $name begin"
sleep 1
echo "job $name end"
job_yield_status 0
}
# Run at most 4 jobs concurrently.
peach_n_max=4
# Each non-empty line of the string is executed as one job by peach_lines.
peach_lines '
work_n 1
work_n 2
work_n 3
work_n 4
work_n 5
work_n 6
work_n 7
work_n 8
'
echo All done
# Query the stored per-job exit statuses; job ids appear to be named
# "peachjob_<n>" by peach_lines -- TODO confirm against concurrent.sh.
for n in 1 2
do
job_id="peachjob_$n"
echo "job '$job_id' exit status was '$(job_yielded_status "$job_id")'"
done
| true
|
f02097b3bbb7392e618b7039e87c9391d1400206
|
Shell
|
jerremyfly/RTCS
|
/rtcs-room/script/InstallServer.sh
|
UTF-8
| 928
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# scriptname: InstallServer
# For usage problems, contact the maintainer (original note: 靖哥哥).
# Create the installation directories.
sudo mkdir /opt/VideoConsultation
sudo mkdir /opt/VideoConsultation/record
echo "正在安装,请稍候-------------------"
# Copy the application artifacts and helper scripts into the install dir.
sudo cp application.properties rtcscoreserver.tar.gz rtcs-room-1.2.4.jar libRTCSAuth.so RunServer.sh CloseServer.sh RunConsultationServer.sh UninstallServer.sh /opt/VideoConsultation
sudo chmod +x /opt/VideoConsultation/*.sh
# Load the bundled docker image.
sudo docker load < /opt/VideoConsultation/rtcscoreserver.tar.gz
# Configure start-on-boot via a SysV init script.
sudo cp /opt/VideoConsultation/RunConsultationServer.sh /etc/init.d
sudo chmod +x /etc/init.d/RunConsultationServer.sh
sudo chown root:root /etc/init.d/RunConsultationServer.sh
cd /etc/init.d
sudo update-rc.d RunConsultationServer.sh defaults
echo "-----------------------------------"
echo "-------安装成功,请重启服务器-------"
echo "-----------------------------------"
| true
|
b17cce6bd0c0c7c82aa3656b397ad3685955d142
|
Shell
|
CuriouslyCurious/dotfiles
|
/polybar/.config/polybar/pkg.sh
|
UTF-8
| 395
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Polybar module: print pending package update counts (repo + AUR) on Arch.
# Prints nothing when there are no updates or on non-Arch systems.
os_name="$(cat /etc/*release | grep -w NAME | awk '{split($0,a,"="); print a[2]}')"
case "$os_name" in
  "\"Arch Linux\"")
    repo_updates=$(checkupdates | wc -l)
    aur_updates=$(trizen -Qua | wc -l)
    total=$((repo_updates + aur_updates))
    #total=$((repo_updates))
    if [[ "$total" != "0" ]]; then
      echo "$repo_updates %{F#5b5b5b}%{F-} $aur_updates"
    fi
    ;;
  "\"NixOS\"")
    : # NixOS: no update count implemented yet
    ;;
  *)
    : # other distros: nothing to do
    ;;
esac
| true
|
fec4e5fa8e249fad2a508be651b235b85ee23082
|
Shell
|
variani/kaggle
|
/avito-context-click/scripts/03-uncompress-input.sh
|
UTF-8
| 176
| 2.828125
| 3
|
[] |
no_license
|
#! /bin/bash
# List the 7z archives in the competition input directory.
# @ http://askubuntu.com/a/586995
#```
# sudo apt-get install dtrx
#```
datadir="data/input"
# Abort rather than operating on the current directory if the dir is missing.
pushd "$datadir" || exit 1
# Glob instead of parsing `ls | grep` (safe with spaces and odd names).
for f in *7z*; do
  [ -e "$f" ] || continue  # no matches: the glob stays literal, skip it
  echo "$f"
done
popd
| true
|
d0e47974e6680a6ea31724b5f3eea1183d51b9d7
|
Shell
|
NCBI-Codeathons/MASQ
|
/applets/manta/test_kat/test_task.sh
|
UTF-8
| 379
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Compile a WDL task with dxWDL and immediately run the resulting DNAnexus
# applet with its test inputs.
TEST_NAME="test_task"
# dxWDL jar as an argument
DXWDL_JAR=$1
# Ensure the destination folder exists on the platform.
dx mkdir -p /Jun/test/tasks/${TEST_NAME}/
# Compile prints the applet id, which xargs feeds to `dx run`.
java -jar ${DXWDL_JAR} \
compile ${TEST_NAME}.wdl -project project-FfG3k9Q97yFygXqZ4B39B2BV \
-inputs ${TEST_NAME}.input.json \
-destination /test/tasks/${TEST_NAME}/ \
--imports ../ -f | xargs dx run -f ${TEST_NAME}.input.dx.json
| true
|
9f9217fe31c2d84124dac6d97ab1b7781a5ffe4d
|
Shell
|
taitulism/take-me-home
|
/leave-home
|
UTF-8
| 618
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# .bashrc
# .inputrc
# .vimrc
# This script restores original files from the backup folder to $HOME (~/),
# then removes the take-me-home checkout entirely.
BACKUPS="$HOME/take-me-home/backups"
# Restore .bashrc
if [ -f "$BACKUPS/original-bashrc.bkp" ] ; then
  rm -f -- ~/.bashrc   # -f: don't fail if the file is already gone
  cp -- "$BACKUPS/original-bashrc.bkp" ~/.bashrc
  source ~/.bashrc     # NB: only affects this script's own shell
fi
# Restore .inputrc
if [ -f "$BACKUPS/original-inputrc.bkp" ] ; then
  rm -f -- ~/.inputrc
  cp -- "$BACKUPS/original-inputrc.bkp" ~/.inputrc
fi
# Restore .vimrc
if [ -f "$BACKUPS/original-vimrc.bkp" ] ; then
  rm -f -- ~/.vimrc
  cp -- "$BACKUPS/original-vimrc.bkp" ~/.vimrc
fi
rm -rf -- ~/take-me-home
echo done.
unset BACKUPS
| true
|
de99160262f7a277c8ea0652ffbbfb5a048fb178
|
Shell
|
jdost/xmonad-config
|
/setup.sh
|
UTF-8
| 2,285
| 4.34375
| 4
|
[] |
no_license
|
#!/bin/bash
# Setup script for the xmonad configuration repo: installs packages (Arch)
# and symlinks configs into $HOME / $XDG_CONFIG_HOME.
# NB: shebang fixed (was "#/bin/sh", missing "!"); bash is required because
# the script uses [[ ]].
set -euo pipefail
export XDG_CONFIG_HOME=${XDG_CONFIG_HOME:-$HOME/.config}
show_help() {
cat <<-HELP
Setup script for xmonad configuration
USAGE: ${0} [command]
commands:
init -- Initialize system with expected packages and linked configs
update -- Updates state of local repo and fixes any drift issues
link -- Create missing links not already defined
HELP
}
# linkIfNot <repo-path> <target>: symlink $PWD/<repo-path> to <target> unless
# the target already exists.  (The original's duplicated branches both reduce
# to a single existence check on the target.)
linkIfNot() {
  if [ ! -e "$2" ]; then
    echo "Linking " "$1"
    ln -s "$PWD/$1" "$2"
  fi
}
link() {
  # Shell/Environment
  linkIfNot environment "$HOME/.local/environment/xmonad"
  linkIfNot "" "$HOME/.xmonad"
  linkIfNot dmrc "$HOME/.dmrc"
  # Per-machine layout, falling back to the default when no host file exists.
  LAYOUT_PATH="lib/machines/$HOSTNAME.hs"
  if [ ! -e "$LAYOUT_PATH" ]; then
    LAYOUT_PATH="lib/machines/Default.hs"
  fi
  linkIfNot "$LAYOUT_PATH" "$HOME/.xmonad/lib/CurrentMachine.hs"
  linkIfNot polybar "$XDG_CONFIG_HOME/polybar"
  # Seed the machine-local polybar config from the example once.
  if [[ ! -e polybar/system ]]; then
    cp polybar/system.example polybar/system
  fi
  mkdir -p "$XDG_CONFIG_HOME/supervisord/config.d/"
  linkIfNot supervisor.d/urxvtd.conf "$XDG_CONFIG_HOME/supervisord/config.d/urxvtd.conf"
  linkIfNot supervisor.d/unclutter.conf "$XDG_CONFIG_HOME/supervisord/config.d/unclutter.conf"
  linkIfNot supervisor.d/polybar.conf "$XDG_CONFIG_HOME/supervisord/config.d/statusbar.conf"
}
install() {
  # Sync the package database, then install only what is missing.
  sudo pacman -Sy
  sudo pacman -S --needed xmonad xmonad-contrib
  sudo pacman -S --needed xorg-xsetroot xdotool
  #sudo pacman -S --needed conky
  #sudo pacman -S --needed dzen2 trayer
  #sudo pacman -S --needed polybar
  sudo pacman -S --needed unclutter
  #sudo pacman -S --needed mpc
}
update() {
  git pull
}
# ${1:-}: a bare "${1}" aborts under `set -u` when no argument is given,
# which is exactly the case this message is meant to handle.
if [ -z "${1:-}" ]; then
  echo "Missing action. Syntax: ${0} [command]"
  echo " Options:"
  echo " init -- installs associated programs and creates all symlinks"
  echo " update -- updates packages associated with repo, creates any new symlinks"
  echo " link -- create symlinks for files (will not overwrite existing files"
  echo ""
  exit 1
fi
case "${1:-}" in
  'init')
    install
    link
    ;;
  'update')
    update
    link
    ;;
  'link')
    link
    ;;
  *)
    show_help
    exit
    ;;
esac
| true
|
859bf26bcd9b299c0a550bf5fd91ba722dd30a9f
|
Shell
|
hjain1462/AdidasApiAutomationTest
|
/run-bdd-test.sh
|
UTF-8
| 532
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run the Cucumber BDD suites (API + UI), then re-run previously failed tests.
# Usage: ./run-bdd-test.sh [-s <maven-settings.xml>]
export URI=https://petstore.swagger.io
# Shared maven arguments; "-s <settings>" is appended when supplied so the
# two invocations cannot drift apart.
MVN_ARGS=(-Dskip.unit.tests=true "-Dcucumber.options=--tags @api,@ui")
if [ "$1" = "-s" ] && [ -n "$2" ] ; then
  MVN_ARGS+=(-s "$2")
fi
mvn -P integration-tests verify "${MVN_ARGS[@]}"
sleep 5s
mvn -P integration-failed-tests verify "${MVN_ARGS[@]}"
| true
|
24f1359efdc15db4f7101871c31e3946701ba697
|
Shell
|
waynr/doks-examples
|
/network-policy/script/down
|
UTF-8
| 758
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Tear down the network-policy demo: delete the client droplet, its SSH key,
# and finally the cluster itself.
set -euo pipefail
# Grab the cluster name with a default of "network-policy-demo".
CLUSTER_NAME=${1:-network-policy-demo}
# Delete the client droplet.
DROPLET_NAME="${CLUSTER_NAME}-client"
DROPLET_ID=$(doctl compute droplet list -ojson | jq --raw-output --arg DROPLET_NAME "${DROPLET_NAME}" '.[] | select(.name == $DROPLET_NAME) | .id')
if [ -n "${DROPLET_ID}" ]; then
  doctl compute droplet delete "${DROPLET_ID}" -f
fi
# Delete the client droplet SSH key.
SSH_KEY_ID="$(doctl compute ssh-key list -ojson | jq --raw-output --arg NAME "${DROPLET_NAME}" '.[] | select(.name == $NAME) | .id')"
if [ -n "${SSH_KEY_ID}" ]; then
  doctl compute ssh-key delete "${SSH_KEY_ID}" -f
fi
# Delete the cluster.
../script/delete-cluster "${CLUSTER_NAME}"
| true
|
d91e1dc64a89b178a0e06878e8027e0a016ecd55
|
Shell
|
zzh8829/dotfiles
|
/homebrew.sh
|
UTF-8
| 2,249
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install command-line tools using Homebrew.
# Ask for the administrator password upfront.
sudo -v
# Keep-alive: update existing `sudo` time stamp until the script has finished.
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
# Check for Homebrew,
# Install if we don't have it
# NOTE(review): `command -v brew` is the portable check; left as-is here.
if test ! $(which brew); then
echo "Installing homebrew..."
bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
fi
# Put brew on PATH for the rest of this script (Apple Silicon prefix).
eval "$(/opt/homebrew/bin/brew shellenv)"
# Make sure we’re using the latest Homebrew.
brew update
# Upgrade any already-installed formulae.
brew upgrade
# Install dev
brew install python
brew install node
# Install some tools
brew install wget
brew install neovim
brew install tmux
brew install tig
brew install p7zip
brew install speedtest_cli
brew install ssh-copy-id
brew install svn
brew install ctags
brew install scmpuff
brew install gpg
brew install tmuxinator
brew install kubectl
brew install ffmpeg
brew install cmake
brew install fzf
brew install ripgrep
brew install jq
brew install helm
brew install mosh
brew install stow
brew install youtube-dl
brew install clang-format
# Install Cask
brew install --cask visual-studio-code
brew install --cask xquartz
brew install --cask bettertouchtool
brew install --cask insomnia
brew install --cask docker
brew install --cask osxfuse && brew install sshfs
brew install --cask google-cloud-sdk
brew install --cask google-chrome
brew install --cask slack
brew install --cask 1password
brew install --cask skim
brew install --cask discord
brew install --cask spotify
brew install --cask vlc
brew install --cask the-unarchiver
brew install --cask grandperspective
brew install --cask steermouse
# brew install --cask swiftdefaultappsprefpane
# brew install --cask macs-fan-control
# Install developer friendly quick look plugins; see https://github.com/sindresorhus/quick-look-plugins
brew install --cask qlcolorcode qlstephen qlmarkdown quicklook-json qlimagesize webpquicklook suspicious-package quicklookase qlvideo
# brew tap homebrew/cask-drivers
# brew install --cask logitech-gaming-software
# brew install --cask razer-synapse
# Remove outdated versions from the cellar.
brew cleanup
| true
|
eb2e18177c70b6536314c28b5e169c611ea4ced5
|
Shell
|
Anthropohedron/dotfiles
|
/.platform/all/ifcommand/az/az-publish-app
|
UTF-8
| 329
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/sh
# Deploy a zip package to an Azure Function App via "config-zip" deployment.
# Usage: az-publish-app <resource group> <app service> <package>
usage () {
echo "Usage: $0 <resource group> <app service> <package>" >&2
exit 1
}
# Require exactly three arguments and a readable package file.
# (Two separate tests instead of the obsolescent `test ... -o ...` form.)
if [ $# -ne 3 ] || [ ! -r "$3" ]
then
usage
fi
RESOURCEGROUP="$1"
APPSERVICE="$2"
PACKAGEFILE="$3"
# exec: replace this shell with az so its exit status is returned directly.
exec az functionapp deployment source config-zip \
--resource-group "$RESOURCEGROUP" \
--name "$APPSERVICE" \
--src "$PACKAGEFILE"
| true
|
c25a23a6556fb8fd8441061d863960ac00a76ebc
|
Shell
|
tangzonghui/feim-20161122
|
/pie-dir/db-img-tool/d_ramdisk/tzh-d-ramdisk.sh
|
UTF-8
| 285
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Unpack a gzip-compressed cpio ramdisk image into ./ramdisk_new for
# inspection.  The original file is kept, renamed to ramdisk-before.img.
# Usage: tzh-d-ramdisk.sh <ramdisk-file>
if [ $# -lt 1 ]; then
  echo "$0 ramdisk-file"
  exit
fi
# Work on a copy; quote "$1" so paths with spaces survive.
cp -- "$1" ramdisk.img.gz
mv -- "$1" ramdisk-before.img
rm -rf ramdisk_new   # options before operands (portable; the original GNU-only order was "rm ramdisk_new -rf")
mkdir ramdisk_new
gunzip ramdisk.img.gz          # produces ramdisk.img
pushd ramdisk_new
cpio -i -F ../ramdisk.img      # extract the cpio archive
popd
rm -rf ramdisk.img.gz ramdisk.img
echo see ramdisk_new
| true
|
89c2e65a7804979d3cc95835dd01325e4ec8cf03
|
Shell
|
klkblake/serval-dna
|
/testframework.sh
|
UTF-8
| 39,290
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Serval Project testing framework for Bash shell
# Copyright 2012 Paul Gardner-Stephen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# This file is sourced by all testing scripts. A typical test script looks
# like this:
#
# #!/bin/bash
# source testframework.sh
# setup() {
# export BLAH_CONFIG=$TFWTMP/blah.conf
# echo "username=$LOGNAME" >$BLAH_CONFIG
# }
# teardown() {
# # $TFWTMP is always removed after every test, so no need to
# # remove blah.conf ourselves.
# }
# doc_feature1='Feature one works'
# test_feature1() {
# execute programUnderTest --feature1 arg1 arg2
# assertExitStatus '==' 0
# assertRealTime --message='ran in under half a second' '<=' 0.5
# assertStdoutIs ""
# assertStderrIs ""
# tfw_cat arg1
# }
# doc_feature2='Feature two fails with status 1'
# setup_feature2() {
# # Overrides setup(), so we have to call it ourselves explicitly
# # here if we still want it.
# setup
# echo "option=specialValue" >>$BLAH_CONFIG
# }
# test_feature2() {
# execute programUnderTest --feature2 arg1 arg2
# assertExitStatus '==' 1
# assertStdoutIs -e "Response:\tok\n"
# assertStderrGrep "^ERROR: missing arg3$"
# }
# runTests "$@"
AWK=awk
SED=sed
GREP=grep
TSFMT='+%Y-%m-%d %H:%M:%S'
SYSTYPE=`uname -s`
if [ $SYSTYPE = "SunOS" ]; then
abspath () { case "$1" in /*)printf "%s\n" "$1";; *)printf "%s\n" "$PWD/$1";; esac; }
AWK=gawk
SED=gsed
GREP=ggrep
fi
if [ $SYSTYPE = "Linux" ]; then
# Get nanosecond resolution
TSFMT='+%Y-%m-%d %H:%M:%S.%N'
fi
usage() {
  # Print the test runner's command-line help text.
  cat <<EOF
Usage: ${0##*/} [options] [--]
Options:
-t, --trace Enable shell "set -x" tracing during tests, output to test log
-v, --verbose Send test log to output during execution
-j, --jobs Run all tests in parallel (by default runs as --jobs=1)
--jobs=N Run tests in parallel, at most N at a time
-E, --stop-on-error Do not execute any tests after an ERROR occurs
-F, --stop-on-failure Do not execute any tests after a FAIL occurs
--filter=PREFIX Only execute tests whose names start with PREFIX
--filter=N Only execute test number N
--filter=M-N Only execute tests with numbers in range M-N inclusive
--filter=-N Only execute tests with numbers <= N
--filter=N- Only execute tests with numbers >= N
--filter=M,N,... Only execute tests with number M or N or ...
EOF
}
# Internal utility for setting shopt variables and restoring their original
# value:
# local oo
# _tfw_shopt oo -s extglob -u extdebug
# ...
# _tfw_shopt_restore oo
# Save the current state of the named shopt options into the variable whose
# name is given as $1, then apply -s/-u to each following option.  The saved
# string is a command list that _tfw_shopt_restore later evals to undo it all.
_tfw_shopt() {
local _var="$1"
shift
local op=s
local restore=
while [ $# -ne 0 ]
do
case "$1" in
-s) op=s;;
-u) op=u;;
*)
# Record the option's current state before changing it.
local opt="$1"
restore="${restore:+$restore; }shopt -$(shopt -q $opt && echo s || echo u) $opt"
shopt -$op $opt
;;
esac
shift
done
# Store the accumulated restore command list in the caller-named variable.
eval $_var='"$restore"'
}
# Undo a prior _tfw_shopt call: eval the restore command list saved in the
# variable whose name is given as $1 (no-op when empty).
_tfw_shopt_restore() {
local _var="$1"
[ -n "${!_var}" ] && eval "${!_var}"
}
declare -a _tfw_running_jobs
declare -a _tfw_job_pgids
# The rest of this file is parsed for extended glob patterns.
_tfw_shopt _tfw_orig_shopt -s extglob
runTests() {
_tfw_stdout=1
_tfw_stderr=2
_tfw_checkBashVersion
_tfw_checkTerminfo
_tfw_invoking_script=$(abspath "${BASH_SOURCE[1]}")
_tfw_suite_name="${_tfw_invoking_script##*/}"
_tfw_cwd=$(abspath "$PWD")
_tfw_tmpdir="${TFW_TMPDIR:-${TMPDIR:-/tmp}}/_tfw-$$"
trap '_tfw_status=$?; _tfw_killtests; rm -rf "$_tfw_tmpdir"; exit $_tfw_status' EXIT SIGHUP SIGINT SIGTERM
rm -rf "$_tfw_tmpdir"
mkdir -p "$_tfw_tmpdir" || return $?
_tfw_logdir="${TFW_LOGDIR:-$_tfw_cwd/testlog}/$_tfw_suite_name"
_tfw_trace=false
_tfw_verbose=false
_tfw_stop_on_error=false
_tfw_stop_on_failure=false
_tfw_default_timeout=60
local allargs="$*"
local -a filters=()
local njobs=1
local oo
_tfw_shopt oo -s extglob
while [ $# -ne 0 ]; do
case "$1" in
--help) usage; exit 0;;
-t|--trace) _tfw_trace=true;;
-v|--verbose) _tfw_verbose=true;;
--filter=*) filters+=("${1#*=}");;
-j|--jobs) njobs=0;;
--jobs=+([0-9])) njobs="${1#*=}";;
--jobs=*) _tfw_fatal "invalid option: $1";;
-E|--stop-on-error) _tfw_stop_on_error=true;;
-F|--stop-on-failure) _tfw_stop_on_failure=true;;
--) shift; break;;
--*) _tfw_fatal "unsupported option: $1";;
*) _tfw_fatal "spurious argument: $1";;
esac
shift
done
_tfw_shopt_restore oo
if $_tfw_verbose && [ $njobs -ne 1 ]; then
_tfw_fatal "--verbose is incompatible with --jobs=$njobs"
fi
# Create an empty results directory.
_tfw_results_dir="$_tfw_tmpdir/results"
mkdir "$_tfw_results_dir" || return $?
# Create an empty log directory.
mkdir -p "$_tfw_logdir" || return $?
rm -f "$_tfw_logdir"/*
# Enumerate all the test cases.
_tfw_find_tests "${filters[@]}"
# Enable job control.
set -m
# Iterate through all test cases, starting a new test whenever the number of
# running tests is less than the job limit.
_tfw_testcount=0
_tfw_passcount=0
_tfw_failcount=0
_tfw_errorcount=0
_tfw_fatalcount=0
_tfw_running_jobs=()
_tfw_job_pgids=()
_tfw_test_number_watermark=0
local testNumber
local testPosition=0
for ((testNumber = 1; testNumber <= ${#_tfw_tests[*]}; ++testNumber)); do
testName="${_tfw_tests[$(($testNumber - 1))]}"
[ -z "$testName" ] && continue
let ++testPosition
let ++_tfw_testcount
# Wait for any existing child process to finish.
while [ $njobs -ne 0 -a ${#_tfw_running_jobs[*]} -ge $njobs ]; do
_tfw_harvest_processes
done
[ $_tfw_fatalcount -ne 0 ] && break
$_tfw_stop_on_error && [ $_tfw_errorcount -ne 0 ] && break
$_tfw_stop_on_failure && [ $_tfw_failcount -ne 0 ] && break
# Start the next test in a child process.
_tfw_echo_intro $testPosition $testNumber $testName
if $_tfw_verbose || [ $njobs -ne 1 ]; then
echo
fi
echo "$testPosition $testNumber $testName" >"$_tfw_results_dir/$testName"
(
_tfw_test_name="$testName"
# Pick a unique decimal number that must not coincide with other tests
# being run concurrently, _including tests being run in other test
# scripts by other users on the same host_. We cannot simply use
# $testNumber. The subshell process ID is ideal. We don't use
# $BASHPID because MacOS only has Bash-3.2, and $BASHPID was introduced
# in Bash-4.
_tfw_unique=$($BASH -c 'echo $PPID')
# All files created by this test belong inside a temporary directory.
# The path name must be kept short because it is used to construct
# named socket paths, which have a limited length.
_tfw_tmp=/tmp/_tfw-$_tfw_unique
trap '_tfw_status=$?; rm -rf "$_tfw_tmp"; exit $_tfw_status' EXIT SIGHUP SIGINT SIGTERM
local start_time=$(_tfw_timestamp)
local finish_time=unknown
(
trap '_tfw_status=$?; _tfw_teardown; exit $_tfw_status' EXIT SIGHUP SIGINT SIGTERM
_tfw_result=ERROR
mkdir $_tfw_tmp || exit 255
_tfw_setup
_tfw_result=FAIL
_tfw_phase=testcase
tfw_log "# CALL test_$_tfw_test_name()"
$_tfw_trace && set -x
test_$_tfw_test_name
_tfw_result=PASS
case $_tfw_result in
PASS) exit 0;;
FAIL) exit 1;;
ERROR) exit 254;;
esac
exit 255
)
local stat=$?
finish_time=$(_tfw_timestamp)
local result=FATAL
case $stat in
254) result=ERROR;;
1) result=FAIL;;
0) result=PASS;;
esac
echo "$testPosition $testNumber $testName $result" >"$_tfw_results_dir/$testName"
{
echo "Name: $testName"
echo "Result: $result"
echo "Started: $start_time"
echo "Finished: $finish_time"
echo '++++++++++ log.stdout ++++++++++'
cat $_tfw_tmp/log.stdout
echo '++++++++++'
echo '++++++++++ log.stderr ++++++++++'
cat $_tfw_tmp/log.stderr
echo '++++++++++'
if $_tfw_trace; then
echo '++++++++++ log.xtrace ++++++++++'
cat $_tfw_tmp/log.xtrace
echo '++++++++++'
fi
} >"$_tfw_logdir/$testNumber.$testName.$result"
exit 0
) </dev/null &
local job=$(jobs %% | $SED -n -e '1s/^\[\([0-9]\{1,\}\)\].*/\1/p')
_tfw_running_jobs+=($job)
_tfw_job_pgids[$job]=$(jobs -p %%)
ln -f -s "$_tfw_results_dir/$testName" "$_tfw_results_dir/job-$job"
done
# Wait for all child processes to finish.
while [ ${#_tfw_running_jobs[*]} -ne 0 ]; do
_tfw_harvest_processes
done
# Clean up working directory.
rm -rf "$_tfw_tmpdir"
trap - EXIT SIGHUP SIGINT SIGTERM
# Echo result summary and exit with success if no failures or errors.
s=$([ $_tfw_testcount -eq 1 ] || echo s)
echo "$_tfw_testcount test$s, $_tfw_passcount pass, $_tfw_failcount fail, $_tfw_errorcount error"
[ $_tfw_fatalcount -eq 0 -a $_tfw_failcount -eq 0 -a $_tfw_errorcount -eq 0 ]
}
_tfw_killtests() {
if [ $njobs -eq 1 ]; then
echo -n " killing..."
else
echo -n -e "\r\rKilling tests...\r"
fi
trap '' SIGHUP SIGINT SIGTERM
local job
for job in ${_tfw_running_jobs[*]}; do
kill -TERM %$job 2>/dev/null
done
while [ ${#_tfw_running_jobs[*]} -ne 0 ]; do
_tfw_harvest_processes
done
}
_tfw_echo_intro() {
local docvar="doc_$3"
echo -n "$2. ${!docvar:-$3}..."
[ $1 -gt $_tfw_test_number_watermark ] && _tfw_test_number_watermark=$1
}
_tfw_harvest_processes() {
# <incantation>
# This is the only way known to get the effect of a 'wait' builtin that will
# return when _any_ child dies or after a one-second timeout.
trap 'kill -TERM $spid 2>/dev/null' SIGCHLD
sleep 1 &
spid=$!
set -m
wait $spid >/dev/null 2>/dev/null
trap - SIGCHLD
# </incantation>
local -a surviving_jobs=()
local job
for job in ${_tfw_running_jobs[*]}; do
if jobs %$job >/dev/null 2>/dev/null; then
surviving_jobs+=($job)
continue
fi
# Kill any residual processes from the test case.
local pgid=${_tfw_job_pgids[$job]}
[ -n "$pgid" ] && kill -TERM -$pgid 2>/dev/null
# Report the test script outcome.
if [ -s "$_tfw_results_dir/job-$job" ]; then
set -- $(<"$_tfw_results_dir/job-$job")
local testPosition="$1"
local testNumber="$2"
local testName="$3"
local result="$4"
case "$result" in
ERROR)
let _tfw_errorcount=_tfw_errorcount+1
;;
PASS)
let _tfw_passcount=_tfw_passcount+1
;;
FAIL)
let _tfw_failcount=_tfw_failcount+1
;;
*)
result=FATAL
let _tfw_fatalcount=_tfw_fatalcount+1
;;
esac
local lines
if ! $_tfw_verbose && [ $njobs -eq 1 ]; then
echo -n " "
_tfw_echo_result "$result"
echo
elif ! $_tfw_verbose && lines=$($_tfw_tput lines); then
local travel=$(($_tfw_test_number_watermark - $testPosition + 1))
if [ $travel -gt 0 -a $travel -lt $lines ] && $_tfw_tput cuu $travel ; then
_tfw_echo_intro $testPosition $testNumber $testName
echo -n " "
_tfw_echo_result "$result"
echo
travel=$(($_tfw_test_number_watermark - $testPosition))
[ $travel -gt 0 ] && $_tfw_tput cud $travel
fi
else
echo -n "$testNumber. ... "
_tfw_echo_result "$result"
echo
fi
else
_tfw_echoerr "${BASH_SOURCE[1]}: job %$job terminated without result"
fi
rm -f "$_tfw_results_dir/job-$job"
done
_tfw_running_jobs=(${surviving_jobs[*]})
}
# Echo a result word with terminal colours via $_tfw_tput: ERROR/FATAL in
# reverse-video red, PASS in green, FAIL in red; anything else plain.
# No trailing newline is emitted.
_tfw_echo_result() {
local result="$1"
case "$result" in
ERROR | FATAL)
$_tfw_tput setaf 1
$_tfw_tput rev
echo -n "$result"
$_tfw_tput sgr0
$_tfw_tput op
;;
PASS)
$_tfw_tput setaf 2
echo -n "$result"
$_tfw_tput op
;;
FAIL)
$_tfw_tput setaf 1
echo -n "$result"
$_tfw_tput op
;;
*)
echo -n "$result"
;;
esac
}
# The following functions can be overridden by a test script to provide a
# default fixture for all test cases.
# Default no-op fixture hooks; test scripts override these to provide a
# common setup/teardown for all of their test cases.
setup() {
:
}
teardown() {
:
}
# The following functions are provided to facilitate writing test cases and
# fixtures.
# Add quotations to the given arguments to allow them to be expanded intact
# in eval expressions.
# Quote each argument so it survives a later 'eval' intact; echoes the quoted
# words joined by spaces (delegates to _tfw_shellarg, which fills $_tfw_args).
shellarg() {
_tfw_shellarg "$@"
echo "${_tfw_args[*]}"
}
# Echo the absolute path (containing symlinks if given) of the given
# file/directory, which does not have to exist or even be accessible.
abspath() {
_tfw_abspath -L "$1"
}
# Echo the absolute path (resolving all symlinks) of the given file/directory,
# which does not have to exist or even be accessible.
realpath() {
_tfw_abspath -P "$1"
}
# Escape all grep(1) basic regular expression metacharacters.
# Escape every grep(1) *basic* regular expression metacharacter in $1 and
# echo the escaped string.
escape_grep_basic() {
  local pattern="$1"
  local nul=''
  # Backslash first, so the escapes added below are not themselves escaped.
  pattern="${pattern//[\\]/\\\\$nul}"
  local ch
  for ch in . '*' '^' '$' '[' ']'; do
    pattern="${pattern//"$ch"/\\$ch}"
  done
  echo "$pattern"
}
# Escape all egrep(1) extended regular expression metacharacters.
# Escape every egrep(1) *extended* regular expression metacharacter in $1 and
# echo the escaped string.  (Mirrors the original: '{' is escaped, '}' and
# ']' are not.)
escape_grep_extended() {
  local pattern="$1"
  local nul=''
  # Backslash first, so the escapes added below are not themselves escaped.
  pattern="${pattern//[\\]/\\\\$nul}"
  local ch
  for ch in . '*' '?' '+' '^' '$' '(' ')' '|' '[' '{'; do
    pattern="${pattern//"$ch"/\\$ch}"
  done
  echo "$pattern"
}
# Executes its arguments as a command:
# - captures the standard output and error in temporary files for later
# examination
# - captures the exit status for later assertions
# - sets the $executed variable to a description of the command that was
# executed
execute() {
tfw_log "# execute" $(shellarg "$@")
_tfw_getopts execute "$@"
shift $_tfw_getopts_shift
_tfw_execute "$@"
}
executeOk() {
tfw_log "# executeOk" $(shellarg "$@")
_tfw_getopts executeok "$@"
_tfw_opt_exit_status=0
_tfw_dump_on_fail --stderr
shift $_tfw_getopts_shift
_tfw_execute "$@"
}
# Wait until a given condition is met:
# - can specify the timeout with --timeout=SECONDS
# - can specify the sleep interval with --sleep=SECONDS
# - the condition is a command that is executed repeatedly until returns zero
# status
# where SECONDS may be fractional, eg, 1.5
wait_until() {
# Repeatedly run the given command until it returns zero status; fail the
# test when --timeout=SECONDS (default $_tfw_default_timeout) elapses first.
# Polls every --sleep=SECONDS (default 1).  SECONDS may be fractional.
tfw_log "# wait_until" $(shellarg "$@")
local start=$SECONDS
_tfw_getopts wait_until "$@"
shift $_tfw_getopts_shift
# A background sleep serves as the timeout clock; once it exits, time is up.
sleep ${_tfw_opt_timeout:-$_tfw_default_timeout} &
local timeout_pid=$!
while true; do
"$@" && break
kill -0 $timeout_pid 2>/dev/null || fail "timeout"
sleep ${_tfw_opt_sleep:-1}
done
local end=$SECONDS
tfw_log "# waited for" $((end - start)) "seconds"
return 0
}
# Executes its arguments as a command in the current shell process (not in a
# child process), so that side effects like functions setting variables will
# have effect.
# - if the exit status is non-zero, then fails the current test
# - otherwise, logs a message indicating the assertion passed
assert() {
# Run the arguments as a command in the current shell; fail the test case on
# non-zero status, otherwise log the assertion as passed.
_tfw_getopts assert "$@"
shift $_tfw_getopts_shift
[ -z "$_tfw_message" ] && _tfw_message=$(shellarg "$@")
_tfw_assert "$@" || _tfw_failexit || return $?
tfw_log "# assert $_tfw_message"
return 0
}
# Evaluate a comparison expression (parsed by _tfw_parse_expr) and fail the
# test case if it is false.
assertExpr() {
_tfw_getopts assertexpr "$@"
shift $_tfw_getopts_shift
_tfw_parse_expr "$@" || return $?
_tfw_message="${_tfw_message:+$_tfw_message }("$@")"
_tfw_shellarg "${_tfw_expr[@]}"
_tfw_assert eval "${_tfw_args[@]}" || _tfw_failexit || return $?
tfw_log "# assert $_tfw_message"
return 0
}
# Fail the current test case: log the optional message $1, print a backtrace,
# and exit with FAIL status.
fail() {
_tfw_getopts fail "$@"
shift $_tfw_getopts_shift
[ $# -ne 0 ] && _tfw_failmsg "$1"
_tfw_backtrace
_tfw_failexit
}
# Abort the current test case with an ERROR result (the test itself is
# broken, as opposed to the program under test failing).
error() {
_tfw_getopts error "$@"
shift $_tfw_getopts_shift
[ $# -ne 0 ] && _tfw_errormsg "$1"
_tfw_backtrace
_tfw_errorexit
}
# Abort the entire test run with a FATAL result.
fatal() {
[ $# -eq 0 ] && set -- "no reason given"
_tfw_fatalmsg "$@"
_tfw_backtrace
_tfw_fatalexit
}
# Append a message to the test case's stdout log. A normal 'echo' to stdout
# will also do this, but tfw_log will work even in a context that stdout (fd 1)
# is redirected.
# Append a timestamped message line to the test log.  Writes via the saved
# log fd ($_tfw_log_fd), so it works even when stdout (fd 1) is redirected.
tfw_log() {
  local stamp
  stamp=$(_tfw_timestamp)
  # printf (not echo) so a message beginning with "-n"/"-e" is not eaten.
  printf '%s\n' "${stamp##* } $*" >&$_tfw_log_fd
}
# Append the contents of a file to the test case's stdout log. A normal 'cat'
# to stdout would also do this, but tfw_cat echoes header and footer delimiter
# lines around to content to help distinguish it, and also works even in a
# context that stdout (fd 1) is redirected.
tfw_cat() {
local header=
local show_nonprinting=
for file; do
case $file in
--header=*)
header="${1#*=}"
continue
;;
-v|--show-nonprinting)
show_nonprinting=-v
continue
;;
--stdout)
file="$_tfw_tmp/stdout"
header="${header:-stdout of ($executed)}"
;;
--stderr)
file="$_tfw_tmp/stderr"
header="${header:-stderr of ($executed)}"
;;
*)
header="${header:-${file#$_tfw_tmp/}}"
;;
esac
local missing_nl=
tfw_log "#----- $header -----"
cat $show_nonprinting "$file" >&$_tfw_log_fd
if [ "$(tail -1c "$file")" != "$newline" ]; then
echo >&$_tfw_log_fd
missing_nl=" (no newline at end)"
fi
tfw_log "#-----$missing_nl"
header=
show_nonprinting=
done
}
# Log a gdb backtrace for the given executable/core-file pair into the test
# log, delimited by banner lines.
tfw_core_backtrace() {
local executable="$1"
local corefile="$2"
echo backtrace >"$_tfw_tmpdir/backtrace.gdb"
tfw_log "#----- gdb backtrace from $executable $corefile -----"
gdb -n -batch -x "$_tfw_tmpdir/backtrace.gdb" "$executable" "$corefile" </dev/null
tfw_log "#-----"
rm -f "$_tfw_tmpdir/backtrace.gdb"
}
# Assert on the exit status of the last execute()d command, e.g.
#   assertExitStatus '==' 0
assertExitStatus() {
_tfw_getopts assertexitstatus "$@"
shift $_tfw_getopts_shift
[ -z "$_tfw_message" ] && _tfw_message="exit status ($_tfw_exitStatus) of ($executed) $*"
_tfw_assertExpr "$_tfw_exitStatus" "$@" || _tfw_failexit || return $?
tfw_log "# assert $_tfw_message"
return 0
}
# Assert on the real (wall clock) execution time of the last execute()d
# command, e.g.  assertRealTime --message='fast enough' '<=' 0.5
assertRealTime() {
_tfw_getopts assertrealtime "$@"
shift $_tfw_getopts_shift
[ -z "$_tfw_message" ] && _tfw_message="real execution time ($realtime) of ($executed) $*"
_tfw_assertExpr "$realtime" "$@" || _tfw_failexit || return $?
tfw_log "# assert $_tfw_message"
return 0
}
# Echo the captured stdout of the last execute()d command.
replayStdout() {
cat $_tfw_tmp/stdout
}
# Echo the captured stderr of the last execute()d command.
replayStderr() {
cat $_tfw_tmp/stderr
}
# Assertions on the captured stdout/stderr of the last execute()d command;
# each delegates to a shared _tfw_assert_stdxxx_* helper and fails the test
# case on mismatch.
assertStdoutIs() {
_tfw_assert_stdxxx_is stdout "$@" || _tfw_failexit
}
assertStderrIs() {
_tfw_assert_stdxxx_is stderr "$@" || _tfw_failexit
}
assertStdoutLineCount() {
_tfw_assert_stdxxx_linecount stdout "$@" || _tfw_failexit
}
assertStderrLineCount() {
_tfw_assert_stdxxx_linecount stderr "$@" || _tfw_failexit
}
assertStdoutGrep() {
_tfw_assert_stdxxx_grep stdout "$@" || _tfw_failexit
}
assertStderrGrep() {
_tfw_assert_stdxxx_grep stderr "$@" || _tfw_failexit
}
# Assert that the given file ($1) matches the given pattern ($2).
assertGrep() {
_tfw_getopts assertgrep "$@"
shift $_tfw_getopts_shift
if [ $# -ne 2 ]; then
_tfw_error "incorrect arguments"
return $?
fi
_tfw_dump_on_fail "$1"
_tfw_assert_grep "$1" "$1" "$2" || _tfw_failexit
}
# Internal (private) functions that are not to be invoked directly from test
# scripts.
# Add shell quotation to the given arguments, so that when expanded using
# 'eval', the exact same argument results. This makes argument handling fully
# immune to spaces and shell metacharacters.
_tfw_shellarg() {
local arg
_tfw_args=()
for arg; do
case "$arg" in
'' | *[^A-Za-z_0-9.,:=+\/-]* ) _tfw_args+=("'${arg//'/'\\''}'");;
*) _tfw_args+=("$arg");;
esac
done
}
# Echo the absolute path of the given path, using only Bash builtins.
_tfw_abspath() {
cdopt=-L
if [ $# -gt 1 -a "${1:0:1}" = - ]; then
cdopt="$1"
shift
fi
case "$1" in
*/)
builtin echo $(_tfw_abspath $cdopt "${1%/}")/
;;
/*/*)
if [ -d "$1" ]; then
(CDPATH= builtin cd $cdopt "$1" && builtin echo "$PWD")
else
builtin echo $(_tfw_abspath $cdopt "${1%/*}")/"${1##*/}"
fi
;;
/*)
echo "$1"
;;
*/*)
if [ -d "$1" ]; then
(CDPATH= builtin cd $cdopt "$1" && builtin echo "$PWD")
else
builtin echo $(_tfw_abspath $cdopt "${1%/*}")/"${1##*/}"
fi
;;
. | ..)
(CDPATH= builtin cd $cdopt "$1" && builtin echo "$PWD")
;;
*)
(CDPATH= builtin cd $cdopt . && builtin echo "$PWD/$1")
;;
esac
}
# Echo the current timestamp formatted per $TSFMT; when the format ends with
# nanoseconds (%N on Linux), strip the trailing six digits so only
# milliseconds remain.  Harmless when there is no fractional part.
_tfw_timestamp() {
local ts=$(date "$TSFMT")
echo "${ts%[0-9][0-9][0-9][0-9][0-9][0-9]}"
}
# Prepare the per-test environment: redirect the test's stdio into log
# files under $_tfw_tmp, create the TFWVAR/TFWTMP work directories, and
# run the test's setup hook (setup_<name>() if defined, else setup()).
_tfw_setup() {
_tfw_phase=setup
# fd map: 5 = the user's real stdout/stderr, 6 = captured stdout log,
# fd 1/2 -> log files, 7 = xtrace output; stdin is closed.
exec <&- 5>&1 5>&2 6>$_tfw_tmp/log.stdout 1>&6 2>$_tfw_tmp/log.stderr 7>$_tfw_tmp/log.xtrace
BASH_XTRACEFD=7
_tfw_log_fd=6
_tfw_stdout=5
_tfw_stderr=5
if $_tfw_verbose; then
# Find the PID of the current subshell process.  Cannot use $BASHPID
# because MacOS only has Bash-3.2, and $BASHPID was introduced in Bash-4.
local mypid=$($BASH -c 'echo $PPID')
# These tail processes will die when the current subshell exits.
tail --pid=$mypid --follow $_tfw_tmp/log.stdout >&$_tfw_stdout 2>/dev/null &
tail --pid=$mypid --follow $_tfw_tmp/log.stderr >&$_tfw_stderr 2>/dev/null &
fi
export TFWUNIQUE=$_tfw_unique
export TFWVAR=$_tfw_tmp/var
mkdir $TFWVAR
export TFWTMP=$_tfw_tmp/tmp
mkdir $TFWTMP
# Tests run with TFWTMP as their working directory.
cd $TFWTMP
tfw_log '# SETUP'
case `type -t setup_$_tfw_test_name` in
function)
tfw_log "# call setup_$_tfw_test_name()"
$_tfw_trace && set -x
setup_$_tfw_test_name $_tfw_test_name
set +x
;;
*)
tfw_log "# call setup($_tfw_test_name)"
$_tfw_trace && set -x
setup $_tfw_test_name
set +x
;;
esac
tfw_log '# END SETUP'
}
# Run the test's teardown hook: teardown_<name>() if the test script
# defines one, otherwise the generic teardown() function.
_tfw_teardown() {
   _tfw_phase=teardown
   tfw_log '# TEARDOWN'
   if [ "$(type -t teardown_$_tfw_test_name)" = function ]; then
      tfw_log "# call teardown_$_tfw_test_name()"
      $_tfw_trace && set -x
      teardown_$_tfw_test_name
      set +x
   else
      tfw_log "# call teardown($_tfw_test_name)"
      $_tfw_trace && set -x
      teardown $_tfw_test_name
      set +x
   fi
   tfw_log '# END TEARDOWN'
}
# Executes $_tfw_executable with the given arguments.
# Captures stdout/stderr into $_tfw_tmp/{stdout,stderr}, records the
# exit status in the global _tfw_exitStatus, optionally checks it
# against --exit-status, and parses the `time -p` report into the
# globals realtime_ms/usertime_ms/systime_ms for use by assertions.
_tfw_execute() {
# 'executed' is a global: later assertion messages reference it.
executed=$(shellarg "${_tfw_executable##*/}" "$@")
if $_tfw_opt_core_backtrace; then
ulimit -S -c unlimited
rm -f core
fi
# The brace group's stderr carries the `time -p` report, separate from
# the command's own stderr which goes to $_tfw_tmp/stderr.
{ time -p "$_tfw_executable" "$@" >$_tfw_tmp/stdout 2>$_tfw_tmp/stderr ; } 2>$_tfw_tmp/times
_tfw_exitStatus=$?
# Deal with core dump.
if $_tfw_opt_core_backtrace && [ -s core ]; then
tfw_core_backtrace "$_tfw_executable" core
fi
# Deal with exit status.
if [ -n "$_tfw_opt_exit_status" ]; then
_tfw_message="exit status ($_tfw_exitStatus) of ($executed) is $_tfw_opt_exit_status"
_tfw_dump_stderr_on_fail=true
_tfw_assert [ "$_tfw_exitStatus" -eq "$_tfw_opt_exit_status" ] || _tfw_failexit || return $?
tfw_log "# assert $_tfw_message"
else
tfw_log "# exit status of ($executed) = $_tfw_exitStatus"
fi
# Parse execution time report.
if ! _tfw_parse_times_to_milliseconds real realtime_ms ||
! _tfw_parse_times_to_milliseconds user usertime_ms ||
! _tfw_parse_times_to_milliseconds sys systime_ms
then
tfw_log '# malformed output from time:'
tfw_cat -v $_tfw_tmp/times
fi
return 0
}
# Extract the "<label> <value>" line written by `time -p` into
# $_tfw_tmp/times and convert the value to integer milliseconds,
# assigning the result to the variable named by $2 (when non-empty).
# Handles both "1.50" and "1m2.5s" style values.
# Returns 1 when the label is absent or the value is malformed.
_tfw_parse_times_to_milliseconds() {
   local _label="$1"
   local _outvar="$2"
   local _ms
   _ms=$($AWK '$1 == "'"$_label"'" {
value = $2
minutes = 0
if (match(value, "[0-9]+m")) {
minutes = substr(value, RSTART, RLENGTH - 1)
value = substr(value, 1, RSTART - 1) substr(value, RSTART + RLENGTH)
}
if (substr(value, length(value)) == "s") {
value = substr(value, 1, length(value) - 1)
}
if (match(value, "^[0-9]+(\.[0-9]+)?$")) {
seconds = value + 0
print (minutes * 60 + seconds) * 1000
}
}' $_tfw_tmp/times)
   [ -z "$_ms" ] && return 1
   [ -n "$_outvar" ] && eval $_outvar=$_ms
   return 0
}
# Run the given command as an assertion.  Leading '!' arguments invert
# the expected sense.  On failure, logs a message and a backtrace and
# returns 1 (the caller decides whether that aborts the test).
_tfw_assert() {
local sense=
while [ "$1" = '!' ]; do
sense="$sense !"
shift
done
"$@"
# The accumulated '!' tokens are injected into the test expression, so
# e.g. one '!' yields `[ ! $? -ne 0 ]`, flipping the pass condition.
if [ $sense $? -ne 0 ]; then
_tfw_failmsg "assertion failed: ${_tfw_message:-$*}"
_tfw_backtrace
return 1
fi
return 0
}
# Global list of diagnostics to dump when a test fails.
declare -a _tfw_opt_dump_on_fail
# Append each argument to _tfw_opt_dump_on_fail, skipping any value
# that is already present (set-like semantics, order preserved).
_tfw_dump_on_fail() {
   local _item _existing _seen
   for _item; do
      _seen=false
      for _existing in "${_tfw_opt_dump_on_fail[@]}"; do
         if [ "$_existing" = "$_item" ]; then
            _seen=true
            break
         fi
      done
      $_seen || _tfw_opt_dump_on_fail+=("$_item")
   done
}
# Parse framework options shared by the assert*/execute*/wait_until
# helpers.  $1 is a context keyword that selects which options are
# legal; parsed values land in _tfw_opt_* globals and the number of
# consumed arguments is reported in _tfw_getopts_shift.
_tfw_getopts() {
local context="$1"
shift
_tfw_executable=
_tfw_opt_core_backtrace=false
_tfw_message=
_tfw_opt_dump_on_fail=()
_tfw_opt_error_on_fail=false
_tfw_opt_exit_status=
_tfw_opt_timeout=
_tfw_opt_sleep=
_tfw_opt_matches=
_tfw_opt_line=
_tfw_getopts_shift=0
local oo
# extglob is needed for the +([0-9]) style patterns below.
_tfw_shopt oo -s extglob
while [ $# -ne 0 ]; do
# Patterns are "context:option" so the same option can be valid in
# some contexts and rejected in others.
case "$context:$1" in
*:--stdout) _tfw_dump_on_fail --stdout;;
*:--stderr) _tfw_dump_on_fail --stderr;;
assert*:--dump-on-fail=*) _tfw_dump_on_fail "${1#*=}";;
execute:--exit-status=+([0-9])) _tfw_opt_exit_status="${1#*=}";;
execute:--exit-status=*) _tfw_error "invalid value: $1";;
execute*:--executable=) _tfw_error "missing value: $1";;
execute*:--executable=*) _tfw_executable="${1#*=}";;
execute*:--core-backtrace) _tfw_opt_core_backtrace=true;;
wait_until:--timeout=@(+([0-9])?(.+([0-9]))|*([0-9]).+([0-9]))) _tfw_opt_timeout="${1#*=}";;
wait_until:--timeout=*) _tfw_error "invalid value: $1";;
wait_until:--sleep=@(+([0-9])?(.+([0-9]))|*([0-9]).+([0-9]))) _tfw_opt_sleep="${1#*=}";;
wait_until:--sleep=*) _tfw_error "invalid value: $1";;
assert*:--error-on-fail) _tfw_opt_error_on_fail=true;;
assert*:--message=*) _tfw_message="${1#*=}";;
assertgrep:--matches=+([0-9])) _tfw_opt_matches="${1#*=}";;
assertgrep:--matches=*) _tfw_error "invalid value: $1";;
assertfilecontent:--line=+([0-9])) _tfw_opt_line="${1#*=}";;
assertfilecontent:--line=*) _tfw_error "invalid value: $1";;
*:--) let _tfw_getopts_shift=_tfw_getopts_shift+1; shift; break;;
*:--*) _tfw_error "unsupported option: $1";;
*) break;;
esac
let _tfw_getopts_shift=_tfw_getopts_shift+1
shift
done
case "$context" in
execute*)
# In execute contexts the first non-option argument names the
# executable unless --executable= was already given.
if [ -z "$_tfw_executable" ]; then
_tfw_executable="$1"
let _tfw_getopts_shift=_tfw_getopts_shift+1
shift
fi
[ -z "$_tfw_executable" ] && _tfw_error "missing executable argument"
;;
esac
_tfw_shopt_restore oo
return 0
}
# Succeed (return 0) only if every argument matches the given regular
# expression; fail on the first argument that does not.
_tfw_matches_rexp() {
   local _pattern="$1"
   shift
   local _candidate
   for _candidate; do
      echo "$_candidate" | $GREP -q -e "$_pattern" || return 1
   done
   return 0
}
# Translate a comparison expression such as `count '>=' 3 '&&' n '~' re`
# into shell-evaluable words in the global _tfw_expr array: numeric
# operators become `[ a -op b ]` tests and ~ / !~ become calls to
# _tfw_matches_rexp.  Logical connectives and parens pass through.
_tfw_parse_expr() {
local _expr="$*"
_tfw_expr=()
while [ $# -ne 0 ]; do
case "$1" in
'&&' | '||' | '!' | '(' | ')')
_tfw_expr+=("$1")
shift
;;
*)
# Anything else must be an "operand operator operand" triple.
if [ $# -lt 3 ]; then
_tfw_error "invalid expression: $_expr"
return $?
fi
case "$2" in
'==') _tfw_expr+=("[" "$1" "-eq" "$3" "]");;
'!=') _tfw_expr+=("[" "$1" "-ne" "$3" "]");;
'<=') _tfw_expr+=("[" "$1" "-le" "$3" "]");;
'<') _tfw_expr+=("[" "$1" "-lt" "$3" "]");;
'>=') _tfw_expr+=("[" "$1" "-ge" "$3" "]");;
'>') _tfw_expr+=("[" "$1" "-gt" "$3" "]");;
'~') _tfw_expr+=("_tfw_matches_rexp" "$3" "$1");;
'!~') _tfw_expr+=("!" "_tfw_matches_rexp" "$3" "$1");;
*)
_tfw_error "invalid expression: $_expr"
return $?
;;
esac
shift 3
;;
esac
done
return 0
}
# Evaluate a comparison expression (see _tfw_parse_expr) as an
# assertion: parse it, re-quote the resulting words so eval is safe,
# then hand the evaluation to _tfw_assert.
_tfw_assertExpr() {
_tfw_parse_expr "$@" || return $?
_tfw_shellarg "${_tfw_expr[@]}"
_tfw_assert eval "${_tfw_args[@]}"
}
# Assert that the captured stdout/stderr of the last executed command
# (or one line of it, with --line=N) is byte-for-byte equal to the
# concatenation of the remaining arguments.  $1 selects the stream.
_tfw_assert_stdxxx_is() {
local qual="$1"
shift
_tfw_getopts assertfilecontent --$qual --stderr "$@"
shift $((_tfw_getopts_shift - 2))
if [ $# -lt 1 ]; then
_tfw_error "incorrect arguments"
return $?
fi
# Select either the whole capture or just the requested line.
case "$_tfw_opt_line" in
'') ln -f "$_tfw_tmp/$qual" "$_tfw_tmp/content";;
*) $SED -n -e "${_tfw_opt_line}p" "$_tfw_tmp/$qual" >"$_tfw_tmp/content";;
esac
local message="${_tfw_message:-${_tfw_opt_line:+line $_tfw_opt_line of }$qual of ($executed) is $(shellarg "$@")}"
# echo -n: the expected text carries no trailing newline of its own.
echo -n "$@" >$_tfw_tmp/stdxxx_is.tmp
if ! cmp -s $_tfw_tmp/stdxxx_is.tmp "$_tfw_tmp/content"; then
_tfw_failmsg "assertion failed: $message"
_tfw_backtrace
return 1
fi
tfw_log "# assert $message"
return 0
}
# Assert on the number of lines captured on stdout/stderr of the last
# executed command.  $1 selects the stream; the remaining arguments
# form a comparison expression, e.g.: ... '==' 3
_tfw_assert_stdxxx_linecount() {
local qual="$1"
shift
_tfw_getopts assertfilecontent --$qual --stderr "$@"
shift $((_tfw_getopts_shift - 2))
if [ $# -lt 1 ]; then
_tfw_error "incorrect arguments"
return $?
fi
# Count lines straight from the capture file; the arithmetic context
# strips any whitespace padding wc may emit.
local lineCount=$(( $(wc -l < "$_tfw_tmp/$qual") + 0 ))
[ -z "$_tfw_message" ] && _tfw_message="$qual line count ($lineCount) $*"
_tfw_assertExpr "$lineCount" "$@" || _tfw_failexit || return $?
tfw_log "# assert $_tfw_message"
return 0
}
# Assert that the captured stdout/stderr of the last executed command
# matches a regular expression; $1 selects the stream, the remaining
# argument (after options) is the pattern.  Delegates to _tfw_assert_grep.
_tfw_assert_stdxxx_grep() {
local qual="$1"
shift
_tfw_getopts assertgrep --$qual --stderr "$@"
shift $((_tfw_getopts_shift - 2))
if [ $# -ne 1 ]; then
_tfw_error "incorrect arguments"
return $?
fi
_tfw_assert_grep "$qual of ($executed)" $_tfw_tmp/$qual "$@"
}
# Core grep assertion: check how many lines of $2 match pattern $3 and
# compare the count against $_tfw_opt_matches, which may be empty (at
# least one match), an exact count N, a lower bound N-, an upper bound
# -N, or a range N-M.  $1 is a human-readable label for messages.
_tfw_assert_grep() {
local label="$1"
local file="$2"
local pattern="$3"
local message=
if ! [ -e "$file" ]; then
# NOTE(review): in these two error branches 'ret' is assigned without
# 'local' (the 'local ret=0' below is inside the else branch only), so
# it leaks into the caller's scope — presumably benign, but confirm.
_tfw_error "$file does not exist"
ret=$?
elif ! [ -f "$file" ]; then
_tfw_error "$file is not a regular file"
ret=$?
elif ! [ -r "$file" ]; then
_tfw_error "$file is not readable"
ret=$?
else
local matches=$(( $($GREP --regexp="$pattern" "$file" | wc -l) + 0 ))
local done=false
local ret=0
local info="$matches match"$([ $matches -ne 1 ] && echo "es")
local oo
# extglob enables the +([0-9]) patterns used to classify --matches.
_tfw_shopt oo -s extglob
case "$_tfw_opt_matches" in
'')
done=true
message="${_tfw_message:-$label contains a line matching \"$pattern\"}"
if [ $matches -ne 0 ]; then
tfw_log "# assert $message"
else
_tfw_failmsg "assertion failed ($info): $message"
ret=1
fi
;;
esac
case "$_tfw_opt_matches" in
+([0-9]))
# Exact count.
done=true
local s=$([ $_tfw_opt_matches -ne 1 ] && echo s)
message="${_tfw_message:-$label contains exactly $_tfw_opt_matches line$s matching \"$pattern\"}"
if [ $matches -eq $_tfw_opt_matches ]; then
tfw_log "# assert $message"
else
_tfw_failmsg "assertion failed ($info): $message"
ret=1
fi
;;
esac
case "$_tfw_opt_matches" in
+([0-9])-*([0-9]))
# Lower bound (and start of a range; upper bound handled below).
done=true
local bound=${_tfw_opt_matches%-*}
local s=$([ $bound -ne 1 ] && echo s)
message="${_tfw_message:-$label contains at least $bound line$s matching \"$pattern\"}"
if [ $matches -ge $bound ]; then
tfw_log "# assert $message"
else
_tfw_failmsg "assertion failed ($info): $message"
ret=1
fi
;;
esac
case "$_tfw_opt_matches" in
*([0-9])-+([0-9]))
# Upper bound (and end of a range).
done=true
local bound=${_tfw_opt_matches#*-}
local s=$([ $bound -ne 1 ] && echo s)
message="${_tfw_message:-$label contains at most $bound line$s matching \"$pattern\"}"
if [ $matches -le $bound ]; then
tfw_log "# assert $message"
else
_tfw_failmsg "assertion failed ($info): $message"
ret=1
fi
;;
esac
if ! $done; then
_tfw_error "unsupported value for --matches=$_tfw_opt_matches"
ret=$?
fi
_tfw_shopt_restore oo
fi
if [ $ret -ne 0 ]; then
_tfw_backtrace
fi
return $ret
}
# Write a message to the real stderr of the test script, so the user
# sees it immediately; duplicate it onto fd 2 (the captured test log)
# whenever that is a different stream, so it can be recovered later.
_tfw_echoerr() {
   echo "$@" >&$_tfw_stderr
   [ $_tfw_stderr -eq 2 ] || echo "$@" >&2
}
# Abort (via _tfw_fatal) unless running under Bash 3.2.48 or later;
# returns 0 when the interpreter is acceptable.
_tfw_checkBashVersion() {
   [ -z "$BASH_VERSION" ] && _tfw_fatal "not running in Bash (/bin/bash) shell"
   if [ -n "${BASH_VERSINFO[*]}" ]; then
      local -a v=("${BASH_VERSINFO[@]}")
      if [ ${v[0]} -gt 3 ]; then
         return 0
      elif [ ${v[0]} -eq 3 ] && [ ${v[1]} -gt 2 ]; then
         return 0
      elif [ ${v[0]} -eq 3 ] && [ ${v[1]} -eq 2 ] && [ ${v[2]} -ge 48 ]; then
         return 0
      fi
   fi
   _tfw_fatal "unsupported Bash version: $BASH_VERSION"
}
# Record in _tfw_tput whether a tput(1) binary is on the PATH: "tput"
# when found, otherwise "false" (both strings are runnable commands).
_tfw_checkTerminfo() {
   local _path
   _path=$(type -p tput)
   _tfw_tput=false
   case "$_path" in
   */tput) _tfw_tput=tput;;
   esac
}
# Return a list of test names in the _tfw_tests array variable, in the order
# that the test_TestName functions were defined.  Test names must start with
# an alphabetic character (not numeric or '_').
# Optional arguments are filters: a test number, a comma list of numbers,
# open or closed number ranges (N-, -N, N-M), or a name prefix; a test is
# kept if any filter selects it (unselected slots hold an empty string).
_tfw_find_tests() {
_tfw_tests=()
local oo
# extdebug makes `declare -F name` print "name line file", which is
# what lets us sort the functions by definition order below.
_tfw_shopt oo -s extdebug
local name
for name in $(builtin declare -F |
$SED -n -e '/^declare -f test_[A-Za-z]/s/^declare -f test_//p' |
while read name; do builtin declare -F "test_$name"; done |
sort -k 2,2n -k 3,3 |
$SED -e 's/^test_//' -e 's/[ ].*//')
do
local number=$((${#_tfw_tests[*]} + 1))
local testName=
if [ $# -eq 0 ]; then
testName="$name"
else
local filter
for filter; do
case "$filter" in
+([0-9]))
if [ $number -eq $filter ]; then
testName="$name"
break
fi
;;
+([0-9])*(,+([0-9])))
# Comma-separated list of test numbers.
local oIFS="$IFS"
IFS=,
local -a numbers=($filter)
IFS="$oIFS"
local n
for n in ${numbers[*]}; do
if [ $number -eq $n ]; then
testName="$name"
break 2
fi
done
;;
+([0-9])-)
local start=${filter%-}
if [ $number -ge $start ]; then
testName="$name"
break
fi
;;
-+([0-9]))
local end=${filter#-}
if [ $number -le $end ]; then
testName="$name"
break
fi
;;
+([0-9])-+([0-9]))
local start=${filter%-*}
local end=${filter#*-}
if [ $number -ge $start -a $number -le $end ]; then
testName="$name"
break
fi
;;
*)
# Fallback: name-prefix match.
case "$name" in
"$filter"*) testName="$name"; break;;
esac
;;
esac
done
fi
_tfw_tests+=("$testName")
done
_tfw_shopt_restore oo
}
# A "fail" event occurs when any assertion fails, and indicates that the
# test has not passed; other tests may still proceed.  A failure during
# setup or teardown — or when --error-on-fail is in force — is escalated
# to an ERROR instead.
_tfw_failmsg() {
   if [ "$_tfw_phase" = testcase ] && ! $_tfw_opt_error_on_fail; then
      tfw_log "FAIL: $*"
      return 0
   fi
   tfw_log "ERROR: $*"
}
# Log a backtrace of the *test script's* call stack, skipping the
# framework's own frames (those whose BASH_SOURCE is this file).
_tfw_backtrace() {
tfw_log '#----- shell backtrace -----'
local -i up=1
# Skip frames that belong to the framework itself.
while [ "${BASH_SOURCE[$up]}" == "${BASH_SOURCE[0]}" ]; do
let up=up+1
done
local -i i=0
# Print script frames until we re-enter framework code or run out.
while [ $up -lt ${#FUNCNAME[*]} -a "${BASH_SOURCE[$up]}" != "${BASH_SOURCE[0]}" ]; do
echo "[$i] ${FUNCNAME[$(($up-1))]}() called from ${FUNCNAME[$up]}() at line ${BASH_LINENO[$(($up-1))]} of ${BASH_SOURCE[$up]}" >&$_tfw_log_fd
let up=up+1
let i=i+1
done
tfw_log '#-----'
}
# Abort the current test case after a failed assertion: dump any
# requested diagnostics, then exit 1 (test FAIL) — unless the failure
# happened during setup/teardown or --error-on-fail is set, in which
# case it is escalated to an ERROR via _tfw_errorexit.
_tfw_failexit() {
# When exiting a test case due to a failure, log any diagnostic output that
# has been requested.
tfw_cat "${_tfw_opt_dump_on_fail[@]}"
# A failure during setup or teardown is treated as an error.
case $_tfw_phase in
testcase)
if ! $_tfw_opt_error_on_fail; then
exit 1
fi
;;
esac
_tfw_errorexit
}
# An "error" event prevents a test from running, so it neither passes nor fails.
# Other tests may still proceed.
# Log an ERROR message attributed to the first stack frame outside this
# framework file.
_tfw_errormsg() {
[ $# -eq 0 ] && set -- "(no message)"
local -i up=1
local -i top=${#FUNCNAME[*]}
let top=top-1
# Walk past the framework's own frames to find the caller's frame.
while [ $up -lt $top -a "${BASH_SOURCE[$up]}" == "${BASH_SOURCE[0]}" ]; do
let up=up+1
done
tfw_log "ERROR in ${FUNCNAME[$up]}: $*"
}
# Report an error with a backtrace, then terminate the test with an
# ERROR result.
_tfw_error() {
_tfw_errormsg "ERROR: $*"
_tfw_backtrace
_tfw_errorexit
}
# Terminate the current test with status 254 (ERROR).  During teardown
# the process must not exit, so only the recorded status is raised.
_tfw_errorexit() {
# Do not exit process during teardown
_tfw_result=ERROR
case $_tfw_phase in
teardown) [ $_tfw_status -lt 254 ] && _tfw_status=254;;
*) exit 254;;
esac
return 254
}
# A "fatal" event stops the entire test run, and generally indicates an
# insurmountable problem in the test script or in the test framework itself.
# Report a fatal condition without exiting.
_tfw_fatalmsg() {
_tfw_echoerr "${BASH_SOURCE[1]}: FATAL: $*"
}
# Report a fatal condition and terminate the whole run with status 255.
_tfw_fatal() {
[ $# -eq 0 ] && set -- exiting
_tfw_echoerr "${BASH_SOURCE[1]}: FATAL: $*"
_tfw_fatalexit
}
# Exit status 255 marks a fatal framework-level abort.
_tfw_fatalexit() {
exit 255
}
# Restore the caller's shopt preferences before returning.
# (Runs at source time; _tfw_orig_shopt is presumably saved near the top
# of this file, outside the visible chunk — confirm.)
_tfw_shopt_restore _tfw_orig_shopt
| true
|
58f27786ef550234b914292cd116052d3d870a9a
|
Shell
|
jgm1986/raspberrypishellscripts
|
/raspbian/sd-backup.sh
|
UTF-8
| 787
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Create a compressed image backup of the Raspberry Pi SD card.
# Produces /home/pi/Descargas/raspberry_pyLoad.img.gz; requires root for dd.
set -e

# Absolute destination path, so the script works from any working
# directory (the original mixed the absolute dd path with relative
# 'Descargas/…' paths for gzip/rm, which only worked from /home/pi).
BACKUP_IMG=/home/pi/Descargas/raspberry_pyLoad.img

echo "***********************************************************"
echo "* Raspbian SD Backup. *"
echo "* *"
echo "* WARNING: Please wait until this script end. *"
echo "***********************************************************"
echo ""
echo "INFO: The destination of this backup will be:"
echo " /home/pi/Descargas"
echo ""
echo "INFO: Creating SD backup..."
sudo dd if=/dev/mmcblk0 of="$BACKUP_IMG" bs=1M
echo "[ OK ]"
echo "INFO: Compressing backup image file..."
# gzip replaces the .img with .img.gz on success, so no separate rm is
# needed (the original 'sudo rm -r' targeted a file gzip had already
# removed and would abort the script under 'set -e').
gzip --best "$BACKUP_IMG"
echo "[ OK ]"
echo "Done! Your Raspberry Pi SD backup has finished!"
| true
|
d9b83396d3a211c5513dcb55eb027af0967ed4f6
|
Shell
|
SachinChavhan/data-export
|
/csv-data-export-shell/export.sh
|
UTF-8
| 8,912
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# CSV data export: reads connection/repo parameters from zookeeper,
# clones the ccdm/query/din-customer repositories, then (below) exports
# tables as gzipped CSV and ships them to an SFTP location.
date=$(date '+%Y-%m-%d-%H:%M:%S')
# NOTE(review): the usage string shows 7 placeholders but only 6
# positional arguments are checked and read ($4/$5/$6 are the y/n
# flags; <datamodel> is never consumed) — confirm intended arity.
if [ $# -ne 6 ]; then
echo "bash export.sh <zkNodes> <zkPath> <customer> <datamodel> <usdm-y/n> <plo-y/n> <custom-y/n>"
exit 1;
fi
# Log an ERROR line and abort the whole export.
error_handler(){
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $remote_user [ERROR] - $1"
exit 1
}
# Grab command arguments
ZK_NODES="$1"
ZK_PATH="$2"
ZK_CUSTOMER="$3"
USDM=$(echo $4 | cut -f2 -d-)
PLO=$(echo $5 | cut -f2 -d-)
CUSTOM=$(echo $6 | cut -f2 -d-)
# Construct zookeeper paths
ZK_EXPORT="$ZK_PATH/$ZK_CUSTOMER"
ZK_DB_PARAMETERS="$ZK_EXPORT/dbParameters"
ZK_DB_HOST="$ZK_DB_PARAMETERS/hostname"
ZK_DB_PORT="$ZK_DB_PARAMETERS/port"
ZK_DB_NAME="$ZK_DB_PARAMETERS/database"
ZK_DB_USER="$ZK_DB_PARAMETERS/username"
ZK_DB_PASSWORD="$ZK_DB_PARAMETERS/password"
ZK_DB_SCHEMA="$ZK_DB_PARAMETERS/schema"
ZK_SFTP="$ZK_EXPORT/sftpParameters"
ZK_SFTP_HOST="$ZK_SFTP/hostname"
ZK_SFTP_PASSWORD="$ZK_SFTP/password"
ZK_SFTP_USER="$ZK_SFTP/username"
ZK_SFTP_LOCATION="$ZK_SFTP/sftplocation"
ZK_REPOSITORY="$ZK_EXPORT/repository"
ZK_CCDM_REPO="$ZK_REPOSITORY/ccdm"
ZK_QUERY_REPO="$ZK_REPOSITORY/query"
ZK_DIN_CUST_REPO="$ZK_REPOSITORY/din-customer"
ZK_DIN_CUST_TAG="$ZK_DIN_CUST_REPO/tag"
ZK_DIN_CUST_XML="$ZK_DIN_CUST_REPO/xmllocation"
ZK_CCDM_TAG="$ZK_CCDM_REPO/tag"
# Other common constants
WORK_DIR="/home/comprehend"
LOG_FILE="$WORK_DIR/log"
ZOOKEEPER="zookeepercli"
LOCAL_DATA_DIR="$WORK_DIR"
REPO_PATH="/home/comprehend/repo"
QUERY_REPO_PATH="/home/comprehend/repo/query"
CCDM_REPO_PATH="/home/comprehend/repo/ccdm"
CUST_REPO_PATH="/home/comprehend/repo/din-customer"
# Obtain info from zookeeper
echo "===> Obtaining credentials from zookeeper..."
DB_HOST=$($ZOOKEEPER --servers $ZK_NODES -c get $ZK_DB_HOST)
echo "===> DB host: $DB_HOST"
DB_PORT=$($ZOOKEEPER --servers $ZK_NODES -c get $ZK_DB_PORT)
echo "===> DB port: $DB_PORT"
DB_NAME=$($ZOOKEEPER --servers $ZK_NODES -c get $ZK_DB_NAME)
echo "===> DB name: $DB_NAME"
DB_USER=$($ZOOKEEPER --servers $ZK_NODES -c get $ZK_DB_USER)
echo "===> DB user: $DB_USER"
DB_PASSWORD=$($ZOOKEEPER --servers $ZK_NODES -c get $ZK_DB_PASSWORD)
echo "===> DB password: (masked)"
DB_SCHEMA=$($ZOOKEEPER --servers $ZK_NODES -c get $ZK_DB_SCHEMA)
echo "===> DB schema: $DB_SCHEMA"
SFTP_HOST=$($ZOOKEEPER --servers $ZK_NODES -c get $ZK_SFTP_HOST)
echo "===> Sftp host: $SFTP_HOST"
SFTP_USER=$($ZOOKEEPER --servers $ZK_NODES -c get $ZK_SFTP_USER)
echo "===> Sftp user: $SFTP_USER"
SFTP_PASSWORD=$($ZOOKEEPER --servers $ZK_NODES -c get $ZK_SFTP_PASSWORD)
echo "===> Sftp password: (masked)"
SFTP_LOCATION=$($ZOOKEEPER --servers $ZK_NODES -c get $ZK_SFTP_LOCATION)
echo "===> Sftplocation: $SFTP_LOCATION"
CCDM_REPO=$($ZOOKEEPER --servers $ZK_NODES -c get $ZK_CCDM_REPO)
echo "===> Ccdm repo: $CCDM_REPO"
QUERY_REPO=$($ZOOKEEPER --servers $ZK_NODES -c get $ZK_QUERY_REPO)
echo "===> query repo: $QUERY_REPO"
CUST_REPO=$($ZOOKEEPER --servers $ZK_NODES -c get $ZK_DIN_CUST_REPO)
echo "===> Cust repo: $CUST_REPO"
CUST_XML=$($ZOOKEEPER --servers $ZK_NODES -c get $ZK_DIN_CUST_XML)
echo "===> Cust xml: $CUST_XML"
CUST_TAG=$($ZOOKEEPER --servers $ZK_NODES -c get $ZK_DIN_CUST_TAG)
echo "===> Cust tag: $CUST_TAG"
CCDM_TAG=$($ZOOKEEPER --servers $ZK_NODES -c get $ZK_CCDM_TAG)
echo "===> ccdm tag: $CCDM_TAG"
# Register a per-repo github deploy key in ~/.ssh/config, then clone at
# the requested tag.  (pushd calls below have no matching popd; the
# directory stack just accumulates.)
echo "===> Cloning ccdm repository..."
REPO_CCDM_NAME=$(echo "$CCDM_REPO" | sed 's/.*\/\(.*\)\.git/\1/')
printf "host github.com\n HostName github.com\n IdentityFile $WORK_DIR/vault/${REPO_CCDM_NAME}_deploy_private_key\n" >> ~/.ssh/config
git clone $CCDM_REPO --branch $CCDM_TAG --depth 1 $CCDM_REPO_PATH
pushd $CCDM_REPO_PATH
echo "===> Cloning query repository..."
REPO_QUERY_NAME=$(echo "$QUERY_REPO" | sed 's/.*\/\(.*\)\.git/\1/')
printf "host github.com\n HostName github.com\n IdentityFile $WORK_DIR/vault/${REPO_QUERY_NAME}_deploy_private_key\n" >> ~/.ssh/config
git clone $QUERY_REPO --depth 1 $QUERY_REPO_PATH
pushd $QUERY_REPO_PATH
echo "===> Cloning din-customer repository..."
DIN_REPO_NAME=$(echo "$CUST_REPO" | sed 's/.*\/\(.*\)\.git/\1/')
printf "host github.com\n HostName github.com\n IdentityFile $WORK_DIR/vault/${DIN_REPO_NAME}_deploy_private_key\n" >> ~/.ssh/config
git clone $CUST_REPO --branch $CUST_TAG --depth 1 $CUST_REPO_PATH
pushd $CUST_REPO_PATH
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $remote_user [INFO] - $ZK_CUSTOMER : creating local export directory and copying data from db"
mkdir -p "$LOCAL_DATA_DIR"
CQS_DICT_PATH="$CCDM_REPO_PATH/resources/validation/cqs/global_cqs/"
XML_PATH="$CUST_REPO_PATH/$CUST_XML"
ls -lrt $XML_PATH
# Stage the customer XML and the CQS dictionary next to the queries.
cp $XML_PATH $QUERY_REPO_PATH
cp $CQS_DICT_PATH/cqs_dictionary.py $QUERY_REPO_PATH
ls -lrt $QUERY_REPO_PATH
echo "=========== $QUERY_REPO_PATH"
# Export each table named in the comma-separated list $1 from Postgres
# as gzipped CSV under $WORK_DIR/$2/$date, copy the directory to the
# remote SFTP location and gunzip it there.  Calls error_handler (which
# exits the script) on the first failure.
# Arguments: $1 - comma-separated table names, $2 - data model name
export_data_tables(){
    DATA_MODEL=$2
    LOCAL_PATH=$WORK_DIR/$DATA_MODEL/$date
    mkdir -p $LOCAL_PATH
    for table in $(echo $1 | sed "s/,/ /g"); do
        query="select * from $DB_SCHEMA.$table"
        PGPASSWORD=$DB_PASSWORD psql -h $DB_HOST -p $DB_PORT -U $DB_USER -d $DB_NAME -c "COPY ($query) TO STDOUT WITH CSV HEADER" | gzip > $LOCAL_PATH/$table.csv.gz
        # Check psql's own status via PIPESTATUS: plain $? after the
        # pipeline is gzip's status, which masks a failed query.  Also
        # treat ANY non-zero status as fatal (psql can exit 2 or 3, not
        # just 1, on connection/script errors).
        if [ "${PIPESTATUS[0]}" -ne "0" ]; then
            error_handler "$ZK_CUSTOMER $DATA_MODEL : copy command failed. Table : $table"
        fi
    done
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $remote_user [INFO] - $ZK_CUSTOMER $DATA_MODEL : copied data from db successful"
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $remote_user [INFO] - $ZK_CUSTOMER $DATA_MODEL : transferring data to remote location"
    sshpass -p $SFTP_PASSWORD scp -o StrictHostKeyChecking=no -r $WORK_DIR/$DATA_MODEL $SFTP_USER@$SFTP_HOST:$SFTP_LOCATION
    if [ "$?" -ne "0" ]; then
        error_handler "$ZK_CUSTOMER $DATA_MODEL : failed to transfer data to remote location"
    fi
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $remote_user [INFO] - $ZK_CUSTOMER $DATA_MODEL : Transferred data to remote location successful"
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $remote_user [INFO] - $ZK_CUSTOMER $DATA_MODEL : unzipping data at remote location"
    sshpass -p $SFTP_PASSWORD ssh -o StrictHostKeyChecking=no $SFTP_USER@$SFTP_HOST 'gunzip -r '$SFTP_LOCATION/$DATA_MODEL
    if [ "$?" -ne "0" ]; then
        error_handler "$ZK_CUSTOMER $DATA_MODEL : failed unzipping at remote location"
    fi
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $remote_user [INFO] - $ZK_CUSTOMER $DATA_MODEL : unzipped data at remote location successful"
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $remote_user [INFO] - $ZK_CUSTOMER $DATA_MODEL : CSV export process completed successfully"
}
# Export custom queries ("filename#query" pairs) as gzipped CSV and ship
# them to the SFTP location, one transfer/unzip round-trip per entry.
# NOTE(review): `a=$1` receives only the FIRST word of the FIRST element
# — the caller invokes `export_data_custom $a` (unquoted scalar-style
# array expansion), so `for j in "${a[@]}"` iterates a single item.
# To process all entries the function should iterate "$@" and the
# caller pass "${a[@]}" — confirm and fix together.
export_data_custom(){
a=$1
LOCAL_CUSTOM_DATA_PATH=$WORK_DIR/custom/$date
mkdir -p $LOCAL_CUSTOM_DATA_PATH
for j in "${a[@]}"
do
filename=$(echo $j | cut -f1 -d#)
query=$(echo $j | cut -f2 -d#)
PGPASSWORD=$DB_PASSWORD psql -h $DB_HOST -p $DB_PORT -U $DB_USER -d $DB_NAME -c "COPY ($query) TO STDOUT WITH CSV HEADER" | gzip > $LOCAL_CUSTOM_DATA_PATH/$filename.csv.gz
# NOTE(review): `$?` here is gzip's status (psql failure is masked by
# the pipeline) and `-eq 1` misses exit codes 2/3; also `$table` in the
# message is never set in this function (stale/empty) — confirm.
if [ "$?" -eq "1" ]; then
error_handler "$ZK_CUSTOMER $filename : copy command failed. Table : $table"
fi
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $remote_user [INFO] - $ZK_CUSTOMER $filename : copied data from db successful"
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $remote_user [INFO] - $ZK_CUSTOMER $filename : transferring data to remote location"
sshpass -p $SFTP_PASSWORD scp -o StrictHostKeyChecking=no -r $WORK_DIR/custom $SFTP_USER@$SFTP_HOST:$SFTP_LOCATION
if [ "$?" -eq "1" ]; then
error_handler "$ZK_CUSTOMER $filename : failed to transfer data to remote location"
fi
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $remote_user [INFO] - $ZK_CUSTOMER $filename : Transferred data to remote location successful"
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $remote_user [INFO] - $ZK_CUSTOMER $filename : unzipping data at remote location"
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $remote_user [INFO] - $ZK_CUSTOMER $filename : unzipping data at remote location"
sshpass -p $SFTP_PASSWORD ssh -o StrictHostKeyChecking=no $SFTP_USER@$SFTP_HOST 'gunzip -r '$SFTP_LOCATION/custom
if [ "$?" -eq "1" ]; then
error_handler "$ZK_CUSTOMER $filename : failed unzipping at remote location"
fi
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $remote_user [INFO] - $ZK_CUSTOMER $filename : unzipped data at remote location successful"
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $remote_user [INFO] - $ZK_CUSTOMER $filename : CSV export process completed successfully"
done
}
# Dispatch: run whichever export flavours were requested on the
# command line (usdm/plo table lists come from read_params.py).
if [ $USDM = "y" ];then
echo "===> export process for usdm"
usdm_tables="$(python $QUERY_REPO_PATH/read_params.py u)"
echo "===> python done usdm_tables : $usdm_tables"
export_data_tables $usdm_tables "usdm"
fi
if [ $PLO = "y" ];then
echo "===> export process for plo"
plo_tables="$(python $QUERY_REPO_PATH/read_params.py p)"
export_data_tables $plo_tables "plo"
fi
if [ $CUSTOM = "y" ];then
echo "===> export process for custom"
python $QUERY_REPO_PATH/read_params.py c > custom.txt
# NOTE(review): `readarray a` (without -t) keeps trailing newlines, and
# `export_data_custom $a` expands only the first element, word-split —
# so at most the first custom entry is processed; should likely be
# `readarray -t a` and pass "${a[@]}" (requires matching function fix).
readarray a < custom.txt
export_data_custom $a
fi
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $remote_user [INFO] - CSV export process completed successfully"
| true
|
4a7f8a2ae211c1cdea38b8aaeaf66cf5a7befd5b
|
Shell
|
travisdowns/zero-fill-bench
|
/scripts/data-icl.sh
|
UTF-8
| 712
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Collect zero-fill benchmark data sets into $RDIR (default ./results):
# warm/cold overall sweeps plus focused L2 and L3 size ranges.
set -euo pipefail
# := assigns the default into RDIR as a side effect of the echo.
echo "RDIR=${RDIR:=./results}"
mkdir -p "$RDIR"
d='---------------------------'
# up to CPU-count threads
echo -e "$d\nCollecting data in $RDIR\n$d"
./bench --algos=fill0,fill1 --perf-cols=GHz --csv > "$RDIR/overall-warm.csv"
./bench --algos=fill0,fill1 --perf-cols=GHz --csv --warmup-ms=0 > "$RDIR/overall.csv"
./bench --algos=fill0,fill1,alt01 --min-size=60000 --max-size=100000000 --warmup-ms=0 --perf-cols=l2-out-silent,l2-out-non-silent,GHz --step=1.1 --csv > "$RDIR/l2-focus.csv"
./bench --algos=fill0,fill1,alt01 --min-size=600000 --max-size=400000000 --warmup-ms=0 --perf-cols=uncR,uncW --step=1.1 --csv > "$RDIR/l3-focus.csv"
echo -e "$d\nData collection finished!\n$d"
| true
|
68c147fc8548e1def2c8230fe93ca2cdd878e922
|
Shell
|
bangongzi/advanced_topo
|
/default_sw_config.sh
|
UTF-8
| 698
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Reconfigure the tc qdisc tree on device s<id>-eth<port>: delete the
# existing root qdisc, then install a dsmark root feeding a token-bucket
# filter with the requested latency and rate.
# Arguments: $1 - switch id, $2 - port, $3 - latency, $4 - rate (Gbit)
config_single(){
   local sw="$1" port="$2" latency="$3" rate="$4"
   local dev="s${sw}-eth${port}"
   echo "dev $dev are going to be configured"
   tc qdisc delete dev $dev root
   tc qdisc add dev $dev handle 1: root dsmark indices 1 default_index 0
   tc qdisc add dev $dev handle 2: parent 1: tbf burst 2048KB latency ${latency} mtu 1514 rate ${rate}Gbit
   tc qdisc show dev $dev
}
#configure ports of layer 1
# s301..s309 at 0.1 Gbit (the "30${i}" concatenation needs i <= 9).
for i in $( seq 1 9 )
do
config_single "30${i}" 1 75000 0.1
done
# s310 only — "3${i}" with i=10; `seq 10 10` yields the single value 10.
for i in $( seq 10 10 )
do
config_single "3${i}" 1 75000 0.1
done
#configure ports of layer 2
for i in $( seq 1 2 )
do
config_single "20${i}" 1 75000 0.5
done
config_single "101" 1 75000 1
#configure ports of layer 3
#no actual configure means it is very big
| true
|
c4be174f647833d326ffb436fe7d9ba7e2f13b24
|
Shell
|
Konubinix/Devel
|
/bin/konix_ebook_convert.sh
|
UTF-8
| 347
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Convert an ebook with calibre's ebook-convert, replacing any existing
# output file.  Usage: konix_ebook_convert.sh -i INPUT -o OUTPUT
set -eu

# Print a short usage synopsis on stderr.
# (The original script called 'usage' for -h without defining it, which
# crashed with "command not found" under set -eu.)
usage() {
    echo "Usage: ${0##*/} -i INPUT -o OUTPUT" >&2
}

while getopts "hi:o:" opt; do
    case $opt in
        h)
            usage
            exit 0
            ;;
        i)
            IN="$OPTARG"
            ;;
        o)
            OUT="$OPTARG"
            ;;
    esac
done
shift $((OPTIND-1))

# Remove a pre-existing output so ebook-convert starts from scratch.
if [ -e "${OUT}" ]
then
    rm -rf "${OUT}"
fi
ebook-convert "${IN}" "${OUT}"
| true
|
953a1172b6e33f90a85a981c31075bfd81259aa9
|
Shell
|
gecos-team/gecosws-installation-disk-generator
|
/config/var/gensys/live-build/gecosv2-14.04/scripts/build/chroot_linux-image
|
UTF-8
| 5,053
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/sh
## live-build(7) - System Build Scripts
## Copyright (C) 2006-2013 Daniel Baumann <daniel@debian.org>
##
## This program comes with ABSOLUTELY NO WARRANTY; for details see COPYING.
## This is free software, and you are welcome to redistribute it
## under certain conditions; see COPYING for details.
set -e
# Including common functions
[ -e "${LIVE_BUILD}/scripts/build.sh" ] && . "${LIVE_BUILD}/scripts/build.sh" || . /usr/lib/live/build.sh
# Setting static variables
DESCRIPTION="$(Echo 'schedule kernel packages for installation')"
HELP=""
USAGE="${PROGRAM} [--force]"
Arguments "${@}"
# Reading configuration files
Read_conffiles config/all config/common config/bootstrap config/chroot config/binary config/source
Set_defaults
# Requiring stage file
Require_stagefile .build/config .build/bootstrap
# Checking stage file
Check_stagefile .build/chroot_linux-image
# Checking lock file
Check_lockfile .lock
# Creating lock file
Create_lockfile .lock
# Diverting update-initramfs
#case "${LB_INITRAMFS}" in
# live-boot)
# mv chroot/usr/sbin/update-initramfs chroot/usr/sbin/update-initramfs.live-build
# ;;
#esac
if [ "${LB_LINUX_PACKAGES}" != "none" ]
then
# Queue one kernel package per flavour for installation in the chroot.
for FLAVOUR in ${LB_LINUX_FLAVOURS}
do
for PACKAGE in ${LB_LINUX_PACKAGES}
do
echo ${PACKAGE}-${FLAVOUR} >> chroot/root/packages.chroot
done
done
# Include firmware packages
if [ "${LB_FIRMWARE_CHROOT}" = "true" ]
then
# Assumption: firmware packages install files into /lib/firmware
# Get all firmware packages names
mkdir -p cache/contents.chroot
FIRMWARE_PACKAGES=""
if [ "${LB_PARENT_DISTRIBUTION}" = "squeeze" ]
then
_CONTENTS="${LB_PARENT_MIRROR_CHROOT}/dists/${LB_PARENT_DISTRIBUTION}/Contents-${LB_ARCHITECTURES}.gz"
else
#_CONTENTS="$(for _PARENT_ARCHIVE_AREA in ${LB_PARENT_ARCHIVE_AREAS}; do echo ${LB_PARENT_MIRROR_CHROOT}/dists/${LB_PARENT_DISTRIBUTION}/${_PARENT_ARCHIVE_AREA}/Contents-${LB_ARCHITECTURES}.gz; done)"
_CONTENTS="${LB_PARENT_MIRROR_CHROOT}/dists/${LB_PARENT_DISTRIBUTION}/Contents-${LB_ARCHITECTURES}.gz"
fi
rm -f cache/contents.chroot/contents.${LB_PARENT_DISTRIBUTION}.${LB_ARCHITECTURES}
# Download the Contents index and collect every package shipping files
# under lib/firmware.
for _CONTENT in ${_CONTENTS}
do
wget ${WGET_OPTIONS} ${_CONTENT} -O - | gunzip -c >> cache/contents.chroot/contents.${LB_PARENT_DISTRIBUTION}.${LB_ARCHITECTURES}
FIRMWARE_PACKAGES="${FIRMWARE_PACKAGES} $(awk '/^lib\/firmware/ { print $2 }' cache/contents.chroot/contents.${LB_PARENT_DISTRIBUTION}.${LB_ARCHITECTURES} | sort -u)"
done
if echo ${LB_PARENT_ARCHIVE_AREAS} | grep -qs "non-free"
then
# FIXME: should check that we're building on debian through e.g. a 'derivative-is-based-on' variable or somesuch.
# Manually add firmware-linux/non-free meta package
FIRMWARE_PACKAGES="${FIRMWARE_PACKAGES} firmware-linux"
fi
if [ "${LB_DERIVATIVE}" = "true" ]
then
# FIXME: account for the fact that PARENT_DISTRIBUTION and DISTRIBUTION might be the same (to not have overlapping cache files for contents).
# NOTE(review): _PARENT_DISTRIBUTION (leading underscore) is not set
# anywhere in this script — everywhere else uses LB_PARENT_DISTRIBUTION,
# so this test presumably always takes the else branch; confirm.
if [ "${_PARENT_DISTRIBUTION}" = "squeeze" ]
then
_CONTENTS="${LB_MIRROR_CHROOT}/dists/${LB_DISTRIBUTION}/Contents-${LB_ARCHITECTURES}.gz"
else
_CONTENTS="$(for _ARCHIVE_AREA in ${LB_ARCHIVE_AREAS}; do echo ${LB_MIRROR_CHROOT}/dists/${LB_DISTRIBUTION}/${_ARCHIVE_AREA}/Contents-${LB_ARCHITECTURES}.gz; done)"
fi
rm -f cache/contents.chroot/contents.${LB_DISTRIBUTION}.${LB_ARCHITECTURES}
for _CONTENT in ${_CONTENTS}
do
wget ${WGET_OPTIONS} ${_CONTENT} -O - | gunzip -c >> cache/contents.chroot/contents.${LB_DISTRIBUTION}.${LB_ARCHITECTURES}
FIRMWARE_PACKAGES="${FIRMWARE_PACKAGES} $(awk '/^lib\/firmware/ { print $2 }' cache/contents.chroot/contents.${LB_DISTRIBUTION}.${LB_ARCHITECTURES} | sort -u)"
done
fi
if [ "${LB_PARENT_DISTRIBUTION}" = "squeeze" ]
then
# Filter out contrib packages if contrib is not enabled
if ! echo ${LB_ARCHIVE_AREAS} | grep -qs contrib
then
_FIRMWARE_PACKAGES=""
for _PACKAGE in ${FIRMWARE_PACKAGES}
do
_FIRMWARE_PACKAGES="${_FIRMWARE_PACKAGES} $(echo ${_PACKAGE} | sed -e 's|^contrib/.*$||')"
done
FIRMWARE_PACKAGES="${_FIRMWARE_PACKAGES}"
fi
# Filter out non-free packages if non-free is not enabled
if ! echo ${LB_ARCHIVE_AREAS} | grep -qs non-free
then
_FIRMWARE_PACKAGES=""
for _PACKAGE in ${FIRMWARE_PACKAGES}
do
_FIRMWARE_PACKAGES="${_FIRMWARE_PACKAGES} $(echo ${_PACKAGE} | sed -e 's|^non-free/.*$||')"
done
FIRMWARE_PACKAGES="${_FIRMWARE_PACKAGES}"
fi
fi
# Drop section and keep package names only
for _PACKAGE in ${FIRMWARE_PACKAGES}
do
echo $(echo ${_PACKAGE} | awk -F/ '{ print $NF }') >> chroot/root/packages.chroot
done
# Some known licenses required to be accepted
if echo ${LB_PARENT_ARCHIVE_AREAS} | grep -qs "non-free"
then
cat >> chroot/root/packages.chroot.cfg << EOF
firmware-ivtv firmware-ivtv/license/accepted boolean true
firmware-ipw2x00 firmware-ipw2x00/license/accepted boolean true
EOF
fi
fi
# Creating stage file
Create_stagefile .build/chroot_linux-image
fi
| true
|
3fe67cc2f596d4fe9badc70e917b4faec0ae2aad
|
Shell
|
christopherscott/bashrc_dispatch
|
/bashrc_interactive.sh
|
UTF-8
| 1,599
| 2.515625
| 3
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
# Interactive-shell setup: prompt, completions, and aliases.
PREFIX=$HOME/bashrc_dispatch/
# load utility scripts
[ -f "$HOME/.rvm/scripts/rvm" ] && . ~/.rvm/scripts/rvm
[ -f "${PREFIX}colors.sh" ] && . "${PREFIX}colors.sh"
[ -f "${PREFIX}git-completion.sh" ] && . "${PREFIX}git-completion.sh"
[ -f "${PREFIX}reactive-prompt.sh" ] && . "${PREFIX}reactive-prompt.sh"
# token
# Per-OS prompt glyph (shell_is_* presumably come from bashrc_dispatch).
shell_is_linux && export TOKEN="${COLOR_CYAN}λ${COLOR_NONE}"
shell_is_osx && export TOKEN="${COLOR_GREEN}☯${COLOR_NONE} "
# high-speed prompt
# Single-quoted $(...) segments are re-evaluated at each prompt; the
# double-quoted color segments are expanded once, here.
export PS1='$(show_time) ...$(uname)... $(prompt_color)'"${COLOR_GRAY}[${COLOR_NONE}"'$(prompt_color)'"\u${COLOR_NONE}${COLOR_GRAY}@${COLOR_NONE}"'$(prompt_color)'"\h${COLOR_NONE}${COLOR_GRAY}]${COLOR_NONE}"\
" ${COLOR_GRAY}\w${COLOR_NONE}"\
"${COLOR_BROWN}"'$(show_git_branch_and_status)'"${COLOR_NONE}\n"\
"\[\033[1;35m\]"'$(show_exit_status)'"${TOKEN} "
shell_is_linux && alias l="ls -gGh --color"
shell_is_osx && alias l="ls -ohgG"
alias ll="ls -AgnhG"
alias resource="source ~/.profile"
alias dev="cd ~/dev"
alias wf="ssh cscott@cscott.webfactional.com"
# mobile emulators
alias android="~/dev/zoo/lib/android-sdk-macosx/tools/emulator -avd ics-stock"
alias ios="/Developer/Platforms/iPhoneSimulator.platform/Developer/Applications/iPhone\ Simulator.app/Contents/MacOS/iPhone\ Simulator"
# hide/show hidden files in Finder
alias showhidden="defaults write com.apple.Finder AppleShowAllFiles TRUE; killall Finder"
alias hidehidden="defaults write com.apple.Finder AppleShowAllFiles FALSE; killall Finder"
# Ack: http://betterthangrep.com/
alias ack="ack --ignore-dir=min-cat"
alias localhost="cd /Library/WebServer/Documents"
alias webroot="localhost"
| true
|
83b54b84e261b1eb26fd8cce78cf2c9594b0484f
|
Shell
|
chriiis/.sys.config
|
/common.sh
|
UTF-8
| 1,326
| 3.09375
| 3
|
[] |
no_license
|
#"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# This file containes the self defined initialized settings for shell.
# Sources common settings first, detects the host OS, then layers on the
# matching platform.*.sh file (plus extra Windows-only configuration).
# You can find the latest version on:
# http://github.com/nkwsqyyzx/.sys.config.git
#
# Maintainer: wsq
# Last Change: 2013-11-30 13:48:03
# Email: nk.wangshuangquan@gmail.com
# Version: 0.1
#
# usage: source ~/$_CONFIG_BASE/common.sh
#"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
_CONFIG_BASE=$HOME/.sys.config
export PATH=$PATH:"$_CONFIG_BASE/bin/"
source $_CONFIG_BASE/settings.common.sh
source $_CONFIG_BASE/settings.all.sh
# this script is wrote to detect system.
# (it is expected to set $SYS_OS to one of the case labels below)
source $_CONFIG_BASE/system.detect.sh
# system dependent settings
case $SYS_OS in
    linux )
        source $_CONFIG_BASE/platform.linux.sh
        ;;
    mac )
        source $_CONFIG_BASE/platform.mac.sh
        ;;
    windows_mingw )
        source $_CONFIG_BASE/platform.windows_mingw.sh
        ;;
    windows_cygwin )
        source $_CONFIG_BASE/platform.windows_cygwin.sh
        ;;
esac
# extra configuration shared by both Windows flavours
case $SYS_OS in
    windows_* )
        # configure vim
        source $_CONFIG_BASE/ConfigureVim.sh
        source $_CONFIG_BASE/platform.windows_common.sh
        # windows terminal encoding
        source $_CONFIG_BASE/settings/locales.sh
        # NOTE(review): 'perfect' is not a standard command — presumably a
        # function defined by one of the sourced scripts; verify.
        perfect
        ;;
esac
| true
|
73eb3310f31a5cfda75ce5ab6229257563be0118
|
Shell
|
wiltonpaulo/devops-toolbox
|
/velero/usefull_scripts/restore_all.sh
|
UTF-8
| 338
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Restore every Velero backup known to the cluster, creating one restore
# per backup name. `tail -n +2` skips the table header row emitted by
# `velero backup get`; awk extracts the first column (the backup name).
velero backup get | tail -n +2 | awk '{ print $1}' | while IFS= read -r backup_name
do
    echo "Restoring Backup for namespace: $backup_name"
    velero restore create --from-backup $backup_name
    echo "----------------------------"
done
| true
|
b48f64abe63a1862b3f9e462a331a94193924cb2
|
Shell
|
IceCube-PSU/clustertools
|
/hammertop
|
UTF-8
| 495
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Run "mytop" across all twelve hammer cluster nodes via pdsh and print a
# merged listing sorted by node number.
# Usage: hammertop <user> [timeout-seconds]
# TODO: Add usage
# TODO: Make timeout a command line option (not argument)
USER=$1
DEFAULT_TIMEOUT=10
BASE="hammer"
DOMAIN="rcc.psu.edu"
# Fall back to the default when no timeout argument was supplied.
if [ ! -n "$2" ]
then
    TIMEOUT=${DEFAULT_TIMEOUT}
else
    TIMEOUT=$2
fi
command="mytop | cat --number"
echo $command
# -t / -u give pdsh its connect and command timeouts. The pipeline rewrites
# the "host:" prefix to "host." so --version-sort orders nodes numerically,
# then reformats each line as a right-aligned hostname plus the output line.
pdsh -t $TIMEOUT -u $TIMEOUT -w ssh:${USER}@${BASE}[1-12].${DOMAIN} "$command" 2>/dev/null | sed -e 's/:\s*/./' | sort --version-sort | sed -e 's/\./++/' | awk -F'++' '{printf "%8s %s\n", $1, $2}' # | sed -e 's/\.[0-9]\+//'
| true
|
6df78ec5feaab499ce7e9a54c4dcd63791014cea
|
Shell
|
Signorte/dotfiles
|
/install.sh
|
UTF-8
| 228
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Dispatch to the OS-specific installer based on the kernel name.
case $( uname -s ) in
    Darwin) echo Darwin
            bash install_darwin.sh;;
    Linux)  echo Linux
            bash install_linux.sh;;
    # Fix: the message must be quoted — bare parentheses inside a case arm
    # are a bash syntax error that aborted the whole script.
    *) echo "Others (neither Linux nor Darwin) so finish this sequence";;
esac
| true
|
a5685d10a0d776293fa89310c60148b8614da275
|
Shell
|
oscargus/spyder
|
/.github/scripts/run_tests.sh
|
UTF-8
| 354
| 2.890625
| 3
|
[
"LGPL-3.0-or-later",
"LGPL-2.1-or-later",
"CC-BY-2.5",
"OFL-1.1",
"CC-BY-3.0",
"CC-BY-4.0",
"LGPL-2.1-only",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"Python-2.0",
"GPL-1.0-or-later",
"MIT",
"LGPL-3.0-only",
"GPL-3.0-only",
"GPL-2.0-only",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
#!/bin/bash -ex
# Run the test suite in CI; on Linux, wrap in xvfb-run so Qt has a display.
# Expects $OS to be set by the workflow ("macos" or "linux").
# Adjust PATH in macOS
if [ "$OS" = "macos" ]; then
    PATH=/Users/runner/miniconda3/envs/test/bin:/Users/runner/miniconda3/condabin:$PATH
fi
# Run tests
# tee -a keeps a cumulative log file while still streaming to the console.
if [ "$OS" = "linux" ]; then
    xvfb-run --auto-servernum python runtests.py --color=yes | tee -a pytest_log.txt
else
    python runtests.py --color=yes | tee -a pytest_log.txt
fi
| true
|
8aebca543b12e26c6bbb9382d8b454c4cba2a1ab
|
Shell
|
jaydestro/itt-ansible-mod
|
/ansible_wrapper.sh
|
UTF-8
| 519
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a per-user Ansible setup: build a one-host inventory from user
# input, install the required Galaxy roles, then run the monolith playbook.
mkdir -p /home/$USER/.ansible
export DIR="/home/$USER/.ansible"
export SSHUSER="jaygordon"
touch $DIR/hosts
# ip of host
echo -n "Enter your IP and press [ENTER]: "
read ip
#Add hosts — overwrite the inventory with a single [hosts] group entry
echo -e "[hosts]\n$ip" > $DIR/hosts
# install ansible roles
ansible-galaxy install ocha.dotnet-core
ansible-galaxy install undergreen.mongodb
ansible-galaxy install geerlingguy.nodejs
ansible-galaxy install simplifield.postgres
ansible-playbook -u $SSHUSER install_monolith.yaml --inventory-file=~/.ansible/hosts
| true
|
7e59e8800d8956c374ed9161d7e5c4237a916380
|
Shell
|
mdozmorov/dcaf
|
/vcf_to_gene.sh
|
UTF-8
| 221
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# For every BED file under output/, intersect with exons2genes.bed and write
# a ranked gene list (<file>.rnk): "gene count", sorted by count descending.
# Fix: the shebang was "#/bin/bash" (missing '!'), so it was a plain comment.
#i=1
for file in output/*.bed
do
#	i=$(($i+1))
#	echo "111"> $file$i".txt"
	# -wa -wb reports both records for each overlap; column 7 holds the
	# gene name from exons2genes.bed. Quote "$file" against odd filenames.
	intersectBed -wa -wb -a "$file" -b exons2genes.bed | cut -f7 | sort | uniq -c | awk '{print $2,$1}' | sort -n -r -k2 > "$file".rnk
done
| true
|
bc1c878b281a2563eba60d0603d626832e7cac84
|
Shell
|
xguse/anaconda-cloud-recipes
|
/scripts/travis-run.sh
|
UTF-8
| 434
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Travis CI entry point: Linux builds run inside the bioconda Docker image;
# macOS builds use the preinstalled Anaconda toolchain directly.
set -e
if [[ $TRAVIS_OS_NAME = "linux" ]]
then
    # run CentOS5 based docker container
    docker run -e TRAVIS_BRANCH -e TRAVIS_PULL_REQUEST -e ANACONDA_TOKEN -v `pwd`:/bioconda-recipes bioconda/bioconda-builder
    # Build package documentation
    ./scripts/build-docs.sh
else
    export PATH=/anaconda/bin:$PATH
    # build packages — only those listed in the macOS whitelist
    scripts/build-packages.py --repository . --packages `cat osx-whitelist.txt`
fi
| true
|
4f07e0b5591708ca7345b658af2af262e1ea7593
|
Shell
|
Ashkenazic/skia-buildbot
|
/k8s_checker/bin/create-skia-public-kube-config-secret.sh
|
UTF-8
| 485
| 3.609375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Creates the skia-public-kube-config secret.
# Fix: the shebang was "#/bin/bash" (missing '!'); also quoted expansions and
# dropped the unused ORIG_WD variable (`cd -` already uses $OLDPWD).
set -e -x
source ../../kube/config.sh
source ../../bash/ramdisk.sh
if [ $# -ne 1 ]; then
    echo "The argument must be the skia-public kube config."
    echo ""
    echo "./create-skia-public-kube-config-secret.sh xyz"
    exit 1
fi
SECRET_VALUE=$1
SECRET_NAME="skia-public-kube-config"
# Assemble the kube config on the ramdisk so the secret material never
# touches persistent storage. NOTE(review): a relative $SECRET_VALUE would
# break after this cd — presumably callers pass an absolute path; verify.
cd /tmp/ramdisk
cat "${SECRET_VALUE}" >> kube_config
kubectl create secret generic "${SECRET_NAME}" --from-file=kube_config
cd -
| true
|
d191492f30103d84270eaa56d6736705d930937a
|
Shell
|
mryyomutga/dotfiles
|
/src/.config/polybar/launch.sh
|
UTF-8
| 709
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/sh
# (Re)start Polybar: kill any running instance, wait for it to exit, then
# launch per-monitor bars when xrandr is available, else a single "main" bar.
# terminate already running bar instance
killall -q polybar
# Wait util the processes have been shutdown
while pgrep -u $UID -x polybar > /dev/null; do sleep 1; done
count=0
# Launch Polybar, using default config location ~/.config/polybar/config
if type "xrandr"; then
#  for m in $(xrandr --query | grep " connected" | cut -d" " -f1); do
#    count=$((++count))
#  done
#  if [ $monitor = "eDP1" ] && [ $count -eq 1 ]; then
#    polybar --reload
#  fi
  # One "top" bar per connected output; the bar config reads $MONITOR.
  for m in $(xrandr --query | grep " connected" | cut -d" " -f1); do
    MONITOR=$m polybar --reload top &
  done
  polybar --reload bottom &
else
  polybar --reload main &
fi
echo "Polybar launched..."
| true
|
6e568d56ae2b737e4bb9fe960c917b927bc53282
|
Shell
|
getbraincloud/braincloud-actionscript
|
/brainCloudClient/autobuild/build_as.sh
|
UTF-8
| 572
| 3.203125
| 3
|
[] |
no_license
|
# Package the brainCloud ActionScript client: stamp the README with the
# platform and build version, copy the sources, and zip the artifact.
set -e
build_version=$1
if [ "$build_version" == "" ]; then
echo "Must pass in build version"
exit 1
fi
rm -rf artifacts
mkdir -p artifacts/brainCloudClient
cp docs/README.txt artifacts/brainCloudClient
# "xxx" is a sed -i backup suffix (BSD-style), producing README.TXTxxx which
# is deleted below. NOTE(review): the file is copied as README.txt but
# referenced as README.TXT — this only works on a case-insensitive
# filesystem (macOS); verify on Linux.
sed -i xxx "s/Platform.*/Platform\: ActionScript/" artifacts/brainCloudClient/README.TXT
sed -i xxx "s/Version.*/Version\: $build_version/" artifacts/brainCloudClient/README.TXT
rm artifacts/brainCloudClient/README.TXTxxx
cp -r ../src artifacts/brainCloudClient
pushd artifacts/brainCloudClient
zip -r ../brainCloudClient_actionScript_$build_version.zip .
popd
| true
|
abe94b3a8a83494f8024d5d8d5f5c53ee11f5475
|
Shell
|
Lemon080910/xiaomi_3c
|
/squashfs-root/etc/hotplug.d/iface/01-multicast
|
UTF-8
| 355
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/sh
# OpenWrt hotplug hook: when the LAN interface comes up in wired-AP mode,
# disable IGMP snooping on br-lan so multicast is flooded to all ports
# (lets e.g. a TV discover a DLNA-capable NAS across the bridge).
[ ifup = "$ACTION" ] && [ "$INTERFACE" = "lan" ] && {
	net_mode=$(uci -q get xiaoqiang.common.NETMODE)
	# Fix: quote the expansion and use POSIX '=' — the original unquoted
	# bash-only '==' test broke under /bin/sh when uci returned nothing.
	if [ "$net_mode" = "lanapmode" ]; then
		# This would forward any multicast packets to all ports allowing your TV to find your DLNA capable NAS with movies
		echo "0" > /sys/devices/virtual/net/br-lan/bridge/multicast_snooping
	fi
}
| true
|
f67f4f61101a9ee07b741a0e6d4b22e02871c221
|
Shell
|
micha-bbg/bs-micha
|
/skel-root/kronos/etc/profile
|
UTF-8
| 539
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
# Login profile for the set-top box image: build PATH from the candidate
# directories that actually exist, then define convenience aliases.
export LD_LIBRARY_PATH=/var/lib
#export LD_LIBRARY_PATH=/usr/lib:/var/lib:/lib
paths="/var/sbin /sbin /usr/sbin /var/bin /bin /usr/bin /var/plugins /var/tuxbox/plugins /lib/tuxbox/plugins /usr/lib/tuxbox/plugins"
P=
# Keep only directories present on this image, joined with ':'.
for i in $paths ;do
if [ -d $i ]; then
if [ "$P" = "" ]; then
P=$i
else
P=$P:$i
fi
fi
done
export PATH=$P
# hostname in prompt...
# NOTE(review): the doubled backslashes yield literal \h/\w escapes —
# presumably what this busybox shell expects; verify on the target.
PS1='\\h:\\w \\$ '
alias ll='ls -al'
alias c='cat'
alias d='date'
alias g='grep'
alias m='more'
alias h='history'
alias f='file'
alias recam='camd_start.sh restart'
|
724eea29814dff3fb231fd817fd64f907649afe2
|
Shell
|
fbrandstetter/Autobackup
|
/backup.sh
|
UTF-8
| 2,475
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/bash
# Pull files from each server in serverlist.template via rsync, tar and
# AES-encrypt the result, and move it into $BACKUPDIR. Optionally frees
# space by deleting the oldest backups first.
# Backup Directory ( where to store the encrypted backups )
BACKUPDIR=""
# Current Time ( needed for backup file naming )
BACKUPTIME=$(date +"%Y-%m-%d_%H%M")
# Password / Key for encryption
PASSWORD=""
# SERVERLIST — one "server|user|excludes-file" record per line
SERVERLIST=$(cat serverlist.template)
# DEFAULT EXCLUDES ( Folders which aren't being backuped on ALL servers )
DEFAULT_EXCLUDES="default-excludes.template"
# Decide whether we want to delete backups if Disk is full or just stop backuping ( yes or no )
FREEUPSPACE=""
# Available Disk Space
DISKSPACE=$(df --output=avail -h "$PWD" | sed '1d;s/[^0-9]//g' | tr --delete "\n")
# MAX used space ( defines after which amount the script starts deleting old backups in case `FREEUPSPACE` is 'yes' )
MAXUSED="" # in GB
# NOTE(review): the message implies a LOW-space condition but the test fires
# when available space is GREATER than $MAXUSED — the comparison looks
# inverted; verify intended semantics. Also note df -h units are stripped,
# so the number's unit depends on the reported suffix.
if [ "$DISKSPACE" -gt "$MAXUSED" ]; then
echo "THERE ARE ONLY $DISKSPACE GB AVAILABLE ON THIS SYSTEM!"
if [ ${FREEUPSPACE} = "yes" ]; then
echo "FREEING UP SPACE NOW!"
# Delete the five oldest backup files.
for i in {1..5}
do
OLDESTFILE=$(find "${BACKUPDIR}" -type f -printf '%T+ %p\n' | sort | head -n 1 | cut -f2 -d ' ')
# NOTE(review): this cd is never undone, so the rest of the run happens
# inside $BACKUPDIR when this branch is taken — verify that is intended.
cd "${BACKUPDIR}"
rm "${OLDESTFILE}"
echo "DELETING $i FILE."
done
fi
if [ ${FREEUPSPACE} = "no" ]; then
echo "NOT FREEING UP SPACE. EXITING NOW."
exit 1
fi
fi
# Loop through all servers and do something
for data in ${SERVERLIST}
do
	# Set variables — fields are '|'-separated: server|user|excludes-file
	SERVERNAME=$( echo $data | cut -f1 -d '|' )
	USERNAME=$( echo $data | cut -f2 -d '|' )
	EXCLUDES=$( echo $data | cut -f3 -d '|' )
	# Create directory for the backup
	mkdir "${BACKUPTIME}-${SERVERNAME}"
	# Grab files from remote server
	rsync -r --exclude-from "$DEFAULT_EXCLUDES" --exclude-from "$EXCLUDES" -v -e ssh ${USERNAME}@${SERVERNAME}:/ ${BACKUPTIME}-${SERVERNAME}/
	# Create archive
	tar cvf "${BACKUPTIME}-${SERVERNAME}.tar" -C "${BACKUPTIME}-${SERVERNAME}/" .
	# Remove folder
	rm -rf "${BACKUPTIME}-${SERVERNAME}/"
	# Encrypt archive
	openssl aes-256-cbc -salt -in "${BACKUPTIME}-${SERVERNAME}.tar" -out "${BACKUPTIME}-${SERVERNAME}.tar.aes" -k "${PASSWORD}"
	# Remove unencrypted archive
	rm "${BACKUPTIME}-${SERVERNAME}.tar"
	# Move archive
	mv "${BACKUPTIME}-${SERVERNAME}.tar.aes" "${BACKUPDIR}"
done
exit 0
| true
|
7b5682d50e82a7cdfbe81d0c19230eab17d6371a
|
Shell
|
lukinpvl/search-engine
|
/infra/gitlab/common.sh
|
UTF-8
| 2,371
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
# Checks that appropriate gke params are set and
# that gcloud and kubectl are properly installed and authenticated
# Report a missing tool to stderr with its installation URL, then abort.
#   $1 - tool name, $2 - install instructions URL. Always exits 1.
function need_tool(){
  printf '%s is required. Please follow %s\n' "${1}" "${2}" >&2
  exit 1
}
# Per-tool aborters: each delegates to need_tool with the tool's install URL
# and therefore exits 1. Names must match the "need_${tool}" dynamic
# dispatch used by validate_tools.
function need_gcloud(){
  need_tool "gcloud" "https://cloud.google.com/sdk/downloads"
}
function need_kubectl(){
  need_tool "kubectl" "https://kubernetes.io/docs/tasks/tools/install-kubectl"
}
function need_helm(){
  need_tool "helm" "https://github.com/helm/helm/#install"
}
function need_eksctl(){
  need_tool "eksctl" "https://eksctl.io"
}
function need_az(){
  need_tool "az" "https://docs.microsoft.com/en-us/cli/azure/install-azure-cli"
}
function need_jq(){
  need_tool "jq" "https://stedolan.github.io/jq/download/"
}
# Verify each named tool is installed; on a miss, dispatch to the matching
# need_<tool> function (which exits). For gcloud, additionally require
# $PROJECT to be set and a working, authenticated gcloud configuration.
function validate_tools(){
  for tool in "$@"
  do
    # Basic check for installation
    command -v "${tool}" > /dev/null 2>&1 || "need_${tool}"
    # Additional checks if validating gcloud binary
    if [ "$tool" == 'gcloud' ]; then
      if [ -z "$PROJECT" ]; then
        echo "\$PROJECT needs to be set to your project id";
        exit 1;
      fi
      # A listing call doubles as an auth/config smoke test.
      gcloud container clusters list --project $PROJECT >/dev/null 2>&1 || { echo >&2 "Gcloud seems to be configured incorrectly or authentication is unsuccessfull"; exit 1; }
    fi
  done
}
# Detect Helm 3: echoes 0 when `helm version` reports v3.x, 1 otherwise.
# The set +e / set -e pair keeps the grep exit status from killing callers
# that run under `set -e`; also sets the global IS_HELM_3.
function check_helm_3(){
  set +e
  helm version --short --client | grep -q '^v3\.[0-9]\{1,\}'
  IS_HELM_3=$?
  set -e
  echo $IS_HELM_3
}
# Echo the release-name flag the installed Helm requires: Helm 3 takes the
# name positionally (empty string), Helm 2 needs '--name'.
# Also sets the globals IS_HELM_3 and name_flag, as before.
function set_helm_name_flag(){
  IS_HELM_3=$(check_helm_3)
  case "$IS_HELM_3" in
    0) name_flag='' ;;
    *) name_flag='--name' ;;
  esac
  echo $name_flag
}
# Echo the delete/purge flag the installed Helm requires: Helm 3 removes
# releases fully by default (empty string), Helm 2 needs '--purge'.
# Also sets the globals IS_HELM_3 and purge_flag, as before.
function set_helm_purge_flag(){
  IS_HELM_3=$(check_helm_3)
  case "$IS_HELM_3" in
    0) purge_flag='' ;;
    *) purge_flag='--purge' ;;
  esac
  echo $purge_flag
}
# Print the GKE cluster's basic-auth admin password.
# Reads the globals $CLUSTER_NAME, $ZONE and $PROJECT.
function cluster_admin_password_gke(){
  gcloud container clusters describe $CLUSTER_NAME --zone $ZONE --project $PROJECT --format='value(masterAuth.password)';
}
# Function to compare versions in a semver compatible way.
# Given args A and B, echo 0 if A=B, -1 if A<B and 1 if A>B.
function semver_compare() {
  if [ "$1" = "$2" ]; then
    # Equal versions need no sorting.
    echo 0
    return
  fi
  # Under version sort the smaller version comes first.
  ordered=$(printf '%s\n' "$@" | sort -V | head -n 1)
  if [ "$ordered" = "$1" ]; then
    echo -1
  else
    echo 1
  fi
}
| true
|
3e805bb9ef6a93f0b2313314496e3af4795e8ca7
|
Shell
|
anandray/devops
|
/scripts/supernode-scripts/aws/aws-key-pair-security-group.sh
|
UTF-8
| 3,273
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive setup of a Wolk supernode's AWS prerequisites: pick a region,
# write a per-region conf file, then idempotently create the key pair, the
# security group, and its all-ports ingress rules.
# usage
#echo ./aws-regions.sh $node_number
region=`aws ec2 describe-regions | grep RegionName | cut -d "\"" -f4`
region_options=`echo $region`
options=($region_options)
prompt="Select an AWS region:"
PS3="$prompt "
# Present every region as a numbered menu; the chosen region's zones and a
# user-supplied node number are written to supernode-aws-<region>.conf.
select answer in "${options[@]}"; do
#    zones=`aws ec2 describe-availability-zones --region=$answer --output text | awk '{print$NF}' | awk -vORS=, '{ print $1 }' | sed 's/,$/\n/'`
    zones=`aws ec2 describe-availability-zones --region=$answer | grep ZoneName | cut -d"\"" -f4 | awk -vORS=, '{ print $1 }' | sed 's/,$/\n/'`
    echo 'region="'$answer'"' > supernode-aws-$answer.conf
    echo 'awsregion="'$answer'"' >> supernode-aws-$answer.conf
    echo 'zones="'$zones'"' >> supernode-aws-$answer.conf
    echo "Enter node #:"
    read node
    echo 'node="'$node'"' >> supernode-aws-$answer.conf
    break 2
done
source supernode-aws-$answer.conf
# derivative inputs — naming conventions shared with the other provisioning
# scripts in this directory
app="cloudstore"
port=80
project="wolk-$region"
provider="aws"
fixedinstance="wolk-$node-$provider-$region-dynamo"
autoscaledinstance="wolk-$node-$provider-$region"
prefix="$app-$region-$provider"
instancetemplate="$prefix"
urlmap="$prefix"
instancetemplate="$prefix"
lbname="$prefix-$port"
globalip="$app-$region-$provider-global-ip"
targetproxy="$prefix-target-proxy-$port"
regionalipname="$app-$region-$provider-regional-ip"
healthcheck="$app-$region-healthcheck"
portname="$app-$port"
echo $region
echo $project
echo $provider
echo $fixedinstance
echo $prefix
# change default region locally
echo "
Change default region locally
"
sed -i '/region/d' ~/.aws/config
echo "region = $region" >> ~/.aws/config
# key pair — create only when absent; private key saved under /root/aws
if aws ec2 describe-key-pairs --region $region --query KeyPairs[*].KeyName | grep -i WolkKeyPair-$region; then
echo "Keypair named WolkKeyPair-$region already exists...
"
else
echo "
Creating key pair WolkKeyPair-$region
"
aws ec2 create-key-pair --key-name WolkKeyPair-$region --query 'KeyMaterial' --region=$awsregion --output text > /root/aws/WolkKeyPair-$region.pem
fi
# security group — create only when absent
if aws ec2 describe-security-groups --region $region --query SecurityGroups[*].GroupName | grep -i wolk-sg-$region; then
echo "Security Group named wolk-sg-$region already exists...
"
else
echo "
Creating Security Group wolk-sg-$awsregion
"
aws ec2 create-security-group --group-name wolk-sg-$awsregion --region=$awsregion --description "wolk security group $region" &> /dev/null
fi
# list group id to use it in the next step
echo "
Get security group id to use it in the next step
"
security_group_id=`aws ec2 describe-security-groups --region $region --group-name wolk-sg-$region --query SecurityGroups[*].GroupId --output text`
echo "Security Group ID: $security_group_id"
# add traffic rules to the above security group
# (opens ALL TCP and UDP ports to 0.0.0.0/0 — intentionally wide open)
if aws ec2 describe-security-groups --region $region --group-name wolk-sg-$region --query SecurityGroups[*].IpPermissions --output text; then
echo "
Traffic rules already exists
"
else
echo "
Add traffic rules to the above security group
"
aws ec2 authorize-security-group-ingress --group-id $security_group_id --protocol tcp --port 0-65535 --cidr 0.0.0.0/0 --region $region &> /dev/null
aws ec2 authorize-security-group-ingress --group-id $security_group_id --protocol udp --port 0-65535 --cidr 0.0.0.0/0 --region $region &> /dev/null
fi
| true
|
fbcc8b375155c53d74b120f78d2237cfb312f189
|
Shell
|
prelight/vuewebapp
|
/scripts/deploy.sh
|
UTF-8
| 537
| 2.96875
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# CodeBuild deploy phase: sync build artifacts to S3 (index.html uploaded
# separately with no-cache headers) and invalidate the CloudFront cache.
set -e
if [ "$CODEBUILD_BUILD_SUCCEEDING" = "0" ]; then
  ## Do not run the upload when the build phase failed
  exit 1;
else
  echo Deploy started on `date`
  cd $build_artifacts_dir
  aws s3 sync . s3://$bucket --include "*" --exclude "index.html" --delete
  # index.html must never be cached so each deploy is picked up immediately.
  aws s3 cp index.html s3://$bucket --cache-control "no-store, no-cache, max-age=0" --expires "Mon, 26 Jul 1997 05:00:00 GMT"
  aws cloudfront create-invalidation --distribution-id $cdn_distribution_id --paths "/*"
fi
| true
|
f8a6c58934303e11bacbb0f02f648e6af7ba47fd
|
Shell
|
chbrown/adobe-scripts
|
/install.sh
|
UTF-8
| 228
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install every *.js* file from the current directory into the Scripts
# folder of each installed Adobe Illustrator version, renaming underscores
# to spaces. cp -n never overwrites an existing script.
# Fix: the original used bare $targets, which expands to only the FIRST
# array element — with several Illustrator versions installed, all but one
# were silently skipped. Iterate over every match instead.
targets=(/Applications/"Adobe Illustrator"*/Presets*/*/Scripts)
for target in "${targets[@]}"
do
	for file in *.js*
	do
		destination=$target/${file//_/ }
		>&2 printf 'Copying %s to %s\n' "$file" "$destination"
		cp -n "$file" "$destination"
	done
done
| true
|
b79e72e22bafc7fe831cc6ef2e93f26f9c366c2e
|
Shell
|
cbliard/dotfiles
|
/bin/z
|
UTF-8
| 274
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# "Sleep" a file: rename $1 to $1.zzz after verifying the file exists, is
# writable, and lives in a writable directory. Exits 1 on any failed check.
[ -e "$1" ] || { echo "error: $1 does not exist"; exit 1; }
[ -w "$1" ] || { echo "error: $1 is not writable"; exit 1; }
dir=$(dirname "$1")
[ -w "$dir" ] || { echo "error: directory $dir is not writable"; exit 1; }
mv "$1" "$1".zzz
| true
|
4990c4cb9d05c72921b79125ec978526417b7700
|
Shell
|
cloudartisan/dotfiles
|
/bin/install_vim.sh
|
UTF-8
| 1,278
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Install the dotvim configuration: fetch the repo tarball into ~/.vim,
# link the rc files, set up Vundle plugins, and snapshot prompt/tmux themes.
mkdir -p ${HOME}/.vim
# Stream the GitHub tarball straight into ~/.vim (strip the top-level dir).
curl -Lso - https://github.com/cloudartisan/dotvim/tarball/master \
    | tar --strip-components 1 -C $HOME/.vim -zvxf -
ln -sf ${HOME}/.vim/vimrc ${HOME}/.vimrc
ln -sf ${HOME}/.vim/vimrc ${HOME}/.gvimrc
cd ${HOME}/.vim
git init
git submodule init
git submodule add https://github.com/VundleVim/Vundle.vim.git bundle/Vundle.vim/
git submodule update --init --recursive
alias vim=/usr/local/bin/vim
alias vi=/usr/local/bin/vim
# Remove unused plugins and install any missing plugins
vim "+PluginClean!" "+qall"
vim "+PluginInstall" "+qall"
# Snapshot bash prompt settings for reuse if we haven't already
if [[ ! -f ${HOME}/.bash_promptline_airline ]]
then
    vim "+PromptlineSnapshot ${HOME}/.bash_promptline_airline airline" "+qall"
fi
# Snapshot tmux status settings for reuse if we haven't already
if [[ ! -f ${HOME}/.tmux_tmuxline.conf ]]
then
    vim "+TmuxlineSnapshot ${HOME}/.tmux_tmuxline.conf" "+qall"
fi
# Compile YouCompleteMe if it's installed but not yet compiled
# NOTE(review): [[ ]] is a bashism under #!/bin/sh — presumably this runs
# where sh is bash; verify on the target system.
if [[ -d ${HOME}/.vim/bundle/YouCompleteMe ]]
then
    if [[ ! -e ${HOME}/.vim/bundle/YouCompleteMe/third_party/ycmd/ycm_core.so ]]
    then
        ${HOME}/.vim/bundle/YouCompleteMe/install.py --clang-completer --java-completer
    fi
fi
vim "+GoInstallBinaries" "+qall"
| true
|
53d79a8960291db708d5f8aa9c1b7004c4ed4530
|
Shell
|
covery/ml-model-compiler
|
/covery-ml-build.sh
|
UTF-8
| 653
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Compile an exported H2O POJO model ($1, a .java file) against the
# h2o-genmodel jar and package the classes into <model-name>.jar.
# Configuration
H2O_VERSION="3.20.0.5"
JAR="h2o-genmodel-${H2O_VERSION}.jar"
MODEL="$1"
MODEL_FOLDER=.
# NOTE(review): NAME keeps any directory part of $1, so a model outside the
# current directory would break the cp/javac paths — verify callers pass a
# bare filename.
NAME=${MODEL}
PREFIX="${NAME%.*}"
TMP_FOLDER=./tmp
mkdir ${TMP_FOLDER}
echo "Building model: $MODEL"
echo "   H2O library: $JAR"
echo "  Model folder: $MODEL_FOLDER"
echo "  Build folder: $TMP_FOLDER"
# Copying java file to folder
cp ${MODEL} ${TMP_FOLDER}/${NAME}
# Compiling
echo "Compiling, this make take a while"
javac -cp ${JAR} ${TMP_FOLDER}/${NAME}
rm ${TMP_FOLDER}/${NAME}
# Building JAR — package everything compiled into tmp/
echo "Assembling JAR file"
jar -cf ${MODEL_FOLDER}/${PREFIX}.jar -C ${TMP_FOLDER} .
# Cleanup
rm -rf ${TMP_FOLDER}
echo "Done"
| true
|
e4b2f372a955cc89d6a3f21de9ec63fb3bf2c668
|
Shell
|
Unidata/TdsConfig
|
/remotetest/certs.sh
|
UTF-8
| 3,187
| 2.640625
| 3
|
[] |
no_license
|
# Generate self-signed server and client keys/certificates with OpenSSL,
# convert both to JKS keystores via PKCS12, and build a server truststore
# containing the client certificate. All stores use the password "password".
rm -f serverKey.pem serverCert.pem
rm -fr ServerKeystore.pkcs12 ServerKeystore.jks
rm -f ServerTruststore.jks
##################################################
# Server Key Generation:
# Generate server private key :
openssl genrsa -des3 -passout pass:password -out serverKey.pem 2048
# Generate the self-signed certificate for the server,
openssl req -new -x509 -key serverKey.pem -out serverCert.pem -days 3650 \
    -subj "/C=US/ST=Colorado/L=Boulder/O=UCAR/OU=Unidata/CN=www.ucar.edu" \
    -passin pass:password -passout pass:password
# Generate a keystore in JKS format
openssl pkcs12 -export -out ServerKeystore.pkcs12 -in serverCert.pem -inkey serverKey.pem -passin pass:password -passout pass:password
# Convert serverkeystore.pkcs12 file to JKS format keystore
keytool -importkeystore -alias 1 -srckeystore ServerKeystore.pkcs12 -srcstoretype PKCS12 -destkeystore ServerKeystore.jks -deststoretype JKS \
    -srcstorepass password -srckeypass password -deststorepass password -destkeypass password -noprompt
##################################################
rm -f clientKey.pem clientCert.pem
rm -fr ClientKeystore.pkcs12 ClientKeystore.jks
# Client Key Generation:
# Generate client private key :
openssl genrsa -des3 -passout pass:password -out clientKey.pem 2048
# Generate the self-signed certificate for the client,
openssl req -new -x509 -key clientKey.pem -out clientCert.pem -days 3650 \
    -subj "/C=US/ST=Colorado/L=Boulder/O=UCAR/OU=Unidata/CN=www.ucar.edu" \
    -passin pass:password -passout pass:password
# Generate a keystore in JKS format
openssl pkcs12 -export -out ClientKeystore.pkcs12 -in clientCert.pem -inkey clientKey.pem -passin pass:password -passout pass:password
# Convert clientkeystore.pkcs12 file to JKS format keystore
keytool -importkeystore -alias 1 -srckeystore ClientKeystore.pkcs12 -srcstoretype PKCS12 -destkeystore ClientKeystore.jks -deststoretype JKS \
    -srcstorepass password -srckeypass password -deststorepass password -destkeypass password -noprompt
##################################################
# Generate the trust store for the server
# (the here-doc feeds "yes" to the trust-this-certificate prompt)
keytool -importcert -alias mockdis -keystore ServerTruststore.jks -file clientCert.pem -storepass password -keypass password -trustcacerts <<EOF
yes
EOF
# Cleanup — remove intermediate PEM/PKCS12 files; only the JKS stores remain
rm -f serverKey.pem serverCert.pem
rm -fr ServerKeystore.pkcs12
rm -f clientKey.pem clientCert.pem
rm -fr ClientKeystore.pkcs12
exit
##################################################
# Ignore below this — reference notes kept after the unconditional exit
# pkcs12 - to browser
`openssl pkcs12 -export -out clientKeystore.pkcs12 -in clientCert.pem -inkey clientKey.pem`
# Import this clientkeystore.pkcs12 file into firefox browser.
# Get client keystore file.
keytool -import -alias mockdis -keystore clientTrustore.jks -file clientCert.pem
# Tomcat configuration :
# <Connector port="8443" protocol="HTTP/1.1"
#            maxThreads="150"
#            SSLEnabled="true"
#            scheme="https"
#            secure="true"
#            clientAuth="true"
#            sslProtocol="TLS"
#            keyAlias="1"
#            keystoreFile="D:\OpenSSL-Win32\bin\ServerKeystore.jks"
#            keystorePass="changeit"
#            truststoreFile="D:\OpenSSL-Win32\bin\clientTrustore.jks"
#            truststorePass="changeit"
# />
| true
|
955a4ef2159422c3144d95c427415e1ac94c7d13
|
Shell
|
sbradley7777/dot.config
|
/bin/bin.clusterha_analyze/gfs_find_glocks.sh
|
UTF-8
| 621
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh
# Show GFS2 glocks that have holders or waiters, per glock table and host,
# from a gfs2_lockcapture "runX" directory.
# Usage: gfs_find_glocks.sh <path-to-runX-dir>
# Fix: the shebang was "#~/bin/sh", which is not a valid interpreter line;
# expansions of $path are now quoted.
path=$1;
# Any one host's capture directory is enough to enumerate the glock tables.
host1=$(/bin/ls "$path" | awk '{print $1}' | head -n 1);
for glock_table in $(ls "$path/$host1/gfs2"); do
    echo "---------------------------------------------------";
    echo "               $glock_table                        ";
    echo "---------------------------------------------------";
    for host in $(ls "$path"); do
	echo "$host - $glock_table";
	# Glock flag fields: f:h = holder, f:*w = the waiter variants;
	# -B/-A 1 prints one line of context around each match.
	egrep -ri 'f:h|f:w|f:aw|f:cw|f:ew|f:tw' "$path/$host/gfs2/$glock_table/glocks" -B 1 -A 1;
	echo "";
    done;
    echo "";
done;
exit;
| true
|
48361f5ce6dd1597e5998718a1b89dc27417c084
|
Shell
|
staraise1218/guodian
|
/cron.sh
|
UTF-8
| 397
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
# Demo script: write a small readme via here-doc, then read a number from
# stdin and report which of 1-4 was chosen (user-facing text is Chinese).
cat > /phpstudy/www/TP/readme.txt <<EOF
Hello, World!
My name is Shengbin.
EOF
echo '输入 1 到 4 之间的数字:'
# NOTE(review): this prints "the number you entered is:" BEFORE the number
# is read — presumably the two echoes were meant to bracket the read; verify.
echo '你输入的数字为:'
read aNum
case $aNum in
    1)  echo '你选择了 1'
    ;;
    2)  echo '你选择了 2'
    ;;
    3)  echo '你选择了 3'
    ;;
    4)  echo '你选择了 4'
    ;;
    *)  echo '你没有输入 1 到 4 之间的数字'
    ;;
esac
| true
|
3a9abcc378fcdcec3ddcfcf2f2a24f0a89c47978
|
Shell
|
doggydaddy/FOCIS
|
/1st_mod2.sh
|
UTF-8
| 1,988
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
#time-course based parameters.
#time-course = extract average ideal using thresholded component as a mask
#calculate classification parameter for INDIVIDUAL subjects first before group average
#(So to avoid averaging out inter-subject flucuations)
components=`ls component*.nii`
touch mod2_params
for comp in $components
do
# Splitting into positive and negative masks
3dcalc -a ''$comp'' -expr 'ispositive(a)' -prefix POSMASK_${comp%.hdr}.nii
3dcalc -a ''$comp'' -expr 'isnegative(a)' -prefix NEGMASK_${comp%.hdr}.nii
3dcalc -a ''$comp'' -b ''POSMASK_${comp%.hdr}.nii'' -expr 'a*b' -prefix POSVAL_${comp%.hdr}.nii
3dcalc -a ''$comp'' -b ''NEGMASK_${comp%.hdr}.nii'' -expr '-a*b' -prefix NEGVAL_${comp%.hdr}.nii
# Creating beforecomparison dump
#3dcalc -a ''POSVAL_${comp%.hdr}.nii'' -b ''NEGVAL_${comp%.hdr}.nii'' -expr 'a+b' -prefix RES_VAL_${comp%.hdr}.nii
#3dmaskdump -mask ~/SVM_framework/brain.nii -noijk -o DUMP_BEFORE_${comp%.hdr} RES_VAL_${comp%.hdr}.nii
# --------------------------------
# Grabbing threshold parameters
3dmaskdump -mask ~/SVM_framework/brain.nii -noijk -o tmpdump $comp
thresholds=`3rd_threshold.R tmpdump`
OLD_IFS=$IFS
IFS=" "
set -- $thresholds
pt=$2
nt=$3
# Making a mask out of the component
3dcalc -a ''POSVAL_${comp%.hdr}.nii'' -expr "ispositive(a-$pt)" -prefix POS_THRESHED_${comp%.hdr}.nii
3dcalc -a ''NEGVAL_${comp%.hdr}.nii'' -expr "ispositive(a-$nt)" -prefix NEG_THRESHED_${comp%.hdr}.nii
3dcalc -a ''POS_THRESHED_${comp%.hdr}.nii'' -b "NEG_THRESHED_${comp%.hdr}.nii" -expr 'a+b' -prefix component_mask.nii
IFS=$OLD_IFS
data=`ls Set2*.nii`
for dat in $data
do
#parsing, and extrating mask information
3dROIstats -mask component_mask.nii $dat > ideal
2nd_calcMod2Params.R ideal >> tmp_mod2_params_${comp%.hdr}
done
3rd_avgMod2Params.R tmp_mod2_params_${comp%.hdr} >> mod2_params
rm ideal
rm tmpdump
rm component_mask.nii
rm tmp_mod2_params_*
rm NEG*
rm POS*
done
timecourses=`ls tc.1D`
| true
|
90109282e0ceaa4cf397d479a8931f7f72c49f7f
|
Shell
|
zhondr/carpet_cleaner
|
/carpet_cleaner.client/docker_image_create.bash
|
UTF-8
| 210
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Build the client Docker image, tagged 0.0.1, from this script's directory.
set -e
# Fix: quote the dirname expansion so the script works when its path
# contains spaces (the original `cd $(dirname $0)` word-split).
cd "$(dirname "$0")"
HERE=$PWD
# The image base name is kept in a one-line file next to this script.
BASE_NAME=$(cat docker-image-base-name.txt)
IMAGE_NAME=${BASE_NAME}:0.0.1
# Prepare the build context; the helper may change directory, so return after.
bash lib/_prepareDirs.bash
cd "${HERE}"
docker build -t "$IMAGE_NAME" docker
| true
|
b072396f4e0566a49aeabd774983510e07ba7ca8
|
Shell
|
electrodude/aem-utils
|
/bin/makeless
|
UTF-8
| 1,131
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Steal one jobserver token from a running parallel `make` (pid $1) by
# reading a byte from its jobserver pipe via /proc, effectively reducing
# its parallelism by one. Optional $2/$3 override the read/write fd numbers
# (default: 3 and 4, GNU make's traditional jobserver fds).
set -eu -o pipefail
rdfd=${2:-3}
wrfd=${3:-$(($rdfd+1))}
if [[ -z "$1" ]]; then
	echo >&2 "Usage: $0 <pid>"
	exit 1
fi
exe="$(realpath -eq /proc/$1/exe)"
if [[ -z "$exe" ]]; then
	echo >&2 "error: no such pid"
	exit 1
fi
# Refuse to touch processes that are not the same `make` binary we resolve.
exe_expect="$(realpath "$(which make)")"
if [[ "$exe" != "$exe_expect" ]]; then
	echo >&2 "error: pid $1 is $exe, not $exe_expect"
	exit 1
fi
# Both fds must be the SAME anonymous pipe — that is what identifies the
# jobserver channel of a parallel make.
rdpipe="$(readlink /proc/$1/fd/$rdfd)"
if [[ "$rdpipe" != pipe:* ]]; then
	echo >&2 "error: fd $rdfd is not a pipe; is this a parallel make?"
	exit 1
fi
wrpipe="$(readlink /proc/$1/fd/$wrfd)"
if [[ "$wrpipe" != pipe:* ]]; then
	echo >&2 "error: fd $wrfd is not a pipe; is this a parallel make?"
	exit 1
fi
if [[ "$rdpipe" != "$wrpipe" ]]; then
	echo >&2 "error: fd $rdfd and fd $wrfd are not the same pipe; is this a parallel make?"
	exit 1
fi
echo >&2 "Waiting for token to steal..."
# Reading one byte removes a token; make hands out '+' characters.
if ! read -r -n1 c < /proc/$1/fd/$rdfd; then
	echo >&2 "error: failed to steal token"
	exit 1
fi
if [[ "$c" != "+" ]]; then
	echo >&2 "warning: got a $c, not a +"
fi
# TODO: show actual process name
echo >&2 "Successfully stole a jobserver token from make $1"
| true
|
956abda317b045c792a92e7cec5a51cf3cad2fcd
|
Shell
|
Astronomic2003/LinuxGSM
|
/lgsm/functions/fix_mta.sh
|
UTF-8
| 827
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# LinuxGSM fix_mta.sh function
# Author: Daniel Gibbs
# Contributor: ChaosMTA
# Website: https://gameservermanagers.com
# Description: Installs the libmysqlclient for database functions on the server
# NOTE(review): 'local' only works because LinuxGSM sources this file from
# inside a function — confirm against the caller.
local commandname="FIX"
local commandaction="Fix"
local function_selfname="$(basename $(readlink -f "${BASH_SOURCE[0]}"))"
if [ ! -f "${lgsmdir}/lib/libmysqlclient.so.16" ]; then
	fixname="libmysqlclient16"
	fn_fix_msg_start_nl
	sleep 1
	fileurl="https://nightly.mtasa.com/files/modules/64/libmysqlclient.so.16"; filedir="${lgsmdir}/lib"; filename="libmysqlclient.so.16"; executecmd="executecmd" run="norun"; force="noforce"; md5="6c188e0f8fb5d7a29f4bc413b9fed6c2"
	# Fix: pass "${filename}" as the third argument — the original invoked a
	# nonexistent command via "$(unknown)", handing fn_fetch_file an empty
	# destination filename even though $filename was set just above.
	fn_fetch_file "${fileurl}" "${filedir}" "${filename}" "${executecmd}" "${run}" "${force}" "${md5}"
	fn_fix_msg_end
fi
export LD_LIBRARY_PATH=:"${libdir}"
| true
|
ce5ecf53396ec6535fb8fc6b761df92fec626837
|
Shell
|
heroku/tatara
|
/bin/build
|
UTF-8
| 600
| 3.96875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
# Map a GOOS value to the artifact-name suffix used for releases:
# darwin -> "macos", windows -> "windows.exe", anything else unchanged.
# Output is written without a trailing newline.
out_file() {
  case "$1" in
    darwin)  printf '%s' "macos" ;;
    windows) printf '%s' "windows.exe" ;;
    *)       printf '%s' "$1" ;;
  esac
}
# Cross-compile the tatara binary for one platform.
# Globals:   reads the VERSION file in the current directory.
# Arguments: $1 - GOOS value (darwin|linux|windows)
# Outputs:   binary written to out/tatara-<version>-<suffix from out_file>.
build() {
  local platform=${1}
  # Version string is embedded into the binary via -ldflags (main.Version).
  local version=$(<VERSION)
  echo "Building for ${platform}..."
  GOOS=${platform} go build -ldflags "-X main.Version=$version" -o "out/tatara-$version-$(out_file $platform)" ./cmd/...
}
# Run from the repository root so VERSION and out/ resolve correctly.
# Bug fix: quote the expansions — a checkout path containing spaces would
# otherwise word-split the cd and build arguments.
cd "$(dirname "${BASH_SOURCE[0]}")/.."
mkdir -p out
if [ -n "$1" ]; then
  # Build only the platform named on the command line.
  build "$1"
else
  # Default: build every supported platform.
  os_list=(darwin linux windows)
  for ((i=0; i < ${#os_list[@]}; i++)); do
    build "${os_list[i]}"
  done
fi
| true
|
e2ff1bdfe02f74285889cbf0c5875974239cf8e8
|
Shell
|
sapslaj/site-update-notifier
|
/siteupdate.sh
|
UTF-8
| 756
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Poll a URL forever and e-mail the page whenever its content changes.
SLEEPTIME=30                 # Seconds between checks
URL="http://example.com"     # URL to check
EMAIL="email@example.com"    # Email to send updates to
SUBJECT="Site Update"        # Subject of email
# Bug fix: /tmp/siteupdate was never created, so the touch/curl calls below
# failed on a fresh system.
mkdir -p /tmp/siteupdate
touch /tmp/siteupdate/current_index /tmp/siteupdate/latest_index
while :
do
	curl "$URL" > /tmp/siteupdate/current_index
	CHANGES=$(diff /tmp/siteupdate/current_index /tmp/siteupdate/latest_index)
	if [ "$CHANGES" != "" ]
	then
		# Bug fix: $SUBJECT must be quoted — "Site Update" word-splits and
		# 'Update' was being treated as an extra mail recipient.
		mail -s "$SUBJECT" "$EMAIL" < /tmp/siteupdate/current_index # Change this as needed
		cat /tmp/siteupdate/current_index > /tmp/siteupdate/latest_index
	fi
	sleep "$SLEEPTIME"
done
| true
|
a75085dd9353a552c5624a002e9ed063bf79b75b
|
Shell
|
TinyMagicka/TinyDec
|
/tools/dex2jar/dex-dump.sh
|
UTF-8
| 526
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# copy from $Tomcat/bin/startup.sh
# Resolve links - $0 may be a softlink; follow the whole chain so PRGDIR ends
# up as the directory that physically contains this script and its lib/ jars.
PRG="$0"
while [ -h "$PRG" ] ; do
  listing=$(ls -ld "$PRG")
  link=$(expr "$listing" : '.*-> \(.*\)$')
  case "$link" in
    /*) PRG="$link" ;;                    # absolute target: take it verbatim
    *)  PRG=$(dirname "$PRG")/"$link" ;;  # relative: resolve against link dir
  esac
done
PRGDIR=$(dirname "$PRG")
#
# Collect every jar shipped under lib/ into a ':'-separated classpath.
_classpath="."
for jar in "$PRGDIR"/lib/*.jar
do
  _classpath="${_classpath}:${jar}"
done
java -Xms512m -Xmx1024m -classpath "${_classpath}" "com.googlecode.dex2jar.util.Dump" $1 $2 $3 $4 $5 $6
| true
|
e5c8bdf6433543ab865832a29ea6f8109c1201a1
|
Shell
|
Milchen83/YourFritz
|
/helpers/functions/yf_get_last_host_in_subnet.function
|
UTF-8
| 2,454
| 3.9375
| 4
|
[] |
no_license
|
#! /bin/true
# vi: set tabstop=4 syntax=sh : # colorize it in 'vi' and use shorter tabstops
#######################################################################################
# #
# get_last_host_in_subnet - compute the last usable IPv4 address in the specified #
# subnet #
# #
# parameters: #
# $1 - the subnet to use for computations in dotted decimal notation with mask #
# suffix #
# #
# stdout: #
# the last usable host address (as hexadecimal string) #
# #
# return code: #
# 0 - stdout is valid #
# 1 - parameter error #
# #
# remarks: #
# The really last address (all unmasked bits set to one) is interpreted as the #
# 'broadcast' address and is skipped. There's no usable result with a mask greater #
# than 30, because the possible two addresses with a 31-bit mask can't form a valid #
# subnet. #
# #
#######################################################################################
yf_get_last_host_in_subnet()
(
	subnet="$1"
	ip_part="${subnet%%/*}"
	mask_bits="${subnet#*/}"
	yf_is_decimal "$mask_bits" || return 1
	# masks of /0, /1 and anything above /30 cannot form a usable subnet
	if [ "$mask_bits" -le 1 ] || [ "$mask_bits" -gt 30 ]; then
		return 1
	fi
	host_hex="$(yf_ipv4_address "$ip_part")"
	host_dec=$(yf_hex2dec "$host_hex")
	# set all host bits, then step back over the broadcast address
	host_bits_mask=$(( ( 1 << ( 32 - mask_bits ) ) - 1 ))
	yf_dec2hex "$(( ( host_dec | host_bits_mask ) - 1 ))"
	return 0
)
| true
|
a63e645ab9ecf91e4cd6b56ab9860672553712c5
|
Shell
|
Mryan2005/MinecraftServerAutoDeploy
|
/src/push/push.sh
|
UTF-8
| 193
| 2.703125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Commit and push the Minecraft server directory under /root/<server_name>.
server_name=
# server name (must be filled in before use)
# server files are kept under the /root folder
# Bug fix: the original format "%Y-%m-%d,%H:%m:%s" used %m (month) and %s
# (epoch seconds) where minutes (%M) and seconds (%S) were intended.
cur_dateTime="`date +%Y-%m-%d,%H:%M:%S`"
# Abort instead of committing the wrong directory when the cd fails.
cd /root/${server_name} || exit 1
git add .
# Bug fix: cur_dateTime was computed but never used; stamp the commit with it.
git commit -m "update ${cur_dateTime}"
git push origin
| true
|
1b387ff80c31a863e5cf689161db93c4f67f11b9
|
Shell
|
UCL-RITS/rcps-buildscripts
|
/freesurfer-5.3.0_install
|
UTF-8
| 1,253
| 3.78125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
###############################################
# Installing Freesurfer
#
# by Heather Kelly, 2015
#
# All settings are overridable via the environment.
NAME=${NAME:-freesurfer}
VERSION=${VERSION:-5.3.0}
INSTALL_PREFIX=${INSTALL_PREFIX:-/shared/ucl/apps/$NAME/$VERSION}
MD5=${MD5:-6642289df823ebc27de52af57e9b3989}
SRC_ARCHIVE=${SRC_ARCHIVE:-ftp://surfer.nmr.mgh.harvard.edu/pub/dist/freesurfer/${VERSION}/freesurfer-Linux-centos6_x86_64-stable-pub-v${VERSION}.tar.gz}
LICENSE_FILE=${LICENSE_FILE:-/home/ccspapp/Software/freesurfer/license-file}
set -e
# Note: 4.4G compressed so download somewhere there's enough space
temp_dir=$(mktemp -d -p /dev/shm)
# Bug fix: the downloaded archive was previously left behind in /dev/shm;
# clean the temp dir up on any exit path.
trap 'rm -rf "$temp_dir"' EXIT
cd "$temp_dir"
wget "$SRC_ARCHIVE"
archive=$(basename "${SRC_ARCHIVE}")
CHECKSUM=$(md5sum "$archive" | awk '{print $1}')
if [ "$MD5" == "$CHECKSUM" ]
then
    mkdir -p "$INSTALL_PREFIX"
    cd "$INSTALL_PREFIX"
    tar -xvf "$temp_dir/$archive"
    cd ${NAME}
    cp "$LICENSE_FILE" .license
    cp /shared/ucl/apps/build_scripts/freesurfer-patches/freesurfer-5.3.0_matlab.patch .
    # the .m files are dos-formatted, so convert before patching
    dos2unix matlab/SearchProjectionOnPial.m
    patch -p0 -i freesurfer-5.3.0_matlab.patch
    unix2dos matlab/SearchProjectionOnPial.m
else
    echo "Hash mismatch."
    echo "Expected: $MD5"
    echo "Got: $CHECKSUM"
    # Bug fix: a checksum mismatch previously exited 0, so callers could not
    # detect the failed install.
    exit 1
fi
| true
|
0f59802fae37b5e02135e06d41a83a829586af61
|
Shell
|
Cloudxtreme/docker-php-1
|
/container/root/run.d/01-install.sh
|
UTF-8
| 338
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# As part of the "Two Phase" build, the first phase typically runs with composer keys mounted,
# allowing the dependencies to be installed, the result of which is committed
# Guard clause: nothing to do unless composer credentials were mounted in.
[[ -f /root/.composer/config.json ]] || exit 0
echo "[install] app dependencies"
composer install --optimize-autoloader
# Signal the surrounding build harness that this phase is complete.
exit $SIGNAL_BUILD_STOP
| true
|
68466bc0910badf95842b706b6b1a404bb3bb646
|
Shell
|
rastaman/configurations
|
/CDH4.pseudo-distributed/root_scripts/hdfs_init.sh
|
UTF-8
| 1,068
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Initialise the HDFS directory tree for a CDH4 pseudo-distributed node.
# Every filesystem command runs as the 'hdfs' superuser.
# Bug fix: added -p throughout — several of these paths (/tmp/hadoop-yarn,
# .../staging/history, /user) had no existing parent, so a plain -mkdir
# failed and every dependent chmod/chown failed with it.
# system-wide structure
sudo -u hdfs hdfs dfs -mkdir -p /tmp
sudo -u hdfs hdfs dfs -chmod -R 1777 /tmp
sudo -u hdfs hdfs dfs -mkdir -p /tmp/hadoop-yarn/staging
sudo -u hdfs hdfs dfs -chmod -R 1777 /tmp/hadoop-yarn/staging
sudo -u hdfs hdfs dfs -mkdir -p /tmp/hadoop-yarn/staging/history/done_intermediate
sudo -u hdfs hdfs dfs -chmod -R 1777 /tmp/hadoop-yarn/staging/history/done_intermediate
sudo -u hdfs hdfs dfs -chown -R mapred:mapred /tmp/hadoop-yarn/staging
sudo -u hdfs hdfs dfs -mkdir -p /var/log/hadoop-yarn
sudo -u hdfs hdfs dfs -chown yarn:mapred /var/log/hadoop-yarn
# user-specific structure
sudo -u hdfs hdfs dfs -mkdir -p /user/bohdan
sudo -u hdfs hdfs dfs -chown bohdan /user/bohdan
sudo -u hdfs hdfs dfs -mkdir -p /user/history
sudo -u hdfs hdfs dfs -chmod -R 1777 /user/history
sudo -u hdfs hdfs dfs -mkdir -p /user/yarn
sudo -u hdfs hdfs dfs -chown yarn /user/yarn
# HBase-specific structure
sudo -u hdfs hdfs dfs -mkdir -p /hbase
sudo -u hdfs hdfs dfs -chown hbase /hbase
# print created HDFS filesystem tree structure
sudo -u hdfs hdfs dfs -ls -R /
| true
|
3a4b1c48b02167176e865ef7a96d5aaf296234bd
|
Shell
|
hdnes/MavicPro
|
/Firmware/Firmware_01.03.0400/_AC_wm220_0801_v01.04.17.03_20170120.pro.fw.sig.extracted/system/etc/install-recovery.sh
|
UTF-8
| 628
| 2.75
| 3
|
[] |
no_license
|
#!/system/bin/sh
# Re-flash the recovery ramdisk/kernel partitions when the content currently
# on flash no longer matches (applypatch -c verifies the partition).
# NOTE(review): partition paths are specific to the comip-mmc.1 platform —
# confirm before reusing on other hardware.
if ! applypatch -c EMMC:/dev/block/platform/comip-mmc.1/by-name/ramdisk_recovery::; then
  # check failed -> write the bundled ramdisk image over the partition
  log -t recovery "Installing new recovery image"
  dd if=/system/etc/ramdisk_recovery.img of=/dev/block/platform/comip-mmc.1/by-name/ramdisk_recovery
else
  log -t recovery "recovery image already installed"
fi
if ! applypatch -c EMMC:/dev/block/platform/comip-mmc.1/by-name/kernel_recovery::; then
  # same procedure for the recovery kernel partition
  log -t recovery "Installing new kernel recovery image"
  dd if=/system/etc/kernel_recovery of=/dev/block/platform/comip-mmc.1/by-name/kernel_recovery
else
  log -t recovery "kernel recovery image already installed"
fi
| true
|
8a42217ed41b782129692f475f0200786a413a7f
|
Shell
|
falcon-computing/falcon-genome
|
/scripts/stage-worker/concatVCF.sh
|
UTF-8
| 2,149
| 4.03125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# concatVCF stage: merge per-partition gVCF files into one compressed,
# indexed gVCF. Shared helpers (log_error, check_arg, ...) come from the
# sourced files below.
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source $DIR/../globals.sh
source $DIR/common.sh
stage_name=concatVCF
# Prevent this script to be running alone
# ($0 != BASH_SOURCE means we were sourced rather than executed directly).
if [[ $0 != ${BASH_SOURCE[0]} ]]; then
  # Script is sourced by another shell
  cmd_name=`basename $0 2> /dev/null`
  if [[ "$cmd_name" != "fcs-genome" ]]; then
    log_error "This script should be started by 'fcs-genome'"
    # 'return' (not 'exit'): we are sourced, so do not kill the caller.
    return 1
  fi
else
  # Script is executed directly
  log_error "This script should be started by 'fcs-genome'"
  exit 1
fi
# Print the command-line synopsis for the concat stage.
print_help() {
  printf '%s\n' "USAGE: fcs-genome concat|concatGVCF -i <input_dir> -o <output>"
}
# At least one flag/value pair is required; otherwise show usage and stop.
if [[ $# -lt 2 ]]; then
  print_help
  exit 1
fi
# Consume the argument list; each value-taking flag shifts once here and once
# more at the bottom of the loop.
while [[ $# -gt 0 ]]; do
  case "$1" in
    -i|--input_dir)
      input_vcf_dir="$2"
      shift   # consume the value
      ;;
    -o|--output_dir)
      output_dir="$2"
      shift   # consume the value
      ;;
    -v|--verbose)
      verbose=2
      ;;
    *)
      # unknown option
      log_error "failed to parse argument $1"
      ;;
  esac
  shift   # consume the flag (or the unrecognised word)
done
# Validate required arguments (helpers come from common.sh); output_dir
# defaults to the current working directory.
check_arg "-i" "input_vcf_dir"
check_arg "-o" "output_dir" $(pwd)
check_args
# Get absolute file path
readlink_check input_vcf_dir
readlink_check output_dir
readlink_check log_dir
create_dir $output_dir
create_dir $log_dir
check_output_dir $output_dir
check_output_dir $log_dir
vcf_sample_id=`get_sample_id $input_vcf_dir`
# TODO: check if all the partitions are present
# Version sort (-v) so part-2 sorts before part-10 when concatenating.
input_vcf_list=$(ls -v $input_vcf_dir/*.gvcf)
if [ -z "$input_vcf_list" ]; then
  echo "Cannot find input vcf files in $input_vcf_dir"
  # Bug fix: 'exit -1' is not a valid exit status (bash maps it to 255);
  # use a plain failure code.
  exit 1
fi
# Concatenate the partition gVCFs into one file.
$BCFTOOLS concat $input_vcf_list \
    -o $output_dir/${vcf_sample_id}.gvcf \
    &>$log_dir/concat.log
if [ "$?" -ne "0" ]; then
  log_error "bcftools concat failed"
  exit 1;
fi
# bgzip-compress the result so it can be tabix-indexed.
$BGZIP -c $output_dir/${vcf_sample_id}.gvcf \
    > $output_dir/${vcf_sample_id}.gvcf.gz \
    2>> $log_dir/concat.log
if [ "$?" -ne "0" ]; then
  log_error "bgzip compression failed"
  exit 1;
fi
# delete uncompressed gvcf file
rm $output_dir/${vcf_sample_id}.gvcf
$TABIX -p vcf $output_dir/${vcf_sample_id}.gvcf.gz \
    2>> $log_dir/concat.log
if [ "$?" -ne "0" ]; then
  log_error "tabix failed"
  exit 1;
fi
| true
|
652d1900ce5b7d1d1e8981f7969441d612d575cb
|
Shell
|
toke/dotfiles
|
/bash/.config/bash/helper.bash
|
UTF-8
| 2,033
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Bash helper functions
#
#
# Define an alias only when its target resolves to an existing command,
# similar to: [ -e /usr/bin/translate-shell ] && alias abc="/usr/bin/abc"
#
# Arguments: $1 - alias name, $2 - command the alias should run
# Returns:   0 if the alias was created, non-zero otherwise
#
function bin_alias {
	local aliasname="$1"
	local aliascontent="$2"
	# Bug fix: missing *either* argument now shows usage (was '&&', which
	# required both to be missing), and we 'return' instead of 'exit' so a
	# sourced interactive shell is not terminated. The usage text also used
	# $0, which is the shell's name, not this function's.
	if [[ -z "$aliasname" || -z "$aliascontent" ]] ; then
		echo "Call ${FUNCNAME[0]} alias-name alias-value"
		echo "Sets alias only if alias-value is a known command (see man 1 command)"
		return 1
	fi
	command -v "$aliascontent" > /dev/null && \
		alias "$aliasname"="$aliascontent"
}
#
# Calls cssh with the result of the _ssh._tcp records for Host.
# srv-cssh user@host # user@ is optional
# host needs _ssh._tcp. SRV DNS RR
# All listed hosts will be connected
#
# NOTE(review): the command substitution at the bottom *executes* the string
# built by awk ("cssh [-l user] host1 host2 ..."). This relies on GNU awk's
# -e option and on dig/cssh being installed — confirm on target systems.
#
function srv-cssh {
	# split "user@host" on '@' into an array: [0]=user-or-host, [1]=host
	local array=(${1//@/ })
	local host=""
	local user=""
	if [[ -z ${array[0]} ]] ; then
		echo "Usage: srv-cssh [user@]host"
		return 1
	elif [[ -z ${array[1]} ]] ; then
		# no '@' present: the whole argument is the host, no login override
		host=${array[0]}
		user=""
	else
		host=${array[1]}
		user="-l ${array[0]}"
	fi
	local dns="@a.ns.kerpe.net" # dig syntax @ns
	# $4 of each SRV answer line is the target host name
	$(dig +short -t SRV "_ssh._tcp.${host}" ${dns} \
		| awk -v USER="${user}" -e 'BEGIN{ printf "cssh " USER} /^[a-zA-Z0-9\.\-_ ]+$/ { printf " " $4;} END {print "";}')
}
# Emit (without newline) the ANSI escape sequence that switches the terminal
# foreground to the named color; unknown names fall back to the default (39).
function _color {
	local code=39
	case "$1" in
		black)  code=30 ;;
		red)    code=31 ;;
		green)  code=32 ;;
		yellow) code=33 ;;
	esac
	printf '\e[%sm' "$code"
}
# Print STRING wrapped in the escape sequence for COLOR, without a trailing
# newline. Unrecognised styles print the string followed only by the reset.
# Arguments: $1 - style name (bold|dim|reverse|red|yellow|cyan), $2 - text
function cecho {
	local style="$1"
	local text="$2"
	local reset="\e[0m"
	local prefix=""
	case "$style" in
		bold)    prefix="\e[1m" ;;
		dim)     prefix="\e[2m" ;;
		reverse) prefix="\e[7m" ;;
		red)     prefix="\e[31m" ;;
		yellow)  prefix="\e[93m" ;;
		cyan)    prefix="\e[36m" ;;
	esac
	echo -ne "${prefix}${text}${reset}"
}
| true
|
fd9c741221ce2725d9ec9e81f4901f7ba39af9c5
|
Shell
|
jaydensun/userful-tool
|
/kafka/consumer_ip_find.sh
|
UTF-8
| 607
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/sh
# param
# NOTE(review): the function below uses bash-only features (C-style for,
# (( ))); the /bin/sh shebang only works where sh is bash — TODO confirm.
# Kafka topic whose consumers we want to locate, and the broker port.
topic=OMS_TO_CX_APPOINTMENT
port=9093
# Opening of the tcpdump command line; the closing quote is appended later.
cmd="tcpdump 'tcp dst port $port"
# Build a tcpdump byte-match expression selecting Kafka FETCH requests
# (api_key 1) whose topic name equals the global $topic. All offsets are
# relative to the TCP payload, reached via the header length tcp[12]/4.
# Globals: reads $topic; leaves its scratch variables global like the
# original did.
function f_cmd_conn()
{
	type_pos=5
	client_id_pos=13
	len_pos=31
	data_pos=32
	header_len="tcp[12]/4"
	client_id_len="tcp[$header_len+$client_id_pos]"
	# api_key must equal 1 and the encoded topic length must match exactly
	conn="tcp[$header_len+$type_pos]=1 and tcp[$header_len+$len_pos+$client_id_len]=${#topic}"
	# compare the topic byte-by-byte at successive payload offsets;
	# "'<char>" makes printf emit the character's code in hex
	for ((i = 0; i < ${#topic}; i++))
	do
		conn+=$(printf " and tcp[$header_len+$data_pos+$client_id_len]=0x%x" "'${topic:$i}")
		(( data_pos = data_pos + 1 ))
	done
	echo $conn
}
# Append the topic filter and the closing quote (the argument 0 is ignored
# by f_cmd_conn).
cmd=$cmd" and (`f_cmd_conn 0`)'"
# Print the assembled tcpdump command for the operator to run.
echo $cmd
| true
|
1f39b97368418266eeaf36af47865caabc4b1fe4
|
Shell
|
cha63506/packages-1
|
/hibernate-script/trunk/PKGBUILD
|
UTF-8
| 1,544
| 2.796875
| 3
|
[] |
no_license
|
# $Id$
# Maintainer: Thomas Baechler <thomas@archlinux.org>
# Arch Linux PKGBUILD for the TuxOnIce hibernate scripts.
pkgname=hibernate-script
pkgver=1.99
pkgrel=2
pkgdesc="Set of scripts for managing tux on ice, hibernation and suspend to RAM"
arch=('i686' 'x86_64')
url="http://www.tuxonice.net"
license=('GPL')
depends=('bash' 'kbd')
options=(!strip)
# Config files preserved across package upgrades.
backup=('etc/hibernate/hibernate.conf' 'etc/hibernate/suspend2.conf' \
        'etc/hibernate/disk.conf' 'etc/hibernate/ram.conf' \
        'etc/hibernate/common.conf' 'etc/hibernate/blacklisted-modules' \
        'etc/hibernate/ususpend-both.conf' 'etc/hibernate/sysfs-ram.conf' \
        'etc/hibernate/ususpend-ram.conf' 'etc/hibernate/sysfs-disk.conf' \
        'etc/hibernate/ususpend-disk.conf')
source=(http://www.tuxonice.net/downloads/all/${pkgname}-${pkgver}.tar.gz
        hibernate-script-${pkgver}-arch.patch
        hibernate.rc
        add-freedesktop.org-locking.patch)
md5sums=('11832e77edc4a13330aaced8cea52dfb'
         '2e6dba2c90e90952b8b1f167045c38e6'
         '5d7e83902a00bf72b1de97a5450a558e'
         '626f4cf975b0ad4bd51245ca679943fe')
# NOTE(review): '|| return 1' is the legacy makepkg error convention;
# modern makepkg runs build() under errexit.
build() {
  cd $srcdir/hibernate-script-$pkgver
  # Fix scriptlets to work properly with Archlinux
  patch -Np1 -i $srcdir/hibernate-script-${pkgver}-arch.patch || return 1
  patch -Np1 -i $srcdir/add-freedesktop.org-locking.patch || return 1
  # install.sh honours these environment variables for its destinations
  export BASE_DIR=${pkgdir}
  export PREFIX=/usr
  export MAN_DIR=$BASE_DIR$PREFIX/share/man
  install -dm755 ${pkgdir}/etc/{rc,logrotate}.d
  ./install.sh || return 1
  install -m 755 $srcdir/hibernate.rc $pkgdir/etc/rc.d/hibernate-cleanup
}
| true
|
3f83597c25d00cad9726aa4acd9f0c5915aeee67
|
Shell
|
jsffeng/ShredderReconstruct
|
/test/acceptance_test.sh
|
UTF-8
| 3,304
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Acceptance tests for the shredder/unshredder pair.
# Bug fix: the shebang was '#/bin/bash' (missing '!'), so execution under
# bash was not guaranteed.
# Print a message on stdout and abort the whole test run with status 1.
function EXIT
{
  local msg=${1}
  echo "${msg}"
  exit 1
}
# Abort the run (via EXIT) unless $1 names an existing, non-empty file.
function ensure_file_exist
{
  file=${1}
  # Bug fix: quote the expansion so paths with spaces do not break the test.
  if [ ! -s "${file}" ]; then
    EXIT "File ${file} doesn't exist, exit!"
  fi
}
# Abort the run (via EXIT) unless $1 names an existing directory.
function ensure_dir_exist
{
  dir=${1}
  # Bug fix: quote the expansion so paths with spaces do not break the test.
  if [ ! -d "${dir}" ]; then
    EXIT "Directory ${dir} doesn't exist, exit!"
  fi
}
# Move every *.ascii file in $CURRENT_DIR aside as *.ascii.orig<pid> so the
# tests can create their own ascii files without clobbering the user's.
function backup_ascii_files
{
  local file
  # Bug fix / idiom: iterate the glob directly instead of parsing 'ls'
  # output, which word-splits on unusual file names.
  for file in ${CURRENT_DIR}/*.ascii
  do
    [ -e "${file}" ] || continue   # glob did not match anything
    mv -- "${file}" "${file}.orig$$"
  done
}
# Delete the ascii files generated by the tests and put back the originals
# saved by backup_ascii_files (stripping the .orig<pid> suffix).
function restore_ascii_files
{
  local file
  # Idiom fix: iterate globs directly instead of parsing 'ls' output.
  for file in ${CURRENT_DIR}/*.ascii
  do
    [ -e "${file}" ] || continue
    rm -rf -- "${file}"
  done
  for file in ${CURRENT_DIR}/*.ascii.orig$$
  do
    [ -e "${file}" ] || continue
    mv -- "${file}" "${file%.orig$$}"
  done
}
#### Main Function Start Here ##########
# All paths are relative to the repo root the script is launched from.
CURRENT_DIR=.
TEST_DIR=${CURRENT_DIR}/test
SAMPLE_DIR=${TEST_DIR}/STinput
RESULT_DIR=${TEST_DIR}/SToutput
SHREDDER_CMD=${CURRENT_DIR}/shredder
UNSHREDDER_CMD=${CURRENT_DIR}/unshredder
# Both binaries and the sample input directory must exist before we start.
ensure_file_exist ${SHREDDER_CMD}
ensure_file_exist ${UNSHREDDER_CMD}
ensure_dir_exist ${SAMPLE_DIR}
# Start every run from an empty result directory.
if [ -d ${RESULT_DIR} ]
then
rm -rf ${RESULT_DIR}/*
else
mkdir -p ${RESULT_DIR}
fi
# Fixed file names the shredder/unshredder binaries read and write in
# $CURRENT_DIR, plus the names used when archiving a test case's artifacts.
SHREDDER_INPUT_NAME=full_text.ascii
SHREDDER_OUTPUT_NAME=shredded_text.ascii
UNSHREDDER_INPUT_NAME=${SHREDDER_OUTPUT_NAME}
UNSHREDDER_OUTPUT_NAME=restored_text.ascii
TEST_ORGINAL=original.ascii
TEST_SHREDDED=shredded.ascii
TEST_RESTORED=restored.ascii
# Counters: each sample is exercised MAX times (shredding is presumably
# randomized, so repetition adds coverage — TODO confirm).
declare -i success_t=0
declare -i failure_t=0
declare -i sum=0
declare -i caseid=1
declare -i MAX=5
declare -i i=0
# Backup *.ascii files in $CURRENT_DIR because new *.ascii files will be created & used by the tests
backup_ascii_files
for SAMPLE_FILE in $(ls ${SAMPLE_DIR}/sample* 2>/dev/null)
do
i=0
while (( i < MAX ))
do
((caseid=sum+1))
# Zero-pad single-digit case ids so directories sort naturally.
if (( caseid < 10 ))
then
TESTCASE_DIR=${RESULT_DIR}/TESTCASE0${caseid}
else
TESTCASE_DIR=${RESULT_DIR}/TESTCASE${caseid}
fi
mkdir -p ${TESTCASE_DIR}
cp ${SAMPLE_FILE} ${TESTCASE_DIR}/${TEST_ORGINAL}
cp ${SAMPLE_FILE} ${CURRENT_DIR}/${SHREDDER_INPUT_NAME}
# Run shredder to generate $SHREDDER_OUTPUT_NAME from $SHREDDER_INPUT_NAME
${SHREDDER_CMD} >/dev/null 2>&1
if [ -s ${SHREDDER_OUTPUT_NAME} ]
then
cp ${SHREDDER_OUTPUT_NAME} ${TESTCASE_DIR}/${TEST_SHREDDED}
fi
# Run unshredder to generate $UNSHREDDER_OUTPUT_NAME from $UNSHREDDER_INPUT_NAME
${UNSHREDDER_CMD} >/dev/null 2>&1
if [ -s ${UNSHREDDER_OUTPUT_NAME} ]
then
cp ${UNSHREDDER_OUTPUT_NAME} ${TESTCASE_DIR}/${TEST_RESTORED}
fi
TESTCASE=${TESTCASE_DIR##*/}
# Verify whether the result is expected
diff ${TESTCASE_DIR}/${TEST_ORGINAL} ${TESTCASE_DIR}/${TEST_RESTORED} >/dev/null 2>& 1
if [[ $? == 0 ]]
then
echo "${TESTCASE}:PASS"
((success_t=success_t+1))
else
echo "${TESTCASE}:Failed"
((failure_t=failure_t+1))
fi
((i=i+1))
((sum=sum+1))
done
done
# Remove generated *.ascii by the tests and restore original *.ascii from the backup
restore_ascii_files
# Overall verdict: OK only if every case passed.
if [[ ${success_t} == ${sum} && ${failure_t} == 0 ]]
then
MSG1="OK"
else
MSG1="FAIL"
fi
SUM_MSG="\n--------------------------------\n${sum} Tests: ${success_t} Successes ${failure_t} Failures\n${MSG1}"
echo -e ${SUM_MSG}
echo -e "NOTE: Please find test details under directory ${RESULT_DIR}.\n"
| true
|
2d07c73588cf3a95e632dd6d66355a166d671aa1
|
Shell
|
ialves19/WGS
|
/merging_WGS_datasets.bash
|
UTF-8
| 2,835
| 3.15625
| 3
|
[] |
no_license
|
#$ -S /bin/bash
#$ -cwd
#$ -N m1k_$JOB_ID
#$ -o m1k_o_$JOB_ID
#$ -e m1k_e_$JOB_ID
#$ -m a
#$ -M Isabel.Alves@univ-nantes.fr
#setting the sart of the job
res1=$(date +%s.%N)
################################
##
## this script takes the vcf (.GZ) file with the FR samples as an argument $1
## and detects from the file name, provided as arg, the chr it needs to look
## for among the 1000G files.
## it also needs a file with all the FR and 1000G samples we want to keep in the vcf
##
## it merges the vcf with the FR samples and the 1000G only for the shared/all sites
## depending we setup the keepCommon= TRUE/FALSE respectively.
##
## To launch it qsub merging_WGS_1000G.bash french_WGS.vcf
## NOTE: DO NOT ADD THE .gz to the input file.
##
## Oct 2018
###############################
inputFolder="/sandbox/shares/mages/GoNL1"
rawVcfFile=$1
# NOTE(review): keepCommon/sufixTag are described in the header comment but
# not referenced below — presumably consumed by a sibling script; confirm.
keepCommon=true
sufixTag="allSites.v2"
module load bcftools
module load vcftools
if [ ! -d "${inputFolder}/clean" ];
then
mkdir ${inputFolder}/clean
fi
# Derive "chrN" and the bare number N from the input file name.
chrID=`echo $rawVcfFile | sed 's/.*\(chr[0-9]*\).*/\1/'`
chrNb=`echo $rawVcfFile | sed 's/.*chr\([0-9]*\).*/\1/'`
prefix=`echo $rawVcfFile | cut -d$'_' -f1`
echo "Merging chromosome: $chrID"
#keep biallelic sites in the 1000G vcf & keeping only GBR, IBS and TSU & replacing chr tag and zipping
bcftools view -m2 -M2 -v snps -f PASS ${inputFolder}/release5.4/SNVs/${rawVcfFile} -Ou | bcftools view -S ${inputFolder}/goNL_samples_children.txt -Ou \
| bcftools view -c1 -O v | sed s/^$chrNb/$chrID/g | bcftools view -Oz -o ${inputFolder}/clean/${prefix}.sampled.clean.${chrID}.vcf.gz
# indexing files
bcftools index -t ${inputFolder}/clean/${prefix}.sampled.clean.${chrID}.vcf.gz
# Restrict to the accessible-genome regions for this chromosome.
bcftools view -R /sandbox/shares/mages/WGS_PREGO_Finistere_GAZEL/isabel/accessGenome/accessGen_${chrID}.txt -Oz -o ${inputFolder}/clean/${prefix}.sampled.clean.acceGen.${chrID}.vcf.gz ${inputFolder}/clean/${prefix}.sampled.clean.${chrID}.vcf.gz
# indexing files
bcftools index -t ${inputFolder}/clean/${prefix}.sampled.clean.acceGen.${chrID}.vcf.gz
rm ${inputFolder}/clean/${prefix}.sampled.clean.${chrID}.vcf.gz*
# Genotype-missingness (>=90% called) and Hardy-Weinberg (p>=1e-4) filters.
vcftools --gzvcf ${inputFolder}/clean/${prefix}.sampled.clean.acceGen.${chrID}.vcf.gz --max-missing 0.90 --hwe 0.0001 --recode --stdout \
| bcftools view -Oz -o ${inputFolder}/clean/${prefix}.sampled.clean.acceGen.maxmiss.90.hwe1e4.${chrID}.vcf.gz
# indexing files
bcftools index -t ${inputFolder}/clean/${prefix}.sampled.clean.acceGen.maxmiss.90.hwe1e4.${chrID}.vcf.gz
rm ${inputFolder}/clean/${prefix}.sampled.clean.acceGen.${chrID}.vcf.gz*
#timing the job
res2=$(date +%s.%N)
dt=$(echo "$res2 - $res1" | bc)
dd=$(echo "$dt/86400" | bc)
dt2=$(echo "$dt-86400*$dd" | bc)
dh=$(echo "$dt2/3600" | bc)
dt3=$(echo "$dt2-3600*$dh" | bc)
dm=$(echo "$dt3/60" | bc)
ds=$(echo "$dt3-60*$dm" | bc)
echo "Total runtime: $dd days $dh hrs $dm min $ds secs"
| true
|
13d8e9316a9d5928887db93cec2a1e0b10a454cf
|
Shell
|
NERC-CEH/ERW
|
/ERW Installation Scripts/build/install_Bind.sh
|
UTF-8
| 5,583
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install and configure BIND as the authoritative nameserver for the ERW
# environment. Positional parameters (in order):
#   $1 serial/date stamp  $2 domain  $3 zone-file directory  $4..$10 host IPs
INSTALLED=$1
DOMAIN_NAME=$2
ZONE_FILE_PATH=$3
BIND_SERVER_IP=$4
LDAP_SERVER_IP=$5
DB_SERVER_IP=$6
ALFRESCO_SERVER_IP=$7
NFS_SERVER_IP=$8
SYNC_SERVER_IP=$9
PROXY_SERVER_IP=${10}
# Local variables
# Reverse zone name = first three IPv4 octets reversed + in-addr.arpa.
FORWARD_ZONE_NAME=$DOMAIN_NAME
REVERSE_ZONE_NAME=$(echo $BIND_SERVER_IP | awk -F "." '{ print $3 }')"."$(echo $BIND_SERVER_IP | awk -F "." '{ print $2 }')"."$(echo $BIND_SERVER_IP | awk -F "." '{ print $1 }')".in-addr.arpa"
# Install BIND
sudo yum -y install bind
# Build forward lookup zone file (SOA record, then one A/CNAME per service).
sudo touch "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "\$TTL 86400" | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "@   IN  SOA     ns1."$DOMAIN_NAME". root."$DOMAIN_NAME". (" | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "        "$INSTALLED"00  ;Serial - YYYYMMDDvv" | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "        3600        ;Refresh" | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "        1800        ;Retry" | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "        604800      ;Expire" | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "        86400       ;Minimum TTL" | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo ")" | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "        NS      @" | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "@       IN      A       "$BIND_SERVER_IP | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "ns1     IN      CNAME   bind."$DOMAIN_NAME"." | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "bind        IN      A       "$BIND_SERVER_IP | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "ldap        IN      A       "$LDAP_SERVER_IP | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "db      IN      A       "$DB_SERVER_IP | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "alfresco        IN      A       "$ALFRESCO_SERVER_IP | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "nfs     IN      A       "$NFS_SERVER_IP | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "sync        IN      A       "$SYNC_SERVER_IP | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
echo "proxy       IN      A       "$PROXY_SERVER_IP | sudo tee -a "$ZONE_FILE_PATH$FORWARD_ZONE_NAME"
# Build reverse lookup zone file (one PTR per host, keyed by last octet).
sudo touch "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo "\$ORIGIN "$REVERSE_ZONE_NAME"." | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo "\$TTL 86400" | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo "@   IN  SOA     ns1."$DOMAIN_NAME". root."$DOMAIN_NAME". (" | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo "        "$INSTALLED"  ;Serial" | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo "        3600        ;Refresh" | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo "        1800        ;Retry" | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo "        604800      ;Expire" | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo "        86400       ;Minimum TTL" | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo ")" | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo "      IN      NS      bind."$DOMAIN_NAME"." | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo $(echo $BIND_SERVER_IP | awk -F "." '{ print $4 }')"     IN      PTR     bind."$DOMAIN_NAME"." | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo $(echo $LDAP_SERVER_IP | awk -F "." '{ print $4 }')"     IN      PTR     ldap."$DOMAIN_NAME"." | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo $(echo $DB_SERVER_IP | awk -F "." '{ print $4 }')"     IN      PTR     db."$DOMAIN_NAME"." | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo $(echo $ALFRESCO_SERVER_IP | awk -F "." '{ print $4 }')"     IN      PTR     alfresco."$DOMAIN_NAME"." | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo $(echo $NFS_SERVER_IP | awk -F "." '{ print $4 }')"     IN      PTR     nfs."$DOMAIN_NAME"." | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo $(echo $SYNC_SERVER_IP | awk -F "." '{ print $4 }')"     IN      PTR     sync."$DOMAIN_NAME"." | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
echo $(echo $PROXY_SERVER_IP | awk -F "." '{ print $4 }')"     IN      PTR     proxy."$DOMAIN_NAME"." | sudo tee -a "$ZONE_FILE_PATH$REVERSE_ZONE_NAME"
# Modify /etc/named.conf: listen on our IP, answer anyone, forward to 8.8.8.8,
# and register both zones.
sudo sed -i 's/listen-on port 53 { 127.0.0.1; }/listen-on port 53 { '$BIND_SERVER_IP'; }/g' /etc/named.conf
sudo sed -i 's/listen-on-v6/#listen-on-v6/g' /etc/named.conf
sudo sed -i 's/allow-query     { localhost; }/allow-query     { any; }/g' /etc/named.conf
sudo sed -i '/recursion yes;/a \        forwarders { 8.8.8.8; };' /etc/named.conf
echo "        zone \""$FORWARD_ZONE_NAME"\" IN {" | sudo tee -a /etc/named.conf
echo "                type master;" | sudo tee -a /etc/named.conf
echo "                file \""$FORWARD_ZONE_NAME"\";" | sudo tee -a /etc/named.conf
echo "                allow-update { none; };" | sudo tee -a /etc/named.conf
echo "        };" | sudo tee -a /etc/named.conf
echo "        zone \""$REVERSE_ZONE_NAME"\" IN {" | sudo tee -a /etc/named.conf
echo "                type master;" | sudo tee -a /etc/named.conf
echo "                file \""$REVERSE_ZONE_NAME"\";" | sudo tee -a /etc/named.conf
echo "                allow-update { none; };" | sudo tee -a /etc/named.conf
echo "        };" | sudo tee -a /etc/named.conf
# Modify iptables: allow inbound DNS (udp/53), then reload the rules.
sudo sed -i '/-A INPUT -i lo -j ACCEPT/a -A INPUT -m state --state NEW -m udp -p udp --dport 53 -j ACCEPT' /etc/sysconfig/iptables
sudo iptables-restore /etc/sysconfig/iptables
# Enable on boot
sudo chkconfig named on
# Start bind
sudo service named start
| true
|
792609759b0100103727f7c4963f619a7ba3d155
|
Shell
|
d-deen/exomes_pipeline
|
/combine_gVCF.sh
|
UTF-8
| 5,472
| 3.140625
| 3
|
[] |
no_license
|
#! /bin/bash
#SBATCH -A rtmngs
#SBATCH -p bigmem
#SBATCH --mem=80G
#SBATCH -c 8
# Combine per-sample gVCFs into a cohort VCF, genotype, hard-filter, merge,
# normalise, and (for multi-sample cohorts) extract per-sample unique calls.
# Arguments: $1 - output root directory, $2 - job name/prefix.
Out=$1
Job=$2
##Module loading
module load Java/11.0.2
module load picard/2.2.4-intel-2017.03-GCC-6.3-Java-1.8.0_144
##Setting constants for variant calling:
gatk_path='/nobackup/proj/rtmngs/Mt_Exome_pipeline/DD/programs_DD/gatk-4.1.9.0'
fasta_ref="/nobackup/proj/rtmngs/Mt_Exome_pipeline/DD/genomes_DD/GCA_000001405.15_GRCh38_no_alt/GCA_000001405.15_GRCh38_no_alt_analysis_set.fna"
##Removing bam files with the duplicates
bwa_folder=${Out}/${Job}_results/${Job}_bam
rm ${bwa_folder}/*_output.bam*
##Combinign gVCFs into a single vcf and calling variants:
#Getting a list of gVCF files (each path prefixed with --variant for GATK)
ls -d ${Out}/${Job}_results/${Job}_vcf/*.g.vcf.gz | awk '{print "--variant" OFS $0}' > ${Out}/${Job}_results/${Job}_vcf/gvcf_list.txt
gvcf_list=${Out}/${Job}_results/${Job}_vcf/gvcf_list.txt
#Checking how many samples are there: if more than one, then combine them in a file, if one - rename it with a cohort.
if test "$(cat $gvcf_list | wc -l)" -gt 1 ; then
echo "proceed to combining the files"
${gatk_path}/gatk CombineGVCFs \
-R ${fasta_ref} \
--arguments_file ${gvcf_list} \
-O ${Out}/${Job}_results/${Job}_vcf/${Job}_cohort.g.vcf.gz
else
mv ${Out}/${Job}_results/${Job}_vcf/*.g.vcf.gz ${Out}/${Job}_results/${Job}_vcf/${Job}_cohort.g.vcf.gz
${gatk_path}/gatk IndexFeatureFile \
-I ${Out}/${Job}_results/${Job}_vcf/${Job}_cohort.g.vcf.gz
fi
#Calling genotypes
${gatk_path}/gatk --java-options "-Xmx80g" GenotypeGVCFs \
-R ${fasta_ref} \
-V ${Out}/${Job}_results/${Job}_vcf/${Job}_cohort.g.vcf.gz \
-O ${Out}/${Job}_results/${Job}_vcf/${Job}_cohort.vcf.gz
##Filtering the variants by quality
#First, separating them into SNPs and indels (they take different filters)
${gatk_path}/gatk SelectVariants \
-V ${Out}/${Job}_results/${Job}_vcf/${Job}_cohort.vcf.gz \
-select-type SNP \
-O ${Out}/${Job}_results/${Job}_vcf/${Job}_snps.vcf.gz
${gatk_path}/gatk SelectVariants \
-V ${Out}/${Job}_results/${Job}_vcf/${Job}_cohort.vcf.gz \
-select-type INDEL \
-O ${Out}/${Job}_results/${Job}_vcf/${Job}_indels.vcf.gz
#Hard filtering of the variants
${gatk_path}/gatk VariantFiltration \
-V ${Out}/${Job}_results/${Job}_vcf/${Job}_snps.vcf.gz \
-filter "QD < 2.0" --filter-name "QD2" \
-filter "QUAL < 30.0" --filter-name "QUAL30" \
-filter "SOR > 3.0" --filter-name "SOR3" \
-filter "FS > 60.0" --filter-name "FS60" \
-filter "MQ < 40.0" --filter-name "MQ40" \
-filter "MQRankSum < -12.5" --filter-name "MQRankSum-12.5" \
-filter "ReadPosRankSum < -8.0" --filter-name "ReadPosRankSum-8" \
-O ${Out}/${Job}_results/${Job}_vcf/${Job}_snps_filtered.vcf.gz
${gatk_path}/gatk VariantFiltration \
-V ${Out}/${Job}_results/${Job}_vcf/${Job}_indels.vcf.gz \
-filter "QD < 2.0" --filter-name "QD2" \
-filter "QUAL < 30.0" --filter-name "QUAL30" \
-filter "FS > 200.0" --filter-name "FS200" \
-filter "ReadPosRankSum < -20.0" --filter-name "ReadPosRankSum-20" \
-O ${Out}/${Job}_results/${Job}_vcf/${Job}_indels_filtered.vcf.gz
#Merging the variants back into a single filtered cohort VCF
java -jar $EBROOTPICARD/picard.jar MergeVcfs \
I=${Out}/${Job}_results/${Job}_vcf/${Job}_snps_filtered.vcf.gz \
I=${Out}/${Job}_results/${Job}_vcf/${Job}_indels_filtered.vcf.gz \
O=${Out}/${Job}_results/${Job}_vcf/${Job}_filtered_comb_cohort.vcf.gz
##Normalisation of the variants (split multi-allelic records: bcftools norm -m -)
#Loading bcftools module separately as there were conflicts when loading them all together
module load BCFtools/1.10.2-foss-2019b
bcftools norm -m - -Oz ${Out}/${Job}_results/${Job}_vcf/${Job}_filtered_comb_cohort.vcf.gz > ${Out}/${Job}_report/${Job}_filtered_cohort.vcf.gz
##This is the filtering that was done before April 2021 (selecting the variants with at least 30 reads)
#bcftools filter -i'FMT/DP>30' | \
#bcftools view -f PASS > ${Out}/${Job}_results/${Job}_vcf/${Job}_filtered_cohort.vcf.gz
#Plotting statistics for vcf
module load texlive/20200406-GCCcore-10.2.0
bcftools stats -s - ${Out}/${Job}_report/${Job}_filtered_cohort.vcf.gz > ${Out}/${Job}_report/${Job}_vcf_stats.vchk
#Creating pdf report generates errors
#plot-vcfstats -p ${Out}/${Job}_report/${Job}_vcf_stats -s -T $Job ${Out}/${Job}_report/${Job}_vcf_stats.vchk
##Creating the separate pivoted vcf files (pivoting: unique varaint calls for the cohort)
#Separating the combined vcf to separate samples, to get unique variant calls:
if test "$(cat $gvcf_list | wc -l)" -gt 1 ; then
for sample in `bcftools query -l ${Out}/${Job}_report/${Job}_filtered_cohort.vcf.gz`; do
bcftools view -c1 -Oz -s $sample -o ${Out}/${Job}_results/${Job}_vcf/vcf_pivot/$sample.vcf.gz ${Out}/${Job}_report/${Job}_filtered_cohort.vcf.gz
bcftools index ${Out}/${Job}_results/${Job}_vcf/vcf_pivot/$sample.vcf.gz
echo $sample.vcf.gz >> ${Out}/${Job}_results/${Job}_vcf/vcf_pivot/samplevcf_list.txt
done
#Getting unique variant calls (bcftools isec -C: records private to one sample)
cd ${Out}/${Job}_results/${Job}_vcf/vcf_pivot
no_of_samples=$(wc -l < samplevcf_list.txt)
samplevcf_list=samplevcf_list.txt
for i in $(seq 1 $no_of_samples)
do
sample=`sed -n "$i"p $samplevcf_list | awk '{print $1}'`
samplevcf_list1=$(grep -v $sample $samplevcf_list)
sample_order=$(echo $sample $samplevcf_list1)
sample_name=$(echo $sample | sed 's/.vcf.gz//')
bcftools isec -C $sample_order -w 1 > ${sample_name}_unique.vcf
done
fi
|
4fd75e6d9f3647a4cf120a1a3671fc7ab8774438
|
Shell
|
jalkifi/jalki-tileserver
|
/run.sh
|
UTF-8
| 3,045
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# Entry point for the tile-server container.  Exactly one sub-command:
#   init_db - create the PostGIS 'gis' database and rendering user
#   import  - load /data.osm.pbf (or a downloaded extract) with osm2pgsql
#   run     - serve tiles at /tile/{z}/{x}/{y}.png via Apache + renderd
set -x
if [ "$#" -ne 1 ]; then
  echo "usage: <init_db|import|run>"
  echo "commands:"
  echo "    init_db: Set up the database"
  echo "    import: Import /data.osm.pbf and low-zoom shapes"
  echo "    run: Runs Apache and renderd to serve tiles at /tile/{z}/{x}/{y}.png"
  echo "environment variables:"
  echo "    THREADS: defines number of threads used for importing / tile rendering"
  echo "    UPDATES: consecutive updates (enabled/disabled)"
  exit 1
fi
# Undocumented escape hatch: drop into an interactive shell.
if [ "$1" = "debug" ]; then
  bash
  exit 0
fi
if [ "$1" = "init_db" ]; then
  service postgresql start
  # Rendering account used by osm2pgsql and renderd.
  sudo -u postgres psql -c "CREATE USER renderaccount WITH PASSWORD 'renderaccount';"
  sudo -u postgres psql -c "ALTER ROLE renderaccount SUPERUSER;"
  sudo -u postgres createdb -E UTF8 -O renderaccount gis
  sudo -u postgres psql -d gis -c "CREATE EXTENSION hstore;"
  sudo -u postgres psql -d gis -c "CREATE EXTENSION postgis;"
  sudo -u postgres psql -d gis -c "ALTER TABLE geometry_columns OWNER TO renderaccount;"
  sudo -u postgres psql -d gis -c "ALTER TABLE spatial_ref_sys OWNER TO renderaccount;"
  service postgresql stop
  exit 0
fi
if [ "$1" = "import" ]; then
  service postgresql start
  # Download Luxembourg as sample if no data is provided
  if [ ! -f /data.osm.pbf ] && [ -z "$DOWNLOAD_PBF" ]; then
    echo "WARNING: No import file at /data.osm.pbf, so importing Luxembourg as example..."
    DOWNLOAD_PBF="https://download.geofabrik.de/europe/luxembourg-latest.osm.pbf"
    DOWNLOAD_POLY="https://download.geofabrik.de/europe/luxembourg.poly"
  fi
  if [ -n "$DOWNLOAD_PBF" ]; then
    echo "INFO: Download PBF file: $DOWNLOAD_PBF"
    # NOTE(review): "$WGET_ARGS" expands to an empty argument when the
    # variable is unset, which wget treats as a URL — confirm intended.
    wget "$WGET_ARGS" "$DOWNLOAD_PBF" -O /data.osm.pbf
    if [ -n "$DOWNLOAD_POLY" ]; then
      echo "INFO: Download PBF-POLY file: $DOWNLOAD_POLY"
      wget "$WGET_ARGS" "$DOWNLOAD_POLY" -O /var/lib/mod_tile/data.poly
    fi
  fi
  # Import data
  sudo -u renderaccount osm2pgsql -d gis --create --slim -G --hstore --tag-transform-script /opt/openstreetmap-carto/openstreetmap-carto.lua --number-processes 1 -S /opt/openstreetmap-carto/openstreetmap-carto.style /data.osm.pbf ${OSM2PGSQL_EXTRA_ARGS}
  # Get low-zoom stuff
  cd /opt/openstreetmap-carto
  sudo -u renderaccount scripts/get-external-data.py
  # Create indexes
  sudo -u postgres psql -d gis -f indexes.sql
  # Register that data has changed for mod_tile caching purposes
  touch /var/lib/mod_tile/planet-import-complete
  exit 0
fi
if [ "$1" = "run" ]; then
  rm -rf /tmp/*
  service postgresql start
  service apache2 restart
  # Run while handling docker stop's SIGTERM
  stop_handler() {
    kill -TERM "$child"
  }
  trap stop_handler SIGTERM
  mkdir -p /var/run/renderd
  chown renderaccount /var/run/renderd
  # renderd runs in the foreground (-f); SIGTERM is forwarded via the trap.
  sudo -u renderaccount renderd -f -c /usr/local/etc/renderd.conf &
  child=$!
  wait "$child"
  service apache2 stop
  service postgresql stop
  exit 0
fi
echo "invalid command"
exit 1
| true
|
06fb30b82dc5646fcba317716a5780eca528223b
|
Shell
|
shaonanxu/Kafka
|
/scripts/kafka/kafka-start.sh
|
UTF-8
| 1,085
| 2.875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Start a Kafka broker with a JVM heap sized from the host memory reported
# (in MiB) by the cloud metadata service:
#   heap = RAM/2, clamped to the range [RAM/2 MiB, 5 GiB].
# Changes vs. original: deprecated `expr` replaced with shell arithmetic
# $(( )), test operands quoted, comments translated to English.
KAFKA_HEAP_USE="-Xms1G -Xmx1G"
MEM_M=$(curl http://metadata/self/host/memory -s)
MEM_M_SIZE=$(( MEM_M / 2 ))     # half of RAM, in MiB
MEM_G_SIZE=$(( MEM_M / 2048 ))  # half of RAM, in whole GiB
ip=$(curl http://metadata/self/host/ip -s)
if [ "${MEM_M_SIZE}" -lt 1024 ]; then   # less than 1 GiB of heap available
	KAFKA_HEAP_USE="-Xms${MEM_M_SIZE}M -Xmx${MEM_M_SIZE}M"
elif [ "${MEM_G_SIZE}" -lt 5 ]; then    # between 1 GiB and 5 GiB
	KAFKA_HEAP_USE="-Xms${MEM_G_SIZE}G -Xmx${MEM_G_SIZE}G"
else                                    # cap the heap at 5 GiB
	KAFKA_HEAP_USE="-Xms5G -Xmx5G"
fi
# Kafka needs a generous file-descriptor limit (log segments, sockets).
ulimit -n 100000
export JAVA_HOME=/opt/jdk
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export KAFKA_HEAP_OPTS="${KAFKA_HEAP_USE}"
# Remote JMX for monitoring; hostname must be the instance IP for RMI.
export JMX_PORT="9999"
export KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=$ip"
export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:/opt/kafka/config/log4j.properties"
/opt/kafka/bin/kafka-server-start.sh -daemon /opt/kafka/config/server.properties
| true
|
1aa8d63ab125226492efeee6987e10cee47ab06d
|
Shell
|
w1zard/docker-nginx-php-fpm
|
/start.sh
|
UTF-8
| 2,042
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Container entry point: optionally clone a site from git, tune nginx
# worker count, start supervisord-managed services, install SSH keys and
# finally run sshd in the foreground as PID-holding process.
# Disable Strict Host checking for non interactive git clones
mkdir -p -m 0700 /root/.ssh
echo -e "Host *\n\tStrictHostKeyChecking no\n" >> /root/.ssh/config
# Setup git variables
if [ ! -z "$GIT_EMAIL" ]; then
  git config --global user.email "$GIT_EMAIL"
fi
if [ ! -z "$GIT_NAME" ]; then
  git config --global user.name "$GIT_NAME"
  git config --global push.default simple
fi
# Pull down code form git for our site!
if [ ! -z "$GIT_REPO" ]; then
  rm -rf /data/webroot/*
  if [ ! -z "$GIT_BRANCH" ]; then
    git clone -b $GIT_BRANCH $GIT_REPO /data/webroot/
  else
    git clone $GIT_REPO /data/webroot/
  fi
  # NOTE(review): the clone goes to /data/webroot but ownership is fixed
  # under /usr/share/nginx — confirm which directory nginx actually serves.
  chown -Rf nginx.nginx /usr/share/nginx/*
fi
# Tweak nginx to match the workers to cpu's
procs=$(cat /proc/cpuinfo |grep processor | wc -l)
sed -i -e "s/worker_processes 5/worker_processes $procs/" /etc/nginx/nginx.conf
# Very dirty hack to replace variables in code with ENVIRONMENT values
# if [[ "$TEMPLATE_NGINX_HTML" != "0" ]] ; then
# for i in $(env)
# do
# variable=$(echo "$i" | cut -d'=' -f1)
# value=$(echo "$i" | cut -d'=' -f2)
# if [[ "$variable" != '%s' ]] ; then
# replace='\$\$_'${variable}'_\$\$'
# find /data/webroot -type f -exec sed -i -e 's/'${replace}'/'${value}'/g' {} \;
# fi
# done
# fi
# Start supervisord and services
/usr/bin/supervisord -n -c /etc/supervisord.conf &
# Install any comma-separated public keys passed via AUTHORIZED_KEYS,
# skipping keys that are already present.
if [ "${AUTHORIZED_KEYS}" != "**None**" ]; then
  echo "=> Found authorized keys"
  mkdir -p /root/.ssh
  chmod 700 /root/.ssh
  touch /root/.ssh/authorized_keys
  chmod 600 /root/.ssh/authorized_keys
  IFS=$'\n'
  arr=$(echo ${AUTHORIZED_KEYS} | tr "," "\n")
  for x in $arr
  do
    x=$(echo $x |sed -e 's/^ *//' -e 's/ *$//')
    cat /root/.ssh/authorized_keys | grep "$x" >/dev/null 2>&1
    if [ $? -ne 0 ]; then
      echo "=> Adding public key to /root/.ssh/authorized_keys: $x"
      echo "$x" >> /root/.ssh/authorized_keys
    fi
  done
fi
# Set a root password once per container lifetime (marker file guards it).
if [ ! -f /.root_pw_set ]; then
  /set_root_pw.sh
fi
exec /usr/sbin/sshd -D
| true
|
b14b64dbda44a9469537b4e9e5c53daebbadd2a5
|
Shell
|
pockethook/dotfiles
|
/.zsh/alias-awk.zsh
|
UTF-8
| 534
| 2.875
| 3
|
[] |
no_license
|
# awk one-liner helpers.  Each function accepts optional file arguments and
# falls back to stdin, exactly like awk itself.
# Fix: use "$@" instead of unquoted $* so file names containing whitespace
# survive intact and empty arguments are preserved (also makes the helpers
# safe when sourced from bash, where unquoted $* word-splits).

# Print the sum of column 1.
function sum() {
	awk '{sum += $1} END {print sum}' "$@"
}
# Print the mean of column 1 (prints nothing for empty input).
function avg() {
	awk '{sum += $1} END {if (NR > 0) print sum / NR}' "$@"
}
# p1..p10: print the Nth whitespace-separated column.
function p1() {
	awk '{print $1}' "$@"
}
function p2() {
	awk '{print $2}' "$@"
}
function p3() {
	awk '{print $3}' "$@"
}
function p4() {
	awk '{print $4}' "$@"
}
function p5() {
	awk '{print $5}' "$@"
}
function p6() {
	awk '{print $6}' "$@"
}
function p7() {
	awk '{print $7}' "$@"
}
function p8() {
	awk '{print $8}' "$@"
}
function p9() {
	awk '{print $9}' "$@"
}
function p10() {
	awk '{print $10}' "$@"
}
| true
|
246802b2a8673e249bf290d7af4ab2faae2082c0
|
Shell
|
byteofmydream/webdriver-bash-binding
|
/wdAPI/executeJSScript.sh
|
UTF-8
| 607
| 3.140625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#arguments are js args
# Build a JSON-quoted, comma-separated argument list from $2..$N ($1 is the
# JS file name under ./js/), wrap the script into a JSON payload and POST
# it to the WebDriver session's /execute endpoint.
separator=','
# 'lapka' holds a literal double-quote character used for JSON quoting.
lapka=\"
i=0
for var in "$@"
do
((i++))
# Skip $1 (the script name) and empty arguments; everything else becomes
# a quoted, comma-terminated JSON list entry.
if [ "$i" -eq 1 ]; then echo -n ""
else
if [ -z "$var" ]; then echo -n ""; else res+=`echo -n "$lapka$var$lapka$separator"`; fi
fi
done
# ${res%?} strips the trailing comma from the accumulated list.
argsArray=`echo -n ${res%?}`
json=`cat $(pwd)"/js/"${1}".js" | ./wrapJStoJSON.sh "$argsArray"`
# sessionId is persisted by a previous script in the working directory.
sessionId=`cat ./sessionId`
url="${BASE_URL}:${HUB_PORT}/${RELATIVE_URL}/session/$sessionId/execute"
resp=`curl -s "$url" -H 'Content-Type: text/plain;charset=UTF-8' -H 'Accept: application/json; charset=utf-8' --data "$json"`
# NOTE(review): the fixed 1s sleep before fetching the result is a race
# mitigation, not a guarantee — confirm obtainJSResult.sh polls/retries.
sleep 1s
./obtainJSResult.sh
| true
|
cca07a94fc99a1938d9269911790e823c57f3c71
|
Shell
|
petronny/aur3-mirror
|
/gpapers/PKGBUILD
|
UTF-8
| 1,232
| 2.84375
| 3
|
[] |
no_license
|
# Contributor: Stefan Husmann <stefan-husmann@t-online.de>
# PKGBUILD for gpapers: checks out the project's SVN trunk pinned at the
# revision given by pkgver and installs it as a python2 application.
pkgname=gpapers
pkgver=150
pkgrel=1
pkgdesc="The open-sourced, Gnome based digital library manager. Think of it as an iTunes for your PDFs"
arch=('any')
url="http://code.google.com/p/gpapers"
license=('GPL2')
depends=('python2-poppler' 'python-pysqlite' 'pygtk' 'python-pygraphviz' 'django' 'gnome-python')
makedepends=('svn')
source=("gpapers.sh")
md5sums=('fb296b5eff8d5821894e43899344922d')
_svntrunk=http://gpapers.googlecode.com/svn/trunk
_svnmod=gpapers
package() {
  cd "$srcdir"
  msg "Connecting to SVN server...."
  # Re-use an existing checkout when present; otherwise fetch the pinned
  # revision into $_svnmod.
  if [[ -d "$_svnmod/.svn" ]]; then
    (cd "$_svnmod" && svn up -r "$pkgver")
  else
    svn co "$_svntrunk" --config-dir ./ -r "$pkgver" "$_svnmod"
  fi
  msg "SVN checkout done or server timeout"
  msg "Starting build..."
  # Build from a clean copy so repeated runs do not accumulate edits.
  rm -rf "$srcdir/$_svnmod-build"
  cp -r "$srcdir/$_svnmod" "$srcdir/$_svnmod-build"
  cd "$srcdir/$_svnmod-build"
  rm -fr ext website
  #
  # BUILD
  #
  # Force the python2 interpreter in all shebangs (Arch's /usr/bin/python
  # is python3).
  find . -name "*py" -exec sed 's/env python/env python2/' -i {} \;
  install -d "$pkgdir/usr/lib/python2.7/${pkgname}"
  cp -r * "$pkgdir/usr/lib/python2.7/${pkgname}/"
  install -Dm755 "${srcdir}/gpapers.sh" "$pkgdir/usr/bin/gpapers"
  # Strip SVN metadata from the packaged tree.
  rm -rf $(find "$pkgdir" -type d -name ".svn")
}
| true
|
aef0ff715740b646d9c92dc52e8f6f9cd343034d
|
Shell
|
haqistan/mblaze-tools
|
/mpane
|
UTF-8
| 825
| 3.796875
| 4
|
[
"ISC",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/sh
# mpane [-nh] [-mloop_options] [cmd_args]
# set MpaneLines in ~/.mblaze/profile to change
# the default size of the pane we create in tmux

# Resolve the mblaze config directory, then the editor to run inside the
# pane: the Editor header from the profile, falling back to $EDITOR, then
# $VISUAL, then vi.
MBLAZE=${MBLAZE-${HOME}/.mblaze}
editor_cmd=$(mhdr -h Editor "$MBLAZE/profile")
[ -z "${editor_cmd}" ] && editor_cmd=${EDITOR-${VISUAL-vi}}
EDITOR=${EDITOR-$editor_cmd}
# collect varname ...
# collect varname arg...
# Append each remaining argument to the shell variable named by $1,
# separated by single spaces.
#
# Bug fixed: the previous implementation read the current value with
#   val=$(eval '$'$var)
# which *executed* the accumulated string as a command (discarding it and
# printing "command not found" for option-like words), and wrote it back
# with an unquoted `eval $var="$val"`, which broke once the value held a
# space.  Net effect: each call overwrote the variable with its last
# argument instead of appending.
collect () {
	typeset var arg
	var=$1
	shift
	for arg in "$@"; do
		if eval "[ -z \"\${$var}\" ]"; then
			eval "$var=\$arg"
		else
			eval "$var=\"\${$var} \$arg\""
		fi
	done
}
# Split the command line into: -h (horizontal tmux split), other options
# (forwarded to mloop) and positional arguments.
# NOTE(review): iterating over unquoted $* re-splits arguments containing
# spaces — confirm that is acceptable for mloop's option set.
horiz=
args=
opts=
for arg in $*; do
	case $arg in
	-h) horiz=-h ;;
	-*) collect opts ${arg} ;;
	*) collect args ${arg} ;;
	esac
done
# Pane height: MpaneLines from the profile, defaulting to 20 rows.
lines=$(mhdr -h MpaneLines "$MBLAZE/profile")
[ -z "$lines" ] && lines=20
tmux split-window ${horiz} -l ${lines} "env EDITOR=$EDITOR mloop ${opts} ${args}"
| true
|
4b61886f1fef0cac6ea82eb8852cca842d3e4c30
|
Shell
|
tyrone-dev/bash_learn
|
/bash_trap.sh
|
UTF-8
| 314
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Bash Trap
# Demo: install a SIGINT (CTRL+C) handler, then count down for ten seconds
# so the user has time to trigger it.  The trap command stores the handler
# name as a string, so defining bashtrap after the trap line is fine — it
# only needs to exist when the signal arrives.
trap bashtrap INT
# clear screen
clear;
# bashtrap is executed when CTRL+C is pressed
bashtrap()
{
	echo "CTRL+C Detected ! . . . executing bash trap !"
}
# for loop from 1/10 to 10/10
for a in `seq 1 10`; do
	echo "$a/10 to Exit."
	sleep 1;
done
echo "Exit Bash Trap Example"
| true
|
c467b4b6ddb3a339ec2d61c58ac582f1fa2df6ed
|
Shell
|
nathanaelhoun/circleci-orb-mattermost-plugin-notify
|
/src/scripts/send_notification.sh
|
UTF-8
| 1,391
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Post a CircleCI build-status payload to a Mattermost incoming webhook.
# Exits non-zero when no webhook is configured or the POST is rejected.
Send_notification() {
  # Prefer MM_WEBHOOK; fall back to the legacy WEBHOOK_URL variable.
  if [ -z "${MM_WEBHOOK}" ]; then
    if [ -z "${WEBHOOK_URL}" ]; then
      echo "NO MATTERMOST WEBHOOK SET"
      echo "Please input your MM_WEBHOOK value either in the settings for this project, or as a parameter for this orb."
      exit 1
    else
      MM_WEBHOOK=${WEBHOOK_URL}
    fi
  fi
  # -o /dev/null -w "%{http_code}" makes curl emit only the HTTP status.
  STATUS=$(curl -o /dev/null -s -w "%{http_code}\n" -X POST -H 'Content-type: application/json' \
  --data \
  "{
  \"Organization\": \"${CIRCLE_PROJECT_USERNAME}\",
  \"Repository\": \"${CIRCLE_PROJECT_REPONAME}\",
  \"RepositoryURL\": \"${CIRCLE_REPOSITORY_URL}\",
  \"Username\": \"${CIRCLE_USERNAME}\",
  \"WorkflowID\":\"${CIRCLE_WORKFLOW_ID}\", \
  \"JobName\":\"${CIRCLE_JOB}\", \
  \"CircleBuildNumber\": ${CIRCLE_BUILD_NUM},
  \"CircleBuildURL\": \"${CIRCLE_BUILD_URL}\",
  \"Branch\": \"${CIRCLE_BRANCH}\",
  \"Tag\":\"${CIRCLE_TAG}\", \
  \"Commit\":\"${CIRCLE_SHA1}\", \
  \"AssociatedPullRequests\": \"${CIRCLE_PULL_REQUESTS}\",
  \"IsFailed\": ${MM_BUILD_IS_FAILED},
  \"IsWaitingApproval\": ${IS_WAITING_APPROVAL},
  \"Message\": \"${MM_MESSAGE}\"
  }" "${MM_WEBHOOK}")
  if [ "$STATUS" -ne "200" ]; then
    echo "Notification not sent due to an error. Status: $STATUS. Please check the webhook URL"
    exit 1
  fi
  echo "Notification sent!"
  exit 0
}
Send_notification
| true
|
842d5e94f1a6c1e25118023585fa6109650ec03b
|
Shell
|
petronny/aur3-mirror
|
/mistelix/PKGBUILD
|
UTF-8
| 1,290
| 2.59375
| 3
|
[] |
no_license
|
# Maintainer: Balló György <ballogyor+arch at gmail dot com>
# PKGBUILD for mistelix, built from the upstream release tarball with a
# patch that removes the gnome-sharp dependency.
pkgname=mistelix
pkgver=0.33
pkgrel=4
pkgdesc="DVD authoring application with slideshow creation capabilities"
arch=('i686' 'x86_64')
url="http://live.gnome.org/Mistelix"
license=('MIT')
depends=('gstreamer0.10-base-plugins' 'gtk-sharp-2' 'mono-addins' 'gnome-desktop2' 'hicolor-icon-theme' 'xdg-utils')
makedepends=('intltool')
optdepends=('gstreamer0.10-good-plugins: Extra media codecs'
'gstreamer0.10-bad-plugins: Extra media codecs'
'gstreamer0.10-ugly-plugins: Extra media codecs'
'gstreamer0.10-ffmpeg: Extra media codecs')
options=('!libtool')
install=$pkgname.install
source=(http://gent.softcatala.org/jmas/$pkgname/$pkgname-$pkgver.tar.gz
drop-gnome-sharp.patch)
md5sums=('954d351bff0e47a5092c55bb7bb5038a'
'b183e5ab60d9de9045b55c30dd3f0485')
build() {
  cd "$srcdir/$pkgname-$pkgver"
  # Patch for upstream bug:
  # https://bugzilla.gnome.org/show_bug.cgi?id=675046
  patch -Np1 -i "$srcdir/drop-gnome-sharp.patch"
  # The patch touches the build system, so regenerate configure first.
  autoreconf -fi
  ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var \
  --disable-static
  make
}
package() {
  cd "$srcdir/$pkgname-$pkgver"
  make DESTDIR="$pkgdir/" install
  # MIT license requires shipping the license text with the package.
  install -Dm644 COPYING ${pkgdir}/usr/share/licenses/$pkgname/COPYING
}
| true
|
d7b2d46674d0fc3c2044b5cfc862e5364eca5012
|
Shell
|
professorfaggiano/TT12017
|
/ENIAC/Prova Exer 4.sh
|
UTF-8
| 217
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Ask the user for their birth year and report voting eligibility:
# anyone born in $VAR (the cutoff year) or earlier may vote.
clear
VAR=1999
echo "Digite o ano que voce nasceu"
# -r keeps backslashes in the input literal.
read -r ANO
# The original nested a second test `(( ANO > VAR ))` inside the else
# branch; that condition is always true there, so a plain else is
# equivalent and clearer.
if (( ANO <= VAR )); then
	echo "Voce pode votar"
else
	echo "Voce nao pode votar"
fi
| true
|
d99388c39d3aef4f1de736275baaaed2d477a72a
|
Shell
|
parro-it/ebansoft.com
|
/scripts.zsh
|
UTF-8
| 796
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/zsh
# ANSI colour escapes used for build status banners.
RED="\e[31m"
NORMAL="\e[0m"
GREEN="\e[92m"
# 1 = last build succeeded; exported so child processes can read it.
export all_tests_passed=0
# Rebuild the site with orgame and show a colourful figlet banner with the
# outcome.  Updates the global all_tests_passed flag.
# NOTE(review): the $1/verbose parameter is captured but never used.
function build_site() {
	verbose=$1
	if orgame src/client src/posts src/pages docs; then
		printf $GREEN
		figlet 'Sito ricostruito.'
		printf $NORMAL
		all_tests_passed=1
	else
		printf $RED
		figlet "C'è un problema"
		printf $NORMAL
		all_tests_passed=0
	fi
}
# Build once, then rebuild on every filesystem change reported by
# inotifywait.  A spoken announcement (spd-say) is made only when the
# build outcome flips between success and failure, not on every rebuild.
function on_sourcechanges_rebuild() {
	build_site
	while inotifywait -r -e modify -e move -e create -e delete -e delete_self .; do
		previous=$all_tests_passed
		build_site
		if [[ $all_tests_passed != $previous ]]; then
			if [[ $all_tests_passed == 1 ]]; then
				spd-say -y Italian+female5 'Sito ricostruito.'
			else
				spd-say -y Italian+female5 "C'è un problema"
			fi
		fi
	done
}
| true
|
3e257398aac9090a7e499ce9bda296f080124f70
|
Shell
|
garymaxallen/vpns
|
/l2tp_ipsec.sh
|
UTF-8
| 11,748
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Setup Strong strongSwan server for Ubuntu and Debian
#
# Copyright (C) 2014-2015 Phil Plückthun <phil@plckthn.me>
# Based on Strongswan on Docker
# https://github.com/philplckthun/docker-strongswan
#
# This work is licensed under the Creative Commons Attribution-ShareAlike 3.0
# Unported License: http://creativecommons.org/licenses/by-sa/3.0/

# Root is required: the installer writes to /etc, manages services and
# manipulates iptables.
# NOTE(review): exiting with status 0 on a privilege failure hides the
# error from callers — confirm whether exit 1 was intended.
if [ `id -u` -ne 0 ]
then
	echo "Please start this script with root privileges!"
	echo "Try again with sudo."
	exit 0
fi
#################################################################
# Variables
# Defaults may be overridden from the environment before invocation.
[ -z "$STRONGSWAN_TMP" ] && STRONGSWAN_TMP="/tmp/strongswan"
[ -z "$STRONGSWAN_VERSION" ] && STRONGSWAN_VERSION="5.3.5"
[ -z "$KEYSIZE" ] && KEYSIZE=16
#STRONGSWAN_USER
#STRONGSWAN_PASSWORD
#STRONGSWAN_PSK
# INTERACTIVE accepts 1/0 or "true"/"false"; normalise to 1/0.
if [ -z "$INTERACTIVE" ]; then
	INTERACTIVE=1
fi
[[ $INTERACTIVE = "true" ]] && INTERACTIVE=1
[[ $INTERACTIVE = "false" ]] && INTERACTIVE=0
#################################################################
# Functions
# Run a command line, discarding all of its output.
# NOTE: the arguments are re-parsed by eval, so shell metacharacters in
# them are interpreted — callers must pass trusted, pre-quoted strings.
call () {
	eval "$@ > /dev/null 2>&1"
}
# Abort the installer if the previous command failed.
# Fix: the old test (`[ "$?" = "1" ]`) only caught exit status 1 and
# silently ignored every other failure code (2, 126, 127, ...); compare
# against 0 instead so any failure stops the install.
checkForError () {
	if [ "$?" -ne 0 ]
	then
		bigEcho "An unexpected error occured!"
		exit 1
	fi
}
# Fill the global $KEY with $KEYSIZE random characters drawn from the set
# [_A-Za-z0-9-].  Reads /dev/urandom directly (the old `cat | tr | head`
# pipeline was a useless use of cat), uses $(...) instead of backticks and
# quotes $KEYSIZE.
generateKey () {
	KEY=$(tr -dc _A-Z-a-z-0-9 < /dev/urandom | head -c "$KEYSIZE")
}
# Print the given message framed by separator bars, padded with blank
# lines above and below, for prominent installer status output.
bigEcho () {
	local bar="============================================================"
	echo ""
	echo "$bar"
	echo "$@"
	echo "$bar"
	echo ""
}
# Forward arguments to the downloaded pacapt wrapper (a pacman-style
# front-end over apt/yum/etc.) in the temporary build directory.
pacapt () {
	eval "$STRONGSWAN_TMP/pacapt $@"
}
# Preserve existing credential files (as *.backup) before they are
# overwritten by writeCredentials.
# Fix: the original backed up /etc/ppp/l2tp-secrets, but writeCredentials
# actually (over)writes /etc/ppp/chap-secrets, so that file was lost on
# re-runs; back it up as well (l2tp-secrets is kept for compatibility).
backupCredentials () {
	local f
	for f in /etc/ipsec.secrets /etc/ppp/l2tp-secrets /etc/ppp/chap-secrets; do
		if [ -f "$f" ]; then
			cp "$f" "$f.backup"
		fi
	done
}
# Write the PSK plus the user's EAP/XAUTH credentials for strongSwan, and
# the CHAP secret used by xl2tpd/pppd.  Both files are overwritten; the
# '#' lines inside the heredocs are comments in the *generated* files.
writeCredentials () {
	bigEcho "Saving credentials"
	cat > /etc/ipsec.secrets <<EOF
# This file holds shared secrets or RSA private keys for authentication.
# RSA private key for this host, authenticating it to any other host
# which knows the public part. Suitable public keys, for ipsec.conf, DNS,
# or configuration of other implementations, can be extracted conveniently
# with "ipsec showhostkey".
: PSK "$STRONGSWAN_PSK"
$STRONGSWAN_USER : EAP "$STRONGSWAN_PASSWORD"
$STRONGSWAN_USER : XAUTH "$STRONGSWAN_PASSWORD"
EOF
	# L2TP/PPP authentication (read by pppd via 'auth file' in xl2tpd.conf).
	cat > /etc/ppp/chap-secrets <<EOF
# This file holds secrets for L2TP authentication.
# Username  Server  Secret  Hosts
"$STRONGSWAN_USER" "*" "$STRONGSWAN_PASSWORD" "*"
EOF
}
# Populate STRONGSWAN_PSK, STRONGSWAN_USER and STRONGSWAN_PASSWORD.
# In interactive mode the user may type each value; otherwise (or on 'n')
# random values are generated via generateKey and the username defaults
# to "vpn".  Values already set in the environment are left untouched.
getCredentials () {
	bigEcho "Querying for credentials"
	if [ "$STRONGSWAN_PSK" = "" ]; then
		echo "The VPN needs a PSK (Pre-shared key)."
		echo "Do you wish to set it yourself? [y|n]"
		echo "(Otherwise a random one is generated)"
		while true; do
			if [ $INTERACTIVE -eq 0 ]; then
				echo "Auto-Generating PSK..."
				yn="n"
			else
				read -p "" yn
			fi
			case $yn in
				[Yy]* ) echo ""; echo "Enter your preferred key:"; read -p "" STRONGSWAN_PSK; break;;
				[Nn]* ) generateKey; STRONGSWAN_PSK=$KEY; break;;
				* ) echo "Please answer with Yes or No [y|n].";;
			esac
		done
		echo ""
		echo "The PSK is: '$STRONGSWAN_PSK'."
		echo ""
	fi
	#################################################################
	if [ "$STRONGSWAN_USER" = "" ]; then
		if [ $INTERACTIVE -eq 0 ]; then
			STRONGSWAN_USER=""
		else
			read -p "Please enter your preferred username [vpn]: " STRONGSWAN_USER
		fi
		if [ "$STRONGSWAN_USER" = "" ]
		then
			STRONGSWAN_USER="vpn"
		fi
	fi
	#################################################################
	if [ "$STRONGSWAN_PASSWORD" = "" ]; then
		echo "The VPN user '$STRONGSWAN_USER' needs a password."
		echo "Do you wish to set it yourself? [y|n]"
		echo "(Otherwise a random one is generated)"
		while true; do
			if [ $INTERACTIVE -eq 0 ]; then
				echo "Auto-Generating Password..."
				yn="n"
			else
				read -p "" yn
			fi
			# NOTE(review): the prompt says "key" although a password is
			# being entered — consider rewording in a follow-up change.
			case $yn in
				[Yy]* ) echo ""; echo "Enter your preferred key:"; read -p "" STRONGSWAN_PASSWORD; break;;
				[Nn]* ) generateKey; STRONGSWAN_PASSWORD=$KEY; break;;
				* ) echo "Please answer with Yes or No [y|n].";;
			esac
		done
	fi
}
#################################################################
# Confirm with the user (unless running non-interactively), then prepare
# the build environment and install build dependencies via pacapt.
if [ $INTERACTIVE -eq 0 ]; then
	bigEcho "Automating installation in non-interactive mode..."
else
	echo "This script will install strongSwan on this machine."
	echo "Do you wish to continue? [y|n]"
	while true; do
		read -p "" yn
		case $yn in
			[Yy]* ) break;;
			[Nn]* ) exit 0;;
			* ) echo "Please answer with Yes or No [y|n].";;
		esac
	done
fi
#################################################################
# Clean up and create compilation environment
call rm -rf $STRONGSWAN_TMP
call mkdir -p $STRONGSWAN_TMP
# pacapt abstracts the distro package manager behind a pacman-like CLI.
curl -sSL "https://github.com/icy/pacapt/raw/ng/pacapt" > $STRONGSWAN_TMP/pacapt
if [ "$?" = "1" ]
then
	bigEcho "An unexpected error occured while downloading pacapt!"
	exit 0
fi
call chmod +x $STRONGSWAN_TMP/pacapt
echo ""
#################################################################
bigEcho "Installing necessary dependencies"
call pacapt -Sy
checkForError
# '--' forwards the remaining flags to the underlying package manager.
call pacapt -S -- -y make g++ gcc iptables xl2tpd libssl-dev module-init-tools curl
checkForError
#################################################################
# Download the pinned strongSwan release and build it from source with
# the EAP/XAuth plugin set required for the connection profiles below.
bigEcho "Installing StrongSwan..."
call mkdir -p $STRONGSWAN_TMP/src
curl -sSL "https://download.strongswan.org/strongswan-$STRONGSWAN_VERSION.tar.gz" | tar -zxC $STRONGSWAN_TMP/src --strip-components 1
checkForError
cd $STRONGSWAN_TMP/src
# openssl replaces the default gmp backend (--disable-gmp).
./configure --prefix=/usr --sysconfdir=/etc \
	--enable-eap-radius \
	--enable-eap-mschapv2 \
	--enable-eap-identity \
	--enable-eap-md5 \
	--enable-eap-mschapv2 \
	--enable-eap-tls \
	--enable-eap-ttls \
	--enable-eap-peap \
	--enable-eap-tnc \
	--enable-eap-dynamic \
	--enable-xauth-eap \
	--enable-openssl \
	--disable-gmp
checkForError
make
checkForError
make install
checkForError
#################################################################
bigEcho "Preparing various configuration files..."
# NOTE(review): strongSwan normally expects the parameters inside the
# `config setup` / `conn` sections of ipsec.conf to be indented under
# their section headers; confirm the layout below parses on the target
# strongSwan version before shipping.
cat > /etc/ipsec.conf <<EOF
# ipsec.conf - strongSwan IPsec configuration file
config setup
uniqueids=no
charondebug="cfg 2, dmn 2, ike 2, net 0"
conn %default
dpdaction=clear
dpddelay=300s
rekey=no
left=%defaultroute
leftfirewall=yes
right=%any
ikelifetime=60m
keylife=20m
rekeymargin=3m
keyingtries=1
auto=add
#######################################
# L2TP Connections
#######################################
conn L2TP-IKEv1-PSK
type=transport
keyexchange=ikev1
authby=secret
leftprotoport=udp/l2tp
left=%any
right=%any
rekey=no
forceencaps=yes
#######################################
# Default non L2TP Connections
#######################################
conn Non-L2TP
leftsubnet=0.0.0.0/0
rightsubnet=10.0.0.0/24
rightsourceip=10.0.0.0/24
#######################################
# EAP Connections
#######################################
# This detects a supported EAP method
conn IKEv2-EAP
also=Non-L2TP
keyexchange=ikev2
eap_identity=%any
rightauth=eap-dynamic
#######################################
# PSK Connections
#######################################
conn IKEv2-PSK
also=Non-L2TP
keyexchange=ikev2
authby=secret
# Cisco IPSec
conn IKEv1-PSK-XAuth
also=Non-L2TP
keyexchange=ikev1
leftauth=psk
rightauth=psk
rightauth2=xauth
EOF
# Daemon-wide strongSwan options: modular plugin config plus the DNS
# servers pushed to connecting clients.
cat > /etc/strongswan.conf <<EOF
# /etc/strongswan.conf - strongSwan configuration file
# strongswan.conf - strongSwan configuration file
#
# Refer to the strongswan.conf(5) manpage for details
charon {
load_modular = yes
send_vendor_id = yes
plugins {
include strongswan.d/charon/*.conf
attr {
dns = 8.8.8.8, 8.8.4.4
}
}
}
include strongswan.d/*.conf
EOF
# xl2tpd: L2TP endpoint configuration and the client address pool.
cat > /etc/xl2tpd/xl2tpd.conf <<EOF
[global]
port = 1701
auth file = /etc/ppp/chap-secrets
debug avp = yes
debug network = yes
debug state = yes
debug tunnel = yes
[lns default]
ip range = 10.1.0.2-10.1.0.254
local ip = 10.1.0.1
require chap = yes
refuse pap = yes
require authentication = yes
name = l2tpd
;ppp debug = yes
pppoptfile = /etc/ppp/options.xl2tpd
length bit = yes
EOF
# pppd options for L2TP sessions: client DNS, MTU/MRU and LCP keepalives.
cat > /etc/ppp/options.xl2tpd <<EOF
ipcp-accept-local
ipcp-accept-remote
ms-dns 8.8.8.8
ms-dns 8.8.4.4
noccp
auth
crtscts
idle 1800
mtu 1280
mru 1280
lock
lcp-echo-failure 10
lcp-echo-interval 60
connect-delay 5000
EOF
#################################################################
# Decide whether to keep or replace existing credentials, then enable
# NAT and IP forwarding for the VPN subnet.
if [[ -f /etc/ipsec.secrets ]] || [[ -f /etc/ppp/chap-secrets ]]; then
	echo "Do you wish to replace your old credentials? (Including a backup) [y|n]"
	while true; do
		# Non-interactive runs never overwrite existing credentials.
		if [ $INTERACTIVE -eq 0 ]; then
			echo "Old credentials were found but to play safe, they will not be automatically replaced. Delete them manually if you want them replaced."
			break
		fi
		read -p "" yn
		case $yn in
			[Yy]* ) backupCredentials; getCredentials; writeCredentials; break;;
			[Nn]* ) break;;
			* ) echo "Please answer with Yes or No [y|n].";;
		esac
	done
else
	getCredentials
	writeCredentials
fi
#################################################################
bigEcho "Applying changes..."
# Masquerade VPN client traffic and enable forwarding; ICMP redirects are
# disabled on all interfaces as is standard for IPsec gateways.
iptables --table nat --append POSTROUTING --jump MASQUERADE
echo 1 > /proc/sys/net/ipv4/ip_forward
for each in /proc/sys/net/ipv4/conf/*
do
	echo 0 > $each/accept_redirects
	echo 0 > $each/send_redirects
done
#################################################################
# Install a SysV init helper that (re)starts xl2tpd + ipsec and reapplies
# the NAT/forwarding sysctls.  The quoted 'EOF' delimiter keeps $1, $child
# etc. literal so they are expanded at service runtime, not now.
bigEcho "Create /etc/init.d/vpn-assist helper..."
cat > /etc/init.d/vpn-assist <<'EOF'
#!/bin/sh
### BEGIN INIT INFO
# Provides: vpn
# Required-Start: $network $local_fs
# Required-Stop: $network $local_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Strongswan and L2TPD helper
# Description: Service that starts up XL2TPD and IPSEC
### END INIT INFO
# Author: Phil Plückthun <phil@plckthn.me>
case "$1" in
start)
iptables --table nat --append POSTROUTING --jump MASQUERADE
echo 1 > /proc/sys/net/ipv4/ip_forward
for each in /proc/sys/net/ipv4/conf/*
do
echo 0 > $each/accept_redirects
echo 0 > $each/send_redirects
done
/usr/sbin/xl2tpd -p /var/run/xl2tpd.pid -c /etc/xl2tpd/xl2tpd.conf -C /var/run/xl2tpd.control
ipsec start
;;
stop)
iptables --table nat --flush
echo 0 > /proc/sys/net/ipv4/ip_forward
kill $(cat /var/run/xl2tpd.pid)
ipsec stop
;;
restart)
echo "Restarting IPSec and XL2TPD"
iptables --table nat --append POSTROUTING --jump MASQUERADE
echo 1 > /proc/sys/net/ipv4/ip_forward
for each in /proc/sys/net/ipv4/conf/*
do
echo 0 > $each/accept_redirects
echo 0 > $each/send_redirects
done
kill $(cat /var/run/xl2tpd.pid)
/usr/sbin/xl2tpd -p /var/run/xl2tpd.pid -c /etc/xl2tpd/xl2tpd.conf -C /var/run/xl2tpd.control
ipsec restart
;;
esac
exit 0
EOF
chmod +x /etc/init.d/vpn-assist
#################################################################
bigEcho "Starting up VPN..."
/etc/init.d/vpn-assist start
#################################################################
# Final summary: print the chosen/generated credentials for the admin.
echo "============================================================"
echo "PSK Key: $STRONGSWAN_PSK"
echo "Username: $STRONGSWAN_USER"
echo "Password: $STRONGSWAN_PASSWORD"
echo "============================================================"
echo "Note:"
echo "* Before connecting with a Windows client, please see: http://support.microsoft.com/kb/926179"
echo "* UDP Ports 1701, 500 and 4500 must be opened"
echo "* A specific host or public IP is not necessary as Strongswan utilises NAT traversal"
#################################################################
# Remove the build tree; the compiled binaries are already installed.
bigEcho "Cleaning up..."
call rm -rf $STRONGSWAN_TMP
sleep 2
exit 0
| true
|
08c55dae8520841697a4ba52a4e9b618a0e034cb
|
Shell
|
bogdbo/bogdbo.github.io
|
/arch.sh
|
UTF-8
| 2,131
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Post-install provisioning for an Arch Linux machine: hostname, locale,
# users and the base package set.  Override these before running configure.
TIMEZONE='Europe/London'
HOSTNAME='bogdan'
USER_NAME='bogdan'
# Orchestrate the full provisioning sequence.  Passwords are taken from
# ROOT_PASSWORD / USER_PASSWORD when set, otherwise prompted with echo
# disabled (stty -echo) so they are not shown on screen.
configure() {
  echo 'Setting hostname'
  set_hostname
  echo 'Setting timezone'
  set_timezone
  echo 'Setting locale'
  set_locale
  echo 'Setting console keymap'
  set_keymap
  echo 'Setting hosts file'
  set_hosts
  if [ -z "$ROOT_PASSWORD" ]
  then
    echo 'Enter the root password:'
    stty -echo
    read -r ROOT_PASSWORD
    stty echo
  fi
  echo 'Setting root password'
  set_root_password "$ROOT_PASSWORD"
  if [ -z "$USER_PASSWORD" ]
  then
    echo "Enter the password for user $USER_NAME"
    stty -echo
    read -r USER_PASSWORD
    stty echo
  fi
  echo 'Creating initial user'
  create_user "$USER_NAME" "$USER_PASSWORD"
  echo 'Installing packages'
  install_packages
  echo 'Other'
  other
}
# Install the full package set in one pacman transaction.
# Fix: the list is now a bash array, so pacman receives exactly one word
# per package instead of relying on unquoted word splitting of a string;
# the accidental duplicate 'exa' entry has also been removed.
install_packages() {
  local -a packages=(
    # General utilities/libraries
    chromium networkmanager pulseaudio firefox awesome neovim net-tools
    openssh fd exa ripgrep sudo fish rofi
    # Development packages
    nodejs alacritty man-db code
    # Misc programs
    mpv ranger sxiv qbittorrent
    # Xserver
    xorg-server xorg-xinit
  )
  pacman -S --noconfirm "${packages[@]}"
}
set_hostname() {
  # Persist the configured hostname for systemd to pick up on boot.
  printf '%s\n' "$HOSTNAME" > /etc/hostname
}
# Point /etc/localtime at the zoneinfo file for $TIMEZONE.
# -T (GNU coreutils) treats the destination as a file, never a directory.
set_timezone() {
  ln -sT "/usr/share/zoneinfo/$TIMEZONE" /etc/localtime
}
# Configure the system locale: en_US UTF-8 with byte-order collation
# (LC_COLLATE=C gives stable, locale-independent sorting), then generate
# the locale data.
set_locale() {
  echo 'LANG="en_US.UTF-8"' >> /etc/locale.conf
  echo 'LC_COLLATE="C"' >> /etc/locale.conf
  echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen
  locale-gen
}
set_keymap() {
  # Virtual-console keyboard layout (read by systemd-vconsole-setup).
  printf 'KEYMAP=us\n' > /etc/vconsole.conf
}
# Write a minimal /etc/hosts mapping localhost for IPv4 and IPv6.
set_hosts() {
  cat > /etc/hosts <<EOF
127.0.0.1 localhost
::1 localhost
EOF
}
# Set the root password by feeding it twice to passwd on stdin.
# Fix: printf guarantees a terminating newline after the confirmation
# entry (the old `echo -en "$pw\n$pw"` left the second line unterminated)
# and is safe for passwords that start with '-' or contain backslashes.
set_root_password() {
  local password="$1"; shift
  printf '%s\n%s\n' "$password" "$password" | passwd
}
# Create a user with a home directory and set their password.
# Fix: printf guarantees a terminating newline after the confirmation
# entry and handles passwords starting with '-' or containing backslashes
# (echo -en did neither reliably).
create_user() {
  local name="$1"; shift
  local password="$1"; shift
  useradd -m "$name"
  printf '%s\n%s\n' "$password" "$password" | passwd "$name"
}
other() {
ln -s /bin/nvim /bin/vim
ln -s /bin/nvim /bin/vi
}
configure
| true
|
79479e4de56f8fbc185c36859e56eb80f253520c
|
Shell
|
olton/mina-node-install
|
/scripts/mina-status-monitor.sh
|
UTF-8
| 17,133
| 3
| 3
|
[
"MIT"
] |
permissive
|
# Not sure why this is still in shell...but...we're here now.
#Credit to _thanos for the original snarkstopper - https://forums.minaprotocol.com/t/guide-script-automagically-stops-snark-work-prior-of-getting-a-block-proposal/299
#Credit to @vanphandinh for docker port, re-integrating some of those changes here from https://github.com/vanphandinh/mina-status-monitor/blob/master/mina-status-monitor.sh
# Tunables for the monitoring loop.  All *WINDOW/*LIMIT values are counted
# in MONITORCYCLE intervals unless stated otherwise.
#General Parameters
readonly MONITORCYCLE=300 #how many seconds between mina client status checks (e.g. 60s * 5min = 300)
readonly CATCHUPWINDOW=18 #how many MONITORCYCLE intervals to wait for catchup before restart (12 * 5mins = 60 mins)
readonly MAXUNVALIDATEDDELTA=3 #will count as out of compliance if more than this many blocks ahead or behind unvalidated count
readonly MAXSTATUSFAILURES=5 #will allow upt to this number of cycles of status failure before force restart
readonly STANDOFFAFTERRESTART=2 #how many MONITORSYCLCE intervals should be allowed for daemon to try to restart before issuing another restart
readonly BOOTSTRAPLIMIT=6 #how many intervals should daemon be able to be in bootstrap?
readonly GARBAGE="Using password from environment variable CODA_PRIVKEY_PASS" #strip this out of the status
# Monitoring docker containers via graphql instead of daemon locally
# Set MONITORVIAGRAPHQL = 0 to use local `mina client` commands. If set to 1, provide GRAPHQL_URI, or it will attempt to detect from docker - assumes instance named mina
readonly USEDOCKER=0
#Snark Stopper
readonly USESNARKSTOPPER=1 #set to 1 to run snark stopper, 0 to turn it off (will stop snarking if not in sync, or producing a block soon)
SNARKWORKERTURNEDOFF=1 #set to 1 to assume snark worker should always be turned on for first run, otherwise 0
readonly STOPSNARKINGLESSTHAN=5 #threshold in minutes to stop snarking - if minutes until produce block < this value, will stop snark worker
# Placeholders below MUST be replaced before running the monitor.
readonly FEE=YOUR_SW_FEE ### *** SET YOUR SNARK WORKER FEE HERE *** ###
readonly SW_ADDRESS=YOUR_SW_ADDRESS ### *** SET YOUR SNARK WORKER ADDRESS HERE *** ###
#Sidecar Monitoring
readonly USESIDECARMONITOR=1 #set to 1 to monitor sidecar service, 0 ignores sidecar monitoring
#Archive Monitoring - Not currently supported with Docker - set to 0 if USEDOCKER=1
readonly USEARCHIVEMONITOR=1 #set to 1 to monitor archive service, 0 ignores archive monitoring
#Compare to Mina Explorer Height
readonly USEMINAEXPLORERMONITOR=0 #set to 1 to compare synced height vs. Mina Explorer reported height, 0 does not check MinaExplorer
readonly MINAEXPLORERMAXDELTA=3 #number of blocks to tolerate in synced blockheight vs. Mina Explorers reported height
readonly MINAEXPLORERTOLERANCEWINDOW=5 #how many intervals to wait to restart with coninual out of sync vs. mina explorer
readonly MINAEXPLORERURL=https://api.minaexplorer.com #url to get status from mina explorer -- devnet: https://devnet.api.minaexplorer.com
#File Descriptor Monitoring - Not currently supported with Docker - set to 0 if USEDOCKER=1
#if turned on, this dumps lsof to /tmp and does not clean up after itself - keep an eye on that!
readonly USEFILEDESCRIPTORSMONITOR=0 #set to 1 to turn on file descriptor logging, 0 to turn it on
# Initialise all monitoring state: the fixed time-unit constants, the
# empty string holders for parsed status output, every numeric counter at
# zero, and the lsof sampling step.
function INITIALIZEVARS {
  # Time-unit constants.
  readonly SECONDS_PER_MINUTE=60
  readonly SECONDS_PER_HOUR=3600
  readonly MINUTES_PER_HOUR=60
  readonly HOURS_PER_DAY=24
  # String state starts empty.
  MINA_STATUS=""
  STAT=""
  NEXTBLOCK=""
  # All counters/gauges start at zero; assign them in a single pass.
  local counter
  for counter in \
    UPTIMESECS STATUSFAILURES TOTALSTATUSFAILURES DAEMONRESTARTCOUNTER \
    KNOWNSTATUS CONNECTINGCOUNT OFFLINECOUNT BOOTSTRAPCOUNT CATCHUPCOUNT \
    HEIGHTOFFCOUNT SIDECARREPORTING TOTALCONNECTINGCOUNT TOTALOFFLINECOUNT \
    TOTALSTUCKCOUNT TOTALCATCHUPCOUNT TOTALHEIGHTOFFCOUNT ARCHIVEDOWNCOUNT \
    BLOCKCHAINLENGTH DELTAVALIDATED DELTAHEIGHT DELTAME SYNCCOUNT \
    MINAEXPLORERBLOCKCHAINLENGTH VSMECOUNT TOTALVSMECOUNT \
    SNARKWORKERSTOPPEDCOUNT FDCOUNT FDCHECK; do
    printf -v "$counter" '0'
  done
  # Step size for file-descriptor growth reporting.
  FDINCREMENT=100
}
# Validate option combinations and, when monitoring a dockerised node,
# derive the GraphQL endpoint from the 'mina' container's IP address.
# Exits hard on unsupported/underivable configurations.
function CHECKCONFIG {
  #Get Graphql endpoint form docker inpect
  if [[ "$USEDOCKER" -eq 1 ]]; then
    # Archive and FD monitoring shell out to local tools, which does not
    # work against a containerised daemon.
    if [[ "$USEARCHIVEMONITOR" -eq 1 || "$USEFILEDESCRIPTORSMONITOR" -eq 1 ]]; then
      echo "USEDOCKER is set, but Archive and File Descriptor Monitoring also turned on."
      echo "Archive and File Descriptor monitoring are not currently supported for docker"
      exit 1
    fi
    GRAPHQL_URI="$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' mina)"
    if [[ "$GRAPHQL_URI" != "" ]]; then
      GRAPHQL_URI="http://$GRAPHQL_URI:3085/graphql"
    else
      echo "unable to get graphql URI and USEDOCKER is set"
      exit 1
    fi
  fi
}
#################### ADD DOCKER SUPPORT #######################
# Restart the Mina daemon, rate-limited by a stand-off counter.
# Globals: DAEMONRESTARTCOUNTER (r/w), TOTALSTATUSFAILURES (r/w),
#          STANDOFFAFTERRESTART, MAXSTATUSFAILURES, USEDOCKER (read).
# Uses -ge rather than -eq: several callers (CHECKMINAEXPLORER,
# VALIDATEHEIGHTS, main loop) also bump DAEMONRESTARTCOUNTER before calling,
# so the counter can jump past the exact stand-off value; an equality test
# would then never fire and the daemon would never be restarted.
function RESTARTMINADAEMON {
((DAEMONRESTARTCOUNTER++))
if [[ "$DAEMONRESTARTCOUNTER" -ge "$STANDOFFAFTERRESTART" || "$TOTALSTATUSFAILURES" -gt "$MAXSTATUSFAILURES" ]]; then
echo "Triggering restart: restart counter $DAEMONRESTARTCOUNTER, total status failures $TOTALSTATUSFAILURES"
if [[ "$USEDOCKER" -eq 0 ]]; then
echo "Restarting MINA using systemd"
systemctl --user restart mina
else
echo "Restarting MINA using docker restart"
docker restart mina
fi
DAEMONRESTARTCOUNTER=0
TOTALSTATUSFAILURES=0
else
echo "Not restarting MINA Daemon yet because STANDOFFAFTERRESTART not met yet. counter, standoff:", $DAEMONRESTARTCOUNTER, $STANDOFFAFTERRESTART
fi
}
# Restart the mina-archive systemd unit; docker deployments are not wired
# up for archive monitoring, so just report that fact.
function RESTARTARCHIVESERVICE {
if [[ "$USEDOCKER" -ne 0 ]]; then
echo "Docker monitoring not supported for archive service"
else
systemctl --user restart mina-archive.service
fi
}
# Restart the uptime sidecar, via systemd or docker depending on deployment.
function RESTARTSIDECAR {
case "$USEDOCKER" in
0) systemctl --user restart mina-sidecar.service ;;
*) docker restart mina-sidecar ;;
esac
}
# Point the node's snark worker at SW_ADDRESS and set the work fee.
# Both branches now use the long --address flag; the non-docker branch
# previously passed -address, diverging from the docker branch and from
# the mina CLI's documented option.
function STARTSNARKING {
if [[ "$USEDOCKER" -eq 0 ]]; then
mina client set-snark-worker --address $SW_ADDRESS
mina client set-snark-work-fee $FEE
else
docker exec -t mina mina client set-snark-worker --address $SW_ADDRESS
docker exec -t mina mina client set-snark-work-fee $FEE
fi
}
# Clear the snark-worker key (no --address argument) so the node stops
# doing snark work.
function STOPSNARKING {
if [[ "$USEDOCKER" -ne 0 ]]; then
docker exec -t mina mina client set-snark-worker
else
mina client set-snark-worker
fi
}
# Query the daemon's status and populate MINA_STATUS / STAT, plus (when
# synced) BLOCKCHAINLENGTH, HIGHESTBLOCK, HIGHESTUNVALIDATEDBLOCK,
# UPTIMESECS and (docker only) NEXTPROP.
# Non-docker: parses `mina client status -json` (GARBAGE filter is defined
# earlier in the file — not visible here). Docker: queries the GraphQL API.
function GETDAEMONSTATUS {
if [[ "$USEDOCKER" -eq 0 ]]; then
MINA_STATUS="$(mina client status -json | grep -v --regexp="$GARBAGE" )"
if [[ "$MINA_STATUS" == "" ]]; then
echo "Did not get Mina Client Status."
else
# STAT keeps jq's quoting, hence comparisons against "\"Synced\"" elsewhere.
STAT="$(echo $MINA_STATUS | jq .sync_status)"
if [[ "$STAT" == "\"Synced\"" ]]; then
BLOCKCHAINLENGTH="$(echo $MINA_STATUS | jq .blockchain_length)"
HIGHESTBLOCK="$(echo $MINA_STATUS | jq .highest_block_length_received)"
HIGHESTUNVALIDATEDBLOCK="$(echo $MINA_STATUS | jq .highest_unvalidated_block_length_received)"
UPTIMESECS="$(echo $MINA_STATUS | jq .uptime_secs)"
fi
fi
else
# GraphQL query mirrors the JSON fields above, camelCased.
MINA_STATUS=$(curl $GRAPHQL_URI -s --max-time 60 \
-H 'content-type: application/json' \
--data-raw '{"operationName":null,"variables":{},"query":"{\n daemonStatus {\n syncStatus\n uptimeSecs\n blockchainLength\n highestBlockLengthReceived\n highestUnvalidatedBlockLengthReceived\n nextBlockProduction {\n times {\n startTime\n }\n }\n }\n}\n"}' \
--compressed)
if [[ "$MINA_STATUS" == "" ]]; then
echo "Cannot connect to the GraphQL endpoint $GRAPHQL_URI."
#sleep 10s #not sure why sleeping here is useful -- removing.
else
STAT="$(echo $MINA_STATUS | jq .data.daemonStatus.syncStatus)"
if [[ "$STAT" == "\"Synced\"" ]]; then
BLOCKCHAINLENGTH="$(echo $MINA_STATUS | jq .data.daemonStatus.blockchainLength)"
HIGHESTBLOCK="$(echo $MINA_STATUS | jq .data.daemonStatus.highestBlockLengthReceived)"
HIGHESTUNVALIDATEDBLOCK="$(echo $MINA_STATUS | jq .data.daemonStatus.highestUnvalidatedBlockLengthReceived)"
# Millisecond epoch of the next scheduled block; consumed by MANAGESNARKER.
NEXTPROP="$(echo $MINA_STATUS | jq .data.daemonStatus.nextBlockProduction.times[0].startTime)"
UPTIMESECS="$(echo $MINA_STATUS | jq .data.daemonStatus.uptimeSecs)"
fi
fi
fi
}
# Count recent 'Got block data' lines from the sidecar's last 10 minutes of
# logs into SIDECARREPORTING (0 means the sidecar looks dead).
# to enable sidecar monitoring, the user requires journalctl rights
# this command will provide access, but requires you to log out and log back in / restart service
# sudo usermod -aG systemd-journal [USER]
function GETSIDECARSTATUS {
if [[ "$USEDOCKER" -eq 0 ]]; then
SIDECARREPORTING="$(journalctl --user-unit mina-sidecar.service --since "10 minutes ago" | grep -c 'Got block data')"
else
# 2>&1: docker logs writes to stderr, so merge streams before grepping.
SIDECARREPORTING="$(docker logs --since 10m mina-sidecar 2>&1 | grep -c 'Got block data')"
fi
}
#################### END DOCKER SUPPORT #######################
# Set ARCHIVERUNNING to the number of mina-archive processes found
# (0 when the archiver is down). Docker deployments are unsupported.
function GETARCHIVESTATUS {
#TODO this should be improved to monitor something useful....TBD what that might be
if [[ "$USEDOCKER" -eq 0 ]]; then
# grep -c replaces the original grep | wc -l pipeline (same count, one process fewer).
ARCHIVERUNNING="$(ps -A | grep -c mina-archive)"
else
echo "NOT SETUP TO CHECK ARCHIVE ON DOCKER"
fi
}
# Track open file descriptors for the mina user; every time the count grows
# past the moving FDCHECK threshold, dump lsof to /tmp (never cleaned up!)
# and raise the threshold by FDINCREMENT (halved near the ulimit).
function CHECKFILEDESCRIPTORS {
MINAUSER="minar" #set to userid the mina service runs under (will be used to monitor file descriptor of that user)
# NOTE(review): "minar" is hardcoded — confirm it matches the actual service user.
FDLIMIT=$(ulimit -n)
FDCOUNT="$(lsof -u $MINAUSER | wc -l)"
if [ $FDCOUNT -gt $FDCHECK ]; then
lsof -u $MINAUSER > "/tmp/lsof$(date +%m-%d-%H-%M)".txt
FDCHECK=$(( $FDCOUNT + $FDINCREMENT ))
# As we approach the ulimit, shrink the increment so dumps get denser.
if [ $FDLIMIT -lt $FDCHECK ]; then
FDINCREMENT=$(( $FDINCREMENT / 2 ))
FDCHECK=$(( $FDCOUNT + $FDINCREMENT ))
fi
echo Logged lsof to /tmp at $FDCOUNT FD - Next log at $FDCHECK FD
fi
}
# Restart the archive service if GETARCHIVESTATUS found no running process.
function CHECKARCHIVE {
GETARCHIVESTATUS
if [[ "$ARCHIVERUNNING" -gt 0 ]]; then
ARCHIVERUNNING=0
else
((ARCHIVEDOWNCOUNT++))
echo "Restarting mina-Archive Service. Archive Down Count:", $ARCHIVEDOWNCOUNT
RESTARTARCHIVESERVICE
fi
}
# Restart the sidecar when it reported fewer than 3 blocks in the last
# 10 minutes while the node has been in sync for >2 monitor cycles.
# NOTE(review): the "15 mins" in the message assumes MONITORCYCLE is ~5
# minutes — confirm against the MONITORCYCLE setting earlier in the file.
function CHECKSIDECAR {
GETSIDECARSTATUS
if [[ "$SIDECARREPORTING" -lt 3 && "$SYNCCOUNT" -gt 2 ]]; then
echo "Restarting mina-sidecar - only reported " $SIDECARREPORTING " times out in 10 mins and node in sync longer than 15 mins."
RESTARTSIDECAR
fi
}
# Toggle the snark worker around block production: stop snarking shortly
# before we are due to produce a block (STOPSNARKINGLESSTHAN minutes) and
# while out of sync; resume otherwise. Tracks state in SNARKWORKERTURNEDOFF.
function MANAGESNARKER {
if [[ "$STAT" == "\"Synced\"" ]]; then
# Calculate whether block producer will run within the next X mins
# If up for a block within 5 mins, stop snarking, resume on next pass
# First check if we are going to produce a block
if [[ "$USEDOCKER" -eq 0 ]]; then
PRODUCER="$(echo $MINA_STATUS | jq .next_block_production.timing[0])"
if [[ "$PRODUCER" == "\"Produce\"" ]]; then
# Strip the jq quoting and the trailing milliseconds from the epoch
# timestamp so NOW (seconds) can be subtracted below.
NEXTPROP="$(echo $MINA_STATUS | jq .next_block_production.timing[1].time)"
NEXTPROP="${NEXTPROP::-3}"
NEXTPROP="${NEXTPROP:1}"
NEXTPROP="${NEXTPROP:0:-1}"
#NOW="$(date +%s%N | cut -b1-13)"
#TIMEBEFORENEXT="$(($NEXTPROP-$NOW))"
#TIMEBEFORENEXTSEC="${TIMEBEFORENEXT:0:-3}"
#TIMEBEFORENEXTMIN="$((${TIMEBEFORENEXTSEC} / ${SECONDS_PER_MINUTE}))"
else
# No scheduled slot: make sure the worker is running and bail out.
echo "Next block production time unknown"
if [[ "$SNARKWORKERTURNEDOFF" -gt 0 ]]; then
echo "Starting the snark worker.."
STARTSNARKING
SNARKWORKERTURNEDOFF=0
fi
return 0
fi
else
if [[ $NEXTPROP != null ]]; then
#DOCKER IMPL
# GraphQL startTime is a quoted ms-epoch string; tonumber unquotes it,
# then the last three digits (milliseconds) are dropped.
NEXTPROP=$(echo $NEXTPROP | jq tonumber)
NEXTPROP="${NEXTPROP::-3}"
else
echo "Next block production time unknown"
if [[ "$SNARKWORKERTURNEDOFF" -gt 0 ]]; then
echo "Starting the snark worker.."
STARTSNARKING
SNARKWORKERTURNEDOFF=0
fi
return 0
fi
fi
# Break the countdown into days/hours/minutes for the status line.
NOW="$(date +%s)"
TIMEBEFORENEXT="$(($NEXTPROP - $NOW))"
TIMEBEFORENEXTMIN="$(($TIMEBEFORENEXT / $SECONDS_PER_MINUTE))"
MINS="$(($TIMEBEFORENEXTMIN % $MINUTES_PER_HOUR))"
HOURS="$(($TIMEBEFORENEXTMIN / $MINUTES_PER_HOUR))"
DAYS="$(($HOURS / $HOURS_PER_DAY))"
HOURS="$(($HOURS % $HOURS_PER_DAY))"
NEXTBLOCK="Next block production: $DAYS days $HOURS hours $MINS minutes"
if [[ "$TIMEBEFORENEXTMIN" -lt "$STOPSNARKINGLESSTHAN" && "$SNARKWORKERTURNEDOFF" -eq 0 ]]; then
echo "Stop snarking - producing a block soon"
STOPSNARKING
((SNARKWORKERTURNEDOFF++))
else
if [[ "$TIMEBEFORENEXTMIN" -gt "$STOPSNARKINGLESSTHAN" && "$SNARKWORKERTURNEDOFF" -gt 0 ]]; then
STARTSNARKING
SNARKWORKERTURNEDOFF=0
fi
fi
else # stop snarking if not in sync
if [[ "$SNARKWORKERTURNEDOFF" -eq 0 ]]; then
echo "Stop snarking - node is not in sync"
STOPSNARKING
((SNARKWORKERTURNEDOFF++))
fi
fi
}
# Compare the local synced height against Mina Explorer's reported height;
# restart the daemon after MINAEXPLORERTOLERANCEWINDOW consecutive cycles
# outside +/- MINAEXPLORERMAXDELTA blocks.
# NOTE(review): curling $MINAEXPLORERURL directly and reading
# .blockchainLength assumes the API root returns that field — confirm the
# endpoint (Mina Explorer also exposes a /summary route).
function CHECKMINAEXPLORER {
MINAEXPLORERBLOCKCHAINLENGTH="$(curl -s "$MINAEXPLORERURL" | jq .blockchainLength)"
DELTAME="$(($BLOCKCHAINLENGTH-$MINAEXPLORERBLOCKCHAINLENGTH))"
if [[ "$DELTAME" -gt "$MINAEXPLORERMAXDELTA" ]] || [[ "$DELTAME" -lt -"$MINAEXPLORERMAXDELTA" ]]; then
((VSMECOUNT++))
else
VSMECOUNT=0
fi
if [[ "$VSMECOUNT" -gt "$MINAEXPLORERTOLERANCEWINDOW" ]]; then
echo "Restarting mina - block heigh varied from ME too much / too long:", $DELTAHEIGHT, $BLOCKCHAINLENGTH, $HIGHESTBLOCK, $HIGHESTUNVALIDATEDBLOCK, $MINAEXPLORERBLOCKCHAINLENGTH, $DELTAME, $VSMECOUNT
((TOTALVSMECOUNT++))
# NOTE(review): RESTARTMINADAEMON increments DAEMONRESTARTCOUNTER itself,
# so this extra increment advances the counter by 2 per call — presumably
# intended to speed up the stand-off; verify.
((DAEMONRESTARTCOUNTER++))
RESTARTMINADAEMON
fi
}
# While synced, sanity-check our chain length against the highest observed
# (validated and unvalidated) blocks, restarting the daemon when the node
# looks stuck or persistently behind.
function VALIDATEHEIGHTS {
# if in sync, confirm that blockchain length ~= max observed
DELTAHEIGHT="$(($BLOCKCHAINLENGTH-$HIGHESTBLOCK))"
if [[ "$DELTAHEIGHT" -gt "$MAXUNVALIDATEDDELTA" ]] || [[ "$DELTAHEIGHT" -lt -"$MAXUNVALIDATEDDELTA" ]]; then
((HEIGHTOFFCOUNT++))
else
HEIGHTOFFCOUNT=0
fi
# Validated height trailing unvalidated by >5 blocks => node is stuck.
DELTAVALIDATED="$(($HIGHESTUNVALIDATEDBLOCK-$HIGHESTBLOCK))"
if [[ "$DELTAVALIDATED" -gt 5 ]]; then
echo "Node stuck validated block height delta more than 5 blocks. Difference from Max obvserved and max observied unvalidated:", $DELTAVALIDATED
((TOTALSTUCKCOUNT++))
# NOTE(review): RESTARTMINADAEMON also increments this counter — double
# increment per call; verify this acceleration is intentional.
((DAEMONRESTARTCOUNTER++))
SYNCCOUNT=0
RESTARTMINADAEMON
fi
if [[ "$HEIGHTOFFCOUNT" -gt 2 ]]; then
echo "Restarting mina - Block Chain Length differs from Highest Observed Block by 3 or more", $DELTAHEIGHT, $BLOCKCHAINLENGTH, $HIGHESTBLOCK, $HIGHESTUNVALIDATEDBLOCK, $MINAEXPLORERBLOCKCHAINLENGTH, $DELTAME
((TOTALHEIGHTOFFCOUNT++))
((DAEMONRESTARTCOUNTER++))
HEIGHTOFFCOUNT=0
RESTARTMINADAEMON
fi
}
# --- Main monitor loop: poll the daemon every MONITORCYCLE seconds and ---
# --- react to each sync state; never exits except on interrupted sleep ---
INITIALIZEVARS
CHECKCONFIG
while :; do
KNOWNSTATUS=0
GETDAEMONSTATUS
# Synced: reset the trouble counters and run the height/explorer checks.
if [[ "$STAT" == "\"Synced\"" ]]; then
VALIDATEHEIGHTS
KNOWNSTATUS=1
OFFLINECOUNT=0
CONNECTINGCOUNT=0
CATCHUPCOUNT=0
DAEMONRESTARTCOUNTER=0
((SYNCCOUNT++))
if [[ "$USEMINAEXPLORERMONITOR" -eq 1 ]]; then
CHECKMINAEXPLORER
fi
else
SYNCCOUNT=0
fi
# Each transient state gets its own consecutive-cycle counter with a
# restart threshold (Connecting: 2, Offline: 3, Catchup: CATCHUPWINDOW).
if [[ "$STAT" == "\"Connecting\"" ]]; then
KNOWNSTATUS=1
((CONNECTINGCOUNT++))
((TOTALCONNECTINGCOUNT++))
fi
if [[ "$CONNECTINGCOUNT" -gt 1 ]]; then
echo "Restarting mina - too long in Connecting state (2 cycles)"
RESTARTMINADAEMON
CONNECTINGCOUNT=0
fi
if [[ "$STAT" == "\"Offline\"" ]]; then
KNOWNSTATUS=1
((OFFLINECOUNT++))
((TOTALOFFLINECOUNT++))
fi
if [[ "$OFFLINECOUNT" -gt 2 ]]; then
echo "Restarting mina - too long in Offline state (3 cycles)"
RESTARTMINADAEMON
OFFLINECOUNT=0
fi
if [[ "$STAT" == "\"Catchup\"" ]]; then
KNOWNSTATUS=1
((CATCHUPCOUNT++))
((TOTALCATCHUPCOUNT++))
fi
if [[ "$CATCHUPCOUNT" -gt $CATCHUPWINDOW ]]; then
echo "Restarting mina - too long in Catchup state"
RESTARTMINADAEMON
CATCHUPCOUNT=0
fi
if [[ "$STAT" == "\"Bootstrap\"" ]]; then
KNOWNSTATUS=1
((BOOTSTRAPCOUNT++))
if [[ "$BOOTSTRAPCOUNT" -eq "$BOOTSTRAPLIMIT" ]]; then
RESTARTMINADAEMON
elif [[ "$BOOTSTRAPCOUNT" -gt "$BOOTSTRAPLIMIT" ]]; then
RESTARTMINADAEMON
fi
else
BOOTSTRAPCOUNT=0
fi
if [[ "$STAT" == "\"Listening\"" ]]; then
#TODO limit? what does it mean if hanging out in listening?
KNOWNSTATUS=1
fi
# Unrecognised status (or no status at all) counts toward restart.
if [[ "$KNOWNSTATUS" -eq 0 ]]; then
echo "Returned Status is unkown or not handled:" $STAT
((STATUSFAILURES++))
((TOTALSTATUSFAILURES++))
RESTARTMINADAEMON
else
STATUSFAILURES=0
# Optional sub-monitors only run when the daemon responded sanely.
if [[ "$USESNARKSTOPPER" -eq 1 ]]; then
MANAGESNARKER
fi
if [[ "$USEARCHIVEMONITOR" -eq 1 ]]; then
CHECKARCHIVE
fi
if [[ "$USESIDECARMONITOR" -eq 1 ]]; then
CHECKSIDECAR
fi
fi
if [[ "$USEFILEDESCRIPTORSMONITOR" -eq 1 ]]; then
CHECKFILEDESCRIPTORS
fi
# One status line per cycle with the current state and lifetime totals.
echo $(date) "Status:" $STAT, "Connecting Count, Total:" $CONNECTINGCOUNT $TOTALCONNECTINGCOUNT, "Offline Count, Total:" $OFFLINECOUNT $TOTALOFFLINECOUNT, "Archive Down Count:" $ARCHIVEDOWNCOUNT, "Node Stuck Below Tip:" $TOTALSTUCKCOUNT, "Total Catchup:" $TOTALCATCHUPCOUNT, "Total Height Mismatch:" $TOTALHEIGHTOFFCOUNT, "Total Mina Explorer Mismatch:" $TOTALVSMECOUNT, "Time Until Block:" $TIMEBEFORENEXTMIN, $NEXTBLOCK, "Current & Total Status Failures:" $STATUSFAILURES, $TOTALSTATUSFAILURES, "Uptime Hours:" $(($UPTIMESECS / $SECONDS_PER_HOUR)), "Uptime Total Min:" $(($UPTIMESECS / $SECONDS_PER_MINUTE))
sleep $MONITORCYCLE
#check if sleep exited with break (ctrl+c) to exit the loop
test $? -gt 128 && break;
done
| true
|
0d1dbcfa728a4aac44a96519df0682a046df6e04
|
Shell
|
abhnerAraujo/IF1006
|
/script_projeto.sh
|
UTF-8
| 3,096
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
# Bootstrap a dev environment (git, maven, junit, docker), fetch the
# spring-petclinic project into DEV/, build it, and push a docker image.
AMBIENT_OK=true;
if hash git 2>/dev/null;
then
	echo "Git OK";
else
	echo "Git is not installed and is needed to proceed. Installing now...";
	echo "Getting Git. Press Y if asked to install."
	sudo apt-get install git;
	if hash git 2>/dev/null;
	then
		echo "Git OK";
	else
		AMBIENT_OK=false;
	fi
fi
if hash mvn 2>/dev/null;
then
	echo "Maven OK";
else
	echo "Maven is not installed and is needed to proceed. Installing now...";
	echo "Updating ambient...";
	sudo apt-get install software-properties-common;
	sudo apt-add-repository universe;
	sudo apt-get update;
	echo "Getting Maven. Press Y if asked to install."
	sudo apt-get install maven;
	if hash mvn 2>/dev/null;
	then
		echo "Maven OK";
	else
		AMBIENT_OK=false;
	fi
fi
if hash junit 2>/dev/null;
then
	echo "JUnit OK";
else
	echo "JUnit is not installed and is needed to proceed. Installing now..";
	sudo apt-get install junit;
	if hash junit 2>/dev/null;
	then
		echo "JUnit OK";
	else
		AMBIENT_OK=false;
	fi
fi
if [ "$AMBIENT_OK" = true ];
then
	echo "Setting the things up:"
	mkdir -p "DEV";
	mkdir -p "TEST";
	mkdir -p "UAT";
	mkdir -p "PRODUCTION";
	echo "Directories OK"
	echo "Getting project into DEV directory";
	# Work inside DEV from here on so the later 'cd spring-petclinic'
	# succeeds on both branches. (The original only cd'ed into DEV when
	# cloning, and tested an undefined $DIR instead of the DEV directory.)
	cd "DEV";
	if [ -n "$(ls -A .)" ];
	then
		echo "DEV directory is not empty"
	else
		echo "downloading repository into DEV folder...";
		git clone "https://github.com/jfsc/spring-petclinic.git";
		echo "repository was clonned with success."
	fi
	cd spring-petclinic;
	mvn package;
fi
if hash docker 2>/dev/null;
then
	echo "Docker OK";
else
	echo "Docker is not installed and is needed to proceed. Installing now..";
	sudo apt-get update;
	sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D;
	sudo apt-add-repository 'deb https://apt.dockerproject.org/repo ubuntu-xenial main';
	sudo apt-get update;
	apt-cache policy docker-engine;
	sudo apt-get install -y docker-engine;
	# Re-check docker itself (the original mistakenly re-checked junit here).
	if hash docker 2>/dev/null;
	then
		echo "Docker OK";
	else
		AMBIENT_OK=false;
	fi
fi
if [ "$AMBIENT_OK" = true ];
then
	echo "Tudo pronto!";
	sudo docker login;
	echo "Criar nova imagem[I] ou usar padrão[P]?";
	read imagem;
	# POSIX test uses '='; the original's '== "I"]' was both a bashism
	# under /bin/sh and a syntax error (missing space before ']').
	if [ "$imagem" = "I" ];
	then
		echo "digite o nome da imagem:";
		read imgnome;
		sudo docker build -t "$imgnome" .;
		echo "usuario do docker:"
		read user
		sudo docker tag "$imgnome" "$user"/"$imgnome";
		sudo docker push "$user"/"$imgnome";
		echo "Imagem $imgnome no dockerhub";
	else
		sudo docker build -t spring-petclininc .;
		sudo docker tag spring-petclininc ianfireman/spring-petclininc:v2;
		sudo docker push ianfireman/spring-petclininc:v2;
		echo "Imagem spring-petclininc no dockerhub";
	fi
else
	echo "Instalação do docker foi mal sucedida"
fi
| true
|
36ca2cdb7b3311e21cb6254f68b3558082f195a3
|
Shell
|
pabloschu1/angular-pipeline-example
|
/bin/md5
|
UTF-8
| 185
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Print the MD5 digest of the concatenated input files (or stdin when no
# arguments are given), using whichever md5 tool the host OS provides.
case $(uname) in
    Darwin)
        cat -- "$@" | md5 -q
        ;;
    Linux)
        cat -- "$@" | md5sum | awk '{ print $1 }'
        ;;
    *)
        # The original printed an undefined $DIST here; report the real uname.
        echo "Unsupported distro $(uname)" >&2
        exit 1
        ;;
esac
| true
|
7f52743010c613b2f1f5db702f2949e890064833
|
Shell
|
joesitton/dotfiles
|
/config/eww/scripts/wifi
|
UTF-8
| 397
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# eww widget helper: report wifi state as a colour (--COL), network name
# (--ESSID) or icon (--ICON), based on NetworkManager's global status.
# Interface name wlp0s20f3 is hardcoded for this machine.
status=$(nmcli g | grep -oE "disconnected")
essid=$(nmcli c | grep wlp0s20f3 | awk '{print ($1)}')
# NOTE(review): the icon strings below appear empty in this copy — the
# originals likely held Nerd Font glyphs lost in transcoding; confirm.
if [ "$status" ] ; then
icon=""
text=""
col="#575268"
else
icon=""
text="${essid}"
col="#a1bdce"
fi
if [[ "$1" == "--COL" ]]; then
echo "$col"
elif [[ "$1" == "--ESSID" ]]; then
echo "$text"
elif [[ "$1" == "--ICON" ]]; then
echo "$icon"
fi
| true
|
c0d9024bb03ab77fd05ce8e8f40cb813c100571c
|
Shell
|
ton-rocks/general-ton-node
|
/scripts/docker_export_wallet.sh
|
UTF-8
| 877
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Copy validator (and, for genesis nodes, master wallet/config) key files
# out of the TON node container into the current directory.
# Requires env.sh to define DOCKER_NAME and optionally GENESIS.
if [ ! -f ./env.sh ]; then
    echo "env.sh not found!"
    exit 1
fi
. ./env.sh
# Pass "y" as $1 to skip the interactive overwrite confirmation.
if [ "$1" != "y" ]; then
    echo -n "This may overwrite existing files. Are you sure? (y/n): "
    read answer
    if [ "$answer" != "Y" ] && [ "$answer" != "y" ]; then
        exit
    fi
fi
docker cp $DOCKER_NAME:/var/ton-work/contracts/validator.hexaddr .
docker cp $DOCKER_NAME:/var/ton-work/contracts/validator.addr .
docker cp $DOCKER_NAME:/var/ton-work/contracts/validator.pk .
# Genesis nodes additionally own the network config and master keys.
if [ "$GENESIS" == "1" ]; then
    docker cp $DOCKER_NAME:/var/ton-work/db/initial-ton-global.config.json .
    docker cp $DOCKER_NAME:/var/ton-work/contracts/main-wallet.addr .
    docker cp $DOCKER_NAME:/var/ton-work/contracts/main-wallet.pk .
    docker cp $DOCKER_NAME:/var/ton-work/contracts/config-master.addr .
    docker cp $DOCKER_NAME:/var/ton-work/contracts/config-master.pk .
fi
| true
|
324dfe9d19e7d04cbb0933da3160895e3cefccf2
|
Shell
|
agvim/lin-bootstrap
|
/package_scripts/50_git-delta_gitdev.sh
|
UTF-8
| 907
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Install or update git-delta from its latest GitHub release .deb.
# $ curl -Ls -I -o /dev/null -w %{url_effective} https://github.com/dandavison/delta/releases/latest
# 0.9.2
# The redirect target ends in the version tag; the sed strips everything up
# to the first digit (i.e. the whole URL prefix), leaving e.g. "0.9.2".
DELTA_VERSION=$(curl -Ls -I -o /dev/null -w %{url_effective} https://github.com/dandavison/delta/releases/latest | sed -r 's/[^0-9]+//')
# True when the locally installed delta already matches the latest release.
check_if_installed () {
    LOCAL_VERSION=$(which delta > /dev/null && delta --version | xargs | cut -d' ' -f 2)
    [[ "$LOCAL_VERSION" == "$DELTA_VERSION" ]]
}
install(){
    wget --no-verbose "https://github.com/dandavison/delta/releases/download/${DELTA_VERSION}/git-delta_${DELTA_VERSION}_amd64.deb" -O /tmp/git-delta_${DELTA_VERSION}_amd64.deb &&
    sudo dpkg -i /tmp/git-delta_${DELTA_VERSION}_amd64.deb
}
# "install" and "update" are synonyms: both are no-ops when up to date.
case "$1" in
    "install" | "update" )
	if check_if_installed; then
	    echo "no update needed"
	    exit 0
	else
	    install
	fi;;
	# ! check_if_installed && install ;;
esac
| true
|
b380625fc9999cf3a66665bc20dc7e25014e2a54
|
Shell
|
vitorta0506/desafio
|
/scripts/README3-NGINXhello.sh
|
UTF-8
| 348
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo: show that nginx proxies the Python hello-world app by issuing a GET.
# Configure um nginx que sirva o retorno da aplicação ao receber um GET.
echo ---------
echo Hello World em Python servidor pelo NGINX
sleep 1
echo Realizando um GET em http://k8s.vtainfo.com.br:30000/python
sleep 1
curl -s --location --request GET 'http://k8s.vtainfo.com.br:30000/python'
echo ---------
sleep 2
| true
|
2f3897b2ff770594b648c7b993675d49fe271d56
|
Shell
|
dollalilz/2041_20T2
|
/ass1/update_tracker
|
UTF-8
| 104
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/dash
# Ensure every entry in the current directory has a matching (possibly
# empty) marker file under .status/.
for i in *
do
    # "$i" is quoted so filenames containing spaces or glob characters
    # are handled correctly (the original word-split them).
    if test ! -e ".status/$i"
    then
        touch ".status/$i"
    fi
done
| true
|
c3fb669ebdbe108088c6f4df78c25dd62059e998
|
Shell
|
fledge-iot/fledge
|
/scripts/storage
|
UTF-8
| 2,957
| 3.65625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
##--------------------------------------------------------------------
## Copyright (c) 2017-2018 OSIsoft, LLC
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
##     http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##--------------------------------------------------------------------
#
# This script is used to call the PostgreSQL storage plugin script
# to store and retrieve the sensor data when the database
# is embedded in Fledge
#
#set -x
# Include common code
source "${FLEDGE_ROOT}/scripts/common/get_storage_plugin.sh"
source "${FLEDGE_ROOT}/scripts/common/get_readings_plugin.sh"
PLUGIN_TO_USE=""
# Logger wrapper
storage_log() {
    write_log "Storage" "script.storage" "$1" "$2" "$3" "$4"
}
#############
## MAIN LOGIC
#############
# Resolve which plugin handles general storage and which handles readings;
# they may be the same plugin or two different ones.
PLUGIN_TO_USE=`get_storage_plugin`
READINGS_PLUGIN_TO_USE=`get_readings_plugin`
if [[ "${#PLUGIN_TO_USE}" -eq 0 ]]; then
    storage_log "err" "Missing plugin from Fledge storage service" "all" "pretty"
fi
PLUGIN_SCRIPT="$FLEDGE_ROOT/scripts/plugins/storage/$PLUGIN_TO_USE.sh"
if [[ ! -x "$PLUGIN_SCRIPT" ]]; then
    # Missing storage plugin script
    storage_log "err" "Fledge cannot start." "all" "pretty"
    storage_log "err" "Missing Storage Plugin script $PLUGIN_SCRIPT." "all" "pretty"
    exit 1
fi
# The reset must be executed on both the storage and readings plugins, if the
# readings are stored in a different plugin. On the readings plugin this becomes
# a purge operation.
#
# The purge action is only executed via the readings plugin if defined, or
# the main storage plugin is not defined.
if [[ "$1" == "reset" ]] ; then
    # Pass action in $1 and FLEDGE_VERSION in $2
    source "$PLUGIN_SCRIPT" $1 $2
    if [[ "$PLUGIN_TO_USE" != "$READINGS_PLUGIN_TO_USE" ]]; then
        READINGS_SCRIPT="$FLEDGE_ROOT/scripts/plugins/storage/$READINGS_PLUGIN_TO_USE.sh"
        if [[ -x "$READINGS_SCRIPT" ]]; then
            source "$READINGS_SCRIPT" purge $2
        fi
    fi
elif [[ "$1" == "purge" ]]; then
    # Pass action in $1 and FLEDGE_VERSION in $2
    if [[ "$PLUGIN_TO_USE" != "$READINGS_PLUGIN_TO_USE" ]]; then
        READINGS_SCRIPT="$FLEDGE_ROOT/scripts/plugins/storage/$READINGS_PLUGIN_TO_USE.sh"
        # Some readings plugins, notably sqlitememory, do not have a script
        if [[ -x "$READINGS_SCRIPT" ]]; then
            source "$READINGS_SCRIPT" $1 $2
        fi
    else
        source "$PLUGIN_SCRIPT" $1 $2
    fi
else
    # Pass any other operation to the storage plugin
    source "$PLUGIN_SCRIPT" $1 $2
fi
# exit cannot be used because the script is sourced.
#exit $?
| true
|
b74ddb5940741228182d13c1098394ced7e9fa2f
|
Shell
|
sasa-mitrovic/bash_basics
|
/04-life.sh
|
UTF-8
| 484
| 3.796875
| 4
|
[
"Unlicense"
] |
permissive
|
# Tutorial script: numeric comparison and time-of-day greeting.
echo "What is the meaning of life?"
read meaning
# 2>/dev/null guards -eq against non-numeric/empty input, which otherwise
# printed a test error; such answers simply take the else branch.
if [ "$meaning" -eq 42 ] 2>/dev/null; then
    echo "Yes!, That is the meaning of life!"
else
    echo "Awww... You don't know the meaning of life"
fi
# here are some other arithemetic comparison operators
# -eq -ne -gt -ge -lt -le
# exercise: write a script that prints whether it is
# morning or not
h=$(date +%H)   # $( ) preferred over legacy backticks
date
if [ "$h" -lt 12 ]; then
    echo Good morning
elif [ "$h" -lt 18 ]; then
    echo Good afternoon
else
    echo Good evening
fi
| true
|
f1c5ec360e59068589499024bbf0637be23f6e35
|
Shell
|
eterinfi/shell
|
/fun3.sh
|
UTF-8
| 376
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# author:Runoob Tutorial
# url:www.runoob.com
# Tutorial: positional parameters inside a function.
funWithParam(){
    echo "1st parameter is $1 !"
    echo "2nd parameter is $2 !"
    # $10 is parsed as ${1} followed by literal 0 — this line deliberately
    # demonstrates the pitfall; the next line shows the correct ${10} form.
    echo "10th parameter is $10 !"
    echo "10th parameter is ${10} !"
    echo "11th parameter is ${11} !"
    echo "Total number of parameters is $# !"
    echo "All parameters output as a string $* !"
}
funWithParam 1 2 3 4 5 6 7 8 9 34 73
| true
|
b0b7a3dbbe169433510cd3b243a03d8f9fbe2f28
|
Shell
|
stanacton/fantETHy
|
/devtools/golang.sh
|
UTF-8
| 471
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Vagrant provisioner: install Go on Ubuntu and persist GOPATH system-wide.
export GOPATH=/home/vagrant/src/GOPATH
add-apt-repository ppa:ubuntu-lxc/lxd-stable
apt-get update
apt-get install -y golang
# Create the workspace if missing. (The original tested an undefined
# $DIRECTORY, which made this check always true.)
if [ ! -d "$GOPATH" ]; then
	mkdir -p "$GOPATH"
fi
if grep -q GOPATH /etc/environment; then
	echo "GOPATH already exists"
else
	echo "adding GOPATH to .bashrc"
	# 'sudo echo ... >> file' does not elevate the redirection (the shell,
	# not echo, opens the file); 'sudo tee -a' performs the append as root.
	echo "GOPATH=/home/vagrant/src/GOPATH" | sudo tee -a /etc/environment
	echo "PATH=$PATH:$GOPATH/bin" | sudo tee -a /etc/environment
fi
go install tbd
| true
|
2e2f3d8ec8ad9a5c994b5aceb0ed89bb4e329a37
|
Shell
|
ymingjun/vps_init
|
/data/crons/php-fpm.sh
|
UTF-8
| 197
| 3.03125
| 3
|
[] |
no_license
|
#! /bin/sh
# Cron watchdog: restart php-fpm when its pid file is missing, empty, or
# names a process that no longer exists under /proc.
PID_FILE="/data/php/logs/php-fpm.pid"
CMD="/data/script/php-fpm.sh restart"
if [ ! -e $PID_FILE ] ||
[ ! -s $PID_FILE ] ||
[ ! -e /proc/`cat $PID_FILE`/status ]; then
$CMD
fi
| true
|
66fe03230206965b30a91100187469a46bdc11fd
|
Shell
|
Konubinix/Devel
|
/bin/konix_find_org_directories.sh
|
UTF-8
| 125
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the unique set of directories below $1 (symlinks followed) that
# contain .org files, skipping 'data' and '.stversions' subtrees.
# The explicit -print is required: without it, find's implicit -print
# applies to the whole expression, so the pruned 'data'/'.stversions'
# directories themselves leaked into the output.
find -L "$1" \( -name 'data' -or -name ".stversions" \) -prune -or -name "*.org" -print |sed 's-/[^/]\+$-/-'|sort|uniq
| true
|
24b895b8079fb1f323232d996b9497a3de4f310e
|
Shell
|
2018331100/vimrc
|
/install.sh
|
UTF-8
| 291
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the cloned vimrc repo as ~/.vim, or offer to remove an existing one.
cd ~
if [ ! -d ".vim" ]; then
    mv vimrc .vim && cd .vim
    # NOTE(review): "undor-dir" looks like a typo for "undo-dir" — it must
    # match the directory named inside the vimrc itself; confirm before renaming.
    mkdir undor-dir sessions
    echo "Installing vimscript based plugins..."
    vim -c VimscriptPlugins -c qa
else
    echo "Do you want to delete .vim?[y/N]"
    read -s del
    # Only an explicit yes deletes. The prompt advertises N as the default,
    # but the original's '-z $del' also deleted on a bare Enter.
    if [[ $del == "y" || $del == "Y" ]]; then
        rm -rf .vim
    fi
fi
| true
|
67c53f0a6acfecfc9a73e689440021b552350c05
|
Shell
|
casper-astro/serendip6
|
/src/vegas_hpc/bin/vegas_init_shmem
|
UTF-8
| 582
| 3.078125
| 3
|
[] |
no_license
|
#! /bin/bash
# Create the VEGAS shared-memory status and data buffers (idempotent; the
# check_* tools create the segments if absent, output suppressed).
if test "${VEGAS_DIR}zz" = zz; then
    echo "Error: VEGAS_DIR environment variable not set, exiting."
    exit 1
fi
# Set up status buffer
$VEGAS_DIR/bin/check_vegas_status >& /dev/null
# Set up first (GPU) data buffer
$VEGAS_DIR/bin/check_vegas_databuf -c -i1 -n32 -s32768 -t1 >& /dev/null
# Set up second (CPU_ACCUM) data buffer
$VEGAS_DIR/bin/check_vegas_databuf -c -i2 -n32 -s32768 -t2 >& /dev/null
# Set up third (DISK) data buffer
#$VEGAS_DIR/bin/check_vegas_databuf -c -i3 -n24 -s16384 -t3 >& /dev/null
# Display status of buffers
# $VEGAS_DIR/bin/check_vegas_status
| true
|
d450e399d03ea1e51ddbbbe9bb62da4e086ac653
|
Shell
|
7imbrook/secret-santa-app
|
/deploy_service.sh
|
UTF-8
| 480
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build and push the santa image, then patch the k8s deployment to the
# digest reported by `docker push`. Fails fast on any error (set -e).
set -e
function finish {
    rm ./push_logs
}
trap finish EXIT
docker build -t 7imbrook/santa .
docker push 7imbrook/santa | tee ./push_logs
# Swap shas, don't think we need this
# The last push line looks like "latest: digest: sha256:<hex> size: N";
# field surgery below extracts the bare <hex> digest.
NEW_DIGEST=$(tail -n 1 push_logs | cut -d':' -f 4 | cut -d' ' -f 1)
echo
echo "Going to deploy" $NEW_DIGEST
kubectl patch deployment django-service -p \
    "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"santa-app\",\"image\":\"7imbrook/santa@sha256:$NEW_DIGEST\"}]}}}}"
| true
|
ac321b8141e465c036fbb81ce42d7c8f2f17f7da
|
Shell
|
InterstellarScout/Linux-Administration
|
/scripts/specific-use/webServer/startStopWebServer.sh
|
UTF-8
| 427
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/sh
#This script starts and stops the web service.
echo "Would you like to start or stop the webserver (on/off)?"
read answer
if [ "$answer" = "on" ];
then
    sudo /etc/init.d/mysql start #Start my sql Service
    sudo service apache2 start
elif [ "$answer" = "off" ];
then
    sudo /etc/init.d/mysql stop #Stop my sql Service
    sudo service apache2 stop
else
    # Anything other than on/off used to fall through silently.
    echo "Unrecognised answer '$answer' - expected on or off" >&2
    exit 1
fi
| true
|
a8d8e652854caa018ad28e626df616a13c7092aa
|
Shell
|
cloud-gov/cf-redash
|
/setup-example.sh
|
UTF-8
| 612
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Template: set the Redash environment variables on a Cloud Foundry app.
# Fill in each empty "" value before running; $1 is the CF app name.
# Note - full list of Redash environmental variables is here:
# https://redash.io/help/open-source/admin-guide/env-vars-settings
APP_NAME=$1
cf set-env $APP_NAME REDASH_DATABASE_URL ""   # Get by running cf enf APP_NAME
cf set-env $APP_NAME REDASH_LOG_LEVEL ""      # e.g., INFO
cf set-env $APP_NAME PYTHONUNBUFFERED ""      # e.g., 0
cf set-env $APP_NAME REDASH_REDIS_URL ""      # Get by running cf enf APP_NAME (Note, use rediss:// scheme)
cf set-env $APP_NAME REDASH_COOKIE_SECRET ""  # e.g., $(pwgen -1s 32)
cf set-env $APP_NAME REDASH_SECRET_KEY ""     # e.g., $(pwgen -1s 32)
| true
|
5f850a6fb84ba822c076d55321712893ecb7d5c6
|
Shell
|
irliao/dotfiles
|
/zsh/archive/prezto/zshenv.zsh
|
UTF-8
| 654
| 3.40625
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
#
# Defines environment variables.
#
# This is sourced on all invocations of the shell, unless the -f option is set.
# This file should contain commands to set the global command search path and other system-wide environment variables.
# This should not contain commands that produce output or assume the shell is attached to a tty.
# Similar to /etc/zsh/zshenv but for per-user configuration. Generally used for setting some useful environment variables.
# Ensure that a non-login, non-interactive shell has a defined environment.
# SHLVL==1 and not a login shell => top-level non-login shell (e.g. cron,
# GUI-spawned); pull in .zprofile only then, and only if it is non-empty.
if [[ "$SHLVL" -eq 1 && ! -o LOGIN && -s "${ZDOTDIR:-$HOME}/.zprofile" ]]; then
  source "${ZDOTDIR:-$HOME}/.zprofile"
fi
| true
|
3b03410924b05a0442c3ef2cd7e67dd3d896ec5d
|
Shell
|
tellesnobrega/trending_files
|
/delete_remote.sh
|
UTF-8
| 454
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Remove the storm worker logs (ports 6700-6703) on every slave host.
# $1: path to the ssh private key (.pem).
PEM_FILE=$1
for i in {1..7}
do
    # One ssh call per worker port — identical effect to the original four
    # hand-written lines, without the copy/paste repetition.
    for port in 6700 6701 6702 6703
    do
        ssh -i "$PEM_FILE" ubuntu@telles-storm-slave$i "sudo rm -rf /usr/local/storm/logs/worker-$port.log"
    done
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.