blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b80b7b93ea3a3a1259bf17de7ad3df69079978f9
|
Shell
|
EMBL-EBI-GCA/ebisc_tracker_2
|
/db-backups/run-mongodb-backup-s3
|
UTF-8
| 1,599
| 3.65625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
# Dump a MongoDB database, archive it, upload the archive to S3-compatible
# storage, and delete uploaded backups older than 60 days.
# Requires the AWS_* variables below; MONGODB_* settings have defaults.
# NOTE: uses GNU 'date -d'; not portable to BSD date.
function usage() {
    echo "Required environment variables:"
    echo " AWS_ACCESS_KEY_ID"
    echo " AWS_SECRET_ACCESS_KEY"
    echo " AWS_ENDPOINT_URL"
    echo " AWS_BUCKET"
    echo " AWS_PREFIX"
    echo "Optional environment variables:"
    echo " MONGODB_HOST default mongodb"
    echo " MONGODB_USER default ebisc"
    echo " MONGODB_PASSWORD default ebisc"
    echo " MONGODB_DATABASE default ebisc"
    exit 1
}
# require parameters
if [ -z "$AWS_ACCESS_KEY_ID" ]; then usage; fi
if [ -z "$AWS_SECRET_ACCESS_KEY" ]; then usage; fi
if [ -z "$AWS_ENDPOINT_URL" ]; then usage; fi
if [ -z "$AWS_BUCKET" ]; then usage; fi
if [ -z "$AWS_PREFIX" ]; then usage; fi
# set defaults
: "${MONGODB_HOST:=mongodb}"
: "${MONGODB_USER:=ebisc}"
: "${MONGODB_PASSWORD:=ebisc}"
: "${MONGODB_DATABASE:=ebisc}"
TIMESTAMP=$(date +%F-%H-%M-%S)
# NOTE: passing the password on the command line leaks it to 'ps'; kept for
# compatibility with existing deployments.
mongodump -h "$MONGODB_HOST" -u "$MONGODB_USER" -p "$MONGODB_PASSWORD" -d "$MONGODB_DATABASE"
DUMP="mongodb-$MONGODB_DATABASE-$TIMESTAMP"
echo "$DUMP"
# mongodump writes to ./dump; rename to the timestamped name before archiving
mv dump "$DUMP"
tar -czf "$DUMP.tar.gz" "$DUMP"
aws --endpoint-url "$AWS_ENDPOINT_URL" s3 cp "$DUMP.tar.gz" "s3://${AWS_BUCKET}/${AWS_PREFIX}/"
# Expire old backups: delete any listed object whose timestamp is older than
# 60 days. Newline IFS so each 'ls' row becomes one loop item.
EPOCH_DELETE=$(date -d "-60 days" +"%s")
IFS=$'\n'
for line in $(aws --endpoint-url "$AWS_ENDPOINT_URL" s3 ls "s3://${AWS_BUCKET}/${AWS_PREFIX}/"); do
    IFS=" " read -r DATE TIME SIZE OBJECT <<< "${line}"
    EPOCH_FILE=$(date -d "$DATE $TIME" +"%s")
    if [ "$EPOCH_DELETE" -ge "$EPOCH_FILE" ];
    then
        echo "Deleting expired object s3://${AWS_BUCKET}/${AWS_PREFIX}/${OBJECT}"
        aws --endpoint-url "$AWS_ENDPOINT_URL" s3 rm "s3://${AWS_BUCKET}/${AWS_PREFIX}/${OBJECT}"
    fi
done
| true
|
8a9c3935ab26bd85ea1f065bed1c1e41136896ea
|
Shell
|
ghidra/dotfiles
|
/shell/setup.sh
|
UTF-8
| 1,542
| 2.5625
| 3
|
[] |
no_license
|
# Arch Linux desktop bootstrap: base packages, steam (multilib), bitmap
# fonts, infinality fonts, and firefox chrome tweaks.
pacman -S vim-minimal
pacman -S alsa-utils
pacman -S feh #this is a wallpaper thing
pacman -S unzip
pacman -S xorg-xev #for finding the keys
pacman -S rsync
#i need to allow multilib, specifically to install steam
#sudo vim /etc/pacman.conf
#uncomment
#[multilib]
#Include = /etc/pacman.d/mirrorlist
#then pacman -Syu
# BUG FIX: package was misspelled 'lib32-asla-plugins' (no such package);
# the ALSA 32-bit plugins package is 'lib32-alsa-plugins'.
sudo pacman -S lib32-alsa-plugins
sudo pacman -S steam
#brightness on the laptop is here:
#/sys/class/backlight/intel_backlight/brightness
#i need to set the folder to use no password in
#sudo visudo
#add this line:
#(username) ALL=NOPASSWD: /usr/bin/tee /sys/class/backlight/intel_backlight/brightness
#wallpaper
#feh set bg
#feh --bg-scale /path/to/image.file
mkdir ~/aur_builds
git clone https://github.com/Tecate/bitmap-fonts.git
cd bitmap-fonts
sudo cp -avr bitmap/ /usr/share/fonts
cd /usr/share/fonts/bitmap
fc-cache && mkfontscale && mkfontdir
xset fp+ /usr/share/fonts/bitmap
fc-cache -fv
#-------------------------------------
#OTHER FONTS TO MAKE FIREFOX NOT LOOK LIKE SHIT
#edit /etc/pacman.conf
#[infinality-bundle]
#Server = http://bohoomil.com/repo/$arch
#[infinality-bundle-fonts]
#Server = http://bohoomil.com/repo/fonts
#Next, import and sign the key:
sudo pacman-key --init
sudo dirmngr < /dev/null
sudo pacman-key -r 962DDE58
sudo pacman-key --lsign-key 962DDE58
sudo pacman -Syyu
sudo pacman -S infinality-bundle
#-------------------------------------
#firefox
cd ~/.mozilla/firefox/<profile>
#check if chrome exisits, make it
cd chrome
ln -s dotfiles/apps/firefox/firefox.css userChrome.css
| true
|
574b388639ccc5780d59f4f394f28897ffa6e708
|
Shell
|
kse201/misc
|
/scripts/utils/tmux-wrapper
|
UTF-8
| 2,655
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Wrapper around tmux: with arguments it passes straight through; without,
# it interactively offers to attach to or create sessions (logic below).
tmux_bin=$(command -v tmux)
argc=$#   # CLI argument count, read by has_args
# Succeed if $1 resolves to a runnable command.
# 'command -v' replaces the non-portable external 'which'; the explicit
# 'return $?' in the original was redundant and is dropped.
is_exists() {
    command -v "$1" >/dev/null 2>&1
}
has() {
    is_exists "$@"
}
# True when the wrapper was invoked with at least one argument.
has_args(){
    [ "${argc}" -gt 0 ]
}
# True inside an SSH session (sshd exports SSH_CLIENT).
is_ssh_running() {
    [ -n "$SSH_CLIENT" ]
}
# True inside GNU screen ($STY) or tmux ($TMUX) respectively.
is_screen_running() {
    [ -n "$STY" ]
}
is_tmux_runnning() {
    [ -n "$TMUX" ]
}
is_screen_or_tmux_running() {
    is_screen_running || is_tmux_runnning
}
# Main flow:
#  - with CLI args: act as a plain tmux alias (pass-through)
#  - already inside tmux/screen: print a banner, refresh DISPLAY
#  - otherwise: interactively offer to attach or create a session
if has_args ; then
    "${tmux_bin}" "$@"
elif is_screen_or_tmux_running; then
    if is_tmux_runnning; then
        if has "cowsay"; then
            # 1-in-5 chance of an easter-egg banner
            if [[ $(( RANDOM % 5 )) == 1 ]]; then
                cowsay -f ghostbusters "G,g,g,ghostbusters!!!"
                echo ""
            fi
        else
            echo ' _____ __ __ _ ___ __ '
            echo '|_ _| \/ | | | \ \/ / '
            echo ' | | | |\/| | | | |\ / '
            echo ' | | | | | | |_| |/ \ '
            echo ' |_| |_| |_|\___//_/\_\ has been already running'
        fi
        # NOTE(review): overwriting DISPLAY with $TMUX looks deliberate
        # (session-scoped value) but is unusual -- confirm intent.
        export DISPLAY="$TMUX"
    elif is_screen_running; then
        # For GNU screen
        :
    fi
else
    #if shell_has_started_interactively && ! is_ssh_running; then
    if ! has "${tmux_bin}"; then
        echo "tmux not found" 1>&2
        exit 1
    fi
    # if "${tmux_bin}" has-session >/dev/null 2>&1 && "${tmux_bin}" list-sessions | grep -qE '.*]$'; then
    if "${tmux_bin}" has-session >/dev/null 2>&1 && "${tmux_bin}" list-sessions >/dev/null; then
        # detached session exists -- list them (session name colored red)
        "${tmux_bin}" list-sessions | perl -pe 's/(^.*?):/\033[31m$1:\033[m/'
        echo -n "Tmux: attach? (y/N/num/session-name) "
        read -r
        # empty or y: attach to the most recent session
        if [[ "$REPLY" =~ ^[Yy]$ ]] || [[ "$REPLY" == '' ]]; then
            if "${tmux_bin}" attach-session ; then
                echo "$("${tmux_bin}" -V) attached session"
                exit
            fi
        elif [[ "$REPLY" =~ ^[nN]$ ]] ; then
            echo "tmux-attach canceled"
        # reply matches an existing session name/number: attach to it
        elif "${tmux_bin}" list-sessions | grep -q "^$REPLY"; then
            if "${tmux_bin}" attach -t "$REPLY"; then
                echo "$("${tmux_bin}" -V) attached session"
                exit
            fi
        else
            # anything else: treat the reply as a new session name
            "${tmux_bin}" new-session -s "${REPLY}" && echo "${tmux_bin}" created new session
            exit
        fi
    fi
    # No session was attached above; offer to create one.
    echo -n "Tmux: new-session? (y/N/session-name) "
    read -r
    if [[ "$REPLY" =~ ^[Yy]$ ]] || [[ "$REPLY" == '' ]]; then
        "${tmux_bin}" new-session && echo "tmux created new session"
    elif [[ "$REPLY" =~ ^[nN]$ ]] ; then
        echo "tmux-new canceled"
        exit
    else
        "${tmux_bin}" new-session -s "$REPLY" && echo "tmux created new session"
    fi
fi
| true
|
04f04883c4f84917479a78b94018ad830ffe4c12
|
Shell
|
A1404/libernet
|
/install.sh
|
UTF-8
| 1,794
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Libernet Installer
# by Lutfa Ilham
# v1.1
# Root required: installs packages and writes to /usr/bin, /etc and /www.
if [ "$(id -u)" != "0" ]; then
    echo "This script must be run as root" 1>&2
    exit 1
fi
LIBERNET_DIR="/root/libernet"
# same path with '/' pre-escaped for the sed substitution in install_libernet
LIBERNET_DIR_ESCAPED="\/root\/libernet"
LIBERNET_WWW="/www/libernet"
# Install every opkg package listed (one per line) in ./requirements.txt.
function install_packages() {
    while IFS= read -r line; do
        opkg install "${line}"
    done < ./requirements.txt
}
# Refresh the opkg index, install requirements, copy bundled binaries.
function install_requirements() {
    echo -e "Installing packages" \
        && opkg update \
        && install_packages \
        && echo -e "Copying proprietary binary" \
        && cp -arvf ./proprietary/* /usr/bin/
}
# Uncomment the php-cgi interpreter line in uhttpd's config and restart it.
function enable_uhttp_php() {
    echo -e "Enabling uhttp php execution" \
        && sed -i '/^#.*php-cgi/s/^#//' '/etc/config/uhttpd' \
        && uci commit uhttpd \
        && echo -e "Restarting uhttp service" \
        && /etc/init.d/uhttpd restart
}
# Export LIBERNET_DIR to login shells by appending to /etc/profile.
function add_libernet_environment() {
    echo -e "Adding Libernet environment" \
        && echo -e "# Libernet\nexport LIBERNET_DIR=${LIBERNET_DIR}" | tee -a '/etc/profile'
}
# Copy program files, logs and the web UI into place, then rewrite the
# LIBERNET_DIR placeholder inside the web config.
function install_libernet() {
    echo -e "Installing Libernet" \
        && mkdir -p "${LIBERNET_DIR}" \
        && echo -e "Copying binary" \
        && cp -arvf ./bin "${LIBERNET_DIR}/" \
        && echo -e "Copying system" \
        && cp -arvf ./system "${LIBERNET_DIR}/" \
        && echo -e "Copying log" \
        && cp -arvf ./log "${LIBERNET_DIR}/" \
        && echo -e "Copying web files" \
        && mkdir -p "${LIBERNET_WWW}" \
        && cp -arvf ./web/* "${LIBERNET_WWW}/" \
        && echo -e "Configuring Libernet" \
        && sed -i "s/LIBERNET_DIR/${LIBERNET_DIR_ESCAPED}/g" "${LIBERNET_WWW}/config.inc.php"
}
function finish_install() {
    echo -e "Libernet successfully installed!\nURL: http://router-ip/libernet"
}
# Each stage only runs when the previous one succeeded.
install_requirements \
    && install_libernet \
    && add_libernet_environment \
    && enable_uhttp_php \
    && finish_install
| true
|
dab5d6cdeda693efc02d16f00aaa284a49417e0e
|
Shell
|
Nuvoton-Israel/openbmc
|
/meta-quanta/meta-olympus-nuvoton/recipes-phosphor/configuration/entity-manager/olympus-reload-sensor.sh
|
UTF-8
| 573
| 2.765625
| 3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
# Reload sensor configuration by (un)binding the nuvoton i2c bus device.
#   $1 == "1": bind the bus (if not already bound); anything else: unbind.
# The IPMI SEL logger is stopped during the operation to avoid spurious logs.
echo "reload sensor config $1"
systemctl stop xyz.openbmc_project.Logging.IPMI.service
# BUG FIX: "$1" is quoted; the unquoted form made '[' fail with a syntax
# error whenever the script was invoked without an argument.
if [ "$1" = "1" ]; then
    if [ ! -d "/sys/bus/platform/drivers/nuvoton-i2c/f0086000.i2c" ]; then
        echo -n "f0086000.i2c" > /sys/bus/platform/drivers/nuvoton-i2c/bind
    fi
    # give the bus/sensors time to settle before restarting the daemon
    sleep 10
else
    if [ -d "/sys/bus/platform/drivers/nuvoton-i2c/f0086000.i2c" ]; then
        echo -n "f0086000.i2c" > /sys/bus/platform/drivers/nuvoton-i2c/unbind
    fi
    sleep 10
fi
systemctl restart xyz.openbmc_project.psusensor.service
systemctl start xyz.openbmc_project.Logging.IPMI.service
| true
|
915b41cfe76386258cd6c4eb045819b4727bb244
|
Shell
|
brodemack/Pi
|
/motion-setup.sh
|
UTF-8
| 752
| 2.625
| 3
|
[] |
no_license
|
# Install 'motion' with the MMAL build (Raspberry Pi camera support) and the
# start/stop helper scripts. Each step runs in a subshell so the working
# directory never leaks into the caller's shell.
sudo apt-get install -y motion
sudo apt-get install -y libjpeg62
mkdir -p ~/mmal
# NOTE(review): binary tarball is fetched from a personal Dropbox link --
# confirm it is still available and trusted.
(cd ~/mmal/; wget https://www.dropbox.com/s/xdfcxm5hu71s97d/motion-mmal.tar.gz)
(cd ~/mmal/; tar -zxvf motion-mmal.tar.gz)
# rewrite motion-mmalcam.conf via the ruby helper, in place
(cd ~/mmal/; wget https://raw.githubusercontent.com/brodemack/Pi/master/update-motion-conf.rb; ruby update-motion-conf.rb < motion-mmalcam.conf > motion-mmalcam2.conf; mv motion-mmalcam2.conf motion-mmalcam.conf)
(cd ~/mmal/; rm motion-mmal.tar.gz; rm update-motion-conf.rb;)
(cd ~/mmal/; mv motion motion-mmal)
(cd ~/mmal/; wget https://raw.githubusercontent.com/brodemack/Pi/master/templates/startmotion; chmod 755 startmotion)
(cd ~/mmal/; wget https://raw.githubusercontent.com/brodemack/Pi/master/templates/stopmotion; chmod 755 stopmotion)
| true
|
78a7dd076f1e353df63566bc140c225937d4ee36
|
Shell
|
whiteinge/dotfiles
|
/bin/rgb2hex
|
UTF-8
| 576
| 3.765625
| 4
|
[] |
no_license
|
#!/usr/bin/env zsh
# Convert 16-bit RGB values to hexadecimal
# For example, xmag shows color in this format.
#
# Usage (outputs #00cb02):
#
#     rgb2hex 0 cbcb 202
while getopts h opt; do
    case $opt in
        # -h: print this file's header comment as usage text (skip the
        # shebang, stop at the first blank line, strip the leading '# ')
        h) awk 'NR == 1 { next } /^$/ { exit } { print substr($0, 3) }' "$0"
           exit ;;
    esac
done
shift $(( OPTIND - 1 ))
# ${N:?msg} aborts with the message when the component is missing
r="${1:?Missing red}"
g="${2:?Missing green}"
b="${3:?Missing blue}"
# TODO POSIX printf doesn't handle the 16# prefix. Another way?
# Scale each 16-bit hex component down to 8 bits via bc, then reassemble the
# three quotients into one hex triplet.
# NOTE(review): 'xe -N3' (external tool) groups the three bc outputs into a
# single printf call -- confirm xe is installed where this runs.
printf '%d / 256
%d / 256
%d / 256
' "16#${r}" "16#${g}" "16#${b}" | bc | xe -N3 printf '#%02x%02x%02x\n'
| true
|
00364cdc1e79f107fe4a2169784616e513e230e7
|
Shell
|
wadqc/WAD_Interface
|
/create_databases/create_iqc_tables.sh
|
UTF-8
| 508
| 2.625
| 3
|
[] |
no_license
|
#!/bin/sh
# optional first argument = mysql root password
# (Re)build the IQC schema: concatenate all per-table .sql files into one
# iqc.sql, load it, then load the interface seed data.
echo "Drop + create IQC database..."
# remove any stale concatenated schema from a previous run
rm source/WAD_Interface/create_databases/iqc_db/iqc.sql 2> /dev/null
cat source/WAD_Interface/create_databases/iqc_db/*.sql > source/WAD_Interface/create_databases/iqc_db/iqc.sql
# NOTE(review): -p$1 is intentionally unquoted -- with no argument it
# collapses to bare '-p' so mysql prompts for the password interactively.
# Quoting it would pass an empty password instead. Also note the password
# is visible in 'ps' when supplied on the command line.
mysql -uroot -p$1 < source/WAD_Interface/create_databases/iqc_db/iqc.sql
rm source/WAD_Interface/create_databases/iqc_db/iqc.sql
mysql -uroot iqc -p$1 < source/WAD_Interface/create_databases/iqc_db/data/iqc_interface_data.sql
| true
|
031bd1cf256bff8806bff1f2059c7d6325ba0e4e
|
Shell
|
sketchc89/algodesign
|
/cpp/test.bash
|
UTF-8
| 224
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the data-structures unit tests under valgrind when it is available.
# Quote "$0" so paths containing spaces resolve correctly.
DIR=$(dirname "$0")
PROG=${DIR}/build/dataStructures/test/testDataStructures
# ${PROG} & PID_PROG=$!
# echo ${PID_PROG}
# sleep 2
# 'command -v' is the portable existence check; 'hash' also printed an error
# to stderr when valgrind was missing.
if command -v valgrind >/dev/null 2>&1; then
    valgrind --tool=memcheck --leak-check=full "${PROG}"
fi
| true
|
66dfc8907c83475f5148dcb2e7f53f8da31a0f05
|
Shell
|
otus-kuber-2019-06/SergeSpinoza_platform
|
/kubernetes-vault/vault-guides/operations/provision-vault/kubernetes/minikube/deps.sh
|
UTF-8
| 1,815
| 4.09375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Preflight checks for the minikube Vault guide: verifies docker, minikube
# and kubectl are available before provisioning.
# DEBUG=1 in the environment enables command tracing.
[ ! -n "$DEBUG" ] || set -x
set -u
# the internet says this is better than 'set -e'
function onerr {
    echo 'Cleaning up after error...'
    popd
    # 'exit -1' is non-portable (and becomes 255 anyway); use an explicit 1
    exit 1
}
trap onerr ERR
# Remember the caller's directory, then work from the script's own directory.
pushd "$(pwd)" > /dev/null 2>&1
cd "$(dirname "$0")"
# Verify generic CLI dependencies (currently just pidof) are on PATH.
# All checks exit 1 on failure ('exit -1' was non-portable).
function check_for_deps () {
    local dep
    for dep in pidof; do
        if ! command -v "${dep}" > /dev/null 2>&1 ; then
            # %s keeps the dependency name out of the printf format string
            printf "\n>>>> Failed to find '%s'!\n" "${dep}" >&2
            exit 1
        fi
    done
}
# Ensure the docker CLI exists; the daemon itself cannot be probed portably.
function docker_check () {
    if ! command -v docker > /dev/null 2>&1; then
        printf "\n>>> Failed to find docker binary in path." >&2
        printf "\n>>> Please see https://docker.com to download and install Docker.\n" >&2
        exit 1
    fi
    # Would be nice to check for dockerd process but most OSes other than Linux
    # run it in VM with a bunch of 'clever' shell aliases.
    printf "\n>>> Note: Please ensure Docker is running and available via the 'docker' CLI...\n"
}
# Ensure minikube exists AND a cluster is up ('minikube status' succeeds).
function minikube_check () {
    if ! command -v minikube > /dev/null 2>&1; then
        printf "\n>>> Failed to find minikube binary in PATH." >&2
        printf "\n>>> Please see https://kubernetes.io/docs/getting-started-guides/minikube/ for instructions on downloading and installing minikube.\n" >&2
        exit 1
    fi
    if ! minikube status > /dev/null 2>&1 ; then
        printf "\n>>> Failed to find k8s cluster running minikube. You likely need to 'minikube start'.\n" >&2
        exit 1
    fi
}
# Ensure kubectl exists.
function kubectl_check () {
    if ! command -v kubectl > /dev/null 2>&1; then
        printf "\n>>> Failed to find kubectl binary in PATH." >&2
        printf "\n>>> Please see https://kubernetes.io/docs/tasks/tools/install-kubectl/ for instructions on downloading andn installing kubectl.\n" >&2
        exit 1
    fi
}
# Run all dependency checks in order, then restore the caller's directory.
function main () {
    check_for_deps
    docker_check
    minikube_check
    kubectl_check
    popd > /dev/null 2>&1
}
main
| true
|
d3f70ba468727bbc5fc42ff9ca0afd23e4e1d48d
|
Shell
|
SliTaz-official/website
|
/lib/get-feeds.sh
|
UTF-8
| 772
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Get latest commits and feeds to display on the website main pages.
# On the server, this script is executed by cron each hour.
#
# BUG FIX: the original assigned to PWD, which is the shell's own
# current-directory variable; clobbering it can break anything that reads
# $PWD later. Use a dedicated name instead.
SCRIPT_DIR=$(dirname "$0")
CACHE="$(dirname "$SCRIPT_DIR")/cache"
# Feeds URL http://scn.slitaz.org/activity/feed/
BLOG_FEED='http://scn.slitaz.org/?blog=rss'
WOK_FEED='http://hg.slitaz.org/wok/rss-log'
FORUM_FEED='http://forum.slitaz.org/rss'
ROLLING_DATE='http://mirror1.slitaz.org/rolling-date.sh'
# Clean cache
mkdir -p "${CACHE}" && cd "${CACHE}"
rm -f *.xml
# Cache all feeds to save bandwidth (updated by cron)
echo -n "Getting latest rss feeds... "
wget -O wok.xml "$WOK_FEED" 2>/dev/null
wget -O blog.xml "$BLOG_FEED" 2>/dev/null
wget -O forum.xml "$FORUM_FEED" 2>/dev/null
wget -O rolling-date.txt "$ROLLING_DATE" 2>/dev/null
echo "Done"
exit 0
| true
|
a1751dfad200b3786f2d5672a7fa6968f966953d
|
Shell
|
ged/graphics
|
/graphics_setup.sh
|
UTF-8
| 1,001
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
set -e
# Install SDL build dependencies (homebrew on OSX, apt on Linux), then
# rebuild/reinstall the 'graphics' gem and run a smoke test window.
# if [ $(id -u) != 0 ]; then
#     echo "Please run this as root or with sudo"
#     exit 1
# fi
# Remove previously installed copies so the rebuild starts clean; '|| true'
# keeps going when a gem was never installed.
for gem in graphics rubysdl rsdl; do
    gem uninstall -ax $gem || true
done
case `uname` in
    Darwin)
        echo "I'm on OSX. Not using sudo"
        SUDO=
        brew install sdl --universal
        brew install sdl_mixer --universal --with-smpeg
        brew install sdl_ttf --universal
        brew install sdl_image --universal --without-webp
        ;;
    Linux)
        echo "I'm on linux, using sudo where needed"
        SUDO=sudo
        sudo apt-get install libsdl1.2-dev libsdl-image1.2-dev libsdl-mixer1.2-dev libsdl-ttf2.0-dev
        ;;
    *)
        echo "Unknown OS $OSTYPE, aborting"
        exit 1
        ;;
esac
$SUDO rake newb
rake test
# Running from a checkout ($0 is a real file): build and install the local
# gem; otherwise install the prerelease from rubygems.
if [ -f $0 ]; then
    rake clean package
    $SUDO gem install pkg/graphics*.gem
else
    $SUDO gem install graphics --pre
fi
# Smoke test: open a small window to confirm SDL + the gem work end to end.
rsdl -Ilib -rgraphics -e 'Class.new(Graphics::Simulation) { def draw n; clear :white; text "hit escape to quit", 100, 100, :black; end; }.new(500, 250, 0, "Working!").run'
| true
|
1033e547de43065763e3edb056b9af5148fd52a1
|
Shell
|
dp-ua/codenjoy-portable-linux
|
/install.sh
|
UTF-8
| 566
| 3.59375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Clone the codenjoy portable distribution into $1 (default 'codenjoy') and
# bootstrap it. Must run as root.
if [ "$EUID" -ne 0 ]
    then echo "[91mPlease run as root[0m"
    exit
fi
# Print a command (in color), then execute it.
eval_echo() {
    to_run=$1
    echo "[94m"
    echo $to_run
    echo "[0m"
    eval $to_run
}
# Install git first if it is missing.
if [ -x "$(command -v git)" ]; then
    echo "[Git installed[0m" ;
else
    eval_echo "sudo apt update"
    eval_echo "sudo apt install git"
    eval_echo "git --version"
fi
# BUG FIX: ${1:=codenjoy} tries to ASSIGN to the positional parameter $1,
# which bash rejects ("cannot assign in this way"); ':-' substitutes the
# default without assignment.
folder=${1:-codenjoy}
eval_echo "git clone https://github.com/codenjoyme/codenjoy-portable-linux.git $folder"
eval_echo "cd ./$folder"
ls -la
eval_echo ". env-update.sh"
eval_echo ". rebuild.sh"
|
c11134ed56149766a3ffd9664db9f2792aee526e
|
Shell
|
Mikor-mkr/robotics_setup
|
/xonsh.sh
|
UTF-8
| 2,876
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This script is intended to setup a fresh desktop with ROS and
# with dependencies on homebrew or linuxbrew depending on the OS being used
# @author Andrew Hundt <ATHundt@gmail.com>
echo ""
echo "###############################################################################################"
echo "# XONSH python based shell (this file contains setup/config scripts)"
echo "###############################################################################################"
echo "# Bash to Xonsh translation http://xon.sh/bash_to_xsh.html"
echo "# "
echo "# MANUAL STEPS:"
echo "# "
echo "# First make sure you have python3 and pip3 installed!"
echo "# "
echo "# ./python.sh"
# Remember the checkout directory; used below to symlink dotfiles from it.
DIR=$(pwd)
# /bin/bash
set -e
set -u
set -x
# note don't install xonsh with homebrew/linuxbrew
# because it will be in a a virtualenv that can't
# access your other apps.
# https://github.com/xonsh/xonsh/issues/2475
#
# However, you can work around this by using
# the xonsh included xip, which is like pip
# but makes sure to use you xonsh environment.
###########################
# Configuring your system
# about .xonshrc:
# http://xon.sh/xonshrc.html
#
# Customizing your xonshrc:
# http://xon.sh/customization.html
# If you want to customize your paths based on the directory
# you are in to automatically be set up for different projects,
# you will want to use events:
#
# http://xon.sh/tutorial_events.html
#
# Specifically on_chdir: http://xon.sh/events.html#on-chdir-olddir-str-newdir-str-none
#
###########################
# Environment Variables
#
# these are the variables you can set to change how things run.
# like how history works, path completion, etc
#
# http://xon.sh/envvars.html
###########################
# Extensions
# for details see:
# http://xon.sh/xontribs.html
############################
# History
#
# http://xon.sh/tutorial_hist.html
###########################
# Python Install
./python.sh
############################
# Xonsh Install
# --user keeps everything out of system site-packages
pip3 install gnureadline pygments prompt_toolkit ply psutil ipykernel matplotlib xonsh xonsh-vox-tabcomplete xontrib-z xontrib-fzf-widgets --upgrade --user
############################
# Install
# Link the repo's .xonshrc into $HOME and try to make xonsh the login shell.
if [ ! -f $HOME/.xonshrc ] ; then
    ln -s $DIR/.xonshrc $HOME/.xonshrc
    # sometimes you can't run chsh...
    if [ -x "$(command -v ypchsh)" ] ; then
        echo "TODO(ahundt) fix chsh... doesn't work on this platform right now... see robotics_setup/README.md"
        #ypchsh -s $(which zsh)
    else
        chsh -s $(which xonsh)
    fi
fi
############################
# xonsh config.json. Disabled because it is no longer supported in Xonsh 0.9.x
# if [ ! -f $HOME/.config/xonsh/config.json ] ; then
#     mkdir -p $HOME/.config/xonsh/
#     ln -s $DIR/.xonshrc $HOME/.config/xonsh/config.json
# fi
############################
# .xonshrc
# NOTE(review): duplicate of the guard above -- the first block already
# created this symlink, so this one never fires; confirm which is intended.
if [ ! -f $HOME/.xonshrc ] ; then
    ln -s $DIR/.xonshrc $HOME/.xonshrc
fi
| true
|
69f6839e1042e1eb6b649797136a759bc71e3cd8
|
Shell
|
kergoth/dotfiles
|
/scripts/detach
|
UTF-8
| 758
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/sh
# Background a process with nohup, but return quickly with an exit code if it
# fails within the 0.1s timeout, which likely indicates a wrong command-line
# argument.
# Via https://superuser.com/a/814527
TIMEOUT=0.1
# No command given: defer to nohup's own usage error.
if [ $# -eq 0 ]; then
    exec nohup
fi
# Interactive stdout: clear stale output from a previous run.
if [ -t 1 ]; then
    rm -f nohup.out
fi
nohup "$@" &
nohup_pid=$!
# After the timeout, kill ourselves, interrupting the wait, so we can return
# nohup's exit code if it's no longer running
# (the HUP trap turns the self-signal into a clean 'still running' exit 0)
trap 'exit 0' HUP
sleep "$TIMEOUT" && kill -HUP "$$" 2>/dev/null &
wait "$nohup_pid"
ret=$?
# Command failed within the timeout: report, echoing captured output if any.
if [ $ret -ne 0 ]; then
    printf >&2 'Error running %s' "$*"
    if [ -t 1 ] && [ -e nohup.out ]; then
        echo >&2 :
        cat >&2 nohup.out
    else
        printf >&2 '\n'
    fi
fi
exit "$ret"
| true
|
f4d175ca494c06f064692e4c0f9638fbb8172125
|
Shell
|
rycus86/githooks
|
/tests/step-100.sh
|
UTF-8
| 1,178
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Test:
#   Set up local repos, run the install and skip installing hooks into existing directories
# Exit 249 marks the test as skipped: it is meaningless when installing via
# core.hooksPath instead of per-repo hook files.
if echo "$EXTRA_INSTALL_ARGS" | grep -q "use-core-hookspath"; then
    echo "Using core.hooksPath"
    exit 249
fi
mkdir -p ~/test100/p001 && mkdir -p ~/test100/p002 || exit 1
cd ~/test100/p001 && git init || exit 1
cd ~/test100/p002 && git init || exit 1
# Sanity: fresh repos must not contain the hooks yet.
if grep -r 'github.com/rycus86/githooks' ~/test100/; then
    echo "! Hooks were installed ahead of time"
    exit 1
fi
# run the install, and skip installing the hooks into existing repos
# (the echoed 'n'/'y' lines answer the installer's interactive prompts)
echo 'n
y
' | sh /var/lib/githooks/install.sh --skip-install-into-existing || exit 1
if grep -r 'github.com/rycus86/githooks' ~/test100/; then
    echo "! Hooks were installed but shouldn't have"
    exit 1
fi
# run the install, and let it install into existing repos
echo 'n
y
' | sh /var/lib/githooks/install.sh
# Both repos must now contain the installed hook files.
if ! grep -r 'github.com/rycus86/githooks' ~/test100/p001/.git/hooks; then
    echo "! Hooks were not installed successfully"
    exit 1
fi
if ! grep -r 'github.com/rycus86/githooks' ~/test100/p002/.git/hooks; then
    echo "! Hooks were not installed successfully"
    exit 1
fi
rm -rf ~/test100
| true
|
470753e0cce9ccf4f6b5d376bf6f1f4e255b0d1a
|
Shell
|
markmo/repo2docker
|
/release.sh
|
UTF-8
| 705
| 3.453125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Release helper.
#   $1 profile : codeserver | default | garden
#   $2 env     : prod | test | dev
#   $3 commit message (required unless env == dev)
# Validates the arguments, stamps versions/profile, then either runs a local
# cloud build (dev) or commits and pushes the release (prod/test).
profile=${1}
env=${2}
commit_msg=${3}
case "${profile}" in
    codeserver|default|garden) ;;
    *)
        echo "Invalid profile"
        exit 1
        ;;
esac
case "${env}" in
    prod|test|dev) ;;
    *)
        echo "Invalid environment"
        exit 1
        ;;
esac
if [[ "${env}" != "dev" && "${commit_msg}" == "" ]]; then
    echo "Commit message is required"
    exit 1
fi
/bin/bash ./set_base_version.sh "${profile}" "${env}"
/bin/sh ./bump_version.sh "${profile}" "${env}"
/bin/bash ./set_profile.sh "${profile}" "${env}"
if [[ "${env}" == "dev" ]]; then
    cloud-build-local --config=cloudbuild.yaml --dryrun=false --push .
else
    git add .
    git commit -m "${commit_msg}"
    git push google master
fi
| true
|
689e3abc163559f2c94dc087b5bf08f11dcc5f31
|
Shell
|
abdennour/containers-factory
|
/adminer-ssh-tunnel/adminer-ssh-tunnel.sh
|
UTF-8
| 742
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Build (if needed) and run an adminer container that reaches a database
# through an SSH tunnel.
#   $1 = SSH key, $2 = SSH tunnel spec, remaining args go to 'docker run'.
# Requires PROFILE and REGION in the environment ('nounset' aborts otherwise).
set -o errexit
set -o nounset
set -o pipefail
# enable interruption signal handling
trap - INT TERM
SSH_KEY=$1
SSH_TUNNEL=$2
IMAGE=adminer-ssh-${USER};
# If the image does not exist locally, build it.
if [[ -z $(docker images -q "$IMAGE") ]]; then
    TMP=/tmp/containers-factory;
    rm -rf "$TMP";
    git clone https://github.com/abdennour/containers-factory.git "$TMP";
    cd "$TMP/adminer-ssh-tunnel";
    # BUG FIX: every build argument needs its own --build-arg flag; the
    # original passed SSH_TUNNEL=... as a positional argument (the build
    # context), breaking the build.
    docker build \
        --build-arg SSH_KEY="$SSH_KEY" \
        --build-arg SSH_TUNNEL="$SSH_TUNNEL" \
        -t "$IMAGE" .
    cd /tmp && rm -rf "$TMP";
fi
# $(tty ...) is intentionally unquoted: it must vanish entirely when stdin
# is not a terminal and become '-i' when it is.
docker run --rm \
    -t $(tty &>/dev/null && echo "-i") \
    -e "AWS_PROFILE=${PROFILE}" \
    -e "AWS_DEFAULT_REGION=${REGION}" \
    -v "$(pwd)":/project \
    -v ~/.aws:/root/.aws \
    "$IMAGE" \
    "$@"
| true
|
b1f2a9dacc9bbf8da11061286de93516c28cac2f
|
Shell
|
kenan-rhoton/lotl
|
/lotl.sh
|
UTF-8
| 5,315
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# lotl -- a small menu-driven faction/civilisation game.
# Numeric faction ids (FACTION itself is currently unused; lotlFactionSelect
# appends the faction NAME to MY_CIV instead).
readonly NOMADS=1
readonly SETTLED=2
readonly FREEMEN=3
FACTION=0
declare -a FAMILY
# Space-separated option lists consumed by lotlChoiceAction.
FACTION_OPTIONS="Nomads"
BASIC_OPTIONS="Grow"
GROW_OPTIONS=""
# MY_CIV accumulates everything the player has acquired (loose substring
# matching is used against it throughout).
MY_CIV=""
# Each record: "<option> <action> <handler> <requirement...>"
# ('none' handler means no extra action function is attached).
declare -a OPTION_REQS
##NOMAD GROWTH
OPTION_REQS+=("Hunters Grow none Nomads")
OPTION_REQS+=("Gatherers Grow none Nomads")
OPTION_REQS+=("Warriors Grow none Hunters")
OPTION_REQS+=("Barbarians Grow none Warriors")
OPTION_REQS+=("Brutes Grow none Warriors")
OPTION_REQS+=("Veterans Grow none Barbarians")
OPTION_REQS+=("Berserkers Grow none Barbarians")
OPTION_REQS+=("Titans Grow none Brutes")
OPTION_REQS+=("Rangers Grow none Gatherers")
OPTION_REQS+=("Chieftain Grow none Gatherers Hunters")
##NOMAD EXPLORE
OPTION_REQS+=("Explore Explore lotlExplore Hunters")
OPTION_REQS+=("Explore Explore lotlExplore Rangers")
##NOMAD ATTACK
OPTION_REQS+=("Attack Attack lotlAttack Warriors")
OPTION_REQS+=("Attack Attack lotlAttack Rangers")
# Present a numbered menu and loop until a valid selection is made.
#   $1  = prompt message (interpreted by echo -e)
#   $2+ = the choices
# Result: _RET holds the chosen item's text.
lotlChoiceAction(){
    _RET=""
    message=$1
    shift
    while [[ $_RET == "" ]]
    do
        clear
        echo -e $message
        choicenum=0
        for arg in "$@"
        do
            let "choicenum++"
            echo "$choicenum) $arg"
        done
        result=0
        echo "Choose: (1-$choicenum)"
        read result
        # NOTE(review): this is a SUBSTRING test against seq's output, so
        # inputs like "1 2" or multi-digit fragments can slip through and
        # index oddly below -- confirm intended.
        if [[ "`seq $choicenum`" =~ "$result" && "$result" != "" ]]
        then
            tmp=($@)
            _RET=${tmp[result-1]}
        fi
    done
}
# Membership test: set _RET=1 if the LAST argument occurs among the
# preceding arguments, _RET=0 otherwise.
# Called as: lotlCheckArray $LIST $item
# BUG FIX: the original used zsh-only expansions (${*[-1]}, ${@[1]}) that
# are syntax errors in bash, compared against the wrong argument ($2), left
# debug echoes in, and ended with a stray 'exit' that killed the game.
lotlCheckArray(){
    _RET=0
    [[ $# -gt 0 ]] || return 0
    local target="${!#}"
    local candidate
    for candidate in "${@:1:$#-1}"
    do
        if [[ "$candidate" == "$target" ]]
        then
            _RET=1
        fi
    done
}
# Ask the player for a faction and append the chosen name to MY_CIV.
lotlFactionSelect(){
    lotlChoiceAction "Choose your faction:" $FACTION_OPTIONS
    #FACTION=$_RET
    MY_CIV+=$_RET
}
# Map a faction name ($1) to its starting-family choices.
# Result: _RET holds a quoted, space-separated list; an unrecognized
# faction leaves _RET untouched (mirrors the original case with no default).
lotlStartingFamilies(){
    local faction="$1"
    if [[ "$faction" == "Nomads" ]]
    then
        _RET="\"Hunters\" \"Gatherers\""
    elif [[ "$faction" == "Settled" ]]
    then
        _RET="\"Builders\" \"Farmers\" \"Villagers\""
    elif [[ "$faction" == "Freemen" ]]
    then
        _RET="\"Woodcutters\" \"Fishermen\""
    fi
}
# Remove option $1 from GROW_OPTIONS when present.
# NOTE(review): ${GROW_OPTIONS#$1} strips only a LEADING match, so options
# in the middle of the list survive -- confirm intended. (Currently dead
# code: the only caller is commented out in lotlGrowAction.)
lotlRemoveGrowOption(){
    lotlCheckArray $GROW_OPTIONS $1
    if [[ $_RET -eq 1 ]]
    then
        GROW_OPTIONS=${GROW_OPTIONS#$1}
    fi
}
# Append one entry (space-prefixed) to the civilisation record.
lotlAddCiv(){
    MY_CIV+=" $1"
}
# Helper shared by the three evaluators below: succeed iff every argument
# already appears (as a loose substring, like the originals) in MY_CIV.
lotlHasAllReqs(){
    local req
    for req in "$@"
    do
        if ! [[ $MY_CIV =~ "$req" ]]
        then
            return 1
        fi
    done
    return 0
}
# Record layout for all three evaluators:
#   $1 = option name, $2 = action, $3 = handler (unused here, 'none' or a
#   function name), $4+ = requirements that must already be in MY_CIV.
# The three bodies were copy-paste triplicates; they now share
# lotlHasAllReqs and differ only in which option list they extend.

# Offer $1 as a growth option when it is not yet owned and its
# requirements are met.
lotlEvaluateGrow(){
    local option="$1"
    [[ $MY_CIV =~ "$option" ]] && return 0
    shift 3
    if lotlHasAllReqs "$@"
    then
        GROW_OPTIONS+=" $option"
    fi
}
# Unlock the Explore action (added to both the menu and MY_CIV so it is
# only unlocked once).
lotlEvaluateExplore(){
    local option="$1"
    [[ $MY_CIV =~ "$option" ]] && return 0
    shift 3
    if lotlHasAllReqs "$@"
    then
        BASIC_OPTIONS+=" $option"
        MY_CIV+=" $option"
    fi
}
# Unlock the Attack action (same one-shot semantics as Explore).
lotlEvaluateAttack(){
    local option="$1"
    [[ $MY_CIV =~ "$option" ]] && return 0
    shift 3
    if lotlHasAllReqs "$@"
    then
        BASIC_OPTIONS+=" $option"
        MY_CIV+=" $option"
    fi
}
# Dispatch one OPTION_REQS record ("name action handler reqs...") to the
# evaluator matching its action field ($2).
# NOTE(review): the fallback calls lotlEvaluateSpecial, which is not defined
# anywhere in this file -- confirm it is provided elsewhere.
lotlEvaluateReq(){
    case "$2" in
        "Grow") lotlEvaluateGrow $@
        ;;
        "Explore") lotlEvaluateExplore $@
        ;;
        "Attack") lotlEvaluateAttack $@
        ;;
        *) lotlEvaluateSpecial $@
    esac
}
# Rebuild the option lists from OPTION_REQS for the current MY_CIV.
# BUG FIX: the original iterated 'seq 0 ${#OPTION_REQS[*]}', which runs one
# index PAST the end of the array and dispatched an empty record into the
# undefined lotlEvaluateSpecial fallback on every turn.
lotlEvaluateCiv(){
    GROW_OPTIONS=""
    local record
    for record in "${OPTION_REQS[@]}"
    do
        # intentionally unquoted: each record word-splits into its fields
        lotlEvaluateReq $record
    done
}
# Commit a growth choice to the civilisation.
lotlGrowAction(){
    #lotlRemoveGrowOption "$1"
    lotlAddCiv "$1"
}
# Pick the starting family for the selected faction.
# NOTE(review): currently unused -- the call in lotlNewGame is commented out.
lotlFamilySelect(){
    lotlStartingFamilies $FACTION
    GROW_OPTIONS=$_RET
    lotlChoiceAction "You chose the $FACTION!\nChoose your starting family:" $GROW_OPTIONS
    lotlGrowAction $_RET
}
# Dispatch a chosen top-level action.
# NOTE(review): lotlBuildAction, lotlExploreAction, lotlClaimAction and
# lotlAttackAction are not defined in this file -- confirm they exist
# elsewhere before those branches become reachable.
lotlExecuteAction(){
    case "$1" in
        "Grow") lotlChoiceAction "Choose how to grow:" $GROW_OPTIONS
            lotlGrowAction $_RET
        ;;
        "Build") lotlChoiceAction "Choose what to build:" $BUILD_OPTIONS
            lotlBuildAction
        ;;
        "Explore") lotlExploreAction
        ;;
        "Claim") lotlClaimAction
        ;;
        "Attack") lotlAttackAction
        ;;
    esac
}
# One game turn: refresh the available options, ask, execute.
lotlNextTurn(){
    lotlEvaluateCiv
    lotlChoiceAction "Choose an Action:" $BASIC_OPTIONS
    lotlExecuteAction $_RET
}
# Main loop. ENDGAME is never set anywhere in this file, so the loop is
# effectively infinite -- quit with Ctrl-C.
lotlStartGame(){
    while [[ -z $ENDGAME ]]
    do
        lotlNextTurn
    done
}
# Entry point: faction selection, then the turn loop.
lotlNewGame(){
    lotlFactionSelect
    #lotlFamilySelect
    lotlStartGame
}
lotlNewGame
| true
|
0a8f4bceb8bee6c67c216c365aa3d0ac2c87bb17
|
Shell
|
tee-talog/dotfiles
|
/bin/default-file.sh
|
UTF-8
| 254
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Snippet appended to ~/.zshenv so login shells source the dotfiles-managed
# .zshenv. ${HOME} is expanded NOW, deliberately, so the written text carries
# an absolute path.
# BUG FIX: 'local' is only valid inside a function; 'local readonly' at file
# scope aborts the script. 'readonly' alone gives the intended constant.
readonly _append_str="
if [ -f \"${HOME}/dotfiles/.zshenv\" ]; then
    source \"${HOME}/dotfiles/.zshenv\"
fi
"
# Back up any existing ~/.zshenv (to ~/.zshenv_org), then append the loader.
function f_load_zshenv() {
    if [ -e ~/.zshenv ]; then
        cp ~/.zshenv{,_org}
    fi
    echo "${_append_str}" >>~/.zshenv
}
| true
|
3a7f1f9f038706759f40499a71ee5970dffec510
|
Shell
|
trishullab/bayou
|
/tool_files/build_scripts/build.sh
|
UTF-8
| 2,153
| 2.984375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2017 Rice University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Resolve this script's directory so resource paths work regardless of CWD.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BUILD_DIR="${SCRIPT_DIR}/out"
# download model if needed
MODEL_DIR=$SCRIPT_DIR/../../src/main/resources/model/
mkdir -p $MODEL_DIR
python3 $SCRIPT_DIR/fetch_model.py --name model-60-49 --model_dir $MODEL_DIR --url http://sisyphus.cs.rice.edu/release/
# ensure ouput dir is empty
rm -rf $BUILD_DIR
mkdir $BUILD_DIR
# determine version of Bayou being built
# NOTE(review): from here on the script assumes CWD == SCRIPT_DIR; the
# relative '../maven_3_3_9' walk depends on how it was invoked -- confirm.
cd ../maven_3_3_9/bayou
VER="$(printf 'VERSION=${project.version}\n0\n' | mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate | grep '^VERSION' | cut -c9-)" # get the project version number... e.g 1.1.0
# compile Bayou into a jar file
# copy Evidence.class between compile and package phase so unit tests run in 2nd phase apply to new class file
# also copy Bayou.class (@vijay-murali)
mvn clean compile
cp target/classes/edu/rice/cs/caper/bayou/annotations/Evidence.class ../../../src/main/resources/artifacts/classes/edu/rice/cs/caper/bayou/annotations/Evidence.class
cp target/classes/edu/rice/cs/caper/bayou/annotations/Bayou.class ../../../src/main/resources/artifacts/classes/edu/rice/cs/caper/bayou/annotations/Bayou.class
mvn package
# copy and rename post build files into out directory
cp target/bayou-$VER-jar-with-dependencies.jar $BUILD_DIR
cp -r ../../../src/main/python $BUILD_DIR
cp -r ../../../src/main/resources $BUILD_DIR
cp ../../../src/main/bash/binary_release/*.sh $BUILD_DIR
cp -r ../../../doc/external/example_inputs $BUILD_DIR
cd $BUILD_DIR
mv bayou-$VER-jar-with-dependencies.jar bayou-$VER.jar
| true
|
1de0b3a5aed43658c83a6712a86d1ff6e5b14cb4
|
Shell
|
yvesh/env
|
/scripts/defaultMonitor
|
UTF-8
| 363
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# https://linuxconfig.org/how-to-configure-your-monitors-with-xrandr-in-linux
# enable only the default monitor #
# Turns off both DisplayPort outputs and makes the laptop panel (eDP-1) the
# primary display at 100 dpi.
# NOTE(review): removeMonitor and wmRestart are not defined in this file --
# presumably sourced from the surrounding environment; confirm before
# running standalone.
run() {
    xrandr \
        --output DP-1 \
        --off \
        --output DP-2 \
        --off \
        --output eDP-1 \
        --auto \
        --output eDP-1 \
        --primary \
        --dpi 100
    removeMonitor DP-1
    removeMonitor DP-2
    wmRestart
}
run
| true
|
dcfb257b59d99b9fbb539bda6ef5384655d20f9a
|
Shell
|
HamletGhost/HomeScripts
|
/bin/network/GetTicket.sh
|
UTF-8
| 11,313
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Obtains a Kerberos ticket.
#
# Changes:
# 20150227 [v 2.0]
# complete rewrite; removed some features that might turn out to be needed
# in the future
# 20200111 [v 2.1]
# added `--verbose` option
#
#
################################################################################
# sourcing check:
# define a isScriptSourced() function returning whether the script is sourced;
# if a function of that name is already defined, we assume it has the same
# functionality and we use it.
# If we define our function, it will self-destruct after the first use if it
# finds out it's being sourced, so that the calling environment is not polluted.
# Reuse any pre-existing isScriptSourced(); otherwise define one that
# self-destructs once it detects sourcing, keeping the caller's namespace
# clean. When sourced, the outermost BASH_SOURCE entry differs from $0.
declare -f isScriptSourced >& /dev/null || function isScriptSourced() {
    # echo "BASH_SOURCE='${BASH_SOURCE[${#BASH_SOURCE[@]} - 1]}' \$0='${0}'"
    if [[ "${BASH_SOURCE[${#BASH_SOURCE[@]} - 1]}" != "$0" ]]; then
        unset -f "${FUNCNAME[0]}" # self-destruction
        return 0 # sourced; $0 = -bash or something
    else
        return 1 # subshell
    fi
} # isScriptSourced()
################################################################################
### Source mode
################################################################################
if isScriptSourced ; then
    # Sourced mode: re-run this script as a subprocess in a special mode
    # where it writes shell commands to a temp file; source that file so the
    # commands take effect in the CALLER's shell, then clean up.
    function GetTicket_LocalExecute() {
        local CommandsStream="$(mktemp --tmpdir 'GetTicket_commands.XXXXXXXXX')"
        ( "${BASH_SOURCE[0]}" --commands="$CommandsStream" "$@" )
        # BUG FIX: capture the script's exit code here. The original ended
        # with 'return $?' AFTER rm/unset, so it propagated the status of
        # 'unset' (always 0) instead of the script's real result.
        local rc=$?
        source "$CommandsStream"
        rm -f "$CommandsStream"
        unset GetTicket_LocalExecute # self-destruction
        return $rc
    }
    GetTicket_LocalExecute
    return $?
fi
################################################################################
### Script mode
################################################################################
SCRIPTNAME="$(basename "$0")"
SCRIPTDIR="$(dirname "$0")"
SCRIPTVERSION="v. 2.1"
###
### scripts defaults
###
# All of the following can be overridden from the environment.
: ${DEFAULTUSER='petrillo'}   # default Kerberos user
: ${DEFAULTREALM='FNAL.GOV'}  # default Kerberos realm
# default kinit options; -f asks for a forwardable ticket
[[ "${#DEFAULTKRB5OPTS[*]}" == 0 ]] && DEFAULTKRB5OPTS=( "-f" )
: ${DoRenewTicket:=1}         # try renewing an existing ticket first
: ${DoGetNewTicket:=1}        # fall back to requesting a brand new ticket
: ${KRB5LIFETIME:="26h"}      # ticket lifetime (kinit -l)
: ${KRB5RENEWTIME:="7d"}      # renewable time (kinit -r)
: ${NAT:="0"}                 # set to 1 when behind a NAT (kinit -n)
: ${kinit:="kinit"}           # external commands, overridable for testing
: ${mkdir:="mkdir -p"}
# A "flag" variable is considered set when, after removing every '0'
# character, its value is still non-empty (so "", "0" and "000" are unset,
# while "1", "01", "yes" are set).
function isFlagSet() {
  local FlagName="$1"
  [[ -n "${!FlagName//0}" ]]
} # isFlagSet()
function isFlagUnset() {
  local FlagName="$1"
  [[ -z "${!FlagName//0}" ]]
} # isFlagUnset()
# Message helpers: everything diagnostic goes to stderr.
function STDERR() { printf '%s\n' "$*" >&2 ; }
function ERROR() { STDERR "ERROR: $*" ; }
# FATAL <code> <message...>: print the message and exit with <code>.
function FATAL() {
  local -r Code="$1"
  shift
  STDERR "FATAL (${Code}): $*"
  exit "$Code"
} # FATAL()
# True when $DEBUG is set and at least the requested level (default: 1).
function isDebugging() {
  local -i MinLevel="${1:-1}"
  [[ -n "$DEBUG" ]] && [[ "$DEBUG" -ge "$MinLevel" ]]
} # isDebugging()
# DBGN <level> <message...>: print the message on stderr when debugging
# at that level; always succeeds.
function DBGN() {
  local -i MsgLevel="$1"
  shift
  if isDebugging "$MsgLevel" ; then
    STDERR "DBG[${MsgLevel}]| $*"
  fi
  return 0
} # DBGN()
function DBG() { DBGN 1 "$*" ; }
# MSG: plain informational output, on stdout.
function MSG() { printf '%s\n' "$*" ; }
function ExecCommand() {
  #
  # Usage: ExecCommand [--stdout=Redirect] [--nostdout] [--stderr=Redirect] [--nostderr] [--] command
  #
  # Runs `command` (via eval), optionally redirecting its stdout/stderr.
  # In debug mode, /dev/null redirections are cancelled so the output stays
  # visible; with BeVerbose set, the command line is echoed beforehand.
  #
  local StdOut StdErr
  while [[ $# -gt 0 ]]; do
    DBGN 3 "ExecCommand: parsing argument '${1}'"
    case "$1" in
      ( "--stdout="* ) StdOut="${1#--*=}" ;;
      ( "--stderr="* ) StdErr="${1#--*=}" ;;
      ( "--nostdout" ) StdOut='/dev/null' ;;
      ( "--nostderr" ) StdErr='/dev/null' ;;
      ( '--' ) shift ; break ;;
      ( * )
        # first token that is not an option: the command starts here
        DBGN 3 "  unrecognized option; command will be: $@"
        break ;;
    esac
    shift
  done
  if isDebugging ; then
    # keep output visible while debugging
    if [[ "$StdOut" == "/dev/null" ]] || [[ "$StdErr" == "/dev/null" ]]; then
      [[ "$StdOut" == "/dev/null" ]] && StdOut=''
      [[ "$StdErr" == "/dev/null" ]] && StdErr=''
      DBG "ExecCommand: output redirection to /dev/null overridden in debug mode"
    fi
  fi
  if isFlagSet BeVerbose ; then
    MSG "Cmd> $@${StdOut:+" 1> '${StdOut}'"}${StdErr:+" 2> '${StdErr}'"}"
  else
    DBG "$@"${StdOut:+" 1> '${StdOut}'"}${StdErr:+" 2> '${StdErr}'"}
  fi
  # NOTE: eval is intentional here so that per-call redirections apply
  eval "$@" ${StdOut:+ 1> "$StdOut"} ${StdErr:+ 2> "$StdErr"}
} # ExecCommand()
# Print "1" when the flag variable named $1 is set, "0" otherwise.
function FlagValue() {
  local VarName="$1"
  if isFlagSet "$VarName" ; then echo 1 ; else echo 0 ; fi
} # FlagValue()
# Print the negated value of the flag variable named $1.
function FlipFlagValue() {
  local VarName="$1"
  if isFlagSet "$VarName" ; then echo 0 ; else echo 1 ; fi
} # FlipFlagValue()
# Print the usage text on stdout.
# FIX: corrected user-visible typos ("Kerberos ptions:" -> "Kerberos
# options:", "currrent" -> "current") and documented the already-existing
# --version/--help options.
function help() {
	cat <<-EOH
Gets or renews a Kerberos5 ticket.

Usage:  $SCRIPTNAME  [options] [Realm]

The default realm is '${DEFAULTREALM}'

Kerberos options:
--user=KRB5USER ${KRB5USER:+"['${KRB5USER}']"}
    the Kerberos user to get ticket for
--instance=KRB5INSTANCE ${KRB5INSTANCE:+"['${KRB5INSTANCE}']"}
    the instance (usually nothing at all)
--root
    a shortcut for '--instance=root'
--fulluser=KRB5FULLUSER
    override the full Kerberos user specification (user/instance@DOMAIN)
--norenew
    do not try to renew the Kerberos ticket, always obtain a new one
--onlyrenew
    do not try to get a new Kerberos ticket, always renew a current one
    (failing if it's not possible)
--lifetime=LIFETIME ['${KRB5LIFETIME}']
    overrides the lifetime of the ticket: after the ticket lifetime is
    expired, it has to be renewed
--renewtime=RENEWTIME ['${KRB5RENEWTIME}']
    overrides the renewable time of the ticket: after the ticket renewable
    time is out, a new ticket must be obtained since the current one can't
    be renewed any more
--nat
    tells Kerberos we are behind a NAT

General program options:
--verbose , -v
    increases verbosity on screen
--version , -V
    prints the script version
--help , -h , -?
    prints this help message
EOH
} # help()
# Remove the first occurrence of option $1 -- together with its $# - 1
# companion values (e.g. `RemoveKerberosOption "-l" "26h"`) -- from the
# global KerberosOptions array.
# FIX: the inner loop referenced the undefined variable $NValue (typo for
# NValues) and kept unsetting the SAME index, so companion values were
# never removed; now each of the NValues consecutive entries is unset and
# the array is re-compacted.
function RemoveKerberosOption() {
  local Key="$1"
  local -i NValues="$#"
  local -i iOption=0
  while [[ $iOption -lt "${#KerberosOptions[@]}" ]]; do
    local Option="${KerberosOptions[iOption]}"
    if [[ "$Option" == "$Key" ]]; then
      # remove the key and its companion values
      local -i nKeys
      for (( nKeys = 0 ; nKeys < NValues ; ++nKeys )); do
        unset 'KerberosOptions[iOption + nKeys]'
      done
      # compact away the holes left by unset
      KerberosOptions=( "${KerberosOptions[@]}" )
      break
    fi
    let ++iOption
  done
} # RemoveKerberosOption()
# Append option $1 (plus companion values) to KerberosOptions, dropping
# any previous occurrence first so the new value wins.
function AddKerberosOption() {
  RemoveKerberosOption "$@"
  KerberosOptions+=( "$@" )
} # AddKerberosOption()
# Queue one or more actions to be performed by the main loop below.
function AddAction() { Actions+=( "$@" ) ; }
# Append a line to the sourced-mode commands file, when one is in use.
function AddCommand() {
  [[ -w "$CommandsStream" ]] && echo "$@" >> "$CommandsStream"
} # AddCommand()
################################################################################
function RenewTicket() {
  # Try to renew an already existing Kerberos ticket.
  # Arguments: $1 - full principal (user[/instance]@REALM)
  #            $2 - credential cache name (optional; passed as `kinit -c`)
  # Returns:   0 when the renewal succeeded, non-zero otherwise.
  local Principal="$1"
  local KRB5CCName="$2"
  local -i GotTicket=0
  # `kinit -R` renews; stderr is discarded because failure here is an
  # expected, recoverable condition (GetTicket() then asks for a new one)
  ExecCommand --nostderr -- $kinit ${KRB5CCName:+ -c "$KRB5CCName"} -R "$Principal"
  local res=$?
  if [[ $res == 0 ]]; then
    MSG "An existing ticket for user '${KRB5USER}'${KRB5INSTANCE:+" (instance '${KRB5INSTANCE}')"} on realm '${KRB5REALM}' was successfully renewed."
    GotTicket=1
  fi
  isFlagSet GotTicket
} # RenewTicket()
function GetTicket() {
  # Obtain a Kerberos ticket for the configured user/instance/realm:
  # first try renewing an existing one (unless --norenew), then request a
  # brand new one via kinit (unless --onlyrenew); finally authenticate to
  # OpenAFS when its kernel module is loaded.
  local -i GotTicket=0
  # determine the full user string (user[/instance]@DOMAIN)
  : ${KRB5FULLUSER:="${KRB5USER}${KRB5INSTANCE:+"/${KRB5INSTANCE}"}@${KRB5REALM}"}
  #
  # The system could have set KRB5CCNAME variable and there could be a valid
  # ticket there; if so, we try to use it, unless we really want a new one:
  #
  if isFlagSet DoRenewTicket ; then
    RenewTicket "$KRB5FULLUSER" "$KRB5CCNAME" && GotTicket=1
  fi
  # we might need in some cases to have the ticket in a shared location
  # (that the default location typically is not).
  # In that case, we'll have to resurrect code similar to the following:
  # : ${KRB5BASECCDIR:="${HOME}/tmp/krb5"}
  # KRB5CCNAME="${KRB5BASECCDIR}/${KRB5USER}/${KRB5INSTANCE:-"$KRB5USER"}"
  # $mkdir "$(dirname "$KRB5CCNAME")"
  # AddCommand "export KRB5CCNAME='${KRB5CCNAME}'"
  if isFlagUnset GotTicket && isFlagSet DoGetNewTicket ; then
    if [[ "$KRB5INSTANCE" == "root" ]]; then # non-forwardable
      RemoveKerberosOption '-f' # get rid of the forward options...
      AddKerberosOption '-F' # ... and add just one non-forwardable option
    fi
    [[ -n "$KRB5LIFETIME" ]] && AddKerberosOption "-l" "$KRB5LIFETIME"
    [[ -n "$KRB5RENEWTIME" ]] && AddKerberosOption "-r" "$KRB5RENEWTIME"
    [[ -n "$KRB5CCNAME" ]] && AddKerberosOption "-c" "$KRB5CCNAME"
    isFlagSet NAT && AddKerberosOption '-n'
    MSG "Getting a new Kerberos5 ticket: prepare your '${KRB5REALM}' password."
    ExecCommand $kinit "${KerberosOptions[@]}" "$KRB5FULLUSER"
    res=$?
    DBG "Exit code: $res"
    [[ $res == 0 ]] && GotTicket=1
  fi
  # NOTE(review): if the branch above was skipped, `res` may be unset here
  isFlagSet GotTicket || FATAL $res "Couldn't get a new Kerberos5 ticket. Hope there is a valid, existing one."
  # if there is OpenAFS loaded, try to authenticate with aklog:
  [[ -d '/proc/fs/openafs' ]] && which 'aklog' >& /dev/null && aklog
} # GetTicket()
################################################################################
###
### parameter parsing
###
# Parse the command-line: options start with '-'; everything else is a
# positional argument (the realm).  Unknown options are silently ignored,
# as before.
declare -a KerberosOptions=( "${DEFAULTKRB5OPTS[@]}" )
declare -a Actions
declare -i NoMoreOptions=0
declare -a Arguments
declare -i NArguments=0
for (( iParam = 1 ; iParam <= $# ; ++iParam )); do
  Param="${!iParam}"
  if [[ "${Param:0:1}" == '-' ]] && isFlagUnset NoMoreOptions ; then
    case "$Param" in
      ### settings ###
      ( '--user='* )       KRB5USER="${Param#--*=}" ;;
      ( '--instance='* )   KRB5INSTANCE="${Param#--*=}" ;;
      ( '--realm='* )      KRB5REALM="${Param#--*=}" ;;
      ( '--root' )         KRB5INSTANCE='root' ;;
      ( '--fulluser='* )   KRB5FULLUSER="${Param#--*=}" ;;
      ( '--norenew' )      NoRenew=1 ;;
      # FIX: was `RenewOnly=1`, but the flag read below (FlipFlagValue
      # OnlyRenew) is OnlyRenew -- so --onlyrenew silently had no effect.
      ( '--onlyrenew' )    OnlyRenew=1 ;;
      ( '--nat' )          NAT=1 ;;
      ( '--lifetime='* )   KRB5LIFETIME="${Param#--*=}" ;;
      ( '--renewtime='* )  KRB5RENEWTIME="${Param#--*=}" ;;
      ### operating modes ###
      ( '--commands='* )   CommandsStream="${Param#--*=}" ;;
      ### common options ###
      ( '--verbose' | '-v' )     BeVerbose=1 ;;
      ( '--debug' )              DEBUG=1 ;;
      ( '--debug='* )            DEBUG="${Param#--*=}" ;;
      ( '--version' | '-V' )     AddAction 'PrintVersion' ;;
      ( '--help' | '-h' | '-?' ) AddAction 'PrintHelp' ;;
      # end-of-options marker: NoMoreOptions was declared but never set
      ( '--' )                   NoMoreOptions=1 ;;
    esac
  else
    [[ $NArguments -gt 2 ]] && FATAL 1 "${SCRIPTNAME} suffers only ${NArguments} arguments! -- '${Param}'"
    Arguments[NArguments++]="$Param"
  fi
done
# set default actions: with nothing explicitly requested, get a ticket
if [[ "${#Actions[@]}" == 0 ]]; then
  AddAction 'GetTicket'
fi
# process the parameters:
# DoRenewTicket  is cleared by --norenew, DoGetNewTicket by the OnlyRenew
# flag.  NOTE(review): verify the flag name here matches the one the
# option parser above actually sets for --onlyrenew.
declare -i DoRenewTicket=$(FlipFlagValue NoRenew)
declare -i DoGetNewTicket=$(FlipFlagValue OnlyRenew)
# realm: first positional argument, else $KRB5REALM, else the default
KRB5REALM="${Arguments[0]:-${KRB5REALM:-${DEFAULTREALM}}}"
: ${KRB5USER:="${DEFAULTUSER}"}
[[ -n "$KRB5REALM" ]] || FATAL 1 "No Kerberos realm specified!"
if [[ -n "$CommandsStream" ]]; then
  # sourced mode: make sure the commands file exists and is writable
  if [[ ! -w "$CommandsStream" ]]; then
    touch "$CommandsStream" || FATAL 2 "The commands stream '${CommandsStream}' can't be written"
  fi
  DBG "Adding commands to: '${CommandsStream}'"
fi
###
### Performs the actions
###
# NOTE(review): ExitCode is never updated below; this path always exits 0
declare ExitCode=0
for Action in "${Actions[@]}" ; do
  case "$Action" in
    ( 'PrintVersion' )
      echo "${SCRIPTNAME} ${SCRIPTVERSION}"
      ;;
    ( 'PrintHelp' )
      help
      ;;
    ( 'GetTicket' )
      GetTicket
      ;;
    ( 'Exit' )
      break
      ;;
    ( * )
      FATAL 1 "Internal error: action '${Action}' not supported!"
  esac
done
exit "$ExitCode"
| true
|
2d6f98edba061a6f38f76ae48dd598a5586d6680
|
Shell
|
nstylo/dots
|
/.config/i3blocks/scripts/mediaplayer
|
UTF-8
| 726
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# i3blocks media-player block: prints "<artist> - <title> - m:ss" for the
# current track and maps mouse buttons to player controls.
# Arguments:   $1 - player name, passed to `playerctl --player`
# Environment: BLOCK_BUTTON - mouse button number, provided by i3blocks
PLAYER=$1   # NOTE(review): assigned but never used; $1 is used directly below
# mouse input: left click toggles, wheel switches tracks
case $BLOCK_BUTTON in
  1) playerctl --player=$1 play-pause ;;
  4) playerctl --player=$1 next ;;
  5) playerctl --player=$1 previous ;;
esac
# position in seconds (playerctl prints a float)
# NOTE(review): position/status/metadata are queried WITHOUT --player, so
# they act on playerctl's default player -- presumably they should use
# "$PLAYER" too; confirm.
TIME=$(playerctl position)
# cast to int by stripping the fractional part
TIME=${TIME%.*}
MINUTE=$(($TIME / 60))
SECOND=$(($TIME % 60))
# if seconds < 10, prefix 0
if [ "$SECOND" -lt "10" ] ; then
  SECOND="0$SECOND"
fi
STATUS=$(playerctl status)
# print the block text; fall back to a placeholder when nothing plays
if [ $STATUS = "Playing" ] ; then
  echo " $(playerctl metadata artist) - $(playerctl metadata title) - $MINUTE:$SECOND"
elif [ $STATUS = "Paused" ] ; then
  echo " $(playerctl metadata artist) - $(playerctl metadata title) - $MINUTE:$SECOND"
else
  echo "No Artist - No Title - 00:00"
fi
| true
|
48312fa6803b4aaaf6b130bb507cca501eb8d04d
|
Shell
|
gregwebs/vpn-ubuntu-setup
|
/vpn/route-default.sh
|
UTF-8
| 162
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
set -eu
set -o pipefail
# route through the VPN: make the last ppp interface the default route.
# NOTE(review): with pipefail enabled, the grep aborts the script when no
# ppp device exists -- presumably intended as a sanity check.
pp_devs=$(ifconfig | grep ppp | awk '{print $1}')
# NOTE(review): on systems where ifconfig prints "ppp0:", the trailing
# colon would end up in the device name -- confirm the output format.
route add default dev $(echo "$pp_devs" | tail -1)
| true
|
91a0a39c9eb1ae2dab53567af50a96087f22d49e
|
Shell
|
liaoqiArno/linux_tools
|
/shell/tools/cacti-0.8.8-centos-redhat.sh
|
UTF-8
| 4,186
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Installer for cacti 0.8.8b + rrdtool 1.4.9 on CentOS/RedHat
# (Aliyun /alidata layout; downloads sources from an OSS mirror).
SRC_URI1="http://zy-res.oss-cn-hangzhou.aliyuncs.com/cacti/cacti-0.8.8b.tar.gz"
SRC_URI2="http://zy-res.oss-cn-hangzhou.aliyuncs.com/cacti/rrdtool-1.4.9.tar.tar"
PKG_NAME1=`basename $SRC_URI1`   # cacti tarball file name
PKG_NAME2=`basename $SRC_URI2`   # rrdtool tarball file name
DIR=`pwd`                        # remember the starting directory
DATE=`date +%Y%m%d%H%M%S`        # timestamp used when backing up configs
CPU_NUM=$(cat /proc/cpuinfo | grep processor | wc -l)   # parallel make jobs
mkdir -p /alidata/rrdtool
mkdir -p /alidata/install
cd /alidata/install
# download each source archive only when not already present (non-empty)
if [ ! -s $PKG_NAME1 ]; then
  wget -c $SRC_URI1
fi
if [ ! -s $PKG_NAME2 ]; then
  wget -c $SRC_URI2
fi
# Detect the distro from the kernel build string and install dependencies.
# NOTE(review): matching /proc/version for "redhat" vs "centos" is fragile
# (CentOS kernels often mention redhat in the compiler string) -- confirm
# on the target hosts.
if [ "$(cat /proc/version | grep redhat)" != "" ];then
  wget http://git.jiagouyun.com/operation/operation/raw/master/linux/redhat/CentOS-Base.repo -O /etc/yum.repos.d/CentOS-Base.repo
  yum makecache
  yum -y install net-snmp net-snmp-devel net-snmp-libs net-snmp-utils cairo-devel libxml2-devel pango-devel pango libpng-devel freetype freetype-devel libart_lgpl-devel perl-ExtUtils-CBuilder perl-ExtUtils-MakeMaker
  yum -y install mysql mysql-server php-mysql php php-fpm
elif [ "$(cat /proc/version | grep centos)" != "" ];then
  if [ `uname -m` == "x86_64" ];then
    # on 64-bit CentOS 5, exclude 32-bit packages from yum
    if cat /etc/issue |grep "5\." &> /dev/null;then
      if ! cat /etc/yum.conf |grep "exclude=\*\.i?86" &> /dev/null;then
        sed -i 's;\[main\];\[main\]\nexclude=*.i?86;' /etc/yum.conf
      fi
      rpm --import /etc/pki/rpm-gpg/RPM*
    fi
  fi
  yum makecache
  # FIX: package name typo "neit-snmp-utils" -> "net-snmp-utils"
  yum -y install net-snmp net-snmp-devel net-snmp-libs net-snmp-utils cairo-devel libxml2-devel pango-devel pango libpng-devel freetype freetype-devel libart_lgpl-devel perl-ExtUtils-CBuilder perl-ExtUtils-MakeMaker
  yum -y install mysql mysql-server php-mysql php php-fpm
fi
# --- snmpd: back up the config, then open it up for local cacti polling
cp /etc/snmp/snmpd.conf /etc/snmp/snmpd.conf.$DATE
sed -i 's/com2sec notConfigUser default public/com2sec notConfigUser 127.0.0.1 public/g' /etc/snmp/snmpd.conf
sed -i 's/access notConfigGroup "" any noauth exact systemview none none/access notConfigGroup "" any noauth exact all none none/g' /etc/snmp/snmpd.conf
sed -i 's/#view all included .1 80/view all included .1 80/g' /etc/snmp/snmpd.conf
/etc/init.d/mysqld start
/etc/init.d/snmpd start
/etc/init.d/php-fpm start
# --- unpack fresh source trees
rm -rf cacti-0.8.8b
rm -rf rrdtool-1.4.9
tar xvf $PKG_NAME1
tar xvf $PKG_NAME2
# --- build and install rrdtool
cd rrdtool-1.4.9
./configure --prefix=/alidata/rrdtool
if [ $CPU_NUM -gt 1 ];then
  make -j$CPU_NUM
else
  make
fi
make install
# --- install nginx via the external helper script (append "exit" so the
# helper does not drop into an interactive shell at its end)
wget http://git.jiagouyun.com/operation/operation/raw/master/nginx/nginx-1.4.7.sh
echo " " >> nginx-1.4.7.sh
echo "exit" >> nginx-1.4.7.sh
bash nginx-1.4.7.sh
mv /alidata/nginx/conf/vhosts/default.conf /alidata/nginx/conf/vhosts/default.conf.$DATE
# --- write the default nginx vhost; the heredoc delimiter is unquoted,
# so $LOGS below is expanded by THIS shell at write time.
# NOTE(review): $LOGS is never set in this script, so access_log gets an
# empty directory prefix -- confirm it is exported by the environment.
cat > /alidata/nginx/conf/vhosts/default.conf << EOF
server {
listen 80 default;
server_name _;
#index.php or index.jsp ???
index index.html index.htm index.php;
root /alidata/www/default;
####<<<PHP settings>>>####
location ~ .*\.(php|php5)?$
{
fastcgi_pass 127.0.0.1:9000;
fastcgi_index index.php;
include fastcgi.conf;
}
####<<<Cache settings>>>####
location ~ .*\.(gif|jpg|jpeg|png|bmp|swf)$
{
expires 1d;
}
location ~ .*\.(js|css)?$
{
expires 1d;
}
####<<<The log path set>>>####
access_log $LOGS/default.log;
}
EOF
/etc/init.d/nginx reload
# --- deploy cacti and create its database + user
cd /alidata/install
mv cacti-0.8.8b /alidata/www/default/cacti
cat > sql.sql << EOF
create database cacti;
grant all on cacti.* to cacti@localhost identified by 'cacti';
grant all on cacti.* to cacti@127.0.0.1 identified by 'cacti';
flush privileges;
EOF
mysql < sql.sql
useradd cacti
echo "cacti" | passwd --stdin cacti
# web server (www) needs write access to the runtime directories only
cd /alidata/www/default/
chown -R root:root cacti/
chown -R www.www cacti/rra/
chown -R www.www cacti/log/
chown -R www.www cacti/scripts/
cd /alidata/www/default/cacti
mysql -u cacti -pcacti cacti < cacti.sql
sed -i 's/$database_username = "cactiuser";/$database_username = "cacti";/g' include/config.php
sed -i 's/$database_password = "cactiuser";/$database_password = "cacti";/g' include/config.php
#if ! cat /etc/profile | grep "/alidata/softname/bin" &> /dev/null ;then
#	echo "export PATH=\$PATH:/alidata/softname/bin" >> /etc/profile
#fi
#source /etc/profile
cd $DIR
# NOTE(review): this spawns an interactive shell at the end -- confirm intended
bash
| true
|
e369548195c620dae4cb5aa97805b5ddcf4e53b0
|
Shell
|
Zamua/.dotfiles
|
/gradle-aliases.sh
|
UTF-8
| 272
| 2.984375
| 3
|
[] |
no_license
|
# Unbind any pre-existing aliases that would clash with the names below
unalias gr 2>/dev/null
unalias gc 2>/dev/null

# Bind the short gradle aliases
alias g="gradle"
alias gb="gradle build"
alias gc="gradle clean"
alias gt="gradle test"

# gr [testfile] -- run the project with gradle, optionally forwarding the
# first argument as -Ptestfile=<file>.
gr() {
  local cmd="gradle run"
  if [ $# -gt 0 ]; then
    cmd="$cmd -Ptestfile=$1"
  fi
  eval $cmd
}
| true
|
0484990f9cc42bf16214113c9056a4d11a233b2a
|
Shell
|
yeq71/server-configuration-scripts
|
/archiver-slac-based/install.sh
|
UTF-8
| 4,944
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive (dialog-based) installer for an EPICS Archiver Appliance.
# envs.sh is expected to define TOMCAT_URL, TOMCAT_DISTRIBUTION,
# MYSQL_CONNECTOR, ARCHIVER_REPO, COMPILED_ARCHIVER_APPL, DEPLOY_DIR, etc.
source envs.sh
sudo apt-get install dialog
# yum install dialog
# Remove the work directories left by this (or a previous) run.  Only
# directories are removed; an ordinary file with one of these names is
# deliberately left alone.
function cleanup {
  local leftover
  for leftover in extracted_files resources; do
    if [ -d "$leftover" ]; then
      rm -rf "$leftover"
    fi
  done
}
# clean up work directories on every exit path, start from a clean slate
trap cleanup EXIT
cleanup
mkdir -p resources
# Optionally download Tomcat and the MySQL connector, then either clone
# and build the archiver from source or fetch a pre-built appliance.
MSG="Wish to download the required files?"
dialog --backtitle "Archiver configuration" --title "Configuration" --yesno "${MSG}" 0 0
if [[ $? == 0 ]] ; then
  pushd resources
  wget ${TOMCAT_URL}
  tar -zxf ${TOMCAT_DISTRIBUTION}.tar.gz
  export TOMCAT_HOME=$(pwd)/${TOMCAT_DISTRIBUTION}
  wget https://dev.mysql.com/get/Downloads/Connector-J/${MYSQL_CONNECTOR}.tar.gz --no-check-certificate
  tar -xvf ${MYSQL_CONNECTOR}.tar.gz
  mv ${MYSQL_CONNECTOR}/${MYSQL_CONNECTOR}-bin.jar .
  popd
  MSG="Wish to clone and build epicsappliances repo?"
  dialog --backtitle "Archiver configuration" --title "Configuration" --yesno "${MSG}" 0 0
  if [[ $? == 0 ]] ; then
    pushd resources
    git clone ${ARCHIVER_REPO}
    cd epicsarchiverap
    RES=$(dialog --stdout --menu 'Choose the tag to be used!' 0 0 0 $(git tag -l | awk '{printf "tags/%s %s\n", $1, $1}') master "Bleeding Edge")
    # FIX: was `git checkout RES`, which tries to check out the literal
    # pathspec "RES" instead of the tag/branch the user selected
    git checkout "$RES"
    ant
    popd
  else
    MSG="Downloading appliance from ${COMPILED_ARCHIVER_APPL} ..."
    dialog --msgbox "${MSG}" 0 0
    wget $COMPILED_ARCHIVER_APPL --no-check-certificate
    MSG="Download complete ! \n"$(ls)
    dialog --msgbox "${MSG}" 0 0
  fi
fi
# Ask the user where the built archive is, unpack it, and run the
# single-machine installer with our own install_scripts overlaid.
MSG="Where is the epicsappliances build file (tar.gz)? Try the resources folder ..."
ARCH_TAR=$(dialog --stdout --title "$MSG" --fselect ${PWD} 0 0 )
if [[ ! -f ${ARCH_TAR} ]]; then
  MSG="${ARCH_TAR} does not seem to be a valid file"
  dialog --msgbox "${MSG}" 0 0
  exit 1
fi
mkdir extracted_files
tar -C extracted_files -zxf ${ARCH_TAR}
# replace the bundled install scripts with the local ones
rm -rvfd extracted_files/install_scripts
mkdir -p extracted_files/install_scripts
cp -rf install_scripts/ extracted_files/
export SCRIPTS_DIR=$(pwd)/extracted_files/install_scripts
ls
pushd ${SCRIPTS_DIR}
ls
. ./single_machine_install.sh
popd
# patch the generated startup script in place.
# NOTE(review): the "14c"/"30c"... sed commands replace FIXED line numbers
# of sampleStartup.sh -- brittle against upstream changes; confirm the
# line layout of the deployed template.
STARTUP_SH=${DEPLOY_DIR}/sampleStartup.sh
sed -i -e "14cexport JAVA_OPTS=\"${JAVA_OPTS}\"" ${STARTUP_SH}
sed -i -e "30cexport ARCHAPPL_SHORT_TERM_FOLDER=${ARCHAPPL_SHORT_TERM_FOLDER}" ${STARTUP_SH}
sed -i -e "31cexport ARCHAPPL_MEDIUM_TERM_FOLDER=${ARCHAPPL_MEDIUM_TERM_FOLDER}" ${STARTUP_SH}
sed -i -e "32cexport ARCHAPPL_LONG_TERM_FOLDER=${ARCHAPPL_LONG_TERM_FOLDER}" ${STARTUP_SH}
rm -rvfd extracted_files
# Per-unit post-install customisation: rebrand the mgmt UI and install
# the custom viewer into the retrieval webapp.
for APPLIANCE_UNIT in "engine" "retrieval" "etl" "mgmt"
do
  if [ $APPLIANCE_UNIT == "mgmt" ]; then
    # replace SLAC/LCLS branding with LNLS in the management UI
    UI_DIR=${DEPLOY_DIR}/${APPLIANCE_UNIT}/webapps/mgmt/ui
    IMG_DIR=${UI_DIR}/comm/img
    for file in "appliance.html" "cacompare.html" "index.html" "integration.html" "metrics.html" "pvdetails.html" "redirect.html" "reports.html" "storage.html"
    do
      sed -i "s/LCLS/LNLS/g" ${UI_DIR}/${file}
      echo ${UI_DIR}/${file}
    done
    sed -i "s/Jingchen Zhou/LNLS CON group/g" ${UI_DIR}/index.html
    sed -i "s/Murali Shankar at 650 xxx xxxx or Bob Hall at 650 xxx xxxx/LNLS CON group/g" ${UI_DIR}/index.html
    cp -f labLogo.png ${IMG_DIR}
    cp -f labLogo2.png ${IMG_DIR}
  fi
  if [ $APPLIANCE_UNIT == "retrieval" ]; then
    # install the archiver-viewer and point it at the local endpoint
    cp -f redirect.html ${DEPLOY_DIR}/${APPLIANCE_UNIT}/webapps/retrieval/ui/redirect.html
    pushd ${DEPLOY_DIR}/${APPLIANCE_UNIT}/webapps/retrieval/ui
    rm -rvfd viewer
    git clone ${ARCHIVER_VIEWER_REPO}
    mv archiver-viewer viewer
    mv viewer/index.html viewer/archViewer.html
    pushd viewer/js
    # rewrite the hard-coded data-server host:port in the viewer bundles
    sed -i "s/10\.0\.4\.57\:11998/10\.0\.6\.51\:17668/g" archiver-viewer.min.js
    sed -i "s/10\.0\.6\.57\:11998/10\.0\.6\.51\:17668/g" archiver-viewer.min.js
    sed -i "s/10\.0\.4\.57\:11998/10\.0\.6\.51\:17668/g" archiver-viewer.js
    sed -i "s/10\.0\.6\.57\:11998/10\.0\.6\.51\:17668/g" archiver-viewer.js
    popd
    popd
    # make /ui/redirect.html the welcome page of the retrieval webapp
    pushd ${DEPLOY_DIR}/${APPLIANCE_UNIT}/webapps/retrieval/WEB-INF/
    xmlstarlet ed --inplace --subnode "/web-app" --type elem -n welcome-file-list -v "" web.xml
    xmlstarlet ed --inplace --subnode "/web-app/welcome-file-list" --type elem -n welcome-file -v "/ui/redirect.html" web.xml
    popd
  fi
done
| true
|
f6c2f510a6481f245c1face44411313c3f7eccc4
|
Shell
|
nkarast/nkarastAccelLegacy
|
/scan_scripts/sixjobs/scan_check_mad.sh
|
UTF-8
| 1,294
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Verify that the MAD6T-generated SixTrack input files exist for every
# mask of the scan; masks with any file missing are listed in
# missingMad6t.txt.
source ./scan_definitions.sh
source ./sixdeskenv
export study=${workspace}
#################i
rm -rf missingMad6t.txt
for mask in $mask_list
do
	echo "#########################################"
	echo "### STUDY : $study"
	echo "#########################################"
	# FIX: typo in the progress message ("CHEKCING" -> "CHECKING")
	echo "### CHECKING FILES FOR: $mask"
	export ok=true
	# required input files for this mask (kept exported, as before)
	export fort2=${scratchdir}'/sixtrack_input/'${study}'/'${mask}'/fort.2_1.gz'
	export fort16=${scratchdir}'/sixtrack_input/'${study}'/'${mask}'/fort.16_1.gz'
	export fort8=${scratchdir}'/sixtrack_input/'${study}'/'${mask}'/fort.8_1.gz'
	export mother1=${scratchdir}'/sixtrack_input/'${study}'/'${mask}'/fort.3.mother1'
	export mother2=${scratchdir}'/sixtrack_input/'${study}'/'${mask}'/fort.3.mother2'
	# one loop instead of five copy-pasted if-blocks
	for required in "$fort2" "$fort16" "$fort8" "$mother1" "$mother2" ; do
		if [ ! -f "$required" ]; then
			ok=false
		fi
	done
	if $ok; then
		echo 'ok'
	else
		echo $mask >> missingMad6t.txt
	fi
done
| true
|
6b3fa6f4b98021f91fd80588f33e3bc003f2a90a
|
Shell
|
hayesall/DrugInteractionData
|
/Confidence/scoreconfidence.sh
|
UTF-8
| 2,166
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh
# Optimized for running on Indiana University's RI Odin Cluster
# Written by Alexander Hayes | ProHealth && STARAI | Dr. Sriraam Natarajan
# NOTE(review): the shebang is /bin/sh but the script uses the bash-only
# `function` keyword -- run it with bash.
FINAL=ODINORDER.txt        # shared file: final ordering of cluster nodes
LOG=DETERMINEORDER.txt     # shared scratch log used to count registrations
rm -f $FINAL
rm -f $LOG
# Register this host in the shared ordering files: sleep proportionally to
# the odinNN host number so nodes append in a staggered, deterministic
# order, then record "<host><position>" in $FINAL.
function synchronize {
  HOSTNUMBER=`hostname | grep -o -P '(?<=odin).*(?=.cs.indiana.edu)' | sed 's/^0*//'`
  echo $HOSTNUMBER
  sleep $HOSTNUMBER
  HOST=`hostname`
  echo "$HOST" >> $LOG
  OUTPUT=`wc --lines $LOG | cut -d 'L' -f 1 | cut -d 'D' -f 1`
  echo "$HOST$OUTPUT" >> $FINAL
}
sleep 5
synchronize
# Read back this host's assigned number from the shared ordering file.
NUMBERSTRING=`grep $HOST $FINAL | cut -d 'u' -f 2`
NUMBER=$(($NUMBERSTRING * 1))
echo $HOST is at $NUMBER
# Per-node work files: FILE1 walks the outer drug list, FILE2 the inner
# one; STABLE holds the full, untouched drug list.
FILE1=../Generated/Data/$NUMBER/check_$NUMBER #formerly drugs1.txt
FILE2=../Generated/Data/$NUMBER/drugs.txt #formerly drugs2.txt
STABLE=../Generated/Data/$NUMBER/STABLE.txt #formerly STABLE.txt
BEGIN=`wc --lines $FILE1 | cut -d ' ' -f 1`   # initial count, for progress
TEMP1=Data/$NUMBER/check_$NUMBER.tmp #formerly drugs1.tmp
TEMP2=Data/$NUMBER/drugs.tmp #formerly drugs2.tmp
# Remove the first line of the file named by $1, using temp file $2.
drop_first_line() {
  tail -n +2 "$1" > "$2" && mv "$2" "$1"
}
# Advance the outer list by one drug, and reset the inner list to the
# full stable drug list.
function shrink1 {
  drop_first_line "$FILE1" "$TEMP1"
  cp "$STABLE" "$FILE2"
}
# Advance the inner list by one drug.
function shrink2 {
  drop_first_line "$FILE2" "$TEMP2"
}
#commands for testing
#rm -f LOG.txt
#cp STABLE.txt drugs1.txt
#cp STABLE.txt drugs2.txt
#rm -f Abstracts/*
#
# Main loop: for every ordered pair (DRUG1, DRUG2) query PubMed; whenever
# matches exist, save the abstracts under Abstracts/<first letter>/.
while [ $(wc --bytes $FILE1 | cut -d ' ' -f 1) -gt 0 ]; do
  DRUG1=`head -n 1 "$FILE1"`
  DRUG2=`head -n 1 "$FILE2"`
  # the inner loop walks DRUG2 down the inner list until it meets DRUG1
  until [ $DRUG1 = $DRUG2 ]; do
    DRUG1=`head -n 1 "$FILE1"`
    DRUG2=`head -n 1 "$FILE2"`
    # count the matching PubMed entries for this drug pair
    # NOTE(review): -t 3650 -d 20 look like window/limit flags of the
    # local pmsearch script -- confirm against its documentation
    TEST=`perl pmsearch -c -t 3650 -d 20 $DRUG1 $DRUG2`
    echo Found $TEST for $DRUG1 and $DRUG2 " | " Progress: $[$BEGIN-`wc --lines $FILE1 | cut -d ' ' -f 1`] / $BEGIN
    if [ $TEST -gt 0 ]; then
      if [ $DRUG1 = $DRUG2 ]; then
        # a drug paired with itself carries no interaction information
        shrink2
      else
        FILENAME=$DRUG1-$DRUG2.txt
        INITIAL="$(echo $FILENAME | head -c 1)" #store our file in the folder of the first letter
        echo Creating $FILENAME
        echo "Odin Node $NUMBER, Creating $FILENAME, found $TEST results, `date`" >> Abstracts/LOG.txt
        perl pmsearch -t 3650 -d 20 $DRUG1 $DRUG2 | perl pmid2text -a -i > Abstracts/$INITIAL/$FILENAME;
        shrink2
      fi
    else
      shrink2
    fi
  done
  shrink1
done
exit
| true
|
4d84bf7ee4b640c761f5aa271d1125f2e1b4308a
|
Shell
|
muffato/docker-ensembl-linuxbrew-compara
|
/base/install_cask.sh
|
UTF-8
| 245
| 2.703125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
set -e
# Install an Ensembl cask formula, retrying until its binary shows up.
# Arguments: $1 - formula (and resulting binary) name
# brew often partially installs stuff, so run it again and again until complete
until [ -e "/home/linuxbrew/.linuxbrew/bin/$1" ]
do
  brew install "ensembl/cask/$1"
done
# reclaim disk space in the (Docker) image
brew cleanup
rm -rf /home/linuxbrew/.cache/Homebrew
| true
|
40e1e948dda644722083cbb8a6d7098b8b60878f
|
Shell
|
nevernervous/tpb_kiosk
|
/site_update.sh
|
UTF-8
| 1,655
| 4.25
| 4
|
[] |
no_license
|
#!/bin/bash
# exit on error
set -e
# this script will update the git repo and then sync a particular site to the latest version of the branch
# only applies to wp files
# Usage: ./site_update.sh <site_name> <branch_name>  (must run as root,
# from the repository directory)
# get parameters
SITE=$1
BRANCH=$2
if [ "$SITE" == "" ] || [ "$BRANCH" == "" ]; then
  echo "USAGE: ./site_update.sh site_name branch_name"
  exit 0
fi
# have to be root
if [ $(whoami) != "root" ]; then
  echo "run this script as root. exit..."
  exit 1
fi
# check where are we running from
if [ ! -d .git ]; then
  echo "please run this script from the repo directory"
  exit 1
fi
# check for user (each site has a matching system user)
if ( ! id "$SITE" 2> /dev/null 1>/dev/null ); then
  echo "can't find user $SITE. exit..."
  exit 1
fi
# check /var/www/exists
if [ ! -d /var/www/$SITE ]; then
  echo "can't find directory /var/www/$SITE for user $SITE"
  exit 1
fi
# check branch exists
# // don't check now, will fail if branch not found
# confirm name correct (interactive safety prompt)
echo "is $SITE the correct site?"
read -p "Continue (y/n)?" choice
if [[ ! $choice =~ ^[Yy]$ ]]
then
  echo 'exit...'; exit 0;
fi
echo "update repo"
git pull
#
echo "available branches"
git branch -a
echo "change branch"
git checkout $BRANCH
# do sync; wp-config.php is site-specific and must never be overwritten
echo "sync files"
rsync -v -rlt --exclude=wp-config.php ./latestbuild/www/ /var/www/$SITE/
# apply file permissions
echo "apply permissions"
# modify permissions to include editors group
chown -R $SITE:editors /var/www/$SITE
# dirs 775 + setgid so new files inherit the editors group; files 664
find /var/www/$SITE -type d -exec chmod 775 {} \;
find /var/www/$SITE -type d -exec chmod g+s {} \;
find /var/www/$SITE -type f -exec chmod 664 {} \;
# default ACL: future files are group-writable for editors
setfacl -Rdm g:editors:rwx /var/www/$SITE
chmod 770 /var/www/$SITE
echo
echo "done."
| true
|
dcc58d8756fedf44d54ff54d20b377f2c41406d0
|
Shell
|
zhangshuo1996/Shell_learn
|
/chapter_12/test4.sh
|
UTF-8
| 258
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstrates multi-command if/else branches, keyed on whether a user
# appears in /etc/passwd.
testUser=noSuchUser
if grep $testUser /etc/passwd
then
  echo "first command"
  echo "second command"
  echo "I can put in other commands besides echo"
  echo $testUser
  ls -a /home/$testUser/.b*
else
  # FIX: user-facing message typo "doesnot exit" -> "does not exist"
  echo "The user $testUser does not exist"
  echo
fi
| true
|
366ad02d7a5173c42a10c8627cdbd902dfb00e7b
|
Shell
|
ShellShoccar-jpn/metropiper
|
/CGI/GET_SNUM_HTMLPART.AJAX.CGI
|
UTF-8
| 4,455
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/sh
######################################################################
#
# GET_SNUM_HTMLPART.AJAX.CGI
# 駅ナンバー一覧部分HTML生成
# Written by Rich Mikan(richmikan@richlab.org) at 2014/09/17
#
# [入力]
# HTTP POSTで次のCGI変数を与える
# [rwletter] : 「知りたい駅」の路線文字
# DATA/SNUM2RWSN_MST.TXT …… 駅ナンバーマスターファイル
# TEMPLATE.HTML/MAIN.HTML…… 表示用HTMLテンプレートファイル
# [出力]
# 駅ナンバーと駅名を埋め込んだ<option>タグ
#
# [備考]
# rwletterを省略した場合は、全駅が返される。
#
######################################################################
######################################################################
# Initial setup
######################################################################
# --- error-exit function definitions ---------------------------------
# Each helper prints a complete CGI response (status header, blank line,
# plain-text body) and terminates the script.
# FIX: the 500 helper was named errorcode_exit, but every caller in this
# script invokes error500_exit (which did not exist); renamed, keeping
# the old name as a compatibility wrapper.
error500_exit() {
	cat <<-__HTTP_HEADER
Status: 500 Internal Server Error
Content-Type: text/plain

500 Internal Server Error
($@)
__HTTP_HEADER
	exit 1
}
errorcode_exit() { error500_exit "$@" ; }
error400_exit() {
	cat <<-__HTTP_HEADER
Status: 400 Bad request
Content-Type: text/plain

400 Bad Request
($@)
__HTTP_HEADER
	exit 1
}
# Emit a plain-text response with the given message and stop (exit 0).
dyingmessage() {
	printf 'Content-Type: text/plain\n\n'
	echo "$@"
	exit
}
# --- home directory of this system -----------------------------------
Homedir="$(d=${0%/*}/; [ "_$d" = "_$0/" ] && d='./'; cd "$d.."; pwd)"
# --- other initial directory definitions etc. ------------------------
# 1) extend the command search path with the bundled tool directories
PATH="$Homedir/UTL:$Homedir/TOOL:$PATH"
# 2) prefix for temporary files (script name + PID)
Tmp=/tmp/${0##*/}.$$
# 3) remove the temporary files when the script terminates
exit_trap() { rm -f $Tmp-*; }
trap "exit_trap" EXIT HUP INT QUIT PIPE ALRM TERM
######################################################################
# 事前チェック
######################################################################
# --- 駅ナンバーマスターファイル存在確認 -----------------------------
if [ ! -f "$Homedir/DATA/SNUM2RWSN_MST.TXT" ]; then
error500_exit 'Station name master file is not found'
fi
# --- テンプレートHTMLファイル存在確認 -------------------------------
if [ ! -f "$Homedir/TEMPLATE.HTML/MAIN.HTML" ]; then
error500_exit 'Template HTML file is not found'
fi
######################################################################
# CGI変数取得
######################################################################
# --- CGI変数(GETまたはPOST)を取得 -----------------------------------
case "${REQUEST_METHOD:-}" in #
POST) dd bs=${CONTENT_LENGTH:-0} count=1 2>/dev/null;; #
*) printf '%s' "${QUERY_STRING:-}" ;; #
esac |
cgi-name > $Tmp-cgivars
# --- 正当性確認 -----------------------------------------------------
s=$(nameread rwletter $Tmp-cgivars)
echo "_$s" | grep -qE '^_([A-Za-z]|)$'
[ $? -eq 0 ] || { error400_exit 'Invalid Railway letter on rwletter'; }
rwletter=$s
######################################################################
# 部分HTML出力
######################################################################
# --- HTTPヘッダーを出力 ---------------------------------------------
cat <<-HTTP_HDR
Content-Type: text/plain; charset=UTF-8
HTTP_HDR
# --- 部分HTMLのテンプレート抽出 -------------------------------------
cat "$Homedir/TEMPLATE.HTML/MAIN.HTML" |
sed -n '/FROM_SELECT_BOX/,/FROM_SELECT_BOX/p' |
sed 's/―/選んでください/' > $Tmp-htmltmpl
# --- HTML本体を出力 -------------------------------------------------
cat "$Homedir/DATA/SNUM2RWSN_MST.TXT" |
# 1:駅ナンバー(sorted) 2:路線コード 3:路線名 4:路線駅コード
# 5:駅名 6:方面コード(方面駅でない場合は"-")
grep -i "^$rwletter" |
awk '{print substr($1,1,1),$0}' |
sort -k1f,1 -k2,2 |
awk '{print $2,$4,$6}' |
uniq |
# 1:駅ナンバー(sorted) 2:路線名 3:駅名 #
mojihame -lFROM_SNUM_LIST $Tmp-htmltmpl -
######################################################################
# 正常終了
######################################################################
exit 0
| true
|
f15e2c6ba7107ce1f69c287b5cc81bae5b47495f
|
Shell
|
Anurag810/frappe
|
/.github/helper/install.sh
|
UTF-8
| 2,847
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# CI helper: set up a frappe-bench with a consumer test site (and, for
# server runs, a producer site) against MariaDB or Postgres.
# Environment: DB   - "mariadb" or "postgres"
#              TYPE - "server" or "ui"
#              GITHUB_WORKSPACE - checkout path (set by GitHub Actions)
set -e
cd ~ || exit
pip install frappe-bench
bench init frappe-bench --skip-assets --python "$(which python)" --frappe-path "${GITHUB_WORKSPACE}"
# site configs are copied from the repo's per-DB templates
mkdir ~/frappe-bench/sites/test_site
cp "${GITHUB_WORKSPACE}/.github/helper/consumer_db/$DB.json" ~/frappe-bench/sites/test_site/site_config.json
if [ "$TYPE" == "server" ]; then
  mkdir ~/frappe-bench/sites/test_site_producer;
  cp "${GITHUB_WORKSPACE}/.github/helper/producer_db/$DB.json" ~/frappe-bench/sites/test_site_producer/site_config.json;
fi
# create the test databases and users for the chosen backend
if [ "$DB" == "mariadb" ];then
  mysql --host 127.0.0.1 --port 3306 -u root -e "SET GLOBAL character_set_server = 'utf8mb4'";
  mysql --host 127.0.0.1 --port 3306 -u root -e "SET GLOBAL collation_server = 'utf8mb4_unicode_ci'";
  mysql --host 127.0.0.1 --port 3306 -u root -e "CREATE DATABASE test_frappe_consumer";
  mysql --host 127.0.0.1 --port 3306 -u root -e "CREATE USER 'test_frappe_consumer'@'localhost' IDENTIFIED BY 'test_frappe_consumer'";
  mysql --host 127.0.0.1 --port 3306 -u root -e "GRANT ALL PRIVILEGES ON \`test_frappe_consumer\`.* TO 'test_frappe_consumer'@'localhost'";
  mysql --host 127.0.0.1 --port 3306 -u root -e "CREATE DATABASE test_frappe_producer";
  mysql --host 127.0.0.1 --port 3306 -u root -e "CREATE USER 'test_frappe_producer'@'localhost' IDENTIFIED BY 'test_frappe_producer'";
  mysql --host 127.0.0.1 --port 3306 -u root -e "GRANT ALL PRIVILEGES ON \`test_frappe_producer\`.* TO 'test_frappe_producer'@'localhost'";
  mysql --host 127.0.0.1 --port 3306 -u root -e "UPDATE mysql.user SET Password=PASSWORD('travis') WHERE User='root'";
  mysql --host 127.0.0.1 --port 3306 -u root -e "FLUSH PRIVILEGES";
fi
if [ "$DB" == "postgres" ];then
  echo "travis" | psql -h 127.0.0.1 -p 5432 -c "CREATE DATABASE test_frappe_consumer" -U postgres;
  echo "travis" | psql -h 127.0.0.1 -p 5432 -c "CREATE USER test_frappe_consumer WITH PASSWORD 'test_frappe'" -U postgres;
  echo "travis" | psql -h 127.0.0.1 -p 5432 -c "CREATE DATABASE test_frappe_producer" -U postgres;
  echo "travis" | psql -h 127.0.0.1 -p 5432 -c "CREATE USER test_frappe_producer WITH PASSWORD 'test_frappe'" -U postgres;
fi
# disable the Procfile entries CI does not need (watchers, schedulers;
# socketio only for server runs)
cd ./frappe-bench || exit
sed -i 's/^watch:/# watch:/g' Procfile
sed -i 's/^schedule:/# schedule:/g' Procfile
if [ "$TYPE" == "server" ]; then sed -i 's/^socketio:/# socketio:/g' Procfile; fi
if [ "$TYPE" == "server" ]; then sed -i 's/^redis_socketio:/# redis_socketio:/g' Procfile; fi
if [ "$TYPE" == "ui" ]; then bench setup requirements --node; fi
# install node-sass which is required for website theme test
cd ./apps/frappe || exit
yarn add node-sass@4.13.1
cd ../..
bench start &
bench --site test_site reinstall --yes
if [ "$TYPE" == "server" ]; then bench --site test_site_producer reinstall --yes; fi
bench build --app frappe
| true
|
5ed58305ade1b30c76fa09bd442a857afe7dfc81
|
Shell
|
nc-lot/dotfiles
|
/bin/git-diff-decorate.sh
|
UTF-8
| 241
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Wrapper to select the best available diff highlighting option and exec
# it with this script's arguments.

# `command_exists` is normally supplied by the surrounding dotfiles
# environment; define a stdlib fallback so the wrapper also works when
# invoked standalone (previously every probe printed "command not found"
# and the script always fell through to cat).
if ! type command_exists >/dev/null 2>&1; then
  command_exists() { command -v "$1" >/dev/null 2>&1 ; }
fi

if command_exists 'diff-so-fancy'; then
  exec diff-so-fancy "$@"
elif command_exists 'diff-highlight'; then
  exec diff-highlight "$@"
else
  exec cat "$@"
fi
| true
|
53b2a0adee4d485f513b2dda321c3ca031d047f4
|
Shell
|
dc3671/lkp-tests
|
/bin/rsync-rootfs
|
UTF-8
| 1,469
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/sh
# This script is used to sync a remote-rootfs directory to local
# disk partition, in order to sync multiple rootfs, we use the
# btrfs as the local disk partition filesystem, for its subvolume
# feature.
#
# Currently, only support NFS format remote-rootfs as the first
# parameter, but easy to add more remote-rootfs format support
# in future if we need.
# eg: inn:/export/debian-full-x86_64
source=$1
[ -z "$source" ] && {
echo "Parameter is empty."
exit 1
}
# Basename of the export becomes the subvolume name.
rootfs_name=${source##*/}
# eg: /dev/disk/by-id/ata-WDC_WD1002FAEX-00Z3A0_WD-WCATRC577623-part2
dest_partition=$2
# Quoted test: the original unquoted form collapsed to '[ ! -b ]' when
# the second argument was missing, evaluated false, and the script went
# on to run mkfs with an empty device name.
[ ! -b "$dest_partition" ] && {
echo "Partition ${dest_partition} can't be found!"
exit 1
}
# (Re)format as btrfs unless the partition already carries btrfs.
fs_type=$(blkid -o value -s TYPE "$dest_partition")
[ "$fs_type" != "btrfs" ] && {
mkfs.btrfs -f "$dest_partition" || exit
}
mkdir -p /opt/rootfs || exit
mount "$dest_partition" /opt/rootfs || exit
# for CACHE_DIR
mkdir -p /opt/rootfs/tmp || exit
# After create, the subvolume can be used like:
# mount -o subvol=$rootfs_name $dest_partition /some-path
[ ! -d "/opt/rootfs/${rootfs_name}" ] && {
btrfs subvolume create "/opt/rootfs/$rootfs_name" || exit
}
# create NFS mount directory
nfs_mount_rootfs=/opt/rootfs/NFS-${rootfs_name}
[ -d "$nfs_mount_rootfs" ] || mkdir -p "$nfs_mount_rootfs" || exit
# mount NFS to local and rsync to destination subvolume
mount "$source" "$nfs_mount_rootfs" || exit
rsync -aix --delete "$nfs_mount_rootfs"/ "/opt/rootfs/$rootfs_name" || exit
| true
|
dfd75da11f64f8fd317b2742a15a83fc15480579
|
Shell
|
Jewel591/Privilege-Escalation
|
/checklinux/ffabcdef-2020-0514-2011-aaa340401710.sh
|
UTF-8
| 2,531
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Generates and runs a throwaway shell script that creates a marker file
# /tmp/nsfocus_mod_tmp and copies onto it the permission bits of whichever
# boot-loader config exists on this host (grub.conf, grub2.cfg or
# lilo.conf), then hands control to a companion perl script.
# Arguments: an SU (or other high-privilege) user name and its password.
[ $# -ne 2 ] && {
echo "Usage: sh ffabcdef-2020-0514-2011-aaa340401710.sh <SU用户(SU或高权限用户)> <SU密码>";
exit 1;
}
# Get the current working directory (used to locate the perl script).
pathname=`pwd`
# Build the generated script line by line. Each echo writes one line of
# shell code into the temp file; escaped $ and backquotes are expanded
# later, when the generated script runs, not here.
echo "touch /tmp/nsfocus_mod_tmp;">/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo "chmod 777 /tmp/nsfocus_mod_tmp;">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo "if [ -f \"/etc/grub.conf\" ];then">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " grub_mod=\`ls -l /etc/grub.conf | grep 'l[r-][w-][x-]'\`;">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " if [ -z \"\$grub_mod\" ];then">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " grub_mod=\`ls -l /etc/grub.conf\`;">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " chmod --reference=/etc/grub.conf /tmp/nsfocus_mod_tmp;">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " else">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " grub_mod=\`ls -l /boot/grub/grub.conf\`;">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " chmod --reference=/boot/grub/grub.conf /tmp/nsfocus_mod_tmp;">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " fi">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo "elif [ -f \"/boot/grub/grub.conf\" ];then">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " grub_mod=\`ls -l /boot/grub/grub.conf\`;">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " chmod --reference=/boot/grub/grub.conf /tmp/nsfocus_mod_tmp;">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo "elif [ -f \"/etc/lilo.conf\" ];then">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " grub_mod=\`ls -l /etc/lilo.conf\`;">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " chmod --reference=/etc/lilo.conf /tmp/nsfocus_mod_tmp;">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo "elif [ -f \"/etc/grub2.cfg\" ];then">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " grub_mod=\`ls -l /etc/grub2.cfg | grep 'l[r-][w-][x-]'\`;">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " if [ -z \"\$grub_mod\" ];then">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " grub_mod=\`ls -l /etc/grub2.cfg\`;">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " chmod --reference=/etc/grub2.cfg /tmp/nsfocus_mod_tmp;">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " else">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " grub_mod=\`ls -l /boot/grub2/grub.cfg\`;">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " chmod --reference=/boot/grub2/grub.cfg /tmp/nsfocus_mod_tmp;">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo " fi">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
echo "fi">>/tmp/NSF{nsf_tm}_nsfocus_grub_tmp
# Run the generated script.
sh /tmp/NSF{nsf_tm}_nsfocus_grub_tmp
# Run the companion perl script with the SU user and password.
perl $pathname/ffabcdef-2020-0514-2011-aaa340401710.pl "${1}" "${2}"
| true
|
27752dcc605d4507a81f5dfc02ba6bf919ee6400
|
Shell
|
KevinKbyte/.dotfiles
|
/i3/scripts/kill_cpu_hog.sh
|
UTF-8
| 977
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Kill the single most CPU-hungry process when it exceeds a threshold,
# logging what was killed to /tmp/killed_hog.log.
# https://unix.stackexchange.com/questions/13968/show-top-five-cpu-consuming-processes-with-ps
# Top shows current CPU usage
# Ps shows avg over time
# https://unix.stackexchange.com/questions/58539/top-and-ps-not-showing-the-same-cpu-result

# Numeric, float-aware greater-than. BUG FIX: bash's [[ a > b ]] compares
# strings lexicographically, so e.g. "9.5" > "75" was true while
# "100" > "75" was false; CPU percentages are floats, so delegate to awk.
num_gt() {
    awk -v a="$1" -v b="$2" 'BEGIN { exit !(a > b) }'
}

ps_most_cpu_intensive_process=$(ps aux | sort -nrk 3,3 | head -n 1)
top_most_cpu_intensive_process=$(top -b -n 1 | grep -v grep | sort -nk9,9 | tail -2 | head -1)

# Column 3 of ps aux / column 9 of top is %CPU.
ps_cpu_usage=$(echo $ps_most_cpu_intensive_process | awk '{print $3}')
top_cpu_usage=$(echo $top_most_cpu_intensive_process | awk '{print $9}')

ps_benchmark=75
top_benchmark=300
TMP_LOG=/tmp/killed_hog.log

if num_gt "$ps_cpu_usage" "$ps_benchmark"; then
    kill $(echo $ps_most_cpu_intensive_process | awk '{print $2}')
    echo ps $ps_most_cpu_intensive_process > $TMP_LOG
elif num_gt "$top_cpu_usage" "$top_benchmark"; then
    kill $(echo $top_most_cpu_intensive_process | awk '{print $1}')
    echo top $top_most_cpu_intensive_process > $TMP_LOG
fi
| true
|
837825919a3df17de4d20718518c9ee259426f48
|
Shell
|
roblanf/euc_qc
|
/qc.sh
|
UTF-8
| 3,215
| 3.3125
| 3
|
[] |
no_license
|
# Basic quality control for mapping PE illumina data to a distant reference
# Rob Lanfear, December 2016
# Pipeline: bbduk adapter/quality trimming -> fastqc on raw+trimmed reads
# -> NGM mapping -> qualimap/indexcov coverage QC -> multiqc summary.
# A few things to set before you go
inputf="/disks/dacelo/data/raw_data/Project_SN7001117R_0083_CKulheim_LBronham_Melaleuca/"
outputbase="/disks/dacelo/data/QC/test/"
ref="/disks/dacelo/data/raw_data/active_refs/Emel.fa.gz" # reference file as a fasta
gff="/disks/dacelo/data/raw_data/active_refs/Egrandis_genes_chr1_to_chr11.gff3"
adaptors="/disks/dacelo/data/programs/bbmap/resources/adapters.fa"
threads=50 # number of threads to use
minlen=50 # minimum length of read to keep after trimming
trimq=0 # trim bases with quality < this
# set up dirs
outputrawqc=$outputbase"rawqc/"
outputtrimqc=$outputbase"trimmedqc/"
outputtrimreads=$outputbase"trimmed_reads/"
ngmout=$outputbase"ngm/"
indexcov=$outputbase"indexcov/"
# NOTE(review): plain mkdir (no -p) errors if the dirs already exist —
# this assumes a fresh output tree for every run; confirm before rerunning.
mkdir $outputbase
mkdir $ngmout
mkdir $outputrawqc
mkdir $outputtrimqc
mkdir $outputtrimreads
# run bbduk on all pairs of samples
echo "Trimming with bbduk"
for in1 in $(find $inputf -name "*R1_001.fastq.gz"); do
# Derive the mate (R2) path and the trimmed-output names from the R1 path.
in2=${in1%%R1_001.fastq.gz}"R2_001.fastq.gz"
echo "running bbduk on"
echo $in1
echo $in2
f1=$(basename ${in1%%R1_001.fastq.gz}"R1_001_trimmed.fastq.gz")
f2=$(basename ${in1%%R1_001.fastq.gz}"R2_001_trimmed.fastq.gz")
out1=$outputtrimreads$f1
out2=$outputtrimreads$f2
sampleid=$outputtrimreads${f1%%R1_001_trimmed.fastq.gz}
bbduk.sh in1=$in1 in2=$in2 out1=$out1 out2=$out2 minlen=$minlen k=25 mink=8 ktrim=r ref=$adaptors hdist=1 overwrite=f qtrim=rl trimq=$trimq t=$threads bhist=$sampleid"bhist.txt" qhist=$sampleid"qhist.txt" gchist=$sampleid"gchist.txt" aqhist=$sampleid"aqhist.txt" lhist=$sampleid"lhist.txt" > $sampleid"bbduk_log.txt"
done
# run fastqc on all the raw and trimmed data files
echo "Running fastqc"
find $inputf -name '*.fastq.gz' | xargs fastqc -o $outputrawqc -t $threads
find $outputtrimreads -name '*.fastq.gz' | xargs fastqc -o $outputtrimqc -t $threads
# map reads to E. grandis reference
# our trimmed files look like: RL41_S1_R1_001_trimmed.fastq.gz RL41_S1_R2_001_trimmed.fastq.gz
echo "Mapping to reference with NGM"
for in1 in $(find $outputtrimreads -name "*R1_001_trimmed.fastq.gz"); do
in2=${in1%%R1_001_trimmed.fastq.gz}"R2_001_trimmed.fastq.gz"
echo "mapping files: "
echo $in1
echo $in2
# output file setup
f1=$(basename $in1)
id=${f1%%R1_001_trimmed.fastq.gz}
outsamngm=$ngmout$id"trimmed.sam"
ngm -t $threads -p -r $ref -1 $in1 -2 $in2 -o $outsamngm
# Convert SAM -> BAM, sort in place, and drop the large intermediate SAM.
outbamngm=$ngmout$id"trimmed.bam"
samtools view -bS -@ $threads $outsamngm > $outbamngm
samtools sort -@ $threads $outbamngm -o $outbamngm
rm $outsamngm
echo "running qualimap on sorted bams"
# Two qualimap passes: genome-wide, then restricted to gene regions (gff).
outqualimap_all=$ngmout$id"qualimap_all/"
qualimap bamqc -bam $outbamngm -outdir $outqualimap_all -nt $threads -c
outqualimap_gff=$ngmout$id"qualimap_gff/"
qualimap bamqc -bam $outbamngm -outdir $outqualimap_gff -gff $gff -nt $threads -c
done
echo "indexing bams"
ls ${ngmout}*.bam | parallel "samtools index {}"
echo "running indexcov"
goleft indexcov --directory $indexcov --sex "" ${ngmout}"*.bam"
echo "running multiqc"
multiqc $outputbase -o $outputbase
| true
|
637ff128286378fede88199f8fca67a5ec322354
|
Shell
|
DragonDemonKiller/scriptz
|
/sysadmin/manpagestopdf.sh
|
UTF-8
| 2,523
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
#create a nice pdf file from manpages, system help files and such
#status: just started
#TODO: convert all _ in program names to " "
#define what manpages, files etc. we want
SYSADMIN_COMMANDS="addgroup cd mv cp rm du df rename touch delgroup adduser deluser groupadd groupdel useradd userdel dmsetup taskset insserv rcconf update-rc.d at lastb ls w who wtmp chage chmod chown chroot df dmesg getfacl setfacl install lsof lspci lsusb mkfs ntfsfix proc ps pstree top uptime vmstat signal man xargs faillog mktemp ltrace find locate update-alternatives pgrep bash htop fuser pkill killall atool tar gzip zip unrar 7z zcat zless logrotate ranger mount 7_signal pwd which service chattr logsave readlink free iostat sar mpstat pmap strace proc atop powertop whoami who id finger pinky multitail swatch tmux wall"
TEXTHANDLING_COMMANDS="awk cat cut grep head tail echo less sed tr wc ascii cmp comm column dirname fmt strings wdiff paste nl split sort shuf tee sponge readlink nl cmp"
MISC_COMMANDS="apropos inotifywait xclipboard xclip hier watch wmctrl yes pdfimages enscript ps2pdf chm2pdf pdfimages pdftotext pdfunite date xdg-open svn git pv gedit gnupg shuff dialog whiptail tzselect notify-send"
PACKAGEMANAGEMENT_COMMANDS="apt-cache apt-get aptitude dpkg dpkg-divert dpkg-reconfigure"
NET_COMMANDS="ssh-keygen ssh iptables iptables-apply iptables-restore iptables-save ufw smb.conf ssh ssh_config sshd sshd_config arpspoof dig ping mtr traceroute netstat nmap ss curl wget rsync wireshark tshark transmission-daemon iceweasel torify ifconfig ip route nslookup iptraf tcpdump ntop vnstat iftop nethogs ngrep bmon jnettop sshfs"
MEDIA_COMMANDS="youtube-dl cclive ffmpeg mplayer gimp vlc scrot feh"
TEXT_FILES="/etc/services /proc/filesystems"
HELP_COMMANDS="disown read set jobs bg fg pushd popd alias declare export local trap"
# Collect everything in a dedicated directory under $HOME.
mkdir ~/manpagestopdf/
cd ~/manpagestopdf/
# Man pages: render to PostScript (man -t) and convert to one PDF each.
for page in $SYSADMIN_COMMANDS $TEXTHANDLING_COMMANDS $MISC_COMMANDS $PACKAGEMANAGEMENT_COMMANDS $NET_COMMANDS $MEDIA_COMMANDS; do man -t $page | ps2pdf - -> $page.pdf; done
# Plain text files: enscript -> PostScript -> PDF, named by basename.
for page in $TEXT_FILES; do enscript -p - $page | ps2pdf - -> `basename $page`.pdf; done
# Shell builtins have no man page; capture 'help' output instead.
for page in $HELP_COMMANDS; do help $page | enscript -p - | ps2pdf - -> $page.pdf; done
#man -t $COMMAND | ps2pdf - -> $COMMAND.pdf
#enscript to convert text files to ps
#ps2pdf to convert ps files to pdf
#chm2pdf --book or --continuous to convert chm files
#pdfunite to merge pdf files
#How do I create a table of contents? a pdf index?
#apropos -e $command
| true
|
5ee563800654b0a7578be57c6f09a090f3221d9c
|
Shell
|
kowonsik/RPiLogger
|
/code_up_down.sh
|
UTF-8
| 390
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
#Author: jeonghoonkang http://github.com/jeonghoonkang
# Usage: code_up_down.sh up|co
#   up - stage, commit and push local changes
#   co - pull the latest changes from origin/master

# "$1" is quoted: the original unquoted test was a syntax error
# ("unary operator expected") whenever the script ran with no argument.
if [ "$1" = 'up' ]; then
    echo "... updating"
    #git add sect_serial_ttyUSB0.py
    git add ./
    git commit -m "by wonsik"
    git push -u origin master
elif [ "$1" = 'co' ]; then
    echo "... installing"
    # BUG FIX: 'git pull' has no -u option (-u/--set-upstream belongs to
    # git push); the original command always failed.
    git pull origin master
else
    echo "... do nothing for code install / update"
fi
| true
|
064a0798fefc8ad5e775cec2e9473e726e215c0b
|
Shell
|
Seeed-Studio/MotorBridgeCapeFirmware
|
/script/check_tools.sh
|
UTF-8
| 283
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/sh
# Ensure the bundled ARM cross-compiler toolchain is unpacked under ./tools.
toolchain_gcc=./tools/gcc-arm-none-eabi-4_9-2015q3/bin/arm-none-eabi-gcc
if [ -f "$toolchain_gcc" ]; then
	echo "tools is ok!"
	echo "******************************************************"
else
	tar xvf ./tools/gcc-arm-none-eabi-4_9-2015q3-20150720-linux.tar.bz2 -C ./tools
fi
| true
|
9152fa44595f52022065ffc26c99045f2c0da0e5
|
Shell
|
Seebass22/bashscripts
|
/copysong.sh
|
UTF-8
| 194
| 3.25
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Copy the currently playing MPD song to the current directory.
# Song paths regularly contain spaces, so every expansion is quoted
# (the original unquoted 'echo $fullpath' word-split and globbed).
path="$(mpc status -f %file% | head -n1)"
fullpath="/mnt/D/music/$path"
echo "$fullpath"
# Only copy when mpc actually reported a playing file.
if [ -n "$path" ]; then
	cp "$fullpath" ./
fi
| true
|
00db296c4f8d2201bd65145eee3da62af5cf4d0b
|
Shell
|
pycurl/downloads
|
/upload
|
UTF-8
| 1,134
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
# Upload release artifacts tracked in this git repo to Bintray, creating
# the package and version records on demand before each upload.
set -e
# Credentials must come from the environment, never hardcoded.
if test -z "$bintray_api_key"; then
echo 'bintray_api_key must be set'
exit 2
fi
# upload <path> <file>
#   path - repo-relative path; its directory name is used as the version
#   file - bare file name to publish
# Each 'curl -sfIL' is a cheap existence probe (HEAD request); on failure
# the parenthesised fallback creates the missing package/version first.
upload() {
path="$1"
file="$2"
repo=pycurl
package=pycurl
version=`dirname $path`
curl -sfIL https://api.bintray.com/packages/pycurl/$repo/$package || (
data=$(cat <<-EOT
{"name":"$package",
"licenses":["LGPL-2.1", "MIT"],
"vcs_url":"https://github.com/pycurl/pycurl"}
EOT
) &&
curl -sfd "$data" -uop:$bintray_api_key -Hcontent-type:application/json https://api.bintray.com/packages/pycurl/$repo
)
curl -sfIL https://api.bintray.com/packages/pycurl/$repo/$package/versions/$version || (
data=$(cat <<-EOT
{"name":"$version"}
EOT
) &&
curl -sfd "$data" -uop:$bintray_api_key -Hcontent-type:application/json https://api.bintray.com/packages/pycurl/$repo/$package/versions
)
# Upload the file and publish it immediately (?publish=1).
curl -sfT $path -uop:$bintray_api_key https://api.bintray.com/content/pycurl/$repo/$package/$version/$file'?publish=1'
}
# Artifacts are the tracked files whose names start with '7'; upload any
# that are not already downloadable.
for path in `git ls-files |grep ^7`; do
file=`basename $path`
echo $file
curl -sfIL "https://dl.bintray.com/pycurl/pycurl/$file" || upload $path $file
done
| true
|
7f1d337a42e401e293d55ef1a6566d2757a5344b
|
Shell
|
keckelt/playground
|
/sh/public_ip/watch_ip_change.wget.sh
|
UTF-8
| 350
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/sh
# Track the machine's public IP (via ifconfig.me) and rewrite the state
# file whenever it changes.
file="./old_ip.txt"
# Seed the state file on first run; quoted test (the unquoted original
# misbehaved if $file were empty) and $( ) instead of backticks.
if [ ! -f "$file" ]
then
	echo "Create file to store IP: $file"
	wget -q -O - ifconfig.me > "$file";
fi
OLD_IP=$(head -n 1 "$file")
echo "Old IP is: $OLD_IP"
PUBLIC_IP=$(wget -q -O - ifconfig.me)
echo "New IP is: $PUBLIC_IP"
# Persist the new address only when it differs.
if [ "$OLD_IP" != "$PUBLIC_IP" ]
then
	echo "Update $file"
	echo "$PUBLIC_IP" > "$file"
fi
| true
|
432d50540ee5389240f8a93bb32b81c8794e4392
|
Shell
|
hellhappy1982/shell_scripts
|
/m01-61/sent_dsa.sh
|
UTF-8
| 353
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Source function library.
. /etc/rc.d/init.d/functions
# Push the local DSA public key to every host given on the command line,
# reporting success/failure with the init.d 'action' helper.
# Usage: sent_dsa.sh <ip> [<ip> ...]   (assumes root password 123456)
#sent dsa
# "$@" instead of $*: preserves each argument as one word.
for ip in "$@"
do
	# Test the command directly instead of the 'cmd; if [ $? -eq 0 ]'
	# antipattern, and quote the host everywhere.
	if sshpass -p123456 ssh-copy-id -i /root/.ssh/id_dsa.pub "-o StrictHostKeyChecking=no root@$ip" &>/dev/null
	then
		action "$(ssh "$ip" hostname)" /bin/true
	else
		action "$(ssh "$ip" hostname)" /bin/false
	fi
done
| true
|
b8ebdedc0068a28bd76e48d5a94f8f1f6e8f99b4
|
Shell
|
papaspiro/lrnfast
|
/backend/app/prestart.sh
|
UTF-8
| 287
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# Container prestart: wait for the database, run migrations and seed
# data, then hand control to the requested command.
echo "Waiting for postgres..."

while ! nc -z psql 5432; do
  sleep 0.1
done

echo "PostgreSQL started"

# Let the DB start
python ./app/backend_pre_start.py

# Run migrations
alembic upgrade head

# Create initial data in DB
python ./app/initial_data.py

# BUG FIX: 'exec "$@"' previously appeared right after the wait loop;
# exec replaces the shell, so the three steps above were unreachable.
# It must be the last statement.
exec "$@"
| true
|
1e57e9b17e44a3e0e5a7a852b18a326fec3141ae
|
Shell
|
hchou1226/RNAdetector
|
/scripts/compute_md5.sh
|
UTF-8
| 781
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
##############################################################################
# Compute the MD5 checksum of a file and store it beside it as <file>.md5.
# Options:
#   -i input file
##############################################################################

# Print a message to stderr and exit with the given status code.
exit_abnormal() {
  printf '%s\n' "$1" 1>&2
  exit "$2"
}

while getopts ":i:" opt; do
  case $opt in
    i) INPUT_FILE=$OPTARG ;;
    \?) exit_abnormal "Invalid option: -$OPTARG" 1 ;;
    :) exit_abnormal "Option -$OPTARG requires an argument." 2 ;;
  esac
done

#### Check parameters ####
# The input file must be given and must exist.
if [ -z "$INPUT_FILE" ] || [ ! -f "$INPUT_FILE" ]; then
  exit_abnormal "Input file does not exist!" 3
fi

OUTPUT_FILE="${INPUT_FILE}.md5"
if ! md5sum "$INPUT_FILE" > "$OUTPUT_FILE"; then
  exit_abnormal "Unable to compute MD5" 6
fi
chmod 777 "$OUTPUT_FILE"
| true
|
53042eaba869ffa4157cac8d9c26c47245547c40
|
Shell
|
StanislavNikolov/dotfiles
|
/change_colors.sh
|
UTF-8
| 499
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Toggle between light and dark alacritty/nvim themes. The current theme
# name ("dark" or "light") persists in ~/.stjo_theme.
dotfiles="$HOME/dotfiles"

# -f: do not fail when the config has not been created yet.
rm -f ~/.config/alacritty/alacritty.yml

# Quoted command substitution: with the original unquoted form an empty
# or missing theme file made the [ ] test error out. A missing file now
# falls through to the else branch (defaults to dark).
if [ "$(cat ~/.stjo_theme 2>/dev/null)" = "dark" ]; then
	# currently in dark mode
	echo "light" > ~/.stjo_theme
	cp "$dotfiles/alacritty_light.yml" ~/.config/alacritty/alacritty.yml
else
	echo "dark" > ~/.stjo_theme
	cp "$dotfiles/alacritty_dark.yml" ~/.config/alacritty/alacritty.yml
fi

# Ask every running neovim instance (socket dirs under /tmp) to reload.
for dir in $(find /tmp -maxdepth 1 -user stjo -name 'nvim*'); do
	nvr --servername "$dir/0" --remote-send ':so ~/.config/nvim/init.vim<cr>'
done
| true
|
1a505bee55a3a9f766d4cf96e250042091e0113b
|
Shell
|
sfc-aqua/quisp
|
/docker_run.sh
|
UTF-8
| 991
| 3.546875
| 4
|
[
"BSD-3-Clause",
"MPL-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
#!/bin/sh
# Launch the quisp development container with X11 forwarding on macOS
# and Linux.
set -eu

# Allow the caller to override the host IP used for DISPLAY forwarding.
if [ -z "${IP-}" ]; then
    IP=$(ifconfig en0 | grep inet | awk '$1=="inet" {print $2}')
fi

if [ "$(uname)" = 'Darwin' ]; then
    # BUG FIX: under 'set -u' a bare "$DISPLAY" aborted with "unbound
    # variable" before the friendly message could print; ${DISPLAY-}
    # substitutes empty instead.
    if [ -z "${DISPLAY-}" ]; then
        echo "Can't detect X11 display. If you don't know how to resolve this, please check ./doc/xhost_trouble_shooting.md"
        exit 1
    else
        # xterm
        xterm -e "$(xhost "$IP")"
    fi
    # Bridge TCP port 6000 to the local X11 socket for the container.
    socat TCP-LISTEN:6000,reuseaddr,fork UNIX-CLIENT:\""$DISPLAY"\" &
    docker run --privileged --rm -it -v "$(pwd):/root/quisp" -w /root/quisp --name quisp -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY="$IP:0" ghcr.io/sfc-aqua/quisp bash
    # Kill the socat bridge when the script exits.
    trap "lsof -i:6000 -t|xargs kill" 0
elif [ "$(uname -s | cut -c 1-5)" = 'Linux' ]; then
    docker run --privileged --rm -it -v "$(pwd):/root/quisp" -w /root/quisp -u "$(id -u):$(id -g)" --name quisp -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY="$IP:0" ghcr.io/sfc-aqua/quisp bash
else
    echo "Your platform ($(uname -s)) is not supported."
    exit 1
fi
| true
|
217a52d3ca5d15baf9a9ed50e67bd805d8e71d37
|
Shell
|
CharlesDDNoble/broncode
|
/nginx_uwsgi/tests/setup.sh
|
UTF-8
| 493
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install the broncode nginx site configuration and restart nginx.

BASE_DIR=$(cd ../..; pwd)
NGINX_CONF="./broncode_nginx.conf"
NGINX_CONF_TEMP="./broncode_nginx_template.conf"

# Create the real config by substituting BASE_DIR into the template.
# Plain '>' redirection: the original 'cat … | sed … >&' merged stderr
# into the generated file, so any sed diagnostic corrupted the config.
sed "s,BASE_DIR,$BASE_DIR,g" "$NGINX_CONF_TEMP" > "$NGINX_CONF"

# MOVING FILES
sudo cp ./uwsgi_params /etc/nginx
sudo cp ./broncode_nginx.conf /etc/nginx/sites-available/
sudo rm -f /etc/nginx/sites-enabled/broncode_nginx.conf
sudo ln -s /etc/nginx/sites-available/broncode_nginx.conf /etc/nginx/sites-enabled/

#restart nginx
sudo /etc/init.d/nginx restart
| true
|
e824d48d7b8af0963f8a3bef7b1e991ea5e430ff
|
Shell
|
bartash/scripts
|
/get_latest_coordinator_namespace
|
UTF-8
| 238
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Print the namespace of the most recently created running coordinator pod.
#
K8S_NAMESPACE=$(
  kubectl get pods -A --sort-by=.metadata.creationTimestamp \
    | grep coordinator \
    | grep Running \
    | awk '{print $1}' \
    | tail -1
)
echo ${K8S_NAMESPACE}
| true
|
2f250a311cda89633c9c60816a721ca08551b7c3
|
Shell
|
ghuntley/dotfiles-retired
|
/homebrew/install.sh
|
UTF-8
| 771
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# Homebrew
#
# This installs some of the common dependencies needed (or at least desired)
# using Homebrew.

# Check for Homebrew. 'command -v' replaces the fragile unquoted
# 'test ! $(which brew)', which only worked by accident ('test !' with a
# missing operand happens to evaluate true) and relies on deprecated which.
if ! command -v brew >/dev/null 2>&1
then
  echo "  Installing Homebrew for you."
  ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi

# Install homebrew packages
brew install ack archey aria2 autoconf automake boost docker dos2unix elinks freetype gdbm gettext ghostscript git htop-osx imagemagick jbig2dec jpeg jsonpp libevent libpng libtiff libtool little-cms2 makedepend md5sha1sum mercurial moreutils ncftp nginx nmap node openssl p7zip pcre pkg-config proxychains-ng python qt readline siege sloccount sqlite ssh-copy-id tmux tor tree unrar vim wget youtube-dl zsh grc coreutils spark

exit 0
| true
|
f496c0efecad68a30395fa19cdb88cc7c9e10063
|
Shell
|
eda53/my-misc
|
/Linux/arm-ldd
|
UTF-8
| 238
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
# ldd-like listing of the shared-library dependencies of an ARM ELF file.
# Usage: arm-ldd <elf-file>

# BUG FIX: 'return' is only valid inside a function or a sourced script;
# at script top level it is an error, so use 'exit 1'.
[ -z "$1" ] && echo "file is needed;" && exit 1

# Prefer the none-linux-gnueabi readelf, fall back to the hf toolchain.
# Stdout is silenced too: the probe used to print readelf's version text
# into the script's output.
READELF=arm-none-linux-gnueabi-readelf
$READELF -v >/dev/null 2>&1 || READELF=arm-linux-gnueabihf-readelf

# Extract the library name from lines like: (NEEDED) Shared library: [libc.so.6]
$READELF -a "$1" | grep "Shared library:" | cut -f 2 -d '[' | cut -f 1 -d ']'
| true
|
9b04c52761a05c44fd250f265716dedb7d006867
|
Shell
|
anupama-sinha/unix-repo
|
/recursiveMergeSort.sh
|
UTF-8
| 1,790
| 3.515625
| 4
|
[] |
no_license
|
#Define global variables
# A - the list being sorted (1-indexed); T - scratch buffer used by Merge
declare -a A
declare -a T
ReadList()
{
if [ $n -le 0 ]
then
echo "Invalid size"
return
else
echo "Enter input list of size $n:"
i=1
while [ $i -le $n ]
do
read A[$i]
i=`expr $i + 1`
done
return
fi
}
# Print the first n elements of the global 1-indexed array A on a single
# line, space-separated and without a trailing newline; prints an error
# for a non-positive n. (echo -e "... \c" suppresses the newline.)
WriteList()
{
if [ $n -le 0 ]
then
echo "Invalid size"
return
fi
i=1
while [ $i -le $n ]
do
echo -e "${A[$i]} \c"
i=$((i + 1))
done
return
}
Merge()
{
# Merge two adjacent sorted runs A[L1..H1] and A[L2..H2] (1-indexed,
# L2 == H1+1) of the global array A, using the global scratch array T,
# writing the merged result back into A[L1..H2].
# 'declare' inside a function creates function-local variables.
# Arithmetic expansion $(( )) replaces the original 'expr' calls, which
# forked a process per increment.
declare -i L1=$1
declare -i H1=$2
declare -i L2=$3
declare -i H2=$4
declare -i i
declare -i j k=1
i=$L1
j=$L2
# Step 2: merge while both runs are non-empty, taking the smaller head.
# (Two [ ] tests joined with && replace the deprecated '-a' operator.)
while [ $i -le $H1 ] && [ $j -le $H2 ]
do
if [ ${A[$i]} -le ${A[$j]} ]
then
T[$k]=${A[$i]}
i=$((i + 1))
else
T[$k]=${A[$j]}
j=$((j + 1))
fi
k=$((k + 1))
done
# Step 3: copy any unmerged tail of the first run.
while [ $i -le $H1 ]
do
T[$k]=${A[$i]}
k=$((k + 1))
i=$((i + 1))
done
# Step 4: copy any unmerged tail of the second run.
while [ $j -le $H2 ]
do
T[$k]=${A[$j]}
k=$((k + 1))
j=$((j + 1))
done
# Step 5: copy the merged result from T back into A[L1..H2].
i=$L1
k=1
while [ $i -le $H2 ]
do
A[$i]=${T[$k]}
k=$((k + 1))
i=$((i + 1))
done
}
MergeSort()
{
# Recursively sort A[L..H] (1-indexed) in place: sort each half, then
# call Merge() to combine them. Arithmetic expansion replaces the
# original per-call 'expr' subprocesses.
declare -i L=$1
declare -i H=$2
declare -i Mid Mid1 L1 L2 H1 H2
if [ $L -lt $H ]
then
Mid=$(( (L + H) / 2 ))
#echo "L=$L H=$H Mid=$Mid"
MergeSort $L $Mid
Mid1=$((Mid + 1))
MergeSort $Mid1 $H
L1=$L
L2=$((Mid + 1))
H1=$Mid
H2=$H
Merge $L1 $H1 $L2 $H2
fi
}
#main
# Driver: read a list from stdin, show it, sort it with MergeSort, show it
# again. NOTE(review): ReadList/WriteList ignore the $n they are passed
# and read the global n directly.
tput clear
#tput cup 10 10
#read the size of the list
echo -e "Enter n:\c"
read n
#read the list
ReadList $n
#Show Unsorted list
echo -e "\nUnsorted List:\c"
WriteList $n
#call merge sort
MergeSort 1 $n
#Show sorted list
echo -e "\nSorted List:\c"
WriteList $n
echo
echo "End"
| true
|
e38fff4ec1473ab95bfefe53558215b97199a5fe
|
Shell
|
Tuuuuuuurk/Unix
|
/Дороничев-Тедерсон Д.Ю. (Unix)/backup/backup.sh
|
UTF-8
| 211
| 2.53125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Export every gitea/jenkins/openproject container to a dated tar archive.
echo "Runing backup..."
container_ids=$(docker ps --filter "name=gitea|jenkins|openproject" | tail -n+2 | awk '{ print $1 }')
for cid in $container_ids; do
  docker export -o "${cid}-ci-$(date +"%m-%d-%Y").tar" "${cid}"
done
| true
|
54345b6e42e166f0ebf4b52aa479f40d1753d970
|
Shell
|
ZhaoXinlong/ChainBridge
|
/scripts/setupKeyStore.sh
|
UTF-8
| 374
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Import the two relayer chain keys into the chainbridge keystore,
# skipping any key whose keyfile already exists.

# Load relayer env (CH1_ADDR, CH1_PK, CH2_ADDR, CH2_PK,
# KEYSTORE_PASSWORD); set -a exports everything sourced.
set -a; . ./env/relayer.env; set +a;

# Guard the cd so 'make build' cannot run in the wrong directory.
cd ./relayer || exit 1
make build;

# Quote the secrets so special characters cannot word-split or glob.
SPKFILE=keys/"$CH1_ADDR".key
if [ ! -f "$SPKFILE" ]; then
    ./build/chainbridge accounts import --privateKey "$CH1_PK" --password "$KEYSTORE_PASSWORD"
fi

DPKFILE=keys/"$CH2_ADDR".key
if [ ! -f "$DPKFILE" ]; then
    ./build/chainbridge accounts import --privateKey "$CH2_PK" --password "$KEYSTORE_PASSWORD"
fi
| true
|
a7aa401594ddf31a47ad5f314accc8b2f1e1aa55
|
Shell
|
cdbbnnyCode/AdventOfCode-2018
|
/day07/part2.sh
|
UTF-8
| 1,763
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Advent of Code 2018 day 7 part 2: schedule dependency-ordered steps
# across 5 parallel workers, where step X takes delays[X] seconds.
source ../fetch.sh
# awk '$2$8' concatenates the prerequisite letter and the dependent step.
input=( $(fetch 7 | awk '{print $2$8}') )
letters=( {A..Z} )
# deps[step] = comma-joined list of prerequisite steps.
declare -A deps
declare -A allsteps
for v in ${input[@]}; do
  step=${v:1:2}
  req=${v:0:1}
  echo "$req is required by $step"
  deps[$step]=${deps[$step]}$req,
  allsteps[$step]=1
  allsteps[$req]=1
done
# delays[letter] = 60 + ordinal position in the alphabet (A=61 .. Z=86).
declare -A delays
i=61
for l in ${letters[@]}; do
  delays[$l]=$i
  (( i += 1 ))
done
remaining=( ${!allsteps[@]} )
has=( )
elapsed=0
# One slot per worker: 0 = idle, otherwise seconds left on its task.
workers=( 0 0 0 0 0 )
tasks=( )
# Tick one second per iteration until all steps are dispatched and done.
while [ ${#remaining[@]} -gt 0 -o ${#tasks[@]} -gt 0 ]; do
  # echo "Time: ${elapsed}s"
  # Dispatch every step whose prerequisites are all completed; comm -23
  # yields the dependencies not yet present in 'has'.
  for ltr in ${remaining[@]}; do
    depstring=$(echo ${deps[$ltr]} | tr ',' $'\n')
    compstring=$(echo ${has[@]} | tr ' ' $'\n')
    depend=( $(comm -23 <(echo "$depstring" | sort) <(echo "$compstring" | sort)) )
    if [ ${#depend[@]} -eq 0 ]; then
      # echo "  $ltr available for processing"
      for wid in ${!workers[@]}; do
        if [ ${workers[$wid]} -eq 0 ]; then
          # echo "    Dispatching $ltr to worker $wid"
          tasks[$wid]=$ltr
          workers[$wid]=${delays[$ltr]}
          remaining=( "${remaining[@]/$ltr}" )
          remaining=( ${remaining[@]} )
          break
        fi
      done
    fi
  done
  # echo "  Remaining: ${remaining[@]}"
  printf "%4ds " $elapsed
  for (( ctr = 0; ctr < ${#workers[@]}; ctr++ )); do
    printf "%1s " ${tasks[$ctr]}
  done
  printf '\n'
  # Tick: decrement each busy worker; on reaching 0, mark its task done.
  for wid in ${!workers[@]}; do
    if [ ${workers[$wid]} -gt 0 ]; then
      (( workers[$wid] -= 1 ))
    fi
    if [ ${workers[$wid]} -le 0 ]; then
      if [ -n "${tasks[$wid]}" ]; then
        # echo "  Worker $wid completed task ${tasks[$wid]}"
        has+=( ${tasks[$wid]} )
        unset tasks[$wid]
      fi
    fi
  done
  (( elapsed += 1 ))
done
echo "$elapsed seconds total"
| true
|
97f062ce6be6f7699d5f3be1991bb679873b9539
|
Shell
|
lenhattan86/IRFimpl
|
/IRFMotivation/tf_analysis/scripts/multiple_runs.sh
|
UTF-8
| 472
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Run N copies of a TensorFlow script in parallel and collect each run's
# wall-clock time.
# Usage: multiple_runs.sh [num_parallel] [tf_script]

# ${1:-default} replaces the original if/else default-assignment blocks.
num_parrallel=${1:-1}
tf_file=${2:-"../benchmarks/linear_regression.py"}

>&2 echo "Running $num_parrallel applications in parallel..."
for i in $(seq 1 "$num_parrallel");
do
	>&2 echo "Starting application $i."
	FULL_COMMAND="python $tf_file"
	# TIMEFORMAT='%R' makes 'time' emit just the elapsed seconds.
	(TIMEFORMAT='%R'; time $FULL_COMMAND 2>application$i.log) 2> $i.time &
done
wait
# Collect per-run timings into one file named after the run count.
# (The original wrote times$i.txt, relying on the loop variable leaking
# out of the for loop; the explicit name is equivalent and intentional.)
cat *.time > "times${num_parrallel}.txt"
rm *.time
|
0d493fa86f98903bfa9c2c7ae6951bf621072ae3
|
Shell
|
farukomercakmak/debian-pipelight
|
/pipelight-installer-v1
|
UTF-8
| 8,534
| 3.6875
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
#
# Pipelight installer for Debian Wheezy (amd64)
#
# Copyright (C) 2013 Jari Jokinen
# URL: https://github.com/jarijokinen/debian-pipelight
# Versions to fetch and a scratch directory for downloads/builds.
pipelight_version="master"
wine_version="1.7.0"
installer_tmp_path="/tmp/pipelight-installer"
# Packages required before wine/pipelight can be built and run.
dependencies=(
wget
ia32-libs
ia32-libs-i386
lib32asound2
libc6-i386
lib32nss-mdns
libasound2-plugins
libc6-dev
libx11-dev
mingw-w64
g++-mingw-w64
sed
build-essential
libncurses5:i386
)
# Better not change these, yet...
# Download locations and install paths for wine-compholio, the pipelight
# sources and the Silverlight installer.
wine_path="/opt/wine-compholio"
wine_package="wine-compholio"
wine_arch="amd64"
wine_deb="${wine_package}_${wine_version}_${wine_arch}.deb"
wine_deb_path="${installer_tmp_path}/${wine_deb}"
wine_deb_url="http://www.compholio.com/wine-compholio/download.php?file=$wine_deb"
pipelight_path="/usr/lib/mozilla/plugins/libpipelight.so"
pipelight_src_path="${installer_tmp_path}/pipelight"
pipelight_src_package="${pipelight_version}.tar.gz"
pipelight_src_package_path="${installer_tmp_path}/${pipelight_src_package}"
pipelight_src_package_url="http://bitbucket.org/mmueller2012/pipelight/get/${pipelight_src_package}"
silverlight_installer_path="${installer_tmp_path}/silverlight-installer.exe"
silverlight_installer_url="http://www.microsoft.com/getsilverlight/handlers/getsilverlight.ashx"
# Terminal attribute sequences used by the formatted output helpers.
bold=`tput bold`
regular=`tput sgr0`
red=`tput setaf 1`
green=`tput setaf 2`
yellow=`tput setaf 3`
blue=`tput setaf 4`
# Print an "E: "-prefixed error message and abort the script with status 1.
function error {
  printf 'E: %s\n' "$1"
  exit 1
}
# Emit $2 wrapped in terminal attribute $1 (reset via $regular) when tput
# is available; otherwise fall back to plain text.
function formatted_text {
  if [[ ! -x "/usr/bin/tput" ]]; then
    echo "${2}"
  else
    echo "${1}${2}${regular}"
  fi
}
# Convenience wrappers around formatted_text, one per terminal attribute.
function bold   { formatted_text "$bold" "$1"; }
function red    { formatted_text "$red" "$1"; }
function green  { formatted_text "$green" "$1"; }
function yellow { formatted_text "$yellow" "$1"; }
function blue   { formatted_text "$blue" "$1"; }
if [[ $EUID -ne 0 ]]; then
error "this script must be run as root."
fi
if [[ ! -x /usr/bin/apt-get ]]; then
error "apt-get is missing."
fi
if [[ ! -x /usr/bin/dpkg ]]; then
error "dpkg is missing."
fi
echo
bold "System-wide setup"
echo
echo -n "Checking distribution... "
if [[ -f /etc/debian_version ]]; then
dist="debian"
green $dist
else
red "Unknown"
error "distribution not supported."
fi
echo -n "Checking distribution version... "
# Read the release string (e.g. "7.1") from the standard Debian marker file.
debian_version=$(cat /etc/debian_version)
# BUG FIX: the first comparison previously read the misspelled variable
# "$debian_verison" (always empty), so Wheezy 7.0 systems were wrongly
# rejected.  Both point releases of Wheezy are supported.
if [[ $debian_version == "7.0" || $debian_version == "7.1" ]]; then
  green $debian_version
else
  red $debian_version
  error "distribution version not supported."
fi
echo -n "Checking architecture... "
arch=`/usr/bin/dpkg --print-architecture`
if [[ $arch == "amd64" ]]; then
green $arch
else
red $arch
error "architecture not supported."
fi
echo -n "Checking foreign architecture... "
foreign_arch=`/usr/bin/dpkg --print-foreign-architectures | grep i386`
if [[ $foreign_arch == "i386" ]]; then
green $foreign_arch
else
bold "i386 missing, adding... "
/usr/bin/dpkg --add-architecture i386
/usr/bin/apt-get -qq update
echo -n "Checking foreign architecture... "
foreign_arch=`/usr/bin/dpkg --print-foreign-architectures | grep i386`
if [[ $foreign_arch == "i386" ]]; then
green $foreign_arch
else
error "adding foreign architecture i386 failed."
fi
fi
# Verify every package in $dependencies is installed, collecting absent
# ones into missing_packages.
missing_packages=()
for dependency in "${dependencies[@]}"; do
  echo -n "Checking $dependency... "
  if [[ `/usr/bin/dpkg -s $dependency 2> /dev/null` ]]; then
    green "installed"
  else
    missing_packages+=("$dependency")
    yellow "missing"
  fi
done
# Join the missing package names into one space-separated string for apt-get.
missing=$(printf " %s" "${missing_packages[@]}")
missing=${missing:1}
# Test the array element count rather than only the first element.
if (( ${#missing_packages[@]} > 0 )); then
  echo
  yellow "It seems that your system doesn't have all required packages"
  yellow "installed. You may install these packages by running apt-get"
  yellow "manually as a root, or let the installer run that for you."
  echo
  echo "The apt-get command to run is:"
  echo
  echo " apt-get --no-install-recommends install ${missing}"
  echo
  read -r -p \
    "Do you want to continue with the automatic installation? [y/N] " response
  case $response in
    [yY][eE][sS]|[yY])
      echo
      echo -n "Installing missing packages... "
      /usr/bin/apt-get -qqy --no-install-recommends install ${missing}
      ;;
    *)
      echo
      echo "Installation aborted by the user."
      # BUG FIX: previously the script fell through and kept installing
      # even after the user declined; abort for real.
      exit 1
      ;;
  esac
fi
echo -n "Checking temporary directory... "
if [[ -d $installer_tmp_path ]]; then
green "found"
else
mkdir -p $installer_tmp_path
green "created"
fi
echo -n "Checking wine... "
if [[ -d $wine_path ]]; then
green "found"
else
bold "missing"
echo -n "Checking wine binary package... "
if [[ -f $wine_deb_path ]]; then
green "found"
else
bold "missing, downloading..."
/usr/bin/wget -q $wine_deb_url -O $wine_deb_path
echo -n "Checking wine binary package... "
if [[ -f $wine_deb_path ]]; then
green "found"
else
error "downloading wine binary package failed."
fi
fi
echo -n "Installing wine binary package... "
/usr/bin/dpkg -i $wine_deb_path > /dev/null
green "done"
echo -n "Checking wine... "
if [[ -d $wine_path ]]; then
green "found"
else
error "installing wine binary package failed."
fi
fi
echo -n "Checking pipelight... "
if [[ -f $pipelight_path ]]; then
green "found"
else
bold "missing"
echo -n "Checking pipelight source... "
if [[ -d $pipelight_src_path ]]; then
green "found"
else
bold "missing, downloading..."
mkdir -p $pipelight_src_path
/usr/bin/wget -q $pipelight_src_package_url -O $pipelight_src_package_path
echo -n "Checking pipelight source package... "
if [[ -f $pipelight_src_package_path ]]; then
green "found"
else
error "downloading pipelight source failed."
fi
echo -n "Unpacking pipelight source package... "
/bin/tar --strip-components=1 -C $pipelight_src_path \
-xf $pipelight_src_package_path
green "done"
echo -n "Checking pipelight source... "
if [[ -d $pipelight_src_path ]]; then
green "found"
else
error "unpacking pipelight source failed."
fi
fi
echo -n "Compiling pipelight... "
old_directory=$OLDPWD
cd $pipelight_src_path
./configure > /dev/null
make > /dev/null
make install > /dev/null
cd $old_directory
if [[ -f $pipelight_path ]]; then
green "done"
else
error "compiling pipelight failed."
fi
fi
echo -n "Checking Silverlight installer... "
if [[ -f $silverlight_installer_path ]]; then
green "found"
else
bold "missing, downloading... "
ua='Mozilla/5.0 (Windows NT 6.1; rv:23.0) Gecko/20131011 Firefox/23.0'
/usr/bin/wget -q -U "$ua" \
$silverlight_installer_url -O $silverlight_installer_path
echo -n "Checking Silverlight installer... "
if [[ -f $silverlight_installer_path ]]; then
green "found"
else
error "downloading Silverlight installer failed."
fi
fi
echo
bold "User environment setup"
echo
echo "Username (the user who runs the web browser): "
read -r username
echo
userhome=$(getent passwd "$username" | cut -d: -f6)
pipelight_wine_path="$userhome/.wine-pipelight"
prefix="WINEPREFIX='$pipelight_wine_path'"
echo -n "Checking pipelight wine integration... "
if [[ -d $pipelight_wine_path ]]; then
green "found"
else
bold "missing, installing... "
echo
yellow 'NOTE: Press "Cancel" when wine asks about installing Gecko or Mono!'
echo
su -l -c "$prefix $wine_path/bin/wineboot 2> /dev/null" $username
fi
echo -n "Checking Silverlight installation... "
if [[ -d "$pipelight_wine_path/drive_c/Program Files/Microsoft Silverlight" ]]; then
green "found"
else
bold "missing, installing... "
echo
yellow 'NOTE: Press "Install now", "Next" and "Close" buttons.'
echo
su -l -c "$prefix $wine_path/bin/wine $silverlight_installer_path 2> /dev/null" $username
fi
echo -n "Checking pipelight configuration... "
if [[ -f "$userhome/.config/pipelight" ]]; then
green "found"
else
bold "missing, copying... "
if [[ ! -d "$userhome/.config" ]]; then
mkdir "$userhome/.config"
fi
if [[ -f "/usr/share/pipelight/pipelight" ]]; then
cp /usr/share/pipelight/pipelight $userhome/.config/pipelight
else
cp /usr/local/share/pipelight/pipelight $userhome/.config/pipelight
fi
chown $username: $userhome/.config/pipelight
fi
echo -n "Checking Silverlight path... "
sed -i 's/Program Files\\Silverlight/Program Files\\Microsoft Silverlight/g' \
$userhome/.config/pipelight
green "done"
echo
bold "Installation done!"
echo
echo "Go here to test your new setup:"
echo "http://bubblemark.com/silverlight2.html"
echo
exit 0
| true
|
1c5111436537f7390a58923851e4c1caa03996d8
|
Shell
|
lizanle521/springaop
|
/src/main/java/com/lzl/shell/expr.sh
|
UTF-8
| 80
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Integer-division demo: divide var2 by var1 and report the quotient.
var1=10
var2=20
var3=$((var2 / var1))
echo "the result is $var3"
| true
|
19f59194e0f9a38d8dbce9c4751c080e4795babc
|
Shell
|
sputnik-1/cydar-test
|
/check-server.sh
|
UTF-8
| 7,617
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# This script will get the version.txt file from the Nginx server
#
#-----------------------------------------------------------#
# get the loop wait time from the first command line parameter
# this is the polling time in seconds the script will wait
# before try to get the version.txt file again
# Polling interval in seconds: first CLI argument, defaulting to 5.
if [ "$1" = "" ]; then
  echo
  WAIT_TIME=5
  echo "WAIT_TIME: $WAIT_TIME"
  echo "WAIT_TIME defaults to 5 seconds"
  echo
else
  WAIT_TIME="$1"
  echo "WAIT_TIME: $WAIT_TIME"
  echo "WAIT_TIME set to $WAIT_TIME second(s)"
  echo
fi
# File to request from the Nginx server: second CLI argument, defaulting
# to version.txt.  A bogus name can be passed to exercise the failure path.
if [ "$2" = "" ]; then
  FILENAME="version.txt"
  echo "FILENAME: $FILENAME"
  echo "FILENAME defaults to $FILENAME"
  echo
else
  FILENAME="$2"
  echo "FILENAME: $FILENAME"
  echo "FILENAME set to $FILENAME"
  echo
fi
# Poll the server forever: fetch $FILENAME every cycle and compare its
# contents against the expected version string.
while true
do
# look for a previously downloaded version.txt file
ls version.txt
echo
# remove the previously downloaded version.txt file
rm -vf ./version.txt
echo
# get a new copy of the version.txt file from the Nginx server
# NOTE(review): the server IP is hard-coded; if wget fails, no file exists
# and the cat below will error out — confirm this is acceptable.
wget 54.212.247.223/"$FILENAME"
ls version.txt
echo
# get the version number from the downloaded file into a variable
DOWNLOADED_VERSION_NUMBER=`cat ./version.txt`
echo "DOWNLOADED_VERSION_NUMBER: $DOWNLOADED_VERSION_NUMBER"
echo
# the version number we are looking for
EXPECTED_VERSION_NUMBER="version-1.2.3"
echo "EXPECTED_VERSION_NUMBER: $EXPECTED_VERSION_NUMBER"
echo
# check we have downloaded the expected version number
if [ "$DOWNLOADED_VERSION_NUMBER" == "$EXPECTED_VERSION_NUMBER" ]; then
echo "The version numbers are identical."
echo "The server is running OK."
echo
echo "Press [CTRL+C] to exit program..."
else
# version mismatch: raise an audible alert so the admin team notices
# (an email hook could be added here as well)
echo "There may be problems with the server."
echo
# play a pre-recorded alert message
aplay server-audio-warning.wav
echo
echo "Press [CTRL+C] twice to exit program..."
fi
# wait WAIT_TIME seconds before polling again
sleep $WAIT_TIME
echo
done
#-----------------------------------------------------------#
exit
<<'COMMENT'
#!/bin/bash
# calculates and saves the md5sum for an apps .ruby-version file.
#
# if the md5sum has changed save the new md5sum to a text file for use next time,
# and then attempt to install the needed version of ruby if this is not already installed.
# using rbenv -s install x.y.z
#================================================================================#
# INSTALLED_RUBY_VERSIONS: is a text file containing all the currently installed
# versions of ruby.
# FILE1: is the file to check if it has changed
# FILE1_GET_PATH: is the full path and name of the file to check if it has changed
# FILE1_SAVED_MD5_SUM: is the file where we are saving the latest md5sum for FILE1
# FILE1_READ_MD5_SUM: is a variable containing the md5sum read from FILE1_SAVED_MD5_SUM
# FILE1_LATEST_MD5_SUM: is the latest md5sum generated for FILE1
#-----------------------------------------------------------#
# set up the jenkins project environment
echo
echo "------------------------------------------------------------------------------------------------------"
JENKINS_CURRENT_ENVIRONMENT=`env | sort`
echo "JENKINS_CURRENT_ENVIRONMENT: $JENKINS_CURRENT_ENVIRONMENT"
echo
echo "END OF JENKINS_CURRENT_ENVIRONMENT"
echo "------------------------------------------------------------------------------------------------------"
echo
# set the new PATH value
PATH=/var/lib/jenkins/.rbenv/shims:/var/lib/jenkins/.rbenv/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/var/lib/jenkins/.local/bin:/var/lib/jenkins/bin:
echo "NEW_PATH: $PATH"
echo
# set the following environment variables for oracle unit tests
export LD_LIBRARY_PATH="/usr/lib/oracle/12.1/client64/lib"
export NLS_LANG="AMERICAN_AMERICA.UTF8"
export ORACLE_HOME="/usr/lib/oracle/12.1/client64/lib"
NEW_JENKINS_SCRIPT_ENVIRONMENT=`env | sort`
echo "NEW_JENKINS_SCRIPT_ENVIRONMENT: $NEW_JENKINS_SCRIPT_ENVIRONMENT"
echo
SCRIPT_DIR=`pwd`
echo "SCRIPT_DIR: $SCRIPT_DIR"
echo
CURRENT_DIR=`pwd`
echo "CURRENT_DIR: $CURRENT_DIR"
echo
PROJECT_DIR="/var/lib/jenkins/workspace/ndtmsv2@script/"
echo "PROJECT_DIR: $PROJECT_DIR"
echo
cd $PROJECT_DIR
CURRENT_DIR=`pwd`
echo "CURRENT_DIR2: $CURRENT_DIR"
echo
LOCAL_RUBY_VERSION=`rbenv local`
echo "LOCAL_RUBY_VERSION: $LOCAL_RUBY_VERSION"
echo
RBENV_VERSIONS=`echo; rbenv versions`
echo "RBENV_VERSIONS: $RBENV_VERSIONS"
echo
echo "------------------------------------------------------------------------------------------------------"
echo
#================================================================================#
# calculates and saves the md5sum for an apps .ruby-version file.
#
# if the md5sum has changed save the new md5sum to a text file for use next time.
#================================================================================#
# FILE1: is the file to check if it has changed
# FILE1_GET_PATH: is the full path and name of the file to check if it has changed
# FILE1_SAVED_MD5_SUM: is the file where we are saving the latest md5sum for FILE1
# FILE1_READ_MD5_SUM: is a variable containing the md5sum read from FILE1_SAVED_MD5_SUM
# FILE1_LATEST_MD5_SUM: is the latest md5sum generated for FILE1
#-----------------------------------------------------------#
# .ruby-version file content md5sum comparison
# apps local .ruby-version to calculate the md5sum check on
FILE1=".ruby-version"
echo "FILE1: $FILE1"
echo
FILE1_GET_PATH="/var/lib/jenkins/workspace/ndtmsv2@script/$FILE1"
echo "FILE1_GET_PATH: $FILE1_GET_PATH"
echo
# persistent filename to store the latest md5sum hash into
FILE1_SAVED_MD5_SUM="/var/lib/jenkins/ci-projects/ndtmsv2/versions/$FILE1.md5"
echo "FILE1_SAVED_MD5_SUM: $FILE1_SAVED_MD5_SUM"
echo
# calculate the files current md5 sum
FILE1_LATEST_MD5_SUM=`md5sum $FILE1_GET_PATH`
echo "FILE1_LATEST_MD5_SUM: $FILE1_LATEST_MD5_SUM"
echo
# get the contents of the $FILE1_SAVED_MD5_SUM from disk into a variable
FILE1_READ_MD5_SUM=`cat "$FILE1_SAVED_MD5_SUM"`
echo "FILE1_READ_MD5_SUM: $FILE1_READ_MD5_SUM"
echo
#=======================================================#
# compare the contents of $FILE1_READ_MD5_SUM with $FILE1_LATEST_MD5_SUM
# if these are different it means the version of ruby has changed.
if [ "$FILE1_READ_MD5_SUM" == "$FILE1_LATEST_MD5_SUM" ]
then
echo "md5sum has not changed for $FILE1_GET_PATH"
else
# save the latest md5sum hash value of FILE1 to file named $FILE1_SAVED_MD5_SUM
# only if these are different.
echo "$FILE1_LATEST_MD5_SUM" > "$FILE1_SAVED_MD5_SUM"
echo "This application is using a different version of ruby"
echo "Now attempting to install the required ruby version of $LOCAL_RUBY_VERSION"
echo "Please wait - this could take some time ... 10 mins or more"
# use rbenv to install the required version of ruby
# if this is installed already, using the -s flag will
# tell rbenv NOT to try and re-install the same version.
rbenv install -s $LOCAL_RUBY_VERSION
fi
echo
#================================================================================#
# exit command needs to follow the thing being tested - as exit will only return the
# result of the last command run.
exit
COMMENT
| true
|
1e73a93bd26b303c8c347ac8a8bf1e9f2aa5005b
|
Shell
|
socc19-p10/vSMT-IO
|
/tools/cpu_usage.sh
|
UTF-8
| 2,513
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
#
#
# get vcpu/vm's CPU utilization during one period
#
cpu_use="/sys/fs/cgroup/cpuacct/machine"
kvm1_dir="$cpu_use/kvm1.libvirt-qemu"
kvm8_dir="$cpu_use/kvm8.libvirt-qemu"
vm_num=0
#vm10=0
result1=0
result8=0
#function=1: set kvm_name to first set hardware threads, for this server: 0-23
#function=2: set kvm_name to second set hardware threads, for this server: 24-47
# Print the expected invocation and quit.
usage() {
  echo -e "Usage:\n\t./vPair [Sleep_time4testing]"
  exit
}
# Abort the script unless at least one libvirt-qemu VM cgroup directory
# exists under $cpu_use.
function find_vms() {
ret=`ls $cpu_use/*.libvirt-qemu 2> /dev/null`
if [ "$ret" == "" ]; then
echo -e "It seems no VMs running, please check."
exit
fi
}
# Pin a VM's vCPU threads to hardware threads via taskset.
#   $1 - VM cgroup name (kvm1, kvm8, ...)
#   $2 - number of vCPUs to pin
#   $3 - mode: 1 = pin vcpu i to hardware thread i (0-23),
#              2 = pin vcpu i to hardware thread 24+i,
#              3 = allow all hardware threads 0-47
function set_affinity() {
#echo -e "kvm_name: $1, vCPU#: $2, function#, $3"
i=0
j=24
if [ $3 -eq 1 ]; then
#echo "function 1"
for ((i=0;i<$2;i++))
do
# the cgroup "tasks" file holds the host PID of the vcpu thread
task_pid=`cat $cpu_use/$1.libvirt-qemu/vcpu$i/tasks`
#echo "$task_pid"
sudo taskset -pc $i $task_pid
done
elif [ $3 -eq 2 ]; then
#echo "function 2"
for ((i=0;i<$2;i++,j++))
do
task_pid=`cat $cpu_use/$1.libvirt-qemu/vcpu$i/tasks`
#echo "$task_pid"
sudo taskset -pc $j $task_pid
done
elif [ $3 -eq 3 ]; then
# mode 3: no isolation — any of the 48 hardware threads may run the vcpu
for ((i=0;i<$2;i++))
do
task_pid=`cat $cpu_use/$1.libvirt-qemu/vcpu$i/tasks`
#echo "$task_pid"
sudo taskset -pc 0-47 $task_pid
done
fi
}
# Snapshot the cumulative cpuacct.usage counter (nanoseconds of CPU time)
# of vcpu0..vcpu23 for both VMs into the vm1/vm8 arrays.
function get_vcpu_time() {
for ((i=0;i<24;i++))
do
vm1[$i]=`cat $kvm1_dir/vcpu$i/cpuacct.usage`
vm8[$i]=`cat $kvm8_dir/vcpu$i/cpuacct.usage`
done
}
#check if there is VMs.
find_vms
# exactly one argument is required: the sampling interval in seconds
if [ $# -ne 1 ]; then
usage
exit
fi
#get vm number
#ret=`ls $cpu_use 2> /dev/null`
# main.c is vPair's implementation source codes.
#if [ $# -ne 3 ]; then
#	usage
#	exit
#fi
#echo -e "kvm_name: $1, vCPU#: $2, function#, $3"
#set_affinity $1 $2 $3
#get_vcpu_time
# take the initial counter snapshot
# NOTE(review): these loops stop at i<23 (vcpu0..22) while get_vcpu_time
# reads vcpu0..23 — vcpu23 appears to be skipped here; confirm off-by-one.
for ((i=0;i<23;i++))
do
vm1[$i]=`cat $kvm1_dir/vcpu$i/cpuacct.usage`
vm8[$i]=`cat $kvm8_dir/vcpu$i/cpuacct.usage`
done
#echo -e "vCPU#\t\tVM1\t\tVM8"
# every $1 seconds: convert the counter delta (ns) to a percentage of the
# interval and print one value per vcpu for VM1
for((;;))
do
sleep $1
for ((i=0;i<23;i++))
do
tmp1=`cat $kvm1_dir/vcpu$i/cpuacct.usage`
tmp8=`cat $kvm8_dir/vcpu$i/cpuacct.usage`
((result1=$tmp1 - ${vm1[$i]}))
((result8=$tmp8 - ${vm8[$i]}))
result11=`echo "scale=3; $result1/($1*1000000000)*100" | bc -l`
result81=`echo "scale=3; $result8/($1*1000000000)*100" | bc -l`
#echo -e "vCPU$i\t\t$result11\t\t$result81"
#echo -e "vCPU$i\t\t${vm1[$i]}\t\t\t${vm8[$i]}"
echo "$result11"
done
# refresh the baseline snapshot for the next interval
for ((i=0;i<23;i++))
do
vm1[$i]=`cat $kvm1_dir/vcpu$i/cpuacct.usage`
vm8[$i]=`cat $kvm8_dir/vcpu$i/cpuacct.usage`
done
#get_vcpu_time
done
| true
|
a4fbc15caf8f043abecb22223cfbb63c90efab7c
|
Shell
|
xztaityozx/dotfiles
|
/config/zsh/zinit.main.zsh
|
UTF-8
| 293
| 2.6875
| 3
|
[] |
no_license
|
# zinit bootstrap (sourced from zshrc): bail out unless the external tools
# zinit needs are present, then load zinit and wire up its completion.
type zip tar curl wget git unzip &> /dev/null || return 127;
[[ -f "$ZDOTDIR/.zinit/bin/zinit.zsh" ]] && source "$ZDOTDIR/.zinit/bin/zinit.zsh"
autoload -Uz _zinit
(( ${+_comps} )) && _comps[zinit]=_zinit
# when zinit loaded successfully, make sure its prefix directories exist
type zinit &> /dev/null && {
mkdir -p $ZPFX/{bin,man/man1,share,script}
}
| true
|
ac8b4a4df3a1dbe8ff4defa50f7314a43bcbbfaf
|
Shell
|
jszzang9/TFA
|
/src/test/resources/stop.sh
|
UTF-8
| 144
| 2.78125
| 3
|
[] |
no_license
|
#! /bin/sh
# Stop the running tfa.jar Java process, if any.
# Find the PID(s) of the java process running tfa.jar; `grep -v grep`
# drops the grep process itself from the ps listing.
PID_TFA=`ps -ef | grep tfa.jar | grep java | grep -v grep | awk '{print $2}'`
# BUG FIX: the script runs under /bin/sh but used the bash-only [[ ]]
# test, which fails on dash/ash; use the POSIX -n string test instead.
# ${PID_TFA} is deliberately unquoted so multiple PIDs split into
# separate kill arguments.
if [ -n "${PID_TFA}" ]; then
  kill -9 ${PID_TFA}
fi
| true
|
35410394e7c7103099a19ef81aed9eb001b2e034
|
Shell
|
wilkelab/influenza_H3N2_passaging
|
/scripts/SLACrun.sh
|
UTF-8
| 1,406
| 3.515625
| 4
|
[] |
no_license
|
#This script sorts a particular condition's nucleotide.fasta and
#nucleotide.tree files into the SLAC analysis file structure.
#
#For each phylogenetic subdivision, full/internal/tips, it starts SLAC
#(run_dNdS.bash) and moves the outputted sites.dat to
#rate_measurement_data/<condition>_<subdivision>.dat
#CDM
if [[ $# -eq 0 ]] ; then
  echo 'No argument supplied to SLACrun.sh'
  exit 1
fi
CONDITION=$@
BASEDIR=~/influenza_passaging_effects
LOC=$BASEDIR/SLAC
cd $LOC
# Stage the input alignment and tree into every subdivision directory.
for subdir in fulltree internals tips; do
  cp nucleotide.fasta $LOC/$subdir/
  cp nucleotide.tree $LOC/$subdir/
done
echo "copied nucleotide.fasta"
echo "copied nucleotide.tree to slac/"
# run_slac SUBDIR SUFFIX
#   Run SLAC inside $LOC/SUBDIR (clearing stale outputs first) and move
#   the resulting sites.dat to rate_measurement_data/${CONDITION}_SUFFIX.dat
#   (refactor: the three copy/clean/run/move stanzas were previously
#   written out in triplicate).
run_slac() {
  cd $LOC/$1
  rm -f sites.dat messages.log errors.log model.log
  bash run_dNdS.bash
  local out="${CONDITION}_$2.dat"
  echo $CONDITION
  echo $out
  mv sites.dat $BASEDIR/rate_measurement_data/$out
}
run_slac fulltree full
run_slac internals internal
run_slac tips tips
| true
|
fba19ddb95cea04cf38a20bb8e981a67ef81eb40
|
Shell
|
mafrosis/dotfiles
|
/step/install.sh
|
UTF-8
| 1,452
| 3.703125
| 4
|
[] |
no_license
|
#! /bin/bash -e
# Install the smallstep `step` CLI and bootstrap it against the private CA.
# DEBUG mode controlled by env var
if [[ -n $DEBUG ]]; then set -x; fi
SMALLSTEP_VERSION=${SMALLSTEP_VERSION:-'0.23.0'}
# passed from /dotfiles/install.sh; non-zero forces a reinstall
FORCE=${1:-0}
# Install step cli tools
if [[ $FORCE -eq 0 ]] && command -v step >/dev/null 2>&1; then
echo 'step-cli already installed!'
else
if [[ $(uname) == 'Linux' ]]; then
# map the kernel machine name onto smallstep's release artifact names
if [[ $(uname -m) =~ arm7(.*) ]]; then
ARCH=armv7l
elif [[ $(uname -m) =~ arm6(.*) ]]; then
ARCH=armv6l
elif [[ $(uname -m) = aarch64 ]]; then
ARCH=arm64
else
ARCH=amd64
fi
curl -o /tmp/step.tgz -L "https://github.com/smallstep/cli/releases/download/v${SMALLSTEP_VERSION}/step_linux_${SMALLSTEP_VERSION}_${ARCH}.tar.gz"
tar xzf /tmp/step.tgz -C /tmp
sudo mv "/tmp/step_${SMALLSTEP_VERSION}/bin/step" /usr/local/bin/step
elif [[ $(uname) == 'Darwin' ]]; then
brew install step
fi
fi
# Bootstrap step against the CA (only once: defaults.json marks completion)
if [[ ! -f ~/.step/config/defaults.json ]]; then
SMALLSTEP_CA_HOST=${SMALLSTEP_CA_HOST:-'https://ca.mafro.net:4433'}
echo "Connecting to ${SMALLSTEP_CA_HOST}"
echo -n "Enter the root certificate fingerprint: "
read -r FINGERPRINT
if [ -z "$FINGERPRINT" ] ;then
echo 'Bad input!'
exit 44
fi
echo "${FINGERPRINT}"
step ca bootstrap --force --ca-url "${SMALLSTEP_CA_HOST}" --fingerprint "${FINGERPRINT}"
fi
# Ensure step known_hosts directory present
mkdir -p ~/.step/ssh/
# Create step completion file
step completion zsh > ~/.step/zsh_completion
| true
|
5195cf77986249d42538006a8b070322b40b02d2
|
Shell
|
TanyaKovalenko/Tests
|
/test_lab_1_3_3.sh
|
UTF-8
| 2,610
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Remove the temporary output file on any exit path (normal or error).
cleanup() {
rm -f "$TEMP_FILE_WITH_OUTPUT_STRINGS"
}
trap cleanup EXIT
echo "Test case in which input array ends with 0 after 5"
RANGE=20
MAX_NUM_OF_EL_IN_ARRAY=$RANDOM
let "MAX_NUM_OF_EL_IN_ARRAY %= $RANGE"
index_of_string_in_array=0
while [ $index_of_string_in_array -lt $MAX_NUM_OF_EL_IN_ARRAY ]
do
input_array[$index_of_string_in_array]=$RANDOM
index_of_string_in_array=$[$index_of_string_in_array+1]
done
input_array[$MAX_NUM_OF_EL_IN_ARRAY]=5
let "MAX_NUM_OF_EL_IN_ARRAY = MAX_NUM_OF_EL_IN_ARRAY + 1"
input_array[$MAX_NUM_OF_EL_IN_ARRAY]=0
#cd ~/Documents/Labs/seminar-materials/c++/task-implementation/src
TEMP_FILE_WITH_OUTPUT_STRINGS=`mktemp outXXXXXXXXX`
declare -a output_array_of_strings
index_of_string_in_array=0
echo "${input_array[@]}" | make run-martynov.alexey/1 VALGRIND="--leak-check=full --track-origins=yes --xml=yes --xml-file='memcheckRes_1_3_3.xml'" ARGS="3" > $TEMP_FILE_WITH_OUTPUT_STRINGS
while read line ; do
output_array_of_strings[$index_of_string_in_array]=$line
index_of_string_in_array=$[$index_of_string_in_array+1]
done < $TEMP_FILE_WITH_OUTPUT_STRINGS
output_string="${output_array_of_strings[0]}"
output_array=($output_string)
declare -a comparison_array
length_of_array=${#input_array[@]}
let "number_of_last_item = length_of_array - 1"
unset input_array[number_of_last_item]
let "number_of_last_item = number_of_last_item - 1"
last_item=${input_array[$number_of_last_item]}
index_of_array=0
for number_of_item_in_input_array in ${input_array[@]}
do
if [[ $last_item -eq 1 ]]
then
item_in_input_array=$number_of_item_in_input_array
let "remainder_of_the_division = item_in_input_array % 2"
if [[ $remainder_of_the_division -eq 0 ]]
then
unset input_array[index_of_array]
fi
else
if [[ $last_item -eq 2 ]]
then
item_in_input_array=$number_of_item_in_input_array
comparison_array[index_of_array]=$item_in_input_array
let "item_in_input_array = item_in_input_array % 3"
if [[ $item_in_input_array -eq 0 ]]
then
let "index_of_array = index_of_array + 1"
comparison_array[index_of_array]=1
let "index_of_array = index_of_array + 1"
comparison_array[index_of_array]=1
let "index_of_array = index_of_array + 1"
comparison_array[index_of_array]=1
fi
fi
fi
let "index_of_array = index_of_array + 1"
done
if [[ $last_item -eq 2 ]]
then
comparison_string=${comparison_array[@]}
else
comparison_string=${input_array[@]}
fi
if [[ "$comparison_string" = "$output_string" ]]
then
echo "Program works correctly"
else
echo "There is a error somewhere in your code."
exit 1
fi
| true
|
03119ac2865dc70ed773e339ca346e458d572dff
|
Shell
|
inambioinfo/learning-codes
|
/Pipelines/parse_exome/pc07-exome.sh
|
UTF-8
| 1,121
| 3.40625
| 3
|
[] |
no_license
|
#########################################################################
# File Name: pc07-exome.sh
# Author: Chun-Jie Liu
# Mail: chunjie-sam-liu@foxmail.com
# Created Time: Tue 03 Jul 2018 09:47:24 AM CST
#########################################################################
#!/bin/bash
# Run the whole-exome pipeline (wes_analysis.py) on every Sample* dir.
# ! pipeline script
wes=/data/liucj/pipelines/pipeline-exome/wes_analysis.py
# ? data dir holding one Sample* directory per specimen
data_dir=/home/liucj/data/wxs/liujy/Project_C0571180007
sample_dir=`ls -d ${data_dir}/Sample*`
# one-off decompression of the fastq.gz inputs — kept disabled after use
# for sd in ${sample_dir[@]}
# do
# gz_file=(`ls ${sd}/*gz`)
# fq1=${gz_file[0]%.gz}
# [[ -f ${fq1} ]] || gunzip ${gz_file[0]}
# fq2=${gz_file[1]%.gz}
# [[ -f ${fq2} ]] || gunzip ${gz_file[1]}
# done
# the pipeline requires the py27 conda environment
source activate py27
for sd in ${sample_dir[@]}
do
s=`basename ${sd}`
# paired-end reads: first fastq is read 1, second is read 2
fqs=(`ls ${sd}/*fastq`)
fq1=`basename ${fqs[0]}`
fq2=`basename ${fqs[1]}`
out=${data_dir}/wxs-result/${s}
[[ -d ${out} ]] || mkdir -p ${out}
# run the wxs analysis
cmd="python ${wes} -pe1 ${fq1} -pe2 ${fq2} -i ${sd} -o ${out}"
echo "Notice: start analysis ${s}"
echo ${cmd}
eval ${cmd}
echo "Notice: ${s} analysis end"
done
| true
|
3530b69030dc71084bb0ac0d7ce2d597d1d6ddee
|
Shell
|
acorg/eske-pipeline-spec
|
/00-start/start.sh
|
UTF-8
| 520
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash -e
# First step of the SLURM pipeline: log the start and emit one task name
# per FASTQ file given on the command line.
. /home/tcj25/.virtualenvs/35/bin/activate
log=../slurm-pipeline.log
# Remove the marker file that indicates when a job is fully complete.
rm -f ../slurm-pipeline.done
echo "SLURM pipeline started at $(date)" >> "$log"
for fastq in "$@"
do
    # Task name is the filename up to its first dot.  Parameter expansion
    # replaces the previous unquoted `echo | cut` pipeline, which broke
    # on filenames containing whitespace.
    task=${fastq%%.*}
    echo >> "$log"
    echo " FASTQ file $fastq" >> "$log"
    echo " task name $task" >> "$log"
    # Emit task names (without job ids as this step does not start any
    # SLURM jobs).
    echo "TASK: $task"
done
echo >> "$log"
| true
|
9a90be2960fefb05ca6d3f611b783e05b1fb97b2
|
Shell
|
monarc99/aur
|
/python-booleanoperations/PKGBUILD
|
UTF-8
| 1,288
| 2.734375
| 3
|
[] |
no_license
|
# Maintainer: Caleb Maclennan <caleb@alerque.com>
# Maintainer: Guillaume Horel <guillaume.horel@gmail.com>
# Contributor: William Turner <willtur.will@gmail.com>
_pyname=booleanOperations
pkgname=python-${_pyname,,}
pkgver=0.9.0
pkgrel=3
pkgdesc='Boolean operations on paths'
arch=(any)
url="https://github.com/typemytype/$_pyname"
license=(MIT)
_pydeps=(pyclipper
fonttools)
depends=(python
"${_pydeps[@]/#/python-}")
# checkdepends=(python-defcon
# python-fontpens
# python-pytest)
makedepends=(python-setuptools-scm)
_archive="$_pyname-$pkgver"
source=("https://files.pythonhosted.org/packages/source/${_pyname::1}/$_pyname/$_archive.zip")
sha256sums=('8cfa821c32ad374fa120d6b2e0b444ebeac57c91e6631528645fa19ac2a281b8')
prepare() {
cd "$_archive"
# Drop the spurious install-time "wheel" requirement from setup.cfg.
# Upstream PR: https://github.com/typemytype/booleanOperations/pull/63
sed -i -e '/wheel$/d' setup.cfg
}
build() {
cd "$_archive"
# Fixed hash seed keeps the build byte-reproducible.
export PYTHONHASHSEED=0
python setup.py build
}
# Upstream (still/again) has circular dependencies in the test suite
# https://github.com/typemytype/booleanOperations/issues/64
# check() {
# cd "$_archive"
# PYTHONPATH=Lib pytest tests
# }
package() {
cd "$_archive"
python setup.py install --root="$pkgdir" --optimize=1 --skip-build
install -Dm0644 -t "$pkgdir/usr/share/licenses/$pkgname/" LICENSE
}
| true
|
b0955e76ddc94efd34bab05ffd45430abb3d7b91
|
Shell
|
rajeshvv/h5vcc
|
/lbshell/build/update_changelog.sh
|
UTF-8
| 306
| 3.171875
| 3
|
[
"BSD-3-Clause",
"FTL",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# For use when building a debian package: append a changelog entry whose
# version combines STEEL_VERSION (read from steel_version.h) with today's
# date stamp.
macro_line=$(grep 'define STEEL_VERSION' ../src/steel_version.h)
# pull the quoted version string out of the #define line
steel_version=$(echo "$macro_line" | sed -e 's/.*"\(.*\)".*/\1/')
date=$(date +%Y-%m-%d)
date_id=$(date +%Y%m%d)
dch -v $steel_version-$date_id "$date daily build"
| true
|
4dcbfb5cab93d6174e32d1201ae8d387b9495443
|
Shell
|
VHSgunzo/NordCheck
|
/nordcheck
|
UTF-8
| 1,429
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Find the first pingable host in src/nord_ip, then test every
# login:password pair in src/vpn through a SOCKS5 proxy on that host,
# sorting credentials into src/good_vpn and src/bad_vpn.
cd "$( dirname "${BASH_SOURCE[0]}")"
nord_port=1080
check_ip="ident.me"
num_ip=$(cat src/nord_ip|wc -l)
for (( n=1; n <= $num_ip; n++ ))
do
  # sed "N!d" prints only line N of the file
  nord_ip=$(sed "${n}!d" src/nord_ip)
  echo "##############################"
  echo "| PING |: $nord_ip..."
  # 100% packet loss in the ping summary marks the host unreachable
  ping_ip=$(ping -4 -q -c 1 -W 0.5 $nord_ip | grep -o "100%")
  if [ "$ping_ip" == "100%" ]; then
    echo "| BAD IP |: $nord_ip"
    echo "##############################"
    continue
  else
    echo "|GOOD IP |: $nord_ip"
    echo "##############################"
    break
  fi
done
num_vpn=$(cat src/vpn|wc -l)
rm -f src/good_vpn src/bad_vpn
touch src/bad_vpn
touch src/good_vpn
for (( m=1; m <= $num_vpn; m++ ))
do
  logpas=$(sed "${m}!d" src/vpn)
  # point line 5 of the proxychains config at the candidate credentials
  sed -i "5c\socks5 $nord_ip $nord_port $logpas" src/proxy.conf
  pub_ip=$(proxychains4 -f src/proxy.conf curl $check_ip)
  # BUG FIX: was `! [ -z $pub_ip ]` with an unquoted expansion, which
  # breaks when the curl output contains whitespace; use a quoted -n test.
  if [ -n "$pub_ip" ]
  then
    echo "###############################################################"
    echo "|GOOD|: ${logpas}"
    echo "###############################################################"
    echo "$logpas" >> src/good_vpn
  else
    echo "###############################################################"
    echo "|BAD|: ${logpas}"
    echo "###############################################################"
    echo "$logpas" >> src/bad_vpn
  fi
done
# blank the credentials line so it is not left behind in proxy.conf
sed -i "5c\ " src/proxy.conf
echo "#################"
echo "| GOOD's | - $(cat src/good_vpn|wc -l)"
echo "#################"
echo "| BAD's | - $(cat src/bad_vpn|wc -l)"
echo "#################"
| true
|
05fde4d1e6c5bed4d9777b8b495db684676871ce
|
Shell
|
edunnsigouin/ds21grl
|
/code/model/5.0-setup_QSC5_TRACMIP_branch.sh
|
UTF-8
| 3,939
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Script that sets up and submits a branch run from a CAM5 slab ocean aquaplanet control
# run following the TRACMIP protocol (Voigt et al. 2016 JAMES) on FRAM cluster. The som forcing
# file is modified from the control. The model version is CESM 2.1.0.
# define inputs
expname="QSC5.TRACMIP.NH01.Lk1.Q0.75.lon0.150.lat0.0.latd.30"
run_refcase="QSC5.TRACMIP"
run_refdate="0051-01-01"
docn_som_filepath="/cluster/home/edu061/SETUP_RUN/SOM_FORCING"
docn_som_filename="som.QSC5.TRACMIP.NH01.Lk1.Q0.75.lon0.150.lat0.0.latd.30.nc"
resubmit=49
stop_n=1
stop_option="nyears"
wallclock="03:30:00"
# create case
cd $HOME/cesm2.1.0/cime/scripts/
./create_newcase --case $HOME/cesm2.1.0/cases/$expname --compset QSC5 --res f09_f09_mg17 --machine fram --pecount L --project nn9039k --run-unsupported
# set up case
cd $HOME/cesm2.1.0/cases/$expname
./case.setup
# modify CAM data output format (history tapes: averaging flags, output
# frequency, files per tape, and field lists)
echo "avgflag_pertape = 'A','I','I','I','A'" >> user_nl_cam
echo "nhtfrq = 0,-24,-24,-24,-24" >> user_nl_cam
echo "mfilt = 1,73,73,73,73" >> user_nl_cam
echo "fincl1 = 'SST'" >> user_nl_cam
echo "fincl2 = 'V','Z3'" >> user_nl_cam
echo "fincl3 = 'U','T'" >> user_nl_cam
echo "fincl4 = 'Q','OMEGA','PS'" >> user_nl_cam
echo "fincl5 = 'SST','TREFHT','TS','PRECC','PRECL','PRECSC','PRECSL','QFLX','CLDHGH','CLDLOW','CLDMED','CLDTOT',
'ICEFRAC','SNOWHICE','SNOWHLND','FSNS','FSNSC','FSDS','FSDSC','FLNS','FLNSC','FLDS','FLDSC','SHFLX','LHFLX',
'SOLIN','FSNT','FSNTC','FSNTOA','FSNTOAC','FSUTOA','FLNT','FLNTC','FLUT','FLUTC','LWCF','SWCF'" >> user_nl_cam
# TRACMIP modification
# setup aerosols following APE protocol and Medeiros.
# 1) prescribe aerosol concentrations and remove emissions
# 2) turn radiative effects of aerosols off: make sure ozone is from prescribed APE values
echo "seasalt_emis_scale = 0.0" >> user_nl_cam
echo "ext_frc_specifier = ''" >> user_nl_cam
echo "tracer_cnst_specifier = ''" >> user_nl_cam
echo "srf_emis_specifier = ''" >> user_nl_cam
echo "micro_mg_nccons = .TRUE." >> user_nl_cam
echo "micro_mg_nicons = .TRUE." >> user_nl_cam
echo "prescribed_ozone_cycle_yr = 1990" >> user_nl_cam
echo "prescribed_ozone_datapath = '/cluster/projects/nn9625k/cesm/inputdata/atm/cam/ozone'" >> user_nl_cam
echo "prescribed_ozone_file = 'apeozone_cam3_5_54.nc'" >> user_nl_cam
echo "prescribed_ozone_name = 'OZONE'" >> user_nl_cam
# TRACMIP modification for seasonal cycle
# change orbital parameters and source code (Isla Simpson personal communication)
search1="orb_obliq = 0."
search2="orb_mvelp = 0."
replace1="orb_obliq = 23.5"
replace2="orb_mvelp = 102.7"
sed -i "s#${search1}#${replace1}#g" user_nl_cpl
sed -i "s#${search2}#${replace2}#g" user_nl_cpl
cp $HOME/SETUP_RUN/SRC_MOD/SEASONAL_CYCLE/seq_infodata_mod.F90 $HOME/cesm2.1.0/cases/$expname/SourceMods/src.drv/
# specify SOM forcing file: generate default namelists, copy the docn
# stream file out as a user override, then point it at the custom forcing
./preview_namelists
cp $HOME/cesm2.1.0/cases/$expname/CaseDocs/docn.streams.txt.som $HOME/cesm2.1.0/cases/$expname/user_docn.streams.txt.som
chmod +rw user_docn.streams.txt.som
search1="/cluster/projects/nn9625k/cesm/inputdata/ocn/docn7/SOM"
search2="default.som.forcing.aquaplanet.Qflux0_h30_sstQOBS.1degFV_c20170421.nc"
sed -i "s#${search1}#${docn_som_filepath}#g" user_docn.streams.txt.som
sed -i "s#${search2}#${docn_som_filename}#g" user_docn.streams.txt.som
./xmlchange DOCN_SOM_FILENAME=$docn_som_filename
# specify branch run options
./xmlchange RUN_TYPE=branch,RUN_REFCASE=$run_refcase,RUN_REFDATE=$run_refdate
# copy restart files to initialize branch run
cp $HOME/SETUP_RUN/RESTART_FILES/$run_refcase/$run_refdate/* /cluster/work/users/edu061/cesm/$expname/run/
# specify runtime options
./xmlchange JOB_WALLCLOCK_TIME=$wallclock,RESUBMIT=$resubmit,STOP_N=$stop_n,STOP_OPTION=$stop_option
# Build run
./case.build --skip-provenance-check
# submit run
./case.submit
| true
|
2dba400d08803f51a18a54a0721f9329e9aa0960
|
Shell
|
nicooga/docker-collectd-elk
|
/entrypoint.sh
|
UTF-8
| 1,255
| 4.15625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint: configure collectd (hostname, interval), bind the
# host's /proc if provided, then run collectd or the given command.
set -eu
COLLECTD_CONF=/etc/collectd/collectd.conf
: ${COLLECTD_HOST:=}
# if COLLECTD_HOST not already set in the container's environment
if [ -z "${COLLECTD_HOST}" ]; then
  # if /etc/hostname is volume mounted in, then use that
  if [ -f "/host/hostname" ]; then
    COLLECTD_HOST=$(cat /host/hostname);
  # else if /proc/sys/kernel/hostname is volume mounted in, then use that
  elif [ -f "/host/proc/sys/kernel/hostname" ]; then
    COLLECTD_HOST=$(cat /host/proc/sys/kernel/hostname);
  fi
fi
# after all that, if COLLECTD_HOST is finally set, then sed up the config
# (hyphens are stripped from the hostname)
if [ -n "$COLLECTD_HOST" ]; then
  sed -i -e "s/# @COLLECTD_HOST@/Hostname ${COLLECTD_HOST//-/}/g" $COLLECTD_CONF
fi
# default collectd interval to 10s; overriden using COLLECTD_INTERVAL_SECONDS
: ${COLLECTD_INTERVAL_SECONDS:=10}
sed -i -e "s/# @COLLECTD_INTERVAL_SECONDS@/Interval $COLLECTD_INTERVAL_SECONDS/g" $COLLECTD_CONF
# first run only: swap the container's /proc for the host's so collectd
# reports host metrics
if [ ! -d /mnt/oldproc -a -d /host/proc ]; then
  umount /proc
  mount -o bind /host/proc /proc
  mkdir -p /mnt/oldproc
  mount -t proc none /mnt/oldproc
fi
# if no command specified, start collectd
# BUG FIX: was `[ -z "$@" ]`, which errors out ("too many arguments") —
# and hence kills the script under `set -e` — whenever more than one
# argument is passed; test the argument count instead.
if [ $# -eq 0 ]; then
  exec /usr/sbin/collectd -C $COLLECTD_CONF -f
fi
# otherwise run the command
exec "$@"
| true
|
6f7ace28cf38ce19162ffbde444019e5ac84811f
|
Shell
|
mpatsiou/opsys2018-assignment1-2776
|
/script1a.sh
|
UTF-8
| 679
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Watch a list of sites (./sites, one URL per line; lines containing '#'
# are skipped) for content changes by comparing md5 checksums against the
# ./diff_cache file from the previous run.
diff_file="./diff_cache"
file="./sites"
# Fetch every listed site, report INIT/CHANGED status against the cache,
# then rewrite the cache with the fresh checksums.
index () {
  touch $diff_file
  # BUG FIX: was the csh-style `set newdiff=""`, which in bash assigns the
  # positional parameter $1 instead of creating a variable.
  newdiff=""
  while IFS= read -r n
  do
    if [[ $n != *#* ]]; then
      new_md5sum=$(curl -s $n | md5sum | cut -d ' ' -f1)
      validate_md5sum $diff_file $n $new_md5sum
      newdiff+="$n $new_md5sum \n"
    fi
  done < "$file"
  echo -e $newdiff > $diff_file
}
# validate_md5sum CACHE_FILE SITE NEW_SUM
#   Print "SITE CHANGED NEW_SUM OLD_SUM" when the cached checksum differs,
#   nothing when it matches, or "SITE INIT" when SITE is not cached yet.
validate_md5sum() {
  while IFS= read -r line
  do
    site=$(echo "$line" | cut -d ' ' -f1)
    md5sum=$(echo "$line" | cut -d ' ' -f2)
    if [[ $site == $2 ]]; then
      if [[ $md5sum != $3 ]]; then
        # BUG FIX: terminate the CHANGED report with a newline, matching
        # the INIT branch below.
        printf "$site CHANGED $3 $md5sum\n"
      fi
      return
    fi
  done < $1
  printf "$2 INIT\n"
}
index
| true
|
5bb3cb7cd9e80049c0ff4781fa0e1792f70b6510
|
Shell
|
j23d/raumopol_kotti_buildout
|
/install.sh
|
UTF-8
| 2,514
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Print the usage synopsis and terminate the script.
error() {
    printf '%s\n' "usage: $0 -mds mail default sender, -mu mail username, -mp mailpassword [-d database_string -u username -st kotti site title -s kotti secret -sk kotti session key]"
    exit 0
}
# Parse command-line flags into configuration variables. Every flag takes a
# value: each arm shifts the value away, and the trailing 'shift' at the
# bottom of the loop then discards the flag itself.
while [ $# -gt 0 ]; do
case "$1" in
-mds|--MAILDEFAULTSENDER)
MAILDEFAULTSENDER="$2"
shift
;;
-mu|--MAILUSERNAME)
MAILUSERNAME="$2"
shift
;;
-mp|--MAILPASSWORD)
MAILPASSWORD="$2"
shift
;;
-d|--DATABASE)
DATABASE="$2"
shift
;;
-u|--USERNAME)
USERNAME="$2"
shift
;;
-st|--KOTTISITETITLE)
KOTTISITETITLE="$2"
shift
;;
-s|--KOTTISECRET)
KOTTISECRET="$2"
shift
;;
-sk|--SESSIONKEY)
SESSIONKEY="$2"
shift
;;
-h|--help)
error
;;
esac
shift
done
# Check the required variables and bail out with usage when one is missing.
# BUG FIX: '-z MAILPASSWORD' tested the literal string (always false); it
# must dereference the variable.
if [[ -z "$MAILDEFAULTSENDER" || -z "$MAILUSERNAME" || -z "$MAILPASSWORD" ]]; then
error
fi
# Default database: sqlite file next to the buildout (value is pre-escaped
# for the sed substitutions below, which use '/' as delimiter).
if [[ -z "$DATABASE" ]]; then
DATABASE='sqlite:\/\/\/%(here)s\/Kotti.db'
fi
# Default username: the 5th path component of this script's directory —
# presumably a /home/<user>/... layout; confirm for other install paths.
if [[ -z "$USERNAME" ]]; then
dir="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd )";
OIFS=$IFS
IFS='/'
paths=($dir)
IFS=$OIFS
USERNAME=${paths[4]}
fi
# BUG FIX: these used to be unconditional assignments that clobbered the
# values just collected from the command line; assign only when still
# unset/empty so user-supplied options are honoured. The mail settings are
# required above, so their old hardcoded overrides are removed entirely.
: "${SESSIONKEY:=$USERNAME-kotti}"
SESSIONSECRET="$USERNAME-SECRET-0815"
: "${KOTTISITETITLE:=Kotti}"
: "${KOTTISECRET:=qwerty}"
# Generate buildout.cfg from the template by substituting the collected
# settings. NOTE: sed uses '/' as delimiter, so values containing '/'
# (e.g. the sqlite DATABASE default) must arrive pre-escaped.
echo "Prepare the buildout file"
cp ./config/buildout.cfg.in ./buildout.cfg
sed -i "s/%databasestring%/$DATABASE/g" ./buildout.cfg
sed -i "s/%username%/$USERNAME/g" ./buildout.cfg
sed -i "s/%session_key%/$SESSIONKEY/g" ./buildout.cfg
sed -i "s/%session_secret%/$SESSIONSECRET/g" ./buildout.cfg
sed -i "s/%mail_default_sender%/$MAILDEFAULTSENDER/g" ./buildout.cfg
sed -i "s/%mail_username%/$MAILUSERNAME/g" ./buildout.cfg
sed -i "s/%mail_password%/$MAILPASSWORD/g" ./buildout.cfg
sed -i "s/%kotti_site_title%/$KOTTISITETITLE/g" ./buildout.cfg
sed -i "s/%kotti_secret%/$KOTTISECRET/g" ./buildout.cfg
cp ./config/versions.cfg.in ./versions.cfg
# create virtualenv (only once; the interpreter's presence marks a previous
# install), activate it and run buildout
if [ ! -f ./bin/python2.7 ]; then
echo "Installing virtualenv."
virtualenv-2.7 --distribute .
fi
echo "Running the buildout."
./bin/python2.7 bootstrap.py
./bin/buildout
exit 0;
| true
|
7d1e3340954338455189953232532d6389e6ec0e
|
Shell
|
swade1987/terraform-docker-swarm
|
/modules/swarm-manager-cluster/user_data.sh
|
UTF-8
| 1,257
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# EC2 user-data (Terraform template): configure the Docker engine for a
# Swarm manager, join Consul, and create the overlay network.
# Literal shell '$' is written as '$$' because Terraform interpolates this
# file; consul_server and overlay_network_name are Terraform variables.

# Remove docker engine key to make it unique
sudo rm -f /etc/docker/key.json
sudo service docker restart

# Configure the docker daemon via a systemd drop-in.
# BUG FIX: the drop-in previously wrapped the [Service] section in '{ ... }'
# braces, which is not systemd unit-file syntax and makes the drop-in
# unparsable; the braces are removed. mkdir also gains -p so a pre-existing
# directory does not fail.
sudo mkdir -p /etc/systemd/system/docker.service.d
cat << "EOF" > /etc/systemd/system/docker.service.d/daemon.conf
[Service]
ExecStart=
ExecStart=/usr/bin/docker daemon -H tcp://0.0.0.0:2375 -H unix:// --cluster-store=consul://${consul_server} --cluster-advertise=eth0:2375
EOF

# Restart the docker daemon
sudo systemctl daemon-reload
sudo systemctl restart docker

# Obtain the private IP address of this instance via the AWS API.
readonly EC2_METADATA_URL='http://169.254.169.254/latest/meta-data'
HOST_IP=$$(curl -s $${EC2_METADATA_URL}/local-ipv4)

# Create a swarm manager container and connect it to Consul.
docker run -d --name swarm -p 3375:3375 \
  swarm manage -H tcp://0.0.0.0:3375 --replication --advertise $${HOST_IP}:3375 consul://${consul_server}

docker run -d --net=host --name consul-agent -e 'CONSUL_LOCAL_CONFIG={"leave_on_terminate": true}' \
  consul agent -bind=$${HOST_IP} -retry-join=${consul_server} -node=swarm-manager-$${HOST_IP}

# Create an overlay network for our environment
docker network create --driver overlay ${overlay_network_name}

echo 'Completed.'
| true
|
88ae802c5b45615d59c2934ae12f9bfb4705bd01
|
Shell
|
tysteiman/dot
|
/scripts/fssh
|
UTF-8
| 412
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Present the Host aliases from ~/.ssh/config in fzf and ssh to the
# selection (the selection is a named Host entry, so ssh resolves it).
#
# Fixes: read the config directly instead of a useless 'cat |' pipe, anchor
# the match to lines beginning with "Host " so other lines merely containing
# the word are not offered, and quote expansions so paths/selections with
# whitespace cannot break the test or the ssh invocation.
TARGET=$(grep -i '^[[:space:]]*Host ' "$HOME/.ssh/config" | awk '{print $2}' | fzf --border=rounded --reverse)
# Return if the fzf selection was cancelled (empty result).
if [ -z "$TARGET" ]; then
    exit 1
fi
# Connect to the chosen alias.
ssh "$TARGET"
| true
|
f18443c7207430a084550bc5a1eb3ea1b239b71f
|
Shell
|
viking333/NPM_downloader
|
/old_version/download.sh
|
UTF-8
| 1,656
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Restore a VirtualBox VM to a clean base snapshot, download npm packages
# inside it, pull the zipped results back to the host, and virus-scan them
# (Sophos savscan + clamscan). The guest's SSH is forwarded to host port 2222.
live_machines="$(VBoxManage list runningvms)"
#echo $live_machines
# Power the VM off first if it is currently running; a snapshot restore
# requires the machine to be down.
if [[ $live_machines =~ .*Centos7_Minimal.* ]]
then
echo "###### Taking down vm for restoration process ######"
VBoxManage controlvm Centos7_Minimal poweroff
fi
sleep 3s
echo -e "\n###### Restoring VM to base snapshot ######"
VBoxManage snapshot Centos7_Minimal restore "Base_image"
echo -e "\n###### Starting VM ######"
VBoxManage startvm --type headless Centos7_Minimal
# This ssh doubles as a readiness probe: it blocks until sshd accepts the
# connection.
echo -e "\n###### Waiting for the machine to start up SSH process (this may take upto a minute) ######"
ssh -p 2222 viking@127.0.0.1 "echo -e '\n###### SSH is ready ######'"
echo -e "\n###### Uploading packages.txt to the guest machine ######"
scp -P 2222 packages.txt viking@127.0.0.1:/home/viking/download/npm/packages.txt
echo -e "\n###### Downloading packages ######"
ssh -p 2222 viking@127.0.0.1 "python /home/viking/download/npm/download.py /home/viking/download/npm/packages.txt"
echo -e "\n###### Zipping up packages ######"
ssh -p 2222 viking@127.0.0.1 "zip -r npm_packages.zip packages"
#ssh -p 2222 viking@127.0.0.1 "zip -r node_modules.zip node_modules"
if [ ! -d "download" ]
then
echo -e "\n###### Creating downloads folder on host machine"
mkdir download
fi
echo -e "\n###### downloading zip files #######"
scp -P 2222 viking@127.0.0.1:/home/viking/npm_packages.zip download/
#scp -P 2222 viking@127.0.0.1:/home/viking/node_modules.zip download/
echo -e "\n###### Powering down the VM ######"
VBoxManage controlvm Centos7_Minimal poweroff
echo -e "\n###### scanning packages ######"
savscan -f -c -all -dn -archive download/ && clamscan -r download/
| true
|
d543d1ea132f522ddb71bdb0018adcc0ef55e1d1
|
Shell
|
OpenWord3/opt_s5
|
/script/script_vpn.sh
|
UTF-8
| 4,002
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive OpenVPN (x.509 / easy-rsa) administration menu.
# First run (no server config yet): offers to build the CA, server key/cert
# and DH params. Later runs: start/stop the server, create/revoke clients,
# toggle client-to-client, or wipe the whole configuration.
# User-facing prompts are in French and are left untouched.
# The presence of tun1.conf marks an already-configured server.
verif=`ls /opt/vpn/x.509/server/ | grep tun1.conf`
if [ -z $verif ];then
echo "Vous n'avez pas encore de serveur, voulez-vous créer un ? oui/non"
read reponse
if [ $reponse == "oui" ];then
server="server_x509"
echo "choisissez une option"
echo "1- client-to-client"
echo "2- sans client-to-client"
read reponse2
# Both arms build the same PKI; they differ only in the argument passed to
# script_ecriture.sh ("2" = client-to-client enabled, "1" = disabled).
case $reponse2 in
"1")
cd /opt/vpn/x.509/easy-rsa/
source vars
./clean-all
./build-ca
./build-key-server $server
./build-dh
cp /opt/vpn/x.509/easy-rsa/keys/$server.key /opt/vpn/x.509/server/
cp /opt/vpn/x.509/easy-rsa/keys/$server.crt /opt/vpn/x.509/server/
/opt/script/script_ecriture.sh "2"
echo "le serveur $server à bien été créé"
;;
"2")
cd /opt/vpn/x.509/easy-rsa/
source vars
./clean-all
./build-ca
./build-key-server $server
./build-dh
cp /opt/vpn/x.509/easy-rsa/keys/$server.key /opt/vpn/x.509/server/
cp /opt/vpn/x.509/easy-rsa/keys/$server.crt /opt/vpn/x.509/server/
/opt/script/script_ecriture.sh "1"
echo "le serveur $server à bien été créé"
;;
esac
fi
else
echo "Que souhaitez-vous faire ?"
# Re-display the menu until a choice in 1..7 is entered.
# NOTE(review): a non-numeric answer makes '-gt' error out — confirm this
# is acceptable for interactive use.
reponse3=0
while [ "$reponse3" -gt 7 ] || [ "$reponse3" -lt 1 ]
do
echo "1- Lancer la configuration du serveur"
echo "2- Stopper la configuration du serveur"
echo "3- Créer un client"
echo "4- Révoquer un client"
echo "5- Actionner l'option client-to-client"
echo "6- Désactiver l'option client-to-client"
echo "7- Supprimer de façon définitive la configuration du serveur (ATTENTION)"
read reponse3
done
case $reponse3 in
"1")
# Start the server in the foreground with maximum verbosity.
openvpn --config /opt/vpn/x.509/server/tun1.conf --verb 6
echo "Le serveur est bien lancé"
;;
"2")
pkill openvpn
echo "Le serveur a bien été stopper"
;;
"3")
# Prompt for a client name until one is chosen that has no existing key.
existe="oui"
while [ "$existe" == "oui" ]
do
echo "Entrez le nom du client"
read client
verif2=`ls /opt/vpn/x.509/clients/ | grep "^$client.key"`
if [ "$verif2" != "" ];then
echo "Ce nom de client est déjà utilisé, entrez un autre nom"
else
existe="non"
fi
done
cd /opt/vpn/x.509/easy-rsa/
source vars
./build-key $client
cp /opt/vpn/x.509/easy-rsa/keys/$client.* /opt/vpn/x.509/clients/
;;
"4")
echo "Choisissez le client que vous souhaitez révoquer"
# Build a numbered list of client names: take the basename of every *.key
# (7th '/' field of the fixed path), then drop the CA and server entries.
ls -Ad /opt/vpn/x.509/easy-rsa/keys/*.key | cut -d"/" -f7 | cut -d"." -f1 > random
sed '/^ca$/d' random > random1 && mv -f random1 random; rm -f random1
sed '/^server_x509/d' random > random1 && mv -f random1 random; rm -f random1
IFS=$'\n'
tableau=( $( cat random ) )
i=0
while [ "$i" -lt "${#tableau[*]}" ]
do
echo $((i+1))- ${tableau[$i]}
let i++
done
# The menu is 1-based but the array is 0-based, hence the decrement.
read h
let h--
client_revoque=${tableau[$h]}
rm random
cd /opt/vpn/x.509/easy-rsa/
source vars
./revoke-full $client_revoque
rm /opt/vpn/x.509/easy-rsa/keys/$client_revoque.*
mv /opt/vpn/x.509/clients/$client_revoque.* /opt/vpn/x.509/clients/archives/
# Publish the updated certificate revocation list to the server.
cp /opt/vpn/x.509/easy-rsa/keys/crl.pem /opt/vpn/x.509/server/
;;
"5")
/opt/script/script_ecriture.sh "2"
echo "L'option client-to-client a bien été activée"
;;
"6")
/opt/script/script_ecriture.sh "1"
echo "L'option client-to-client a bien été désactivée"
;;
"7")
echo "Êtes-vous sûr de vouloir supprimer la configuration du serveur ? oui/non"
read reponse4
if [ $reponse4 == "oui" ];then
rm /opt/vpn/x.509/server/tun1.conf
cd /opt/vpn/x.509/easy-rsa/
source vars
./clean-all
rm /opt/vpn/x.509/server/*
# NOTE(review): the trailing dot makes this match only files ending in
# '.' — looks like a typo for '*' or '*.*'; confirm intent.
rm /opt/vpn/x.509/clients/*.
rm /opt/vpn/x.509/clients/archives/*
echo "La suppression de la configuration a bien été effectuée"
fi
;;
esac
fi
| true
|
6c3dc9e778798669e050016370e3975678355695
|
Shell
|
senorcarbone/flink
|
/ec2-scripts/rsync-data.sh
|
UTF-8
| 597
| 3.4375
| 3
|
[
"BSD-3-Clause",
"ISC",
"Apache-2.0",
"OFL-1.1",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# Push a locally built Flink distribution and its configuration to an EC2
# host ($1 = external IP used for ssh), pointing the config at the job
# manager's internal address ($2), then fan the slaves list out to the
# other servers.
if [ "$#" -ne 2 ]; then
    printf "Usage: ./rsync-data.sh EXTERNAL_IP INTERNAL_IP\n\nBefore: copy all internal ips of the task managers to the slaves file!"
    # BUG FIX: the script previously printed the usage text but then kept
    # going with empty arguments; abort instead.
    exit 1
fi
IP=$1
INTERNAL_IP=$2
# Substitute the job manager placeholder with the internal address.
sed "s/JOBMANAGER_PLACEHOLDER/${INTERNAL_IP}/g" flink-conf.yaml > flink-conf-amazon.yaml
rsync -avz ../flink-dist/target/flink-1.2-WATERMARKS-bin/flink-1.2-WATERMARKS/ -e ssh "ubuntu@${IP}:watermarks"
scp flink-conf-amazon.yaml "ubuntu@${IP}:watermarks/conf/flink-conf.yaml"
scp slaves "ubuntu@${IP}:watermarks/conf/slaves"
scp slaves "ubuntu@${IP}:otherservers"
# Let the remote host distribute the files to the other servers.
ssh "ubuntu@${IP}" ./copytoothers.sh
| true
|
1a7f4573be59f36862fa823a3ebf3790bbd74c49
|
Shell
|
AntDen/datasets
|
/code/datasets/data-o/slave
|
UTF-8
| 935
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Require the DataSetsCli_name environment variable and mirror it into
# MY_DATASET_NAME; abort when it is missing.
check_dataset_info() {
    if [[ -n "${DataSetsCli_name}" ]]; then
        MY_DATASET_NAME="${DataSetsCli_name}"
    else
        echo "DataSetsCli_name is undefined"
        exit
    fi
}
# Ensure the dataset mount-point directory exists.
# BUG FIX: the original used 'test -f' (regular file) on what is a
# directory, so an existing mount directory fell into the else branch and
# made 'mkdir' fail; test with -d and create parents idempotently.
check_mount_point() {
    if test -d "$MOUNT_POINT"; then
        echo "MOUNT_POINT exists"
    else
        mkdir -p "$MOUNT_POINT"
    fi
}
# Absolute path of the batfs FUSE client binary.
BATFS_BIN=/opt/mydan/dan/antden/code/datasets/data-o/batfs
# Report whether the batfs binary is present; informational only — the
# caller is not stopped when it is missing.
check_batfs() {
    if [[ -f "$BATFS_BIN" ]]; then
        echo "find $BATFS_BIN"
    else
        echo "$BATFS_BIN is not exists"
    fi
}
# Entry point: requires DataSetsCli_name in the environment; mounts or
# unmounts the dataset at /mnt/<name> depending on the flag given.
check_dataset_info
MOUNT_POINT="/mnt/$MY_DATASET_NAME"
while (( "$#" )) ; do
case $1 in
--mount)
check_mount_point
check_batfs
# Mount the dataset bucket at the mount point via the batfs FUSE client
# (MY_DATASET_NAME and DataSetsCli_name hold the same value; use the
# derived MOUNT_POINT consistently).
$BATFS_BIN -o allow_other --type-cache-ttl=60m --stat-cache-ttl=60m --endpoint=http://abc-storage.ainirobot.net:8080 --cheap "$MY_DATASET_NAME" "$MOUNT_POINT"
exit 1
;;
--umount)
# BUG FIX: MOUNT_POINT already contains the /mnt/ prefix; the original
# 'umount /mnt/$MOUNT_POINT' targeted the bogus path /mnt//mnt/<name>.
umount "$MOUNT_POINT"
exit 2
;;
esac
shift
done
| true
|
d65ce154dd39375c719740d49e8c60bfbd65d877
|
Shell
|
rifflearning/edu-docker
|
/bin/AWS-CF-tunnel
|
UTF-8
| 2,046
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# This script must be sourced
#
# Set up the environment to deploy to a Riff AWS docker swarm
# Takes 1 argument, the swarm environment name (staging, beta, etc)
# defaults to 'staging'
# After this script is sourced: the python environment is activated,
# a tunnel to an aws docker manager is created and the DOCKER_HOST points at it,
# other production deployment environment variables are set.
# Test this is being run from the root of the edu-docker repo working dir
# ('return' rather than 'exit' because the script is sourced).
if [[ ! ( -e Makefile && -e bin/docker-swarm.py && -e bin/deploy-vars ) ]]; then
echo "You do not seem to have sourced this script from the root of the edu-docker working directory."
echo "Change to the edu-docker working directory and run:"
echo " . bin/tunnel"
echo
return
fi
# Set the AWS stack variables (the stack suffix and the region, keypair maps) and the
# derived variables for '$1' (DEPLOY_SWARM, AWS_CF_STACK_NAME, AWS_STACK_REGION, AWS_STACK_KEYPAIR)
source bin/aws_stack_vars ${1:-staging}
# if $1 was invalid DEPLOY_SWARM won't be set
if [[ ! -v DEPLOY_SWARM ]]
then
echo "$1 is not a valid swarm name."
echo "The swarm name must be one of: ${SWARM_NAMES[@]}"
return
fi
echo "Environment is set up to deploy to the AWS \"${1:-staging}\" swarm"
# Only pass --region when a region was resolved for this stack.
REGION_OPT=${AWS_STACK_REGION:+"--region=$AWS_STACK_REGION"}
source bin/deploy-vars
source activate
# Open the ssh tunnel to a swarm manager; DOCKER_HOST then targets it.
bin/docker-swarm.py tunnel $REGION_OPT ${AWS_CF_STACK_NAME}
export DOCKER_HOST=localhost:2374
echo Updating docker base images and images used directly:
make pull-images
# this works but the Makefile is more up-to-date so the above pull-images is better
#DOCKER_BASE_IMAGES=(ubuntu mysql:5.7 mongo nginx node:10 mhart/alpine-node:10)
#echo ${DOCKER_BASE_IMAGES[@]}
#echo ${DOCKER_BASE_IMAGES[@]} | xargs -n 1 docker pull
#docker images
make show-env
echo
echo 'Run the following if the env settings are good'
echo '(or edit bin/deploy-vars and source it first):'
echo ' make clean build-prod'
echo ' make push-prod'
echo ' make deploy-stack'
echo
| true
|
48fa684eaa6790a9f1e498c7f2b2c4b5a181983d
|
Shell
|
ramsal/SysAdminTools
|
/TORpi.sh
|
UTF-8
| 8,347
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
#ramsal
# TorPi hotspot installer: globals, user-tunable settings, paths of the
# system config files to patch, and the config fragments that get spliced
# into them between ###THSStart/###THSEnd markers.
# Global variables
# ANSI colors
c_black='\u001b[30m'
c_red='\u001b[31m'
c_green='\u001b[32m'
c_yellow='\u001b[33m'
c_blue='\u001b[34m'
c_magenta='\u001b[35m'
c_cyan='\u001b[36m'
c_white='\u001b[37m'
c_no='\u001b[0m'
# config (defaults offered to the user; IP-derived values are recomputed
# later from the chosen gateway address)
CFG_SSID="tor"
CFG_PWD=""
CFG_GW_IP="192.168.42.1"
CFG_GW_MASK="255.255.255.0"
CFG_GW_PREFIX="24"
CFG_GW_NETWORK="192.168.42.0"
CFG_GW_BROADCAST="192.168.42.255"
CFG_DHCP_START="100"
CFG_DHCP_END="200"
# config files (system files patched in place, with timestamped backups)
CFG_INTERFACES="/etc/network/interfaces"
CFG_DNSMASQ_CONF="/etc/dnsmasq.conf"
CFG_DHCPCD_CONF="/etc/dhcpcd.conf"
CFG_HOSTAPD_CONF="/etc/hostapd/hostapd.conf"
CFG_HOSTAPD="/etc/default/hostapd"
CFG_SYSCTL_CONF="/etc/sysctl.conf"
CFG_TORRC="/etc/tor/torrc"
CFG_RC_LOCAL="/etc/rc.local"
# config templates — placeholders such as IP_ADDR/SSID/PWD are filled via
# sed before insertion; trailing backslashes keep each template a single
# sed replacement string.
INTERFACES_CONFIG='###THSStart\
auto lo\
iface lo inet loopback\
\
auto eth0\
iface eth0 inet dhcp\
\
allow-hotplug wlan0\
iface wlan0 inet static\
address IP_ADDR\
netmask MASK\
network NETWORK\
broadcast BROADCAST\
###THSEnd'
DNSMASQ_CONFIG='###THSStart\
interface=wlan0\
listen-address=IP_ADDR\
bind-interfaces\
domain-needed\
bogus-priv\
dhcp-range=IP_START,IP_END,24h\
###THSEnd'
DHCPCONF_CONFIG='###THSStart\
denyinterfaces wlan0\
###THSEnd'
HOSTAPDCONF_CONFIG='###THSStart\
interface=wlan0\
driver=nl80211\
ssid=SSID\
hw_mode=g\
channel=7\
wmm_enabled=0\
macaddr_acl=0\
auth_algs=1\
ignore_broadcast_ssid=0\
wpa=2\
wpa_passphrase=PWD\
wpa_key_mgmt=WPA-PSK\
wpa_pairwise=TKIP\
rsn_pairwise=CCMP\
###THSEnd'
HOSTAPD_CONFIG='###THSStart\
DAEMON_CONF="\/etc\/hostapd\/hostapd.conf"\
###THSEnd'
SYSCTL_CONFIG='###THSStart\
net.ipv4.ip_forward=1\
###THSEnd'
TORRC_CONFIG='###THSStart\
Log notice file \/var\/log\/tor\/notices.log\
VirtualAddrNetwork 10.192.0.0\/10\
AutomapHostsSuffixes .onion,.exit\
AutomapHostsOnResolve 1\
TransPort 9040\
TransListenAddress IP_ADDR\
DNSPort 53\
DNSListenAddress IP_ADDR\
###THSEnd'
RCLOCAL_CONFIG='iptables-restore < \/etc\/iptables.ipv4.nat'
# Global functions
# Internal: emit a message wrapped in the given ANSI color sequence
# (escape sequences in the text are interpreted by echo -e).
_colored_msg() {
echo -e "${1}${2}${c_no}"
}
# Plain, uncolored message.
msg() {
echo -e "$1"
}
# Magenta: general information.
msgInfo() {
_colored_msg "$c_magenta" "$1"
}
# Cyan: progress commentary.
msgComment() {
_colored_msg "$c_cyan" "$1"
}
# Green: success.
msgSuccess() {
_colored_msg "$c_green" "$1"
}
# Yellow: warning.
msgWarning() {
_colored_msg "$c_yellow" "$1"
}
# Red: error.
msgError() {
_colored_msg "$c_red" "$1"
}
# Prompt with a default: $1 is the prompt text, $2 is the NAME of the
# variable to read into; its current value is shown (in green) as the
# default and kept when the user just presses enter.
# NOTE(review): the 'eval' assigns unquoted, so input containing spaces or
# shell metacharacters will be split/evaluated — confirm callers only feed
# simple tokens.
readDefault() {
read -p "$(echo -e $1 [${c_green}${!2}${c_no}]: )" INPUT
eval $2="${INPUT:-${!2}}"
}
# Prompt twice (silently) for a password and store it in the variable whose
# NAME is $2. Loops until both entries match, the password is non-empty and
# it is at least 8 characters long. $1 is the prompt text.
readPassword() {
IS_PWD_CORRECT=0
while [ $IS_PWD_CORRECT != 1 ]
do
read -s -p "$1: " IN_PWD1
echo
read -s -p "Retype password: " IN_PWD2
echo
if [ "$IN_PWD1" != "$IN_PWD2" ]; then
msgError "Passwords are not equal!!!"
elif [ -z $IN_PWD1 ]; then
msgError "Password must not be empty!!!"
elif [ ${#IN_PWD1} -le 7 ]; then
msgError "Password must be at least 8 characters!!!"
else
IS_PWD_CORRECT=1
fi
done
# NOTE(review): unquoted eval — a password containing spaces or shell
# metacharacters would break this assignment; confirm acceptable.
eval $2=$IN_PWD1
}
# Derive netmask, CIDR prefix, network and broadcast addresses for a
# dotted-quad IPv4 address, keyed off the first octet (10 -> /8,
# 172 -> /16, anything else including 192 -> /24).
#   $1 - IPv4 address
#   $2 - name of an associative array to fill (keys: MASK, PREFIX,
#        NETWORK, BROADCAST)
getNetworkInfo() {
declare -n info=$2
local -a oct
IFS=. read -r -a oct <<< "$1"
firstByte=${1%%.*}
case $firstByte in
10)
info[MASK]="255.0.0.0"
info[PREFIX]="8"
info[NETWORK]="${oct[0]}.0.0.0"
info[BROADCAST]="${oct[0]}.255.255.255"
;;
172)
info[MASK]="255.255.0.0"
info[PREFIX]="16"
info[NETWORK]="${oct[0]}.${oct[1]}.0.0"
info[BROADCAST]="${oct[0]}.${oct[1]}.255.255"
;;
*)
# 192.x.y.z and every other first octet fall back to a /24.
info[MASK]="255.255.255.0"
info[PREFIX]="24"
info[NETWORK]="${oct[0]}.${oct[1]}.${oct[2]}.0"
info[BROADCAST]="${oct[0]}.${oct[1]}.${oct[2]}.255"
;;
esac
}
# Main code
# Collect settings interactively, install packages, then patch each system
# config file. The repeated sed expression replaces an existing
# ###THSStart/###THSEnd marker block with the rendered template, or —
# presumably via the hold-space dance in its second half — appends one at
# end-of-file when no block exists yet (TODO confirm the append path).
msgInfo "TorPi Hotspot Installer - https://torpi.me"
msgComment "First, let's collect some information about how you want to configure your TorPi..."
readDefault "Choose TorPi WiFi SSID" CFG_SSID
readPassword "Choose TorPi WiFi password" CFG_PWD
readDefault "Choose TorPi IP address for default gateway and SSH admin" CFG_GW_IP
# Recompute mask/network/broadcast from the chosen gateway address.
declare -A NETWORK_INFO
getNetworkInfo $CFG_GW_IP NETWORK_INFO
CFG_GW_MASK=${NETWORK_INFO[MASK]}
CFG_GW_PREFIX=${NETWORK_INFO[PREFIX]}
CFG_GW_NETWORK=${NETWORK_INFO[NETWORK]}
CFG_GW_BROADCAST=${NETWORK_INFO[BROADCAST]}
msgError "MASK: $CFG_GW_MASK PREFIX: $CFG_GW_PREFIX NETWORK: $CFG_GW_NETWORK BROADCAST: $CFG_GW_BROADCAST"
# Turn the DHCP start/end host numbers into full addresses on the gateway's
# subnet by replacing the last octet.
CFG_DHCP_START=$(echo $CFG_GW_IP | sed -r "s/[^.+]$/$CFG_DHCP_START/")
CFG_DHCP_END=$(echo $CFG_GW_IP | sed -r "s/[^.+]$/$CFG_DHCP_END/")
readDefault "TorPi DHCP start address " CFG_DHCP_START
readDefault "TorPi DHCP end address " CFG_DHCP_END
msgSuccess "That's all the info we need! Commencing installation..."
msgComment "Updating system repo and installing TorPi packages..."
sudo apt-get update
sudo apt-get -y upgrade
msgComment "Installing required packages"
sudo apt-get -y install hostapd dnsmasq tor unattended-upgrades
msgSuccess "Required packages installed!"
msgComment "Configuring TorPi..."
msgComment "Stopping services"
sudo systemctl stop dnsmasq
sudo systemctl stop hostapd
sudo systemctl stop tor
# Suffix for the timestamped .bak copies sed -i leaves beside each file.
TIMESTAMP=$(date +".%Y%m%d%H%M%S.bak")
msgComment "Configuring interfaces"
INTERFACES_CONFIG=$(echo "$INTERFACES_CONFIG" | sed "s/IP_ADDR/$CFG_GW_IP/;s/MASK/$CFG_GW_MASK/;s/NETWORK/$CFG_GW_NETWORK/;s/BROADCAST/$CFG_GW_BROADCAST/")
sudo sed -i$TIMESTAMP "/###THSStart/,/###THSEnd/{h;/###THSEnd/s/.*/$INTERFACES_CONFIG/;t;d;};$ {x;/^$/{s//$INTERFACES_CONFIG/;H};x}" $CFG_INTERFACES
msgComment "Configuring dnsmasq.conf"
DNSMASQ_CONFIG=$(echo "$DNSMASQ_CONFIG" | sed "s/IP_ADDR/$CFG_GW_IP/;s/IP_START/$CFG_DHCP_START/;s/IP_END/$CFG_DHCP_END/")
sudo sed -i$TIMESTAMP "/###THSStart/,/###THSEnd/{h;/###THSEnd/s/.*/$DNSMASQ_CONFIG/;t;d;};$ {x;/^$/{s//$DNSMASQ_CONFIG/;H};x}" $CFG_DNSMASQ_CONF
msgComment "Configuring dhcpcd.conf"
DHCPCONF_CONFIG=$(echo "$DHCPCONF_CONFIG" | sed "s/IP_ADDR/$CFG_GW_IP/;s/PREFIX/$CFG_GW_PREFIX/")
sudo sed -i$TIMESTAMP "/###THSStart/,/###THSEnd/{h;/###THSEnd/s/.*/$DHCPCONF_CONFIG/;t;d;};$ {x;/^$/{s//$DHCPCONF_CONFIG/;H};x}" $CFG_DHCPCD_CONF
msgComment "Configuring hostapd.conf"
# hostapd.conf is regenerated from scratch (truncated first).
sudo echo "" > $CFG_HOSTAPD_CONF
HOSTAPDCONF_CONFIG=$(echo "$HOSTAPDCONF_CONFIG" | sed "s/SSID/$CFG_SSID/;s/PWD/$CFG_PWD/")
sudo sed -i$TIMESTAMP "/###THSStart/,/###THSEnd/{h;/###THSEnd/s/.*/$HOSTAPDCONF_CONFIG/;t;d;};$ {x;/^$/{s//$HOSTAPDCONF_CONFIG/;H};x}" $CFG_HOSTAPD_CONF
msgComment "Configuring hostapd"
sudo sed -i$TIMESTAMP "/###THSStart/,/###THSEnd/{h;/###THSEnd/s/.*/$HOSTAPD_CONFIG/;t;d;};$ {x;/^$/{s//$HOSTAPD_CONFIG/;H};x}" $CFG_HOSTAPD
msgComment "Configuring sysctl.conf"
sudo sed -i$TIMESTAMP "/###THSStart/,/###THSEnd/{h;/###THSEnd/s/.*/$SYSCTL_CONFIG/;t;d;};$ {x;/^$/{s//$SYSCTL_CONFIG/;H};x}" $CFG_SYSCTL_CONF
msgComment "Configuring torrc"
TORRC_CONFIG=$(echo "$TORRC_CONFIG" | sed "s/IP_ADDR/$CFG_GW_IP/")
sudo sed -i$TIMESTAMP "/###THSStart/,/###THSEnd/{h;/###THSEnd/s/.*/$TORRC_CONFIG/;t;d;};$ {x;/^$/{s//$TORRC_CONFIG/;H};x}" $CFG_TORRC
msgComment "Configuring iptables: Enable forwarding"
sudo sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
msgComment "Configuring iptables: Flushing rules"
sudo iptables -F && sudo iptables -t nat -F
msgComment "Configuring iptables: Setting up NAT"
sudo iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
sudo iptables -A FORWARD -i eth0 -o wlan0 -m state --state RELATED,ESTABLISHED -j ACCEPT
sudo iptables -A FORWARD -i wlan0 -o eth0 -j ACCEPT
msgComment "Configuring iptables: Dropping SSH at eth0"
sudo iptables -A INPUT -i eth0 -p tcp --dport 22 -j DROP
msgComment "Configuring iptables: Opening SSH on wlan0"
sudo iptables -t nat -A PREROUTING -i wlan0 -p tcp --dport 22 -j REDIRECT --to-ports 22
msgComment "Configuring iptables: Opening tor ports on wlan0"
# DNS goes to tor's DNSPort, all TCP SYNs to tor's TransPort.
sudo iptables -t nat -A PREROUTING -i wlan0 -p udp --dport 53 -j REDIRECT --to-ports 53
sudo iptables -t nat -A PREROUTING -i wlan0 -p tcp --syn -j REDIRECT --to-ports 9040
msgComment "Configuring iptables: Saving iptables"
sudo sh -c "iptables-save > /etc/iptables.ipv4.nat"
msgComment "Configuring rc.local"
# Insert the restore command before 'exit 0' unless it is already present.
sudo grep -q "$RCLOCAL_CONFIG" $CFG_RC_LOCAL || sed -i$TIMESTAMP "/^exit 0/i $RCLOCAL_CONFIG" $CFG_RC_LOCAL
msgComment "Starting wlan0"
sudo ifup wlan0
msgComment "Starting services"
sudo systemctl start hostapd
sudo systemctl start dnsmasq
sudo systemctl start tor
msgSuccess "Done! The system will now reboot."
sudo reboot now
| true
|
8e560606432fd359db59ec0b84304b4b4623ade9
|
Shell
|
AustinScola/seligimus
|
/scripts/test_source_distribution_contents.sh
|
UTF-8
| 3,297
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Verify that the built source distribution contains exactly the expected
# files: build the expected list, derive all parent directories, and diff
# it against the tarball's listing (non-empty diff fails via set -e).
set -eu
NEWLINE=$'\n'
HERE="$(dirname "$(readlink -f "$BASH_SOURCE")")"
SELIGIMUS="$(realpath "${HERE}/..")"
DISTRIBUTION_DIRECTORY="${SELIGIMUS}/dist"
# Find all source distributions in the distribution directory.
VERSION_FILE="${SELIGIMUS}/VERSION.txt"
VERSION="$(cat "${VERSION_FILE}")"
SOURCE_DISTRIBUTION="${DISTRIBUTION_DIRECTORY}/seligimus-${VERSION}.tar.gz"
if ! [[ -a "${SOURCE_DISTRIBUTION}" ]]; then
echo "ERROR: Expected the source distribution to be located at at '${SOURCE_DISTRIBUTION}' but"\
"it was not there."
exit 1
fi
echo "Found source distribution '${SOURCE_DISTRIBUTION}'."
# Get a list of files in the source distribution.
contents="$(tar --list --file ${SOURCE_DISTRIBUTION})"
# Sort the contents.
# NOTE(review): this sort lacks --unique while the expected-contents sort
# below uses it — confirm the tar listing can never contain duplicates.
contents=$(echo "${contents}" | sort -t "/")
# Create a list of the files expected to be in the source distribution.
expected_contents=""
# Add the file which tells mypy that the Python files have inline type annotations.
expected_contents+="seligimus/py.typed"
# Add the Python files in the seligimus package to the list of expected contents.
PYTHON_FILES="$(find "${SELIGIMUS}/seligimus" -type f -name "*.py" -printf "seligimus/%P\n")"
expected_contents+="${NEWLINE}${PYTHON_FILES}"
# Add the readme to the list of expected contents.
expected_contents+=$'\nREADME.md'
# Add the version file to the list of expected contents.
expected_contents+=$'\nVERSION.txt'
# Add the license file to the list of expected contents.
expected_contents+=$'\nLICENSE.txt'
# Add files used for building distributions to the list of expected contents.
expected_contents+=$'\nscripts/build.sh'
expected_contents+=$'\nscripts/library/venv.sh'
expected_contents+=$'\nrequirements/basic_requirements.txt'
expected_contents+=$'\nrequirements/frozen/frozen_build_requirements.txt'
expected_contents+=$'\nsetup.py'
expected_contents+=$'\nsetup.cfg'
# Add the egg info files to the list of expected contents.
expected_contents+=$'\nseligimus.egg-info/dependency_links.txt'
expected_contents+=$'\nseligimus.egg-info/PKG-INFO'
expected_contents+=$'\nseligimus.egg-info/SOURCES.txt'
expected_contents+=$'\nseligimus.egg-info/top_level.txt'
expected_contents+=$'\nseligimus.egg-info/not-zip-safe'
# Add the manifest to the list of expected contents.
expected_contents+=$'\nMANIFEST.in'
# Add the package info to the list of expected contents.
expected_contents+=$'\nPKG-INFO'
# Add all the parent directories for each file (tar lists directories as
# separate entries ending in '/').
directories=""
while read expected_file ; do
parent_directory=""
IFS=/ read -ra path_parts <<< "${expected_file}"
# The last part is the file name so remove this.
unset path_parts[-1]
for subdirectory in "${path_parts[@]}"; do
parent_directory+="${subdirectory}/"
directories+="${NEWLINE}${parent_directory}"
done
done <<< "${expected_contents}"
expected_contents+="${NEWLINE}${directories}"
# Sort the expected contents.
expected_contents=$(echo "${expected_contents}" | sort --unique --field-separator "/")
# Add the leading directory to the expected contents.
expected_contents="$(echo "${expected_contents}" | sed "s/^/seligimus-${VERSION}\//")"
# Compare the list of files in the source distribution to the expected list.
diff <(echo "${contents}" ) <(echo "${expected_contents}")
| true
|
b0726fafce29039fcd610021a0c42be6ddc83dcf
|
Shell
|
larrycai/docker-gerrit
|
/start.sh
|
UTF-8
| 361
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Container entrypoint: apply the selected auth scheme, start Gerrit and
# stream its HTTP log to keep the container in the foreground; dump the
# error log when startup fails.
git config -f $GERRIT_HOME/gerrit/etc/gerrit.config auth.type $AUTH_TYPE
if $GERRIT_HOME/gerrit/bin/gerrit.sh start; then
echo "gerrit $GERRIT_VERSION is started successfully with auth.type=$AUTH_TYPE, please login to check."
echo ""
tail -f $GERRIT_HOME/gerrit/logs/httpd_log
else
cat $GERRIT_HOME/gerrit/logs/error_log
fi
| true
|
089e0a6c56095689f36ae8e10bc925fb03e49ff3
|
Shell
|
lkonya/rdb-connector-collection
|
/run_mysql_it_tests.sh
|
UTF-8
| 340
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Ping the MySQL server once; the exit status reflects reachability.
test_mysql() {
mysqladmin ping -h ${MYSQL_HOST} --silent
}
echo "Waiting for ${MYSQL_HOST} to become ready"
count=0
# Poll every 100ms, giving up after 1200 attempts (~2 minutes).
until test_mysql
do
count=$((count + 1))
if [ ${count} -gt 1200 ]
then
echo "Services didn't become ready in time"
exit 1
fi
sleep 0.1
done
# Server is up: run the MySQL integration test suite.
sbt mysql/it:test
| true
|
6a6d3e5723ab8779204f5911d9a4e147deb6e305
|
Shell
|
chris-misa/contools
|
/YARRP/debian/start.sh
|
UTF-8
| 363
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Entrypoint for yarrp containers
#
# For documentation see readme file.
#
# 2018, Chris Misa
#
# Runs either:  <yarrp arguments>   (default: passed straight to yarrp)
#          or:  batch <script>      (run the given script instead)
USAGE="Usage: [docker commands] yarrp-debian <yarrp arguments> | batch <script>"
if [ "$1" = "batch" ]
then
    if [ -z "$2" ]
    then
        # Quoted so the message's spacing is preserved and no globbing occurs.
        echo "$USAGE"
        exit 1
    else
        $2
        echo "Script returned $?"
    fi
else
    # BUG FIX: "$@" must be quoted — the unquoted form re-splits arguments
    # containing whitespace or glob characters before they reach yarrp.
    yarrp "$@"
    echo "Yarrp returned $?"
fi
| true
|
c45ebe39539a174c284d589d8e3e9a44a1e25eef
|
Shell
|
blackrussian84/workshop
|
/vault/CH02/init.sh
|
UTF-8
| 567
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Enable Vault's database secrets engine against the MySQL container and
# define a role that mints short-lived read-only credentials.
set -x
# Load .env variables
# (exports every non-comment KEY=VALUE pair; note this xargs trick assumes
# values contain no whitespace — confirm for this .env)
export $(egrep -v '^#' .env | xargs)
vault secrets enable database
# Register the MySQL connection; {{username}}/{{password}} are Vault
# template placeholders, not shell expansions.
vault write database/config/my-database \
plugin_name=mysql-database-plugin \
connection_url="{{username}}:{{password}}@tcp(mysql:3306)/" \
allowed_roles=my-role username=${MYSQL_ROOT_USERNAME} password=${MYSQL_ROOT_PASSWORD}
# Role that creates a SELECT-only MySQL user per lease (1h default, 2h max).
vault write database/roles/my-role \
db_name=my-database \
creation_statements="CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';" \
default_ttl="1h" \
max_ttl="2h"
| true
|
b5cabfb3edabd53eb185fd036055cbc5a48a513a
|
Shell
|
ardinor/misc
|
/Scripts/Backup Scripts/backup_vars.sh
|
UTF-8
| 335
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Configuration variables for the backup scripts (meant to be sourced).
#
# Fixes: arrays are a bash feature, so the shebang must be bash rather than
# sh; and the bare <USER>/<HOST>/<PORT> placeholders parsed as shell
# redirections (making the file unsourceable), so they are now quoted
# strings for the user to replace with real values.
# Paths to watch / back up.
WATCH_LIST[0]=/dir
WATCH_LIST[1]=/path/to
WATCH_LIST[2]=/file/to/watch
# Tool locations.
RSYNC=/usr/bin/rsync
SSH=/usr/bin/ssh
# SSH key
KEY=/key/location
# Remote user
RUSER="<USER>"
# Remote host
RHOST="<HOST>"
# Remote port
RPORT="<PORT>"
# Remote base path
RPATH=/remote/server
# Log file to store rsync errors in
ERROR_LOG=/path/to/error/log
| true
|
746529770b16d02d311f87c5aac0d3287e30abef
|
Shell
|
argoproj/argo-events
|
/hack/library.sh
|
UTF-8
| 1,381
| 3.734375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Root of the git working tree this script lives in.
readonly REPO_ROOT="$(git rev-parse --show-toplevel)"
# Display a box banner.
# Parameters: $1 - character to use for the box.
#             $2 - banner message.
function make_banner() {
  local text="$1$1$1$1 $2 $1$1$1$1"
  local edge="${text//[-0-9A-Za-z _.,:\/()]/$1}"
  printf '%s\n%s\n%s\n' "$edge" "$text" "$edge"
}
# Simple header for logging purposes (message upper-cased, '+' box).
function header() {
  make_banner "+" "$(tr a-z A-Z <<< "$1")"
}
# Simple subheader for logging purposes ('-' box).
function subheader() {
  make_banner "-" "$1"
}
# Simple warning banner for logging purposes ('!' box).
function warning() {
  make_banner "!" "$1"
}
# Create a throw-away GOPATH whose src tree symlinks back to this repo, so
# tools that require the canonical import path can run. The temp dir is
# removed by an EXIT trap (NOTE: this replaces any previously installed
# EXIT trap).
function make_fake_paths() {
FAKE_GOPATH="$(mktemp -d)"
trap 'rm -rf ${FAKE_GOPATH}' EXIT
FAKE_REPOPATH="${FAKE_GOPATH}/src/github.com/argoproj/argo-events"
mkdir -p "$(dirname "${FAKE_REPOPATH}")" && ln -s "${REPO_ROOT}" "${FAKE_REPOPATH}"
}
# Populate the vendor/ directory from go.mod.
ensure_vendor() {
  go mod vendor
}
# Abort with an install hint when pandoc is not on PATH.
ensure_pandoc() {
  if ! command -v pandoc > /dev/null; then
    warning "Please install pandoc with - brew install pandoc"
    exit 1
  fi
}
# Abort with an install hint when the protobuf compiler is not on PATH.
ensure_protobuf() {
  if ! command -v protoc > /dev/null; then
    warning "Please install protobuf with - brew install protobuf"
    exit 1
  fi
}
# Abort with an install hint when mockery is not on PATH.
ensure_mockery() {
  if ! command -v mockery > /dev/null; then
    warning "Please install mockery with - brew install vektra/tap/mockery"
    exit 1
  fi
}
| true
|
8567acf43352226cc4936e26d88b17e6dc3a6db6
|
Shell
|
riscv/riscv-crypto
|
/tools/toolchain-conf.sh
|
UTF-8
| 357
| 2.625
| 3
|
[
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
# Configure the RISC-V toolchain build in a clean build directory.
# REPO_HOME and INSTALL_DIR come from the caller's environment; refresh_dir
# and the DIR_TOOLCHAIN* variables are presumably provided by share.sh —
# confirm against tools/share.sh.
source $REPO_HOME/tools/share.sh
set -e
set -x
export RISCV=$INSTALL_DIR
mkdir -p $INSTALL_DIR
# ------ Spike -------------------------------------------------------------
refresh_dir $DIR_TOOLCHAIN_BUILD
cd $DIR_TOOLCHAIN_BUILD
$DIR_TOOLCHAIN/configure \
--prefix=$INSTALL_DIR \
--enable-multilib \
--disable-gdb
| true
|
a923dd59429d317e71c2917a95cb65f5203245dd
|
Shell
|
ykalidin/be-tools
|
/cloud/docker/gvproviders/consul/setup.sh
|
UTF-8
| 577
| 2.890625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2019. TIBCO Software Inc.
# This file is subject to the license terms contained in the license file that is distributed with this file.
#
# Installs the tooling the consul global-variable provider needs at runtime:
# the jq JSON processor and the consul CLI, placed in the provider directory.
echo "Setting up consul gv provider.."
cd /home/tibco/be/gvproviders/consul
apt-get install -y wget
# Download jq.
# Static linux64 binary from the upstream release page; renamed and made
# executable so scripts can call ./jq.
wget "https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64"
mv jq-linux64 jq
chmod +x jq
# Download consul cli and extract it.
wget "https://releases.hashicorp.com/consul/1.6.1/consul_1.6.1_linux_amd64.zip"
unzip consul_1.6.1_linux_amd64.zip
rm consul_1.6.1_linux_amd64.zip
| true
|
6d8dec9061446d8c68c6a823cb34baf2ae20b230
|
Shell
|
reumont/av_hmm_pipeline
|
/run_jackhmmer.sh
|
UTF-8
| 1,985
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Adapted JackHmmer script based on the animal venomics hmmer pipeline (av_hmmer_pipeline, see *greatfireball*, see *AnimalVenomics*)
# For single or multiple sequences (.fas) located in /alignment folder a jackhmmer search is performed against protein translated assembly in folder /assemblies.
# Matching sequences are extracted via Seqfilter.
# Take sequence(s) from fas file.
# Files are read line-by-line instead of word-splitting $(find ...) so paths
# containing whitespace survive intact.
while IFS= read -r sequencefas
do
	# create a temporary file for the ids used later as SeqFilter input
	# (mktemp replaces the deprecated 'tempfile' utility, which is no longer
	# shipped by current Debian/Ubuntu releases)
	TEMP_ID_FILE=$(mktemp)
	# run jackhmmer for sequence(s) fas.file(s) against protein assembl(ies) in /assemblies ending in *.fa
	while IFS= read -r assemblyfile
	do
		date +"[%Y-%m-%d %H:%M:%S] Started jackhmmer for '${sequencefas}' vs '${assemblyfile}'"
		OUTFILE_BASENAME=alignments/$(basename "${sequencefas}")_vs_$(basename "${assemblyfile}")
		jackhmmer -T15 \
			--tblout "${OUTFILE_BASENAME}".tblout \
			--domtblout "${OUTFILE_BASENAME}".domtblout \
			--chkhmm "${OUTFILE_BASENAME}".chkmmtblout \
			"${sequencefas}" \
			"${assemblyfile}" \
			> "${OUTFILE_BASENAME}".out
		date +"[%Y-%m-%d %H:%M:%S] Finished jackhmmer for '${sequencefas}' vs '${assemblyfile}'"
		date +"[%Y-%m-%d %H:%M:%S] Starting sequence extraction for '${sequencefas}' vs '${assemblyfile}'"
		# Collect the unique target ids from the tabular output, skipping
		# '#' comment lines, for use as the SeqFilter id list.
		grep -v "^#" "${OUTFILE_BASENAME}".tblout | \
			cut -f 1 -d " " | sort | uniq >"${TEMP_ID_FILE}"
		if [ -s "${TEMP_ID_FILE}" ]
		then
			SeqFilter/bin/SeqFilter -o "${OUTFILE_BASENAME}".fasta --ids "${TEMP_ID_FILE}" "${assemblyfile}"
		else
			echo ":/ so sad... no match for this potential toxin '${OUTFILE_BASENAME}.fasta', but generating empty file while my guitar gently weeps..."
			touch "${OUTFILE_BASENAME}".fasta
		fi
		date +"[%Y-%m-%d %H:%M:%S] Finished sequence extraction for '${sequencefas}' vs '${assemblyfile}'"
	done < <(find assemblies/ -type f | grep -P "\.fa[^.]*$")
	# BUGFIX: delete the per-query temp file inside the loop. The original
	# removed it only once after both loops, leaking one temp file per
	# query sequence.
	rm "${TEMP_ID_FILE}"
done < <(find alignments/ -type f | grep -P "\.fa[^.]*$")
| true
|
f7769df2a70e5b6c100f3cb8895c88e95a84cdd0
|
Shell
|
trousev/is_utf8
|
/test.sh
|
UTF-8
| 550
| 2.609375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
# Smoke tests for the ./is_utf8 binary. Each case pipes a string whose tail
# has been transcoded out of UTF-8; is_utf8 must exit 1 and echo only the
# valid UTF-8 prefix. Test operands are quoted — the original used unquoted
# bashism '==' comparisons inside '[', which break if $answer is empty.
answer=$(echo Привет$(echo мир | iconv -f utf-8 -t koi8-r) | ./is_utf8 -)
error_level=$?
[ "$error_level" -eq 1 ] || exit 1
[ "$answer" = "Привет" ] || exit 1
# Same check with a cp1251-encoded (non-UTF-8) tail.
answer=$(echo Привет$(echo мир | iconv -f utf-8 -t cp1251) | ./is_utf8 -)
error_level=$?
[ "$error_level" -eq 1 ] || exit 1
[ "$answer" = "Привет" ] || exit 1
# Pure UTF-8 input: must exit 0 and pass the whole string through.
answer=$(echo Привет$(echo мир | iconv -f utf-8 -t utf-8) | ./is_utf8 -)
error_level=$?
[ "$error_level" -eq 0 ] || exit 1
[ "$answer" = "Приветмир" ] || exit 1
echo "All tests are OK"
exit 0
| true
|
690566595af028e2a46db51ca9885637e258d3bb
|
Shell
|
clf21/genomics-tools
|
/initial_RNA_processing_PE_CF.sh
|
UTF-8
| 3,046
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Initial paired-end RNA-seq processing: concatenate lane-split FASTQs,
# trim the first 3 bases, run FastQC, align with TopHat, estimate FPKMs
# with Cufflinks, and build a mean-normalized bigWig coverage track.
#
# Usage: $0 <single_directory_of_raw_reads> <genome_build>
#
# BUGFIX: the usage line documents two required arguments but the original
# only checked $1, and the usage path exited with status 0. Check both and
# exit non-zero so callers can detect a usage error.
if [ -z "$1" ] || [ -z "$2" ]; then
echo usage: $0 single_directory_of_raw_reads genome_build
exit 1
fi
WD=$1
Build=$2
echo Genome Build Set to: $Build
echo Processing files in : $WD
# Quote "$WD" so a directory path containing spaces does not break the cd.
cd "$WD" || { echo ERROR: could not find $WD , exiting... ; exit 1; }
mkdir raw || { echo 'ERROR: could not make raw directory, exiting...' ; exit 1; }
gunzip -v *.gz || { echo 'ERROR: could not unzip .fastq.gz files, exiting...' ; exit 1; }
# Merge the per-lane read files (sample_R1_001.fastq, ...) into one file per mate.
echo Concatenating R1...
cat *_R1_0??.fastq > ./R1.fastq || { echo 'ERROR: could not concatenate Left reads into one file, exiting...' ; exit 1; }
echo Concatenating R2...
cat *_R2_0??.fastq > ./R2.fastq || { echo 'ERROR: could not concatenate Right reads into one file, exiting...' ; exit 1; }
# Keep bases 4-50 of each read (drops the first 3 bases); -Q33 = Sanger quality.
echo Trimming Off First 3 Bases...
fastx_trimmer -Q33 -f 4 -l 50 -i ./R1.fastq -o ./R1_3Trimmed.fastq || { echo 'ERROR: could not trim Left reads, exiting...' ; exit 1; }
fastx_trimmer -Q33 -f 4 -l 50 -i ./R2.fastq -o ./R2_3Trimmed.fastq || { echo 'ERROR: could not trim Right reads, exiting...' ; exit 1; }
echo Producing QC Report...
fastqc --noextract ./R1.fastq
# Archive the raw inputs and drop the untrimmed concatenated files.
echo Cleaning Up...
gzip *R?_0??.fastq
mv *R?_0??.fastq.gz ./raw/
mv *.csv ./raw/
rm ./R?.fastq
mkdir tophat || { echo 'ERROR: could not make tophat directory for output, exiting...' ; exit 1; }
mkdir logs || { echo 'ERROR: could not make output directory for program logs, exiting...' ; exit 1; }
echo Beginning Alignment...
tophat -r 150 --mate-std-dev 75 --segment-length 23 --library-type fr-unstranded -p 4 --transcriptome-index=/home/clf21/RNA-seq/GTF_knownGenes/compiled_$Build -T -x 4 -n 2 -o ./tophat/ /home/clf21/bin/bowtie-0.12.7/indexes/$Build ./R1_3Trimmed.fastq ./R2_3Trimmed.fastq 2> ./logs/tophat_log.txt
echo Cleaning Up...
rm *3Trimmed.fastq
mkdir cufflinks || { echo 'ERROR: could not make cufflinks directory for output, exiting...' ; exit 1; }
echo Beginning Cufflinks FPKM Estimates...
echo Using Reference Transcriptome found at /home/clf21/RNA-seq/GTF_knownGenes/"$Build"_genes.gtf ...
cufflinks -p 4 -u -o ./cufflinks/ -G /home/clf21/RNA-seq/GTF_knownGenes/"$Build"_genes.gtf ./tophat/accepted_hits.bam 2> ./logs/cufflinks_log.txt
echo Making bigWig coverage track for browser visualization...
/home/clf21/bin/genomeCoverageBed -split -bg -ibam ./tophat/accepted_hits.bam -g /home/clf21/bin/chrom.sizes."$Build".txt > ./tophat/accepted_hits.bedGraph
bedGraphToBigWig ./tophat/accepted_hits.bedGraph /home/clf21/bin/chrom.sizes."$Build".txt ./tophat/accepted_hits.bigWig
rm ./tophat/accepted_hits.bedGraph
echo Normalizing bigWig signal for comparable browser coverage tracks...
/home/clf21/bin/java_genomics_toolkit/toolRunner.sh wigmath.Scale -i ./tophat/accepted_hits.bigWig -o ./tophat/accepted_hits_norm.wig 2> ./logs/wigScale_log.txt
# By default, this will scale the bigWig coverage to a mean of 1
wigToBigWig ./tophat/accepted_hits_norm.wig /home/clf21/bin/chrom.sizes."$Build".txt ./tophat/accepted_hits_scaled.bigWig
rm ./tophat/accepted_hits_norm.wig
echo 'Done. Run appears to have completed successfully.'
| true
|
1a9a491e139801ababb1ee77b1c0804ff442d411
|
Shell
|
aaronstanton/rsf
|
/book/lsewem_recorder/fdmod/02_fdmod.sh
|
UTF-8
| 1,331
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# BUGFIX: the original shebang was /bin/sh, but the {0..35} brace range
# below is a bashism that dash-style /bin/sh implementations do not expand
# (the loop would run once with the literal string "{0..35}"). Run under
# bash explicitly.
export OMP_NUM_THREADS=8
echo "Starting run at: `date`"
# Model 36 shots, spaced dsx metres apart starting at osx.
for isx in {0..35}; do
echo "modelling shot number $isx"
dsx=100
osx=100.0
# Shot x-coordinate for this index (bc handles the float origin).
sx=$(echo $osx+$isx*$dsx | bc)
# shot positions
sfmath output=2 < s_.rsf > zs_$isx.rsf
sfmath output=$sx < s_.rsf > xs_$isx.rsf
sfmath output=1 < s_.rsf > rs_$isx.rsf
sfcat axis=2 space=n xs_$isx.rsf zs_$isx.rsf rs_$isx.rsf | sftransp > src_$isx.rsf
# Isotropic Elastic Finite-difference modeling
sfewefd2d < ewav.rsf \
den=den.rsf rec=rec.rsf sou=src_$isx.rsf ccc=ccc.rsf \
dabc=y snap=n verb=y jdata=2 \
ssou=y nb=250 nbell=5 \
> d_fd_$isx.rsf
sfmath < xr.rsf output="input-$sx" > offset.rsf
# Extract, scale and mute the horizontal (ux) component.
sfwindow n2=1 f2=1 < d_fd_$isx.rsf |
sftransp |
sfput label1=t unit1=s label2=x unit2=m title='ux' |
sfmath output='(1e12)*input' |
sfmutter half=n abs=y t0=0.05 v0=2000 offset=offset.rsf |
sfput d3=100 o3=$sx \
> ux_$isx.rsf
# Extract, scale and mute the vertical (uz) component.
sfwindow n2=1 f2=0 < d_fd_$isx.rsf |
sftransp |
sfput label1=t unit1=s label2=x unit2=m title='uz' |
sfmath output='(1e12)*input' |
sfmutter half=n abs=y t0=0.05 v0=2000 offset=offset.rsf |
sfput d3=100 o3=$sx \
> uz_$isx.rsf
echo "shot $isx finished at: `date`"
done
| true
|
d60fa54d3b2f5ba689585b7c41b7f58f3fff4f09
|
Shell
|
gurrasson/dockerlecture
|
/mediacentre/install_host.sh
|
UTF-8
| 1,090
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a media-centre host: upgrade packages, install Kodi and the
# Hypriot ARM Docker build, open media permissions, and register cron jobs
# that pull update.sh from $UPDATE_IP:$UPDATE_PORT.
# Expects MEDIA_PATH, UPDATE_IP and UPDATE_PORT in the environment
# (export.sh / ~/.bashrc below are presumably where they come from — confirm).
sudo apt-get update
sudo apt-get -y dist-upgrade
sudo apt-get update
sudo apt-get upgrade -y
sudo apt-get install -y cron curl make kodi
# Enable the standalone Kodi service.
sudo perl -pi -e "s/ENABLED=0/ENABLED=1/g" /etc/default/kodi
# Install the Hypriot ARM Docker package and start the daemon in the background.
wget http://downloads.hypriot.com/docker-hypriot_1.9.1-1_armhf.deb
sudo dpkg -i docker-hypriot_1.9.1-1_armhf.deb
sudo docker daemon 1>/dev/null &
rm -f docker-hypriot_1.9.1-1_armhf.deb*
sudo systemctl enable docker
sudo gpasswd -a $USER docker
# BUGFIX: the original ran 'chown -R 777', which changes the *owner* to a
# user/UID named 777 rather than the permissions; the intent is clearly to
# open access to the media tree, so use chmod.
sudo chmod -R 777 "$MEDIA_PATH"
# Give the kodi service user access to the devices it needs.
sudo usermod -a -G audio kodi
sudo usermod -a -G video kodi
sudo usermod -a -G input kodi
sudo usermod -a -G dialout kodi
sudo usermod -a -G plugdev kodi
sudo usermod -a -G tty kodi
./export.sh
source ~/.bashrc
UPDATE_COMMAND="curl $UPDATE_IP:$UPDATE_PORT/update.sh | sh"
crontab -l >> mycron
# BUGFIX: the original 'A && B || C && D' chain appended the @reboot entry
# even when the update job already existed, because (A && B) || C evaluates
# true in that case and D still runs. Use explicit if/else instead.
if grep -q "update.sh" mycron; then
  echo "update already setup"
else
  echo "45 23 * * * $UPDATE_COMMAND" >> mycron
  echo "@reboot $UPDATE_COMMAND" >> mycron
fi
if grep -q "export" mycron; then
  echo "export already setup"
else
  echo "@reboot source $HOME/.flexget/export.sh" >> mycron
fi
crontab mycron
rm mycron
sudo reboot
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.