blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b7ae6802086d29cab94beb1a98206968cd3ec912
|
Shell
|
arnoldrobbins/gt-swt
|
/swt/src/lcl/std.sh/mkclist.sh
|
UTF-8
| 546
| 2.59375
| 3
|
[] |
no_license
|
# mkclist --- make command list for the backstop process
# NOTE(review): this is a Georgia Tech "software tools" (swt) shell script,
# not POSIX sh. 'case/when/out/esac', '[arg N]', the ':MERGE' pipe fitting
# and '=name=' template variables are swt constructs -- verify against the
# swt shell documentation before editing.
# -s appears to select the "internal commands only" variant (mkcl -s);
# any other single argument falls through to the default list; other
# usage prints the Usage message -- TODO confirm against swt case semantics.
declare _search_rule = "^int,=bin=/&,=lbin=/&,&"
case [arg 1]
when -s
lf -c =bin= =lbin= |MERGE _
files .r$ =src=/lib/sh/src/intcmd.u | change _cmd.r$ |MERGE _
:MERGE cat -1 -2 | sort | uniq | =ebin=/mkcl -s
when
lf -c =ubin= =bin= =lbin= |MERGE _
files .r$ =src=/lib/sh/src/intcmd.u | change _cmd.r$ |MERGE _
:MERGE cat -1 -2 | sort | uniq | =ebin=/mkcl
out
error "Usage: "[arg 0]" [-s]"
esac
| true
|
edbaa4cb8ca9351b4713b2b4dae7899b185b656e
|
Shell
|
nima/wmiii
|
/statusbar.d/98-work
|
UTF-8
| 1,984
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# wmii statusbar module: shows hours worked today (rounded to quarter hours).
# Pulls shared helpers (sb_ready, lck, sb_write_cache, ...) from the wmii
# statusbar framework config.
. "${WMII_CONFPATH%%:*}/statusbar.conf"
THEME=BZ
SYMBOL=231B #. Hourglass
SYMBOL=0024 #. Dollar -- overrides the hourglass above; the first assignment is dead
# bail out unless the framework provides the lck helper / DEC profile
requires lck || exit 97
profiles + DEC || exit 96
l_round_time() {
    #. Round an "HH:MM" timestamp to the nearest quarter hour.
    #. $1 - time string "HH:MM[:SS]" (seconds ignored)
    #. $2 - output mode: 0 -> "HHMM", 1 -> "HH.QQ" where QQ is the quarter
    #.      expressed in hundredths of an hour (15 min = .25)
    #. Side effect: leaves the rounded values in the globals hours/minutes.
    local hh mm
    IFS=: read -r hh mm _ <<< "$1"
    #. strip a single leading zero so "08"/"09" are not parsed as octal
    hh=${hh#0}
    mm=${mm#0}
    minutes=$(( (mm + 7) / 15 * 15 ))
    hours=$(( hh + minutes / 60 ))
    minutes=$(( minutes % 60 ))
    case "$2" in
        0) printf "%02d%02d" "${hours}" "${minutes}";;
        1) printf "%02d.%02d" "${hours}" "$(( minutes / 15 * 25 ))";;
    esac
}
# Main: refresh the worked-hours display, guarded by the statusbar lock.
if sb_ready 60; then
    lck ${LOCK_SB_MODULE} on
    DATA='...'
    # Collect today's login records for the current user.
    last | grep "$(whoami).*$(date +'%a %b %_d')" > ${SB_STAGE_F}
    # First login time of the day; the time field shifts by one column
    # when the second field is a pts terminal.
    from="$(awk -F '[ ]+' '{i=6;if($2~/pts/){i++};last=$i};END{print$i}' ${SB_STAGE_F})"
    if [ -n "${from}" ]; then
        to="$(date +'%H:%M')"
        #. break, defaults to 45 minutes -={
        test -f ${SB_STAGE_F}.b || echo 45 > ${SB_STAGE_F}.b
        B=$(cat ${SB_STAGE_F}.b)
        : ${b:=45}
        # Mouse wheel adjusts the break length in 15-minute steps.
        case ${BUTTON} in
            ${MOUSE_M}) ((b=45)) ;;
            ${MOUSE_U}) ((b=B+15));;
            # was 'test B -eq 0' (literal string B): the test always errored,
            # so the break could be decremented below zero
            ${MOUSE_D}) test "$B" -eq 0 || ((b=B-15));;
        esac
        if [ $b -ne $B ]; then
            ((B=b))
            echo $B > ${SB_STAGE_F}.b
        fi
        # break length in decimal hours, trailing zeros trimmed
        b2="$(echo $B/60|bc -l)"
        b2=${b2%%00*}
        #. }=-
        t=$(l_round_time ${to} 0)
        t2=$(l_round_time ${to} 1)
        f=$(l_round_time ${from} 0)
        f2=$(l_round_time ${from} 1)
        # worked time = end - start - break, in decimal hours
        total=$(echo -$f2+$t2-$b2|bc -l)
        w=$(cat ${SB_STAGE_F}|wc -l)
        # left click toggles compact/verbose display
        case $(sb_state ${MOUSE_L}) in
            0) DATA="$(printf "%0.2f" $total)";;
            1) DATA="$(printf "%s..%s@%s = %0.2f/w$(date +'%W')" $f $t $b2 $total)";;
        esac
        # right click: context menu with a "copy formula to clipboard" entry
        case ${BUTTON} in
            ${MOUSE_R})
                declare -A le_menu
                le_menu[copy]="copy: echo -n '-$f2+$t2-$b2'|xclip -selection p -in"
                wm_context_menu "${le_menu[@]}"
                ;;
        esac
    fi
    sb_write_cache "${DATA}"
    lck ${LOCK_SB_MODULE} off
fi
sb_read_cache
| true
|
efc13f6e4230bae5344e504c3a28ca0c39bbca44
|
Shell
|
emiliomarin/TFM-Reputation-Based-Consensus
|
/Development/blockchain.sh
|
UTF-8
| 3,033
| 3.25
| 3
|
[] |
no_license
|
# Follow pre-requisites for your system:
# https://hyperledger.github.io/composer/latest/installing/installing-prereqs.html
# Install the Hyperledger Composer toolchain (pinned to the 0.20 line) and
# fetch + unpack the fabric-dev-servers helper scripts, then download the
# Fabric docker images. Leaves the CWD where it found it.
installTools(){
echo "==> Installing Composer tools"
npm install -g composer-cli@0.20
npm install -g composer-rest-server@0.20
npm install -g generator-hyperledger-composer@0.20
npm install -g yo
npm install -g composer-playground@0.20
npm install -g yarn
# Install Hyperledger Fabric
if [ ! -d "fabric-dev-servers" ]; then
echo "==> Clonning Hyperledger Fabric"
mkdir ./fabric-dev-servers && cd ./fabric-dev-servers
curl -O https://raw.githubusercontent.com/hyperledger/composer-tools/master/packages/fabric-dev-servers/fabric-dev-servers.tar.gz
tar -xvf fabric-dev-servers.tar.gz
else
echo "==> Hyperledger Fabric already installed"
cd ./fabric-dev-servers
fi
echo "==> Downloading Hyperledger Fabric images"
# hlfv12 selects the Fabric v1.2 image set used by these helper scripts
export FABRIC_VERSION=hlfv12
./downloadFabric.sh
cd ..
}
# Start the local Fabric, create the PeerAdmin card, build and deploy the
# BNA via yarn, import the admin card and launch the REST server.
# Assumes the CWD is the repo root (uses ./fabric-dev-servers and dist/).
createNetwork(){
cd ./fabric-dev-servers
echo "==> Starting Network"
./startFabric.sh
echo "==> Creating Peer Admin Card"
./createPeerAdminCard.sh
echo "==> Deploying and starting BNA"
cd ..
# rebuild dist/ from scratch so the deployed BNA is current
rm -r dist/
yarn prepublish
yarn deployBNA
yarn startBNA
# Import admin card
echo "==> Importing Admin Card (Ignore error of Card not found)"
composer card delete -c admin@tfm
composer card import -f adminNetwork.card
echo "==> Starting Rest Server"
# NOTE(review): composer-rest-server runs in the foreground; everything
# after this line only executes once it exits.
composer-rest-server -c admin@tfm
echo "==> You are ready to test!!"
}
# Stop the local Fabric and remove the generated BNA folder.
# fabric-dev-servers itself is intentionally kept (line commented out).
clean(){
cd ./fabric-dev-servers
./stopFabric.sh
cd ..
#rm -r fabric-dev-servers
rm -r xfer-bna
}
# Rebuild the BNA and upgrade the deployed network to a new version.
# $1 - new BNA semantic version; must match the version in package.json.
upgradeBNA(){
rm -r dist/
yarn prepublish
yarn deployBNA
# "$1" quoted so an empty/odd argument can't silently word-split
composer network upgrade --networkName tfm --networkVersion "$1" --card PeerAdmin@hlfv1
}
# Dispatch on the first CLI argument. "${1:-}" is quoted/defaulted so that
# running the script with no arguments falls through cleanly instead of
# producing a '[: =: unary operator expected' error from test.
if [ "${1:-}" = "--fullInstall" ]; then
  installTools
  createNetwork
elif [ "${1:-}" = "--createNetwork" ]; then
  createNetwork
elif [ "${1:-}" = "--start" ]; then
  echo "==> Starting Network"
  cd ./fabric-dev-servers
  ./startFabric.sh
elif [ "${1:-}" = "--stop" ]; then
  echo "==> Stopping Network"
  cd ./fabric-dev-servers
  ./stopFabric.sh
elif [ "${1:-}" = "--clean" ]; then
  clean
elif [ "${1:-}" = "--upgrade" ]; then
  upgradeBNA "$2"
elif [ "${1:-}" = "--help" ]; then
  echo "==> --fullInstall : Install all the tools and creates the network -> Starts nodes, create admin peer card, clones bna repo, prepublishes latest BNA, deploys it and start it. Lastly imports the cards and starts the rest server"
  echo
  echo "==> --createNetwork : Creates the network-> Starts nodes, create admin peer card, clones bna repo, prepublishes latest BNA, deploys it and start it. Lastly imports the cards and starts the rest server"
  echo
  echo "==> --clean : removes fabric and bna folders"
  echo
  # help text used to advertise "--upgradeBNA", but the flag handled above
  # is "--upgrade"
  echo "==> --upgrade 0.2.16 (example) : Installs and upgrades BNA -> IMPORTANT!! REMEMBER TO INCREASE VERSION NUMBER ON PACKAGE.JSON"
fi
| true
|
08a3023731d27f06e14cc029b45698720a039747
|
Shell
|
mjm522/baselines
|
/set_env.bash
|
UTF-8
| 327
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Resolve the directory containing this script (works when invoked through
# a relative path or a symlinked caller).
ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export ROOT_DIR="${ROOT_DIR}"

# Space-separated list of module directories to expose on PYTHONPATH.
MODULES='baselines'

for module in $MODULES; do   # intentionally unquoted: split the list into words
    module_path="$ROOT_DIR/$module"
    echo "adding module: $module_path"
    # Prepend so the local checkout shadows any installed copy; quoted so
    # paths containing spaces survive (they broke in the unquoted original).
    export PYTHONPATH="$module_path:$PYTHONPATH"
done

cd "$ROOT_DIR"
echo "PYTHON PATH IS: $PYTHONPATH"
| true
|
9b494dc7b72085725e9f23ebfe9a402d412bcbd6
|
Shell
|
peter50216/dotfiles
|
/setup/main.sh
|
UTF-8
| 584
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# One-shot bootstrap: run the full dotfiles setup once, then drop a marker
# file (~/.dotfiles/.setup) so subsequent invocations are no-ops.
if [[ -f ~/.dotfiles/.setup ]]; then
  echo -e '\e[1;33mFirst time setup already done! Delete ~/.dotfiles/.setup and ~/.dotfiles/setup/main.sh to run again.\e[m'
else
  echo -e '\e[1;33mFirst time setup! Make sure we have everything installed...\e[m'
  pushd ~/.dotfiles/
  ./setup/run_all.sh
  ret=$?
  popd
  if [[ $ret == 0 ]]; then
    # success: leave the marker so we never run setup again
    touch ~/.dotfiles/.setup
    echo -e '\e[1;33mFirst time setup complete! Delete ~/.dotfiles/.setup and ~/.dotfiles/setup/main.sh to run again.\e[m'
  else
    echo -e '\e[1;31mFirst time setup fail QQ\e[m'
    exit 1
  fi
fi
| true
|
f54da9d04ef40fe0d066dc4ddb50b82937204bca
|
Shell
|
Lenala39/Guessing-Game
|
/guessinggame.sh
|
UTF-8
| 888
| 4.09375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# File: guessinggame.sh
# Interactive game: guess the number of non-directory entries in the CWD.
#store number of files in directory
# NOTE(review): parses 'ls' output -- miscounts filenames containing
# newlines, and counts symlinks/sockets/etc. as "files". Confirm intent.
num_files=$(ls -F | grep -v / | wc -l)
# prompt for first guess and read user input
# ("gues" typo left as-is: it is a runtime string, not a comment)
echo "Please gues how many files (not directories) are in your current working directory:"
read user_guess
#define function to check guess
# Loop until $user_guess equals $num_files, telling the player whether each
# wrong guess was too high or too low. Reads follow-up guesses from stdin.
function check_guess {
    #as long as guess is not correct
    while [[ $user_guess -ne $num_files ]]; do
        # respond with "too high" if the guess is bigger
        # (-gt, not '>': inside [[ ]] '>' compares lexicographically, so the
        # original reported e.g. 9 as higher than 10)
        if [[ $user_guess -gt $num_files ]]; then
            echo "Your guess was too high, please try again!"
        # respond with "too low" if the guess is smaller
        else
            echo "Your guess was too low, please try again!"
        fi
        #ask for another guess
        read user_guess
    done
    # loop exited -> the guess is correct
    echo "Your guess $user_guess was correct! Congratulations"
}

#call function (after definition)
check_guess
| true
|
47b707410c142e5fb32ae74685f12fc64dc0607f
|
Shell
|
YASoftwareDev/dotfiles
|
/install.sh
|
UTF-8
| 10,393
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script should be run by
# ./install.sh
# You can tweak the install behavior by setting variables when running the script. For
# example, to install development packages:
# DEV_PACKAGES=yes sh install.sh
#
# Respects the following environment variables:
# DEV_PACKAGES - install additional developer packages (default: no)
# DOCKER_SETUP - install packages for docker run (default: yes)
# DOTFILES_PACKAGES - install packages for this dotfiles (default: yes)
# PIP_PACKAGES - install pip and virtualenv (default: no)
# NERD_FONTS - clone and install nerdfonts (default: no)
# UPDATE_PACKAGES - ubuntu apt update & upgrade (default: yes)
# RG_PACKAGE - install ripgrep (default: yes)
# TMUX_PACKAGE - install tmux (default: yes)
# FD_PACKAGE - install fd (default: yes)
# PARALLEL_PACKAGE - install parallel (default: yes)
# CHEAT_PACKAGE - install cheat (default: yes)
# SHELLCHECK_PACKAGE - install shellcheck (default: yes)
# OH_MY_ZSH_PACKAGE - install oh-my-zsh (default: yes)
# ZSH_CUSTOMIZATIONS - use this repo config + custom plugins (default: yes)
# FZF_PACKAGE - install fzf (default: yes)
# DIFF_SO_FANCY - install diff-so-fancy (default: yes)
# CHANGE_SHELL - change shell to zsh (default: yes)
# VIM_CUSTOMIZATIONS - use this repo config + custom plugins (default: yes)
#
# You can also pass some arguments to the install script to set some these options:
# --dev: has the same behavior as setting DEV_PACKAGES to 'yes'
# --no-docker-setup: sets DOCKER_SETUP to 'no'
# --pip: sets PIP_PACKAGES to 'yes'
# --nerd: sets NERD_FONTS to 'yes'
# --no-update: set UPDATE_PACKAGES to 'no'
# --no-rg: sets RG_PACKAGE to 'no'
# --no-tmux: sets TMUX_PACKAGE to 'no'
# --no-fd: sets FD_PACKAGE to 'no'
# --no-parallel: sets PARALLEL_PACKAGE to 'no'
# --no-cheat: set CHEAT_PACKAGE to 'no'
# --no-shellcheck: set SHELLCHECK_PACKAGE to 'no'
# --no-oh-my: sets OH_MY_ZSH_PACKAGE to 'no'
# --no-custom-zsh: sets ZSH_CUSTOMIZATIONS to 'no'
# --no-fzf: sets FZF_PACKAGE to 'no'
# --no-diff: sets DIFF_SO_FANCY to 'no'
# --no-shell-change: sets CHANGE_SHELL to 'no'
# --no-custom-vim: sets VIM_CUSTOMIZATIONS to 'no'
#
# For example:
# sh install.sh --dev
#
# Exit immediately if a command exits with a non-zero status.
set -e
# set input variables
# Each option comes from the environment when set, otherwise takes the
# default documented in the header comment above.
DEV_PACKAGES=${DEV_PACKAGES:-no}
DOCKER_SETUP=${DOCKER_SETUP:-yes}
DOTFILES_PACKAGES=${DOTFILES_PACKAGES:-yes}
PIP_PACKAGES=${PIP_PACKAGES:-no}
NERD_FONTS=${NERD_FONTS:-no}
UPDATE_PACKAGES=${UPDATE_PACKAGES:-yes}
RG_PACKAGE=${RG_PACKAGE:-yes}
TMUX_PACKAGE=${TMUX_PACKAGE:-yes}
FD_PACKAGE=${FD_PACKAGE:-yes}
PARALLEL_PACKAGE=${PARALLEL_PACKAGE:-yes}
CHEAT_PACKAGE=${CHEAT_PACKAGE:-yes}
SHELLCHECK_PACKAGE=${SHELLCHECK_PACKAGE:-yes}
OH_MY_ZSH_PACKAGE=${OH_MY_ZSH_PACKAGE:-yes}
ZSH_CUSTOMIZATIONS=${ZSH_CUSTOMIZATIONS:-yes}
FZF_PACKAGE=${FZF_PACKAGE:-yes}
DIFF_SO_FANCY=${DIFF_SO_FANCY:-yes}
CHANGE_SHELL=${CHANGE_SHELL:-yes}
VIM_CUSTOMIZATIONS=${VIM_CUSTOMIZATIONS:-yes}
# Parse input arguments
# Command-line flags override both defaults and environment variables.
# NOTE(review): --no-dotfiles is handled here but not listed in the header
# comment block -- consider documenting it there.
while [ $# -gt 0 ]; do
case $1 in
--dev) DEV_PACKAGES=yes ;;
--no-docker-setup) DOCKER_SETUP=no ;;
--no-dotfiles) DOTFILES_PACKAGES=no ;;
--pip) PIP_PACKAGES=yes ;;
--nerd) NERD_FONTS=yes ;;
--no-update) UPDATE_PACKAGES=no ;;
--no-rg) RG_PACKAGE=no ;;
--no-tmux) TMUX_PACKAGE=no ;;
--no-fd) FD_PACKAGE=no ;;
--no-parallel) PARALLEL_PACKAGE=no ;;
--no-cheat) CHEAT_PACKAGE=no ;;
--no-shellcheck) SHELLCHECK_PACKAGE=no ;;
--no-oh-my) OH_MY_ZSH_PACKAGE=no ;;
--no-custom-zsh) ZSH_CUSTOMIZATIONS=no ;;
--no-fzf) FZF_PACKAGE=no ;;
--no-diff) DIFF_SO_FANCY=no ;;
--no-shell-change) CHANGE_SHELL=no ;;
--no-custom-vim) VIM_CUSTOMIZATIONS=no ;;
esac
shift
done
# Let's go to business!
# A non-root run cannot use apt/dpkg, so disable everything that needs
# elevated privileges.
if [ "${UID}" -ne 0 ]; then
    echo "Current run as non privileged user means that some packages will not be installed!"
    echo "Also remember to run from directory where you have write access."
    hash curl 2>/dev/null || hash wget 2>/dev/null || { echo >&2 "Without curl or wget this run rather doesn't make sense..."; }
    UPDATE_PACKAGES=no
    DEV_PACKAGES=no
    # was DOCKER_ENV_SETUP -- a variable nothing else reads; the docker
    # check below and the --no-docker-setup flag both use DOCKER_SETUP
    DOCKER_SETUP=no
    DOTFILES_PACKAGES=no
    RG_PACKAGE=no
    FD_PACKAGE=no
fi
# to enable execution from other directories
BASE_DIR="$(dirname "$(readlink -f "$0")")"
cd "${BASE_DIR}"
# Let's start with getting newest stuff from apt.
if [ "${UPDATE_PACKAGES}" = yes ]; then
    apt -yq update
    apt -yq upgrade
fi
# From my perspective below packages are needed only for full development environment.
# Because not all setups need them I leave you with choice based on script input argument.
if [ "${DEV_PACKAGES}" = yes ]; then
    APT_PACKAGES_DEVELOPER_KIT="clang build-essential cmake python3-dev python3-pip python3-venv man-db"
    # shellcheck disable=SC2086 -- the list must word-split into one argument
    # per package; quoting it made apt look for a single package literally
    # named "clang build-essential cmake ...".
    DEBIAN_FRONTEND=noninteractive apt -yq install ${APT_PACKAGES_DEVELOPER_KIT}
fi
# Install missing packages if script is run in base docker container
# (was DOCKER_ENV_SETUP, which is never 'yes': this branch could not run as root)
if [ "${DOCKER_SETUP}" = yes ]; then
    APT_PACKAGES_MISSING_IN_DOCKER="locales"
    DEBIAN_FRONTEND=noninteractive apt -yq install "${APT_PACKAGES_MISSING_IN_DOCKER}"
    update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
fi
# packages used by me (man for manuals, gnupg for confirm authenticity of parallel)
if [ "${DOTFILES_PACKAGES}" = yes ]; then
    APT_PACKAGES_TERMINAL_ENHANCEMENTS="git curl wget vim-gtk3 tmux clipit zsh ranger jq fasd man gnupg"
    # shellcheck disable=SC2086 -- intentional word-splitting: quoting the
    # list passed it to apt as ONE bogus package name.
    DEBIAN_FRONTEND=noninteractive apt -yq install ${APT_PACKAGES_TERMINAL_ENHANCEMENTS}
fi
# I'm not certain if these should be installed globally, so I leave you with choice based on script input argument
if [ "${PIP_PACKAGES}" = yes ]; then
    PIP_PACKAGES_LIST="pip virtualenv"
    # shellcheck disable=SC2086 -- intentional word-splitting, as above
    pip install --upgrade ${PIP_PACKAGES_LIST}
fi
./install-tig.sh
# Install ripgrep (grep on steroids) and customizations
if [ "${RG_PACKAGE}" = yes ]; then
    ./install-ripgrep-on-ubuntu.sh
    mkdir -p ~/.config/ripgrep
    ln -s -f "${BASE_DIR}/ripgrep/rc" ~/.config/ripgrep/rc
fi
# https://github.com/gpakosz/.tmux.git inspired tmux configuration. You can further adjust it later with dotfiles/tmux/ files
if [ ${TMUX_PACKAGE} = yes ]; then
ln -s -f "${BASE_DIR}/tmux/.tmux.conf" ~
ln -s -f "${BASE_DIR}/tmux/.tmux.conf.local" ~
fi
# fd - from Ubuntu 19.04 you can run: sudo apt install fd-find
# but, for now:
# Grab the newest release asset from GitHub and install the .deb directly.
if [ ${FD_PACKAGE} = yes ]; then
FD_LATEST_URL=$(curl --silent "https://api.github.com/repos/sharkdp/fd/releases/latest" | jq -r '.assets[0].browser_download_url')
wget "${FD_LATEST_URL}"
dpkg -i "$(basename "${FD_LATEST_URL}")"
rm "$(basename "${FD_LATEST_URL}")"
fi
# Install highlight
# http://www.andre-simon.de/doku/highlight/en/install.php
# GNU parallel
# http://oletange.blogspot.com/2013/04/why-not-install-gnu-parallel.html
# NOTE(review): piping a remote script straight into bash -- by design here,
# but worth flagging for anyone auditing this installer.
if [ "${PARALLEL_PACKAGE}" = yes ]; then
pushd ~
(wget pi.dk/3 -qO - || curl pi.dk/3/) | bash
popd
fi
# cheat - allows you to create and view interactive cheatsheets on the command-line
# https://github.com/cheat/cheat
if [ "${CHEAT_PACKAGE}" = yes ]; then
cp install-cheat.sh ~
pushd ~
./install-cheat.sh
popd
fi
# ShellCheck - a static analysis tool for shell scripts
# https://github.com/koalaman/shellcheck
if [ "${SHELLCHECK_PACKAGE}" = yes ]; then
./install-shellcheck.sh
fi
# shfmt - A shell parser, formatter, and interpreter. Supports POSIX Shell, Bash, and mksh. Requires Go 1.13 or later.
# https://github.com/mvdan/sh
# NOTE(review): shfmt is gated on SHELLCHECK_PACKAGE -- there is no SHFMT
# flag. Possibly intentional ("shell tooling" bundle); confirm.
if [ "${SHELLCHECK_PACKAGE}" = yes ]; then
./install-shfmt.sh
fi
# oh-my-zsh
if [ "${OH_MY_ZSH_PACKAGE}" = yes ]; then
pushd ~
wget https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh -qO install_oh_my_zsh.sh
sh install_oh_my_zsh.sh --unattended
popd
fi
# enable zsh plugins and show full filepath in shell prompt
if [ "${ZSH_CUSTOMIZATIONS}" = yes ]; then
    ZSH_CUSTOM=~/.oh-my-zsh/custom
    # https:// instead of git:// -- GitHub permanently disabled the
    # unauthenticated git protocol in March 2022, so git:// clones fail.
    git clone https://github.com/zsh-users/zsh-autosuggestions "$ZSH_CUSTOM/plugins/zsh-autosuggestions"
    git clone https://github.com/zdharma/fast-syntax-highlighting.git "$ZSH_CUSTOM/plugins/fast-syntax-highlighting"
    git clone https://github.com/Aloxaf/fzf-tab.git "$ZSH_CUSTOM/plugins/fzf-tab"
    # There is also zsh-syntax-highlighting. At the moment I'm not sure which one is a winner
    #git clone https://github.com/zsh-users/zsh-syntax-highlighting.git $ZSH_CUSTOM/plugins/zsh-syntax-highlighting
    # powerlevel10k (faster than powerlevel9k) and nerd fonts
    git clone --depth=1 https://github.com/romkatv/powerlevel10k.git "$ZSH_CUSTOM/themes/powerlevel10k"
    ln -s -f "${BASE_DIR}/zsh/.zshrc" ~
fi
echo "Whole Nerd-fonts project is too heavy to download - you should rather path individual font that you are using."
echo "Use instructions from: https://kifarunix.com/install-and-setup-zsh-and-oh-my-zsh-on-ubuntu-20-04"
# NOTE(review): the commented-out nerd-fonts block below is mangled -- the
# guard reads '#f [' and '#i' instead of '#if [' / '#fi'. Harmless while
# commented out, but fix before re-enabling.
#f [ "${NERD_FONTS}" = yes ]; then
#git clone --depth=1 https://github.com/ryanoasis/nerd-fonts.git ~/.nerd_fonts
#pushd ~/.nerd-fonts
#./install.sh
#popd
#i
#fzf (ctrl-R ctrl-T)
if [ ${FZF_PACKAGE} = yes ]; then
git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf
~/.fzf/install --all
fi
#diff-so-fancy (https://github.com/so-fancy/diff-so-fancy)
if [ ${DIFF_SO_FANCY} = yes ]; then
mkdir -p ~/.local/bin
wget https://raw.githubusercontent.com/so-fancy/diff-so-fancy/master/third_party/build_fatpack/diff-so-fancy -P ~/.local/bin
chmod u+x ~/.local/bin/diff-so-fancy
git config --global core.pager "diff-so-fancy | less --tabs=4 -RFX"
# update PATH with ~/.local/bin
fi
# TODO: add an option to install custom vim: install-custom-built-vim.sh
# Below are things Vim related. It is possible that you don't want them!
if [ ${VIM_CUSTOMIZATIONS} = yes ]; then
ln -s -f "${BASE_DIR}/vim/.vimrc" ~
ln -s -f "${BASE_DIR}/vim/vimrc_minimal.vim" ~
# headless plugin install; requires vim-plug to be set up by .vimrc
vim +PlugInstall +qall
# custom python folding rules for vim
#mkdir ~/.vim/syntax
#wget https://www.vim.org/scripts/download_script.php?src_id=9584
fi
if [ ${CHANGE_SHELL} = yes ]; then
# zsh should be now default shell, if not, run below command
chsh -s "$(which zsh)"
RUN_EXTRA_COMMAND_IN_THE_END="p10k configure" zsh -i
fi
# other stuff...
#install gnu global
echo "use: https://gist.github.com/y2kr/2ff0d3e1c7f20b0925b2"
echo "check for never link (6.6.4) and later"
|
17b5b973bbd15e019d9873b438efa3a1f5e52a16
|
Shell
|
dmstr/vado-ligure
|
/template/provision-volume.sh
|
UTF-8
| 1,744
| 4.03125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -e

# Provision a dedicated ext4 volume for /var/lib/docker on the secondary
# disk: format it if needed, register it in /etc/fstab by UUID and mount
# it, stopping/restarting docker around the mount. Idempotent: every step
# checks current state before acting.
# (quoted: the original unquoted echo ate the inner single quotes, making
# the banner inconsistent with the "Done:" message at the bottom)
echo "Running 'always disk' provisioning..."

# define device name for disk to check
DISK_DEV_NAME=/dev/sdb
DISK_M_POINT=/var/lib/docker
DOCKER_RUNS=0

echo "check if $DISK_M_POINT mount is configured, otherwise setup disk and mount options"
if mount | grep -qsE "${DISK_M_POINT}[[:space:]]+type[[:space:]]ext4"; then
    echo "Mount for ${DISK_M_POINT} exists"
else
    echo "Init Mounting /var/lib/docker to secondary disk ${DISK_DEV_NAME}"
    echo "check if ${DISK_DEV_NAME} has ext4 fs"
    if file -Ls ${DISK_DEV_NAME} | grep -q ext4; then
        echo 'No mkfs.ext4 needed';
    else
        mkfs.ext4 ${DISK_DEV_NAME};
    fi
    # get UUID for /dev/sdb to check and/or set /etc/fstab entry
    # ($(...) instead of backticks; output looks like UUID=xxxx after
    # stripping the double quotes)
    SDB_UUID=$(blkid ${DISK_DEV_NAME} | awk '{print$2}' | sed -e 's/"//g')
    echo "check /etc/fstab entry for $SDB_UUID on ${DISK_M_POINT}"
    if grep -q "$SDB_UUID" /etc/fstab; then
        echo "/etc/fstab entry exists"
    else
        echo "adding /etc/fstab entry ${SDB_UUID} to ${DISK_M_POINT}"
        echo '# added from vagrant provision script' >> /etc/fstab
        echo "${SDB_UUID} ${DISK_M_POINT} ext4 errors=remount-ro 0 1" >> /etc/fstab
    fi
    # check if docker engine is running, if yes we must stop it before we mount /var/lib/docker
    if service docker status > /dev/null 2>&1; then
        echo 'docker daemon is running, stop it before mount...'
        service docker stop
        DOCKER_RUNS=1
    fi
    echo "mounting ${DISK_M_POINT}"
    mkdir -p ${DISK_M_POINT}
    mount ${DISK_M_POINT}
    # (re)start docker needed?
    if [ $DOCKER_RUNS -eq 1 ]; then
        echo "(re)start docker daemon"
        service docker start
    fi
fi
echo "Done: 'always disk' provisioning"
| true
|
ad8831863100d584c6b19864ac9fe53948dd9d52
|
Shell
|
TechanIO/rpi-usb-gadget
|
/remove.sh
|
UTF-8
| 2,640
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
USBFILE=/usr/local/sbin/usb-gadget.sh
UNITFILE=/lib/systemd/system/usb-gadget.service

# some usefull functions

# Ask the user a yes/no question; succeed only on an explicit yes.
# $1 - optional prompt string (falls back to a default "[y/N]" prompt).
confirm() {
    read -r -p "${1:-Are you sure? [y/N]} " answer
    # accept y/Y/yes in any letter case; everything else is "no"
    if [[ "$answer" =~ ^[yY]([eE][sS])?$ ]]; then
        return 0
    fi
    return 1
}
# Print the disclaimer and bail out unless the user explicitly confirms.
cat << EOF
This script will modify '/boot/config.txt', '/boot/cmdline.txt' and other files.
Warning, It might brick your device!
Do not run unless you understand what it is doing.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Continue with modifications?
EOF
# exit (status 0) when confirm returns false
! confirm && exit
# Undo each installation step of the usb-gadget setup; every step is
# guarded by an existence check so the script can be re-run safely.
if [ -e "$UNITFILE" ]; then
echo "disabling and removing usb-gadget unit"
sudo systemctl disable usb-gadget
# Removed /etc/systemd/system/sysinit.target.wants/usb-gadget.service.
sudo rm "$UNITFILE"
sudo systemctl daemon-reload
fi
if [ -e /etc/usb-gadgets ]; then
echo "removing /etc/usb-gadgets"
sudo rm -Rf /etc/usb-gadgets
fi
if [ -e "$USBFILE" ]; then
echo "removing $USBFILE"
sudo rm "$USBFILE"
fi
if [ -e /etc/dnsmasq.d/usb-gadget ]; then
echo "removing dnsmasq config and uninstalling dnsmasq"
sudo rm /etc/dnsmasq.d/usb-gadget
sudo systemctl stop dnsmasq
# NOTE(review): 'apt purge' without -y will prompt interactively here
sudo apt purge dnsmasq
fi
if [ -e /etc/network/interfaces.d/usb0 ]; then
echo "removing interface config for usb0"
sudo ifdown usb0
sudo rm /etc/network/interfaces.d/usb0
fi
# Strip the lines the installer added to system config files, confirming
# each edit. ('if grep -q ...' directly: the original wrapped grep in
# $(...), executing its empty output as a command -- that only worked by
# accident of bash's exit-status rules for bare substitutions.)
if grep -q modules-load=dwc2 /boot/cmdline.txt; then
    echo
    echo "remove line modules-load=dwc2 from /boot/cmdline.txt"
    if ! confirm ; then
        exit
    fi
    cat /boot/cmdline.txt
    sudo sed -i '${s/ modules-load=dwc2//}' /boot/cmdline.txt
    cat /boot/cmdline.txt
fi
if grep -q 'denyinterfaces usb0' /etc/dhcpcd.conf; then
    echo
    echo "remove line 'denyinterfaces usb0' from /etc/dhcpcd.conf"
    if ! confirm ; then
        exit
    fi
    sudo sed -i '${s/denyinterfaces usb0//}' /etc/dhcpcd.conf
fi
if grep -q '^libcomposite' /etc/modules; then
    echo
    echo "remove line 'libcomposite' from /etc/modules"
    if ! confirm ; then
        exit
    fi
    sudo sed -i '${s/^libcomposite//}' /etc/modules
fi
| true
|
05fe9f2bf5e75b7772c0c9a63ab1f78de0821365
|
Shell
|
petronny/aur3-mirror
|
/terminology-guake-git/PKGBUILD
|
UTF-8
| 1,034
| 2.859375
| 3
|
[] |
no_license
|
# Maintainer: George Kamenov < cybertorture@gmail.com >
#
# This is not part of "terminology" (yet?) , it is for testing purpose only !
#
# Arch Linux PKGBUILD for a guake-style fork of the terminology terminal.
# Sourced and executed by makepkg, which provides $srcdir/$pkgdir and the
# msg helper.
pkgname=terminology-guake-git
pkgver=1
pkgrel=2
pkgdesc="Terminal emulator for e17, successor of previous eterm with extention 'guake-like'"
arch=('i686' 'x86_64')
groups=('e17-extra-svn')
url="http://www.enlightenment.org/p.php?p=about/terminology"
license=('BSD')
depends=('ecore' 'enlightenment17')
makedepends=('git')
provides=('terminology')
conflicts=('terminology-svn' 'terminology')
options=('!libtool')
# source/md5sums empty on purpose: the repo is cloned manually in build()
# (pre-VCS-pkgver era style; modern PKGBUILDs would use a git source entry)
source=()
md5sums=()
_gitroot="git://github.com/bearnik/terminology-guake.git"
# Clone (or update) the upstream repo, then autogen + make.
# NOTE(review): git:// protocol is disabled on GitHub since 2022 -- this
# clone will fail until _gitroot is switched to https://.
build() {
msg "Connecting to GIT server..."
if [[ -d $srcdir/$pkgname-$pkgver ]]; then
cd $srcdir/$pkgname-$pkgver && git pull origin
else
git clone $_gitroot $srcdir/$pkgname-$pkgver
cd $srcdir/$pkgname-$pkgver
fi
msg "GIT checkout done or server timeout"
./autogen.sh --prefix=/usr
make
}
# Standard staged install into $pkgdir.
package() {
cd $srcdir/$pkgname-$pkgver
make DESTDIR=$pkgdir PREFIX=$pkgdir/usr install
}
| true
|
d4e49959f3788a2fd1948858ebf0fc2991eef14e
|
Shell
|
xubyxiaobao/docker-cluster
|
/docker-registry/save-images.sh
|
UTF-8
| 1,396
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
# On a machine with internet access, pull and save the required docker
# images as tarballs, then pack them into one archive. Upload the archive
# to the intranet and run init-registry.sh to push the images into the
# private registry. (Original comments were Chinese; echo strings kept.)
base_dir=$(cd $(dirname $0);pwd)
source ./env.sh
mkdir -p ${base_dir}/$IMAGES_TAR_NAME
download_path=${base_dir}/$IMAGES_TAR_NAME
# intentionally unquoted: split $SAVE_IMAGES on whitespace into an array
image_arr=($SAVE_IMAGES)
# pull each image from the remote registry (skipped if already local)
for image in ${image_arr[@]}
do
    # quoted: with multiple matching images the unquoted original fed
    # several words to 'test -z' and errored out
    if [ -z "$(docker images $image -q)" ]; then
        echo -e "\033[33m开始从远程仓库拉取镜像 $image\033[0m"
        docker pull $image
        if [ $? -ne 0 ]; then
            # \033 (ESC), not \031 -- the original escape printed garbage
            echo -e "\033[33m镜像${image}拉取失败\033[0m"
            # was 'exit127' (missing space): command not found, so the
            # script kept going after a failed pull
            exit 127
        fi
        echo -e "\033[32m镜像${image}已拉取完成\033[0m"
    fi
    file_name=$(echo $image|awk -F'/' '{print $NF}')
    echo -e "\033[33m开始生成${image}的镜像文件\033[0m"
    docker save -o "${download_path}/${file_name}.tar" $image
    if [ $? -ne 0 ]; then
        echo -e "\033[31m镜像${image}保存文件失败\033[0m"
        exit 127
    fi
    echo -e "\033[32m镜像${image}的文件已生成\033[0m"
done
tar_name=$(basename $download_path)
# pack the image directory into a single archive
echo -e "\033[33m开始将已下载的镜像打包,打包文件名:${tar_name}.tar.gz\033[0m"
cd $download_path;
tar -czvf "../${tar_name}.tar.gz" *
echo -e "\033[32m打包完成,打包路径:$download_path${tar_name}.tar.gz\033[0m"
| true
|
eb48dd799143d7938d84909ca77643bc94174bd3
|
Shell
|
JustinHop/Profile
|
/bin/resolution.sh
|
UTF-8
| 255
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Print "<file>\t<WxH>\t[BAD]" for each video passed on the command line.
# A resolution is flagged BAD when it contains none of the common
# dimensions below (as reported by ffprobe).
for f in "$@"; do   # "$@" quoted: filenames with spaces broke the original
    EXTRA=""
    RES=$(ffprobe -hide_banner "$f" 2>&1 | grep Video | grep -oP '\d{3,4}x\d{3,4}')
    # grep -v: succeeds when the (single) line does NOT contain any accepted
    # value -- direct pipeline instead of the original $(...)-as-command trick
    if echo "$RES" | grep -vsqP '(1920|1080|1280|720|540)'; then
        EXTRA="BAD"
    fi
    echo -e "$f\t$RES\t$EXTRA"
done
| true
|
e5d48f2c73cb87b78834a1fdbd4cce6849f92a3f
|
Shell
|
kearnsw/question-type-classification
|
/code/generate_configs.sh
|
UTF-8
| 167
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate $1 k-fold config files from the template configs/$2.json,
# substituting the literal token "placeholder" with each fold number.
# $1 - number of folds; $2 - template basename (without .json)
echo "Generating $1 configs into configs/k-fold ..."
# ${1:-0}: a missing count is a no-op instead of an arithmetic syntax error
for ((n=0;n<${1:-0};n++))
do
    # "$2" quoted so template names with spaces don't word-split
    sed "s|placeholder|$n|" "configs/$2.json" > "configs/k-fold/$2_$n.json"
done
| true
|
eaf56d8a55b0af4aa18d1ed698386383d1324359
|
Shell
|
JacquesJacob/unix-inventory-html
|
/copia_html.sh
|
UTF-8
| 312
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Script copia saida cfg2html para consola
# (copies the cfg2html inventory pages from the remote host to the local
# web root; the commented-out loop below was a local backup step)
# NOTE(review): the commented loop has a latent bug -- $i_`date...` would
# expand the variable "i_", not "$i" followed by "_"; needs ${i}_ if revived.
#for i in `ls /var/www/html/inventory/ | grep .html | cut -d. -f 1`; do cp /var/www/html/inventory/$i.html /var/www/html/inventory/backup/$i_`date +%m.%d.%Y`.html; done
scp -q -o LogLevel=QUIET 15.128.1.132:/inventory/*.html /var/www/html/inventory/
| true
|
e7a5e3f490fa6313747be5173612f0ff4a79c866
|
Shell
|
BoldingBruggeman/hamsom
|
/compile_ECOSMO.sh
|
UTF-8
| 3,270
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# script to start a single ECOSMO run
# that's why runs can be started directly with this file
# written by: Cara Nissen (uda081@student.uib.no), ute.daewel@hzg.de
# bash (not /bin/sh) shebang: the script uses bashisms further down
# ('let', '==' inside '[ ]', 'echo -e') that break when /bin/sh is dash.
echo -e " \n Preparations and start of new model run. \n"
#define model specifications here
#runID_old run Id that should be replaced by the new runID
#runSP run specification "ECOSMOCO2" fo simulation with carbon chemistry
#rm *.o
Mrate_old=2
runSP="ECOSMO"
#runSP="ECOSMOCO2"
runFI="NOFISH"
#runFI="FISH"
runID_old=cr1
runID=ttt
#set compiler options
#DSATLAS : start biology from WOA values
#DSASCII : start physics from ascii climatology
#else remember to define file in code (here n011978)
#DECO2 : use carbon module (check inpur files for Alkalinity and DIC)
#DNOFISH : use model without fish and MB
#DWADDEN : consider specific conditions for wadden sea boundary
#DCARA : consider CARA parameterisation of sediment remineralizeation under hypoxic conditions
#export OPT="-DCOASTDAT -DCARA"
#export OPT="-DSASCII -DCARA -DNCEP"
#export OPT="-DSATLAS -DSASCII -DCARA -DCOASTDAT"
export OPT="-DSATLAS -DSASCII -DCOASTDAT"
#export OPT="-DCARA -DNOFISH"
#export OPT="-DSATLAS -DSASCII"
#export OPT="-DSASCII"
#export OPT="-DSEDPO"
#export OPT="-DPUMP"
#if [ ${runFI} == "NOFISH" ]; then
#cp ECOSMparam_mi0.f ECOSMparam_${runID}.f
#else
#cp ECOSMparam_nsp.f ECOSMparam_${runID}.f
#fi
echo -e "Die Run ID ist $runID \n"
################
# Below this block, nothing needs to be changed
################
# First/last simulation year come from the first and last line of the
# per-run input file.
start=$(cat /work/uda081/ecosmo/input_$runID | head -n1)
duration=$(cat /work/uda081/ecosmo/input_$runID | tail -n1)
let end_year=$duration-1
let end=$start+$end_year
echo -e "Model run starts in $start and model is run for $duration years. \n"
# Make sure one output folder per simulated year exists.
years=$(seq $start $end)
for i in $years
do
if [ -d /work/uda081/ecosmo/north_b/f_out_$i ]; then
echo -e "Folder "f_out_$i" exists"
else
mkdir /work/uda081/ecosmo/north_b/f_out_$i
echo -e "Folder "f_out_$i" didn't exist, but was created."
fi
done
echo -e "All output folders exist. \n"
# create folder with model scripts for runID
# Rewrite the old run id to the new one in the batch/build files IN PLACE.
# NOTE(review): these sed -i edits are only correct once per runID change;
# re-running with the same runID_old is a silent no-op.
# Update sbatchecosmo.pbs
sed -i s/ECOSM${runID_old}/ECOSM${runID}/g sbatchecosmo.pbs
sed -i s/ECOSMO_${runID_old}/ECOSMO_${runID}/g sbatchecosmo.pbs
sed -i s/output${runID_old}/output${runID}/g sbatchecosmo.pbs
# Update sbatchecosmo_co2.pbs
# Update Makefile
sed -i s/ECOSMO_${runID_old}/ECOSMO_${runID}/g Makefile
#sed -i s/bio_${runID_old}/bio_${runID}/g Makefile
#cp Makefile Makefile_${runID}
# Update main_new.F
#sed -i 's/ppp='${runID_old}'/ppp='${runID}'"/g' main_new.F
#sed -i s/ECOSMparam_${runID_old}/ECOSMparam_${runID}/g main_new.F
#cp bio_${runID_old}.F bio_${runID}.F
#sed -i s/ECOSMparam_${runID_old}/ECOSMparam_${runID}/g bio_${runID}.F
#sed -i s/${runID_old}/${runID}/g bio_${runID}.F
# Update main_new.F
# Toggle the extra 5 carbon-chemistry state variables in C_model.f.
if [ ${runSP} == "ECOSMOCO2" ]; then
sed -i s/"(nbio=nbiox)"/"(nbio=nbiox+5)"/g C_model.f
else
sed -i s/"(nbio=nbiox+5)"/"(nbio=nbiox)"/g C_model.f
fi
echo -e "sbatchecosmo.pbs, Makefile and main_new.F are updated. \n"
echo -e "All needed changes made. All files and folders created."
echo -e "Model run can be started. \n"
# keep a snapshot of the build inputs for this run
mkdir track${runID}
cp Makefile track${runID}
cp compile_ECOSMO.sh track${runID}
make
#sbatch sbatchecosmo.pbs
runID_old=${runID}
#done
cd
| true
|
18ec53c04fe6232219845456f81262310ade93f2
|
Shell
|
jwa5426/jake
|
/.circleci/ci-setup.sh
|
UTF-8
| 856
| 2.625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -e
# intended to be run from directory above this one.
# Prepares two virtualenvs (non-dev and dev) with identical base tooling;
# the dev venv additionally gets the development requirements.

# Create (if needed) and activate the virtualenv in directory $1, then
# install the release tooling and the project requirements into it.
# Extracted helper: the original duplicated this block verbatim for each venv.
setup_venv() {
  # Prepend the venv's bin dir so its python/pip shadow the system ones.
  PATH=$WORKSPACE/$1/bin:$PATH
  python3 --version
  if [ ! -d "$1" ]; then
    # use python3 to create the venv
    python3 -m venv "$1"
  fi
  source "$1/bin/activate"
  #pip3 install pipenv
  pip3 install python-semantic-release
  pip3 install -r requirements.txt
}

# non-development environment, I call my virtualenv dir ".venv_non_dev"
setup_venv .venv_non_dev

# development environment, ".venv_dev"
setup_venv .venv_dev
# development only requirements
pip3 install -r requirements-dev.txt
| true
|
ecf629f91b1e8c5675243dc4536d18ade5d3eaca
|
Shell
|
Liryna/android-platform-ndk
|
/build/tools/build-libjpeg-turbo.sh
|
UTF-8
| 11,679
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright (c) 2011-2015 CrystaX.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY CrystaX ''AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CrystaX OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of CrystaX.
# include common function and variable definitions
. `dirname $0`/prebuilt-common.sh
PROGRAM_PARAMETERS="<src-dir>"
PROGRAM_DESCRIPTION=\
"Rebuild the prebuilt libjpeg-turbo binaries for the Android NDK.
This requires a temporary NDK installation containing platforms and
toolchain binaries for all target architectures, as well as the path to
the corresponding libjpeg-turbo source tree.
By default, this will try with the current NDK directory, unless
you use the --ndk-dir=<path> option.
The output will be placed in appropriate sub-directories of
<ndk>/$LIBJPEGTURBO_SUBDIR, but you can override this with the --out-dir=<path>
option.
"
PACKAGE_DIR=
register_var_option "--package-dir=<path>" PACKAGE_DIR "Put prebuilt tarballs into <path>."
NDK_DIR=$ANDROID_NDK_ROOT
register_var_option "--ndk-dir=<path>" NDK_DIR "Specify NDK root path for the build."
BUILD_DIR=
OPTION_BUILD_DIR=
register_var_option "--build-dir=<path>" OPTION_BUILD_DIR "Specify temporary build dir."
OUT_DIR=
register_var_option "--out-dir=<path>" OUT_DIR "Specify output directory directly."
ABIS=$(spaces_to_commas $PREBUILT_ABIS)
register_var_option "--abis=<list>" ABIS "Specify list of target ABIs."
LIBJPEGTURBO_VERSION=
register_var_option "--version=<ver>" LIBJPEGTURBO_VERSION "Specify libjpeg-turbo version to build"
register_try64_option
register_jobs_option
extract_parameters "$@"
LIBJPEGTURBO_SRCDIR=$(echo $PARAMETERS | sed 1q)
if [ -z "$LIBJPEGTURBO_SRCDIR" ]; then
echo "ERROR: Please provide the path to the libjpeg-turbo source tree. See --help" 1>&2
exit 1
fi
if [ ! -d "$LIBJPEGTURBO_SRCDIR" ]; then
echo "ERROR: No such directory: '$LIBJPEGTURBO_SRCDIR'" 1>&2
exit 1
fi
if [ -z "$LIBJPEGTURBO_VERSION" ]; then
echo "ERROR: Please specify libjpeg-turbo version" 1>&2
exit 1
fi
GITHASH=$(git -C $LIBJPEGTURBO_SRCDIR rev-parse --verify v$LIBJPEGTURBO_VERSION 2>/dev/null)
if [ -z "$GITHASH" ]; then
echo "ERROR: Can't find tag v$LIBJPEGTURBO_VERSION in $LIBJPEGTURBO_SRCDIR" 1>&2
exit 1
fi
LIBJPEGTURBO_DSTDIR=$NDK_DIR/$LIBJPEGTURBO_SUBDIR/$LIBJPEGTURBO_VERSION
mkdir -p $LIBJPEGTURBO_DSTDIR
fail_panic "Can't create libjpeg-turbo-$LIBJPEGTURBO_VERSION destination directory: $LIBJPEGTURBO_DSTDIR"
ABIS=$(commas_to_spaces $ABIS)
if [ -z "$OPTION_BUILD_DIR" ]; then
BUILD_DIR=$NDK_TMPDIR/build-libjpeg-turbo
else
eval BUILD_DIR=$OPTION_BUILD_DIR
fi
rm -rf "$BUILD_DIR"
mkdir -p "$BUILD_DIR"
fail_panic "Could not create build directory: $BUILD_DIR"
prepare_target_build
fail_panic "Could not setup target build"
# $1: ABI
# $2: build directory
build_libjpeg_turbo_for_abi()
{
local ABI="$1"
local BUILDDIR="$2"
dump "Building libjpeg-turbo-$LIBJPEGTURBO_VERSION $ABI libraries"
local APILEVEL
case $ABI in
armeabi*|x86|mips)
APILEVEL=9
;;
arm64*|x86_64|mips64)
APILEVEL=21
;;
*)
echo "ERROR: Unknown ABI: '$ABI'" 1>&2
exit 1
esac
local ARCH
case $ABI in
armeabi*)
ARCH=arm
;;
arm64*)
ARCH=arm64
;;
x86|x86_64|mips|mips64)
ARCH=$ABI
;;
*)
echo "ERROR: Unknown ABI: '$ABI'" 1>&2
exit 1
esac
local TOOLCHAIN
case $ABI in
armeabi*)
TOOLCHAIN=arm-linux-androideabi
;;
x86)
TOOLCHAIN=x86
;;
mips)
TOOLCHAIN=mipsel-linux-android
;;
arm64-v8a)
TOOLCHAIN=aarch64-linux-android
;;
x86_64)
TOOLCHAIN=x86_64
;;
mips64)
TOOLCHAIN=mips64el-linux-android
;;
*)
echo "ERROR: Unknown ABI: '$ABI'" 1>&2
exit 1
esac
local SRCDIR="$BUILDDIR/src"
run git clone -b v$LIBJPEGTURBO_VERSION $LIBJPEGTURBO_SRCDIR $SRCDIR
fail_panic "Can't copy libjpeg-turbo-$LIBJPEGTURBO_VERSION sources to temporary directory"
cd $SRCDIR
local HOST
case $ABI in
armeabi*)
HOST=arm-linux-androideabi
;;
arm64*)
HOST=aarch64-linux-android
;;
x86)
HOST=i686-linux-android
;;
x86_64)
HOST=x86_64-linux-android
;;
mips)
HOST=mipsel-linux-android
;;
mips64)
HOST=mips64el-linux-android
;;
*)
echo "ERROR: Unknown ABI: '$ABI'" 1>&2
exit 1
esac
local INSTALLDIR="$BUILDDIR/install"
case $ABI in
armeabi)
CFLAGS="-march=armv5te -mtune=xscale -msoft-float"
;;
armeabi-v7a)
CFLAGS="-march=armv7-a -mfpu=vfpv3-d16 -mfloat-abi=softfp"
;;
armeabi-v7a-hard)
CFLAGS="-march=armv7-a -mfpu=vfpv3-d16 -mhard-float"
;;
*)
CFLAGS=""
esac
case $ABI in
armeabi*)
CFLAGS="$CFLAGS -mthumb"
esac
LDFLAGS=""
if [ "$ABI" = "armeabi-v7a-hard" ]; then
LDFLAGS="$LDFLAGS -Wl,--no-warn-mismatch"
fi
LDFLAGS="$LDFLAGS -L$NDK_DIR/sources/crystax/libs/$ABI"
local TCPREFIX=$NDK_DIR/toolchains/${TOOLCHAIN}-4.9/prebuilt/$HOST_TAG
CC=$BUILDDIR/cc
{
echo "#!/bin/bash"
echo "ARGS="
echo 'NEXT_ARG_IS_SONAME=no'
echo "for p in \"\$@\"; do"
echo ' case $p in'
echo ' -Wl,-soname)'
echo ' NEXT_ARG_IS_SONAME=yes'
echo ' ;;'
echo ' *)'
echo ' if [ "$NEXT_ARG_IS_SONAME" = "yes" ]; then'
echo ' p=$(echo $p | sed "s,\.so.*$,.so,")'
echo ' NEXT_ARG_IS_SONAME=no'
echo ' fi'
echo ' esac'
echo " ARGS=\"\$ARGS \$p\""
echo "done"
echo "exec $TCPREFIX/bin/${HOST}-gcc --sysroot=$NDK_DIR/platforms/android-$APILEVEL/arch-$ARCH \$ARGS"
} >$CC
fail_panic "Can't create cc wrapper"
chmod +x $CC
fail_panic "Can't chmod +x cc wrapper"
CPP="$CC $CFLAGS -E"
AR=$TCPREFIX/bin/${HOST}-ar
RANLIB=$TCPREFIX/bin/${HOST}-ranlib
export CC CPP AR RANLIB
export CFLAGS LDFLAGS
local EXTRA_OPTS=""
case $ABI in
mips)
EXTRA_OPTS="--without-simd"
esac
run ./configure --prefix=$INSTALLDIR \
--host=$HOST \
--enable-shared \
--enable-static \
--with-pic \
--disable-ld-version-script \
$EXTRA_OPTS
fail_panic "Can't configure $ABI libjpeg-turbo-$LIBJPEGTURBO_VERSION"
run make -j$NUM_JOBS
fail_panic "Can't build $ABI libjpeg-turbo-$LIBJPEGTURBO_VERSION"
run make install
fail_panic "Can't install $ABI libjpeg-turbo-$LIBJPEGTURBO_VERSION"
if [ "$LIBJPEGTURBO_HEADERS_INSTALLED" != "yes" ]; then
log "Install libjpeg-turbo-$LIBJPEGTURBO_VERSION headers into $LIBJPEGTURBO_DSTDIR"
run rm -Rf $LIBJPEGTURBO_DSTDIR/include
run rsync -aL $INSTALLDIR/include $LIBJPEGTURBO_DSTDIR/
fail_panic "Can't install $ABI libjpeg-turbo-$LIBJPEGTURBO_VERSION headers"
LIBJPEGTURBO_HEADERS_INSTALLED=yes
export LIBJPEGTURBO_HEADERS_INSTALLED
fi
log "Install libjpeg-turbo-$LIBJPEGTURBO_VERSION $ABI libraries into $LIBJPEGTURBO_DSTDIR"
run mkdir -p $LIBJPEGTURBO_DSTDIR/libs/$ABI
fail_panic "Can't create libjpeg-turbo-$LIBJPEGTURBO_VERSION target $ABI libraries directory"
for LIBSUFFIX in a so; do
rm -f $LIBJPEGTURBO_DSTDIR/libs/$ABI/lib*.$LIBSUFFIX
for f in $(find $INSTALLDIR -name "lib*.$LIBSUFFIX" -print); do
run rsync -aL $f $LIBJPEGTURBO_DSTDIR/libs/$ABI
fail_panic "Can't install $ABI libjpeg-turbo-$LIBJPEGTURBO_VERSION libraries"
done
done
}
if [ -n "$PACKAGE_DIR" ]; then
PACKAGE_NAME="libjpeg-turbo-$LIBJPEGTURBO_VERSION-headers.tar.xz"
echo "Look for: $PACKAGE_NAME"
try_cached_package "$PACKAGE_DIR" "$PACKAGE_NAME" no_exit
if [ $? -eq 0 ]; then
LIBJPEGTURBO_HEADERS_NEED_PACKAGE=no
else
LIBJPEGTURBO_HEADERS_NEED_PACKAGE=yes
fi
fi
BUILT_ABIS=""
for ABI in $ABIS; do
DO_BUILD_PACKAGE=yes
if [ -n "$PACKAGE_DIR" ]; then
PACKAGE_NAME="libjpeg-turbo-$LIBJPEGTURBO_VERSION-libs-$ABI.tar.xz"
echo "Look for: $PACKAGE_NAME"
try_cached_package "$PACKAGE_DIR" "$PACKAGE_NAME" no_exit
if [ $? -eq 0 ]; then
if [ "$LIBJPEGTURBO_HEADERS_NEED_PACKAGE" = "yes" -a -z "$BUILT_ABIS" ]; then
BUILT_ABIS="$BUILT_ABIS $ABI"
else
DO_BUILD_PACKAGE=no
fi
else
BUILT_ABIS="$BUILT_ABIS $ABI"
fi
fi
if [ "$DO_BUILD_PACKAGE" = "yes" ]; then
build_libjpeg_turbo_for_abi "$ABI" "$BUILD_DIR/$ABI"
fi
done
# If needed, package files into tarballs
if [ -n "$PACKAGE_DIR" ]; then
if [ "$LIBJPEGTURBO_HEADERS_NEED_PACKAGE" = "yes" ]; then
FILES="$LIBJPEGTURBO_SUBDIR/$LIBJPEGTURBO_VERSION/include"
PACKAGE_NAME="libjpeg-turbo-$LIBJPEGTURBO_VERSION-headers.tar.xz"
PACKAGE="$PACKAGE_DIR/$PACKAGE_NAME"
dump "Packaging: $PACKAGE"
pack_archive "$PACKAGE" "$NDK_DIR" "$FILES"
fail_panic "Can't package libjpeg-turbo-$LIBJPEGTURBO_VERSION headers!"
cache_package "$PACKAGE_DIR" "$PACKAGE_NAME"
fi
for ABI in $BUILT_ABIS; do
FILES="$LIBJPEGTURBO_SUBDIR/$LIBJPEGTURBO_VERSION/libs/$ABI"
PACKAGE_NAME="libjpeg-turbo-$LIBJPEGTURBO_VERSION-libs-$ABI.tar.xz"
PACKAGE="$PACKAGE_DIR/$PACKAGE_NAME"
dump "Packaging: $PACKAGE"
pack_archive "$PACKAGE" "$NDK_DIR" "$FILES"
fail_panic "Can't package $ABI libjpeg-turbo-$LIBJPEGTURBO_VERSION libraries!"
cache_package "$PACKAGE_DIR" "$PACKAGE_NAME"
done
fi
if [ -z "$OPTION_BUILD_DIR" ]; then
log "Cleaning up..."
rm -Rf $BUILD_DIR
else
log "Don't forget to cleanup: $BUILD_DIR"
fi
log "Done!"
| true
|
0588589fe911e573161f7bb4acbbe90f7a8f26d7
|
Shell
|
petronny/aur3-mirror
|
/linux-lts-ak/XF86MonBrightness.sh
|
UTF-8
| 581
| 3.5
| 4
|
[] |
no_license
|
#!/bin/sh -e
if [[ "$1" = "-h" ]]
then
echo "Use: $(basename $0) [+/-pcnt]"
exit 0
fi
max=$(cat /sys/class/backlight/intel_backlight/max_brightness)
cur=$(cat /sys/class/backlight/intel_backlight/actual_brightness)
if ( echo $1 | egrep "^[-+]{0,1}[0-9]{1,3}$" > /dev/null )
then
change=$1
else
echo $(($cur *100 / $max ))%
exit 0
fi
new=$(( $cur + $change * $max / 100))
[[ $new -gt $max ]] && new=$max
[[ $new -lt 0 ]] && new=0
echo $new > /sys/class/backlight/intel_backlight/brightness
notify-send -u low -h int:value:$(($new *100 / $max )) "Brightness"
| true
|
943c3ea55144ea83a1147e0278ebf2c37505dde4
|
Shell
|
oxford-hack/registration
|
/restart.sh.template
|
UTF-8
| 594
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
export PROD_MODE="True"
# Set up your postgres password
export PG_PWD="password"
# Domain where running
export DOMAIN="my.hackupc.com"
echo "checking updates..."
./env/bin/pip install -r requirements.txt
echo "checking updates...done"
echo "migrating db..."
./env/bin/python manage.py migrate
echo "migrating db...done"
echo "collecting static..."
./env/bin/python manage.py collectstatic --no-input
echo "collecting static...done"
echo "removing all pyc..."
find . -name \*.pyc -delete
echo "removing all pyc...done"
echo "Deploy completed. The game is on!"
| true
|
c697444071f32ab4806ef1740336919dbc1fe567
|
Shell
|
jiho-dev/irongate-vpn
|
/gen_free_cert.sh
|
UTF-8
| 2,540
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
DNS_NAME=""
#DNS_NAME="irongatevpn.p-e.kr"
#DNS_NAME="irongatevpn.kro.kr"
# letsencrypt certonly --webroot --webroot-path=/var/www/html -d irongatevpn.p-e.kr -d www.irongatevpn.p-e.kr
copy_cert() {
# copy /etc/ssl/certs/DST_Root_CA_X3.pem
local LETS_CA_PEM="DST_Root_CA_X3.pem"
local ROOT_CA="/etc/ipsec.d/cacerts/$LETS_CA_PEM"
local SRC_ROOT_CA="/etc/ssl/certs/$LETS_CA_PEM"
local LIVE_DIR="/etc/letsencrypt/live/$DNS_NAME"
local SWANCTL_DIR="/etc/swanctl"
if [ -d $LIVE_DIR ]; then
echo "Copy cert files into ipsec.d"
cp $LIVE_DIR/cert.pem /etc/ipsec.d/certs/
cp $LIVE_DIR/fullchain.pem /etc/ipsec.d/certs/
cp $LIVE_DIR/privkey.pem /etc/ipsec.d/private/
cp $LIVE_DIR/chain.pem /etc/ipsec.d/cacerts/
if [ -d "$SWANCTL_DIR" ]; then
echo "Copy cert files into swanctl"
cp $LIVE_DIR/cert.pem $SWANCTL_DIR/x509/
cp $LIVE_DIR/fullchain.pem $SWANCTL_DIR/x509/
cp $LIVE_DIR/privkey.pem $SWANCTL_DIR/private/
cp $LIVE_DIR/chain.pem $SWANCTL_DIR/x509ca/
fi
#if [ ! -e $ROOT_CA ]; then
# cp $SRC_ROOT_CA $ROOT_CA
#fi
echo "Success !"
fi
}
gen_cert() {
local dns=$DNS_NAME
local www="www.$DNS_NAME"
ufw allow 80/tcp
certbot certonly -m irongate11@gmail.com --agree-tos --standalone -n -d $dns -d $www
# letsencrypt certonly --webroot --webroot-path=/var/www/html -d irongatevpn.p-e.kr -d www.irongatevpn.p-e.kr
ufw delete allow 80/tcp
copy_cert
swanctl -r
swanctl -q
}
renew_cert() {
local ofile=$(readlink -f $LIVE_DIR/cert.pem)
ufw allow 80/tcp
#certbot renew --force-renewal -m irongate11@gmail.com --agree-tos --standalone -n
certbot renew -m irongate11@gmail.com --agree-tos --standalone -n
ufw delete allow 80/tcp
local nfile=$(readlink -f $LIVE_DIR/cert.pem)
if [ "$ofile" != "$nfile" ]; then
copy_cert
swanctl -r
swanctl -q
fi
}
print_usage() {
echo "gen_free_cert -d <your dns name> <-i | -r>"
echo "-i: install free certificate"
echo "-r: renew the certificate installed"
exit 1
}
###########################
CMD=""
while [[ "$#" -gt 0 ]]; do
case $1 in
-d)
DNS_NAME="$2"
shift 2
;;
-i)
CMD="gen"
shift
;;
-r)
CMD="renew"
shift
;;
*)
echo "Unknown parameter passed: $1"
shift
;;
esac;
done
if [ "_$DNS_NAME" == "_" ]; then
echo "DNS_NAME is empyt"
print_usage
fi
if [ "$CMD" == "gen" ]; then
#echo $CMD
#echo "$DNS_NAME"
gen_cert
elif [ "$CMD" == "renew" ]; then
#echo $CMD
#echo "$DNS_NAME"
renew_cert
elif [ "_$CMD" == "_" ]; then
echo "No cmd, assign command"
print_usage
else
echo "Unknown cmd: $CMD"
print_usage
fi
| true
|
25d2d6d90bed6824c538869b96edf4e0d95b5f44
|
Shell
|
freifunk-berlin/ca.berlin.freifunk.net
|
/send-mailreminder.sh
|
UTF-8
| 556
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
SIGNERS_GROUP=tunnelberlin-signer
MAIL_SUBJECT="Tunnel-Berlin - open requests notice"
# nothing to configure down here #
MYDIR=`dirname $0`
# get users of group
MAIL_RCPT=`getent group $SIGNERS_GROUP| cut -d : -f 4`
SIGNCOUNT=`$MYDIR/runscript.sh $MYDIR/manage.py requests show|wc -l`
mail -s "$MAIL_SUBJECT" "$MAIL_RCPT" <<EOF
Hi Certificate-signers,
this is a notice showing you the number of open certificate-requests.
Currently there are $SIGNCOUNT requests to process. Probably you find the time to check by for signing them.
EOF
| true
|
1741474a075a7a77a6f6e72fb4ce31c53d741cb9
|
Shell
|
CrisZhao/linux_scripts
|
/backsql.sh
|
UTF-8
| 1,324
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
#back up databases by dump, and upload *.sql files to remote server.
#before using this, you need to touch a dailyconfig and sundayconfig file with database names inside.
logfile=/tmp/backup/mysqlbackup.log
echo "-------------------------------" >> $logfile
echo "$(date) backupsql starts " >> $logfile
echo "-------------------------------" >> $logfile
#change config file when sunday
configfile=/tmp/backup/dailyconfig
weekday=$(date +%w)
if [ "$weekday" == 0 ]
then
configfile=/tmp/backup/sundayconfig
fi
array=($(cat $configfile|grep -v ^#|grep -v ^$))
for dbname in ${array[@]};
do
echo "woking on $dbname" >> $logfile
dbuser=root
backuppath=/tmp/backup/datafiles/"$weekday"
if [ ! -d "$backuppath" ]
then
mkdir $backuppath
fi
dumpfile="$backuppath"/"$dbname".sql
# oldfile="$backuppath""$dbname"$(date +%y%m%d --date='5 days ago').sql
#delete old file
# if [ -f $oldfile ]
# then
# rm -f $oldfile >> $logfile 2>$1
# echo "delete old file success" >> $logfile
# fi
mysqldump -u $dbuser --opt $dbname > $dumpfile 2>>$logfile
done
#upload to remote server
rsync -axz /tmp/backup/datafiles/ demo@192.168.1.202:~/tmp/sqlbackups/ 2>>$logfile
echo "------------------------------" >>$logfile
echo "[$(date)] work finished" >>$logfile
| true
|
5239a05b2932d60bc2bf87ee91c87d8172bc00b4
|
Shell
|
kenji-hosokawa/rba
|
/unittest/script/JSONGenerator/generate_json.sh
|
UTF-8
| 2,076
| 3.8125
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Linux用 jsonファイル 生成スクリプト
# arbitrator/rba/unittestディレクトリ内のmodelディレクトリのモデルファイルを元に
# RBAModel.jsonファイルを生成し、modelディレクトリと同一階層のtemplate-genディレクトリにコピーする。
# 使い方
# cd arbitrator/rba/unittest/script/JSONGenerator
# ./generate_json.sh
# 2019/04/10現在、JSONGeneratorに不具合があり、jsonを生成できないモデルがあるため、
# JSONGeneratorがエラーを出したプロジェクトは"ErrorProjectList.txt"にリストするように対応
# 2019/12/2
# rbaユニットテスト項目が多く、Linux上で実施すると落ちる可能性があるため、Windows上で実施してください。
workingDir="temp_JSONGenerator"
inputDir="model"
outputDir="template-gen"
outputFileName="RBAModel.json"
errorListFileName="ErrorProjectList.txt"
unittestDirPath="../.."
mkdir -p $workingDir
for test_projectDirPath in `find $unittestDirPath -type d -name '*_test_project'`
do
for inputDirPath in `find $test_projectDirPath -type d -name $inputDir`
do
projectDirPath=${inputDirPath%/$inputDir}
# 対象のプロジェクト名を出力
echo "${projectDirPath##*/}"
# 日本語を含むパスは2019/04/10現在、JSONGeneratorに渡せないため
# modelディレクトリを作業ディレクトリにコピー
rm -rf $workingDir/*
cp -pR $inputDirPath/* $workingDir
# 作業ディレクトリにjsonファイルを生成
java -cp ./ -jar JSONGenerator.jar "./$workingDir" "./$workingDir"
# modelディレクトリと同一階層のtemplate-genディレクトリにコピー
# jsonファイルが生成されていなければErrorProjectList.txtに出力
if [ -e $workingDir/$outputFileName ]
then
cp $workingDir/$outputFileName $projectDirPath/$outputDir/$outputFileName
else
echo $projectDirPath >> $errorListFileName
fi
done
done
| true
|
7c1a8575119524149bf79a25c5a1d75adc2794bb
|
Shell
|
sayali-pathak/scripts
|
/database.sh
|
UTF-8
| 423
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
db_set() {
echo "$1,$2" >> ~/database
}
db_get() {
grep "^$1," ~/database | sed -e "s/^$1,//" | tail -n 1
}
_db_suggest() {
local cur
local stringreply
COMPREPLY=()
cur=${COMP_WORDS[COMP_CWORD]}
if (($COMP_CWORD == 1 ))
then
stringreply=$(cat /home/prashant/database | sed -e "s/,.*//" | grep "$cur" | tr '\n' ' ' )
COMPREPLY=($stringreply)
else
return 0
fi
}
complete -F _db_suggest db_get
| true
|
425cf033af802b77384e76cea0d3b819469f29fe
|
Shell
|
jkimblad/bostadssnabben
|
/bostadssnabben.sh
|
UTF-8
| 1,923
| 3.640625
| 4
|
[] |
no_license
|
#! /bin/bash
#https://pushover.net/
#Pushover key used to send push notification to cellphone
USER_KEY=
#Application token for the script to be allowed API requests to Pushover
APP_TOKEN=
# Send push notification to user when the bash script is up and running!
curl -s \
--form-string "token="$APP_TOKEN \
--form-string "user="$USER_KEY \
--form-string "message=Bostadssnabben up and running!" \
https://api.pushover.net/1/messages.json
# Send push notification to user when the bash script is being killed and notifications wont be delivered!
exit_script() {
curl -s \
--form-string "token="$APP_TOKEN \
--form-string "user="$USER_KEY \
--form-string "message=Bostadssnabben going down, no more notifications!" \
https://api.pushover.net/1/messages.json
}
trap exit_script SIGINT SIGTERM
# Loop forever once a minute
while true
do
#Get search results through CURL and grep interesting lines
RESULT="$(curl -s https://bostad.stockholm.se/Lista/AllaAnnonser | jq | grep Bostadssnabben)"
while read -r line
do
#If the line contains true, its bostadssnabben
if [[ $line == *"true"* ]]; then
# Send push notification using Pushover Application
curl -s \
--form-string "token="$APP_TOKEN \
--form-string "user="$USER_KEY \
--form-string "message=Bostadssnabben, new apartment available!" \
https://api.pushover.net/1/messages.json
fi
done <<< "$RESULT"
# Sleep 1 minute before running script again
sleep 60
done
| true
|
dfce29b3f10f1890008dd0c5e4341705888ae215
|
Shell
|
florianjoerg/project_groupD
|
/bash_scripts/hottestday.sh
|
UTF-8
| 2,000
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
city=$1
Quality=$2
path=../data_files
if [[ ${Quality} == "highQuality" ]]; then
if [[ -f ${path}/hottestday_${Quality}_${city}.txt ]]; then
rm ${path}/hottestday_${Quality}_${city}.txt
fi
Date=`awk '$1~/-01-01/ {print $1; exit 1}' ${path}/oneDayTemp_highQuality_$city.txt`
NR=`awk 'END{print NR}' ${path}/oneDayTemp_highQuality_$city.txt`
next_date=$(date +%Y-%m-%d -d "$Date +$i year")
final_date=`awk 'END {print $1}' ${path}/oneDayTemp_highQuality_$city.txt`
echo "Creating hottestday_${Quality}_${city}.txt data file, this takes a few seconds"
touch ${path}/hottestday_highQuality_${city}.txt
while [[ "${Date}" < "${final_date}" ]]
do
next_date=$(date +%Y-%m-%d -d "$Date +$i year")
stop_date=$(date +%Y-%m-%d -d "$next_date -1 day")
awk -v start="${Date}" -v stop="${stop_date}" 'BEGIN{a=-100; b=substr($1,1,4)} $1 == start,$1 == stop {if ($2>a) {a=$2; b=substr($1,1,4)} fi} END{print b" "a}' ${path}/oneDayTemp_highQuality_$city.txt >> ${path}/hottestday_${Quality}_${city}.txt
Date=${next_date}
done
else
if [[ -f ${path}/hottestday_${Quality}_${city}.txt ]]; then
rm ${path}/hottestday_${Quality}_${city}.txt
fi
Date=`awk '$1~/-01-01/ {print $1; exit 1}' ${path}/oneDayTemp_allEntries_$city.txt`
NR=`awk 'END{print NR}' ${path}/oneDayTemp_allEntries_$city.txt`
next_date=$(date +%Y-%m-%d -d "$Date +$i year")
final_date=`awk 'END {print $1}' ${path}/oneDayTemp_allEntries_$city.txt`
echo "Creating hottestday_${Quality}_${city}.txt data file, this takes a few seconds"
touch ${path}/hottestday_${Quality}_${city}.txt
while [[ "${Date}" < "${final_date}" ]]
do
next_date=$(date +%Y-%m-%d -d "$Date +$i year")
stop_date=$(date +%Y-%m-%d -d "$next_date -1 day")
awk -v start="${Date}" -v stop="${stop_date}" 'BEGIN{a=-100; b=substr($1,1,4)} $1 == start,$1 == stop {if ($2>a) {a=$2; b=substr($1,1,4)} fi} END{print b" "a}' ${path}/oneDayTemp_allEntries_$city.txt >> ${path}/hottestday_${Quality}_${city}.txt
Date=${next_date}
done
fi
| true
|
90445c9f7ac1ccdea59eb30d0435d888ed619751
|
Shell
|
pandu-rao/storm-sample
|
/dds.sh
|
UTF-8
| 271
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# get the address of the dockerized drpc server
# get the container id
nimbus_id=$(docker ps | grep nimbus | awk '{print $1}')
# inspect the container for its ip address
docker inspect $nimbus_id | grep -i ipaddress | awk '{print $2}' | sed -e 's/[",]//g'
| true
|
0b350489c369648cfb9c467dbfa65e4951ecd064
|
Shell
|
pgleghorn/DockerAEM
|
/install/tools/startup.sh
|
UTF-8
| 894
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/sh
# TODO switches
# -jmx check for startup via FrameworkStartLevel 30
# -curl check for startup via response on port
# -tail tail logs during startup
# which logs, optional?
# TODO quit if required vars are missing
$AEM_DIR/crx-quickstart/bin/start
toolsdir=`dirname $0`
LOGS="$AEM_DIR/crx-quickstart/logs/error.log $AEM_DIR/crx-quickstart/logs/stdout.log"
touch $LOGS
# exiting when startup finished?
if [ "$1" = "-exit" ]; then
tail -n0 -f $LOGS &
tailpid=$!
while true; do
r=`java -jar $toolsdir/cmdline-jmxclient-0.10.3.jar - localhost:$AEM_JMXPORT "osgi.core:framework=org.apache.felix.framework,type=framework,uuid=*,version=*" FrameworkStartLevel 2>&1 | grep FrameworkStartLevel`
echo $r
startlevel=`echo $r | awk '{print $6}'`
if [ "$startlevel" = 30 ]; then echo "FINISHED"; break; fi
sleep 1
done
kill $tailpid
# tail forever
else
tail -n0 -f $LOGS
fi
| true
|
a1248b0db9e05c2bbdd9d97842b24d41a004e641
|
Shell
|
christianparobek/utils
|
/multivcf2fasta.sh
|
UTF-8
| 1,459
| 3.46875
| 3
|
[] |
no_license
|
## Script to turn a multivcf into a multifasta
## USAGE: bash multivcf2fasta.sh <vcf> <ref> <gatk-style interval string (single locus)> # <outname>
## Started 14 October 2015
## Christian Parobek
## Define useful variables
gatk=/nas02/apps/biojars-1.0/GenomeAnalysisTK-3.3-0/GenomeAnalysisTK.jar
vcf=$1
ref=$2
interval=$3
#outname=$4
time=`date +"%F_%T"`
## make folder for the files
mkdir "multivcfs_$time"
## run a for loop for each individual in the vcf
for name in `grep "CHROM" $vcf | cut -d$'\t' -f10-`
do
## SPLIT VCF INTO INDIVIDUAL VCFs
java -Xmx2g -jar $gatk \
-T SelectVariants \
--variant $vcf \
-R $ref \
-sn $name \
-o multivcfs_$time/$name.tmp.vcf
### REMOVE ALL NON-ENTRIES IN INDIVIDUAL FILES (PL=0 & GT=0)
grep -vP "PL\t0" multivcfs_$time/$name.tmp.vcf | grep -vP "\tGT\t." > multivcfs_$time/$name.vcf
rm multivcfs_$time/$name.tmp.vcf* # remove the files with the extra entries
##for interval in `ls $interval`
##do
## VCF TO FASTA
java -Xmx2g -jar $gatk \
-T FastaAlternateReferenceMaker \
-R $ref \
--variant multivcfs_$time/$name.vcf \
-o multivcfs_$time/$name.fa
#-L $interval \
#-o multivcfs_$time/$name$interval.fa
#--rawOnelineSeq prints only sequence
echo ">"$name >> multivcfs_$time/$interval.fa
#grep -v ">" multivcfs_$time/$name$interval.fa >> multivcfs_$time/$interval.fa
grep -v ">" multivcfs_$time/$name.fa >> multivcfs_$time/$interval.fa
rm multivcfs_$time/$name.fa
##done
rm multivcfs_$time/*vcf*
done
| true
|
943172a03cb792c3133aa5ddffa1c8581257e4c7
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/perl-ritx/PKGBUILD
|
UTF-8
| 867
| 2.78125
| 3
|
[] |
no_license
|
# Maintainer: Chris Severance aur.severach aATt spamgourmet dott com
# Contributor: Sebastian Neef <aur @ gehaxelt DOT in>
# Neat idea but doesn't seem to work. Probably easy to fix.
set -u
_pkgname='ritx'
pkgname='perl-ritx'
pkgver='1.6'
pkgrel='1'
pkgdesc='discovers domains hosted on the same server as a given IP or domain'
arch=('any')
url="https://code.google.com/p/${pkgname}/"
url='http://www.aldeid.com/wiki/RitX'
license=('GPL2')
depends=('perl')
source=("https://ritx.googlecode.com/files/RitX-Reverse-Ip-Tool-v${pkgver}.zip")
sha256sums=('01495ea9c5e18dc0eec31c90ef2321e73bf2b58408f44e08077c7136e8216010')
prepare() {
set -u
cd "${_pkgname}"
sed -i -e 's:perl $b:$b:g' 'RitX.pl'
set +u
}
package() {
set -u
cd "${_pkgname}"
install -Dm755 'RitX.pl' "${pkgdir}/usr/bin/ritx"
install -Dm644 'README' -t "${pkgdir}/usr/share/doc/ritx/"
set +u
}
set +u
| true
|
a7c72afcada17adb8b91baad6bad04f78ca4c475
|
Shell
|
ZeroWasteTeam/SampleJavaSpringBoot
|
/.github/scripts/ci-assertVersioningRulesFollowed
|
UTF-8
| 454
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
BASE_VESION=$1
MODIFIED_VERSION=$2
NUMBER_OF_VERSION_MODIFIED=$(git diff --name-only "${BASE_VESION}..${MODIFIED_VERSION}" version.txt | wc -l )
if [[ "$NUMBER_OF_VERSION_MODIFIED" -eq 1 ]] ;
then
NUMBER_OF_FILES_MODIFIED=$(git diff --name-only "${BASE_VESION}..${MODIFIED_VERSION}" | wc -l )
if [[ "$NUMBER_OF_FILES_MODIFIED" -gt 1 ]] ;
then
echo "While modifying verion.txt, no other change is allowed" >&2
exit 1
fi
fi
| true
|
272da8dbbc84ed1caa766c8b5db1edff7e11676d
|
Shell
|
cplatini/scripts
|
/provisioning/bin/build_aws/security/allow_mcafee.sh
|
UTF-8
| 708
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Script to allow inbound McAfee to a specified security group
#
# Author:
# Richard DeHaven
#
# Version: 0.0.1
# Date: 2016-10-27
aws_acct=$1
secgroup_id=$2
AWS_CONFIG_FILE=$HOME/.aws/credentials
aws_cli_base="aws --profile ${aws_acct} --output text"
aws_group_allow="${aws_cli_base} ec2 authorize-security-group-ingress"
${aws_group_allow} --group-id "${secgroup_id}" --protocol tcp --port 8334-8335 --cidr 169.70.71.5/32
${aws_group_allow} --group-id "${secgroup_id}" --protocol tcp --port 8334-8335 --cidr 169.10.10.229/32
${aws_group_allow} --group-id "${secgroup_id}" --protocol tcp --port 8334-8335 --cidr 192.28.32.113/32
echo "[INFO] Updated Group ID: ${secgroup_id} with McAfee Rules"
| true
|
26bbb37626f22dcd7a4c39bbe1892896ce4c7c0b
|
Shell
|
toomasr/jspgnviewer
|
/makeRelease.sh
|
UTF-8
| 2,934
| 3.703125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Intended to be used from the project root directory
# or ./res directory
OLD_DIR=`pwd`
PROJ_DIR="."
DEST_DIR="bin"
LIB_DIR="lib"
SRC_DIR="src/main"
WP_DIR="wpPlugin"
WP_IMG_DIR="bin/pgnviewer/img"
TEST_DIR="examples"
IMG_DIR="img"
# functions
genPackedFormat() {
# pack the source with packer
cd $LIB_DIR
if [ "`which php`" = "" ];then
echo "PHP not found. Not using the PHP packer!";
else
php packerConf.php
fi
cd $OLD_DIR
cp $DEST_DIR/jsPgnViewer.js $JS_DEST_DIR/jsPgnViewerUnpacked.js
cp $DEST_DIR/jsPgnViewer.js $WP_DEST_DIR/jsPgnViewerUnpacked.js
java -cp $LIB_DIR/jsmin JSMin $DEST_DIR/jsPgnViewer.js > $JS_DEST_DIR/jsPgnViewer.js
java -cp $LIB_DIR/jsmin JSMin $DEST_DIR/jsPgnViewer.js > $WP_DEST_DIR/jsPgnViewer.js
}
makeRelease() {
if [ ! -d $SRC_DIR ];then
DEST_DIR="../bin"
SRC_DIR="../src"
TEST_DIR="../tests"
IMG_DIR="../img"
WP_DIR="../wpPlugin"
LIB_DIR="../lib"
PROJ_DIR="../"
WP_IMG_DIR="../"$WP_IMG_DIR
fi
WP_DEST_DIR=$DEST_DIR/"pgnviewer"
JS_DEST_DIR=$DEST_DIR/"jspgnviewer"
if [ ! -d $DEST_DIR ];then
mkdir $DEST_DIR
fi
if [ ! -d $JS_DEST_DIR ];then
mkdir $JS_DEST_DIR
fi
if [ ! -d $WP_DEST_DIR ];then
mkdir $WP_DEST_DIR
fi
if [ ! -d $WP_IMG_DIR ];then
mkdir -p $WP_IMG_DIR
fi
JS_VERSION=`cat jsVersion`
echo "/** Version: $JS_VERSION **/" > $JS_DEST_DIR/jsPgnViewer.js
cat $SRC_DIR/chess-game.js >> $JS_DEST_DIR/jsPgnViewer.js
cat $SRC_DIR/converter.js >> $JS_DEST_DIR/jsPgnViewer.js
cat $SRC_DIR/pgn.js >> $JS_DEST_DIR/jsPgnViewer.js
cat $SRC_DIR/yahoo-format.js >> $JS_DEST_DIR/jsPgnViewer.js
cat $SRC_DIR/board.js >> $JS_DEST_DIR/jsPgnViewer.js
cp $JS_DEST_DIR/jsPgnViewer.js $DEST_DIR
cp $TEST_DIR/samplePage.html $JS_DEST_DIR/
cp $SRC_DIR/README.txt $JS_DEST_DIR/
cp License.txt $JS_DEST_DIR/
cp -r $IMG_DIR $JS_DEST_DIR
# WP Plugin release
cp $WP_DIR/pgnviewer.php $WP_DEST_DIR/pgnviewer.php
WP_VERSION=`cat wpVersion`
cp $WP_DIR/* $WP_DEST_DIR
cp -r $IMG_DIR/* $WP_IMG_DIR
cp $JS_DEST_DIR/jsPgnViewer.js $WP_DEST_DIR
chmod -R 775 $DEST_DIR
perl -pi -e "s/WP_VERSION/$WP_VERSION/" $WP_DEST_DIR/pgnviewer.php
# WPR release
cd $DEST_DIR
NAME="pgnviewer-"`cat ../wpVersion`".tar.gz"
tar --exclude=.svn -cvzf $NAME pgnviewer
cd $OLD_DIR
# JSR release
cd $DEST_DIR
NAME="jspgnviewer-"`cat ../jsVersion`".tar.gz"
tar --exclude=.svn -cvzf $NAME jspgnviewer
cd $OLD_DIR
}
if [ $# -ge 1 ];then
if [ $1 == 'clean' ];then
echo "cleaning "$DEST_DIR
rm -rf $DEST_DIR
else
makeRelease
genPackedFormat
fi
else
echo "Usage:"
echo " We have the following targets:"
echo " release - makes a release"
echo " clean - clean the project"
fi
| true
|
0ad9c26b8c59a90904434534d705ecf079015b17
|
Shell
|
dkappler/idsim
|
/scripts/exp_iros/create_experiment.sh
|
UTF-8
| 281
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)"
CONFIG_PATH="${SCRIPT_DIR}/config_experiment.yaml"
source "${SCRIPT_DIR}/../ubash.sh" || exit 1
cd ${PROJECT_DIR}
${VPY_BIN} inverse_dynamics/experiment_config.py \
--fp_config ${CONFIG_PATH}
| true
|
a1029892cf820907d5317d36f8d858c896d9471d
|
Shell
|
hypered/reesd-stack
|
/build-images-helper.sh
|
UTF-8
| 253
| 3.15625
| 3
|
[] |
no_license
|
#! /bin/bash
# Helper script for build-in-vm.sh.
set -e
function log {
echo "$(date --iso-8601=seconds --utc) $1"
}
log "Cloning the repositories..."
git clone git@github.com:noteed/reesd-stack
log "Building images..."
pushd reesd-stack
./build.sh
popd
| true
|
312e03031dccf59ef3a4968073767895162fe0fa
|
Shell
|
jets-/fsrw
|
/scripts/mounted.sh
|
UTF-8
| 178
| 3.171875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Print 0 if $1 resolves to a mounted source via findmnt, 1 otherwise.
# Arguments: $1 - device path (e.g. /dev/sda1)
# Outputs:   "0" (mounted) or "1" (not mounted) on stdout
function main {
    local dev=$1
    local src
    # $(...) instead of backticks; "$dev" quoted so odd paths survive intact.
    src=$(findmnt -rno SOURCE "$dev")
    if [[ -z "$src" ]]; then
        echo 1
    else
        echo 0
    fi
}
main $@
| true
|
aef6794b6d45dc82a11575b30371082bc0a87c19
|
Shell
|
andyneff/docker-vscode
|
/code_entrypoint.bsh
|
UTF-8
| 246
| 3.078125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Container entrypoint: align the in-container "user" account with the
# host-supplied UID/GID, hand ownership of the home dir over, then drop
# privileges and exec the requested command.
set -eu

groupmod user -g ${GROUP_ID} -o
usermod -u ${USER_ID} -o -g user user >& /dev/null
chown -R user:user /home/user

# "code" gets a -w (wait) flag injected; anything else runs verbatim.
case "$1" in
  code)
    shift 1
    exec gosu user code -w "${@}"
    ;;
  *)
    exec gosu user "${@}"
    ;;
esac
| true
|
2eaa55c4eebe80246b89daf781bbe617e2bf1b14
|
Shell
|
intgyl/vim
|
/bin/android_tools/common/adk.sh
|
UTF-8
| 12,182
| 3.4375
| 3
|
[] |
no_license
|
# adk: Android debug toolkit dispatcher.
# Arguments: $1 - sub-command (see print_adk_usage), $2.. - command arguments
# Most sub-commands need a connected, authorized adb device; that check was
# duplicated in every branch of the original and is now done once up front.
# NOTE(review): "meminfo" appears in supported_adk/usage but has no dispatch
# branch, so it falls through to usage -- confirm intent.
adk() {
    host_platform=""
    case "$(uname)" in
        Linux) host_platform=linux ;;
        FreeBSD) host_platform=fbsd ;;
        *CYGWIN*) host_platform=cygwin ;;
        *MINGW*) host_platform=mingw ;;
        Darwin) host_platform=darwin ;;
    esac

    if [ $# -lt 1 ] ; then
        echo " adk \"cmd\""
        print_adk_usage
        return
    fi

    # Shared device precondition ("dtc" works offline and is excluded).
    case "$1" in
        ftyrst|smartisan-active|smartisan-launcher|hexdump|pmap-all|root|\
cpu-performance|panic|listapk|focusedapk|net-shell|clk|cpuclk|gpuclk|\
airplane_on|airplane_off|fps|charging-disble|file-log|screen-off-time)
            # Return 0 when offline, matching the original per-branch checks.
            if ! __check_online; then
                return
            fi
            ;;
    esac

    case "$1" in
        ftyrst)
            # Factory reset via the MASTER_CLEAR broadcast.
            adb root > /dev/null
            adb wait-for-device
            adb shell am broadcast -a android.intent.action.MASTER_CLEAR;;
        smartisan-active)
            __skip-first-time;;
        smartisan-launcher)
            adb shell am start -n com.smartisanos.launcher/com.smartisanos.launcher.Launcher;;
        hexdump)
            __adk_hexdump;;
        pmap-all)
            __adk_pmap-all;;
        root)
            __adk_root;;
        cpu-performance)
            __adk_cpu-performance;;
        panic)
            __adk_panic;;
        listapk)
            __adk_listapk;;
        focusedapk)
            __adk_focusedapk;;
        net-shell)
            __adk_net-shell;;
        clk)
            __clk;;
        cpuclk)
            __cpuclk;;
        gpuclk)
            __gpuclk;;
        airplane_on)
            __airplane_mode_on;;
        airplane_off)
            __airplane_mode_off;;
        fps)
            __fps;;
        charging-disble)
            __charging_disable;;
        file-log)
            # __file_log validates $#; it reads the file name from its $2.
            __file_log "$1" "$2";;
        dtc)
            __dtc "$2";;
        screen-off-time)
            __screen_off_time "$2";;
        *)
            print_adk_usage;;
    esac
}
# Return 0 when at least one authorized adb device is attached; otherwise
# print a hint and return 1.
function __check_online()
{
    local tmp
    # Authorized devices are listed with a trailing "device" state word.
    tmp=$(adb devices | sed -n '/device$/p')
    if [ -z "$tmp" ]; then
        # Fixed typo in the original message ("devce" -> "device").
        echo -e "\nConnect the device and authorize it\n"
        return 1
    fi
    return 0
}
# Restart adbd as root, then remount every partition currently mounted
# read-only ("ro,") as read-write.
function __adk_root
{
    adb root > /dev/null
    adb wait-for-device
    # /proc/mounts lines are "<device> <mountpoint> ..."; awk joins the two
    # fields with '@' so the for-loop's word splitting keeps them paired.
    for string in `adb shell cat /proc/mounts | grep ro, | awk '{printf ("%s@%s\n",$1, $2) }'`; do
        drive=$(echo $string |awk -F'@' '$0=$1')
        mountpoint=$(echo $string|awk -F'@' '$0=$2')
        adb shell "mount -o remount $drive $mountpoint"
    done
}
# Force a kernel panic on the attached device via the sysrq trigger
# ('c' = crash). Requires adb root.
function __adk_panic
{
    adb root > /dev/null
    adb wait-for-device
    adb shell "echo c > /proc/sysrq-trigger"
}
# List installed APKs grouped by install location.
function __adk_listapk
{
    local tmplog
    # mktemp replaces the predictable /tmp/tmplog.pid.$$ name (symlink-attack
    # prone and collision-prone).
    tmplog=$(mktemp) || return 1
    adb shell "pm list packages -f" > "$tmplog"
    for dir in '/system/app' '/system/priv-app' '/system/vendor' '/system/framework' '/data/app'; do
        echo
        echo dir: $dir
        # grep reads the file directly (no useless cat); -- guards the pattern.
        grep -- "$dir" "$tmplog"
    done
    rm -f -- "$tmplog"
}
# Print the package owning the currently focused activity, then the matching
# installed-package entry.
function __adk_focusedapk
{
    # dumpsys field 4 is "package/.Activity"; the sed strips the "/.Activity"
    # suffix, leaving the bare package name.
    packages=`adb shell dumpsys activity | grep mFocusedActivity | awk {'print $4'} | sed 's/\(.*\)\/\.\(.*\)/\1/g'`
    echo "activity: $packages"
    adb shell "pm list packages -f" | grep $packages
}
# Raw-dump every block partition except system/cache/userdata/udisk into
# /data/hexdump on the device, then pull the directory to the host cwd.
function __adk_hexdump
{
    dump_path="/data/hexdump"
    #blk_path="/dev/block/bootdevice/by-name"
    # Derive the by-name block dir from wherever /system is mounted from.
    blk_path=`adb shell cat /proc/mounts | grep system | awk '{print $1}' | sed "s/\/system//g"`
    adb root > /dev/null
    adb wait-for-device
    adb shell "mkdir $dump_path"
    for partition in `adb shell ls $blk_path | grep -v "system\|cache\|userdata\|udisk"`; do
        # adb shell output carries CR/LF; strip it before reuse in paths.
        partition=`echo "$partition" | tr -d '\r\n'`
        echo "dd if=$blk_path/$partition of=$dump_path/$partition"
        adb shell "dd if=$blk_path/$partition of=$dump_path/$partition"
    done
    adb shell "sync"
    adb pull $dump_path .
    adb shell "rm -rf $dump_path"
}
# Pin every CPU to its maximum frequency: stop the thermal governor, then set
# the "performance" governor and clamp min=max on each core.
function __adk_cpu-performance
{
    adb root > /dev/null
    adb wait-for-device
    adb shell stop thermal-engine
    cpus=0
    cpus=`adb shell cat /proc/cpuinfo | grep processor | wc -l`
    # cpuN indices are 0-based, so iterate 0 .. count-1.
    cpus=$((cpus - 1))
    for nb in `seq 0 $cpus`; do
        adb shell "echo performance > /sys/devices/system/cpu/cpu$nb/cpufreq/scaling_governor"
        max_freq=`adb shell cat /sys/devices/system/cpu/cpu$nb/cpufreq/cpuinfo_max_freq`
        adb shell "echo $max_freq > /sys/devices/system/cpu/cpu$nb/cpufreq/scaling_min_freq"
        adb shell "echo $max_freq > /sys/devices/system/cpu/cpu$nb/cpufreq/scaling_max_freq"
    done
}
# Open an adb-over-TCP shell so debugging no longer needs the USB cable:
# switch adbd to TCP, discover the wlan0 address, connect, and drop into a shell.
function __adk_net-shell
{
    tcpport=5555
    adb disconnect
    adb root > /dev/null
    adb wait-for-device
    # adb shell setprop service.adb.tcp.port $tcpport
    adb tcpip 5555
    adb wait-for-device
    # ipaddr=`adb shell "ifconfig wlan0" | grep "inet addr" | awk {'print $2'} | sed {"s/\(.*\):\(.*\)/\2/g"}`
    # Splits the "inet addr:x.x.x.x" line on spaces/colons; field 13 is the
    # address -- presumably toolbox ifconfig layout, TODO confirm per device.
    ipaddr=`adb shell ifconfig wlan0|grep 'inet addr'|awk -F'[ : ]' '{print $13}'`
    adb wait-for-device
    adb connect $ipaddr:$tcpport
    sleep 2
    adb -s $ipaddr:$tcpport shell
}
#function __adk_flash-dir
#{
# webcgi="http://172.16.2.18/cgi-bin/vmlinux-lookup.cgi"
# version=$(adb shell "cat /proc/version" | grep "Linux version")
# if [ $host_platform == "cygwin" ] ; then
# smb_path=$(curl --data-urlencode "version=$version" $webcgi 2> /dev/null | grep "Flashing binary" -A 1 | tail -1)
# unc_path=$(echo ${smb_path#*smb:})
# for string in `net use | grep "Microsoft Windows Network" | awk '{printf ("%s@%s\n",$2, $3)}'`; do
# drive=$(echo $string |awk -F'@' '$0=$1')
# map_point=$(echo $string|awk -F'@' '$0=$2'| sed "s/\\\/\//g")
# echo $unc_path | grep $map_point > /dev/null
# if [ $? == 0 ]; then
# #echo $drive $map_point $win_path
# map_point_regex=$(echo $map_point | sed "s/\//\\\\\//g")
# drive_regex=$(echo $drive | sed "s/\:/\\\:/g")
# win_path=$(echo "$unc_path" | sed "s/$map_point_regex/$drive_regex/g")
# echo $win_path | tee /dev/console | tr '\n' ' ' | clip
# fi
# done
# fi
#}
#
#function __adk_symbol-dir
#{
# webcgi="http://172.16.2.18/cgi-bin/vmlinux-lookup.cgi"
# version=$(adb shell "cat /proc/version" | grep "Linux version")
# if [ $host_platform == "cygwin" ] ; then
# smb_path=$(curl --data-urlencode "version=$version" $webcgi 2> /dev/null | grep "kernel symbols" -A 1 | tail -1)
# unc_path=$(echo ${smb_path#*smb:})
# for string in `net use | grep "Microsoft Windows Network" | awk '{printf ("%s@%s\n",$2, $3)}'`; do
# drive=$(echo $string |awk -F'@' '$0=$1')
# map_point=$(echo $string|awk -F'@' '$0=$2'| sed "s/\\\/\//g")
# echo $unc_path | grep $map_point > /dev/null
# if [ $? == 0 ]; then
# #echo $drive $map_point $win_path
# map_point_regex=$(echo $map_point | sed "s/\//\\\\\//g")
# drive_regex=$(echo $drive | sed "s/\:/\\\:/g")
# win_path=$(echo "$unc_path" | sed "s/$map_point_regex/$drive_regex/g")
# echo $win_path | tee /dev/console | tr '\n' ' ' | clip
# fi
# done
# fi
#}
# Print pmap for every process that has a cmdline (i.e. user processes),
# useful for finding which processes map a given shared library.
function __adk_pmap-all
{
    for pid in `adb shell "ps" | awk '{print $2}' `; do
        # Kernel threads have an empty cmdline and are skipped.
        cmdline=`adb shell cat /proc/$pid/cmdline`
        if [ -n "$cmdline" ]; then
            adb shell pmap $pid
        fi
    done
}
# Poll and print CPU clocks, GPU clock and GPU busy stats twice a second,
# forever (interrupt with Ctrl-C).
function __clk {
    while true
    do
        adb wait-for-device
        echo "****************cpu clk***************"
        adb shell "cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq"
        echo "****************gpu clk***************"
        adb shell "cat /sys/class/kgsl/kgsl-3d0/gpuclk"
        echo "****************gpubusy***************"
        adb shell "cat /sys/class/kgsl/kgsl-3d0/gpubusy"
        sleep 0.5
    done
}
# Poll current CPU frequencies twice a second, forever (Ctrl-C to stop).
function __cpuclk {
    while true
    do
        adb wait-for-device
        adb shell "cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq"
        sleep 0.5
    done
}
# Poll the Adreno (kgsl) GPU clock twice a second, forever (Ctrl-C to stop).
function __gpuclk {
    while true
    do
        adb wait-for-device
        adb shell "cat /sys/class/kgsl/kgsl-3d0/gpuclk"
        sleep 0.5
    done
}
# Enable airplane mode and broadcast the change so apps pick it up.
function __airplane_mode_on {
    adb shell settings put global airplane_mode_on 1
    adb shell am broadcast -a android.intent.action.AIRPLANE_MODE --ez state true
}
# Disable airplane mode and broadcast the change so apps pick it up.
function __airplane_mode_off {
    adb shell settings put global airplane_mode_on 0
    adb shell am broadcast -a android.intent.action.AIRPLANE_MODE --ez state false
}
# Continuously print SurfaceFlinger "fps" lines (10 Hz poll, Ctrl-C to stop).
function __fps {
    adb wait-for-device
    # adb shell "while true ; do dumpsys SurfaceFlinger | grep \"vsync period\" ; sleep 0.1 ; done"
    adb shell "while true ; do dumpsys SurfaceFlinger | grep \"fps\" ; sleep 0.1 ; done"
}
# Skip the first-boot setup wizard. On userdebug builds (adb root works) the
# setup flags are written directly; on production builds ("adbd cannot run as
# root...") the wizard's completion activity is started instead.
function __skip-first-time () {
    adbd_string="adbd cannot run as root in production builds"
    adb wait-for-device
    user=`adb root`
    if [ "$user" = "$adbd_string" ]; then
        adb wait-for-device
        adb shell settings put secure user_setup_complete 1
        adb shell settings put global device_provisioned 1
        return
    fi
    adb wait-for-device
    adb shell "am start -n com.smartisanos.setupwizard/com.smartisanos.setupwizard.SetupWizardCompleteActivity"
}
# Stop battery charging via the power-supply sysfs knob (needs adb root).
function __charging_disable() {
    adb root > /dev/null
    adb wait-for-device
    adb shell "echo 0 > /sys/class/power_supply/battery/charging_enabled"
}
# Enable dynamic-debug ("+p") kernel logging for one source file.
# Arguments: $1 - the sub-command name (only counted), $2 - file, e.g. xxx.c
function __file_log () {
    if [ "$#" -ne 2 ]; then
        echo "usage: adk file-log xxx.c"
        return
    fi
    adb root > /dev/null
    adb shell "echo 'file $2 +p' > /sys/kernel/debug/dynamic_debug/control"
}
# Decompile a flattened device tree (.dtb) into source (.dts) using the
# dtc_bin shipped next to this script, writing <name>.dts.
# Arguments: $1 - path to a .dtb file
function __dtc() {
    # Extension = text after the last dot; no dot leaves the whole name,
    # matching the original sed behavior.
    local ext="${1##*.}"
    # One guard replaces the three duplicated usage blocks of the original.
    if [ $# != 1 ] || [ -z "$ext" ] || [ "$ext" != "dtb" ]; then
        echo "Please input the dtb file"
        echo "adk dtc xxx.dtb"
        return
    fi
    local bin_dir
    bin_dir=$(dirname "$BASH_SOURCE")
    # ${1%.*} strips the final extension (same as the original sed).
    "$bin_dir/dtc_bin" -I dtb -O dts -o "${1%.*}.dts" "$1"
}
# Set the device screen-off timeout.
# Arguments: $1 - timeout in milliseconds
function __screen_off_time() {
    if [ $# = 0 ]; then
        echo "usage: adk screen-off-time ms"
        return
    fi
    # "$1" quoted so the value reaches settings verbatim.
    adb shell settings put system screen_off_timeout "$1"
}
# Sub-commands offered to tab completion (one whitespace-separated string).
# NOTE(review): "meminfo" is listed here but adk() has no branch for it;
# keep this list in sync with the case statement in adk().
supported_adk="root ftyrst hexdump meminfo pmap-all cpu-performance listapk
focusedapk panic net-shell clk cpuclk gpuclk airplane_on
airplane_off fps smartisan-active smartisan-launcher charging-disble
file-log dtc screen-off-time"
# Bash programmable-completion hook for adk: complete "-" words from a fixed
# option list, anything else from $supported_adk.
function _comp_adk {
    local curw
    opts="-a -l -d -h"
    COMPREPLY=()
    curw=${COMP_WORDS[COMP_CWORD]}
    if [[ ${curw} == -* ]] ; then
        COMPREPLY=( $(compgen -W "${opts}" -- ${curw}) )
        return 0
    fi
    # compgen evaluates the -W word itself, so the single-quoted
    # '$supported_adk' is expanded at completion time, not here.
    COMPREPLY=($(compgen -W '$supported_adk' -- $curw))
    return 0
}
# ZSH completion command
# zsh completion shim.
# NOTE(review): this calls "_l", which is not defined anywhere in this file;
# presumably meant to list adk sub-commands -- confirm before relying on it.
function _compzsh {
    reply=($(_l))
}
# Register completion for the shell actually in use.
if [ $ZSH_VERSION ]; then
    compctl -K _compzsh adk
else
    shopt -s progcomp
    complete -F _comp_adk adk
fi
# ANSI color escapes used by print_adk_usage; END resets all attributes.
RED="\033[31m"
GREEN="\033[32m"
YELLOW="\033[33m"
END="\033[0m"
# Print the colorized adk help text.
# Fix: the original used "$EDN" (three places) and "$END1" (one place), which
# expand to empty / wrong values, so the color reset escape was never emitted
# and terminal colors bled past those lines. All now use $END.
function print_adk_usage {
    echo -e "
$RED 1. root $END
$GREEN 将adb重启为root权限,并且remount所有ro分区为rw $END
$RED 2. ftyrst $END
$GREEN 执行恢复出厂设置 $END
$RED 3. hexdump $END
$GREEN 将userdata,system,cache之外的分区,都已二进制形式dump到当前目录中 $END
$RED 4. meminfo $END
$GREEN 监控系统的内存状态 $END
$RED 5. pmap-all $END
$GREEN 把所有用户进程的pmap打印出来,用来查看动态库被哪些进程引用了 $END
$RED 6. cpu-performance $END
$GREEN 将Android设置成高性能状态,锁定在最高频。$END
$RED 7. listapk $END
$GREEN 打印Android当前所有安装的apk $END
$RED 8. focusedapk $END
$GREEN 打印当前主界面的apk名称 $END
$RED 9. panic $END
$GREEN 触发panic $END
$RED 10. net-shell $END
$GREEN 建立一个adb net的shell通道,可以不依赖USB进行调试 $END
$RED 11. fps $END
$GREEN 获取当前fps $END
$RED 12. clk $END
$GREEN cpuclk gpuclk gpubusy $END
$RED 13. airplane_on $END
$GREEN 打开飞行模式 $END
$RED 14. airplane_off $END
$GREEN 关闭飞行模式 $END
$RED 15. smartisan-active smartisan-launcher $END
$RED 16. charging-disble $END
$RED 17. file-log $END
$GREEN 打开指定文件dmesg log $END
$YELLOW usage: adk file-log xxx.c $END
$RED 18. dtc $END
$GREEN 反编译dtb文件 $END
$YELLOW usage: adk dtc xxx.dtb $END
$RED 19. screen-off-time $END
$GREEN 配置灭屏时间 $END
$YELLOW usage: adk screen-off-time ms $END
"
}
| true
|
5f3b911e91182ce268bd8d0135e6ac41edd34ba4
|
Shell
|
HashNuke/heroku-buildpack-elixir-otp-builds
|
/build-otp.sh
|
UTF-8
| 786
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build an Erlang/OTP release from the GitHub source tarball for the version
# named in $OTP_VERSION; the packaged result lands in out/OTP-<version>.tar.gz.
if [ -z "$OTP_VERSION" ]; then
  echo "OTP_VERSION not set"
else
  echo "Building OTP_VERSION $OTP_VERSION"
  export OTP_URL=https://github.com/erlang/otp/archive/OTP-$OTP_VERSION.tar.gz
  # Derive the tarball name from OTP_URL instead of repeating the URL literal
  # (the original hard-coded the URL a second time, inviting drift).
  export OTP_TAR_NAME=$(basename "$OTP_URL")
  export OTP_UNTAR_DIR="otp-OTP-$OTP_VERSION"

  wget "$OTP_URL"

  echo "******====*******"
  ls
  echo "******====*******"

  tar -zxf "$OTP_TAR_NAME"
  chmod -R 777 "$OTP_UNTAR_DIR"

  cd "$OTP_UNTAR_DIR"
  ./otp_build autoconf
  ./configure --with-ssl --enable-dirty-schedulers
  make
  make release
  cd ../

  mv "otp-OTP-${OTP_VERSION}/release/x86_64-unknown-linux-gnu/" "OTP-${OTP_VERSION}"
  rm "OTP-${OTP_VERSION}.tar.gz"
  tar -zcf "out/OTP-${OTP_VERSION}.tar.gz" "OTP-${OTP_VERSION}"
fi
| true
|
db428f70c14bb30c40b15ba642a773352a8a4437
|
Shell
|
flexxo17/dvr_install
|
/qnap/fetch_wrapper.sh
|
UTF-8
| 1,491
| 3.75
| 4
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/sh
####################
#
# Simple script to fetch the prebuilt wrappers
#
####################

# QDK Parameters
QDK_ROOT=$PWD
QDK_SHARED_PATH=$QDK_ROOT/shared

# Wrapper Parameters
# NOTE(review): WRAPPER_BIN_ARM8 is declared but never fetched or chmod'ed
# below -- confirm whether the arm8 build should be downloaded too.
WRAPPER_BIN_ARM7=hdhr_wrapper_arm7
WRAPPER_BIN_ARM8=hdhr_wrapper_arm8
WRAPPER_BIN_i686=hdhr_wrapper_i686
WRAPPER_BIN_X86_64=hdhr_wrapper_x86_64
WRAPPER_REPO_LINK=http://www.irish-networx.com/hdhr_wrapper

# Update this with any additional WGET parameters you need to use.. or place in local .wgetrc
WGET_OPTS=-q

######################
######################
# SCRIPT STARTS HERE #
######################
######################
echo "--- Moving to $QDK_SHARED_PATH folder...."
cd $QDK_SHARED_PATH

# NOTE(review): $DVR_BIN is not defined in this script, so this message
# prints "Removing previous  if it exists" -- presumably it should name the
# wrapper binaries.
echo "--- Removing previous $DVR_BIN if it exists..."
if [ -f $WRAPPER_BIN_ARM7 ] ; then
    echo "--- arm binary exists, deleting..."
    rm -f $WRAPPER_BIN_ARM7
fi
if [ -f $WRAPPER_BIN_X86_64 ] ; then
    echo "--- x86_64 binary exists, deleting..."
    rm -f $WRAPPER_BIN_X86_64
fi
if [ -f $WRAPPER_BIN_i686 ] ; then
    echo "--- i686 binary exists, deleting..."
    rm -f $WRAPPER_BIN_i686
fi

echo "--- Fetching binaries from SiliconDust $WRAPPER_REPO_LINK/ ..."
wget $WGET_OPTS $WRAPPER_REPO_LINK/$WRAPPER_BIN_X86_64
wget $WGET_OPTS $WRAPPER_REPO_LINK/$WRAPPER_BIN_i686
wget $WGET_OPTS $WRAPPER_REPO_LINK/$WRAPPER_BIN_ARM7

echo "--- Making binaries executable..."
chmod a+x $WRAPPER_BIN_ARM7
chmod a+x $WRAPPER_BIN_i686
chmod a+x $WRAPPER_BIN_X86_64

echo "--- Done, returning to $QDK_ROOT."
cd $QDK_ROOT
| true
|
27e15a16106bd5daa4f7103f90ef96942fb6484c
|
Shell
|
CherokeeLanguage/Cherokee-TTS-fst
|
/assets/enbible/kaggle-download.sh
|
UTF-8
| 355
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env -S bash -x
# Download the datasets listed in the trailing heredoc via the kaggle CLI,
# running inside the Cherokee-TTS-fst conda environment.
cd "$(dirname "$0")"
# PS1 is set so the sourced ~/.bashrc's interactive guard doesn't bail early
# -- presumably; confirm against the local .bashrc.
PS1='$'
. ~/.bashrc
conda deactivate
conda activate Cherokee-TTS-fst
set -e
set -o pipefail
(
    while read -r dataset; do
        echo $dataset
        # NOTE(review): dataset_track is computed but never used below.
        dataset_track="$(echo "$dataset"|cut -f 2 -d '/')"
        # exit 1 leaves the subshell; with set -e the whole script then stops.
        kaggle datasets download "$dataset" || exit 1
    done
) << EOT
bryanpark/the-world-english-bible-speech-dataset
EOT
| true
|
169663a7d59024783cb778c413bcd7ac3a878c04
|
Shell
|
nerdroychan/dotfiles
|
/bin/corerun
|
UTF-8
| 283
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Real cores instead of CPU threads
# Run "$@" pinned to one randomly chosen logical CPU per physical core,
# avoiding SMT sibling threads.
if [[ $# -eq 0 ]] ; then
    exit 0
fi
# lscpu -p emits CSV "cpu,core,..." (header lines start with '#').
# awk pairs them as "cpu:core"; shuf randomizes, then "sort -u -k 2,2" keeps
# one entry per core; the final awk extracts the chosen cpu numbers.
CPUS=$(lscpu -p | grep -v '#' | awk -F ',' '{print $1 ":" $2}' | shuf | sort -R -u -t : -k 2,2 | awk -F ':' '{print $1}')
# Join the cpu list with commas for taskset (echo -n avoids a trailing comma).
SELECTED=$(echo -n "${CPUS}" | tr '\n' ',')
taskset -c $SELECTED "$@"
| true
|
e40876d65fc19de7fa9f5e920cf5d0fd242c791c
|
Shell
|
otsuarez/nc2n
|
/inventory/inventory.sh
|
UTF-8
| 798
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Ansible dynamic inventory: emit JSON with one group per directory under
# ./data, each containing a same-named local-connection host.
#pwd
#DIR=group_vars
DIR=data
#SERVERS=$(cd group_vars ; ls -d *)
#SERVERS=$(cd ${DIR} ; ls -d *)" localhost"
SERVERS=$(cd ${DIR} ; ls -d *)
#echo "inventariooooo ----> $SERVERS"
IN='{
'
host=''
HOSTVARS=''
# Build the group entries by string concatenation, one per directory.
for i in $SERVERS;
do
host=$host'"'$i'" : { "hosts" : ["'$i'"] },
'
done
# Matching _meta hostvars, all forced to ansible_connection=local.
for i in $SERVERS;
do
HOSTVARS=$HOSTVARS'"'$i'" : { "ansible_connection" : "local" },
'
done
#HOSTS=$(echo $host | sed 's/,$//')
HOSTS=$host
# Strip the trailing comma so the hostvars object stays valid JSON.
HOSTVARS=$(echo $HOSTVARS | sed 's/,$//')
IN=$IN$HOSTS'
"_meta" : {
"hostvars" : {
'$HOSTVARS'
}
}
}'
echo $IN
#IN=$IN'
#echo ;echo '---'; echo
# Unused sample output kept for reference.
OUT='{
"vbox" : {
"hosts" : [ "vbox"]
},
"_meta" : {
"hostvars" : {
"vbox" : { "ansible_connection" : "local" }
}
}
}'
#echo $OUT
| true
|
bf3ac3998be6a0daaafc90ab82fc69e5bf98f540
|
Shell
|
oshyd/docker-stack-wait-deploy
|
/docker-stack-wait-deploy.sh
|
UTF-8
| 1,715
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Script to wait until all services in stack are deployed.
# Usage:
#   ./docker-stack-wait-deploy.sh [stack-name]
# Polls each service's UpdateStatus once a second, redrawing a status table
# in place, until every service is done; exits 1 if any paused/rolled back.
stack_name=$1
while true; do
    all_services_done=1
    has_errors=0
    updatable_output=""
    service_ids=$(docker stack services -q $stack_name)
    # check all services in stack
    for service_id in $service_ids; do
        service_name=$(docker service inspect --format '{{.Spec.Name}}' $service_id)
        # see: https://github.com/moby/moby/issues/28012
        service_state=$(docker service inspect --format '{{if .UpdateStatus}}{{.UpdateStatus.State}}{{else}}created{{end}}' $service_id)
        case "$service_state" in
            created|completed)
                ;;
            paused|rollback_completed)
                has_errors=1
                ;;
            *)
                all_services_done=0
                ;;
        esac
        service_echo_ps=$(docker service ps $service_id --filter "desired-state=running")
        service_echo_header="STATUS ($service_name): $service_state"
        updatable_output="$updatable_output\n$service_echo_header\n$service_echo_ps\n"
    done
    updatable_output="$updatable_output\n"
    # clear updatable output per line (using 'tput ed' does not work correctly)
    while [ ${updatable_output_lines:-0} -gt 0 ]
    do
        tput cuu 1 # move cursor up one line
        tput el    # clear line
        ((updatable_output_lines--))
    done
    printf "$updatable_output"
    updatable_output_lines=$(printf "$updatable_output" | wc -l | tr -d '\n')
    # check if all services done
    if [ "$all_services_done" == "1" ]; then
        if [ "$has_errors" == "1" ]; then
            echo "Deployment failed."
            # todo: show error message
            exit 1
        else
            echo "Deployment successful."
            exit 0
        fi
    else
        sleep 1
    fi
done
| true
|
a884ff357f8a039ef5cb985bd6800684735fad56
|
Shell
|
CyberSME/ubuntu-wrt
|
/rootfs/scripts/functions.sh
|
UTF-8
| 341
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Prompt a yes/no question and return 0 for yes, 1 for no.
# Arguments: $1 - question text
#            $2 - default answer used on empty input (default "n")
# Re-prompts on invalid input; now returns 1 when stdin hits EOF instead of
# spinning forever (the original looped on a failed read with r unchanged).
ask() {
    local q="$1"
    local d=${2:-"n"}
    local r
    # -r keeps backslashes in the typed answer literal.
    read -r -p "$q [$d]: " r
    r=${r:-"$d"}
    while true; do
        case $r in
            y|Y|yes|Yes|yES|YES )
                return 0
                ;;
            n|N|no|No|nO )
                return 1
                ;;
            * )
                # EOF guard: bail out rather than loop forever on closed input.
                read -r -p "Not a valid answer. Try 'y' or 'n': " r || return 1
                ;;
        esac
    done
}
| true
|
3589a88829a0e1111544d2bf5fd680d566b7d08e
|
Shell
|
kyma-project/test-infra
|
/prow/scripts/cluster-integration/helpers/eventing.sh
|
UTF-8
| 12,594
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -e
readonly BACKEND_SECRET_NAME=eventing-backend
readonly BACKEND_SECRET_NAMESPACE=default
readonly BACKEND_SECRET_LABEL_KEY=kyma-project.io/eventing-backend
readonly BACKEND_SECRET_LABEL_VALUE=NATS
readonly EVENTING_BACKEND_CR_NAME=eventing-backend
readonly EVENTING_BACKEND_CR_NAMESPACE=kyma-system
# shellcheck source=prow/scripts/lib/gardener/gardener.sh
source "${TEST_INFRA_SOURCES_DIR}/prow/scripts/lib/gardener/gardener.sh"
# Check if required vars are set or not
# Abort the whole script when CREDENTIALS_DIR is unset or empty.
function eventing::check_required_vars() {
    if [[ -z "${CREDENTIALS_DIR:-}" ]]; then
        echo "required variable CREDENTIALS_DIR is missing"
        exit 1
    fi
}
# Create a Kubernetes Secret which contains the EventMesh service key
# Create/update the "event-mesh" Secret in the default namespace from the
# EventMesh service key file found in CREDENTIALS_DIR.
function eventing::create_eventmesh_secret() {
    eventing::check_required_vars
    pushd "${CREDENTIALS_DIR}"
    SECRET_NAME=event-mesh
    SECRET_NAMESPACE=default
    # NOTE(review): "base64 -i" means read-from-file on BSD/macOS but
    # "ignore garbage" on GNU coreutils -- confirm the CI platform.
    SERVICE_KEY_VALUE=$(base64 -i serviceKey | tr -d '\n')
    cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: ${SECRET_NAME}
  namespace: ${SECRET_NAMESPACE}
  labels:
    kyma-project.io/event-mesh: "true"
data:
  serviceKey: "${SERVICE_KEY_VALUE}"
EOF
    popd
}
# Create a Kubernetes Secret which is needed by the Eventing Backend controller
# Create/update the Secret consumed by the Eventing Backend controller,
# base64-encoding the fields extracted from the serviceKey JSON.
function eventing::create_eventing_backend_secret() {
    eventing::check_required_vars
    pushd "${CREDENTIALS_DIR}"
    SECRET_NAME="${BACKEND_SECRET_NAME}"
    SECRET_NAMESPACE="${BACKEND_SECRET_NAMESPACE}"
    # Each field: strip whitespace, base64-encode, drop the trailing newline.
    MANAGEMENT=$(jq -r '.management' < serviceKey | tr -d '[:space:]' | base64 | tr -d '\n')
    MESSAGING=$(jq -r '.messaging' < serviceKey | tr -d '[:space:]' | base64 | tr -d '\n')
    NAMESPACE=$(jq -r '.namespace' < serviceKey | tr -d '[:space:]' | base64 | tr -d '\n')
    SERVICE_INSTANCE_ID=$(jq -r '.serviceinstanceid' < serviceKey | tr -d '[:space:]' | base64 | tr -d '\n')
    XS_APP_NAME=$(jq -r '.xsappname' < serviceKey | tr -d '[:space:]' | base64 | tr -d '\n')
    cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: ${SECRET_NAME}
  namespace: ${SECRET_NAMESPACE}
data:
  management: "${MANAGEMENT}"
  messaging: "${MESSAGING}"
  namespace: "${NAMESPACE}"
  serviceinstanceid: "${SERVICE_INSTANCE_ID}"
  xsappname: "${XS_APP_NAME}"
EOF
    popd
}
# Create a Kubernetes Secret which is needed by the Eventing Publisher and Subscription Controller
# Replace the default "eventing" Secret in kyma-system with values for the
# saprestmgw (HTTP REST) messaging broker from the serviceKey JSON.
function eventing::create_eventing_secret() {
    eventing::check_required_vars
    pushd "${CREDENTIALS_DIR}"
    SECRET_NAME=eventing
    SECRET_NAMESPACE=kyma-system
    # delete the default Eventing secret
    kubectl delete secret -n ${SECRET_NAMESPACE} ${SECRET_NAME}
    # Select the messaging entry whose broker type is "saprestmgw".
    HTTP_REST=$(jq -r '.messaging' < serviceKey | jq -c '.[] | select(.broker.type | contains("saprestmgw"))')
    BEB_NAMESPACE=$(jq -r '.namespace' < serviceKey | tr -d '[:space:]' | base64 | tr -d '\n')
    CLIENT_ID=$(echo "$HTTP_REST" | jq -r '.oa2.clientid' | tr -d '[:space:]' | base64 | tr -d '\n')
    CLIENT_SECRET=$(echo "$HTTP_REST" | jq -r '.oa2.clientsecret' | tr -d '[:space:]' | base64 | tr -d '\n')
    EMS_PUBLISH_URL=$(echo "$HTTP_REST" | jq -r '.uri' | tr -d '[:space:]' | base64 | tr -d '\n')
    TOKEN_ENDPOINT=$(echo "$HTTP_REST" | jq -r '.oa2.tokenendpoint' | tr -d '[:space:]' | base64 | tr -d '\n')
    # create Eventing secret with the proper values
    cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: ${SECRET_NAME}
  namespace: ${SECRET_NAMESPACE}
data:
  beb-namespace: "${BEB_NAMESPACE}"
  client-id: "${CLIENT_ID}"
  client-secret: "${CLIENT_SECRET}"
  ems-publish-url: "${EMS_PUBLISH_URL}"
  token-endpoint: "${TOKEN_ENDPOINT}"
EOF
    popd
}
# Switches the eventing backend based on the passed parameter (NATS or BEB).
# If there is no parameter passed, NATS is used as the default backend.
# Label the backend Secret so the Eventing controller switches backends.
# Arguments: $1 - backend name (lower-cased; empty falls back to the NATS default).
function eventing::switch_backend() {
    # NOTE(review): tr -s also squeezes repeated characters after translating;
    # harmless for "nats"/"beb" but worth confirming for other names.
    labelValue="$(echo "${1}" | tr -s '[:upper:]' '[:lower:]')"
    if [[ -z "${labelValue}" ]]; then
        labelValue="$(echo "${BACKEND_SECRET_LABEL_VALUE}" | tr -s '[:upper:]' '[:lower:]')"
    fi
    echo "label backend secret with ${BACKEND_SECRET_LABEL_KEY}=${labelValue}"
    kubectl label secret --namespace "${BACKEND_SECRET_NAMESPACE}" "${BACKEND_SECRET_NAME}" "${BACKEND_SECRET_LABEL_KEY}=${labelValue}" --overwrite
}
# Waits for Eventing backend to be ready by checking the EventingBackend custom resource status
# Poll the EventingBackend CR until .status.eventingReady is true and
# .status.backendType matches $1, up to 20 tries x 10s. Returns non-zero on
# timeout.
function eventing::wait_for_backend_ready() {
    if [[ -z "${1}" ]]; then
        echo "backend type is missing"
        exit 1
    fi
    # wait for Eventing backend custom resource old status to be cleared
    sleep 10s
    retry=0
    maxRetires=20
    wantReady="$(echo "true" | tr -s '[:upper:]' '[:lower:]')"
    wantBackend="$(echo "${1}" | tr -s '[:upper:]' '[:lower:]')"
    while [[ ${retry} -lt ${maxRetires} ]]; do
        ready=$(kubectl get eventingbackends.eventing.kyma-project.io --namespace "${EVENTING_BACKEND_CR_NAMESPACE}" "${EVENTING_BACKEND_CR_NAME}" -ojsonpath="{.status.eventingReady}" | tr -s '[:upper:]' '[:lower:]')
        backend=$(kubectl get eventingbackends.eventing.kyma-project.io --namespace "${EVENTING_BACKEND_CR_NAMESPACE}" "${EVENTING_BACKEND_CR_NAME}" -ojsonpath="{.status.backendType}" | tr -s '[:upper:]' '[:lower:]')
        if [[ "${ready}" == "${wantReady}" && "${backend}" == "${wantBackend}" ]]; then
            echo "Eventing backend [${1}] is ready"
            kubectl get eventingbackends.eventing.kyma-project.io --namespace "${EVENTING_BACKEND_CR_NAMESPACE}" "${EVENTING_BACKEND_CR_NAME}"
            return 0
        fi
        echo "try $((retry + 1))/${maxRetires} waiting for Eventing backend ${1} to be ready - current backend status ${backend}/${ready}"
        retry=$((retry + 1))
        sleep 10
    done
    echo "Eventing backend [${1}] is not ready"
    kubectl get eventingbackends.eventing.kyma-project.io --namespace "${EVENTING_BACKEND_CR_NAMESPACE}" "${EVENTING_BACKEND_CR_NAME}"
    return 1
}
# Runs eventing specific fast-integration tests preparation
# Prepare eventing fast-integration test assets (npm) from the kyma checkout.
function eventing::test_fast_integration_eventing_prep() {
    log::info "Running Eventing script to prepare test assets"
    pushd /home/prow/go/src/github.com/kyma-project/kyma/tests/fast-integration
    npm install
    npm run eventing-test-prep
    popd
    log::success "Eventing test preparation completed"
}
# Runs eventing specific fast-integration tests
# Run the eventing end-to-end release test suite.
function eventing::test_fast_integration_eventing() {
    log::info "Running Eventing E2E release tests"
    pushd /home/prow/go/src/github.com/kyma-project/kyma/tests/fast-integration
    make ci-test-eventing
    popd
    log::success "Eventing tests completed"
}
# Runs eventing script to provision SKR
# Provision an SKR cluster for the eventing tests.
function eventing::test_fast_integration_provision_skr() {
    log::info "Running Eventing script to provision SKR"
    pushd /home/prow/go/src/github.com/kyma-project/kyma/tests/fast-integration
    make ci-test-eventing-provision-skr
    popd
    log::success "Provision SKR completed"
}
# Runs eventing script to de-provision SKR
# De-provision the SKR cluster used by the eventing tests.
function eventing::test_fast_integration_deprovision_skr() {
    log::info "Running Eventing script to de-provision SKR"
    pushd /home/prow/go/src/github.com/kyma-project/kyma/tests/fast-integration
    make ci-test-eventing-deprovision-skr
    popd
    log::success "De-provision SKR completed"
}
# Sets KUBECONFIG to ~/.kube/config
# Point KUBECONFIG at the default ~/.kube/config for subsequent kubectl calls.
function eventing::set_default_kubeconfig_env() {
    log::info "Setting default KUBECONFIG ~/.kube/config"
    export KUBECONFIG="${HOME}/.kube/config"
}
# Run the pre-upgrade eventing tests from the matching kyma sources:
# KYMA_BRANCH wins over KYMA_SOURCE; otherwise the local prow checkout is used.
function eventing::pre_upgrade_test_fast_integration() {
    log::info "Running pre upgrade Eventing E2E release tests"
    if [[ "${KYMA_BRANCH}" ]]; then
        log::info "Cloning kyma repository and checking out branch:${KYMA_BRANCH}"
        git clone https://github.com/kyma-project/kyma ~/.kyma_branch
        pushd ~/.kyma_branch
        git checkout "${KYMA_BRANCH}"
        popd
        pushd ~/.kyma_branch/tests/fast-integration
        make ci-test-eventing-pre-upgrade
        popd
    elif [[ "${KYMA_SOURCE}" ]]; then
        log::info "Cloning kyma repository and checking out branch:${KYMA_SOURCE}"
        git clone https://github.com/kyma-project/kyma ~/.kyma_old
        pushd ~/.kyma_old
        git checkout "${KYMA_SOURCE}"
        popd
        pushd ~/.kyma_old/tests/fast-integration
        make ci-test-eventing-pre-upgrade
        popd
    else
        pushd /home/prow/go/src/github.com/kyma-project/kyma/tests/fast-integration
        make ci-test-eventing-pre-upgrade
        popd
    fi
    log::success "Pre upgrade Eventing tests completed"
}
# Run only the eventing E2E release test target.
function eventing::fast_integration_tests() {
    log::info "Running only Eventing E2E release tests"
    pushd /home/prow/go/src/github.com/kyma-project/kyma/tests/fast-integration
    make ci-test-eventing-tests
    popd
    log::success "Eventing tests completed"
}
# Run post-upgrade eventing tests and clean up the resources they created.
function eventing::post_upgrade_test_fast_integration() {
    log::info "Running post upgrade Eventing E2E release tests and clean up the resources"
    pushd /home/prow/go/src/github.com/kyma-project/kyma/tests/fast-integration
    make ci-test-eventing-post-upgrade
    popd
    log::success "Post upgrade Eventing tests completed"
}
# Remove test namespaces / compass scenario left by the fast-integration run.
function eventing::fast_integration_test_cleanup() {
    log::info "Running fast integration tests cleanup to remove the testing resources such as namespaces and compass scenario"
    pushd /home/prow/go/src/github.com/kyma-project/kyma/tests/fast-integration
    npm run eventing-test-cleanup
    popd
    log::success "Fast integration tests cleanup completed"
}
# Runs eventing copy-crd make target
# Vendor/tidy the eventing-controller module and run its copy-crds target.
function eventing::run_copy_crds() {
    log::info "Running eventing copy-crd make target"
    pushd /home/prow/go/src/github.com/kyma-project/kyma/components/eventing-controller
    make gomod-vendor-local
    make gomod-tidy-local
    make copy-crds
    popd
    log::success "Eventing copy-crds make target completed"
}
# deploy Kyma PR-version with the v1alpha2 Subscription CRD version
function eventing::deploy_kyma_pr_version_with_v1alpha2_subscription() {
log::info "Copying the CRDs to installation/eventing"
export ENABLE_NEW_CRD_VERSION="true"
eventing::run_copy_crds
pushd /home/prow/go/src/github.com/kyma-project/kyma/components/eventing-controller
gardener::deploy_kyma --source=local -w /home/prow/go/src/github.com/kyma-project/kyma --value eventing.controller.enableNewCRDVersion=true --verbose
popd
log::success "Deploying of the v1alpha2 Subscription completed"
}
# Printing stored Subscription CRD versions for debugging purposes.
function eventing::print_subscription_crd_version(){
log::info "Stored Subscription CRD versions:"
kubectl get crd subscriptions.eventing.kyma-project.io -o json | jq '.status.storedVersions'
}
function eventing::print_troubleshooting_logs() {
log::banner "Printing troubleshooting logs"
CMD_RUN_IMAGE="curlimages/curl"
# all pods in kyma-system
log::banner "Pods: kyma-system namespace"
kubectl get po -n kyma-system
# Eventing backend
log::banner "Active Eventing backend"
kubectl get eventingbackends -n kyma-system
# Subscriptions
log::banner "Subscriptions: All namespaces"
kubectl get subscriptions -A
kubectl get subscriptions -A -o yaml
# NATS health
log::banner "NATS Health Check"
log::info "eventing-nats-0"
kubectl run -it natscheck0 --image="${CMD_RUN_IMAGE}" --timeout=360s --restart=Never --rm -- curl http://eventing-nats-0.eventing-nats.kyma-system.svc.cluster.local:8222/healthz
log::info "eventing-nats-1"
kubectl run -it natscheck1 --image="${CMD_RUN_IMAGE}" --timeout=360s --restart=Never --rm -- curl http://eventing-nats-1.eventing-nats.kyma-system.svc.cluster.local:8222/healthz
log::info "eventing-nats-2"
kubectl run -it natscheck2 --image="${CMD_RUN_IMAGE}" --timeout=360s --restart=Never --rm -- curl http://eventing-nats-2.eventing-nats.kyma-system.svc.cluster.local:8222/healthz
# Logs from NATS pods
log::banner "Logs: eventing-nats-0"
kubectl logs -n kyma-system eventing-nats-0 -c nats
log::banner "Logs: eventing-nats-1"
kubectl logs -n kyma-system eventing-nats-1 -c nats
log::banner "Logs: eventing-nats-2"
kubectl logs -n kyma-system eventing-nats-2 -c nats
# Logs from EPP
log::banner "Logs: eventing-publisher-proxy"
kubectl logs -n kyma-system deployment/eventing-publisher-proxy -c eventing-publisher-proxy
# Logs from EC
log::banner "Logs: eventing-controller"
kubectl logs -n kyma-system deployment/eventing-controller -c controller
# all pods in all namespaces
log::banner "Pods: All namespace"
kubectl get po -A
}
| true
|
28328ab5622d7402fbd9df48ced76eb1f412e526
|
Shell
|
gbsf/archlinux-packages
|
/openoffice-de/repos/extra-x86_64/PKGBUILD
|
UTF-8
| 844
| 2.53125
| 3
|
[] |
no_license
|
# $Id$
# Maintainer: Tobias Powalowski <tpowa@archlinux.org>
# Contributor: Sarah Hay <sarah@archlinux.org>
# Arch PKGBUILD: repackage the upstream OpenOffice german langpack RPMs.
pkgname=openoffice-de
pkgver=2.4.0
pkgrel=1
pkgdesc="OpenOffice german language files"
arch=(i686 x86_64)
license=('LGPL')
url="http://www.openoffice.org"
makedepends=('rpmextract')
depends=('openoffice-base')
source=(ftp://ftp-1.gwdg.de/pub/openoffice/extended/2.4.0rc6/OOo_2.4.0rc6_20080314_LinuxIntel_langpack_de.tar.gz)
md5sums=('afa9b2477fbe6d9629942f4f05f393d9')

build() {
  # NOTE(review): the extracted directory name (OOH680_m12...) is hard-coded
  # and tied to this exact upstream tarball -- must change on version bumps.
  cd ${startdir}/src/OOH680_m12_native_packed-1_de.9286/RPMS
  for i in *.rpm
    do rpmextract.sh $i
  done
  # install openoffice language files
  cd ${startdir}/src/OOH680_m12_native_packed-1_de.9286/RPMS/opt
  mkdir -p ${startdir}/pkg/opt
  mv openoffice.org2.4 ${startdir}/pkg/opt/openoffice
  # Options after operands: works with GNU chown/chgrp argument reordering.
  chown root -R ${startdir}/pkg/opt/openoffice
  chgrp root -R ${startdir}/pkg/opt/openoffice
}
| true
|
f29f5e1fbf43eb40575299bbff215f56612123bb
|
Shell
|
edwardsmarkf/fastfeathers
|
/init-cockroach-client.bsh
|
UTF-8
| 4,723
| 3.46875
| 3
|
[] |
no_license
|
#! /bin/bash
# init-cockroach-client.bsh last update: 2018-04-19
# this script builds a cockroach(pg) client from scratch for testing with init-cockroach-server.bsh
# Connection settings for the server built by init-cockroach-server.bsh.
dbServerIp='XXX.XXX.XXX.XXX'; ## take this from init-cockroach-server.bsh
dbUser='feathersuser';
dbPass='aaaaaa';
dbPort='26257';
dbName='bank';
sslFlag='true';

ipaddrInternal=$(hostname --all-ip-addresses ;);
ipaddrExternal=$(dig +short myip.opendns.com @resolver1.opendns.com. ;);
# Double quotes so the addresses actually expand; the originals were
# single-quoted and printed the literal '${ipaddrInternal}'.
echo "ipAddrInternal is: ${ipaddrInternal}";
echo "ipAddrExternal is: ${ipaddrExternal}";

# Decide whether later commands need a sudo prefix: root needs none, members
# of the sudo group get "sudo", anyone else bails out.
# (The original tests compared single-quoted literals, so they were always
# true, and "echo SUDO=''" printed text instead of assigning the variable.)
if [ "$(id -u)" -eq 0 ] ;
    then SUDO='' ;
elif groups | grep -qw sudo ;
    then SUDO='sudo' ;
else
    echo 'you either need to be have sudo or be logged in as root!';
    exit;
fi;
FileNameWithExtension=${0##*/} ;
FileNameWithoutExtension=${FileNameWithExtension%.*} ;
TimeStamp=`date "+%Y-%m-%d %r"` ;
rm -Rf ./${FileNameWithoutExtension}/ ; ## just in case one already exists.
mkdir ./${FileNameWithoutExtension}/ && cd ./${FileNameWithoutExtension}/ ;
${SUDO} yum --assumeyes install bind-utils expect firewalld wget ;
${SUDO} systemctl start firewalld ;
${SUDO} systemctl enable firewalld ;
${SUDO} firewall-cmd --zone=dmz --add-port=${dbPort}/tcp --permanent ; sudo firewall-cmd --reload ;
${SUDO} yum --assumeyes update ;
${SUDO} yum --assumeyes install gcc-c++ make ;
${SUDO} yum --assumeyes install epel-release ;
curl --silent --location https://rpm.nodesource.com/setup_8.x | sudo bash - ;
${SUDO} yum --assumeyes install nodejs ;
sleep 10 ; ## installing node appeared to work async in one test.
${SUDO} npm -g update npm ; ## update to latest version
echo -n 'node version: ' ; node --version ; ## 8.10.0 used in this writing
echo -n 'npm version: ' ; npm --version ; ## 5.6.0 at the time of this writing
export FileNameWithExtension;
expect <(cat <<'END_OF_NPM_INIT'
set timeout -1
spawn npm init ;
expect -re ".*package name:.*"
send -- "\r"
expect -re ".*version:.*"
send -- "\r"
expect -re ".*description:.*"
send -- "Created using bash script: $env(FileNameWithExtension)\r"
expect -re ".*entry point:.*"
send -- "\r"
expect -re ".*test command:.*"
send -- "\r"
expect -re ".*git repository:.*"
send -- "\r"
expect -re ".*keywords:.*"
send -- "\r"
expect -re ".*author:.*"
send -- "Created using bash script: $env(FileNameWithExtension)\r"
expect -re ".*license:.*"
send -- "\r"
expect -re ".*Is this OK?.*"
send -- "\r"
expect eof
END_OF_NPM_INIT
)
npm install pg async --save ;
## written from https://www.cockroachlabs.com/docs/stable/build-a-nodejs-app-with-cockroachdb.html
cat > nodePgTest.js <<END_OF_NODE_SCRIPT ;
var async = require('async');
// Require the driver.
var pg = require('pg');
// Connect to the "bank" database.
var config = {
user: '${dbUser}',
password: '${dbPass}',
host: '${dbServerIp}', // DOUBLE-CHECK THIS!
port: ${dbPort},
database: '${dbName}',
dialectOptions: { // required for ssl postgres/cockroachdb
ssl: ${sslFlag}
},
};
// Create a pool.
const pool = new pg.Pool(config);
/* optionally this should work too:
const connectionString = 'postgresql://${dbUser}:${dbPass}@${dbServerIp}:${dbPort}/${dbName}?ssl=${sslFlag}';
const pool = new pg.Pool({
connectionString: connectionString,
})
*/
pool.connect(function (err, client, done) {
// Closes communication with the database and exits.
var finish = function () {
done();
process.exit();
};
if (err) {
console.error('could not connect to cockroachdb', err);
finish();
}
async.waterfall([
function (next) {
// Create the "accounts" table.
client.query('CREATE TABLE IF NOT EXISTS accounts (id INT PRIMARY KEY, balance INT);', next);
},
function (results, next) {
// Insert two rows into the "accounts" table.
client.query('INSERT INTO accounts (id, balance) VALUES (1, 1000), (2, 250);', next);
},
function (results, next) {
// Print out the balances.
client.query('SELECT id, balance FROM accounts;', next);
},
],
function (err, results) {
if (err) {
console.error('error inserting into and selecting from accounts', err);
finish();
}
console.log('Initial balances:');
results.rows.forEach(function (row) {
console.log(row);
});
finish();
});
});
END_OF_NODE_SCRIPT
cat <<END_OF_SCRIPT;
be sure to do cd ${FileNameWithoutExtension}; node nodePgTest.js ;
END_OF_SCRIPT
| true
|
a4cc7fd7a0ae4a7b3fd3a6009b8d631abc7b57cb
|
Shell
|
dan144/dotfiles
|
/util_funcs.sh
|
UTF-8
| 189
| 2.890625
| 3
|
[] |
no_license
|
function vimd ()
{
    # Open each file with uncommitted changes in its own vim tab.
    # Word-splitting of the (unquoted) list is intentional: one word per file.
    local changed
    changed=$(git diff --relative --name-only)
    vim -p ${changed}
}
function vgrep ()
{
    # Open (one tab each) every file under the current tree whose contents
    # match the extended regex in $1, case-insensitively.
    # 'egrep' is deprecated; 'grep -E' is the supported spelling, '--' stops a
    # pattern starting with '-' from being parsed as an option, and 'sort -u'
    # replaces the redundant 'sort | uniq' pipeline.
    vim -p $(grep -rilE -- "$1" | sort -u)
}
function vfind ()
{
    # Open every file under . whose name matches the glob in $1, one tab each.
    # Intentionally unquoted expansion: one word per found path.
    local matches
    matches=$(find . -name "$1")
    vim -p ${matches}
}
| true
|
65fa17790bd8ca144b2e6100437e313376fa895a
|
Shell
|
spiralofhope/shell-random
|
/live/sh/scripts/examples/math.sh
|
UTF-8
| 297
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Examples of integer and fixed-point arithmetic in shell scripts.
# The leading backslash on commands bypasses any shell alias of the same name.
# This functionality may not be available in all shells:
\echo $(( 1 + 1 ))
# Using `expr`:
# shellcheck disable=2003
# NOTE(review): a single quoted argument means expr receives one operand and
# echoes it back literally; 'expr 1 + 1' (three arguments) would compute 2.
\expr "1 + 1"
# Using `bc`:
\echo "1 + 1" | \bc
# Using `bc` for more complex math:
# --mathlib enables the standard math library and a default scale of 20
# decimal places, so division is no longer truncated to an integer.
\echo "10 / 3" | \bc --mathlib
# TODO - More is possible with awk
| true
|
e1d401fac045002d6d57773c7949029693ef17c6
|
Shell
|
badlogic/paperbots
|
/scripts/backup.sh
|
UTF-8
| 1,939
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/sh
# Dumps the live Paperbots db and stores it in Google Drive.
# You need to install this https://github.com/prasmussen/gdrive
# On macOS, brew install gdrive will do the trick.
# You have to specify the server user, host and sudo pwd
# You have to specify the Paperbots db Docker container name, db port, and db password
# You have to specify the GDrive folder Id to store the backup in. Use `gdrive list` to get the folder id.
# Pray and hope nobody sniffs your connection...
set -e
# Fail fast on missing configuration.  'exit 1' replaces the non-portable
# 'exit -1', and all expansions are quoted so empty values cannot break [ ].
if [ -z "$SERVER_USER" ]; then echo "Specify \$SERVER_USER"; exit 1; fi
if [ -z "$SERVER_HOST" ]; then echo "Specify \$SERVER_HOST"; exit 1; fi
if [ -z "$SERVER_PWD" ]; then echo "Specify \$SERVER_PWD"; exit 1; fi
if [ -z "$PAPERBOTS_DB_CONTAINER" ]; then echo "Specify \$PAPERBOTS_DB_CONTAINER"; exit 1; fi
if [ -z "$PAPERBOTS_DB_PWD" ] ; then echo "Specify \$PAPERBOTS_DB_PWD"; exit 1; fi
if [ -z "$PAPERBOTS_DB_PORT" ] ; then echo "Specify \$PAPERBOTS_DB_PORT"; exit 1; fi
if [ -z "$GDRIVE_FOLDER" ] ; then echo "Specify \$GDRIVE_FOLDER"; exit 1; fi
# MySQL dump: run mysqldump inside the db container over ssh, capture locally.
MYSQL_DUMP_FILE="paperbots-$(date +%F).sql"
ZIP_DUMP_FILE="$MYSQL_DUMP_FILE.tar.gz"
echo "Dumping paperbots database to $MYSQL_DUMP_FILE"
ssh -l "$SERVER_USER" "$SERVER_HOST" "echo $SERVER_PWD | sudo -S docker exec $PAPERBOTS_DB_CONTAINER mysqldump --default-character-set=utf8mb4 -uroot -p$PAPERBOTS_DB_PWD --port $PAPERBOTS_DB_PORT paperbots" > "$MYSQL_DUMP_FILE"
tar -czvf "$ZIP_DUMP_FILE" "$MYSQL_DUMP_FILE"
# BUGFIX: the original echoed the never-defined $DUMP_FILE here.
echo "Uploading $ZIP_DUMP_FILE to GDrive"
gdrive upload -p "$GDRIVE_FOLDER" "$ZIP_DUMP_FILE"
rm "$MYSQL_DUMP_FILE"
rm "$ZIP_DUMP_FILE"
# Files dump: tar the uploaded-files directory on the server, fetch and upload.
FILES_DUMP_FILE="paperbots-$(date +%F)-files.tar.gz"
ssh -l "$SERVER_USER" "$SERVER_HOST" "rm -f paperbots-files.tar.gz && tar -C paperbots.io/docker/data/ -czvf paperbots-files.tar.gz files"
scp "$SERVER_USER@$SERVER_HOST:/home/$SERVER_USER/paperbots-files.tar.gz" "$FILES_DUMP_FILE"
gdrive upload -p "$GDRIVE_FOLDER" "$FILES_DUMP_FILE"
rm "$FILES_DUMP_FILE"
| true
|
bade88fd71a44ab92cc4b9879189823c2c3b4b79
|
Shell
|
wasas/shell
|
/install_docker_debian.sh
|
UTF-8
| 1,544
| 3.625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
##### Install Docker on Debian #####
# Preparation: remove old packages, install prerequisites, register the
# official Docker apt repository.
preparation(){
# Remove any pre-existing Docker packages
apt-get -y remove docker docker-engine docker.io containerd runc
# Refresh the package index
apt-get update
# Install required dependencies
apt-get -y install ca-certificates curl gnupg lsb-release
# Add Docker's official GPG key
mkdir -p /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
# Set up the apt repository for the current architecture and release
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
$(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
}
# Install Docker engine, configure its data root, and verify it runs.
install_docker(){
# Create the daemon config file
mkdir -p /etc/docker
touch /etc/docker/daemon.json
# Create the data (storage) directory
mkdir -p /data/docker-data
#chown -R docker:docker /data/docker-data
cat <<EOF > /etc/docker/daemon.json
{
"data-root": "/data/docker-data",
"storage-driver": "overlay2"
}
EOF
apt-get update
apt-get -y install docker-ce docker-ce-cli containerd.io docker-compose-plugin
# Start Docker and enable it at boot
systemctl start docker
systemctl enable docker
# Run a hello-world container to verify the installation
docker run hello-world
}
# Install the standalone docker-compose binary (v2.7.0).
install_composer(){
curl -SL https://github.com/docker/compose/releases/download/v2.7.0/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose
ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose -v
}
preparation && install_docker && install_composer
| true
|
f275c2646b9bff17f03c398b996fc452d33f9ee0
|
Shell
|
GVLind/ST-14
|
/Trimmomatic_Command.sh
|
UTF-8
| 949
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# For every sample directory (S*):
#   1) find the forward read files (*_1.fastq)
#   2) derive the matching reverse file name (*_2.fastq)
#   3) run Trimmomatic PE on each read pair.
for sample in ./S*; do
    if [ -d "$sample" ]; then
        # enter the sample directory, process its pairs, then step back out
        cd "$sample"
        for fwd in *_1.fastq
        do
            # shared prefix of the pair, e.g. "sampleA" for sampleA_1.fastq
            base=${fwd%%_1.fastq}
            rev=${base}"_2.fastq"
            java -jar ~/bin/trimmomatic/trimmomatic.jar \
            PE -phred33 -trimlog \
            ${base}.log \
            $fwd \
            $rev \
            ${base}_trimmed_FP.fastq \
            ${base}_trimmed_FU.fastq \
            ${base}_trimmed_RP.fastq \
            ${base}_trimmed_RU.fastq \
            LEADING:10 TRAILING:10 SLIDINGWINDOW:8:15 MINLEN:150 AVGQUAL:18 \
            ILLUMINACLIP:/home/gvl/bin/trimmomatic/adapters/TruSeq3-PE.fa:2:10:3:1
            echo ------------------------------------------------
        done
        cd ..
    fi
done
| true
|
3de3f9860ce84cc2221fe81c39483837556fa16b
|
Shell
|
whoo/ChrootTool
|
/Chrootbin.sh
|
UTF-8
| 586
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Print the argument in green / red using explicit ANSI escape sequences.
# The original embedded raw ESC bytes in the source, which are easily mangled
# by editors and copy/paste; printf '\033' is unambiguous and equivalent.
function green { printf '\033[32m%s\033[00m\n' "$1"; }
function red { printf '\033[31m%s\033[00m\n' "$1"; }
function push
{
    # Mirror the root-relative path /$1 into the current (jail) directory,
    # creating its parent directory on demand.
    # NB: 'dir' is deliberately left non-local, matching the original.
    dir=$(dirname "$1")
    [ -d "$dir" ] || mkdir -p "$dir"
    cp "/$1" "$dir"
}
# Build a minimal chroot "jail" containing the binaries given as arguments
# plus every shared library they load.
mkdir -p jail
pushd jail
## Essential ###
# The dynamic loaders must be present for any dynamically linked binary.
push lib64/ld-linux-x86-64.so.2
push lib32/ld-linux.so.2
cp ../Dockerfile .
mkdir -p bin sbin
###############################
for a in "$@"
do
# LD_TRACE_LOADED_OBJECTS=1 makes the dynamic loader print the resolved
# library dependencies (like ldd) instead of running the program; awk keeps
# the absolute path after '=>'.
for b in $(LD_TRACE_LOADED_OBJECTS=1 $a | awk '/=>/{print $3}')
do
### Push all lib
green "${b}"
# ${b#\/} strips the leading '/' so push treats it as root-relative.
push "${b#\/}"
done
### Finally Push bin
red "${a}"
push "${a#\/}"
done
popd
| true
|
29a037d18c6a78b19c955e4f5528d18befc6924e
|
Shell
|
timplab/nivar
|
/paperfigs/freebayes_bwa.sh
|
UTF-8
| 1,719
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
##This time, $1 is the results outdir. $2 is the assembly full path. $3 is the prefix.
# Iterative short-read polishing: 5 rounds of bwa-mem alignment + freebayes
# variant calling + bcftools consensus, each round re-indexing the previous
# round's corrected assembly.
# NOTE(review): the script cd's into ~/software/freebayes/scripts below, so
# $1 and $2 presumably must be absolute paths - confirm with callers.
mkdir -p $1
mkdir -p $1/index
mkdir -p $1/bam
##copy the raw assembly into the index dir
prefix=$3
cp $2 $1/index/$prefix.fasta
cd ~/software/freebayes/scripts
for i in {1..5} ;
do
##build the index and align
# Paired trimmed reads (*fwd_paired.fq.gz / *rev_paired.fq.gz) are assumed
# to already exist in $1.
echo building index and aligning for round $i =================================================================
samtools faidx $1/index/$prefix.fasta
bwa index $1/index/$prefix.fasta
bwa mem -t 36 $1/index/$prefix.fasta $1/*fwd_paired.fq.gz $1/*rev_paired.fq.gz |\
samtools view -@ 36 -bS - |\
samtools sort -@ 36 -o $1/bam/$prefix.sorted.bam
samtools index $1/bam/$prefix.sorted.bam
##do the correction
# freebayes-parallel splits the genome into 100kb regions across 36 workers;
# the vcffilter keeps calls where the alternate allele clearly dominates.
echo correcting for round $i ================================================================================
./freebayes-parallel \
<(./fasta_generate_regions.py $1/index/$prefix.fasta.fai 100000) 36\
 -f $1/index/$prefix.fasta \
$1/bam/$prefix.sorted.bam > $1/nivar_fb${i}_bwa_raw.vcf
vcffilter -f "AO > RO & AO > 5 & AF > .5" $1/nivar_fb${i}_bwa_raw.vcf > $1/nivar_fb${i}_bwa.vcf
bgzip -c $1/nivar_fb${i}_bwa.vcf > $1/nivar_fb${i}_bwa.vcf.gz
tabix -p vcf $1/nivar_fb${i}_bwa.vcf.gz
bcftools consensus $1/nivar_fb${i}_bwa.vcf.gz < $1/index/$prefix.fasta > $1/nivar_fb${i}_bwa.fasta
##newly corrected genome replaces the old genome in the index dir
echo moving old
mv $1/bam/$prefix.sorted.bam $1/bam/$prefix.$i.sorted.bam
mv $1/bam/$prefix.sorted.bam.bai $1/bam/$prefix.$i.sorted.bam.bai
rm $1/index/*
echo copying $i to empty index folder
cp $1/nivar_fb${i}_bwa.fasta $1/index/$prefix.fasta
done
| true
|
74dbd17649a168411b8b13653926271c9f4b4f99
|
Shell
|
asdf-vm/asdf
|
/test/get_asdf_config_value.bats
|
UTF-8
| 1,205
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bats
# Tests for get_asdf_config_value: user config file values override defaults,
# defaults apply when a key (or the whole file) is missing.
# shellcheck disable=SC2164
load test_helpers
setup() {
cd "$BATS_TMPDIR"
# user-level config
ASDF_CONFIG_FILE="$BATS_TMPDIR/asdfrc"
cat >"$ASDF_CONFIG_FILE" <<-EOM
key1 = value1
legacy_version_file = yes
EOM
# fallback/default config
ASDF_CONFIG_DEFAULT_FILE="$BATS_TMPDIR/asdfrc_defaults"
cat >"$ASDF_CONFIG_DEFAULT_FILE" <<-EOM
# i have a comment, it's ok
key2 = value2
legacy_version_file = no
EOM
}
teardown() {
rm "$ASDF_CONFIG_FILE"
rm "$ASDF_CONFIG_DEFAULT_FILE"
unset ASDF_CONFIG_DEFAULT_FILE
unset ASDF_CONFIG_FILE
}
@test "get_config returns default when config file does not exist" {
result=$(ASDF_CONFIG_FILE="/some/fake/path" get_asdf_config_value "legacy_version_file")
[ "$result" = "no" ]
}
@test "get_config returns default value when the key does not exist" {
[ "$(get_asdf_config_value "key2")" = "value2" ]
}
@test "get_config returns config file value when key exists" {
[ "$(get_asdf_config_value "key1")" = "value1" ]
[ "$(get_asdf_config_value "legacy_version_file")" = "yes" ]
}
@test "get_config returns config file complete value including '=' symbols" {
# only the first '=' separates key from value; the rest is part of the value
cat >>"$ASDF_CONFIG_FILE" <<-'EOM'
key3 = VAR=val
EOM
[ "$(get_asdf_config_value "key3")" = "VAR=val" ]
}
| true
|
b43b4697ceed5dd4fca5995c5987617ea6f9527d
|
Shell
|
joeystevens00/signal-deviation-alerts
|
/docker_logger/scripts/docker_logs_to_matrix.sh
|
UTF-8
| 343
| 2.546875
| 3
|
[] |
no_license
|
# Forward the last minute of logs for every docker service (except synapse)
# to a Matrix room via /app/src/log.py.
# Load KEY=VALUE lines from /app/env into the environment.
# IFS= and -r keep leading/trailing whitespace and backslashes in values
# intact (plain 'read' mangles backslash sequences).
while IFS= read -r LINE; do export "$LINE"; done < /app/env
# Random start-up jitter (0-32s) so parallel instances spread their load.
sleep $((RANDOM/1000))
# Service names are column 2 of `docker service ls`; drop the header row and
# the synapse service itself (avoids a log-of-its-own-logs feedback loop).
for service in $(\
docker service ls \
| awk '{print $2}'\
| grep -vE '^NAME$'\
| grep -vi synapse\
)
do
# Quote the room name so an unusual service name cannot be word-split.
docker service logs --since 1m "$service"\
| python3 /app/src/log.py --host "$MATRIX_HOST" --user "$MATRIX_USER" --room "docker_$service"
done
| true
|
f6f5ab107c76ff21fe1f4a699206b8f2e5790cfc
|
Shell
|
pmem/dev-utils-kit
|
/docker/images/install-pmdk.sh
|
UTF-8
| 2,373
| 4.3125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018-2022, Intel Corporation
#
# install-pmdk.sh [prefix] [just_install]
#  - PMDK's libraries installation script. Can be use in two ways:
# 1. Regular usage would be to "just install" all PMDK's libraries (preferably from packages).
# 2. The other scenario (when `just_install == 0`) is to 'make install' in '${PREFIX}' and
#    prepare packages (for further use) in '${PREFIX}-pkg' dir. With this usage, it's probably
#    the best to use prefix in a non-system path, e.g. "/opt/pmdk".
#
set -e
if [ "${SKIP_PMDK_BUILD}" ]; then
	echo "Variable 'SKIP_PMDK_BUILD' is set; skipping building PMDK"
	exit
fi
## Script's arguments:
PREFIX=${1:-/usr}
JUST_INSTALL=${2:-1} # if == 0: create extra packages in '${PREFIX}-pkg' dir
## Environment variables:
# ${VAR,,} lowercases the value (bash 4+).
PACKAGE_TYPE=${PACKAGE_MANAGER,,} # make it lowercase
[ "${PACKAGE_TYPE}" == "deb" ] && PACKAGE_TYPE="dpkg" # XXX: PMDK uses different alias
# common: 1.12.1 release, 25.08.2022
CHECKOUT=${PMDK_VERSION:-786098a024c6fe60e746f2cb1041bcfcd21386c9}
echo "Installation prefix: '${PREFIX}'"
echo "Bool flag - just_install: '${JUST_INSTALL}'"
echo "Package type: '${PACKAGE_TYPE}'"
echo "Checkout version: '${CHECKOUT}'"
# prepare repo (fresh clone into a throwaway temp dir)
build_dir=$(mktemp -d -t pmdk-XXX)
git clone https://github.com/pmem/pmdk ${build_dir}
pushd ${build_dir}
git checkout ${CHECKOUT}
# make initial build
make -j$(nproc)
echo "### PMDK compilation complete ###"
if [ "${JUST_INSTALL}" == "1" ]; then
	# install, preferably using packages
	if [ -z "${PACKAGE_TYPE}" ]; then
		sudo make install -j$(nproc) prefix=${PREFIX}
	else
		# BUILD_PACKAGE_CHECK=n skips PMDK's package self-check step.
		make BUILD_PACKAGE_CHECK=n "${PACKAGE_TYPE}" -j$(nproc)
		echo "### PMDK package compilation complete ###"
		if [ "${PACKAGE_TYPE}" = "dpkg" ]; then
			sudo dpkg -i dpkg/*.deb
		elif [ "${PACKAGE_TYPE}" = "rpm" ]; then
			sudo rpm -iv rpm/*/*.rpm
		fi
	fi
else
	# install within '${PREFIX}'
	sudo make install -j$(nproc) prefix=${PREFIX}
	# and prepare packages (move them, no install) into '${PREFIX}-pkg/'
	make BUILD_PACKAGE_CHECK=n "${PACKAGE_TYPE}" -j$(nproc)
	echo "### PMDK package compilation complete ###"
	mkdir -p "${PREFIX}-pkg/"
	if [ "${PACKAGE_TYPE}" = "dpkg" ]; then
		sudo mv dpkg/*.deb "${PREFIX}-pkg/"
	elif [ "${PACKAGE_TYPE}" = "rpm" ]; then
		sudo mv rpm/x86_64/*.rpm "${PREFIX}-pkg/"
	fi
fi
popd
rm -r ${build_dir}
| true
|
7e81eedd2392643cdfb1ef2776f9826c2248786c
|
Shell
|
Zuquim/punch_clock
|
/ctrl-punch-clock.bash
|
UTF-8
| 2,015
| 4.125
| 4
|
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
DOCKER_IMAGE="punch-clock"
DOCKER_CONTAINER=$DOCKER_IMAGE
function print_syntax_exit {
    # Print usage and abort with status 1.
    # Pass $0 as a printf *argument*, not inside the format string, so a '%'
    # in the script path cannot corrupt the output.
    printf '\nSyntax: %s <build|rebuild|run|test|update|full-update>\n' "$0"
    exit 1
}
function build_image {
    # Build the API Docker base image from scratch (fresh pull, no cache).
    echo "Building API Docker base image from scratch..."
    docker build --rm --pull --no-cache -t $DOCKER_IMAGE:latest .
    echo "Docker image ($DOCKER_IMAGE) built!"
    # BUGFIX: 'return' instead of 'exit 0' - the original exit terminated the
    # whole script, so chained calls like 'build_image && run_api' (the 'run'
    # fallback path) never reached the steps after the build.
    return 0
}
function rebuild_image {
    # Re-build the API image on top of the existing base image.
    echo "Re-Building (on top of base image) API Docker image..."
    docker build --rm --no-cache -t $DOCKER_IMAGE:latest -f Dockerfile-fast .
    echo "Docker image ($DOCKER_IMAGE) built!"
    # BUGFIX: 'return' instead of 'exit 0' - the original exit terminated the
    # whole script, so the 'update' flow never stopped/replaced the container.
    return 0
}
function run_tests {
    # Run the API test suite in a throwaway container on port 8001.
    echo "Running API tests..."
    docker run -it --rm -p 8001:80 \
        -v $PWD/log:/var/log/api \
        -e ENV=testing \
        -e TESTING=1 \
        -e FLASK_ENV=testing \
        $DOCKER_IMAGE \
        py.test -v
    echo "Done testing!"
    # BUGFIX: 'return' instead of 'exit 0' so the function is composable.
    return 0
}
function run_api {
    # Start the API container detached on port 8000.
    echo "Running API..."
    docker run -d --name $DOCKER_CONTAINER -p 8000:80 \
        -v $PWD/log:/var/log/api \
        $DOCKER_IMAGE
    echo "Done!"
    # BUGFIX: 'return' instead of 'exit 0' so the function is composable.
    return 0
}
# Dispatch on the single required sub-command.
if [[ $# != 1 ]]
then
    print_syntax_exit
fi
case "$1" in
    build)
        build_image
        ;;
    rebuild)
        rebuild_image
        ;;
    run)
        # Run directly if the image exists, otherwise build it first.
        if [[ $(docker images | grep $DOCKER_IMAGE) != "" ]]
        then
            run_api
        else
            echo "PunchClock Docker image does not exist!"
            echo "Building it from scratch in 10s... [hit <Ctrl>+<C> to cancel]"
            sleep 10 && build_image && run_api
        fi
        ;;
    test)
        run_tests
        ;;
    update)
        rebuild_image
        echo "Stopping and removing current version..."
        docker stop $DOCKER_CONTAINER; docker rm $DOCKER_CONTAINER
        echo "Running new version..."
        run_api
        ;;
    full-update)
        build_image
        echo "Stopping and removing current version..."
        docker stop $DOCKER_CONTAINER; docker rm $DOCKER_CONTAINER
        echo "Running new version..."
        run_api
        ;;
    *)
        print_syntax_exit
        ;;
esac
| true
|
6fd4e6cdd7d001471f52926f3eaa0ac66e797d23
|
Shell
|
releasemgn/shurm
|
/master/makedistr/commonexecute.sh
|
UTF-8
| 18,551
| 2.875
| 3
|
[] |
no_license
|
# Copyright 2011-2014 vsavchik@gmail.com
C_TAG=
C_PGUWARNEXUSGROUPID="com.nvision.pgu.service"
# Resolve the tag to operate on: a non-empty -tag option (GETOPT_TAG) wins,
# otherwise fall back to the configured application version tag.
# The result is exported as C_TAG for child scripts.
function f_execute_getversionmode_defaulttag() {
C_TAG=${GETOPT_TAG:-$C_CONFIG_APPVERSION_TAG}
export C_TAG
}
# Build one "core" project at tag C_TAG via buildone-tags.sh.
# Requires globals: C_BUILD_OUTDIR, C_TAG, C_CONFIG_MODULE_BUILD_OPTIONS_CORE,
# C_SOURCE_PROJECT_MVNCMD (exported to the build as MODULE_MAVEN_CMD),
# C_BUILD_APPVERSION.
function f_execute_buildone_core_tags() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
if [ "$C_BUILD_OUTDIR" = "" ]; then
echo f_execute_buildone_core_tags: C_BUILD_OUTDIR is not set
exit 1
fi
if [ "$C_TAG" = "" ]; then
echo f_execute_buildone_core_tags: C_TAG is not set
exit 1
fi
local BUILD_OPTIONS="$C_CONFIG_MODULE_BUILD_OPTIONS_CORE"
export MODULE_MAVEN_CMD=$C_SOURCE_PROJECT_MVNCMD
./buildone-tags.sh $C_BUILD_OUTDIR "$P_EXECUTE_SET" "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" $C_TAG "$BUILD_OPTIONS" $C_BUILD_APPVERSION
}
# Same as above for a "war" project; only the build options global differs
# (C_CONFIG_MODULE_BUILD_OPTIONS_WAR).
function f_execute_buildone_war_tags() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
if [ "$C_BUILD_OUTDIR" = "" ]; then
echo f_execute_buildone_war_tags: C_BUILD_OUTDIR is not set
exit 1
fi
if [ "$C_TAG" = "" ]; then
echo f_execute_buildone_war_tags: C_TAG is not set
exit 1
fi
local BUILD_OPTIONS="$C_CONFIG_MODULE_BUILD_OPTIONS_WAR"
export MODULE_MAVEN_CMD=$C_SOURCE_PROJECT_MVNCMD
./buildone-tags.sh $C_BUILD_OUTDIR "$P_EXECUTE_SET" "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" $C_TAG "$BUILD_OPTIONS" $C_BUILD_APPVERSION
}
# Download a war distribution (war + webstatic archive + version info) for
# one project from Nexus, then repackage the static distr.
# Skips (returns 1) when the item's obsolete flag disagrees with the release.
# Requires global C_VERSION.
function f_execute_download_wardistr() {
local P_EXECUTE_SET=$1
local P_PROJECT=$2
local P_REPOSITORY=$3
if [ "$C_VERSION" = "" ]; then
echo f_execute_download_wardistr: C_VERSION is not set
exit 1
fi
f_source_readproject war $P_PROJECT
local F_PROJECT_DISTITEM=$C_SOURCE_PROJECT_DISTITEM
# get dist item details
f_distr_readitem $F_PROJECT_DISTITEM
local F_ISOBSOLETE=$C_DISTR_OBSOLETE
# compare with release information
if [ "$C_RELEASE_PROPERTY_OBSOLETE" = "false" ] && [ "$F_ISOBSOLETE" = "true" ]; then
return 1
fi
if [ "$C_RELEASE_PROPERTY_OBSOLETE" = "true" ] && [ "$F_ISOBSOLETE" = "false" ]; then
return 1
fi
local WAR_FILENAME=$C_DISTR_DISTBASENAME-$C_VERSION.war
f_downloadnexus $P_PROJECT $C_CONFIG_NEXUS_REPO $C_PGUWARNEXUSGROUPID $C_DISTR_DISTBASENAME $C_VERSION "war"
if [ $? -ne 0 ]; then
return 1
fi
local STATIC_FILENAME=$C_DISTR_DISTBASENAME-$C_VERSION-webstatic.tar.gz
f_downloadnexus $P_PROJECT $C_CONFIG_NEXUS_REPO $C_PGUWARNEXUSGROUPID $C_DISTR_DISTBASENAME $C_VERSION "tar.gz" "webstatic"
if [ $? -ne 0 ]; then
return 1
fi
# download versioninfo
local VERSION_FILENAME=$P_PROJECT-$C_VERSION-version.txt
f_downloadnexus $P_PROJECT $C_CONFIG_NEXUS_REPO release $P_PROJECT $C_VERSION "txt" "version"
local VERSION_TAGNAME=`cat $VERSION_FILENAME`
f_copy_distr $WAR_FILENAME
f_repackage_staticdistr $P_PROJECT $C_VERSION $WAR_FILENAME $STATIC_FILENAME $VERSION_TAGNAME
return 0
}
# Download only the project's library jar from Nexus; same obsolete-flag
# filtering as above.  Requires global C_VERSION.
function f_execute_download_lib() {
local P_EXECUTE_SET=$1
local P_PROJECT=$2
local P_REPOSITORY=$3
if [ "$C_VERSION" = "" ]; then
echo f_execute_download_lib: C_VERSION is not set
exit 1
fi
f_source_readproject war $P_PROJECT
local F_PROJECT_DISTITEM=$C_SOURCE_PROJECT_DISTITEM
# get dist item details
f_distr_readitem $F_PROJECT_DISTITEM
local F_ISOBSOLETE=$C_DISTR_OBSOLETE
# compare with release information
if [ "$C_RELEASE_PROPERTY_OBSOLETE" = "false" ] && [ "$F_ISOBSOLETE" = "true" ]; then
return 1
fi
if [ "$C_RELEASE_PROPERTY_OBSOLETE" = "true" ] && [ "$F_ISOBSOLETE" = "false" ]; then
return 1
fi
local F_LIB=$C_SOURCE_PROJECT_DISTLIBITEM
f_downloadnexus $P_PROJECT $C_CONFIG_NEXUS_REPO $C_PGUWARNEXUSGROUPID $F_LIB $C_VERSION "jar"
}
# Thin wrapper: copy one project between releases via copy-releaseproject.sh.
function f_execute_copy_release_to_release() {
local P_EXECUTE_SET=$1
local P_PROJECT=$2
local P_REPOSITORY=$3
./copy-releaseproject.sh $P_EXECUTE_SET $P_PROJECT
}
# Set tag C_TAG on a branch (GETOPT_DATE optionally pins the revision date).
# Branch defaults to the project's prod branch; non-trunk branches are
# addressed as branches/<name>.
function f_execute_vcssetbranchtag() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
local P_PROD_BRANCH=$6
if [ "$C_TAG" = "" ]; then
echo f_execute_vcssetbranchtag: C_TAG is not set
exit 1
fi
local F_BRANCHNAME=$C_CONFIG_BRANCHNAME
if [ "$F_BRANCHNAME" = "" ]; then
F_BRANCHNAME=$P_PROD_BRANCH
fi
if [ "$F_BRANCHNAME" != "trunk" ]; then
F_BRANCHNAME=branches/$F_BRANCHNAME
fi
./vcssettag.sh "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" $F_BRANCHNAME $C_TAG "$GETOPT_DATE"
}
# Copy tag C_TAG1 to a new tag C_TAG2 (tags/ namespace).
function f_execute_vcscopytag() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
if [ "$C_TAG1" = "" ]; then
echo f_execute_vcscopytag: C_TAG1 is not set
exit 1
fi
if [ "$C_TAG2" = "" ]; then
echo f_execute_vcscopytag: C_TAG2 is not set
exit 1
fi
./vcscopytag.sh "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" $C_TAG1 "tags/$C_TAG2"
}
# Copy tag C_TAG1 to branch C_BRANCH2 (branches/ namespace).
function f_execute_vcscopytagtobranch() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
if [ "$C_TAG1" = "" ]; then
echo f_execute_vcscopytagtobranch: C_TAG1 is not set
exit 1
fi
if [ "$C_BRANCH2" = "" ]; then
echo f_execute_vcscopytagtobranch: C_BRANCH2 is not set
exit 1
fi
./vcscopytag.sh "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" $C_TAG1 "branches/$C_BRANCH2"
}
# Copy tag C_TAG1 to a brand-new tag C_TAG2 via vcscopynewtag.sh.
function f_execute_vcscopynewtag() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
if [ "$C_TAG1" = "" ]; then
echo f_execute_vcscopynewtag: C_TAG1 is not set
exit 1
fi
if [ "$C_TAG2" = "" ]; then
echo f_execute_vcscopynewtag: C_TAG2 is not set
exit 1
fi
./vcscopynewtag.sh "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" $C_TAG1 $C_TAG2
}
# Delete tag C_TAG.
function f_execute_vcsdroptag() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
if [ "$C_TAG" = "" ]; then
echo f_execute_vcsdroptag: C_TAG is not set
exit 1
fi
./vcsdroptag.sh "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" $C_TAG
}
# Rename tag C_TAG1 to C_TAG2.
function f_execute_vcsrenametag() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
if [ "$C_TAG1" = "" ]; then
echo f_execute_vcsrenametag: C_TAG1 is not set
exit 1
fi
if [ "$C_TAG2" = "" ]; then
echo f_execute_vcsrenametag: C_TAG2 is not set
exit 1
fi
./vcsrenametag.sh "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" $C_TAG1 $C_TAG2
}
# Copy branch C_BRANCH1 to C_BRANCH2; the literal name "prod" is resolved to
# the project's production branch.
function f_execute_vcscopybranch() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
local P_PROD_BRANCH=$6
if [ "$C_BRANCH1" = "" ] || [ "$C_BRANCH2" = "" ]; then
echo "f_execute_vcscopybranch: C_BRANCH1, C_BRANCH2 not set"
exit 1
fi
local X_BRANCH1=$C_BRANCH1
local X_BRANCH2=$C_BRANCH2
if [ "$X_BRANCH1" = "prod" ]; then
X_BRANCH1=$P_PROD_BRANCH
fi
if [ "$X_BRANCH2" = "prod" ]; then
X_BRANCH2=$P_PROD_BRANCH
fi
./vcscopybranch.sh "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" $X_BRANCH1 $X_BRANCH2
}
# Rename branch C_BRANCH1 to C_BRANCH2, with the same "prod" resolution.
function f_execute_vcsrenamebranch() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
local P_PROD_BRANCH=$6
if [ "$C_BRANCH1" = "" ] || [ "$C_BRANCH2" = "" ]; then
echo "f_execute_vcsrenamebranch: C_BRANCH1, C_BRANCH2 not set"
exit 1
fi
local X_BRANCH1=$C_BRANCH1
local X_BRANCH2=$C_BRANCH2
if [ "$X_BRANCH1" = "prod" ]; then
X_BRANCH1=$P_PROD_BRANCH
fi
if [ "$X_BRANCH2" = "prod" ]; then
X_BRANCH2=$P_PROD_BRANCH
fi
./vcsrenamebranch.sh "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" $X_BRANCH1 $X_BRANCH2
}
# Put the candidate tag (C_CONFIG_APPVERSION_TAG) on the configured branch.
function f_execute_start_settag() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
local CANDIDATETAG=$C_CONFIG_APPVERSION_TAG
./vcssettag.sh "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" $C_CONFIG_BRANCHNAME $CANDIDATETAG
}
# Move/set tag C_TAG (falling back to the default tag) on the configured
# branch, or the prod branch if no branch is configured.
function f_execute_update_settag() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
local P_PROD_BRANCH=$6
F_BRANCH=$P_PROD_BRANCH
if [ "$C_CONFIG_BRANCHNAME" != "" ]; then
F_BRANCH=$C_CONFIG_BRANCHNAME
fi
if [ "$F_BRANCH" != "trunk" ]; then
F_BRANCH=branches/$F_BRANCH
fi
if [ "$C_TAG" = "" ]; then
f_execute_getversionmode_defaulttag
fi
local F_TAG=$C_TAG
./vcssettag.sh "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" $F_BRANCH $F_TAG
}
# Stamp version C_VERSION into the project sources on the chosen branch.
function f_execute_setversion() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
local P_PROD_BRANCH=$6
F_BRANCH=$P_PROD_BRANCH
if [ "$C_CONFIG_BRANCHNAME" != "" ]; then
F_BRANCH=$C_CONFIG_BRANCHNAME
fi
./setversion.sh "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" "$F_BRANCH" "$C_VERSION"
}
# Check out a project's working copy into C_TARGETDIR/<project>.
function f_execute_checkout() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
local P_PROD_BRANCH=$6
F_BRANCH=$P_PROD_BRANCH
if [ "$C_CONFIG_BRANCHNAME" != "" ]; then
F_BRANCH=$C_CONFIG_BRANCHNAME
fi
local F_PATH=$C_TARGETDIR/$P_PROJECT
mkdir -p $F_PATH
if [ $? != 0 ]; then
echo unable to create $F_PATH. Exiting
exit 1
fi
./vcscheckout.sh $F_PATH "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" "$F_BRANCH"
}
# Export (no VCS metadata) a project, optionally at GETOPT_TAG, into
# C_TARGETDIR/<project>; any previous export is removed first.
function f_execute_export() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
local P_PROD_BRANCH=$6
F_BRANCH=$P_PROD_BRANCH
if [ "$C_CONFIG_BRANCHNAME" != "" ]; then
F_BRANCH=$C_CONFIG_BRANCHNAME
fi
local F_PATH=$C_TARGETDIR/$P_PROJECT
rm -rf $F_PATH
mkdir -p $C_TARGETDIR
if [ $? != 0 ]; then
echo unable to create $F_PATH. Exiting
exit 1
fi
./vcsexport.sh $F_PATH "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" "$F_BRANCH" "$GETOPT_TAG"
}
# Commit the working copy in C_TARGETDIR/<project> with message C_COMMITMSG.
function f_execute_commit() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
local P_PROD_BRANCH=$6
local F_PATH=$C_TARGETDIR/$P_PROJECT
mkdir -p $F_PATH
if [ $? != 0 ]; then
echo unable to create $F_PATH. Exiting
exit 1
fi
if [ "$C_COMMITMSG" = "" ]; then
C_COMMITMSG="default commit message"
fi
./vcscommit.sh $F_PATH "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" "$C_COMMITMSG"
}
# Produce a diff between tags C_DIFF_SINCE..C_DIFF_TILL, appending info/diff
# output to the files named by C_FINFO / C_FDIFF.
function f_execute_diffbranchtag() {
local P_GROUP=$1
local P_VCSTYPE=$2
local P_EXECUTE_SET=$3
local P_PROJECT=$4
local P_REPOSITORY=$5
local P_VCSPATH=$6
local P_PROD_BRANCH=$7
if [ "$C_DIFF_SINCE" = "" ]; then
echo f_execute_diffbranchtag: C_DIFF_SINCE is not set
exit 1
fi
if [ "$C_DIFF_TILL" = "" ]; then
echo f_execute_diffbranchtag: C_DIFF_TILL is not set
exit 1
fi
if [ "$C_FINFO" = "" ]; then
echo f_execute_diffbranchtag: C_FINFO is not set
exit 1
fi
if [ "$C_FDIFF" = "" ]; then
echo f_execute_diffbranchtag: C_FDIFF is not set
exit 1
fi
./vcsdiff.sh MARKER $C_FINFO $C_FDIFF "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" $C_DIFF_TILL $C_DIFF_SINCE
}
# Diff the prod branch against the last full release tag
# (prod-$C_CONFIG_VERSION_LAST_FULL), writing into C_BUILD_OUTDIR.
function f_execute_diffbranchsinceone() {
local P_VCSTYPE=$1
local P_EXECUTE_SET=$2
local P_PROJECT=$3
local P_REPOSITORY=$4
local P_VCSPATH=$5
local P_JIRA=$6
local P_PROD_BRANCH=$7
if [ "$C_BUILD_OUTDIR" = "" ]; then
echo f_diffbranchsinceone: C_BUILD_OUTDIR is not set
exit 1
fi
./diffbranchsinceone.sh prod-$C_CONFIG_VERSION_LAST_FULL $C_BUILD_OUTDIR "$P_PROJECT" "$P_REPOSITORY" "$P_VCSTYPE:$P_VCSPATH" $P_PROD_BRANCH $P_JIRA
}
# Run a user-supplied custom script (C_CUSTOM_SCRIPT) from the deployment
# home.  The script is sourced in a subshell - so it can define and call
# f_custom_execute without polluting this shell - and the cwd is restored
# afterwards.  GETOPT_SHOWONLY=yes only prints what would run.
function f_execute_custom() {
local P_EXECUTE_SET=$1
local P_PROJECT=$2
local P_REPOSITORY=$3
if [ ! -f "$C_CONFIG_PRODUCT_DEPLOYMENT_HOME/custom/$C_CUSTOM_SCRIPT" ]; then
echo unknown custom script: $C_CUSTOM_SCRIPT. Exiting
exit 1
fi
local F_CUSTOMEXECUTE_SAVEDIR=`pwd`
if [ "$GETOPT_SHOWONLY" = "yes" ]; then
echo "(showonly) $C_CUSTOM_SCRIPT $P_EXECUTE_SET $P_PROJECT"
else
echo "(execute) $C_CUSTOM_SCRIPT $P_EXECUTE_SET $P_PROJECT"
(
source $C_CONFIG_PRODUCT_DEPLOYMENT_HOME/custom/$C_CUSTOM_SCRIPT
f_custom_execute $P_EXECUTE_SET $P_PROJECT
)
fi
cd $F_CUSTOMEXECUTE_SAVEDIR
}
# Apply operation P_FUNCTION to a single project if it is selected by
# P_EXECUTE_LIST ("all", an explicit project list, or a group name).
# Reads project metadata via f_source_readproject, then dispatches to the
# matching f_execute_* helper.
function f_execute_one() {
local P_EXECUTE_SET=$1
local P_EXECUTE_LIST="$2"
local P_FUNCTION=$3
local P_PROJECT=$4
# NOTE(review): this uses the caller's loop variable $project rather than
# the $P_PROJECT parameter - it works because all callers iterate with
# 'for project in ...', but looks unintentional; confirm before relying on it.
f_source_readproject $P_EXECUTE_SET $project
local P_REPOSITORY=$C_SOURCE_REPOSITORY
local P_EXECUTE_MODE=$C_SOURCE_VERSION
local P_GROUP=$C_SOURCE_GROUP
local P_VCSTYPE=$C_SOURCE_VCS
local P_VCSPATH=$C_SOURCE_PATH
local P_JIRA=$C_SOURCE_JIRA
local P_PROD_BRANCH=$C_SOURCE_BRANCH
if [ "$P_PROD_BRANCH" = "" ]; then
# NOTE(review): $P_VCSDIR is not defined anywhere in this function - the
# fallback likely evaluates to just "-prod"; verify against callers.
P_PROD_BRANCH=${P_VCSDIR}-prod
fi
if [ "$VERSION_MODE" = "branch" ] && [ "$P_EXECUTE_MODE" = "trunk" ]; then
# ignore trunk for branch
return 0
fi
# selection: everything, exact project match, or whole group
if [ "$P_EXECUTE_LIST" = "all" ] || [[ " $P_EXECUTE_LIST " =~ " $P_PROJECT " ]] || [ "$P_EXECUTE_LIST" = "$P_GROUP" ]; then
if [ "$GETOPT_SHOWALL" = "yes" ]; then
echo execute: $P_FUNCTION for $P_PROJECT, VERSION_MODE=$VERSION_MODE...
fi
else
return 0
fi
case "$P_FUNCTION" in
# build operations
CUSTOM)
f_execute_custom $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY
;;
BUILDCORE)
f_execute_buildone_core_tags $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH
;;
BUILDWAR)
f_execute_buildone_war_tags $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH
;;
DOWNLOADWAR)
f_execute_download_wardistr $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY
;;
DOWNLOADLIB)
f_execute_download_lib $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY
;;
COPYRELEASETORELEASE)
f_execute_copy_release_to_release $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY
;;
# vcs operations
VCSSETBRANCHTAG)
f_execute_vcssetbranchtag $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH $P_PROD_BRANCH
;;
VCSCOPYTAG)
f_execute_vcscopytag $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH
;;
VCSCOPYTAGTOBRANCH)
f_execute_vcscopytagtobranch $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH
;;
VCSCOPYNEWTAG)
f_execute_vcscopynewtag $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH
;;
VCSDROPTAG)
f_execute_vcsdroptag $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH
;;
VCSRENAMETAG)
f_execute_vcsrenametag $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH
;;
VCSCOPYBRANCH)
f_execute_vcscopybranch $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH $P_PROD_BRANCH
;;
VCSRENAMEBRANCH)
f_execute_vcsrenamebranch $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH $P_PROD_BRANCH
;;
VCSCHECKOUT)
f_execute_checkout $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH $P_PROD_BRANCH
;;
VCSEXPORT)
f_execute_export $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH $P_PROD_BRANCH
;;
VCSCOMMIT)
f_execute_commit $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH $P_PROD_BRANCH
;;
STARTCANDIDATETAGS)
f_execute_start_settag $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH
;;
UPDATETAGS)
f_execute_update_settag $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH $P_PROD_BRANCH
;;
SETVERSION)
f_execute_setversion $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH $P_PROD_BRANCH
;;
DIFFBRANCHTAG)
f_execute_diffbranchtag $P_GROUP $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH $P_PROD_BRANCH
;;
DIFFBRANCHSINCEONE)
f_execute_diffbranchsinceone $P_VCSTYPE $P_EXECUTE_SET $P_PROJECT $P_REPOSITORY $P_VCSPATH $P_JIRA $P_PROD_BRANCH
;;
esac
}
# Run P_FUNCTION on every selected "core" project.  When GETOPT_RELEASE is
# set, the candidate list is narrowed to the projects in that release file.
# (Near-duplicate of f_execute_wars, which does the same for "war" projects.)
function f_execute_core() {
local P_EXECUTE_LIST="$1"
local P_FUNCTION=$2
if [ "$P_EXECUTE_LIST" = "" ]; then
return 0
fi
if [ "$P_FUNCTION" = "" ]; then
echo f_execute_core: P_FUNCTION is empty
exit 1
fi
# get full core project list
f_source_projectlist core
local F_FULLLIST=$C_SOURCE_PROJECTLIST
if [ "$GETOPT_RELEASE" != "" ]; then
local F_FNAME_REL=$C_CONFIG_DISTR_PATH/$GETOPT_RELEASE/release.xml
f_release_setfile $F_FNAME_REL
f_release_getprojects core
if [ "$C_RELEASE_TARGETS" != "all" ]; then
f_getsubsetexact "$F_FULLLIST" "$C_RELEASE_TARGETS"
F_FULLLIST=$C_COMMON_SUBSET
fi
fi
echo commonexecute.sh: execute function=$P_FUNCTION for core projects=$P_EXECUTE_LIST ...
local project
for project in $F_FULLLIST; do
f_execute_one core "$P_EXECUTE_LIST" $P_FUNCTION $project
done
}
function f_execute_wars() {
local P_EXECUTE_LIST="$1"
local P_FUNCTION=$2
if [ "$P_EXECUTE_LIST" = "" ]; then
return 0
fi
if [ "$P_FUNCTION" = "" ]; then
echo f_execute_wars: P_FUNCTION is empty
exit 1
fi
# get full war project list
f_source_projectlist war
local F_FULLLIST=$C_SOURCE_PROJECTLIST
if [ "$GETOPT_RELEASE" != "" ]; then
local F_FNAME_REL=$C_CONFIG_DISTR_PATH/$GETOPT_RELEASE/release.xml
f_release_setfile $F_FNAME_REL
f_release_getprojects war
if [ "$C_RELEASE_TARGETS" != "all" ]; then
f_getsubsetexact "$F_FULLLIST" "$C_RELEASE_TARGETS"
F_FULLLIST=$C_COMMON_SUBSET
fi
fi
echo commonexecute.sh: execute function=$P_FUNCTION for war projects=$P_EXECUTE_LIST ...
local project
for project in $F_FULLLIST; do
f_execute_one war "$P_EXECUTE_LIST" $P_FUNCTION $project
done
}
function f_execute_all() {
local P_LOCAL_EXECUTE_LIST="$1"
local P_FUNCTION=$2
if [ "$P_LOCAL_EXECUTE_LIST" = "" ]; then
return 0
fi
if [ "$P_FUNCTION" = "" ]; then
echo f_execute_all: P_FUNCTION is empty
exit 1
fi
# handle types
local DONE=0
if [ "$P_LOCAL_EXECUTE_LIST" = "core" ] || [ "$P_LOCAL_EXECUTE_LIST" = "all" ]; then
f_execute_core all $P_FUNCTION
DONE=1
fi
if [ "$P_LOCAL_EXECUTE_LIST" = "war" ] || [ "$P_LOCAL_EXECUTE_LIST" = "all" ]; then
f_execute_wars all $P_FUNCTION
DONE=1
fi
if [ "$DONE" = "1" ]; then
return 0
fi
# handle specific subsets
if [[ "$P_LOCAL_EXECUTE_LIST" =~ "^core " ]]; then
P_LOCAL_EXECUTE_LIST=${P_LOCAL_EXECUTE_LIST#core }
f_execute_core "$P_LOCAL_EXECUTE_LIST" $P_FUNCTION
elif [[ "$P_LOCAL_EXECUTE_LIST" =~ "^war " ]]; then
P_LOCAL_EXECUTE_LIST=${P_LOCAL_EXECUTE_LIST#war }
f_execute_wars "$P_LOCAL_EXECUTE_LIST" $P_FUNCTION
else
f_execute_core "$P_LOCAL_EXECUTE_LIST" $P_FUNCTION
f_execute_wars "$P_LOCAL_EXECUTE_LIST" $P_FUNCTION
fi
}
function f_execute_set() {
local P_PROJECTSET=$1
local P_EXECUTE_LIST="$2"
local P_FUNCTION=$3
if [ "$P_PROJECTSET" = "core" ]; then
f_execute_core "$P_EXECUTE_LIST" $P_FUNCTION
elif [ "$P_PROJECTSET" = "war" ]; then
f_execute_wars "$P_EXECUTE_LIST" $P_FUNCTION
fi
}
| true
|
2d7184e4689e3e9bfaac3fd5b38ffcb16e9b4275
|
Shell
|
abhijithda/software-update-manager
|
/sdk/scripts/install
|
UTF-8
| 899
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copyright (c) 2021 Veritas Technologies LLC. All rights reserved. IP63-2828-7171-04-15-9
myprg=$0
myDir=$(dirname ${myprg})
# Copy any update contents placed in the staging area to ${myDir}/.
stage_area="/system/upgrade/stage/"
update_stage_area="${stage_area}/update/"
if [ -d ${update_stage_area} ]; then
echo "Copying latest update contents..."
# NOTE: Good idea not to display the staging location to everyone!
# echo "from staging area ${update_stage_area} to ${myDir}."
cp -fpr ${update_stage_area}/* ${myDir}
fi
# Set environment variables for plugins to know the path of alternate root.
# i.e., upgrade volume.
export VXAPP_UPGRADE_ROOT="/system/upgrade/volume/"
echo "Installing the update..."
${myDir}/asum install "$@"
if [ $? -ne 0 ]; then
echo "Error: Failed to install the update."
exit 1
fi
echo "Successfully installed the update."
exit 0
| true
|
3605a0962bf3c8fa35ea9eb6605ed2abaac68a0f
|
Shell
|
webos-internals/build
|
/optware/readline/control/postinst
|
UTF-8
| 468
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/sh
APPID=mobi.optware.readline
# Symlink files into /opt
cd $IPKG_OFFLINE_ROOT/usr/palm/applications/$APPID/opt
find lib -type d -exec mkdir -p /opt/{} \;
find lib -type f -exec ln -sf $IPKG_OFFLINE_ROOT/usr/palm/applications/$APPID/opt/{} /opt/{} \;
ln -sf libhistory.so.6.0 /opt/lib/libhistory.so
ln -sf libhistory.so.6.0 /opt/lib/libhistory.so.6
ln -sf libreadline.so.6.0 /opt/lib/libreadline.so
ln -sf libreadline.so.6.0 /opt/lib/libreadline.so.6
exit 0
| true
|
9fb842d466ca04d12ec3cac627700336f633f670
|
Shell
|
leggsimon/dotfiles
|
/home/.bashrc
|
UTF-8
| 1,321
| 3.25
| 3
|
[] |
no_license
|
export PATH="$PATH:node_modules/.bin"
export PATH=/Users/simon.legg/.gem/ruby/2.0.0/bin:$PATH
export ANDROID_HOME=/usr/local/opt/android-sdk
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
source ~/.profile
###################
# Command aliases #
###################
alias gc="git commit -m"
alias ga="git add ."
alias gs='git status'
alias gr='git reset HEAD'
alias gp='git push'
alias go='git checkout'
alias ..='cd ..'
alias ...='cd ../..'
alias ls='ls -G'
alias ll='ls -lhg -G'
alias la='ls -G'
################################
# Git commands auto-completion #
################################
if [ -f ~/.git-completion.bash ]; then
. ~/.git-completion.bash
fi
########################
# Git Branch in prompt #
########################
parse_git_branch() {
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}
#########################
# Terminal Text colours #
#########################
Black='\e[0;30m' # Black
BBlack='\e[1;30m' # Bold Black
BGreen='\e[1;32m' # Bold Green
Red='\e[0;31m' # Bold Red
export PS1="\[$Red\][\$(date +%H:%M)]\[$BBlack\][\W]\[$BGreen\]\$(parse_git_branch)\[$Black\] $ "
export NVM_DIR="/Users/simon.legg/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
| true
|
f566da973f7faf35675635406aa8cabfdca31d58
|
Shell
|
jessecooper/EdPurp
|
/install.sh
|
UTF-8
| 603
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
# !/bin/bash
#
# EdPurp Install Script
#
#Functions
test_packages()
{
if ([ -e /usr/bin/npm ] || [ -f "/usr/local/bin/npm" ])
then
echo 'NPM ........... Pass'
else
echo 'NPM ........... Not Found'
exit 1
fi
}
test_folders()
{
if [ -d ./public/uploads ]
then
echo 'public/uploads ........... Pass'
else
`mkdir public/uploads`
echo 'public/uploads ........ Created'
fi
}
npm_install()
{
# Install EdPurp Packages
npm install
# Install mongodb packages needed by mongoose
cd node_modules/mongoose/node_modules/mongodb/
npm install
}
#Main
test_packages
test_folders
npm_install
| true
|
ac25385f8d77af8c2f7edfb09ee35b0816afc3bb
|
Shell
|
Wolox/WLXBluetoothDevice
|
/script/update
|
UTF-8
| 332
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
source script/.env
source script/common/carthage
if [ -f Cartfile.resolved ] && type carthage > /dev/null
then
check_carthage_version
call_carthage "update"
elif [ -f Podfile ]
then
if type bundle > /dev/null && bundle show pod > /dev/null
then
bundle exec pod update
else
pod update
fi
fi
| true
|
77e49aec0ee5cb936e991c44b2af3e356a1d1b93
|
Shell
|
erik/dotfiles
|
/zsh/.config/zsh/functions/kubernetes-ssh
|
UTF-8
| 868
| 3.265625
| 3
|
[] |
no_license
|
# -*- mode: sh -*-
kubernetes-ssh () {
if [ -z "$1" ]; then
echo "usage: $0 NAMESPACE"
return 1
fi
local ns=$1; shift
# Ensure we're auth'd
# NOTE: DD tooling specific
kubectl cluster-info &>/dev/null || ( set -eo pipefail;
kubectl config get-clusters | \
sed 1d | \
sort | \
fzf --prompt="cluster > " | \
xargs -I{} dd-toolbox kubernetes-auth login -e "{}"
) || return 1
local context="$(kubectl config view --output=json | jq -r '."current-context"')"
local preview_cmd="kubectl describe pod -n '$ns' {}"
local pod=$(kubectl get pods -n "$ns" | \
awk '{print $1}' | \
sed 1d | \
fzf --prompt="[$context] > " --preview="$preview_cmd" --preview-window='right:70%'
)
local args="bash"
if [ ! -z "$*" ]; then
args="$*"
fi
kubectl exec -it "$pod" -n "$ns" -- /bin/sh -c "$args"
}
| true
|
27aba286302f61b162f4d62e847a662515ccad6b
|
Shell
|
rai-project/tensorflow
|
/proto/update.sh
|
UTF-8
| 1,604
| 3.140625
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
set -e
if [ $# -ne 1 ]; then
echo "usage: $0 <tensorflow-root-dir>" >&2
exit 1
fi
rsync --existing "$1"/tensorflow/core/framework/*.proto .
rsync --existing "$1"/tensorflow/core/protobuf/*.proto .
rsync --existing "$1"/tensorflow/core/profiler/*.proto .
rsync --existing "$1"/tensorflow/core/util/*.proto .
rsync --existing "$1"/tensorflow/python/framework/*.proto .
# Rewrite file paths and package names.
find . -type f -name '*.proto' -exec perl -pi \
-e 's|tensorflow/core/framework|tensorflow|g;' \
-e 's|tensorflow/core/protobuf|tensorflow|g;' \
-e 's|tensorflow/core/profiler|tensorflow|g;' \
-e 's|tensorflow/core/util|tensorflow|g;' \
-e 's|tensorflow/python/framework|tensorflow|g;' \
-e 's|package tensorflow.tfprof;|package tensorflow;|g;' \
-e 's|package tensorflow;|package tensorflow;|g;' \
{} +
echo "Protos in ${PWD} updated! You can now add and commit them."
| true
|
acbfdc08dc3089da1407cabbea17d842f38df7a9
|
Shell
|
arshadullah/hadoop
|
/install_tuned.sh
|
UTF-8
| 7,549
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright
# Function to discover basic OS details.
discover_os() {
if command -v lsb_release >/dev/null; then
# CentOS, Ubuntu, RedHatEnterpriseServer, Debian, SUSE LINUX
# shellcheck disable=SC2034
OS=$(lsb_release -is)
# CentOS= 6.10, 7.2.1511, Ubuntu= 14.04, RHEL= 6.10, 7.5, SLES= 11
# shellcheck disable=SC2034
OSVER=$(lsb_release -rs)
# 7, 14
# shellcheck disable=SC2034
OSREL=$(echo "$OSVER" | awk -F. '{print $1}')
# Ubuntu= trusty, wheezy, CentOS= Final, RHEL= Santiago, Maipo, SLES= n/a
# shellcheck disable=SC2034
OSNAME=$(lsb_release -cs)
else
if [ -f /etc/redhat-release ]; then
if [ -f /etc/centos-release ]; then
# shellcheck disable=SC2034
OS=CentOS
# 7.5.1804.4.el7.centos, 6.10.el6.centos.12.3
# shellcheck disable=SC2034
OSVER=$(rpm -qf /etc/centos-release --qf='%{VERSION}.%{RELEASE}\n' | awk -F. '{print $1"."$2}')
# shellcheck disable=SC2034
OSREL=$(rpm -qf /etc/centos-release --qf='%{VERSION}\n')
else
# shellcheck disable=SC2034
OS=RedHatEnterpriseServer
# 7.5, 6Server
# shellcheck disable=SC2034
OSVER=$(rpm -qf /etc/redhat-release --qf='%{VERSION}\n')
if [ "$OSVER" == "6Server" ]; then
# shellcheck disable=SC2034
OSVER=$(rpm -qf /etc/redhat-release --qf='%{RELEASE}\n' | awk -F. '{print $1"."$2}')
# shellcheck disable=SC2034
OSNAME=Santiago
else
# shellcheck disable=SC2034
OSNAME=Maipo
fi
# shellcheck disable=SC2034
OSREL=$(echo "$OSVER" | awk -F. '{print $1}')
fi
elif [ -f /etc/SuSE-release ]; then
if grep -q "^SUSE Linux Enterprise Server" /etc/SuSE-release; then
# shellcheck disable=SC2034
OS="SUSE LINUX"
fi
# shellcheck disable=SC2034
OSVER=$(rpm -qf /etc/SuSE-release --qf='%{VERSION}\n' | awk -F. '{print $1}')
# shellcheck disable=SC2034
OSREL=$(rpm -qf /etc/SuSE-release --qf='%{VERSION}\n' | awk -F. '{print $1}')
# shellcheck disable=SC2034
OSNAME="n/a"
fi
fi
}
echo "********************************************************************************"
echo "*** $(basename "$0")"
echo "********************************************************************************"
# Check to see if we are on a supported OS.
# Only available on EL.
discover_os
if [ "$OS" != RedHatEnterpriseServer ] && [ "$OS" != CentOS ]; then
echo "ERROR: Unsupported OS."
exit 3
fi
echo "Installing tuned..."
yum -y -e1 -d1 install tuned
mkdir -m 0755 /etc/tuned/hadoop
cat <<EOF >/etc/tuned/hadoop/tuned.conf
#
# tuned configuration
#
[cpu]
governor=performance
energy_perf_bias=performance
min_perf_pct=100
[vm]
transparent_hugepages=never
[disk]
readahead=>4096
[sysctl]
# ktune sysctl settings for rhel6 servers, maximizing i/o throughput
#
# Minimal preemption granularity for CPU-bound tasks:
# (default: 1 msec# (1 + ilog(ncpus)), units: nanoseconds)
kernel.sched_min_granularity_ns = 10000000
# SCHED_OTHER wake-up granularity.
# (default: 1 msec# (1 + ilog(ncpus)), units: nanoseconds)
#
# This option delays the preemption effects of decoupled workloads
# and reduces their over-scheduling. Synchronous workloads will still
# have immediate wakeup/sleep latencies.
kernel.sched_wakeup_granularity_ns = 15000000
# If a workload mostly uses anonymous memory and it hits this limit, the entire
# working set is buffered for I/O, and any more write buffering would require
# swapping, so it's time to throttle writes until I/O can catch up. Workloads
# that mostly use file mappings may be able to use even higher values.
#
# The generator of dirty data starts writeback at this percentage (system default
# is 20%)
vm.dirty_ratio = 40
# Start background writeback (via writeback threads) at this percentage (system
# default is 10%)
vm.dirty_background_ratio = 10
# PID allocation wrap value. When the kernel's next PID value
# reaches this value, it wraps back to a minimum PID value.
# PIDs of value pid_max or larger are not allocated.
#
# A suggested value for pid_max is 1024 * <# of cpu cores/threads in system>
# e.g., a box with 32 cpus, the default of 32768 is reasonable, for 64 cpus,
# 65536, for 4096 cpus, 4194304 (which is the upper limit possible).
#kernel.pid_max = 65536
# The swappiness parameter controls the tendency of the kernel to move
# processes out of physical memory and onto the swap disk.
# 0 tells the kernel to avoid swapping processes out of physical memory
# for as long as possible
# 100 tells the kernel to aggressively swap processes out of physical memory
# and move them to swap cache
vm.swappiness=1
#net.core.busy_read=50
#net.core.busy_poll=50
#net.ipv4.tcp_fastopen=3
#kernel.numa_balancing=0
# Increase kernel buffer size maximums. Currently this seems only necessary at
# 40Gb speeds.
#
# The buffer tuning values below do not account for any potential hugepage
# allocation. Ensure that you do not oversubscribe system memory.
#net.ipv4.tcp_rmem="4096 87380 16777216"
#net.ipv4.tcp_wmem="4096 16384 16777216"
#net.ipv4.udp_mem="3145728 4194304 16777216"
# Cloudera Professional Services recommendations:
# https://access.redhat.com/sites/default/files/attachments/20150325_network_performance_tuning.pdf
net.core.netdev_max_backlog = 250000
net.core.optmem_max = 4194304
net.core.rmem_default = 4194304
net.core.rmem_max = 4194304
net.core.wmem_default = 4194304
net.core.wmem_max = 4194304
# https://docs.aws.amazon.com/AmazonS3/latest/dev/TCPWindowScaling.html
net.ipv4.tcp_adv_win_scale = 1
net.ipv4.tcp_low_latency = 1
# https://docs.aws.amazon.com/AmazonS3/latest/dev/TCPSelectiveAcknowledgement.html
net.ipv4.tcp_sack = 1
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 65536 4194304
EOF
chown root:root /etc/tuned/hadoop/tuned.conf
chmod 0644 /etc/tuned/hadoop/tuned.conf
mkdir -m 0755 /etc/tuned/hadoop-virtual
cat <<EOF >/etc/tuned/hadoop-virtual/tuned.conf
#
# tuned configuration
#
[main]
include=hadoop
[sysctl]
# If a workload mostly uses anonymous memory and it hits this limit, the entire
# working set is buffered for I/O, and any more write buffering would require
# swapping, so it's time to throttle writes until I/O can catch up. Workloads
# that mostly use file mappings may be able to use even higher values.
#
# The generator of dirty data starts writeback at this percentage (system default
# is 20%)
vm.dirty_ratio = 30
EOF
chown root:root /etc/tuned/hadoop-virtual/tuned.conf
chmod 0644 /etc/tuned/hadoop-virtual/tuned.conf
# shellcheck disable=SC2063
if virt-what | grep -q '.*'; then
tuned-adm profile hadoop-virtual
else
tuned-adm profile hadoop
fi
| true
|
2b814a7bb5d1a38336771e4abda266abaceca867
|
Shell
|
dhruvbaldawa/dotfiles
|
/bin/symlink-dotfiles.sh
|
UTF-8
| 989
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
dev="$HOME/Code"
dotfiles="$dev/dotfiles"
bin="$HOME/bin"
if [[ -d "$dotfiles" ]]; then
echo "Symlinking dotfiles from $dotfiles"
else
echo "$dotfiles does not exist"
exit 1
fi
link() {
from="$1"
to="$2"
echo "Linking '$from' to '$to'"
rm "$to"
ln -s "$from" "$to"
}
for location in "$dotfiles"/home/*; do
file="${location##*/}"
file="${file%.*}"
link "$location" "$HOME/.$file"
done
for location in "$dotfiles"/bin/*; do
file="${location##*/}"
file="${file%.*}"
link "$location" "$bin/$file"
done
if [[ `uname` == 'Darwin' ]]; then
# link "$dotfiles/sublime/Packages/User/Preferences.sublime-settings" "$HOME/Library/Application Support/Sublime Text 3/Packages/User/Preferences.sublime-settings"
link "$dotfiles/vscode/settings.json" "$HOME/Library/Application Support/Code/User/settings.json"
fi
link "$dotfiles/terminal/prompt_dhruv_setup" "$HOME/.zprezto/modules/prompt/functions/prompt_dhruv_setup"
link "$dotfiles/bin/z.sh" "$bin/z"
| true
|
92af33409d137c2068923e6de338c66650e9c6a8
|
Shell
|
toru173/Abusing-macOS
|
/OS X Diskless Boot/rc.cleanup
|
UTF-8
| 242
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/sh
if [ -f "/etc/rc.cdrom" ]; then
echo "Cleaning up rc.cdrom and respringing Mac OS X"
/bin/mv /etc/rc.cdrom /etc/rc.cdrom.bkp
/bin/launchctl reboot userspace
else
echo "rc.cdrom not present. Proceeding with liveboot"
fi
exit 0
| true
|
173590190aff352e44aefb75537790a92479d276
|
Shell
|
SiliconLabs/wfx-linux-tools
|
/scripts/wfx_firmware_install
|
UTF-8
| 1,491
| 3.890625
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright (c) 2018, Silicon Laboratories
# See license terms contained in COPYING file
set -e
. wfx_set_env
check_not_root
DEFAULT_CHIP_KEY=C0
USAGE="Usage: $(basename $0) OPTION
Install firmware
Otpions:
--help display this message
--version VER install the firmware specified by VER
--list-tags list firmware versions
"
GIT="git -C $GITHUB_FIRMWARE_PATH"
WFX_GIT="$GITHUB_TOOLS_PATH/update/git.sh --path $GITHUB_FIRMWARE_PATH"
case "$1" in
--help)
echo "$USAGE"
exit 0
;;
--version)
VERSION="$2"
$WFX_GIT --version "$VERSION"
;;
--list-tags)
$WFX_GIT --list
exit 0
;;
*)
echo "ERROR: unknown command $1" >&2
echo "$USAGE" >&2
exit 1
;;
esac
FIRMWARE_VER=$($GIT describe --tags --dirty)
printf "Installing firmware %s\n" "$FIRMWARE_VER"
sudo rm -fv /lib/firmware/wfm_wf200.sec
sudo rm -fv /lib/firmware/wfm_wf200_[A-Z0-9][0-9].sec
sudo rm -fv /lib/firmware/wfm_wf200_[A-Z0-9][0-9]-FW[0-9].[0-9].[0-9].sec
for i in $GITHUB_FIRMWARE_PATH/wfm_wf200_??.sec; do
INFILE=$i
INFILE_BASE=$(basename $INFILE)
OUTFILE_BASE=${INFILE_BASE%.sec}-$FIRMWARE_VER.sec
OUTFILE=/lib/firmware/$OUTFILE_BASE
set -x
sudo cp $INFILE $OUTFILE
sudo ln -sfn $OUTFILE_BASE /lib/firmware/$INFILE_BASE
{ set +x; } 2>/dev/null # Disable traces without disturbing user
done
set -x
echo "Firmware installed for version $VERSION"
| true
|
2456dd2c8d2a8fef74c4216f7cf510f1567daa2c
|
Shell
|
delph-in/delphintools
|
/bin/logon_hg_id
|
UTF-8
| 275
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
logon_hg_id () {
HG=${1:-$HOME/logon.hg} && \
hg -R $HG qpop -a && \
LOGON_ID=$(hg -R $HG id -n) && \
PATCHES_ID=$(hg -R $HG/.hg/patches id -n) && \
hg -R $HG qpush -a 2>&1 && \
echo ${LOGON_ID}@${PATCHES_ID}
}
logon_hg_id $1 | tail -n1
| true
|
1126070fcc18f96c90bee5c7604aea41fba2a0e2
|
Shell
|
ShalokShalom/plan.sh
|
/itstool/plan.sh
|
UTF-8
| 463
| 2.703125
| 3
|
[
"Apache-2.0"
] |
permissive
|
pkg_origin=cosmos
pkg_name=itstool
pkg_version=2.0.2
pkg_description="XML to PO and back again"
pkg_upstream_url="http://itstool.org/"
pkg_license=('GPL3')
pkg_deps=('python2' 'libxml2')
pkg_source=("http://files.itstool.org/itstool/${pkg_name}-${pkg_version}.tar.bz2")
pkg_shasum=('d472d877a7bc49899a73d442085b2f93')
do_build() {
./configure --prefix=/usr
make
}
check() {
make -k check
}
do_package() {
make DESTDIR=${pkg_prefix} install
}
| true
|
558be1554410f2afb9d5c1b7aec11ac770760819
|
Shell
|
rovjuvano/dotfiles
|
/bash.d/serve.sh
|
UTF-8
| 170
| 2.78125
| 3
|
[] |
no_license
|
function serve {
local TYPE="text/html"
if [ "$1" = "-t" ]; then TYPE=$2; shift 2; fi
(echo -e "HTTP/1.0 200 Ok\nContent-Type: ${TYPE}\n"; cat -) | nc -l 8888
}
| true
|
76305c4496d882f288ac584939a9967e2484c506
|
Shell
|
mc-assemblage/nembase
|
/components/FASTQC/fastqc.sh
|
UTF-8
| 294
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
. ./functions.sh
kmersize=$( getparameter "kmersize")
threads=$( getparameter "threads")
fastqfile=$( getinput "fastqfile")
fastqcdir=$( getoutput "fastqcdir")
if [ ! -d $fastqcdir ]
then
mkdir $fastqcdir
fi
fastqc -t $threads -o $fastqcdir -f fastq -k $kmersize $fastqfile
| true
|
94c9e9fe02079dd1ed86d92a21ab884649c15edf
|
Shell
|
DevilInChina/NumericalAnalysis
|
/run.sh
|
UTF-8
| 187
| 2.703125
| 3
|
[] |
no_license
|
para='gau doo cro cho jac gs sor cg'
File=$1
gcc main.c -o main -lm -lpthread
mat=$(ls ${File})
for name in ${para}
do
for matrix in ${mat}
do
./main $name ${File}/$matrix
done
done
| true
|
e8146df4cfbb76b8d353cd919040f833771db04f
|
Shell
|
Nikhilkumbhare97/Flip-Coin-Simulation
|
/flipCoinSimulator.sh
|
UTF-8
| 1,161
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
#Constants
isHeads=1
maxWin=21
ties=1
diff=2
#Variables
Head=0
Tail=0
Tie=0
Head1=0
Tail1=0
read -p "Number of times Coin Flip :" flip
for (( i=1; i<=$flip ; i++ ))
do
while [[ $Head -lt $maxWin ]] && [[ $Tail -lt $maxWin ]] && [[ $(($Head+$Tail)) -lt $flip ]] && [[ $Tie -lt $ties ]]
do
result=$((RANDOM%2))
if [ $result -eq $isHeads ]
then
Head=$(($Head+1))
else
Tail=$(($Tail+1))
fi
if [ $Head -eq $Tail ]
then
((Tie++))
else
((i++))
fi
done
done
echo "Number of Times Heads Won :" $Head
echo "Number of Times Tails Won :" $Tail
echo "Number of Times Tie :" $Tie
if [ $Head -gt $Tail ]
then
echo Head won by $(($Head - $Tail ))
elif [ $Tail -gt $Head ]
then
echo Tail won by $(($Tail - $Head ))
else
echo Tie
fi
for (( i=0; i<$flip; i++ ))
do
if [ $Head -eq $Tail ] && [ $(($Head1-$Tail1)) -ne $diff ] && [ $(($Tail1-$Head1)) -ne $diff ]
then
result1=$((RANDOM%2))
if [ $result1 -eq $isHeads ]
then
Head1=$(($Head1+1))
else
Tail1=$(($Tail1+1))
fi
elif [ $Tie -eq $ties ]
then
echo "After Tie Heads And Tails :" $Head1 $Tail1
i=$flip
else
i=$flip
fi
done
| true
|
389f9189edbe2285a6434ec6005a9556a8669299
|
Shell
|
timarenz/hashicorp-demos
|
/multi-cloud-vm-consul-mesh-gateway/scripts/mesh-web-login.sh
|
UTF-8
| 1,080
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
sudo mkdir /tmp/web-login/
sudo tee /tmp/web-login/index.html <<EOF
<!DOCTYPE html>
<html>
<head>
<title>Welcome to /LOGIN API path!</title>
</head>
<body>
<h1>Welcome to /LOGIN API path!</h1>
<h1>Located on host $(hostname)</h1>
<p>If you see this page, you were redirected by a Consul Service Mesh L7 service-router.</p>
</body>
</html>
EOF
sudo docker stop web-login
sudo docker rm web-login
sudo docker run -d -p 89:80 -v /tmp/web-login:/usr/share/nginx/html --hostname ${HOSTNAME} --name web-login nginx
sudo tee /etc/consul.d/web-login.json <<EOF
{
"service": {
"name": "web-login",
"port": 89,
"check": {
"args": [
"curl",
"localhost:89"
],
"interval": "5s"
},
"connect": {
"sidecar_service": {}
}
}
}
EOF
sudo consul reload
sudo docker stop web-login-proxy
sudo docker rm web-login-proxy
sudo docker run -d --network host --name web-login-proxy timarenz/envoy-consul:v1.11.1_1.6.1 -sidecar-for web-login -admin-bind localhost:19012 -- -l debug
| true
|
56b23e7dda67e15407f7d495eefb13be42ba20c8
|
Shell
|
djulian2001/vagrant_devstack
|
/scripts/admin_project_tasks.sh
|
UTF-8
| 1,209
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# create the project and add all the users for the project
# project
project_id=$( openstack project create \
-f value -c id \
--description "neal adams and jill xavier project" \
--enable \
--or-show \
"nadams_nih_12345" )
# set project usage
# reference: os_project_quota.sh
openstack quota set \
--server-groups 4 \
--server-group-members 8 \
--key-pairs 10 \
--floating-ips 2 \
--networks 4 \
--ram 2048 \
--cores 5 \
--instances 8 \
--per-volume-gigabytes 1 \
--volumes 8 \
"$project_id"
# need the deploy user to have admin permission.
admin_role_id=$( openstack role show -f value -c id "admin" )
# create the project_deployer and grant all required access.
# as the admin user
user_id=$(
openstack user create -f value -c id \
--project "$project_id" \
--password "deployer" \
--enable \
--or-show \
"project_deployer" )
# in theroy this should give the deploy user the access to create all of the objects...
openstack role add -f value -c id --project "$project_id" --user "$user_id" "$admin_role_id"
# at this point, all of the project assignments should come from the deploy user?
# i think yes lets see..
| true
|
18f2355ad962ae7f8dfb6310272135cfc9f6ec1a
|
Shell
|
elbakerino/cloud-in-a-shell
|
/apache/vhost-rm.sh
|
UTF-8
| 692
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
DIR_CUR=$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)
source ${DIR_CUR}/../_boot.sh
DIR_CUR=$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)
VHOST_NAME=${1}
conffile=/etc/apache2/sites-available/${VHOST_NAME}.conf
conffile_le=/etc/apache2/sites-available/${VHOST_NAME}-le-ssl.conf
echo "disabling and removing vhost '${VHOST_NAME}'"
echo ""
a2dissite ${VHOST_NAME}.conf
rm ${conffile}
echo " ✓ disabled & removed ${VHOST_NAME}.conf"
if test -f "${conffile_le}"; then
a2dissite ${VHOST_NAME}-le-ssl.conf
rm ${conffile_le}
echo " ✓ disabled & removed ${VHOST_NAME}-le-ssl.conf"
fi
# todo: remove cert for vhost
systemctl reload apache2
| true
|
87112af96bbbb888d9e8da140351fd69d7ed2877
|
Shell
|
magos-linux/magos-linux
|
/make_MagOS/files/patches/rootfs/MagOS/usr/lib/magos/rootfs-patches/MagOS/MagOS-01-locales.sh
|
UTF-8
| 234
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
ln -sf /usr/share/locale /etc/locale
#creating locales archive
LOCALEARCHIVE="en_US ru_RU"
if [ -x /usr/bin/localedef ] ;then
for a in $LOCALEARCHIVE ;do
localedef -c -f UTF-8 -i "$a" $a.UTF-8
done
fi
exit 0
| true
|
9f31961d5cc4d78004361e4207351a02276193a2
|
Shell
|
FerreiraLari/ex-aula-2-GIT
|
/feature-01_script
|
UTF-8
| 160
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
echo "Insira o link do arquivo a ser extraído: "
read file_download
wget $file_download > file1
dtrx $file1
echo "Arquivo descompactado."
| true
|
9e3bf72beb9d21615307b5babf8b802b3485797c
|
Shell
|
christian-fei/garden
|
/scripts/make-today-timelapse
|
UTF-8
| 464
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
echo "rm -f today-timelapse.mp4"
rm -f today-timelapse.mp4
DATE=$(date +"%Y-%m-%d")
echo "using snapshots/$DATE*"
ls -1 snapshots/$DATE*
cat snapshots/$DATE*.jpg | ffmpeg -f image2pipe -r 30 -vcodec mjpeg -i - -vcodec libx264 today-timelapse.mp4
# (not working?)
# FILES_LIST=$(ls -1t snapshots/*.jpg | grep -v current | head -10)
# grep "$FILES_LIST" | xargs cat | ffmpeg -f image2pipe -r 1 -vcodec mjpeg -i - -vcodec libx264 today-timelapse.mp4
| true
|
9c1672ae5853c4d615fbae14a46579bb00f87d7d
|
Shell
|
yupswing/dotfiles
|
/dotfiles/scripts/screenshot.zsh
|
UTF-8
| 560
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env zsh
scrot_dir=$HOME/Pictures/screenshots/
scrot_file='%Y-%m-%dT%H:%M:%S_$wx$h.png'
if ! [ -d $scrot_dir ]; then
mkdir -p $scrot_dir
fi
cd $scrot_dir
case "$1" in
--desktop | -d | $NULL)
scrot -f $scrot_file -e 'xclip -selection clipboard -t image/png -i $f && notify-send "screenshot taken $f"'
;;
--select | -s)
# notify-send 'select an area for the screenshot' &
scrot -fs $scrot_file -e 'xclip -selection clipboard -t image/png -i $f && notify-send "screenshot taken $f"'
;;
--open | -o)
xdg-open $scrot_dir
;;
esac
exit 0
| true
|
0cd370a209b46b3598d70e531a1ab3c7bbfe0435
|
Shell
|
kbasecollaborations/KB-CAMOCO
|
/scripts/load_ref_data.sh
|
UTF-8
| 2,160
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# start in data folder
cd /data
# camoco tutorial and maize reference data
# tutorial: https://camoco.readthedocs.io/en/latest/tutorial.html
RefGen=https://github.com/LinkageIO/Camoco/raw/master/tests/raw/RefGen/ZmB73_5b_FGS.gff.gz
RefGenfile=${RefGen##*/}
Expr1=https://github.com/LinkageIO/Camoco/raw/master/tests/raw/Expr/RNASEQ/Hirsch2014_PANGenomeFPKM.txt.gz
Expr1file=${Expr1##*/}
GOBASE=https://github.com/LinkageIO/Camoco/raw/master/tests/raw/GOnt/go.obo.gz
GOBASEfile=${GOBASE##*/}
GOZM=https://github.com/LinkageIO/Camoco/raw/master/tests/raw/GOnt/zm_go.tsv.gz
GOZMfile=${GOZM##*/}
GWASZM=https://github.com/LinkageIO/Camoco/raw/master/tests/raw/GWAS/SchaeferPlantCell/ZmIonome.allLocs.csv.gz
GWASZMfile=${GWASZM##*/}
# Expr2=https://github.com/LinkageIO/Camoco/raw/master/tests/raw/Expr/RNASEQ/Stelpflug2018_B73_Tissue_Atlas.txt.gz
# Expr3=https://github.com/LinkageIO/Camoco/raw/master/tests/raw/Expr/RNASEQ/Schaefer2018_ROOTFPKM.tsv.gz
# GWASZM2=https://github.com/LinkageIO/Camoco/raw/master/tests/raw/GWAS/WallacePLoSGenet/Wallace_etal_2014_PLoSGenet_GWAS_hits-150112.txt.gz
# cURL this data, follow github redirects (-L), preserve filename (-O), keep silent (-s) and accept only gzip
# files (-H), then decompress with gunzip and bash string manipulation "${URL##*/}"
curl -L -sH 'Accept-encoding: gzip' -O "${RefGen}" && gunzip -f "${RefGenfile}"
curl -L -sH 'Accept-encoding: gzip' -O "${Expr1}" && gunzip -f "${Expr1file}"
curl -L -sH 'Accept-encoding: gzip' -O "${GOZM}" && gunzip -f "${GOZMfile}"
curl -L -sH 'Accept-encoding: gzip' -O "${GOBASE}" && gunzip -f "${GOBASEfile}"
curl -L -sH 'Accept-encoding: gzip' -O "${GWASZM}" && gunzip -f "${GWASZMfile}"
# curl -L -sH 'Accept-encoding: gzip' -O "${Expr2}" && gunzip -f "${Expr2##*/}"
# curl -L -sH 'Accept-encoding: gzip' -O "${Expr3}" && gunzip -f "${Expr3##*/}"
# curl -L -sH 'Accept-encoding: gzip' -O "${GWASZM2}" && gunzip -f "${GWASZM2##*/}"
if ( [ -f "ZmB73_5b_FGS.gff" ] \
&& [ -f "go.obo" ] \
&& [ -f "Hirsch2014_PANGenomeFPKM.txt" ] \
&& [ -f "zm_go.tsv" ] \
&& [ -f "ZmIonome.allLocs.csv" ] ); then
touch __READY__
else
echo "Init failed"
fi
| true
|
39eb42615fbab097ed795ca2357bfb4c97ecd037
|
Shell
|
lawrie/riscv32_lcc
|
/testcases/binutils/rvi/run.sh
|
UTF-8
| 365
| 2.96875
| 3
|
[] |
no_license
|
for name in *.s; do
../../../binutils/bin/as "-o" "results/${name%.s}.elf" "$name"
../../../binutils/bin/ld "-h" "-o" "results/${name%.s}.bin" "results/${name%.s}.elf"
if cmp -s "${name%.s}.ref" "results/${name%.s}.bin"
then
echo "Testcase $name ok.">>results/summary.log
else
echo "Testcae $name fail.">>results/summary.log
fi
done
| true
|
87e8cd3941f0d9aeecfdb57c1317d255b5e08942
|
Shell
|
lemattma/backup-script
|
/backup-site.sh
|
UTF-8
| 814
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
DATE=`date +%Y%m%d`
DBHOST=localhost
DBUSER=user
DBPASS=secreto
DBNAMES=database
BACKUP_FOLDER="/path/to/backups-folder"
BACKUP_SOURCE="/path/to/site-folder"
echo "Running MySQL backup"
# Fetch all the databases if none is specified
if [[ $DBNAMES == '' ]]; then
DBNAMES="$(mysql -u ${DBUSER} -h ${DBHOST} -p${DBPASS} -Bse 'show databases')"
fi
for db in $DBNAMES
do
echo "Backing up ${db}"
FILE=${BACKUP_FOLDER}/backup-${DATE}.${db}.sql.gz
mysqldump --opt --skip-lock-tables -u ${DBUSER} -h ${DBHOST} -p${DBPASS} ${db} | gzip -9 > ${FILE}
done
echo "Running files backup"
tar -czf ${BACKUP_FOLDER}/backup-${DATE}-site.tar.gz ${BACKUP_SOURCE}
echo "deleting old files"
# Remove files older than 15 days
find ${BACKUP_FOLDER} -type f -name "backup-*" -mtime +15 -exec rm -f '{}' ';'
| true
|
1ce98281e60d82dc6bc06ccaf07d02e2d5120e27
|
Shell
|
uphold-forks/flare
|
/src/stateco/client/proveDataAvailability.sh
|
UTF-8
| 550
| 3.0625
| 3
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
# (c) 2021, Flare Networks Limited. All rights reserved.
# Please see the file LICENSE for licensing terms.
#!/bin/bash
if [ $1 == 'btc' ]; then
PORT=8000
elif [ $1 == 'ltc' ]; then
PORT=8001
elif [ $1 == 'doge' ]; then
PORT=8002
elif [ $1 == 'xrp' ]; then
PORT=8003
fi;
while true; do
nohup $(sleep 10; curl -s http://localhost:$PORT/?prove=$1) >& /dev/null &
if ! lsof -Pi :$PORT -sTCP:LISTEN -t >/dev/null ; then
node stateConnector $PORT --unhandled-rejections=strict
else
echo "System already activated."
fi;
sleep 10
done
| true
|
aaacb78c5b0a2e79065337e7ecf13946ab28a884
|
Shell
|
pepkit/hello_looper
|
/pipeline/count_lines.sh
|
UTF-8
| 113
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
linecount=`wc -l $1 | sed -E 's/^[[:space:]]+//' | cut -f1 -d' '`
echo "Number of lines: $linecount"
| true
|
dfea5c4dc48211ed037d93c7aaeba233ececc06f
|
Shell
|
ErikEkstedt/.files
|
/possible_notes/Installation/misc/misc-links.sh
|
UTF-8
| 1,067
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
echo Create links for Misc-config files:
echo
echo ln -sf ~/.files/vimrc ~/.vimrc
ln -sf ~/.files/nvim/init.vim ~/.vimrc
echo ln -sf ~/.files/gitignore ~/.gitignore
ln -sf ~/.files/gitignore ~/.gitignore
echo ln -sf ~/.files/gitconfig ~/.gitconfig
ln -sf ~/.files/gitconfig ~/.gitconfig
mkdir -p ~/.config/zathura
echo ln -sf ~/.files/zathurarc ~/.config/zathura/zathurarc
ln -sf ~/.files/zathurarc ~/.config/zathura/zathurarc
if [[ `uname` == Linux ]]; then
echo ln -sf ~/.files/inputrc ~/.inputrc
ln -sf ~/.files/inputrc ~/.inputrc
echo ln -sf ~/.files/bashrc ~/.bashrc
ln -sf ~/.files/bashrc ~/.bashrc
echo ln -sf ~/.files/xinitrc ~/.xinitrc
ln -sf ~/.files/xinitrc ~/.xinitrc
echo ln -sf ~/.files/Xresources ~/.Xresources
ln -sf ~/.files/Xresources ~/.Xresources
if [[ -x "$(command -v plasmashell)" ]]; then
echo Running KDE
echo "Removing konsole directory (~/.local/share/konsole)"
rm -rf ~/.local/share/konsole
echo ln -sf ~/.files/konsole ~/.local/share/
ln -sf ~/.files/terminals/konsole ~/.local/share/
fi
fi
| true
|
5e69546215564cde8481cdb8af97d8c972db1dd2
|
Shell
|
ps2420/spring-boot-template
|
/scripts/status.sh
|
UTF-8
| 879
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# Usage: Hello World Bash Shell Script Using Variables
# Author: Vivek Gite
# -------------------------------------------------
# Define bash shell variable called var
# Avoid spaces around the assignment operator (=)
status_component () {
component_name=$1
pid=$(ps -ef | grep java | grep $component_name | grep -v grep | awk '{print $2}')
if [[ -z $pid ]]
then
echo " $component_name :[DOWN]"
else
echo " $component_name :[UP] : [$pid]"
fi
}
check_status_component () {
echo ""
echo "============ component statuses ================="
component_array=("config-server" "api-gateway" "service-registry" "search-service" "file-handler" "db-service")
for comp_name in "${component_array[@]}"
do
status_component $comp_name
done
echo "================================================="
echo ""
}
check_status_component
| true
|
adce14338244d87647dce4e2734de898f3b9db31
|
Shell
|
nichite/bash-cta
|
/cta.bash
|
UTF-8
| 22,081
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
#-------------------------------------------------------------------------------
# cta.bash
# Nic Hite
# 09/22/16
#
# A simple, lightweight CTA tracker written in Bash.
#
# Basically just takes your arguments, does a quick lookup for stop and / or
# station codes, makes a curl request to the CTA API, parses the XML, and
# displays the results.
#-------------------------------------------------------------------------------
# --- Constants ---
# Codes for each field in each arrival.
readonly STATION_ID=0
readonly STOP_ID=1
readonly STATION_NAME=2
readonly STOP_DESCRIPTION=3
readonly RUN=4
readonly ROUTE=5
readonly DESTINATION_STOP=6
readonly DESTINATION_NAME=7
readonly INBOUND_IND=8
readonly PREDICTION_TS=9
readonly ARRIVAL_TS=10
readonly IS_APPROACHING=11
readonly IS_SCHEDULED=12
readonly IS_DELAYED=13
readonly FAULT_IND=14
readonly LATITUDE=15
readonly LONGITUDE=16
readonly CARDINAL_DIRECTION_DEGREE=17
readonly NUM_FIELDS=18
# Train line indices to smartly insert the correct printf color
readonly Red=0
readonly Blue=1
readonly Brn=2
readonly G=3
readonly Org=4
readonly P=5
readonly Pink=6
readonly Y=7
readonly END=8
# Map user-friendly route colors to the ones actually used by the API
readonly blue=Blue
readonly red=Red
readonly green=G
readonly brown=Brn
readonly orange=Org
readonly purple=P
readonly pink=Pink
readonly yellow=Y
# A list of the train station information to be used. Rather than keep this
# in a separate file and introduce a dependency, I'll just throw this in here
# so users can move this script anywhere they want without any problems.
#
# You'll notice there's some geolocation stuff in here too. Haven't done too
# much with that yet. But soon!
stations_txt="40830,18th,pink,41.857908,-87.669147,1
41120,35th-bronzeville-iit,green,41.831677,-87.625826,1
40120,35th/archer,orange,41.829353,-87.680622,1
41270,43rd,green,41.816462,-87.619021,1
41080,47th,green,41.809209,-87.618826,1
41230,47th,red,41.810318,-87.63094,1
40130,51st,green,41.80209,-87.618487,1
40580,54th/cermak,pink,41.85177331,-87.75669201,1
40910,63rd,red,41.780536,-87.630952,1
40990,69th,red,41.768367,-87.625724,1
40240,79th,red,41.750419,-87.625112,1
41430,87th,red,41.735372,-87.624717,1
40450,95th/dan ryan,red,41.722377,-87.624342,1
40680,adams/wabash,brown,41.879507,-87.626037,0
40680,adams/wabash,green,41.879507,-87.626037,0
40680,adams/wabash,orange,41.879507,-87.626037,0
40680,adams/wabash,purple,41.879507,-87.626037,0
40680,adams/wabash,pink,41.879507,-87.626037,0
41240,addison,blue,41.9466037164,-87.7184584172,0
41420,addison,red,41.947316173,-87.6536241013,1
41440,addison,brown,41.947028,-87.674642,1
41200,argyle,red,41.9733220506,-87.6585279483,0
40660,armitage,brown,41.918217,-87.652644,1
40660,armitage,purple,41.918217,-87.652644,1
40170,ashland,green,41.885269,-87.666969,1
40170,ashland,pink,41.885269,-87.666969,1
41060,ashland,orange,41.839234,-87.665317,1
40290,ashland/63rd,green,41.77886,-87.663766,1
40010,austin,blue,41.870851,-87.776812,0
41260,austin,green,41.887293,-87.774135,0
40060,belmont,blue,41.9391107041,-87.71225212,0
41320,belmont,red,41.939751,-87.65338,1
41320,belmont,brown,41.939751,-87.65338,1
41320,belmont,purple,41.939751,-87.65338,1
40340,berwyn,red,41.977984,-87.658668,0
41380,bryn mawr,red,41.983504,-87.65884,0
40440,california,pink,41.854109,-87.694774,1
40570,california,blue,41.9221583097,-87.6972439537,0
41360,california,green,41.88422,-87.696234,1
40280,central,green,41.887389,-87.76565,1
41250,central,purple,42.063987,-87.685617,0
40780,central park,pink,41.853839,-87.714842,1
41000,cermak-chinatown,red,41.853206,-87.630968,1
41690,cermak-mccormick place,green,41.853115,-87.626402,1
40710,chicago,brown,41.89681,-87.635924,1
40710,chicago,purple,41.89681,-87.635924,1
41410,chicago,blue,41.896075,-87.655214,0
41450,chicago,red,41.896671,-87.628176,1
40420,cicero,pink,41.85182,-87.745336,1
40480,cicero,green,41.886519,-87.744698,1
40970,cicero,blue,41.871574,-87.745154,0
40630,clark/division,red,41.90392,-87.631412,1
40380,clark/lake,blue,41.885737,-87.630886,1
40380,clark/lake,brown,41.885737,-87.630886,1
40380,clark/lake,green,41.885737,-87.630886,1
40380,clark/lake,orange,41.885737,-87.630886,1
40380,clark/lake,purple,41.885737,-87.630886,1
40380,clark/lake,pink,41.885737,-87.630886,1
40430,clinton,blue,41.875539,-87.640984,0
41160,clinton,green,41.885678,-87.641782,1
41160,clinton,pink,41.885678,-87.641782,1
41670,conservatory-central park drive,green,41.884904,-87.716523,1
40720,cottage grove,green,41.780309,-87.605857,1
40230,cumberland,blue,41.984246,-87.838028,1
40090,damen,brown,41.966286,-87.678639,1
40210,damen,pink,41.854517,-87.675975,1
40590,damen,blue,41.9098452343,-87.6775400139,0
40050,davis,purple,42.04771,-87.683543,1
40690,dempster,purple,42.041655,-87.681602,0
40140,dempster-skokie,yellow,42.038951,-87.751919,1
40530,diversey,brown,41.932732,-87.653131,1
40530,diversey,purple,41.932732,-87.653131,1
40320,division,blue,41.903355,-87.666496,0
40390,forest park,blue,41.874257,-87.817318,1
40520,foster,purple,42.05416,-87.68356,0
40870,francisco,brown,41.966046,-87.701644,1
41220,fullerton,red,41.9253003719,-87.6528684398,1
41220,fullerton,brown,41.9253003719,-87.6528684398,1
41220,fullerton,purple,41.9253003719,-87.6528684398,1
40510,garfield,green,41.795172,-87.618327,1
41170,garfield,red,41.79542,-87.631157,1
40330,grand,red,41.891665,-87.628021,0
40490,grand,blue,41.891189,-87.647578,0
40760,granville,red,41.9944830093,-87.6591866269,1
40940,halsted,green,41.778943,-87.644244,1
41130,halsted,orange,41.84678,-87.648088,1
40980,harlem (forest park branch),blue,41.87349,-87.806961,0
40750,harlem (o'hare branch),blue,41.98227,-87.8089,1
40020,harlem/lake,green,41.886848,-87.803176,1
40850,harold washington library-state/van buren,brown,41.876862,-87.628196,1
40850,harold washington library-state/van buren,orange,41.876862,-87.628196,1
40850,harold washington library-state/van buren,purple,41.876862,-87.628196,1
40850,harold washington library-state/van buren,pink,41.876862,-87.628196,1
41490,harrison,red,41.874039,-87.627479,0
40900,howard,red,42.019063,-87.672892,1
40900,howard,purple,42.019063,-87.672892,1
40900,howard,yellow,42.019063,-87.672892,1
40810,illinois medical district,blue,41.875706,-87.673932,1
40300,indiana,green,41.821732,-87.621371,1
40550,irving park,blue,41.952925,-87.729229,0
41460,irving park,brown,41.954521,-87.674868,1
40070,jackson,blue,41.878183,-87.629296,1
40560,jackson,red,41.878153,-87.627596,1
41190,jarvis,red,42.0160204165,-87.6692571266,0
41280,jefferson park,blue,41.9702338623,-87.7615940115,1
41040,kedzie,pink,41.853964,-87.705408,1
41070,kedzie,green,41.884321,-87.706155,1
41150,kedzie,orange,41.804236,-87.704406,1
41180,kedzie,brown,41.965996,-87.708821,1
40250,kedzie-homan,blue,41.874341,-87.70604,1
41290,kimball,brown,41.967901,-87.713065,1
41140,king drive,green,41.78013,-87.615546,1
40600,kostner,pink,41.853751,-87.733258,1
41660,lake,red,41.884809,-87.627813,1
40700,laramie,green,41.887163,-87.754986,1
41340,lasalle,pink,41.875568,-87.631722,0
41340,lasalle,orange,41.875568,-87.631722,0
41340,lasalle,purple,41.875568,-87.631722,0
40160,lasalle/van buren,orange,41.8768,-87.631739,0
40160,lasalle/van buren,pink,41.8768,-87.631739,0
40160,lasalle/van buren,purple,41.8768,-87.631739,0
40770,lawrence,red,41.9689762882,-87.6584869372,0
41050,linden,purple,42.073153,-87.69073,1
41020,logan square,blue,41.9295342259,-87.7076881549,1
41300,loyola,red,42.001073,-87.661061,1
40270,main,purple,42.033456,-87.679538,0
40460,merchandise mart,brown,41.888969,-87.633924,1
40460,merchandise mart,purple,41.888969,-87.633924,1
40930,midway,orange,41.78661,-87.737875,1
40790,monroe,blue,41.880703,-87.629378,0
41090,monroe,red,41.880745,-87.627696,0
41330,montrose,blue,41.9609010454,-87.7429034362,0
41500,montrose,brown,41.961756,-87.675047,1
41510,morgan,green,41.88557676,-87.65212993,1
41510,morgan,pink,41.88557676,-87.65212993,1
40100,morse,red,42.008362,-87.665909,0
40650,north/clybourn,red,41.910655,-87.649177,0
40400,noyes,purple,42.058282,-87.683337,0
40890,o'hare,blue,41.97766526,-87.90422307,1
40180,oak park,blue,41.872108,-87.791602,0
41350,oak park,green,41.886988,-87.793783,0
41680,oakton-skokie,yellow,42.02624348,-87.74722084,1
41310,paulina,brown,41.943623,-87.670907,1
41030,polk,pink,41.871551,-87.66953,1
40030,pulaski,green,41.885412,-87.725404,1
40150,pulaski,pink,41.853732,-87.724311,1
40920,pulaski,blue,41.873797,-87.725663,0
40960,pulaski,orange,41.799756,-87.724493,1
40040,quincy/wells,pink,41.878723,-87.63374,0
40040,quincy/wells,orange,41.878723,-87.63374,0
40040,quincy/wells,purple,41.878723,-87.63374,0
40470,racine,blue,41.87592,-87.659458,0
40200,randolph/wabash,brown,41.884431,-87.626149,0
40200,randolph/wabash,green,41.884431,-87.626149,0
40610,ridgeland,green,41.887159,-87.783661,0
41010,rockwell,brown,41.966115,-87.6941,1
41400,roosevelt,red,41.8673785311,-87.6270314058,1
41400,roosevelt,green,41.8673785311,-87.6270314058,1
41400,roosevelt,orange,41.8673785311,-87.6270314058,1
40820,rosemont,blue,41.983507,-87.859388,1
40800,sedgwick,brown,41.910409,-87.639302,1
40800,sedgwick,purple,41.910409,-87.639302,1
40080,sheridan,red,41.9539048386,-87.6546614127,0
40840,south boulevard,purple,42.027612,-87.678329,0
40360,southport,brown,41.943744,-87.663619,1
40190,sox-35th,red,41.831191,-87.630636,1
40260,state/lake (loop 'l'),brown,41.88574,-87.627835,0
40260,state/lake (loop 'l'),green,41.88574,-87.627835,0
40260,state/lake (loop 'l'),orange,41.88574,-87.627835,0
40260,state/lake (loop 'l'),purple,41.88574,-87.627835,0
40260,state/lake (loop 'l'),pink,41.88574,-87.627835,0
40880,thorndale,red,41.9900990857,-87.6590684978,0
40350,uic-halsted,blue,41.875474,-87.649707,1
40370,washington,blue,41.883164,-87.62944,0
40730,washington/wells,brown,41.882695,-87.63378,1
40730,washington/wells,orange,41.882695,-87.63378,1
40730,washington/wells,purple,41.882695,-87.63378,1
40730,washington/wells,pink,41.882695,-87.63378,1
41210,wellington,brown,41.936033,-87.653266,1
41210,wellington,purple,41.936033,-87.653266,1
40310,western,orange,41.804546,-87.684019,1
40740,western,pink,41.854225,-87.685129,1
41480,western,brown,41.966163,-87.688502,1
40220,western (forest park branch),blue,41.875478,-87.688436,0
40670,western (o'hare branch),blue,41.916157,-87.687364,1
40540,wilson,red,41.965481568,-87.6579258145,0"
# List of colors to display line specific or error information.
# (Accessed with the same index mapping as train lines)
declare -ar colors=(
$'\e[38;05;196m'
$'\e[38;05;27m'
$'\e[38;05;94m'
$'\e[38;05;46m'
$'\e[38;05;202m'
$'\e[38;05;55m'
$'\e[38;05;177m'
$'\e[38;05;226m'
$'\e[0m'
)
# Usage statement for later
readonly usage="Usage: $(basename $0) -s station_name [-r train_route] | -l train_route
Lists train arrivals for a given stop. If no train line is given, all relevant
lines will be listed. Alternatively, passing in the -l flag will list all
stations for a given route.
-r: Train route to check (e.g. 'Blue', 'Red' OR shortest unambiguous identifier ('r', 'br'))
-s: Station name (e.g. 'Clark/Lake', 'Fullteron')
-l: List stops for a route"
# --- Functions ---
# print_usage: prints a formatted usage statement for the program
print_usage () {
printf "%80s\n" "$usage"
}
# read_dom: function to (somewhat hackishly) parse XML.
# Locally change the IFS to break apart read commands by < or > (escaped).
# XML tags go into $tag and content goes to, well, $content.
read_dom () {
local IFS=\>
read -d \< tag content
}
# contains_element: quick function for checking if an element is in an
# array. The arrays in question are all really small--no scaling problems
# with a linear scan here.
doesntContainElement () {
local e
for e in "${@:2}"; do [[ "$e" == "$1" ]] && return 1; done
return 0
}
# to_lower: turns any uppercase letter to lowercase. Good for standardizing
# user input.
to_lower () {
echo $1 | tr '[:upper:]' '[:lower:]'
}
validate_route () {
local inp=$(to_lower $1)
# allow caller to pass in more specific list of routes
shift
if [[ -n $2 ]]; then
declare -a routes=("${@}")
else
declare -a routes=(blue red green brown pink orange purple yellow)
fi
# disambiguate input route--the user can enter the shortest
# unambiguous route this way
ambiguous=false
match=
for route in "${routes[@]}"; do
if [[ ${route:0:${#inp}} == $inp ]]; then
if [[ -z $match ]]; then
match=$route
else
ambiguous=true
break
fi
fi
done
if [[ -z $match ]]; then
echo "Invalid route: Try {blue|red|brown|green|orange|purple|pink|yellow}"
exit 4
# Since we're not implementing a generalized string-contains here,
# the only ambiguity really would be "p" (Pink and Purple).
elif [[ -n $match && $ambiguous == true ]]; then
echo "Multiple route matches found. Type a bit more:"
read inp_route
validate_route $inp_route
else
inp_route=$match
fi
}
# list_stations_for: prints the list of stations for a given train route
list_stations_for () {
validate_route $1
printf "\nListing all stations for the ${colors[${!inp_route}]}%s${colors[END]} line:\n\n" "$inp_route"
# Use awk to look up the given station name to find mapid and disambiguate
awk -F',' -v route="$inp_route" '$3 == route {
print $2
}' <<< "$stations_txt"
}
# make_request: all the logic for making a curl request to the CTA API and
# displaying the formatted output.
make_request () {
# URL base information
API_KEY="6ae11e4b3a174bbf8dde5f8be2bf19f0"
url_base="http://lapi.transitchicago.com/api/1.0/ttarrivals.aspx?"
# Initial URL
request_url=$url_base
# This will always get set
request_url+="key=${API_KEY}"
# Add the map id to the url
request_url+="&mapid=$1"
# If the caller supplied a route, add it
if [[ -n $2 ]]
then
request_url+="&rt=$2"
fi
# If no max requests are specified, default to 5.
: ${inp_max:=10}
request_url+="&max=${inp_max}"
result_xml=$(curl -s ${request_url})
# Create arrays and counters for inbound and outbound trains
# NOTE: The notion of "inbound" and "outbound" are ill-defined in the CTA API.
# They give a direction code specifiying either "1" or "5"--to make this
# more understandable to the user, we print out the destination stop before
# the list of arrivals. The API handles the case of, say, the Brown line,
# which always terminates at Kimball, by understanding the location of the
# train and updating the destination name to smartly reflect this
num_inbound=0
declare -a inbound_array
num_outbound=0
declare -a outbound_array
# Read through the response XML
while read_dom; do
# If there's an error, skip to the error message and print it for the user
if [[ $tag = "errCd" && $content > 0 ]]
then
# The next tag contains the actual error message
read_dom; read_dom
# Print error notification to the user
echo "This failed--CTA API says: ${content}"
exit 6
fi
# The "eta" tag specifies the start of an individual arrival. When we
# get here, start grabbing info
if [[ $tag = "eta" ]]
then
# Create a temp array to store arrival info until the direction
# is confirmed. This info will be copied into either the inbound or
# outbound array later
declare -a temp_array
declare temp_index=0
read_dom
while [[ $tag != "/eta" ]]
do
# Filter out ending tags
# (the "flags" tag is deprecated)
if [[ $tag != /* && $tag != "flags /" ]]
then
# Here's where all the useful arrival info comes in
temp_array[$temp_index]="$content"
((temp_index++))
fi
read_dom
done
# Copy over the arrival info into either the inbound or outbound arrival
# array (these arrays will be processed separately).
if [[ ${temp_array[INBOUND_IND]} == 1 ]]
then
((num_inbound++))
inbound_array+=("${temp_array[@]}")
else
((num_outbound++))
outbound_array+=("${temp_array[@]}")
fi
fi
done <<< $result_xml
if [[ $num_inbound == 0 && $num_outbound == 0 ]]
then
echo "No arrivals for whatever you asked for."
exit 4
fi
printf "\nResults for %s:\n" "${inbound_array[$STATION_NAME]}"
print_arrivals $num_inbound "${inbound_array[@]}"
print_arrivals $num_outbound "${outbound_array[@]}"
}
# print_arrivals: takes in a number of arrivals and an array containing all
# the relevant content to print.
#
# NOTE: the array is single-dimensional but contains the info for ALL stops. To
# access the correct stop, it adds a multiple of NUM_FIELDS to the index at
# every stop iteration. Works fine.
#
# A little ugly, though.
print_arrivals () {
local num=$1
shift
declare -a arrival_array=("${@}")
# In Bash, for loops using ranges (e.g. {1..5}) don't work with variables.
# Use c-style loop instead.
for (( i=0; i<$num; i++ )) {
# For the first arrival, print out where the group of arrivals is headed.
if [[ $i == 0 ]]
then
printf "\n%s\n" "${arrival_array[$STOP_DESCRIPTION]}"
fi
# Convert timestamps into a "minutes away" style time
TIME1=$(date -j -f "%H:%M:%S" "${arrival_array[(($PREDICTION_TS+($NUM_FIELDS*$i)))]:9}" +%s)
TIME2=$(date -j -f "%H:%M:%S" "${arrival_array[(($ARRIVAL_TS+($NUM_FIELDS*$i)))]:9}" +%s)
minutes_away=$((($TIME2 - $TIME1)/60))
printf "%20s${colors[${arrival_array[(($ROUTE+($NUM_FIELDS*$i)))]}]}%4s${colors[END]}%8s min away" \
"To ${arrival_array[(($DESTINATION_NAME+($NUM_FIELDS*$i)))]}" \
"(${arrival_array[(($ROUTE+($NUM_FIELDS*$i)))]:0:1})" \
"${minutes_away}"
if [[ ${arrival_array[(($IS_DELAYED+($NUM_FIELDS*$i)))]} == 1 ]]
then
printf "${colors[$Red]}%15s${colors[$END]}" "Delayed"
elif [[ ${arrival_array[(($IS_SCHEDULED+($NUM_FIELDS*$i)))]} == 1 ]]
then
printf "${colors[Y]}%15s${colors[END]}" "Scheduled"
elif [[ ${arrival_array[(($IS_APPROACHING+($NUM_FIELDS*$i)))]} == 1 ]]
then
printf "${colors[$G]}%15s${colors[$END]}" "Approaching"
fi
printf "\n"
}
}
# --- Scripts start here ---
# Use getopts to process all the command line arguments
while getopts ":hl:m:r:s:" flag
do
case $flag
in
h)
print_usage
exit 0;;
l)
list_stations_for $OPTARG
exit 0;;
m)
inp_max=$OPTARG
;;
s)
inp_station=$(to_lower $OPTARG)
;;
r)
inp_route=$(to_lower $OPTARG)
;;
:)
echo "Error: -$OPTARG requires an argument"
print_usage
exit 1
;;
--)
break
;;
esac
done
# If there was an unrecognized flag supplied, spit out the usage statement
if [[ $? != 0 ]]
then
print_usage
exit 5
fi
# --- Validate arguments ---
# You can make a request using a direction-specific stop or by using the parent
# station, but you'll need at least one of these. If you use both,
if [[ -z $inp_station ]]
then
echo "Please list a station that you'd like arrival times for:"
read inp_station
fi
if [[ -n $inp_route ]]; then
validate_route $inp_route
fi
# Use awk to look up the given station name to find mapid and disambiguate
mapid=$(awk -F',' -v route="$inp_route" -v station="$inp_station" '$2 == station {
if(route!="" && route==$3) {
print $1
}
else if (route=="") {
print $1 " " $3
}
}' <<< "$stations_txt")
# Awk didn't return any mapids
if [[ -z $mapid ]]
then
echo "No station found by that name and/or route. Try $(basename $0) -l to list stations."
# Best case: we've narrowed it down to a single mapid, just make that request
elif [[ ${#mapid} == 5 ]]
then
make_request $mapid
# If there are multiple lines captured, we'll iterate through them and check
# out the mapid and route info. The forking logic goes like this:
#
# 1) There are multiple hits for a given station name, all with the same
# map id (e.g. Clark/Lake). In this case, just generate a single request
# and the route info for each arrival time printed will be sufficient
#
# 2) There are multiple hits for different stations that happen to have the
# same name (e.g. Damen, Cicero, etc). In this case, prompt for a station
# to disambiguate, and make that request. If none is supplied, just make
# requests for each station and display all of them.
else
# keep track of which line it is to treat the first line differently
hits=0
multiple_stations=false
# Do a quick scan through the awk results (never more than six lines)
# to see if there are multiple stations listed
# keep a list of the relevant train routes for smart disambiguation later
declare -a hit_routes
route_index=0
while read map rt; do
((hits++))
# Add to list of routes if not already there
if doesntContainElement $rt "${hit_routes[@]}"; then
hit_routes[$route_index]="$rt"
((route_index++))
fi
# The first line will be used as the comparison mapid
if [[ $hits == 1 ]]
then
check_mapid=$map
check_rt=$rt
else
# if a different station is found, we know which case it is, so exit.
if [[ $map != $check_mapid ]]
then
multiple_stations=true
# This is the unusual case where there are two stations with the
# same name, on the same route (e.g. Western on Blue )
fi
fi
done <<< "$mapid"
# Prompt user for disambiguation, if they so desire
if [[ $multiple_stations == true ]]
then
echo "Multiple stations found with that name. Enter a route to disambiguate (blank to list all):"
read inp_route
if [[ -n $inp_route ]]; then
validate_route $inp_route "${hit_routes[@]}"
fi
while read map rt; do
# If a specific route was specified, list only the hits that match
if [[ -z $inp_route || (-n $inp_route && $rt == $inp_route) ]]
then
make_request $map ${!rt}
fi
done <<< "$mapid"
else
make_request $check_mapid
fi
fi
echo ""
exit 0
| true
|
29378668becb8de8eb784ae4a5e032fe4f1793f5
|
Shell
|
bdeblis/portal
|
/Portal/deploy-portal.sh
|
UTF-8
| 1,848
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
if [[ $# -ne 5 ]] ; then
"Invalid number of arguments supplied: deploy-portal-jenkins.sh serverurl jenkinsbuildname jndicontext folder buildnumber|lastSuccessful\
for example deploy-portal.sh gwqa3 Portal-Master-Build qa3 compsource-portal-webui-stripes lastSuccessful"
exit 1
fi
if echo "${5}" | egrep -q '^[0-9]+$'; then
BUILD_SUBDIR="modules/com.pwc.us\$compsource-portal-webui-stripes/builds/${5}"
elif [ "${5}" == "lastSuccessful" ]; then
BUILD_SUBDIR="lastSuccessful/com.pwc.us\$compsource-portal-webui-stripes"
else
echo "${5} is not a valid argument. Usage ${0} <serverurl> <jenkinsbuildname> <jndicontext> <folder> <buildnumber|lastSuccessful>"
exit 2
fi
ssh root"@$1" "/etc/init.d/tomcat6 stop; \
rm /usr/share/tomcat6/webapps/'$4'/ROOT.war; \
rm -rf /usr/share/tomcat6/webapps/'$4'/ROOT ; \
rm -rf /usr/share/tomcat6/work/ ; \
rm /usr/share/tomcat6/conf/Catalina/'$4'/ROOT.xml; \
exit"
ssh root"@$1" "mkdir -v -p /usr/share/tomcat6/webapps/'$4'"
ssh root"@$1" "chmod 755 /usr/share/tomcat6/webapps/'$4'"
ssh root"@$1" "chown tomcat /usr/share/tomcat6/webapps/'$4'"
scp /srv/Jenkins/jobs/"$2"/"${BUILD_SUBDIR}"/archive/com.pwc.us/compsource-portal-webui-stripes/1.0-SNAPSHOT/compsource-portal-webui-stripes-1.0-SNAPSHOT.war root"@$1":/usr/share/tomcat6/webapps/"$4"/ROOT.war
ssh root"@$1" "mkdir -v -p /usr/share/tomcat6/conf/Catalina/'$4'"
ssh root"@$1" "chmod 755 /usr/share/tomcat6/conf/Catalina/'$4'"
scp /srv/Jenkins/workspace/"$2"/Portal/Tomcat_JNDI_Context_Files/"$3"/compsource-portal-webui-stripes.xml root"@$1":/usr/share/tomcat6/conf/Catalina/"$4"/ROOT.xml
ssh root"@$1" "chmod 644 /usr/share/tomcat6/webapps/"$4"/ROOT.war; \
chmod 644 /usr/share/tomcat6/conf/Catalina/'$4'/ROOT.xml; \
/etc/init.d/tomcat6 start; \
tail -f -n 150 /usr/share/tomcat6/logs/catalina.out"
| true
|
9e0f4964eb124cfd029f807b68f1946314f05c82
|
Shell
|
DMBuce/clicraft
|
/src/lib/action.d/version.sh
|
UTF-8
| 186
| 2.796875
| 3
|
[
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!bash
#
# Usage: clicraft version
#
# Prints version information.
#
msg "%s %s" "$PROG" "$VERSION"
msg "Copyright (C) %s" "2011-2014 DMBuce <dmbuce@gmail.com>"
msg "Home page: %s" "<$URL>"
| true
|
4edc503c23120b0f4485f82aee57521737d283d7
|
Shell
|
speakinghedge/freepackets_org
|
/wrt54gl/add2distro/usr/sbin/vpn_report
|
UTF-8
| 376
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/sh
# report script
###############
if [ -f /etc/config/vpn_report ] ; then
. /etc/config/vpn_report
else
logger "vpn_report missing /etc/config/vpn_report"
exit 1
fi
if [ -z "$stats_url" -o -z "$stats_interval" ] ; then
logger "vpn_report invalid configuration"
exit 1
fi
while [ 1 ] ; do
sleep $stats_interval
/usr/sbin/ap_stats $stats_url $stats_key
done
| true
|
1240d08f199c405f2094bc199dd0337438c26f8b
|
Shell
|
LucaScorpion/advent-of-code
|
/2018/day_1/solution.sh
|
UTF-8
| 1,109
| 4.4375
| 4
|
[] |
no_license
|
#!/bin/bash
set -euo pipefail
input=()
frequency=0
# Frequency calibration.
declare -A frequencies
frequencies['0']=1
calibrated=false
iterations=0
#######################################
# Record that the current frequency has been reached and detect the first
# repeat ("calibration").
# Globals:
#   frequency   (read)  - the frequency value to record
#   frequencies (write) - map of frequency -> times seen
#   calibrated  (write) - set to true on the first repeat
# Outputs:
#   Prints "Calibrated frequency: N" the first time a frequency repeats.
#######################################
function checkFrequencies
{
	# Add one to the frequency counter (defaults to 0 if never seen).
	local count=${frequencies["$frequency"]-0}
	# Plain arithmetic expansion instead of 'let': 'let' returns non-zero
	# when its result is 0, which would abort the script under 'set -e'.
	count=$((count + 1))
	frequencies["$frequency"]=$count

	# Seen exactly twice => this is the first repeated frequency.
	if [ "$count" = 2 ]
	then
		calibrated=true
		echo "Calibrated frequency: $frequency"
	fi
}
#######################################
# Apply every change in 'input' to 'frequency', checking for calibration
# after each step; stops early once calibrated.
# Globals:
#   input       (read)  - array of signed frequency changes
#   frequency   (write) - running sum, updated in place
#   calibrated  (read)  - early-exit flag set by checkFrequencies
#######################################
function processInput
{
	local change
	for change in "${input[@]}"
	do
		# Arithmetic expansion instead of 'let': 'let frequency+=...' exits
		# non-zero whenever the new frequency is 0, which kills the script
		# under 'set -euo pipefail' before calibration can be detected.
		frequency=$((frequency + change))
		checkFrequencies

		# If we are calibrated, stop processing.
		if [ "$calibrated" = true ]
		then
			return
		fi
	done
}
# Read the complete input.
# The '|| [[ -n "$line" ]]' clause also processes a final line that lacks
# a trailing newline; IFS='' and -r preserve whitespace and backslashes.
while IFS='' read -r line || [[ -n "$line" ]]
do
input[${#input[*]}]=$line
done
# Process the input once, display the resulting frequency.
processInput
echo "Resulting frequency: $frequency"
# Continue calibration.
# Replay the input until some frequency repeats (part 2).
# NOTE(review): this loops forever if no frequency ever repeats (e.g. on
# empty input) -- presumably the puzzle input guarantees a repeat; confirm.
while [ $calibrated = false ]
do
let iterations+=1
processInput
done
echo "Calibration took $iterations additional iterations."
| true
|
a4633bd0861a3009ab5a1a434edee20a39305198
|
Shell
|
levlukacs/dotfiles
|
/konsole_tabs.sh
|
UTF-8
| 1,001
| 3.71875
| 4
|
[] |
no_license
|
# Configuration
COMMAND=''
# File that receives one "workdir/title/command" line per saved tab.
SAVEFILE_TERMINAL="${HOME}/.konsole/current-tabs"

# Restore if asked to: launch konsole with the previously saved tab list
# and exit immediately.
if [ "$1" = "restore" ] ; then
	echo "Restoring..."
	# Quote the save-file path -- the original was unquoted and broke when
	# $HOME contained spaces.
	konsole --tabs-from-file "${SAVEFILE_TERMINAL}" -e 'bash -c exit'&
	exit 0
fi
# Function to get the current sessions and write them to a file
# Appends one "workdir: ...;; title: ...;; command: ..." line per konsole
# tab to ${SAVEFILE_TERMINAL}.
# NOTE(review): the save file is appended to, never truncated, so repeated
# saves accumulate stale entries -- confirm this is intended.
function getSessions {
pid=$(pgrep konsole -u $USER)
# NOTE(review): if more than one konsole process is running, $pid holds
# several newline-separated PIDs and the qdbus service name below is
# malformed -- verify only a single konsole instance is expected.
local SESSIONS=$(qdbus org.kde.konsole-$pid | grep /Sessions/)
if [[ ${SESSIONS} ]] ; then
for i in ${SESSIONS}; do
# Tab title format and the PID of the shell running in this session.
local FORMAT=$(qdbus org.kde.konsole-$pid $i tabTitleFormat 0)
local PROCESSID=$(qdbus org.kde.konsole-$pid $i processId)
# Working directory of that shell ("PID: /path" from pwdx, prefix stripped).
local CWD=$(pwdx ${PROCESSID} | sed -e "s/^[0-9]*: //")
# If the shell has a child process, record its full command line so the
# command can be inspected/relaunched after a restore.
if [[ $(pgrep --parent ${PROCESSID}) ]] ; then
CHILDPID=$(pgrep --parent ${PROCESSID})
COMMAND=$(ps -p ${CHILDPID} -o args=)
fi
echo "workdir: ${CWD};; title: ${FORMAT};; command:${COMMAND}" >> ${SAVEFILE_TERMINAL}
# Reset so the next tab without a child doesn't inherit this command.
COMMAND=''
done
fi
}
getSessions
| true
|
e4a8dc92ea28d30c66d1f1c8bae47992cb0baa9c
|
Shell
|
gsauthof/dracut-sshd
|
/ci/setup/upload-ssh-keys.sh
|
UTF-8
| 981
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision known ssh keys to a VM
# 2019, Georg Sauthoff <mail@gms.tf>

set -eux
PS4='+${SECONDS}s '

host=localhost
port=10022
known_hosts=known_horsts
key=key/dracut-ssh-travis-ci-insecure-ed25519

# Options common to every scp/ssh call below: ignore the user config,
# force publickey auth with our fixed CI identity.
base_opts=(
    -F /dev/null
    -o IdentityFile="$key"
    -o IdentitiesOnly=yes
    -o PreferredAuthentications=publickey
    -o StrictHostKeyChecking=accept-new
)

# Install the generated host keys into the VM's /etc/ssh.
scp "${base_opts[@]}" -o UserKnownHostsFile=/dev/null \
    -P $port key/ssh_host_*_key* root@localhost:/etc/ssh/

# Restore SELinux labels on the new keys and make sshd pick them up.
ssh "${base_opts[@]}" -o UserKnownHostsFile=/dev/null \
    -p $port root@localhost <<EOF
set -x
restorecon -rv /etc/ssh
systemctl reload sshd.service
EOF

sleep 3

# Fetch the dracut host keys back, this time verifying the server against
# the recorded known-hosts file.
scp "${base_opts[@]}" -o UserKnownHostsFile=$known_hosts \
    -P $port key/dracut_ssh_host_*_key* root@localhost:

echo done
| true
|
d5e3cce4e7fc81f4c9530b00ef60b022c8639c0b
|
Shell
|
fevrin/home
|
/.shellrc.d/functions/_find_unscoped_vars
|
UTF-8
| 1,639
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
#######################################
# Scan shell-script files for variables that are assigned but never
# declared 'local'/'declare'/'typeset', and report them per file with the
# line number of their first assignment.
# Arguments: $@ - files/globs to scan (default: $HOME/.bashrc.d/functions/*)
# Outputs:   per-file list of "LINENO:VAR" candidates to stdout
#######################################
_find_unscoped_vars() {
local files="${@:-$HOME/.bashrc.d/functions/*}"
local unscoped_vars
# Characters allowed in a shell identifier.
local var_chars="[a-zA-Z_0-9]"
# Compound-assignment operator characters (+=, -=, <<=, |=, ...).
local var_assignment_chars="([][()*/%+&^|<>-]|<<|>>)"
local FILES_PROCESSED=0
# Deliberately unquoted so the default glob pattern expands here.
for file in $files; do
# Skip anything that isn't a shell script according to file(1).
file -L --mime-type "$file" | grep -q 'text/x-shellscript' || {
echo "can't read file: '$file'"
echo "skipping..."
continue
}
# grab all vars from the given file, including those with special assignment operators
# this assumes Bash is the language used, and there could be issues with arrays
for var in $(sed -rne "s;^\s*([^ #=[]$var_chars+)$var_assignment_chars?=.*;\1;p" "$file" | sort -u); do
# echo "var = '$var'"
# ensure the variable is locally defined
egrep -q "(((declare|typeset)( -[^g])?)|local) $var(=|$)" "$file" || {
# if it's not locally defined
# echo [[ ${unscoped_vars[*]} =~ $var ]]
# NOTE(review): this is a substring regex match, so a var named "a" is
# considered "already recorded" if any recorded name contains "a" --
# confirm whether that false positive is acceptable.
[[ ${unscoped_vars[*]} =~ $var ]] || {
# and it's not already in the unscoped_vars array, then add it to the array
# Record "LINENO:VAR" for the first assignment of this variable.
unscoped_vars+=("$(cat -n "$file" | sed -rne "s;^\s+([0-9]+)\s+("$var")\+?=.*;\1:\2;p" | head -n1)")
}
}
done
FILES_PROCESSED=1
# Report this file's findings sorted numerically by line number.
if [[ ${#unscoped_vars[*]} -gt 0 ]]; then
echo -e "$file\nthese vars need to be scoped to 'local':"
for i in ${unscoped_vars[*]}; do
echo "$i"
done | sort -nu
echo
fi
# Clear the list so findings do not leak into the next file's report.
unset unscoped_vars
done
if [[ "$FILES_PROCESSED" -eq 0 ]]; then
echo "no Bash script files found"
fi
}
| true
|
ba2fd3aceee847ab9fbf8ab21a8f03c0ef54b33b
|
Shell
|
Flexberry/NewPlatform.Flexberry.ORM
|
/Docker/Postgres/scripts/swarm/exec.sh
|
UTF-8
| 132
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Attach an interactive bash shell to the running flexberry postgresql
# container, if one exists.

# Word-split the matching 'docker ps' row into the positional parameters;
# the first field is the container id. ($() replaces legacy backticks.)
set -- $(docker ps | grep flexberry/alt.p8-postgresql:latest)
id=$1

if [ -n "$id" ]
then
    docker exec -it "$id" bash
fi
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.