blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
81d58ed01705ae83ac3f85e938775d4e50933b36
|
Shell
|
W4RH4WK/dotConfig
|
/dotconfig
|
UTF-8
| 1,084
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# Sync configuration files between this repository and a Windows AppData
# tree (intended for WSL: /mnt/c/...).  Usage: dotconfig <pull|push> <PROGRAM>
set -e

# Declare and mark readonly in two steps so a failure inside the command
# substitution is not masked by `readonly` always succeeding (ShellCheck SC2155).
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
readonly DIR
readonly APPDATA="/mnt/c/Users/$USER/AppData"
usage() {
  # Print command-line help: direction (pull/push) plus a known PROGRAM.
  cat <<EOF
Usage: $0 <pull | push> <PROGRAM>

 PROGRAM:
 krita
 vscode
 vs-snippets

EOF
}
config_krita() {
  # Krita keeps all three rc files directly under AppData/Local.
  local rc
  for rc in kritadisplayrc kritarc kritashortcutsrc; do
    copy "$APPDATA/Local/$rc" "$DIR/krita/$rc"
  done
}
config_vscode() {
  # VS Code user settings live under AppData/Roaming/Code/User; make sure
  # the Windows-side directory exists before copying into it.
  local base="$APPDATA/Roaming/Code/User"
  mkdir -p "$base"
  local f
  for f in settings.json keybindings.json; do
    copy "$base/$f" "$DIR/vscode/$f"
  done
}
config_vs-snippets() {
  # Visual Assist autotext snippets (hyphenated function names are a bashism,
  # which is fine under the #!/bin/bash shebang).
  local target="$APPDATA/Roaming/VisualAssist/Autotext"
  mkdir -p "$target"
  copy "$target/cpp.tpl" "$DIR/vs/cpp.tpl"
}
# Validate arguments: exactly two, the first being the sync direction.
if [[ $# -ne 2 || ( "$1" != "pull" && "$1" != "push" ) ]]; then
  usage
  exit 1
fi

# Define copy() according to direction:
#   pull: Windows -> repo (overwrite the repo copy)
#   push: repo -> Windows (cp -i prompts before overwriting)
if [[ "$1" == "pull" ]]; then
  copy() { cp -r -T "$1" "$2"; }
else
  copy() { cp -i -r -T "$2" "$1"; }
fi

# Reject an unknown PROGRAM with the usage text instead of letting bash
# fail with "config_foo: command not found".
if ! declare -F "config_$2" > /dev/null; then
  usage
  exit 1
fi

"config_$2" "$1"
| true
|
9666a59407b224d7fd2aa5174e11e17c18ae63bd
|
Shell
|
smitagaikwad25/Demo_Programs
|
/case_satement.sh
|
UTF-8
| 561
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
#vehicle=$1
#case $vehicle in
# "car")
# echo "Rent of $vehicle is 100 doller" ;;
# "van")
# echo "Rent of $vehicle is 80 doller" ;;
# "bicycle")
# echo "Rent of $vehicle is 5 doller" ;;
# * )
# echo "unknown vehicle" ;;
#esac
# Prompt without a trailing newline (\c suppresses it under echo -e).
echo -e "enter character : \c"
# -r keeps backslashes in the input literal (ShellCheck SC2162).
read -r value
# Classify the input: letter ranges and digits first, then `?` for any
# other single character, then `*` for empty or multi-character input.
case $value in
  [a-z])
    echo "User entered $value a to z" ;;
  [A-Z])
    echo "User entered $value A to Z" ;;
  [0-9])
    echo "User entered $value 0 to 9" ;;
  ?)
    echo "User entered $value special character" ;;
  *)
    # Typo fix: message previously read "Unknow input".
    echo "Unknown input" ;;
esac
| true
|
d8601d34f7d51fc8837481e4e03a239b3ee5b526
|
Shell
|
sstone1/fabric-sdk-node
|
/scripts/ci_scripts/publishNpmModules.sh
|
UTF-8
| 3,681
| 3.796875
| 4
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
#
# Copyright IBM Corp All Rights Reserved
#
# SPDX-License-Identifier: Apache-2.0
#
# Publishes the fabric-sdk-node npm packages. For each module directory the
# script reads the "tag"/"version" fields from its package.json and publishes
# either an auto-incremented unstable build or a regular release.
# NOTE(review): -e in the shebang is lost when invoked as `bash script.sh`;
# a `set -e` line would be more robust — confirm how CI invokes this.
# Space-separated list of module directories to publish, in dependency order.
nodeModules="fabric-protos fabric-common fabric-ca-client fabric-client fabric-network"
#######################################
# Publish module $1 to the npm registry.
#   - CURRENT_TAG containing "skip"     : do nothing
#   - CURRENT_TAG containing "unstable" : compute the next unstable version
#                                         suffix and publish under that tag
#   - anything else                     : publish under the release tag
# Globals:  CURRENT_TAG, RELEASE_VERSION (read), nodeModules (read),
#           UNSTABLE_INCREMENT_VERSION (exported)
# Arguments: $1 - npm module name (cwd must be that module's directory)
#######################################
npmPublish() {
  if [[ "$CURRENT_TAG" = *"skip"* ]]; then
    echo -e "\033[34m----> Don't publish $1 npm modules on skip tag \033[0m"
  elif [[ "$CURRENT_TAG" = *"unstable"* ]]; then
    echo
    # Get the current unstable version of a module from npm registry and
    # bump its final (increment) component by one.
    UNSTABLE_VER=$(npm dist-tags ls "$1" | awk "/$CURRENT_TAG"":"/'{
      ver=$NF
      rel=$NF
      sub(/.*\./,"",rel)
      sub(/\.[[:digit:]]+$/,"",ver)
      print ver"."rel+1}')
    if [[ $UNSTABLE_VER = "" ]]; then
      # First unstable build of this version: start the counter at 1.
      echo -e "\033[34m ----> unstable ver is blank \033[0m"
      UNSTABLE_INCREMENT=1
    else
      # Get last digit of the unstable version built above
      UNSTABLE_INCREMENT=$(echo "$UNSTABLE_VER" | rev | cut -d '.' -f 1 | rev)
    fi
    echo -e "\033[32m======> UNSTABLE_INCREMENT:" "$UNSTABLE_INCREMENT" "\033[0m"
    # Append last digit with the package.json version
    export UNSTABLE_INCREMENT_VERSION=$RELEASE_VERSION.$UNSTABLE_INCREMENT
    # Bug fix: the reset escape here was "\033[0" (missing the trailing "m"),
    # which leaked a broken escape sequence into the CI log.
    echo -e "\033[32m======> UNSTABLE_INCREMENT_VERSION:" "$UNSTABLE_INCREMENT_VERSION" "\033[0m"
    # Point intra-SDK dependencies at the tag being published.
    for module in ${nodeModules}; do
      sed -i "s/\"${module}\": \".*\"/\"${module}\": \"${CURRENT_TAG}\"/" package.json
    done
    # Replace existing version with $UNSTABLE_INCREMENT_VERSION
    sed -i 's/\(.*\"version\"\: \"\)\(.*\)/\1'$UNSTABLE_INCREMENT_VERSION\"\,'/' package.json
    # Show Version after modify the package.json with latest version to publish
    grep '"version":' package.json | cut -d\" -f4
    # Publish unstable versions to npm registry. Under `bash -e` the old
    # `npm publish; if [ $? != 0 ]` pattern never reached the check; test
    # the command directly instead.
    if ! npm publish --tag "$CURRENT_TAG"; then
      echo -e "\033[31m FAILED to publish $CURRENT_TAG of $1 npm module" "\033[0m"
      exit 1
    fi
    echo -e "\033[32m ========> PUBLISHED $CURRENT_TAG tag of $1 npm module SUCCESSFULLY" "\033[0m"
  else
    # Publish node modules on latest tag
    echo -e "\033[32m ========> PUBLISH $RELEASE_VERSION" "\033[0m"
    for module in ${nodeModules}; do
      sed -i "s/\"${module}\": \".*\"/\"${module}\": \"${CURRENT_TAG}\"/" package.json
    done
    if ! npm publish --tag "$CURRENT_TAG"; then
      echo -e "\033[31m FAILED TO PUBLISH $CURRENT_TAG of $1 npm module" "\033[0m"
      exit 1
    fi
    echo -e "\033[32m ========> PUBLISHED $CURRENT_TAG tag of $1 npm module SUCCESSFULLY" "\033[0m"
  fi
}
versions() {
  # Populate CURRENT_TAG and RELEASE_VERSION from ./package.json.
  # Lines look like:   "tag": "unstable",   — splitting on double quotes,
  # field 4 is the value.
  CURRENT_TAG=$(awk -F'"' '/"tag":/ {print $4}' package.json)
  echo -e "\033[32m ======> Current TAG: $CURRENT_TAG" "\033[0m"
  RELEASE_VERSION=$(awk -F'"' '/"version":/ {print $4}' package.json)
  echo -e "\033[32m ======> Current Version: $RELEASE_VERSION" "\033[0m"
}
# ASCII "PUBLISH NPM" banner so this step is easy to spot in CI logs.
echo " ____ _ _ ____ _ ___ ____ _ _ _ _ ____ __ __"
echo "| _ \| | | | __ )| | |_ _/ ___|| | | | | \ | | _ \| \/ |"
echo "| |_) | | | | _ \| | | |\___ \| |_| | | \| | |_) | |\/| |"
echo "| __/| |_| | |_) | |___ | | ___) | _ | | |\ | __/| | | |"
echo "|_| \___/|____/|_____|___|____/|_| |_| |_| \_|_| |_| |_|"
# Move into the SDK checkout inside the CI workspace.
# NOTE(review): assumes $WORKSPACE is exported by the CI job — confirm.
cd $WORKSPACE/gopath/src/github.com/hyperledger/fabric-sdk-node
# Set NPM_TOKEN from CI configuration
# Please post in #ci-pipeline channel if you observe npm_token issue
npm config set //registry.npmjs.org/:_authToken=$NPM_TOKEN
# Publish node modules
# For every module directory that exists, read its tag/version from
# package.json and publish it, then return to the repository root.
for module in ${nodeModules}; do
if [ -d "$module" ]; then
echo -e "\033[32m Publishing $module" "\033[0m"
cd $module
versions
npmPublish $module
cd -
fi
done
| true
|
ab37467ed28058429a1448a3845d6ab0e9318003
|
Shell
|
blackb1rd/dotfiles
|
/setup.sh
|
UTF-8
| 42,465
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# shellcheck disable=SC1091
# Workstation bootstrap: detect the host OS, then install packages, shells,
# language toolchains and editor plugins according to the flags parsed below.
# Provides helper functions (e.g. pathadd) used throughout this script.
. "shells/source/utility.sh"
# Locations used for cloning/downloading, and pinned tool versions.
GITHUB_RAW_URL='https://raw.githubusercontent.com'
GITHUB_FOLDER="$HOME/git/github"
GITHUB_URL='https://github.com'
TEMP="/tmp"
ROOT_PERM=""
USRPREFIX="/usr/local"
GOLANG_VERSION="1.18"
PYTHON3_VERSION="3.10.9"
# pip invocation flags; used below as:  pip $PIPoption $PIPmodule
PIPoption="install --user --upgrade"
RUBY_VERSION="3.1.1"
GO_TENSORFLOW_VERSION="2.8.0"
# Detect the host platform and set per-OS package-manager commands, package
# lists and environment tweaks. Only OStype is guaranteed to be set on every
# path; PKG_CMD_* / PACKAGE / PIPmodule are filled in for supported OSes.
case $(uname) in
Darwin)
current_dir="$( cd "$( dirname "$0" )" && pwd )"
OStype=Darwin
;;
CYGWIN_NT-*)
OStype=CYGWIN_NT
;;
MSYS_NT-*)
# MSYS2/MinGW on Windows: pacman packages incl. both 32/64-bit toolchains.
current_dir="$(cygpath -a .)"
OStype=MSYS_NT
PKG_CMD_UPDATE="pacman -Syy"
PKG_CMD_INSTALL="pacman --needed -Su --noconfirm"
PKG_CMD_REMOVE="pacman -R"
PACKAGE="autoconf
automake
cmake
curl
gcc
git
gperf
irssi
libbz2
libevent
liblzma
libreadline
libtool
llvm
make
mingw-w64-i686-cmake
mingw-w64-i686-gcc
mingw-w64-i686-go
mingw-w64-i686-jansson
mingw-w64-i686-libtool
mingw-w64-i686-libxml2
mingw-w64-i686-libyaml
mingw-w64-i686-make
mingw-w64-i686-pcre
mingw-w64-i686-perl
mingw-w64-i686-pkg-config
mingw-w64-i686-unibilium
mingw-w64-i686-xz
mingw-w64-x86_64-cmake
mingw-w64-x86_64-gcc
mingw-w64-x86_64-go
mingw-w64-x86_64-jansson
mingw-w64-x86_64-libtool
mingw-w64-x86_64-libxml2
mingw-w64-x86_64-libyaml
mingw-w64-x86_64-make
mingw-w64-x86_64-pcre
mingw-w64-x86_64-perl
mingw-w64-x86_64-pkg-config
mingw-w64-x86_64-unibilium
mingw-w64-x86_64-xz
pkg-config
python3
ruby
unzip
wget
zsh"
# pathadd comes from shells/source/utility.sh sourced above.
pathadd "/mingw64/bin"
[ -z "$GOROOT" ] && export GOROOT=/mingw64/lib/go
[ -z "$GOPATH" ] && export GOPATH=/mingw64
;;
FreeBSD)
OStype=FreeBSD
;;
OpenBSD)
OStype=OpenBSD
;;
DragonFly)
OStype=DragonFly
;;
Linux)
current_dir="$( cd "$( dirname "$0" )" && pwd )"
if [ -f "/etc/os-release" ] ; then
# Identify the distribution; is_wsl is non-empty when running under
# Windows Subsystem for Linux (kernel string contains "Microsoft").
os_release_id="$(grep -E '^ID=([a-zA-Z]*)' /etc/os-release | cut -d '=' -f 2)"
os_version_id="$(grep -E '^VERSION_ID="([0-9\.]*)"' /etc/os-release | cut -d '=' -f 2 | tr -d '"')"
is_wsl="$(uname -a | grep -E 'Microsoft')"
echo "os version : $os_version_id"
# Python modules installed later by the --python step.
PIPmodule="bottleneck
Cython
h5py
jedi
keras
kikit
matplotlib
mycli
mysqlclient
neovim
numexpr
numpy
pandas
pynvim
Pygments
python-language-server
sciPy
tensorflow
yapf"
if [ -n "${is_wsl}" ] ; then
SCOOP_PACKAGE="hugo-extended"
echo "Scoop package : $SCOOP_PACKAGE"
fi
case "$os_release_id" in
"arch")
OStype=arch
;;
"debian" | "ubuntu")
# apt-based distributions share this package list; Ubuntu adds extras below.
ROOT_PERM="sudo"
PKG_CMD_UPDATE="$ROOT_PERM apt-get update"
PKG_CMD_INSTALL="$ROOT_PERM apt-get install -y"
PKG_CMD_REMOVE="$ROOT_PERM apt-get remove -y"
PKG_CMD_ADD_REPO="$ROOT_PERM add-apt-repository -y"
PACKAGE="apt-transport-https
autoconf
automake
build-essential
ca-certificates
clang
cmake
curl
figlet
g++
gettext
git
gnupg-agent
htop
irssi
libbz2-dev
libevent-dev
libffi-dev
liblzma-dev
libncurses5-dev
libpcre3-dev
libreadline-dev
libsqlite3-dev
libssl-dev
libtool
libtool-bin
llvm
lynx
make
nasm
neovim
net-tools
ninja-build
openjdk-11-jre
openjdk-11-jdk
pkg-config
python3-dev
ruby-dev
software-properties-common
snapd
sqlite3
tk-dev
tor
unzip
wget
xclip
xz-utils
zlib1g-dev
zsh"
case "$os_release_id" in
"debian")
OStype=debian
;;
"ubuntu")
OStype=ubuntu
# Ubuntu-only extras plus the Docker apt repository.
PACKAGE="$PACKAGE
libmysqlclient-dev
nmap
qemu-kvm"
REPOSITORY=("deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable")
;;
esac
;;
"elementary")
OStype=elementary
;;
"fedora")
OStype=fedora
ROOT_PERM="sudo"
PKG_CMD_UPDATE="$ROOT_PERM yum update"
PKG_CMD_INSTALL="$ROOT_PERM yum install -y"
PKG_CMD_REMOVE="$ROOT_PERM yum remove -y"
PACKAGE="mysql-devel"
;;
"coreos")
OStype=coreos
;;
"gentoo")
OStype=gentoo
;;
"mageia")
OStype=mageia
;;
"centos")
OStype=centos
ROOT_PERM="sudo"
PKG_CMD_UPDATE="$ROOT_PERM yum update"
PKG_CMD_INSTALL="$ROOT_PERM yum install -y"
PKG_CMD_REMOVE="$ROOT_PERM yum remove -y"
PACKAGE="mysql-devel"
;;
"opensuse"|"tumbleweed")
OStype=opensuse
;;
"sabayon")
OStype=sabayon
;;
"slackware")
OStype=slackware
;;
"linuxmint")
OStype=linuxmint
ROOT_PERM="sudo"
PKG_CMD_UPDATE="$ROOT_PERM apt-get update"
PKG_CMD_INSTALL="$ROOT_PERM apt-get install -y"
PKG_CMD_REMOVE="$ROOT_PERM apt-get remove -y"
;;
*)
;;
esac
fi
# Check if we're running on Android
case $(uname -o 2>/dev/null) in
Android )
# Termux on Android: pkg wrapper; TMPDIR/PREFIX are provided by Termux.
OStype=Android
PKG_CMD_UPDATE="pkg update"
PKG_CMD_INSTALL="pkg install -y"
PKG_CMD_REMOVE="pkg uninstall"
PACKAGE="autoconf
automake
cmake
curl
figlet
fzf
git
htop
irssi
libbz2
libevent
libtool
llvm
lynx
make
ncurses-utils
neovim
nodejs
openssh
pkg-config
python
ruby
silversearcher-ag
tmux
unzip
wget
xz-utils
zsh"
PIPmodule="Cython
SciPy
bottleneck
mycli
mysqlclient
neovim
numexpr
numpy
pandas
python-language-server
yapf"
TEMP=$TMPDIR
USRPREFIX=$PREFIX
;;
esac
;;
SunOS)
OStype=SunOS
;;
*)
;;
esac
usage() {
  # Print the list of supported command-line flags.
  cat <<EOF
Usage: $0 [options]

Options:
 -a, --all Installing all setup
 -b, --basictool Installing basic tool
 -dart --dart Installing dart
 -d, --dot Installing dotfiles
 -dk, --docker Installing docker
 -f, --fonts Installing fonts
 -fzf, --fzf Installing fzf
 -l, --latest Compiling the latest ctags and VIM version
 -go, --golang Installing golang package
 -ki, --kicad Installing KiCad Plugin
 -node, --nodejs Installing nodejs package
 -pl, --perl Installing perl package
 -py, --python Installing python package
 -rb, --ruby Installing ruby package
 -rs, --rust Installing rust package
 -sh, --shell Installing shell
 -sc, --scoop Installing scoop
 -sp, --snap Installing snap
 -nvim, --neovim Compiling neovim
 -tmux, --tmux Compiling tmux
 -ycm, --ycmd Compiling YouCompleteMe
 -h, --help Show basic help message and exit
EOF
}
mkdirfolder () {
  # Create $HOME/$1 (including parents) unless it already exists.
  [ -d "$HOME/$1" ] || mkdir -p "$HOME/$1"
}
installfile () {
  # Symlink $current_dir/$2 to $HOME/$1, skipping files already in place.
  if [ -f "$HOME/$1" ] ; then
    return 0
  fi
  ln -snf "$current_dir/$2" "$HOME/$1"
}
installfolder () {
  # Symlink $current_dir/$1 to the dot-prefixed $HOME/.$1 unless present.
  local target="$HOME/.$1"
  [ -d "$target" ] || ln -snf "$current_dir/$1" "$target"
}
checkOStype () {
  # Inverted sense: return 1 for SUPPORTED OS ids, 0 otherwise, so callers
  # use `if checkOStype "$OStype"` to detect unsupported systems.
  case $1 in
    debian|ubuntu|android|msys_nt|darwin) return 1 ;;
    *) return 0 ;;
  esac
}
# Check argument
# Each flag (short or long form) simply sets a marker variable that the
# installation sections below test with [ -n "${...}" ].
while [ $# != 0 ]
do
case $1 in
-a | --all ) all=true;;
-b | --basictool ) basictool=true;;
-dart | --dart ) dart=true;;
-d | --dot ) dot=true;;
-dk | --docker ) docker=true;;
-f | --fonts ) fonts=true;;
-fzf | --fzf ) fzf=true;;
-l | --latest ) latest=true;;
-go | --golang ) golang=true;;
-ki | --kicad ) kicad=true;;
-node | --nodejs ) nodejs=true;;
-pl | --perl ) perl=true;;
-py | --python ) python=true;;
-rb | --ruby ) ruby=true;;
-rs | --rust ) rust=true;;
-sh | --shell ) shell=true;;
-sc | --scoop ) scoop=true;;
-sp | --snap ) snap=true;;
-nvim | --neovim ) neovim=true;;
-tmux | --tmux ) tmux=true;;
-ycm | --ycmd ) ycmd=true;;
-h | --help ) usage;exit;;
* ) usage;exit 1
esac
shift
done
# Make string as lower case
OStype=$(echo $OStype | awk '{print tolower($0)}')
# Check the input of OStype
# checkOStype returns 0 only for UNSUPPORTED systems (inverted sense).
if checkOStype "$OStype" ; then
echo "$OStype OS is not supported"
echo ""
usage
exit 1
fi
# Require at least one action flag to have been set by the parser above.
if [ -z "${all}" ] \
&& [ -z "${basictool}" ] \
&& [ -z "${dart}" ] \
&& [ -z "${dot}" ] \
&& [ -z "${docker}" ] \
&& [ -z "${fonts}" ] \
&& [ -z "${fzf}" ] \
&& [ -z "${golang}" ] \
&& [ -z "${kicad}" ] \
&& [ -z "${nodejs}" ] \
&& [ -z "${perl}" ] \
&& [ -z "${python}" ] \
&& [ -z "${ruby}" ] \
&& [ -z "${rust}" ] \
&& [ -z "${shell}" ] \
&& [ -z "${scoop}" ] \
&& [ -z "${snap}" ] \
&& [ -z "${neovim}" ] \
&& [ -z "${tmux}" ] \
&& [ -z "${ycmd}" ] \
&& [ -z "${latest}" ] ; then
echo "Need more option(installing or compiling) to be set"
echo ""
usage
exit 1
fi
if [ "$OStype" = "msys_nt" ] ; then
# MSYS2: create native Windows symlinks and use the Windows profile as HOME.
export MSYS=winsymlinks:nativestrict
export HOME=$USERPROFILE
fi
# Install program
if [ -n "${all}" ] || [ -n "${basictool}" ] ; then
txtbld=$(tput bold)
if [ "$OStype" != "android" ] ; then
echo "${txtbld}$(tput setaf 1)[-] Install the GPG key$(tput sgr0)"
$PKG_CMD_INSTALL curl
# NOTE(review): these apt-key calls assume a Debian/Ubuntu host but run on
# every non-Android OS — confirm they are harmless on pacman/yum systems.
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | $ROOT_PERM apt-key add -
curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | $ROOT_PERM apt-key add -
fi
echo "${txtbld}$(tput setaf 1)[-] Install the basic tool$(tput sgr0)"
# REPOSITORY is only populated on Ubuntu (Docker apt repo).
if [ -n "${REPOSITORY[*]}" ] ; then
for repo in "${REPOSITORY[@]}"
do
$PKG_CMD_ADD_REPO "$repo"
done
fi
$PKG_CMD_UPDATE
# shellcheck disable=SC2086
$PKG_CMD_INSTALL $PACKAGE || { echo 'Failed to install program' ; exit 1; }
# if did not want to install latest version
if [ ! "${latest}" ] && [ ! "${all}" ] ; then
$PKG_CMD_INSTALL vim ctags
fi
echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
fi
###############################################################################
# ____ _ _ _ #
# / ___|| |__ ___| | | #
# \___ \| '_ \ / _ \ | | #
# ___) | | | | __/ | | #
# |____/|_| |_|\___|_|_| #
# #
###############################################################################
# Install the zsh plugin manager (antibody), dircolors and the shell rc files.
# NOTE(review): txtbld is only assigned in the basictool section above, so it
# may be empty when only --shell/--dot was requested — confirm if intended.
if [ -n "${all}" ] || [ -n "${dot}" ] || [ -n "${shell}" ] ; then
echo "${txtbld}$(tput setaf 1)[-] Install the shell$(tput sgr0)"
if ! [ -x "$(command -v antibody)" ] ; then
curl -sfL git.io/antibody | $ROOT_PERM sh -s - -b /usr/local/bin
fi
# for dircolor
wget "https://raw.github.com/trapd00r/LS_COLORS/master/lscolors.sh" -O "$HOME/.lscolors.sh"
installfile .zsh_plugins.txt shells/zsh_plugins.txt
antibody bundle < "$HOME/.zsh_plugins.txt" > "$HOME/.zsh_plugins.sh"
installfile .profile shells/profile
installfile .bashrc shells/bashrc
installfile .zshrc shells/zshrc
installfile .zprofile shells/zprofile
# source external programs
mkdirfolder .shells
mkdirfolder .shells/git
for shell in bash zsh
do
mkdirfolder ".shells/$shell"
wget "$GITHUB_RAW_URL/git/git/master/contrib/completion/git-completion.$shell" \
-O "$HOME/.shells/git/git-completion.$shell"
done
mkdirfolder ".shells/source"
installfile ".shells/source/transmission.sh" "shells/source/transmission.sh"
installfile ".shells/source/utility.sh" "shells/source/utility.sh"
installfile ".shells/source/path.sh" "shells/source/path.sh"
echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
fi
###############################################################################
# _ #
# / \ __ _ #
# / _ \ / _` | #
# / ___ \ (_| | #
# /_/ \_\__, | #
# |___/ #
# #
###############################################################################
# Build the_silver_searcher (ag) from source, replacing any packaged version.
if [ -n "${all}" ] || [ -n "${latest}" ] ; then
echo "${txtbld}$(tput setaf 1)[-] Install the silversearcher-ag$(tput sgr0)"
$PKG_CMD_REMOVE silversearcher-ag
# clone silversearcher-ag
git clone --depth 1 $GITHUB_URL/ggreer/the_silver_searcher "$TEMP/the_silver_searcher"
cd "$TEMP/the_silver_searcher" || exit
./build.sh
make
$ROOT_PERM make install
cd "$current_dir" && rm -rf "$TEMP/the_silver_searcher"
echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
fi
###############################################################################
# ____ _ #
# / ___| |_ __ _ __ _ ___ #
# | | | __/ _` |/ _` / __| #
# | |___| || (_| | (_| \__ \ #
# \____|\__\__,_|\__, |___/ #
# |___/ #
# #
###############################################################################
# Build universal-ctags from source into $USRPREFIX, replacing packaged ctags.
if [ -n "${all}" ] || [ -n "${latest}" ] ; then
echo "${txtbld}$(tput setaf 1)[-] Install the ctags$(tput sgr0)"
$PKG_CMD_REMOVE ctags
# clone ctags
git clone --depth 1 $GITHUB_URL/universal-ctags/ctags "$TEMP/ctags"
cd "$TEMP/ctags" || exit
./autogen.sh
./configure --prefix="$USRPREFIX" --enable-iconv
make
$ROOT_PERM make install
cd "$current_dir" && rm -rf "$TEMP/ctags"
echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
fi
###############################################################################
# ____ _ #
# | _ \ __ _ _ __| |_ #
# | | | |/ _` | '__| __| #
# | |_| | (_| | | | |_ #
# |____/ \__,_|_| \__| #
# #
###############################################################################
# Install Dart from Google's apt repository, then clone the Flutter SDK.
# NOTE(review): uses apt directly, so this section only works on apt-based
# distributions even though the flag is accepted everywhere — confirm.
if [ -n "${all}" ] || [ -n "${dart}" ] ; then
echo "${txtbld}$(tput setaf 1)[-] Install the dart$(tput sgr0)"
$ROOT_PERM sh -c 'wget -qO- https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -'
$ROOT_PERM sh -c 'wget -qO- https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_stable.list > /etc/apt/sources.list.d/dart_stable.list'
$ROOT_PERM apt-get update
$PKG_CMD_INSTALL dart
echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
echo "${txtbld}$(tput setaf 1)[-] Install the flutter$(tput sgr0)"
mkdir "$HOME/development"
cd "$HOME/development" || exit
git clone https://github.com/flutter/flutter.git -b stable --depth 1
echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
fi
###############################################################################
# ____ _ #
# | _ \ ___| |__ _ _ __ _ __ _ ___ _ __ #
# | | | |/ _ \ '_ \| | | |/ _` |/ _` |/ _ \ '__| #
# | |_| | __/ |_) | |_| | (_| | (_| | __/ | #
# |____/ \___|_.__/ \__,_|\__, |\__, |\___|_| #
# |___/ |___/ #
# #
###############################################################################
# Link gdb/cgdb configuration and fetch gdb-dashboard into ~/.gdbinit.
if [ -n "${all}" ] || [ -n "${dot}" ] ; then
echo "${txtbld}$(tput setaf 1)[-] Install the debugger$(tput sgr0)"
installfile .gdbrc debugger/gdbrc
mkdirfolder .cgdb
installfile .cgdb/cgdbrc debugger/cgdbrc
# install gdb-dashboard https://github.com/cyrus-and/gdb-dashboard
wget -P ~ git.io/.gdbinit
echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
fi
###############################################################################
# ____ _ #
# | _ \ ___ ___| | _____ _ __ #
# | | | |/ _ \ / __| |/ / _ \ '__| #
# | |_| | (_) | (__| < __/ | #
# |____/ \___/ \___|_|\_\___|_| #
# #
###############################################################################
# Install Docker CE (requires the Docker apt repo added in the basictool step).
if [ -n "${all}" ] || [ "${docker}" ] ; then
if [ "$OStype" != "android" ] ; then
$PKG_CMD_INSTALL docker-ce docker-ce-cli containerd.io
fi
fi
###############################################################################
# _____ _ #
# | ___|__ _ __ | |_ ___ #
# | |_ / _ \| '_ \| __/ __| #
# | _| (_) | | | | |_\__ \ #
# |_| \___/|_| |_|\__|___/ #
# #
###############################################################################
# Install the DejaVuSansMono Nerd Font (not part of --all; explicit flag only).
if [ "${fonts}" ] ; then
if [ "$OStype" != "android" ] ; then
echo "${txtbld}$(tput setaf 1)[-] Install the fonts$(tput sgr0)"
# Install nerd fonts
git clone --depth 1 $GITHUB_URL/ryanoasis/nerd-fonts "$TEMP/fonts"
cd "$TEMP/fonts" && ./install.sh "DejaVuSansMono"
cd "$current_dir" && rm -rf "$TEMP/fonts"
echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
# "DejaVu Sans Mono Nerd Font 12"
fi
fi
################################################################################
# _____ __ #
# | ___|___/ _| #
# | |_ |_ / |_ #
# | _| / /| _| #
# |_| /___|_| #
# #
################################################################################
# Clone fzf and run its bundled installer.
if [ -n "${all}" ] || [ "${fzf}" ] ; then
  if [ "$OStype" != "android" ] ; then
    echo "${txtbld}$(tput setaf 1)[-] Install the Fzf$(tput sgr0)"
    git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf
    # Bug fix: the installer path and its flag were quoted as ONE word
    # ("$HOME/.fzf/install --all"), so bash looked for a file literally
    # named "install --all". Quote the path and pass the flag separately.
    "$HOME/.fzf/install" --all
    echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
  fi
fi
###############################################################################
# ____ _ _ #
# / ___(_) |_ #
# | | _| | __| #
# | |_| | | |_ #
# \____|_|\__| #
# #
###############################################################################
# Link the git configuration file.
if [ -n "${all}" ] || [ -n "${dot}" ] ; then
echo "${txtbld}$(tput setaf 1)[-] Install the git$(tput sgr0)"
installfile .gitconfig git/gitconfig
echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
fi
###############################################################################
# ____ #
# / ___| ___ #
# | | _ / _ \ #
# | |_| | (_) | #
# \____|\___/ #
# #
###############################################################################
# Install the pinned Go toolchain, common Go packages, and libtensorflow.
# NOTE(review): the tarball name is hard-coded to linux-amd64, so this step
# only matches Linux/x86_64 hosts even when other OS arms ran above — confirm.
if [ -n "${all}" ] || [ -n "${golang}" ] ; then
echo "${txtbld}$(tput setaf 1)[-] Install the go$(tput sgr0)"
wget "https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz"
$ROOT_PERM tar -C /usr/local -xzf "go$GOLANG_VERSION.linux-amd64.tar.gz"
rm "go$GOLANG_VERSION.linux-amd64.tar.gz"
pathadd "/usr/local/go/bin"
go install github.com/PuerkitoBio/goquery@latest
go install github.com/beevik/ntp@latest
go install github.com/cenkalti/backoff@latest
go install github.com/derekparker/delve/cmd/dlv@latest
go install github.com/FiloSottile/mkcert@latest
go install github.com/go-sql-driver/mysql@latest
go install github.com/golang/dep/cmd/dep@latest
go install github.com/mattn/go-sqlite3@latest
go install github.com/mmcdole/gofeed@latest
go install gonum.org/v1/gonum/...@latest
go install gonum.org/v1/plot/...@latest
go install gonum.org/v1/hdf5/...@latest
if [ "$OStype" != "android" ] ; then
# Unpack the C libtensorflow into /usr/local for the Go bindings below.
TF_TYPE="cpu" # Change to "gpu" for GPU support
TARGET_DIRECTORY='/usr/local'
curl -L \
"https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-$(go env GOOS)-x86_64-${GO_TENSORFLOW_VERSION}.tar.gz" |
$ROOT_PERM tar -C $TARGET_DIRECTORY -xz
$ROOT_PERM ldconfig
go install github.com/tensorflow/tensorflow/tensorflow/go@latest
fi
echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
fi
###############################################################################
# _ _ _ #
# | | | | |_ ___ _ __ #
# | |_| | __/ _ \| '_ \ #
# | _ | || (_) | |_) | #
# |_| |_|\__\___/| .__/ #
# |_| #
# #
###############################################################################
# Link the htop configuration file.
if [ -n "${all}" ] || [ -n "${dot}" ] ; then
echo "${txtbld}$(tput setaf 1)[-] Install the htop$(tput sgr0)"
installfile .htoprc htop/htoprc
echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
fi
###############################################################################
# ___ _ #
# |_ _|_ __ ___ ___(_) #
# | || '__/ __/ __| | #
# | || | \__ \__ \ | #
# |___|_| |___/___/_| #
# #
###############################################################################
# Link the irssi configuration directory (repo irssi/ -> ~/.irssi).
if [ -n "${all}" ] || [ -n "${dot}" ] ; then
echo "${txtbld}$(tput setaf 1)[-] Install the irssi$(tput sgr0)"
installfolder irssi
echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
fi
###############################################################################
# _ ___ ____ _ #
# | |/ (_)/ ___|__ _ __| | #
# | ' /| | | / _` |/ _` | #
# | . \| | |__| (_| | (_| | #
# |_|\_\_|\____\__,_|\__,_| #
# #
###############################################################################
# Clone KiCad action plugins, then install them into KiCad's scripting dir:
# copied on WSL (no symlink support into the Windows tree), symlinked otherwise.
if [ -n "${all}" ] || [ -n "${kicad}" ] ; then
echo "${txtbld}$(tput setaf 1)[-] Install KiCad Plugin$(tput sgr0)"
KICAD_GITHUB_PLUGIN_FOLDER="KiCad/plugins"
mkdir -p "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER"
git clone "$GITHUB_URL/NilujePerchut/kicad_scripts.git" "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/teardrops"
git clone "$GITHUB_URL/easyw/RF-tools-KiCAD.git" "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/RF-tools-KiCAD"
git clone "$GITHUB_URL/easyw/kicad-action-tools.git" "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/easyw-kicad-action-tools"
git clone "$GITHUB_URL/stimulu/kicad-round-tracks.git" "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/kicad-round-tracks"
git clone "$GITHUB_URL/jsreynaud/kicad-action-scripts.git" "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/jsreynaud-kicad-action-scripts"
git clone "$GITHUB_URL/xesscorp/WireIt.git" "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/WireIt"
if [ -n "${is_wsl}" ] ; then
# cannot create symlink
KICAD_PLUGIN_FOLDER="/mnt/c/Program Files/KiCad/share/kicad/scripting/plugins"
cp -r "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/teardrops/teardrops" "$KICAD_PLUGIN_FOLDER/"
cp -r "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/RF-tools-KiCAD" "$KICAD_PLUGIN_FOLDER/"
cp -r "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/easyw-kicad-action-tools" "$KICAD_PLUGIN_FOLDER/"
cp -r "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/jsreynaud-kicad-action-scripts/ViaStitching" "$KICAD_PLUGIN_FOLDER/"
cp -r "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/jsreynaud-kicad-action-scripts/CircularZone" "$KICAD_PLUGIN_FOLDER/"
cp -r "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/WireIt" "$KICAD_PLUGIN_FOLDER/"
else
KICAD_PLUGIN_FOLDER="$HOME/.kicad/scripting/plugins"
ln -snf "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/teardrops/teardrops" "$KICAD_PLUGIN_FOLDER/teardrops"
ln -snf "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/RF-tools-KiCAD" "$KICAD_PLUGIN_FOLDER/RF-tools-KiCAD"
ln -snf "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/easyw-kicad-action-tools" "$KICAD_PLUGIN_FOLDER/easyw-kicad-action-tools"
ln -snf "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/jsreynaud-kicad-action-scripts/ViaStitching" "$KICAD_PLUGIN_FOLDER/ViaStitching"
ln -snf "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/jsreynaud-kicad-action-scripts/CircularZone" "$KICAD_PLUGIN_FOLDER/CircularZone"
ln -snf "$GITHUB_FOLDER/$KICAD_GITHUB_PLUGIN_FOLDER/WireIt" "$KICAD_PLUGIN_FOLDER/WireIt"
fi
echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
fi
###############################################################################
# _ _ _ _ #
# | \ | | ___ __| | ___ (_)___ #
# | \| |/ _ \ / _` |/ _ \| / __| #
# | |\ | (_) | (_| | __/| \__ \ #
# |_| \_|\___/ \__,_|\___|/ |___/ #
# |__/ #
# #
###############################################################################
# Install node (snap), yarn (apt repo) and globally-used npm packages.
if [ -n "${all}" ] || [ -n "${nodejs}" ] ; then
if [ "$OStype" != "android" ] ; then
# cmdtest ships a conflicting "yarn" binary on Debian/Ubuntu; remove it.
$PKG_CMD_REMOVE cmdtest
$ROOT_PERM snap install node --classic
echo "deb https://dl.yarnpkg.com/debian/ stable main" | $ROOT_PERM tee /etc/apt/sources.list.d/yarn.list
$PKG_CMD_INSTALL -y yarn
$ROOT_PERM yarn global add async \
expo-cli \
react-native-cli \
react \
redux \
mobx \
netlify-cms \
neovim \
prettier
fi
fi
###############################################################################
# ____ _ _ #
# | _ \ _ _| |_| |__ ___ _ __ #
# | |_) | | | | __| '_ \ / _ \| '_ \ #
# | __/| |_| | |_| | | | (_) | | | | #
# |_| \__, |\__|_| |_|\___/|_| |_| #
# |___/ #
# #
###############################################################################
# Install pyenv, build the pinned Python, create the py3nvim virtualenv for
# neovim, and install the PIPmodule packages chosen during OS detection.
if [ -n "${all}" ] || [ -n "${python}" ] ; then
echo "${txtbld}$(tput setaf 1)[-] Install the python$(tput sgr0)"
installfile .pythonrc python/pythonrc
if [ "$OStype" != "android" ] ; then
# install pyenv
curl -L https://github.com/pyenv/pyenv-installer/raw/master/bin/pyenv-installer | bash
# Adding pyenv path
pathadd "$HOME/.pyenv/bin"
pyenv update
# Shared libpython is required by some embedding consumers (e.g. neovim).
export PYTHON_CONFIGURE_OPTS="--enable-shared"
pyenv install -s $PYTHON3_VERSION
eval "$(pyenv init -)"
eval "$(pyenv virtualenv-init -)"
pyenv virtualenv-delete -f py3nvim
pyenv virtualenv $PYTHON3_VERSION py3nvim
pyenv activate py3nvim
fi
# set pyenv to system
pyenv shell $PYTHON3_VERSION
pyenv local $PYTHON3_VERSION
pyenv global $PYTHON3_VERSION
pip install --upgrade pip
# shellcheck disable=SC2086
pip $PIPoption $PIPmodule
echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
fi
###############################################################################
# ____ _ #
# | _ \ _ _| |__ _ _ #
# | |_) | | | | '_ \| | | | #
# | _ <| |_| | |_) | |_| | #
# |_| \_\\__,_|_.__/ \__, | #
# |___/ #
# #
###############################################################################
# Ruby: install rbenv from source, add the ruby-build and bundler plugins,
# then build $RUBY_VERSION and install the gems neovim needs.
if [ -n "${all}" ] || [ -n "${ruby}" ] ; then
    git clone "$GITHUB_URL/rbenv/rbenv" "$HOME/.rbenv"
    # Compile rbenv's optional dynamic bash extension (speeds up rbenv).
    cd "$HOME/.rbenv" && src/configure && make -C src
    cd "$current_dir" || exit
    pathadd "$HOME/.rbenv/bin"
    # Sanity-check the rbenv installation.
    curl -fsSL $GITHUB_URL/rbenv/rbenv-installer/raw/master/bin/rbenv-doctor | bash
    mkdir -p "$(rbenv root)"/plugins
    git clone $GITHUB_URL/rbenv/ruby-build.git "$(rbenv root)"/plugins/ruby-build
    git clone $GITHUB_URL/carsomyr/rbenv-bundler.git "$(rbenv root)"/plugins/bundler
    rbenv install $RUBY_VERSION
    rbenv shell $RUBY_VERSION
    rbenv global $RUBY_VERSION
    gem install neovim bundler
    gem environment
    # Refresh rbenv shims so the new gem executables are found.
    rbenv rehash
fi
###############################################################################
# ____ _ #
# | _ \ _ _ ___| |_ #
# | |_) | | | / __| __| #
# | _ <| |_| \__ \ |_ #
# |_| \_\\__,_|___/\__| #
# #
###############################################################################
# Rust: official rustup installer; -y runs it non-interactively with defaults.
if [ -n "${all}" ] || [ -n "${rust}" ] ; then
    curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
fi
###############################################################################
# ____ #
# / ___| ___ ___ ___ _ __ #
# \___ \ / __/ _ \ / _ \| '_ \ #
# ___) | (_| (_) | (_) | |_) | #
# |____/ \___\___/ \___/| .__/ #
# |_| #
# #
###############################################################################
# Scoop packages (WSL/Windows only).
# NOTE: `A && B || C` parses as `(A && B) || C` because && and || have equal
# precedence in the shell, so `--scoop` alone used to trigger this section even
# outside WSL; group the alternatives explicitly. The progress message also
# wrongly said "snap package".
if [ -n "${is_wsl}" ] && { [ -n "${all}" ] || [ -n "${scoop}" ]; } ; then
    echo "${txtbld}$(tput setaf 1)[-] Install the scoop package$(tput sgr0)"
    # shellcheck disable=SC2086
    scoop install $SCOOP_PACKAGE
    echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
fi
###############################################################################
# ____ #
# / ___| _ __ __ _ _ __ #
# \___ \| '_ \ / _` | '_ \ #
# ___) | | | | (_| | |_) | #
# |____/|_| |_|\__,_| .__/ #
# |_| #
# #
###############################################################################
# Snap packages (skipped inside WSL, where snapd is unavailable).
# NOTE: `A && B || C` parses as `(A && B) || C` because && and || have equal
# precedence in the shell, so `--snap` alone used to trigger this section even
# inside WSL; group the alternatives explicitly.
if [ -z "${is_wsl}" ] && { [ -n "${all}" ] || [ -n "${snap}" ]; } ; then
    echo "${txtbld}$(tput setaf 1)[-] Install the snap package$(tput sgr0)"
    $ROOT_PERM snap install --channel=extended hugo
    $ROOT_PERM snap install --channel=edge shellcheck
    $ROOT_PERM snap install nvim --classic
    echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
fi
###############################################################################
# ____ _ #
# / ___| ___| |__ #
# \___ \/ __| '_ \ #
# ___) \__ \ | | | #
# |____/|___/_| |_| #
# #
###############################################################################
# SSH dotfiles: create the ControlMaster socket directory and install the
# ssh client config (skipped on MSYS, where the OpenSSH layout differs).
if [ -n "${all}" ] || [ -n "${dot}" ] ; then
    if [ "${OStype}" != "MSYS_NT" ] ; then
        echo "${txtbld}$(tput setaf 1)[-] Install the ssh$(tput sgr0)"
        mkdirfolder .ssh/control
        installfile .ssh/config ssh/config
        echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
    fi
fi
###############################################################################
# _____ #
# |_ _| __ ___ _ ___ __ #
# | || '_ ` _ \| | | \ \/ / #
# | || | | | | | |_| |> < #
# |_||_| |_| |_|\__,_/_/\_\ #
# #
###############################################################################
# Tmux: fetch/update gpakosz's .tmux configuration, optionally build the
# latest tmux from source, then install the local config overrides.
if [ -n "${all}" ] || [ -n "${dot}" ] || [ -n "${tmux}" ] ; then
    echo "${txtbld}$(tput setaf 1)[-] Install the tmux$(tput sgr0)"
    # Clone on first run, pull afterwards.
    if [ ! -d "$HOME/.tmux" ] ; then
        git clone "$GITHUB_URL/gpakosz/.tmux.git" "$HOME/.tmux"
    else
        git -C "$HOME/.tmux" pull
    fi
    if [ -n "${all}" ] || [ -n "${latest}" ] || [ -n "${tmux}" ] ; then
        if [ "$OStype" != "android" ] ; then
            # clone tmux and build it from source into $USRPREFIX
            git clone --depth 1 "$GITHUB_URL/tmux/tmux" "$TEMP/tmux"
            cd "$TEMP/tmux" || exit
            sh autogen.sh
            ./configure --prefix="$USRPREFIX"
            make
            $ROOT_PERM make install
            cd "$current_dir" && rm -rf "$TEMP/tmux"
        fi
    fi
    installfile .tmux.conf tmux/tmux.conf
    installfile .tmux.conf.local tmux/tmux.conf.local
    echo "${txtbld}$(tput setaf 4)[>] Install completed$(tput sgr0)"
fi
###############################################################################
# __ ___ #
# \ \ / (_)_ __ __ #
# \ \ / /| | '_ ` _ \ #
# \ V / | | | | | | | #
# \_/ |_|_| |_| |_| #
# #
###############################################################################
# Vim/Neovim: optionally build the latest neovim from source, then install
# the runtime directories, vim-plug, and the vim/neovim dotfiles.
if [ -n "${all}" ] \
    || [ -n "${latest}" ] \
    || [ -n "${neovim}" ] \
    || [ -n "${ycmd}" ] \
    || [ -n "${dot}" ] ; then
    echo "${txtbld}$(tput setaf 1)[-] Install the vim$(tput sgr0)"
    if [ -n "${all}" ] || [ -n "${latest}" ] || [ -n "${neovim}" ] ; then
        if [ "$OStype" != "android" ] && [ -n "${latest}" ] ; then
            # Replace the distribution vim with a release build of neovim.
            $PKG_CMD_REMOVE vim
            echo "${txtbld}$(tput setaf 1)[-] Install the latest VIM$(tput sgr0)"
            if [ ! -d "$HOME/github/neovim/" ] ; then
                git clone --depth 1 $GITHUB_URL/neovim/neovim "$HOME/github/neovim/"
            else
                git -C "$HOME/github/neovim/" pull
            fi
            cd "$HOME/github/neovim/" || exit
            rm -rf build
            make clean
            make CMAKE_BUILD_TYPE=Release
            $ROOT_PERM make install
            cd "$current_dir" && $ROOT_PERM rm -rf "$HOME/github/neovim/"
        fi
    fi
    if [ -n "${all}" ] || [ -n "${dot}" ] ; then
        # Runtime state directories (the original also created the duplicate
        # ".vim/" entry; one is enough).
        mkdirfolder .vim
        mkdirfolder .vim/backups
        mkdirfolder .vim/tmp
        mkdirfolder .vim/undo
        mkdirfolder .config/nvim
        # Fetch the vim-plug plugin manager once. plug.vim is a FILE, so test
        # with -f: the original -d check could never succeed and re-downloaded
        # it on every run.
        if [ ! -f "$HOME/.config/nvim/autoload/plug.vim" ] ; then
            curl -fLo "$HOME/.config/nvim/autoload/plug.vim" --create-dirs \
                https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
        fi
        installfolder vim/colors
        # vim: keep these configured if using the original vim
        installfile .vim/dict.add vim/dict.add
        installfile .vim/filetype.vim vim/filetype.vim
        installfile .vimrc vim/vimrc
        installfolder vim/spell
        installfolder vim/ycm
        # neovim
        installfile .config/nvim/dict.add vim/dict.add
        installfile .config/nvim/filetype.vim vim/filetype.vim
        installfile .config/nvim/init.vim vim/vimrc
        # NOTE(review): these two arguments lack the leading dot used by the
        # installfile calls above (".config/nvim/..."); confirm the expected
        # argument form of installfolder.
        installfolder config/nvim/spell
        installfolder config/nvim/ycm
        # download all plugins
        nvim +PlugInstall +qall
        nvim +PlugUpdate +qall
    fi
fi
| true
|
e94d704f060b1d5990727f1e44f6ba47cd2839d1
|
Shell
|
chaosone/shell_study_guide
|
/if-test.sh
|
UTF-8
| 116
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Report whether /etc/host exists as a regular file.
if test -f /etc/host
then
    echo "host file exists!"
else
    echo "host file not exists!"
fi
| true
|
9acdc82d30b30a3ae2550becd287bffb81c78ba9
|
Shell
|
starpicker/something
|
/sourcecode/my_training_predict/my_predict/my_predict.sh
|
UTF-8
| 172
| 2.65625
| 3
|
[] |
no_license
|
inputpath=temp_out
inputdata=input.data
# For every file in $inputpath: append its grey data to $inputdata, then run
# the prediction on it. Iterate with a glob instead of parsing `ls` output,
# which broke on filenames containing whitespace.
for file in "$inputpath"/*; do
    [ -e "$file" ] || continue   # skip the literal pattern when the dir is empty
    ./write_grey2txt_append "$file" "$inputdata" 0
    ./tinn "$inputdata" "$file"
done
| true
|
fe75ce2b9c6479946ee2d857b2431cab3ddc899f
|
Shell
|
cacack/ansible-moteino-gateway
|
/run-ansible.sh
|
UTF-8
| 242
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Run the moteino-gateway playbook against a single host.
# Usage: run-ansible.sh [ansible-playbook options...] hostname
# The LAST argument is the target host; everything before it is forwarded
# verbatim to ansible-playbook. The original built the argument list with
# unquoted expansions, which split any argument containing spaces.
if [ "$#" -lt 1 ]; then
    echo "usage: $0 [ansible-playbook options...] hostname" >&2
    exit 2
fi
hostname=${!#}                   # last positional argument
args=()
if [ "$#" -gt 1 ]; then
    args=("${@:1:$#-1}")         # every argument except the last, words preserved
fi
export ANSIBLE_HOST_KEY_CHECKING=false
ansible-playbook -u root --ask-pass --inventory="${hostname}," "${args[@]}" moteino-gateway.yml
| true
|
910811dae015ea4a83ea7dbc631d4f1bf2d0be65
|
Shell
|
rachclark/bash-parallel
|
/parallel_submit_legacy.sh
|
UTF-8
| 11,383
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/bash
# Print the usage/help text for this script and terminate with status 1.
function printCommandLine {
  echo "Usage: parallel_submit.sh -f flags -l list (.txt) -s script -m free_memory_limit -c clean -h help"
  echo "    where"
  echo "      -f Put the flags in quotations separated by a space (i.e. \"1_flag 2_flag 3_flag\")"
  echo "      -l Lists of the arguments you want to pass in. Each argument should get it's own line. See script for examples"
  echo "      -s The script (.sh) file you wish to run"
  echo "      -j The maximum number of jobs you want to run cocurrently"
  echo -e "      -c this deletes the .txt files this script creates, which really\n         aren't important to have except for debugging purposes"
  echo "      -m Try not to let free memory dip below this number (in kilobytes)"
  exit 1
}
#example of lists:
#let's say we want to run a script example.sh with
#a bunch of subjects. example.sh takes subjects and output directories
#as arguments. Therefore the user will make subjects.txt and outDirs.txt
#if I were to call 'cat subjects.txt', I would see:
#341
#342
#343
#344
#etc...
#Of course without the "#" in front of them.
#Similarly, if I were to call 'cat outDirs.txt', I would see:
#outDir/for/341
#outDir/for/342
#outDir/for/343
#outdir/for/344
#etc...
#again without the "#" in front.
#The 2 important things for these files are:
#1) each argument gets it's own line (i.e. 341 is on a different line than 342)
#2) The line in subjects.txt corresponds to the same line in outDirs.txt
# (i.e. "343" in subjects.txt is on the same line as "outDir/for/343" in outDirs.txt)
#example of flags
#back to example.sh, which takes in a subject and an output directory.
#example.sh takes two flags -s and -o, which correspond to subject and output, respectively.
#to pass those flag into the script, either do -f "-s -o" or -f "s o".
#The important thing to remember is:
#the order of the flags has to match the order of lists.
#So if you put -s first, you must put subjects.txt first.
#the scary code
########################################################
# Collect the command-line options from the user.
# NOTE: the original optstring was wrapped in Unicode "smart" quotes, which
# bash passed through as literal characters in the optstring; use plain ASCII
# double quotes.
while getopts "f:l:j:s:m:ch" OPTION
do
  case $OPTION in
    f)
      flags=($OPTARG)
      ;;
    l)
      lists=($OPTARG)
      ;;
    j)
      max_jobs=$OPTARG
      ;;
    s)
      script=$OPTARG
      ;;
    c)
      clean=1
      ;;
    m)
      free_memory_limit_kb=$OPTARG
      ;;
    h)
      printCommandLine
      ;;
    ?)
      echo "ERROR: Invalid option"
      printCommandLine
      ;;
  esac
done
# Validate the collected options, prompting interactively (with timeouts)
# for anything mandatory that was omitted.
OS=$(uname -s)
if [ "${OS}" != "Darwin" ] && [ "${OS}" != "Linux" ];then
  echo "WARNING: this program was not designed for this system"
fi
#complain and exit if there are no lists
if [ "${lists}" == "" ]; then
  echo "-l option is required, please enter the lists below and press [ENTER] (don't use quotes)"
  read -t 20 lists
  if [ "${lists}" == "" ]; then
    echo "Too slow! Exiting with a helpful message"
    printCommandLine
  else
    # Re-split the typed line into an array of list files.
    lists=( ${lists} )
  fi
fi
#complain and exit if there isn't a script provided
if [ "${script}" == "" ]; then
  echo "-s option is required, please enter your script below and press [ENTER]"
  read -t 15 script
  if [ "${script}" == "" ]; then
    echo "Too slow! Exiting with a helpful message"
    printCommandLine
  fi
fi
#chastise and continue if there is no max job set
if [ "${max_jobs}" == "" ]; then
  # Report the available core count to help the user pick a job limit.
  if [ "${OS}" == "Darwin" ]; then
    num_cores=$(sysctl -n hw.ncpu)
  elif [ ${OS} == "Linux" ]; then
    num_cores=$(nproc)
  else
    echo "WARNING, system not supported, expect error"
  fi
  echo -e "you didn't set -j, default 2,\n but you could potentially run ${num_cores} jobs or more cocurrently"
  echo "last chance, please enter the number of jobs you would like to run and press [ENTER]:"
  read -t 10 max_jobs
  if [ "${max_jobs}" == "" ]; then
    echo "fine, be that way"
    max_jobs=2
  fi
fi
#If you are running intensive jobs, then you may wish to wait until there is memory available
if [[ "${free_memory_limit_kb}" == "" ]]; then
  free_memory_limit_kb=0
fi
#if there are no flags, possible some scripts don't have flags.
# Fast path: with no flags, hand the pasted argument lists straight to xargs.
if [ "${flags}" == "" ]; then
  echo "Running script with no flags, hope this is what you want"
  num_args=${#lists[@]}
  paste ${lists[@]} | xargs -n${num_args} -P${max_jobs} ${script}
  exit 1
fi
#in case you had to use escape characters to pass arguments
flag_index=0
for flag in ${flags[@]}; do
  flags[${flag_index}]=$(echo ${flag} | sed 's/\\//g')
  flag_index=$((${flag_index} + 1 ))
done
#this array will be used to hold the flags and arguments to pass into xargs
declare -a command_args
#number of flags indicates number of arguments, right?
#nope
num_flags=${#flags[@]}
num_lists=${#lists[@]}
echo "prepping flags"
#possible some flags don't have arguments
#need to make the same number of flags as there are arguments in a list
# For each flag: write flag_<i>.txt with one copy of the flag per argument
# line; when a matching list exists, paste flag and arguments side by side
# into list_<i>.txt.
for flag_num in $(seq 0 $(echo "${num_flags}-1" | bc)); do
  if [[ ${flag_num} -le $(echo "${num_lists}-1" | bc) ]]; then
    #get the number of arguments in a list
    num_items=$(cat ${lists[${flag_num}]} | wc -l)
  fi
  #check to see if this script was ran before
  if [ -e flag_${flag_num}.txt ]; then
    rm flag_${flag_num}.txt
  fi
  #where we make a txt file containing the same number of flags as arguments.
  for arg in $(seq ${num_items}); do
    #if [[ "${flags[${flag_num}]}" == -* ]]; then
    echo "${flags[${flag_num}]}" >> flag_${flag_num}.txt
    #else
    #echo "-${flags[${flag_num}]}" >> flag_${flags[${flag_num}]}.txt
    #fi
  done
  #echo ${num_args}
  #smush the flag and arguments together, and index the output in the command_args array
  if [[ ${flag_num} -le $(echo "${num_lists}-1" | bc) ]]; then
    paste flag_${flag_num}.txt ${lists[${flag_num}]} > list_${flag_num}.txt
    command_args[${flag_num}]="list_${flag_num}.txt"
  else
    command_args[${flag_num}]="flag_${flag_num}.txt"
  fi
done
echo "putting together command and submiting"
#xargs likes to know how many items it should take as input (flags + arguments)
num_args=$(echo "${num_flags}+${num_lists}" | bc)
script_name=$(basename ${script})
# Strip the extension; used as a prefix for the per-run output files.
script_name_strip=${script_name/.*/""}
######################################################################################
#set up argument array
declare -a arg_arr
#inefficient processing to get arguments in a useful array
#where each index in arg_arr represents all the arguments necesary to
#run one instance of the script.
i=0
paste ${command_args[@]} > all_${script_name_strip}_args.txt
while read args; do
  arg_arr[${i}]=${args}
  i=$((${i}+1))
done < all_${script_name_strip}_args.txt
######################################################################################
#final initializations/tidbits before we start the main loop.
#index to count set of arguments we are using
x=0
#arrays to keep track of the pids (process id's) and the time the pids started
declare -a pid_arr
declare -a time_arr
#do some housekeeping to keep clutter down
if [[ ${clean} -eq 1 ]]; then
  if [ -e ${script_name_strip}_times.txt ]; then
    rm ${script_name_strip}_times.txt
  fi
fi
echo "if you would like to kill everything, press \"k\" and hit [ENTER]"
#option to end script safely?
kill_signal=no
######################################################################################
#The big magical loop
first_iteration=1
#keep going until all the scripts are submitted or until kill signal is initiated.
# Each iteration either submits the next instance (when memory and the job
# limit allow) or just polls the running jobs and records finish times.
while [[ ${#arg_arr[@]} -gt ${#pid_arr[@]} ]] &&\
  [ "${kill_signal}" != "k" ]; do
  if [ ${first_iteration} -eq 1 ]; then
    # First job is submitted unconditionally.
    sh ${script} ${arg_arr[${x}]} &>${script_name_strip}_${x}.txt & pid_arr[${x}]=$!
    echo "${script} ${arg_arr[${x}]} submitted"
    #keep track of the time this script was started
    time_arr[${x}]=$(date +%s)
    #keeps track of which instance of the script we are planning on running
    x=$((${x}+1))
    first_iteration=0
  else
    #the kill signal
    read -t 1 kill_signal
    #doesn't work in MAC-OS
    #got to keep updating/reseting to get accurate measures
    if [ "${OS}" == "Linux" ]; then
      free_memory_kb=$(grep MemTotal /proc/meminfo | awk '{print $2}')
    elif [ "${OS}" == "Darwin" ]; then
      free_memory_kb=$(vm_stat | grep Pages\ free | awk '{print $3}' | sed 's/\./\*4/' | bc)
    fi
    active_jobs=0
    #two purposes:
    #1) find out how many active jobs are running
    #2) if a job finished, write its run time to a text file
    for job in $(seq 0 $(echo "${#pid_arr[@]}-1" |bc)); do
      if ps -p ${pid_arr[${job}]} > /dev/null; then
        active_jobs=$((${active_jobs}+1))
      else
        # "wrote" marks a finished job whose runtime was already recorded.
        if [ "${time_arr[${job}]}" != "wrote" ]; then
          end_time=$(date +%s)
          run_time=$(echo "scale=2; (${end_time}-${time_arr[${job}]})/60" | bc -l)
          echo "${script_name_strip}_${job} ran ${run_time} minutes" >> ${script_name_strip}_times.txt
          time_arr[${job}]=wrote
        fi
      fi
    done
    #if there isn't enough memory or we've reached the max jobs we can submit...
    #don't do anything else until we have enough memory & fewer than max jobs are running
    if [[ ${free_memory_limit_kb} -gt ${free_memory_kb} ]] ||\
      [[ ${active_jobs} -eq ${max_jobs} ]]; then
      continue
    fi
    echo "There are ${active_jobs} job(s) running"
    #This does a few things
    #1)submits the script
    #2)redirects output to a text file
    #3)records the pid of that process
    sh ${script} ${arg_arr[${x}]} &>${script_name_strip}_${x}.txt & pid_arr[${x}]=$!
    echo "${script} ${arg_arr[${x}]} submitted"
    #keep track of the time this script was started
    time_arr[${x}]=$(date +%s)
    #keeps track of which instance of the script we are planning on running
    x=$((${x}+1))
  fi
done
#if the kill signal was issued, kill the jobs
if [ "${kill_signal}" == "k" ]; then
  echo "killing all jobs"
  for pid in ${pid_arr[@]}; do
    kill ${pid} &> /dev/null
  done
  exit 1
fi
echo "All jobs submitted, if you want to see how many are currently running, press \"j\" and hit [ENTER]"
echo "Additionally if you would like to kill remaining jobs, press \"k\" and hit [ENTER]"
#the waiting period for all remaining scripts to end
#can kill scripts here too, if they are failing
#future: add option to kill specific scripts?
while [[ ${active_jobs} -gt 0 ]]; do
  ans=n #default value, so "j" doesn't continue to be true forever, filling the terminal with trash
  read -t 1 ans
  active_jobs=0
  # Count live jobs and record run times of the ones that just finished.
  for job in $(seq 0 $(echo "${#pid_arr[@]}-1" |bc)); do
    if ps -p ${pid_arr[${job}]} > /dev/null; then
      active_jobs=$((${active_jobs}+1))
    else
      if [ "${time_arr[${job}]}" != "wrote" ]; then
        end_time=$(date +%s)
        run_time=$(echo "scale=2; (${end_time}-${time_arr[${job}]})/60" | bc -l)
        echo "${script_name_strip}_${job} ran ${run_time} minutes" >> ${script_name_strip}_times.txt
        time_arr[${job}]=wrote
      fi
    fi
  done
  if [ "${ans}" == "j" ];then
    echo "there are ${active_jobs} job(s) remaining"
  fi
  if [ "${ans}" == "k" ]; then
    echo "killing all jobs"
    # NOTE: the original placed 'exit 1' INSIDE this loop, so only the first
    # job was ever killed; exit after the whole loop instead (matching the
    # kill handler earlier in the script).
    for pid in ${pid_arr[@]}; do
      kill ${pid} &> /dev/null
    done
    exit 1
  fi
done
#clean up the crap made by this script
# NOTE: the original removed flag_${flags[i]}.txt / list_${flags[i]}.txt, but
# the prep loop creates the files as flag_<index>.txt / list_<index>.txt, so
# nothing was ever deleted; use the index-based names here.
if [[ ${clean} = 1 ]]; then
  for flag_num in $(seq 0 $(echo "${num_flags}-1" | bc)); do
    if [ -e flag_${flag_num}.txt ]; then
      rm flag_${flag_num}.txt
    fi
    if [ -e list_${flag_num}.txt ]; then
      rm list_${flag_num}.txt
    fi
  done
  if [ -e all_${script_name_strip}_args.txt ]; then
    rm all_${script_name_strip}_args.txt
  fi
fi
| true
|
a6ade03ecc245c7f56526f932fc54ed378ac9a8a
|
Shell
|
evacougnon/dotfiles
|
/bin/create_pem_key_from_rsa
|
UTF-8
| 165
| 2.921875
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"Unlicense"
] |
permissive
|
#!/bin/bash
# Create a PEM-encoded copy of the user's RSA private key (for tools that
# cannot read the OpenSSH format). No-op if the PEM copy already exists.
# NOTE: the original ended with 'exit 1' after a SUCCESSFUL conversion, which
# made every caller see a failure; a successful run should exit 0.
if [ ! -f ~/.ssh/id_rsa.pem ]
then
    echo 'creation of pem private key'
    openssl rsa -in ~/.ssh/id_rsa -outform pem > ~/.ssh/id_rsa.pem
fi
| true
|
5b76b1b55954309e9349c87a229ae118e9944c0c
|
Shell
|
absortium/deluge
|
/useful/aliases/docker-compose.sh
|
UTF-8
| 1,920
| 3.65625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# ------------------------------------
# Docker compose alias and function
# ------------------------------------
# Initialise the compose environment for a given mode (unit, integration,
# frontend, testnet, realnet). Sets IMAGE_TYPE, loads the matching sensitive
# file via ideluge, and exports DOCKER_BASE / DOCKER_OVERRIDE for func_dc.
func_dcinit() {
    declare COMPOSE_MODE="$1"
    if [ -z "$COMPOSE_MODE" ]; then
        echo "Use default init mode '$DEFAULT_MODE'"
        COMPOSE_MODE="$DEFAULT_MODE"
    fi
    # NOTE(review): SENSITIVE_FILE is assigned but never used below — the
    # ideluge calls pass literals instead; confirm whether it can be removed.
    declare SENSITIVE_FILE="$1"
    case "$COMPOSE_MODE" in
        'unit'|'integration'|'frontend')
            export IMAGE_TYPE="dev"
            ideluge ".mock-sensitive"
            ;;
        'testnet')
            export IMAGE_TYPE="prod"
            ideluge ".testnet-sensitive"
            ;;
        'realnet')
            export IMAGE_TYPE="prod"
            ideluge ".realnet-sensitive"
            ;;
        *)
            echo "Can not find any options similar to '$1'";;
    esac
    declare COMPOSES_PATH="$DELUGE_PATH/docker/composes"
    declare IMAGES_PATH="$DELUGE_PATH/docker/images"
    export DOCKER_OVERRIDE="$COMPOSES_PATH/$COMPOSE_MODE.yml"
    export DOCKER_BASE="$IMAGES_PATH/$IMAGE_TYPE.yml"
}
alias dcinit=func_dcinit
# Wrapper around docker-compose: initialises the environment on first use,
# routes 'build' to the image build files, everything else to the configured
# base + override compose files.
func_dc() {
    if [[ -z "$DOCKER_OVERRIDE" || -z "$DOCKER_BASE" ]]
    then
        func_dcinit
    fi
    # First word of the arguments = the docker-compose subcommand.
    declare COMMAND=$(echo "$@" | python -c "import sys; print(sys.stdin.read().split(' ')[0])")
    if [ "$COMMAND" = "build" ]; then
        docker-compose -f "$DELUGE_PATH/docker/images/base/build.yml" \
                       -f "$DELUGE_PATH/docker/images/$IMAGE_TYPE/build.yml" \
                       "$@"
    else
        echo "Docker base file: $DOCKER_BASE"
        echo "Docker override file: $DOCKER_OVERRIDE"
        echo "Full command: docker-compose -f $DOCKER_BASE -f $DOCKER_OVERRIDE $@"
        docker-compose -f "$DOCKER_BASE" -f "$DOCKER_OVERRIDE" "$@"
    fi
}
alias dc=func_dc
# docker-compose run alias
alias dcr="dc run"
# docker-compose up alias
alias dcu="dc up"
# docker-compose logs alias
alias dcl="dc logs"
# docker-compose build alias
alias dcb="dc build"
| true
|
b8b919c79982f3c135d38c84e216298115dbc6a9
|
Shell
|
jazzsir/tmux
|
/bin/tmux-ceph
|
UTF-8
| 1,099
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Create a tmux session named "ceph" (or "ceph-<suffix>" when an argument is
# given) with six panes, each SSH'd into one of the Ceph nodes
# 172.30.1.200-205, then attach to it. If the session already exists, just
# report that. (Quoting added throughout; the $?-check replaced with `if !`;
# user-facing typo "aleady" fixed.)
if [ -z "$1" ]
then
    SESSIONNAME="ceph"
else
    SESSIONNAME="ceph-$1"
fi
if ! tmux has-session -t "$SESSIONNAME" 2> /dev/null
then
    tmux new-session -s "$SESSIONNAME" -n ceph -d
    tmux send-keys 'ssh jazzsir@172.30.1.200' 'C-m'
    # Right-hand column.
    tmux split-window -t "$SESSIONNAME:1" -h
    tmux send-keys 'ssh jazzsir@172.30.1.201' 'C-m'
    # Two further panes in the left column.
    tmux select-pane -t "$SESSIONNAME:1" -L
    tmux split-window -t "$SESSIONNAME:1" -v
    tmux send-keys 'ssh jazzsir@172.30.1.202' 'C-m'
    tmux split-window -t "$SESSIONNAME:1" -v
    tmux send-keys 'ssh jazzsir@172.30.1.203' 'C-m'
    # Two further panes in the right column.
    tmux select-pane -t "$SESSIONNAME:1" -R
    tmux split-window -t "$SESSIONNAME:1" -v
    tmux send-keys 'ssh jazzsir@172.30.1.204' 'C-m'
    tmux split-window -t "$SESSIONNAME:1" -v
    tmux send-keys 'ssh jazzsir@172.30.1.205' 'C-m'
    ## switch the "base" window
    tmux select-window -t "$SESSIONNAME:1"
    tmux attach -t "$SESSIONNAME"
else
    echo "$SESSIONNAME already exists"
fi
| true
|
6e2da0a8739e8cea4cc52f2ad1e1b2309385ac2b
|
Shell
|
ish/adminish-example
|
/recreate-database.sh
|
UTF-8
| 420
| 3.03125
| 3
|
[] |
no_license
|
echo -e "\n\tPrepared to replace example\n"
read -p "Continue? (yes/no): " REPLACE

# Only proceed on an explicit confirmation (exact spellings accepted: yes/Yes/YES).
case "$REPLACE" in
    yes|Yes|YES)
        echo -n "Deleting example .. "
        curl -X DELETE http://localhost:5984/example
        echo
        echo -n "Creating example .. "
        curl -X PUT http://localhost:5984/example
        echo
        ./setup-app.sh
        #echo 'Populating Categories'
        #./populate_categories.sh example -v
        echo
        ;;
esac
| true
|
60cc8a2924b695160453b7bdc4c3d79f8cc2cf8b
|
Shell
|
huhabla/grass6-test-suite
|
/Tests/raster/r.elev.to.rast3-test.sh
|
UTF-8
| 1,267
| 2.796875
| 3
|
[] |
no_license
|
###########################################
#Tile
Title="r.elev.to.rast3 test" # -- required
#A description of the test
Description="Full function test of r.elev.to.rast3" # -- optional

NumberOfTests=6

# Register one r.elev.to.rast3 invocation per test case; each writes a
# uniquely named 3D raster (test index plus this shell's PID as suffix).
for (( i = 0; i < NumberOfTests; i++ )); do
    Module[${i}]="r.elev.to.rast3"
    ModuleOutput[${i}]="volume_${i}_$$"
    ModuleOutputType[${i}]="rast3d"
done

# Expected MD5 checksums of the produced volumes, one per test case.
ModuleValidationMD5[0]="2932f4dafa23b862bd43212acfd73da5"
ModuleValidationMD5[1]="960f46e8d306ec78a9e9347997cecc8e"
ModuleValidationMD5[2]="8e583b7b53b85f3e0217804a46ed0c7d"
ModuleValidationMD5[3]="edf39d3bfeac30ee67463647fda0cacf"
ModuleValidationMD5[4]="d97c07104ebfcd6bc27d839a1c87be94"
ModuleValidationMD5[5]="af26ea3344aafda0ecbda6022b0a71e4"

# Module options exercised by each test case.
ModuleOptions[0]="input=elevation elev=elevation output=${ModuleOutput[0]}"
ModuleOptions[1]="-l input=elevation elev=elevation output=${ModuleOutput[1]}"
ModuleOptions[2]="-u input=elevation elev=elevation output=${ModuleOutput[2]}"
ModuleOptions[3]="upper=50 input=elevation elev=elevation output=${ModuleOutput[3]}"
ModuleOptions[4]="lower=50 input=elevation elev=elevation output=${ModuleOutput[4]}"
ModuleOptions[5]="lower=50 upper=75 input=elevation elev=elevation output=${ModuleOutput[5]}"
| true
|
9dee41ce9eecfc05c8fcb41d76a071b0f89ffdfc
|
Shell
|
shotalinux/LinuxClass
|
/shellscripts/7-functions.sh
|
UTF-8
| 1,174
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Teaching script: demonstrates function definition, variable scope,
# return values, and arguments. Output text is intentionally fixed.
#simple example
hello_world () {
   echo 'hello, world'
}
hello_world
# ----------------------------------------- Variables Scope -------------------------------------------
echo "----------------------------------------- Variables Scope -------------------------------------------"
var1='A'
var2='B'
# 'local' confines var1's new value to the function; var2 is modified globally.
my_function () {
  local var1='C'
  var2='D'
  echo "Inside function: var1: $var1, var2: $var2"
}
echo "Before executing function: var1: $var1, var2: $var2"
my_function
echo "After executing function: var1: $var1, var2: $var2"
# ---------------------------------------- Return Values ---------------------------------------
echo "----------------------------------------- Return Values -------------------------------------------"
# Redefines my_function: output goes to stdout, the numeric status via return.
my_function () {
  echo "some result"
  return 55
}
my_function
# $? holds the return status of the last command (55 here).
echo $?
# ---------------------------------------- Arguments ---------------------------------------
echo "----------------------------------------- Arguments -------------------------------------------"
# $1 is the first argument passed to the function call.
greeting () {
  echo "Hello $1"
}
greeting "Joe"
| true
|
3d079ee1816b5fa455a3e884b7e577fd1c03f25f
|
Shell
|
bsed/dockerfiles
|
/wp-mariadb/scripts/restore.sh
|
UTF-8
| 246
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh
# Restore a MySQL/MariaDB database from the SQL dump given as $1.
# Connection parameters come from the MYSQL_* environment variables.
# NOTE: the original printed the usage line but did NOT exit, falling through
# to the " not found." branch with an empty $1; exit after usage instead.
if [ -z "$1" ]; then
    echo "Usage: restore BACKUP.sql"
    exit 1
fi
if [ -e "$1" ]; then
    mysql \
        --user="$MYSQL_USER" \
        --password="$MYSQL_PASSWORD" \
        "$MYSQL_DATABASE" < "$1"
    echo "Database backup $1 restored."
else
    echo "$1 not found."
fi
| true
|
2a7b3a158296dae83a859cc330e822560ca6ecc1
|
Shell
|
ik-security/prowler
|
/checks/check_extra7195
|
UTF-8
| 7,302
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Prowler - the handy cloud security tool (copyright 2019) by Toni de la Fuente
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# Remediation:
#
# here URL to the relevand/official documentation
# https://docs.aws.amazon.com/codeartifact/latest/ug/package-origin-controls.html
# https://zego.engineering/dependency-confusion-in-aws-codeartifact-86b9ff68963d
# https://aws.amazon.com/blogs/devops/tighten-your-package-security-with-codeartifact-package-origin-control-toolkit/
#
#
# here commands or steps to fix it if avalable, like:
# aws codeartifact put-package-origin-configuration \
# --package "MyPackage" \
# --namespace "MyNamespace" \ #You don't need namespace for npm or pypi
# --domain "MyDomain" \
# --repository "MyRepository" \
# --domain-owner "MyOwnerAccount"
# --format "MyFormat" \ # npm/pypi/maven
# --restrictions 'publish=ALLOW,upstream=BLOCK'
# Metadata consumed by the Prowler framework when registering this check.
CHECK_ID_extra7195="7.195"
CHECK_TITLE_extra7195="[check7195] Ensure CodeArtifact internal packages do not allow external public source publishing."
CHECK_SCORED_extra7195="NOT_SCORED"
CHECK_CIS_LEVEL_extra7195="EXTRA"
CHECK_SEVERITY_extra7195="Critical"
CHECK_ASFF_RESOURCE_TYPE_extra7195="Other"
CHECK_ALTERNATE_check7195="extra7195"
CHECK_SERVICENAME_extra7195="codeartifact"
CHECK_RISK_extra7195="Allowing package versions of a package to be added both by direct publishing and ingesting from public repositories makes you vulnerable to a dependency substitution attack."
CHECK_REMEDIATION_extra7195="Configure package origin controls on a package in a repository to limit how versions of that package can be added to the repository."
CHECK_DOC_extra7195="https://docs.aws.amazon.com/codeartifact/latest/ug/package-origin-controls.html"
CHECK_CAF_EPIC_extra7195=""
extra7195(){
  # Checks Code Artifact packages for Dependency Confusion
  # Looking for codeartifact repositories in all regions
  for regx in ${REGIONS}; do
    LIST_OF_REPOSITORIES=$("${AWSCLI}" codeartifact list-repositories ${PROFILE_OPT} --region "${regx}" --query 'repositories[*].[name,domainName,domainOwner]' --output text 2>&1)
    # AWS CLI errors arrive on the same stream (2>&1); detect them by pattern.
    if [[ $(echo "${LIST_OF_REPOSITORIES}" | grep -E 'AccessDenied|UnauthorizedOperation|AuthorizationError|Could not connect to the endpoint URL|ExpiredToken') ]]; then
      textInfo "${regx}: Access Denied trying to list repositories" "${regx}"
      continue
    fi
    if [[ "${LIST_OF_REPOSITORIES}" != "" && "${LIST_OF_REPOSITORIES}" != "none" ]]; then
      while read -r REPOSITORY DOMAIN ACCOUNT; do
        # Iterate over repositories to get packages
        # Found repository scanning packages
        LIST_OF_PACKAGES=$(aws codeartifact list-packages --repository "$REPOSITORY" --domain "$DOMAIN" --domain-owner "$ACCOUNT" ${PROFILE_OPT} --region "${regx}" --query 'packages[*].[package, namespace, format, originConfiguration.restrictions.upstream]' --output text 2>&1)
        if [[ $(echo "${LIST_OF_PACKAGES}" | grep -E 'AccessDenied|UnauthorizedOperation|AuthorizationError|Could not connect to the endpoint URL|ExpiredToken') ]]; then
          textInfo "${regx}: Access Denied trying to list packages for repository: ${REPOSITORY}" "${regx}" "${REPOSITORY}"
          continue
        fi
        if [[ "${LIST_OF_PACKAGES}" != "" && "${LIST_OF_PACKAGES}" != "none" ]]; then
          while read -r PACKAGE NAMESPACE FORMAT UPSTREAM; do
            # Get the latest version of the package we assume if the latest is internal the package is internal
            # (npm/pypi packages have no namespace; only pass --namespace when set.)
            LATEST=$(aws codeartifact list-package-versions --package "$PACKAGE" $(if [[ "$NAMESPACE" != "" && "$NAMESPACE" != "None" ]]; then echo "--namespace $NAMESPACE"; fi) --domain "$DOMAIN" --repository "$REPOSITORY" --domain-owner "$ACCOUNT" --format "$FORMAT" ${PROFILE_OPT} --region "${regx}" --sort-by PUBLISHED_TIME --no-paginate --query 'versions[0].version' --output text 2>&1)
            if grep -q -E 'AccessDenied|UnauthorizedOperation|AuthorizationError|Could not connect to the endpoint URL|ExpiredToken' <<< "${LATEST}"; then
              textInfo "${regx}: Access Denied trying to get latest version for packages: $(if [[ "$NAMESPACE" != "" && "$NAMESPACE" != "None" ]]; then echo "${NAMESPACE}:"; fi)${PACKAGE}" "${regx}"
              continue
            fi
            if grep -q -E 'ResourceNotFoundException' <<< "${LATEST}"; then
              textInfo "${regx}: Package not found for package: $(if [[ "$NAMESPACE" != "" && "$NAMESPACE" != "None" ]]; then echo "${NAMESPACE}:"; fi)${PACKAGE}" "${regx}"
              continue
            fi
            # Keep only the first line of the CLI output.
            LATEST=$(head -n 1 <<< $LATEST)
            # textInfo "Latest version: ${LATEST}"
            # Get the origin type for the latest version
            ORIGIN_TYPE=$(aws codeartifact describe-package-version --package "$PACKAGE" $(if [[ "$NAMESPACE" != "" && "$NAMESPACE" != "None" ]]; then echo "--namespace $NAMESPACE"; fi) --domain "$DOMAIN" --repository "$REPOSITORY" --domain-owner "$ACCOUNT" --format "$FORMAT" --package-version "$LATEST" ${PROFILE_OPT} --region "${regx}" --query 'packageVersion.origin.originType' --output text 2>&1)
            if grep -q -E 'AccessDenied|UnauthorizedOperation|AuthorizationError|Could not connect to the endpoint URL|ExpiredToken' <<< "${ORIGIN_TYPE}"; then
              textInfo "${regx}: Access Denied trying to get origin type of package $(if [[ "$NAMESPACE" != "" && "$NAMESPACE" != "None" ]]; then echo "${NAMESPACE}:"; fi)${PACKAGE}:${LATEST}" "${regx}" "${PACKAGE}"
              continue
            fi
            if grep -q -E 'INTERNAL|UNKNOWN' <<< "${ORIGIN_TYPE}"; then
              # The package is internal
              if [[ "$UPSTREAM" == "ALLOW" ]]; then
                # The package is not configured to block upstream fail check
                textFail "${regx}: Internal package $(if [[ "$NAMESPACE" != "" && "$NAMESPACE" != "None" ]]; then echo "${NAMESPACE}:"; fi)${PACKAGE} is vulnerable to dependency confusion in repository ${REPOSITORY}" "${regx}" "${PACKAGE}"
              else
                textPass "${regx}: Internal package $(if [[ "$NAMESPACE" != "" && "$NAMESPACE" != "None" ]]; then echo "${NAMESPACE}:"; fi)${PACKAGE} is NOT vulnerable to dependency confusion in repository ${REPOSITORY}" "${regx}" "${PACKAGE}"
              fi
            fi
          done <<< "${LIST_OF_PACKAGES}"
        else
          textInfo "${regx}: No packages found in ${REPOSITORY}" "${regx}" "${REPOSITORY}"
        fi
      done <<< "${LIST_OF_REPOSITORIES}"
    else
      textPass "${regx}: No repositories found" "${regx}"
    fi
  done
}
| true
|
1c5fcad2a6ca08ee634ac0c318af105e3feea324
|
Shell
|
NapoleonWils0n/cerberus
|
/w3m/cgi-bin/functions.cgi
|
UTF-8
| 853
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh
# fzf prompt to specify function to run from readme.func
file='/usr/share/doc/w3m/README.func'
# First column of the line chosen in fzf = the w3m function name.
selection=$(awk '{ print $0 }' "${file}" | fzf-tmux -d 30% --delimiter='\n' --prompt='Run w3m function: ' --info=inline --layout=reverse --no-multi | awk '{ print $1 }')
# variables
browser='/usr/bin/firefox'
# default function: copy the selected function name to the paste buffers
default() {
    echo "${selection}" | xsel -ipsb
}
# open current page with external browser
extern() {
    EXTERN="EXTERN ${browser}"
    echo "${EXTERN}" | xsel -ipsb
}
# open link with external browser
extern_link() {
    # BUG FIX: the original assigned EXTERN here but echoed the never-set
    # EXTERN_LINK, so an empty string was copied; assign the right variable.
    EXTERN_LINK="EXTERN_LINK ${browser}"
    echo "${EXTERN_LINK}" | xsel -ipsb
}
# quit w3m and w3mimgdisplay with pkill -15
quit() {
    pkill -15 w3m
}
# case statement match selection and run function
case "${selection}" in
    EXTERN) extern;;
    EXTERN_LINK) extern_link;;
    EXIT|ABORT) quit;;
    *) default;;
esac
| true
|
dad15f9a673ce5ab95209aba1d40dd501b1ccf55
|
Shell
|
zamar/admin
|
/src/createf/createf-new
|
UTF-8
| 1,729
| 4.65625
| 5
|
[] |
no_license
|
#!/bin/bash
#--------------------------------------------------------------------------
# USAGE:
#   createf-new filename type
#
# DESCRIPTION:
#   Creates file with specified skeleton
#--------------------------------------------------------------------------

#--------------------------------------------------------------------------
# INIT
#--------------------------------------------------------------------------
set -e          # Stop immediately if a command fails

#--------------------------------------------------------------------------
# GLOBAL CONSTANTS
#--------------------------------------------------------------------------
MODULE_NAME=createf-new
CONFDIR=/etc/createf

#--------------------------------------------------------------------------
# PRIVATE FUNCTIONS
#--------------------------------------------------------------------------
# Print usage and exit with status 1.
show_usage() {
    echo ""
    echo "$MODULE_NAME filename type"
    echo ""
    echo "filename  File to be created"
    echo "type      File type"
    echo ""
    exit 1
}

# Print the given message to stderr and exit with status 1.
die() {
    echo "$1" 1>&2
    exit 1
}

#--------------------------------------------------------------------------
# MAIN CODE
#--------------------------------------------------------------------------
# STEP. Parse command line.
# (Quoted $1/$2: unquoted they were subject to word splitting and pathname
# expansion — e.g. "-?" could glob-match a single-character file.)
if [ -z "$2" ] || [ "$1" == "-?" ] || [ "$1" == "-h" ] || [ "$1" == "--help" ] ; then
    show_usage
fi

FILENAME="$1"
FILETYPE="$2"

# STEP. Sanity check command
SOURCE="$CONFDIR/$FILETYPE.skel"
if [ ! -r "$SOURCE" ]; then
    die "File '$SOURCE' does not exist or is not readable."
fi
if [ -f "$FILENAME" ]; then
    die "File '$FILENAME' already exists!"
fi

# STEP. Copy template over
cp "$SOURCE" "$FILENAME"
| true
|
d3cde9e18dfde91a03eaaaae9103e57bd4191778
|
Shell
|
bdill11/noweb
|
/filenamehack.bash
|
UTF-8
| 185
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Strip backslashes and carriage returns from the paths listed in 'filelist',
# then rename any file matching *-*.m by replacing '-' with '_'.
# (Read line by line instead of word-splitting `cat` output, which broke on
# filenames containing spaces; mv arguments are quoted.)
tr -d '\\' < filelist | tr -d '\r' > templist
while IFS= read -r f; do
    if [[ "$f" =~ .*-.*\.m ]]; then
        mv "$f" "${f//-/_}"
    fi
done < templist
rm templist
| true
|
7fcf7cb90220dbc813c795f4ff81eba6be4edb5e
|
Shell
|
AndrewWalker/dotfiles
|
/python/install-python-depends
|
UTF-8
| 443
| 3.21875
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Build wheels for, then user-install, the packages in python/requirements.txt.
PATH=$PATH:~/.local/bin

# Bootstrap pip (plus wheel and virtualenvwrapper) for the current user when
# no pip is found on the PATH.
# NOTE(review): this function is defined but never invoked below — presumably
# it was meant to run before the pip commands; confirm intent.
function installpip {
    # check if pip is available (even if it's system pip)
    which pip > /dev/null
    if [[ "$?" != "0" ]]; then
        wget -c https://raw.github.com/pypa/pip/master/contrib/get-pip.py
        python get-pip.py --user
        pip install --upgrade pip wheel virtualenvwrapper --user
    fi
}

pip wheel -r python/requirements.txt
pip install -r python/requirements.txt --user
| true
|
d713e3ea7427669a480af85c73cd2472c390a63d
|
Shell
|
hassio-addons/bashio
|
/lib/addons.sh
|
UTF-8
| 49,463
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# ==============================================================================
# Home Assistant Community Add-ons: Bashio
# Bashio is a bash function library for use with Home Assistant add-ons.
#
# It contains a set of commonly used operations and can be used
# to be included in add-on scripts to reduce code duplication across add-ons.
# ==============================================================================
# ------------------------------------------------------------------------------
# Add-on lifecycle and content helpers. Each function below wraps a single
# Supervisor REST endpoint; unless stated otherwise, $1 is the add-on slug
# and defaults to 'self' (the add-on the calling code runs in).
# ------------------------------------------------------------------------------

# Reloads the add-on list on the Supervisor and clears all cached data.
bashio::addons.reload() {
    bashio::log.trace "${FUNCNAME[0]}"
    bashio::api.supervisor POST /addons/reload
    bashio::cache.flush_all
}

# Starts the add-on.
bashio::addon.start() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}"
    bashio::api.supervisor POST "/addons/${addon}/start"
}

# Restarts the add-on.
bashio::addon.restart() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}"
    bashio::api.supervisor POST "/addons/${addon}/restart"
}

# Stops the add-on.
bashio::addon.stop() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}"
    bashio::api.supervisor POST "/addons/${addon}/stop"
}

# Installs the add-on. $1 (required): add-on slug.
bashio::addon.install() {
    local addon=${1}
    bashio::log.trace "${FUNCNAME[0]}"
    bashio::api.supervisor POST "/addons/${addon}/install"
}

# Rebuilds the add-on from its local build context.
bashio::addon.rebuild() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}"
    bashio::api.supervisor POST "/addons/${addon}/rebuild"
}

# Uninstalls the add-on.
bashio::addon.uninstall() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}"
    bashio::api.supervisor POST "/addons/${addon}/uninstall"
}

# Updates the add-on to its latest version.
bashio::addon.update() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}"
    bashio::api.supervisor POST "/addons/${addon}/update"
}

# Prints the raw Docker logs of the add-on.
bashio::addon.logs() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}"
    bashio::api.supervisor GET "/addons/${addon}/logs" true
}

# Prints the documentation of the add-on.
bashio::addon.documentation() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}"
    bashio::api.supervisor GET "/addons/${addon}/documentation" true
}

# Prints the changelog of the add-on.
bashio::addon.changelog() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}"
    bashio::api.supervisor GET "/addons/${addon}/changelog" true
}
# ------------------------------------------------------------------------------
# Returns a JSON object with generic version information about addons.
#
# Arguments:
#   $1 Add-on slug (optional; false/omitted = query the full add-on list)
#   $2 Cache key to store results in (optional, default: addons.list)
#   $3 jq filter to apply on the result (optional, default: .addons[].slug)
# ------------------------------------------------------------------------------
function bashio::addons() {
    local slug=${1:-false}
    local cache_key=${2:-'addons.list'}
    local filter=${3:-'.addons[].slug'}
    local info
    local response
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    # Fast path: serve a previously computed (already filtered) result.
    if bashio::cache.exists "${cache_key}"; then
        bashio::cache.get "${cache_key}"
        return "${__BASHIO_EXIT_OK}"
    fi
    # Fetch the raw API response, caching it under a key that depends only
    # on the query target (full list vs. a single add-on's info).
    if bashio::var.false "${slug}"; then
        if bashio::cache.exists "addons.list"; then
            info=$(bashio::cache.get 'addons.list')
        else
            info=$(bashio::api.supervisor GET "/addons" false)
            bashio::cache.set "addons.list" "${info}"
        fi
    else
        if bashio::cache.exists "addons.${slug}.info"; then
            info=$(bashio::cache.get "addons.${slug}.info")
        else
            info=$(bashio::api.supervisor GET "/addons/${slug}/info" false)
            bashio::cache.set "addons.${slug}.info" "${info}"
        fi
    fi
    response="${info}"
    # Optionally narrow the response with jq before caching and printing.
    if bashio::var.has_value "${filter}"; then
        response=$(bashio::jq "${info}" "${filter}")
    fi
    bashio::cache.set "${cache_key}" "${response}"
    printf "%s" "${response}"
    return "${__BASHIO_EXIT_OK}"
}
# ------------------------------------------------------------------------------
# Lists installed add-on slugs, or reports whether one add-on is installed.
#
# Arguments:
#   $1 Add-on slug (optional; when omitted, prints all installed slugs)
# ------------------------------------------------------------------------------
bashio::addons.installed() {
    local addon=${1:-false}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    # A concrete slug given: answer true/false for that single add-on.
    if ! bashio::var.false "${addon}"; then
        bashio::addons \
            "${addon}" \
            "addons.${addon}.installed" \
            'if (.version != null) then true else false end'
        return
    fi
    # No slug: emit the slug of every installed add-on.
    bashio::addons \
        false \
        'addons.info.installed' \
        '.addons[] | select(.installed != null) | .slug'
}
# ------------------------------------------------------------------------------
# Basic add-on property getters. For each: $1 is the add-on slug and
# defaults to 'self'; the value is printed on stdout.
# ------------------------------------------------------------------------------

# Prints the add-on's name.
bashio::addon.name() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.name" '.name'
}

# Prints the add-on's hostname.
bashio::addon.hostname() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.hostname" '.hostname'
}

# Prints the add-on's DNS names, one per line.
bashio::addon.dns() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.dns" '.dns // empty | .[]'
}

# Prints the add-on's short description.
bashio::addon.description() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.description" '.description'
}

# Prints the add-on's long description.
bashio::addon.long_description() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.long_description" '.long_description'
}
# ------------------------------------------------------------------------------
# Returns or sets whether or not auto update is enabled for this add-on.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
#   $2 Set current auto update state (optional; true/false)
# ------------------------------------------------------------------------------
function bashio::addon.auto_update() {
    local slug=${1:-'self'}
    local auto_update=${2:-}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    if bashio::var.has_value "${auto_update}"; then
        # The '^' prefix tells bashio::var.json to emit the value as a raw
        # JSON token (boolean) instead of a quoted string.
        auto_update=$(bashio::var.json auto_update "^${auto_update}")
        bashio::api.supervisor POST "/addons/${slug}/options" "${auto_update}"
        # Options changed on the Supervisor side: drop all cached state.
        bashio::cache.flush_all
    else
        bashio::addons \
            "${slug}" \
            "addons.${slug}.auto_update" \
            '.auto_update // false'
    fi
}
# ------------------------------------------------------------------------------
# Add-on state/metadata getters. For each: $1 is the add-on slug and
# defaults to 'self'; the value is printed on stdout.
# ------------------------------------------------------------------------------

# Prints the URL of the add-on.
bashio::addon.url() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.url" '.url'
}

# Prints the detached state of the add-on.
bashio::addon.detached() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.detached" '.detached // false'
}

# Prints the availability state of the add-on.
bashio::addon.available() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.available" '.available // false'
}

# Prints whether this is an advanced add-on.
bashio::addon.advanced() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.advanced" '.advanced // false'
}

# Prints the stage the add-on is currently in.
bashio::addon.stage() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.stage" '.stage'
}

# Prints the phase in which the add-on is started.
bashio::addon.startup() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.startup" '.startup'
}

# Prints the architectures supported by the add-on, one per line.
bashio::addon.arch() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.arch" '.arch[]'
}

# Prints the machine types supported by the add-on, one per line.
bashio::addon.machine() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.machine" '.machine[]'
}

# Prints the slug of the repository this add-on comes from.
bashio::addon.repository() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.repository" '.repository'
}

# Prints the installed version of the add-on.
bashio::addon.version() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.version" '.version'
}

# Prints the latest available version of the add-on.
bashio::addon.version_latest() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.version_latest" '.version_latest'
}

# Prints whether an update is available for the add-on.
bashio::addon.update_available() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons \
        "${addon}" \
        "addons.${addon}.update_available" \
        '.update_available // false'
}

# Prints the current state of the add-on.
bashio::addon.state() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.state" '.state'
}
}
# ------------------------------------------------------------------------------
# Returns the current boot setting of this add-on.
#
# Arguments:
# $1 Add-on slug (optional, default: self)
# $2 Sets boot setting (optional).
# ------------------------------------------------------------------------------
function bashio::addon.boot() {
local slug=${1:-'self'}
local boot=${2:-}
bashio::log.trace "${FUNCNAME[0]}" "$@"
if bashio::var.has_value "${boot}"; then
boot=$(bashio::var.json boot "${boot}")
bashio::api.supervisor POST "/addons/${slug}/options" "${boot}"
bashio::cache.flush_all
else
bashio::addons "${slug}" "addons.${slug}.boot" '.boot'
fi
}
# ------------------------------------------------------------------------------
# Returns whether or not this add-on is being build locally.
#
# Arguments:
# $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.build() {
local slug=${1:-'self'}
bashio::log.trace "${FUNCNAME[0]}" "$@"
bashio::addons "${slug}" "addons.${slug}.build" '.build // false'
}
# ------------------------------------------------------------------------------
# Prints the options JSON object of an add-on.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
bashio::addon.options() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.options" '.options'
}
# ------------------------------------------------------------------------------
# Edit options for this add-on.
#
# Arguments:
#   $1 Config key to set or remove (required)
#   $2 Value to set (optional; when omitted, the key is removed.
#      Prefix the value with '^' to splice it in as raw JSON rather than
#      as a quoted string)
#   $3 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.option() {
    local key=${1}
    local value=${2:-}
    local slug=${3:-'self'}
    local options
    local payload
    local item
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    options=$(bashio::addon.options "${slug}")
    if bashio::var.has_value "${value}"; then
        # Default: treat the value as a JSON string...
        item="\"$value\""
        if [[ "${value:0:1}" == "^" ]]; then
            # ...unless it carries the '^' marker: strip it and inject the
            # remainder as raw JSON (number, boolean, object, array, ...).
            item="${value:1}"
        fi
        # jq: '|=' updates an existing key, '=' creates a new one.
        if bashio::jq.exists "${options}" ".${key}"; then
            options=$(bashio::jq "${options}" ".${key} |= ${item}")
        else
            options=$(bashio::jq "${options}" ".${key} = ${item}")
        fi
    else
        # No value given: remove the key from the options object.
        options=$(bashio::jq "${options}" "del(.${key})")
    fi
    # Push the full, modified options object back ('^' = raw JSON payload)
    # and invalidate all cached state.
    payload=$(bashio::var.json options "^${options}")
    bashio::api.supervisor POST "/addons/${slug}/options" "${payload}"
    bashio::cache.flush_all
}
# ------------------------------------------------------------------------------
# Returns a JSON object with add-on specific config for the addon itself.
#
# This can be only used by self.
# ------------------------------------------------------------------------------
function bashio::addon.config() {
    local cache_key="addons.self.options.config"
    local response
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    # Fast path: serve a previously fetched result.
    if bashio::cache.exists "${cache_key}"; then
        bashio::cache.get "${cache_key}"
        return "${__BASHIO_EXIT_OK}"
    fi
    response=$(bashio::api.supervisor GET "/addons/self/options/config" false)
    # If the add-on has no configuration, the API call yields an empty
    # string. Normalize that to an empty JSON object so consumers can
    # always run jq filters against the result.
    if bashio::var.is_empty "${response}";
    then
        response="{}"
    fi
    bashio::cache.set "${cache_key}" "${response}"
    printf "%s" "${response}"
    return "${__BASHIO_EXIT_OK}"
}
# ------------------------------------------------------------------------------
# Network mapping getters. For each: $1 is the add-on slug, default 'self'.
# ------------------------------------------------------------------------------

# Prints the ports exposed on the host network for the add-on.
bashio::addon.network() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.network" '.network'
}

# Prints the ports and their descriptions for the add-on.
bashio::addon.network_description() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons \
        "${addon}" \
        "addons.${addon}.network_description" \
        '.network_description'
}
# ------------------------------------------------------------------------------
# Prints the user-configured host port for an original container port.
#
# Arguments:
#   $1 Original port number (a bare number implies TCP)
#   $2 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
bashio::addon.port() {
    local port=${1:-}
    local addon=${2:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    case "${port}" in
        */*) ;;                    # protocol already specified
        *) port="${port}/tcp" ;;   # default to TCP
    esac
    bashio::addons \
        "${addon}" \
        "addons.${addon}.network.${port//\//-}" \
        ".network[\"${port}\"] // empty"
}

# ------------------------------------------------------------------------------
# Prints the description for an original container port of the add-on.
#
# Arguments:
#   $1 Original port number (a bare number implies TCP)
#   $2 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
bashio::addon.port_description() {
    local port=${1:-}
    local addon=${2:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    case "${port}" in
        */*) ;;                    # protocol already specified
        *) port="${port}/tcp" ;;   # default to TCP
    esac
    bashio::addons \
        "${addon}" \
        "addons.${addon}.network_description.${port//\//-}" \
        ".network_description[\"${port}\"] // empty"
}
# ------------------------------------------------------------------------------
# Host-access and hardware getters. For each: $1 is the add-on slug and
# defaults to 'self'; the value is printed on stdout.
# ------------------------------------------------------------------------------

# Prints whether the add-on runs on the host network.
bashio::addon.host_network() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons \
        "${addon}" \
        "addons.${addon}.host_network" \
        '.host_network // false'
}

# Prints whether the add-on runs in the host PID namespace.
bashio::addon.host_pid() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons \
        "${addon}" \
        "addons.${addon}.host_pid" \
        '.host_pid // false'
}

# Prints whether the add-on has host IPC access.
bashio::addon.host_ipc() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons \
        "${addon}" \
        "addons.${addon}.host_ipc" \
        '.host_ipc // false'
}

# Prints whether the add-on has DBus access to the host.
bashio::addon.host_dbus() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons \
        "${addon}" \
        "addons.${addon}.host_dbus" \
        '.host_dbus // false'
}

# Prints the privileges the add-on has on hardware/system, one per line.
bashio::addon.privileged() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.privileged" '.privileged[]'
}

# Prints the current AppArmor state of the add-on.
bashio::addon.apparmor() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.apparmor" '.apparmor'
}

# Prints the devices made available to the add-on, one per line.
bashio::addon.devices() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.devices" '.devices // empty | .[]'
}

# Prints whether the add-on provides its own udev support.
bashio::addon.udev() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.udev" '.udev // false'
}

# Prints whether UART was made available to the add-on.
bashio::addon.uart() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.uart" '.uart // false'
}

# Prints whether USB was made available to the add-on.
bashio::addon.usb() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.usb" '.usb // false'
}
# ------------------------------------------------------------------------------
# Presentation and API-access getters. For each: $1 is the add-on slug and
# defaults to 'self'; the value is printed on stdout.
# ------------------------------------------------------------------------------

# Prints whether the add-on has an icon available.
bashio::addon.icon() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.icon" '.icon // false'
}

# Prints whether the add-on has a logo available.
bashio::addon.logo() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.logo" '.logo // false'
}

# Prints whether the add-on has documentation available.
bashio::addon.has_documentation() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons \
        "${addon}" \
        "addons.${addon}.documentation" '.documentation // false'
}

# Prints whether the add-on has a changelog available.
bashio::addon.has_changelog() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.changelog" '.changelog // false'
}

# Prints whether the add-on can access the Supervisor API.
bashio::addon.hassio_api() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.hassio_api" '.hassio_api // false'
}

# Prints the Supervisor API role of the add-on.
bashio::addon.hassio_role() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.hassio_role" '.hassio_role'
}

# Prints the minimal Home Assistant version required by the add-on.
bashio::addon.homeassistant() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.homeassistant" '.homeassistant'
}

# Prints whether the add-on can access the Home Assistant API.
bashio::addon.homeassistant_api() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons \
        "${addon}" \
        "addons.${addon}.homeassistant_api" \
        '.homeassistant_api // false'
}

# Prints whether the add-on can access the Supervisor Auth API.
bashio::addon.auth_api() {
    local addon=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${addon}" "addons.${addon}.auth_api" '.auth_api // false'
}
# ------------------------------------------------------------------------------
# Returns whether or not this add-on run in protected mode.
#
# Arguments:
# $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.protected() {
local slug=${1:-'self'}
bashio::log.trace "${FUNCNAME[0]}" "$@"
bashio::addons "${slug}" "addons.${slug}.protected" '.protected // false'
}
# ------------------------------------------------------------------------------
# Returns the add-on its rating
#
# Arguments:
# $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.rating() {
local slug=${1:-'self'}
bashio::log.trace "${FUNCNAME[0]}" "$@"
bashio::addons "${slug}" "addons.${slug}.rating" '.rating'
}
# ------------------------------------------------------------------------------
# Returns whether or not this add-on can use the STDIN on the Supervisor API.
#
# Arguments:
# $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.stdin() {
local slug=${1:-'self'}
bashio::log.trace "${FUNCNAME[0]}" "$@"
bashio::addons "${slug}" "addons.${slug}.stdin" '.stdin // false'
}
# ------------------------------------------------------------------------------
# Whether the given add-on has full access.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.full_access() {
    local slug
    slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${slug}" "addons.${slug}.full_access" '.full_access // false'
}
# ------------------------------------------------------------------------------
# URL of the web interface of the given add-on (empty when it has none).
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.webui() {
    local slug
    slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons \
        "${slug}" \
        "addons.${slug}.webui" \
        '.webui // empty'
}
# ------------------------------------------------------------------------------
# Whether the given add-on can access GPIO.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.gpio() {
    local slug
    slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons \
        "${slug}" \
        "addons.${slug}.gpio" \
        '.gpio // false'
}
# ------------------------------------------------------------------------------
# Whether the given add-on can access kernel modules.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.kernel_modules() {
    local slug
    slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${slug}" "addons.${slug}.kernel_modules" '.kernel_modules // false'
}
# ------------------------------------------------------------------------------
# Whether the given add-on can access the devicetree.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.devicetree() {
    local slug
    slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons \
        "${slug}" \
        "addons.${slug}.devicetree" \
        '.devicetree // false'
}
# ------------------------------------------------------------------------------
# Whether the given add-on can access the Docker socket.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.docker_api() {
    local slug
    slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons \
        "${slug}" \
        "addons.${slug}.docker_api" \
        '.docker_api // false'
}
# ------------------------------------------------------------------------------
# Whether the given add-on can access video devices.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.video() {
    local slug
    slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons \
        "${slug}" \
        "addons.${slug}.video" \
        '.video // false'
}
# ------------------------------------------------------------------------------
# Whether the given add-on can access an audio device.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.audio() {
    local slug
    slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons \
        "${slug}" \
        "addons.${slug}.audio" \
        '.audio // false'
}
# ------------------------------------------------------------------------------
# Returns the available audio input device for an add-on, or sets it when a
# second argument is given.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
#   $2 Audio input device to set (Optional; when present the value is written
#      to the add-on options instead of being read)
# ------------------------------------------------------------------------------
function bashio::addon.audio_input() {
    local slug=${1:-'self'}
    local audio_input=${2:-}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    if bashio::var.has_value "${audio_input}"; then
        # Setter path: POST the new value to the Supervisor, then flush the
        # cache because the previously cached add-on info is now stale.
        audio_input=$(bashio::var.json audio_input "${audio_input}")
        bashio::api.supervisor POST "/addons/${slug}/options" "${audio_input}"
        bashio::cache.flush_all
    else
        # Getter path: read the currently configured input device.
        bashio::addons \
            "${slug}" \
            "addons.${slug}.audio_input" \
            '.audio_input // empty'
    fi
}
# ------------------------------------------------------------------------------
# Gets the audio output device of an add-on, or sets it when $2 is given.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
#   $2 Audio output device to set (Optional)
# ------------------------------------------------------------------------------
function bashio::addon.audio_output() {
    local slug=${1:-'self'}
    local audio_output=${2:-}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    # Without a value to set, simply report the current device.
    if ! bashio::var.has_value "${audio_output}"; then
        bashio::addons "${slug}" "addons.${slug}.audio_output" '.audio_output // empty'
        return
    fi
    # Setter path: write the option, then drop stale cached API responses.
    audio_output=$(bashio::var.json audio_output "${audio_output}")
    bashio::api.supervisor POST "/addons/${slug}/options" "${audio_output}"
    bashio::cache.flush_all
}
# ------------------------------------------------------------------------------
# IP address assigned to the given add-on on the Home Assistant network.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.ip_address() {
    local slug
    slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons \
        "${slug}" \
        "addons.${slug}.ip_address" \
        '.ip_address // empty'
}
# ------------------------------------------------------------------------------
# Whether the given add-on supports ingress mode.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.ingress() {
    local slug
    slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons \
        "${slug}" \
        "addons.${slug}.ingress" \
        '.ingress // false'
}
# ------------------------------------------------------------------------------
# The ingress entry point of the given add-on.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.ingress_entry() {
    local slug
    slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${slug}" "addons.${slug}.ingress_entry" '.ingress_entry // empty'
}
# ------------------------------------------------------------------------------
# The ingress URL of the given add-on.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.ingress_url() {
    local slug
    slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${slug}" "addons.${slug}.ingress_url" '.ingress_url // empty'
}
# ------------------------------------------------------------------------------
# The ingress port of the given add-on.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.ingress_port() {
    local slug
    slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    bashio::addons "${slug}" "addons.${slug}.ingress_port" '.ingress_port // empty'
}
# ------------------------------------------------------------------------------
# Gets whether the watchdog is enabled for an add-on, or sets it when $2 is
# given.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
#   $2 Set current watchdog state (Optional)
# ------------------------------------------------------------------------------
function bashio::addon.watchdog() {
    local slug=${1:-'self'}
    local watchdog=${2:-}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    # Without a value to set, simply report the current watchdog state.
    if ! bashio::var.has_value "${watchdog}"; then
        bashio::addons "${slug}" "addons.${slug}.watchdog" '.watchdog // false'
        return
    fi
    # Setter path. The '^' prefix is bashio::var.json's marker for a raw
    # (non-string) JSON value — presumably so the boolean is not quoted;
    # verify against bashio::var.json if this is ever changed.
    watchdog=$(bashio::var.json watchdog "^${watchdog}")
    bashio::api.supervisor POST "/addons/${slug}/options" "${watchdog}"
    bashio::cache.flush_all
}
# ------------------------------------------------------------------------------
# List all available stats about an add-on.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
#   $2 Cache key to store results in (optional)
#   $3 jq Filter to apply on the result (optional)
# ------------------------------------------------------------------------------
function bashio::addon.stats() {
    local slug=${1:-'self'}
    local cache_key=${2:-"addons.${slug}.stats"}
    local filter=${3:-}
    local stats
    local result

    bashio::log.trace "${FUNCNAME[0]}" "$@"

    # A previously filtered result can be served straight from the cache.
    if bashio::cache.exists "${cache_key}"; then
        bashio::cache.get "${cache_key}"
        return "${__BASHIO_EXIT_OK}"
    fi

    # Obtain the raw stats payload, from cache when possible, otherwise
    # from the Supervisor API (and cache it for subsequent calls).
    if bashio::cache.exists "addons.${slug}.stats"; then
        stats=$(bashio::cache.get "addons.${slug}.stats")
    else
        stats=$(bashio::api.supervisor GET "/addons/${slug}/stats" false)
        bashio::cache.set "addons.${slug}.stats" "${stats}"
    fi

    result="${stats}"
    if bashio::var.has_value "${filter}"; then
        result=$(bashio::jq "${stats}" "${filter}")
    fi

    bashio::cache.set "${cache_key}" "${result}"
    printf "%s" "${result}"
    return "${__BASHIO_EXIT_OK}"
}
# ------------------------------------------------------------------------------
# Returns CPU usage from the specified add-on.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.cpu_percent() {
    local slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    # Fixed: previously called bashio::addons.stats (plural), which is not
    # defined in this library; bashio::addon.stats is the stats accessor
    # with this exact (slug, cache_key, filter) signature.
    bashio::addon.stats \
        "${slug}" \
        "addons.${slug}.stats.cpu_percent" \
        '.cpu_percent'
}
# ------------------------------------------------------------------------------
# Returns memory usage from the specified add-on.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.memory_usage() {
    local slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    # Fixed: was bashio::addons.stats (undefined); the stats accessor is
    # bashio::addon.stats.
    bashio::addon.stats \
        "${slug}" \
        "addons.${slug}.stats.memory_usage" \
        '.memory_usage'
}
# ------------------------------------------------------------------------------
# Returns memory limit from the specified add-on.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.memory_limit() {
    local slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    # Fixed: was bashio::addons.stats (undefined); the stats accessor is
    # bashio::addon.stats.
    bashio::addon.stats \
        "${slug}" \
        "addons.${slug}.stats.memory_limit" \
        '.memory_limit'
}
# ------------------------------------------------------------------------------
# Returns memory usage in percentage for the specified add-on.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.memory_percent() {
    local slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    # Fixed: was bashio::addons.stats (undefined); the stats accessor is
    # bashio::addon.stats.
    bashio::addon.stats \
        "${slug}" \
        "addons.${slug}.stats.memory_percent" \
        '.memory_percent'
}
# ------------------------------------------------------------------------------
# Returns outgoing network usage from the specified add-on.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.network_tx() {
    local slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    # Fixed: was bashio::addons.stats (undefined); the stats accessor is
    # bashio::addon.stats.
    bashio::addon.stats \
        "${slug}" \
        "addons.${slug}.stats.network_tx" \
        '.network_tx'
}
# ------------------------------------------------------------------------------
# Returns incoming network usage from the specified add-on.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.network_rx() {
    local slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    # Fixed: was bashio::addons.stats (undefined); the stats accessor is
    # bashio::addon.stats.
    bashio::addon.stats \
        "${slug}" \
        "addons.${slug}.stats.network_rx" \
        '.network_rx'
}
# ------------------------------------------------------------------------------
# Returns disk read usage from the specified add-on.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.blk_read() {
    local slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    # Fixed: was bashio::addons.stats (undefined); the stats accessor is
    # bashio::addon.stats.
    bashio::addon.stats \
        "${slug}" \
        "addons.${slug}.stats.blk_read" \
        '.blk_read'
}
# ------------------------------------------------------------------------------
# Returns disk write usage from the specified add-on.
#
# Arguments:
#   $1 Add-on slug (optional, default: self)
# ------------------------------------------------------------------------------
function bashio::addon.blk_write() {
    local slug=${1:-'self'}
    bashio::log.trace "${FUNCNAME[0]}" "$@"
    # Fixed: was bashio::addons.stats (undefined); the stats accessor is
    # bashio::addon.stats.
    bashio::addon.stats \
        "${slug}" \
        "addons.${slug}.stats.blk_write" \
        '.blk_write'
}
# ------------------------------------------------------------------------------
# Ensures this add-on runs in protected mode; exits the add-on otherwise.
# ------------------------------------------------------------------------------
function bashio::require.protected() {
    local protected
    protected=$(bashio::addon.protected 'self')
    if ! bashio::var.true "${protected}"; then
        bashio::log.fatal "PROTECTION MODE IS DISABLED!"
        bashio::log.fatal
        bashio::log.fatal "We are trying to help you to protect your system the"
        bashio::log.fatal "best we can. Therefore, this add-on checks if"
        bashio::log.fatal "protection mode is enabled on this add-on."
        bashio::log.fatal
        bashio::log.fatal "Unfortunately, it has been disabled."
        bashio::log.fatal "Please enable it again!"
        bashio::log.fatal ""
        bashio::log.fatal "Steps:"
        bashio::log.fatal " - Go to the Supervisor Panel."
        bashio::log.fatal " - Click on this add-on."
        bashio::log.fatal " - Set the 'Protection mode' switch to on."
        bashio::log.fatal " - Restart the add-on."
        bashio::log.fatal
        bashio::exit.nok
    fi
    return "${__BASHIO_EXIT_OK}"
}
# ------------------------------------------------------------------------------
# Ensures this add-on runs with protection mode disabled; exits otherwise.
# ------------------------------------------------------------------------------
function bashio::require.unprotected() {
    local protected
    protected=$(bashio::addon.protected 'self')
    if ! bashio::var.false "${protected}"; then
        bashio::log.fatal "PROTECTION MODE IS ENABLED!"
        bashio::log.fatal
        bashio::log.fatal "To be able to use this add-on, you'll need to disable"
        bashio::log.fatal "protection mode on this add-on. Without it, the add-on"
        bashio::log.fatal "is unable to access Docker."
        bashio::log.fatal
        bashio::log.fatal "Steps:"
        bashio::log.fatal " - Go to the Supervisor Panel."
        bashio::log.fatal " - Click on this add-on."
        bashio::log.fatal " - Set the 'Protection mode' switch to off."
        bashio::log.fatal " - Restart the add-on."
        bashio::log.fatal
        bashio::log.fatal "Access to Docker allows you to do really powerful things"
        bashio::log.fatal "including complete destruction of your system."
        bashio::log.fatal "Please, be sure you know what you are doing before"
        bashio::log.fatal "enabling this feature (and this add-on)!"
        bashio::log.fatal
        bashio::exit.nok
    fi
    return "${__BASHIO_EXIT_OK}"
}
| true
|
67b493a2065dd8e81567e18b55cd4455e94f11d5
|
Shell
|
relaxdiego/gastropub
|
/script/common
|
UTF-8
| 799
| 2.84375
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Common environment shared by the gastropub helper scripts.
# Exports the project paths used by packer/vagrant wrappers.

script_path=$(dirname "${BASH_SOURCE[0]}")
# Resolve the project root in a subshell instead of cd-ing back and forth,
# and quote the path so directories with spaces work.
export project_path=$(cd "$script_path/.." && pwd)
export project_name=gastropub

export tmp_path=${project_path}/tmp
export packer_tmp_path=${tmp_path}/packer
export packer_output_path=${packer_tmp_path}/output
export packer_http_path=${packer_tmp_path}/http
export templates_path=${project_path}
export os_installer_path=${project_path}/os-installer
export provisioner_path=${project_path}/config
export playbook_path=${provisioner_path}/host.yaml
export PACKER_CACHE_DIR=${packer_tmp_path}/cache
export rehearsal_config=${project_path}/.rehearsal

# Optional per-developer overrides.
if [[ -f $rehearsal_config ]]; then
  source "$rehearsal_config"
fi

export provider=virtualbox
export VAGRANT_VAGRANTFILE=${project_path}/vagrant/Vagrantfile
| true
|
085c14ad846f16d8f1ff0fb507ce8e7973c1a1ad
|
Shell
|
MrZloHex/lscc
|
/scripts/lscmem.sh
|
UTF-8
| 1,159
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# lscmem.sh — interactive wrapper around lsmem(1): parse the same flags as
# lsmem, then re-run it on every keypress until the user presses 'q'.

# One variable per supported lsmem flag; each holds either the empty string
# (flag not requested) or the flag text to pass straight through to lsmem.
aLL=""
bYTES=""
hELP=""
JSON=""
nOHEADINGS=""
oUTPUT=""
Ouput_list=""
PAIRS=""
rAW=""
SPLIT=""
sYSROOT=""
VERSION=""
Usummary=""
# Repeatedly run lsmem with the collected flags; any keypress refreshes the
# output, pressing 'q' ends the loop. The flag variables are intentionally
# left unquoted so empty ones disappear via word-splitting.
call_function(){
    until [[ "$key" == "q" ]]
    do
        lsmem $aLL $bYTES $hELP $JSON $nOHEADINGS $oUTPUT $Ouput_list $PAIRS $rAW $SPLIT $sYSROOT $VERSION $Usummary
        read -s -n 1 key
        clear
    done
}
# Parse command-line flags and options, then hand off to call_function.
# Fix: the original getopt call declared only short options, so the long
# options (--all, --json, ...) matched by the case arms below were rejected
# by getopt before ever reaching them; --longoptions makes them work.
eval set -- "$(getopt -o abhJno:OPrs:S:VU: \
    --longoptions all,bytes,help,json,noheadings,output:,output-list,pairs,raw,sysroot:,split:,version,summary: \
    -- "$@")"
while :
do
    case "$1" in
        -a | --all) aLL="-a" ; shift ;;
        -b | --bytes) bYTES="-b" ; shift ;;
        -h | --help) hELP="-h" ; shift ;;
        -J | --json) JSON="-J" ; shift ;;
        -n | --noheadings) nOHEADINGS="-n" ; shift ;;
        -o | --output) oUTPUT="-o $2" ; shift 2 ;;
        -O | --output-list) Ouput_list="--output-list" ; shift ;;
        -P | --pairs) PAIRS="-P" ; shift ;;
        -r | --raw) rAW="-r" ; shift ;;
        -S | --split) SPLIT="-S $2" ; shift 2 ;;
        -s | --sysroot) sYSROOT="-s $2" ; shift 2 ;;
        -V | --version) VERSION="-V" ; shift ;;
        -U | --summary) Usummary="--summary $2" ; shift 2 ;;
        --) shift; break ;;
        *) echo "INVALID FLAG" ;;
    esac
done
call_function
| true
|
f295af3226349bf0f4e5521c162fc5ab5e568ca2
|
Shell
|
1456764506/netdata_install
|
/netdata_install.sh
|
UTF-8
| 8,869
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Use a fixed, trusted PATH so the installer behaves identically no matter
# what the invoking user's environment contains.
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
export PATH
#=================================================================#
#   Script  Name: netdata.sh                                      #
#   System Required:  Centos Ubuntu Debian                        #
#   Description:                                                  #
#   Version Number: Beta 1.0.1                                    #
#   Updated:  2019-04-17                                          #
#   Official Website: https://www.91linux.org                     #
#=================================================================#
# Ensure the script is executed by root; abort otherwise.
# Make sure only root can run our script
rootness(){
    if (( EUID != 0 )); then
        echo "Error: This script must be run as root!" 1>&2
        exit 1
    fi
}
## Verify outbound connectivity by pinging a public DNS resolver
## (114.114.114.114); abort when the network is unreachable.
checknetwork(){
    if ! ping -c 3 114.114.114.114 &>/dev/null; then
        echo -e "\033[31m error: Please check your network connection. \033[0m"
        exit
    fi
}
# Check OS
# Detect the distribution family and store it in the global OS variable.
# Only CentOS 6+, Debian and Ubuntu are supported.
checkos(){
    if [ -f /etc/redhat-release ];then
        OS='CentOS'
        if centosversion 5; then
            echo "Not support CentOS 5, please change OS to CentOS 6+ and retry."
            exit 1
        fi
    # grep -qs replaces the original `cat /etc/issue | grep` pipelines:
    # same truth value, no useless cat, and -s stays quiet if the file
    # is missing.
    elif grep -qs bian /etc/issue; then
        OS='Debian'
    elif grep -qs Ubuntu /etc/issue; then
        OS='Ubuntu'
    else
        echo "Not support OS, Please reinstall OS to CentOS 6+/Debian 7+/Ubuntu 12+ and retry!"
        exit 1
    fi
}
# Get version
# Print the distribution version string (digits and dots only), preferring
# /etc/redhat-release when it exists and is non-empty.
getversion(){
    local release_file=/etc/issue
    [[ -s /etc/redhat-release ]] && release_file=/etc/redhat-release
    grep -oE "[0-9.]+" "${release_file}"
}
# CentOS version
# Return 0 when the running CentOS major version equals $1, 1 otherwise.
centosversion(){
    local code=$1
    local version
    version=$(getversion)
    local main_ver=${version%%.*}
    # Quote both operands and use [[ ]]: the original unquoted
    # `[ $main_ver == $code ]` produced a syntax error (not a clean
    # "no match") whenever the version string came back empty.
    if [[ "${main_ver}" == "${code}" ]]; then
        return 0
    else
        return 1
    fi
}
# firewall set
# Open TCP port 19999 (netdata's web UI port, see set_netdata) on CentOS.
# Reads the global OS variable set by checkos; on Ubuntu/Debian the user is
# only warned to configure the firewall manually.
firewall_set(){
    if [ "$OS" == "CentOS" ];then
        echo "Firewall set start..."
        if centosversion 6; then
            # CentOS 6: classic iptables init service.
            /etc/init.d/iptables status > /dev/null 2>&1
            if [ $? -eq 0 ]; then
                # Only insert the rule when no ACCEPT rule for 19999 exists yet.
                iptables -L -n | grep 19999 | grep 'ACCEPT' > /dev/null 2>&1
                if [ $? -ne 0 ]; then
                    iptables -I INPUT -m state --state NEW -m tcp -p tcp --dport 19999 -j ACCEPT
                    /etc/init.d/iptables save
                    /etc/init.d/iptables restart
                else
                    echo "port 19999 has been set up."
                fi
            else
                echo "WARNING: iptables looks like shutdown or not installed, please manually set it if necessary."
            fi
        elif centosversion 7; then
            # CentOS 7: firewalld.
            systemctl status firewalld > /dev/null 2>&1
            if [ $? -eq 0 ];then
                firewall-cmd --zone=public --list-all | grep 19999 > /dev/null 2>&1
                if [ $? -ne 0 ]; then
                    firewall-cmd --permanent --zone=public --add-port=19999/tcp
                    firewall-cmd --reload
                fi
            else
                echo "Firewalld looks like not running !"
            fi
        fi
        echo "firewall set completed..."
    elif [ "$OS" == "Ubuntu" ] || [ "$OS" == "Debian" ];then
        echo "Warning: Please manually configure your firewall"
    fi
}
disable_selinux(){
CHECK=$(grep SELINUX= /etc/selinux/config | grep -v "#")
if [ "$CHECK" == "SELINUX=enforcing" ]; then
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
fi
if [ "$CHECK" == "SELINUX=permissive" ]; then
sed -i 's/SELINUX=permissive/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
fi
if [ "$CHECK" == "SELINUX=disabled" ]; then
echo "SELINUX is disable."
fi
}
# Install the archive tools and build dependencies netdata needs, per
# distribution family. Reads the global OS variable set by checkos.
zip_check(){
    if [ "$OS" == "CentOS" ];then
        yum install -y epel-release
        yum install -y wget bash-completion net-tools gcc-c++ autoconf automake curl gcc git libuv-devel libmnl-devel libuuid-devel lm_sensors make cmake MySQL-python nc pkgconfig python python-psycopg2 PyYAML zlib-devel python-yaml iproute python-pymongo libmnl
        if [ $? -ne 0 ]; then
            echo "netdata依赖软件安装出错,请联系管理员admin@91linux.org。"
            exit 1
        fi
        # check_tar/unzip
        tar --version >/dev/null 2>&1
        [ ! $? -eq 0 ]&&yum install -y tar &&echo "yum install tar"
        unzip -v >/dev/null 2>&1
        [ ! $? -eq 0 ]&&yum install -y unzip &&echo "yum installing unzip"
    elif [ "$OS" == "Ubuntu" ] || [ "$OS" == "Debian" ];then
        sudo apt-get install -y wget bash-completion sysv-rc-conf axel zlib1g-dev uuid-dev libmnl-dev libuv-dev gcc make cmake git autoconf autoconf-archive autogen automake pkg-config curl iproute python python-yaml python-pymongo python-psycopg2
        # Fix: was `[ $? -ne 0]` — the missing space before `]` made this
        # test always fail with "[: missing `]'", so apt-get errors were
        # never caught.
        if [ $? -ne 0 ]; then
            echo "netdata依赖软件安装出错,请联系管理员admin@91linux.org。"
            exit 1
        fi
        tar --version >/dev/null 2>&1
        [ ! $? -eq 0 ]&&apt-get install -y tar &&echo "install tar"
        unzip -v >/dev/null 2>&1
        [ ! $? -eq 0 ]&&apt-get install -y unzip &&echo "installing unzip"
    fi
}
# Install netdata
# Register netdata as a boot-time service and start it. Extracted helper:
# this identical ~17-line sequence previously appeared twice inside
# install_netdata.
netdata_enable_service(){
    cd /opt/netdata
    if [ "$OS" == "CentOS" ]; then
        if centosversion 6; then
            cp -rf /opt/netdata/system/netdata-init-d /etc/init.d/netdata
            chmod +x /etc/init.d/netdata
            chkconfig netdata on
            service netdata start
        elif centosversion 7; then
            systemctl enable netdata
            systemctl start netdata
        fi
    elif [ "$OS" == "Ubuntu" ] || [ "$OS" == "Debian" ]; then
        cp -rf /opt/netdata/system/netdata-lsb /etc/init.d/netdata
        chmod +x /etc/init.d/netdata
        update-rc.d netdata defaults
        sysv-rc-conf netdata on
        systemctl start netdata
    fi
}

# If netdata is already installed, just (re-)enable the service and exit;
# otherwise clone it from GitHub, build/install it, then enable the service.
install_netdata(){
    if [ -e /etc/netdata/netdata.conf ]; then
        netdata_enable_service
        echo netdata is installed!
        exit 0
    else
        cd /opt
        rm -rf /opt/netdata
        git clone https://github.com/firehol/netdata.git --depth=100
        cd /opt/netdata
        # The leading `echo |` feeds the installer's confirmation prompt.
        echo | bash netdata-installer.sh
        if [ -e /etc/netdata/netdata.conf ]; then
            netdata_enable_service
        fi
        if [ $? -eq 0 ];then
            echo netdata Installation success!
        else
            echo "netdata安装失败,请联系管理员admin@91linux.org"
            exit 1
        fi
    fi
}
# Set netdata
# Post-install configuration: nightly auto-update cron job, kernel
# same-page merging, and a final hint with the dashboard URL.
set_netdata(){
    # Schedule a nightly (02:00) cron job that auto-updates netdata.
    cat /etc/crontab | grep '/opt/netdata/netdata-updater.sh' > /dev/null 2>&1
    if [ $? -ne 0 ]; then
        echo "0 2 * * * root /opt/netdata/netdata-updater.sh >/dev/null 2>&1" >> /etc/crontab
    fi
    # Enable kernel same-page merging (KSM); per the original author's note
    # this can save roughly 40%-60% of netdata's memory overhead.
    echo 1 >/sys/kernel/mm/ksm/run; echo 1000 >/sys/kernel/mm/ksm/sleep_millisecs
    cat /etc/rc.local | grep sleep_millisecs > /dev/null 2>&1
    if [ $? -ne 0 ]; then
        # Persist the KSM settings across reboots.
        echo "echo 1 >/sys/kernel/mm/ksm/run" >> /etc/rc.local
        echo "echo 1000 >/sys/kernel/mm/ksm/sleep_millisecs" >> /etc/rc.local
    fi
    # Build the dashboard URL from the first non-loopback IPv4 address;
    # ifconfig output formats differ between the distro families.
    if [ "$OS" == "CentOS" ]; then
        ipurl=`/sbin/ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'`:19999
    elif [ "$OS" == "Ubuntu" ] || [ "$OS" == "Debian" ]; then
        ipurl=`/sbin/ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|awk -F: '{print $2}'`:19999
    fi
    echo "--------------------"
    echo "请在浏览器打开{$ipurl} 打开实时性能和健康监测界面!"
    echo "更多详情请查看官网文档:https://github.com/firehol/netdata"
    echo "--------------------"
}
#  START  #
checknetwork        # verify network connectivity
rootness            # require root privileges
checkos             # detect the operating system family
firewall_set        # open port 19999 in the firewall
disable_selinux     # disable SELinux
zip_check           # install archive tools and build dependencies
install_netdata     # install netdata
set_netdata         # configure netdata
#  END  #
| true
|
77979916fd5a6c166025f48456ed0bc432b4e989
|
Shell
|
merlinvn/dotfiles
|
/shell/.profile
|
UTF-8
| 2,683
| 2.765625
| 3
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
# ~/.profile: executed by the command interpreter for login shells.
# This file is not read by bash(1), if ~/.bash_profile or ~/.bash_login
# exists.
# see /usr/share/doc/bash/examples/startup-files for examples.
# the files are located in the bash-doc package.

# the default umask is set in /etc/profile; for setting the umask
# for ssh logins, install and configure the libpam-umask package.
#umask 022

# those 3 variables were used by ibus
# NOTE(review): these are assigned but never exported, so child processes
# (where input-method modules would read them) won't see them — confirm
# whether `export` was intended here.
GTK_IM_MODULE=ibus
QT_IM_MODULE=ibus
XMODIFIERS=@im=ibus

# if running bash
if [ -n "$BASH_VERSION" ]; then
    # include .bashrc if it exists
    if [ -f "$HOME/.bashrc" ]; then
        . "$HOME/.bashrc"
    fi
fi

if [ -d /sbin ] ; then
    PATH="/sbin:$PATH"
fi

# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/bin" ] ; then
    PATH="$HOME/bin:$PATH"
fi

# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/.local/bin" ] ; then
    PATH="$HOME/.local/bin:$PATH"
fi

# Only touch the keymap when an X display is reachable (xhost succeeds).
if xhost >& /dev/null ; then
    setxkbmap -option caps:escape
fi

# Rust toolchain (rustup/cargo).
if [ -f "$HOME/.cargo/env" ]; then
    . "$HOME/.cargo/env"
fi

if [ -d "$HOME/.cargo/bin" ] ; then
    PATH="$HOME/.cargo/bin:$PATH"
fi

# Go toolchain.
export GO111MODULE=on
export GOPATH=$HOME/go
export PATH=$PATH:$GOPATH/bin
[ -d "/usr/local/go/bin" ] && export PATH=$PATH:/usr/local/go/bin

# GCC 10.2.0
if [ -d "/usr/local/gcc-10.2.0" ]; then
    export PATH=/usr/local/gcc-10.2.0/bin:$PATH
    export LD_LIBRARY_PATH=/usr/local/gcc-10.2.0/lib64:$LD_LIBRARY_PATH
    alias gcc=gcc-10.2
    alias g++=g++-10.2
fi

# Ruby via rbenv.
if [ -f "/usr/bin/rbenv" ]; then
    eval "$(rbenv init -)"
fi

# Add RVM to PATH for scripting. Make sure this is the last PATH variable change.
export PATH="$PATH:$HOME/.rvm/bin"
# [[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*

export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
export LANGUAGE=en_US.UTF-8

# On Arch, auto-start X when logging in on the first virtual console.
[[ -f "/etc/arch-release" ]] && [[ $(fgconsole 2>/dev/null) == 1 ]] && startx -- vt1

# if [[ "$OSTYPE" == "darwin"* ]]; then
#     alias python=python3
#     alias pip=pip3
#     export PATH=$HOME/Library/Python/3.10/bin:$PATH
# fi

# Node via Volta.
if [ -d "$HOME/.volta" ]; then
    export VOLTA_HOME="$HOME/.volta"
    export PATH="$VOLTA_HOME/bin:$PATH"
fi

# bun
export BUN_INSTALL="$HOME/.bun"
[[ -d $BUN_INSTALL ]] && export PATH="$BUN_INSTALL/bin:$PATH"

# root less podman
[[ -f "/usr/bin/podman" ]] && export DOCKER_HOST=unix:///run/user/$UID/podman/podman.sock

[[ -x "$(command -v sccache)" ]] && export RUSTC_WRAPPER="sccache"

export JUPYTERLAB_DIR="$HOME/.local/share/jupyter/lab"

# Neovim installed via bob.
[[ -d "$HOME/.local/share/bob/nvim-bin" ]] && export PATH="$HOME/.local/share/bob/nvim-bin:$PATH"

export EDITOR="nvim"
| true
|
5ff6ea52e4e0ba8e5940b0e5816d6c66b219aa93
|
Shell
|
bewt85/docker-etcdctl
|
/build.sh
|
UTF-8
| 1,283
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/bash
# Build Docker images for the project: a :latest image from the working
# tree, plus one tagged image per release/<version> git tag.

# Refuse to run unprivileged: the docker commands below need root.
if [[ $EUID -ne 0 ]]; then
  echo "Must be run as root. Use \`sudo -E $0\` to keep the DOCKER_ACCOUNT_NAME variable accessible"
  exit 1
fi

PROJECT=etcdctl
DIR="$( cd "$( dirname $0 )" && pwd )"

# Prefix the image name with the Docker Hub account when one is configured.
if [[ ! -z $DOCKER_ACCOUNT_NAME ]]; then
  container_name="${DOCKER_ACCOUNT_NAME}/${PROJECT}"
else
  container_name="${PROJECT}"
fi

cd $DIR

# Releases are git tags of the form release/<version>.
if [[ ! $(git tag | grep -e "^release/") ]]; then
  echo 'Could not find any releases. You can make some with `git tag release/<version_number> [commit reference]`'
fi
# Find *.sh / *.py files under the current directory that lack the
# owner-execute bit, warn about each, and make them executable.
make_scripts_executable() {
  # `! -perm -u=x` selects files whose owner cannot execute them. This
  # replaces the original `xargs ls -l | awk '/^...-/'` pipeline, which
  # broke on any path containing whitespace.
  find . -type f \( -name "*.sh" -o -name "*.py" \) ! -perm -u=x -print |
    while IFS= read -r file; do
      echo "WARNING: '$file' appears to be a script but is not executable. Updating permissions"
      chmod +x "$file"
    done
}
echo "Building ${container_name}:latest from $DIR"
make_scripts_executable
docker build -t ${container_name} $DIR

# Build one tagged image per release/<version> tag, each from a clean
# `git archive` export of that tag, so uncommitted working-tree changes
# can never leak into a released image.
for version in $(git tag | awk '/release\// { sub(/^release\//,""); print }'); do
  cd $DIR
  TEMP_DIR=$(mktemp -d)
  git archive "release/${version}" > ${TEMP_DIR}/${PROJECT}.tar
  cd $TEMP_DIR
  tar xf ${TEMP_DIR}/${PROJECT}.tar
  echo "Building ${container_name}:${version} from $DIR"
  make_scripts_executable
  docker build -t ${container_name}:${version} $TEMP_DIR
  rm -r $TEMP_DIR
done
| true
|
d4a5608a4b8d8634cc1ec18387d7093eb83ce3a6
|
Shell
|
66RING/dotfiles
|
/install.sh
|
UTF-8
| 748
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh
# Link this dotfiles checkout into $HOME: configs, local scripts and
# desktop entries, plus a couple of top-level rc files.

# STOW_DIR=$(pwd)/..
# TAR_DIR=${HOME}
# mkdir -p \
#     "$TAR_DIR/.local/bin" \
#     "$TAR_DIR/.local/share/applications"
# stow -t "$TAR_DIR" -d "$STOW_DIR" --ignore='.*\.md' --ignore='.*\.sh' --ignore='\.git.*' dotfiles
# remove
# stow -t "$TAR_DIR" -d "$STOW_DIR" -D dotfiles

git submodule update --init --recursive

mkdir -p \
    "$HOME/.local/bin" \
    "$HOME/.local/share/applications"

SCRIPT=$(readlink -f "$0")
BASEDIR=$(dirname "$SCRIPT")

# Quote the expansions so paths containing spaces survive word-splitting;
# the globs stay outside the quotes so they still expand.
ln -s "$BASEDIR/.config" "$HOME"
ln -s "$BASEDIR"/.local/bin/* "$HOME/.local/bin/"
ln -s "$BASEDIR"/.local/share/applications/* "$HOME/.local/share/applications/"
ln -s "$BASEDIR/.config/zsh/.zprofile" "$HOME/"
ln -s "$BASEDIR/.config/tmux/.tmux.conf" "$HOME/"

# pacman -S --needed - < pkglist.txt
| true
|
e0854930433001b304b347e6fe7a3f2b0723e7f3
|
Shell
|
trankmichael/intro
|
/comp15/lab5/run_tests
|
UTF-8
| 443
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
#
# run all the tests
#
# For each executable matching test*-?, run it, append its stdout to
# test_results, and report crashes and stderr chatter on the terminal.

cp /dev/null test_results
for t in test*-?
do
	echo "Testing $t..."
	rm -f core.*
	echo ":::: Test $t :::::" >> test_results

	# mktemp replaces the original predictable "$$"-named scratch files,
	# which a concurrent run (or a pre-planted symlink) could clobber.
	out=$(mktemp) || exit 1
	err=$(mktemp) || exit 1

	./"$t" > "$out" 2> "$err"
	# Exit status > 127 means the test was killed by a signal (crash).
	if test $? -gt 127
	then
		echo "$t CRASHED"
		rm -f core.*
	fi
	cat "$out" >> test_results
	if test -s "$err"
	then
		echo "cerr said: "
		cat "$err"
	fi
	echo "Output of $t: "
	cat "$out"
	rm -f "$out" "$err"
	echo ""
	echo "" >> test_results
done
| true
|
6e47a151cb5523f84625c30c3716905f3adcddf8
|
Shell
|
nvitucci/pyke
|
/make_release
|
UTF-8
| 7,516
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# make_release release_number
#
# Automated release script for pyke.  Cleans build artifacts, rebuilds the
# compiled krb, runs the test suite, regenerates the HTML docs, merges the
# release through the sibling Mercurial repos, builds the sdist, tags the
# release and pushes/uploads everything to sourceforge.
# Expects to be run from inside the release_1 working copy, with the
# sibling repos (pyke, pre_2to3_r1, pre_2to3) checked out next to it.
usage() {
echo "usage: make_release release_number" >&2
exit 2
}
[ $# -eq 1 ] || usage
set -e
# Directory names of the sibling Mercurial repositories (relative to the
# parent directory of this working copy).
RELEASE_1=release_1
PYKE=pyke
PRE_2TO3_R1=pre_2to3_r1
PRE_2TO3=pre_2to3
# Scratch file; $$ (this shell's PID) keeps concurrent runs apart.
TMPFILE=/tmp/make_release.$$
release_number="$1"
start_repo=`pwd`
# conditional commit -- only if "hg status" reports changes!
# all args passed to "hg commit"
do_commit() {
hg status > $TMPFILE
if [ -s $TMPFILE ]
then
#echo hg commit "$@"
hg commit "$@"
fi
rm -f $TMPFILE
}
# Raise error if "hg status" reports any changes!
# no args
check_status() {
hg status > $TMPFILE
if [ -s $TMPFILE ]
then
echo "ERROR: Uncommitted files:" >&2
cat $TMPFILE >&2
rm -f $TMPFILE
return 1
fi
rm -f $TMPFILE
}
# Raise error if "hg heads" reports multiple heads!
# optional -q arg to nix the error message to stderr
check_heads() {
hg heads --template '{desc|firstline}\n' > $TMPFILE
if [ `wc -l $TMPFILE | cut -f1 '-d '` -gt 1 ]
then
if [ x"$1" != x-q ]
then
echo "ERROR: Multiple Heads:" >&2
cat $TMPFILE >&2
fi
rm -f $TMPFILE
return 1
fi
rm -f $TMPFILE
}
# Run "hg fetch" and return error if no merge/commit done
# 1 arg, passed to "hg pull -u"
do_fetch() {
hg pull -u "$1"
if check_heads -q
then
# Single head after the pull: nothing was merged, report "no fetch done".
return 1
else
hg merge
check_heads
hg commit -m "Automated merge with $1"
fi
}
echo
echo "*********************************************************************"
echo " Deleting .pyc files"
echo "*********************************************************************"
echo
find . -name '*.pyc' -exec rm {} +
echo
echo "*********************************************************************"
echo " Rebuilding compiler_bc.py"
echo "*********************************************************************"
echo
dir=pyke/krb_compiler/compiled_krb
if [ ! -d "$dir" ]
then
mkdir "$dir"
fi
# Recompile the krb compiler itself, then move the result into place.
python <<!
from pyke import krb_compiler
krb_compiler.compile_krb('compiler', 'pyke.krb_compiler.compiled_krb',
'pyke/krb_compiler/compiled_krb',
'pyke/krb_compiler/compiler.krb')
!
mv pyke/krb_compiler/compiled_krb/compiler_bc.py pyke/krb_compiler
echo
echo "*************************************************************************"
echo " Running testpyke"
echo "*************************************************************************"
echo
./testpyke
echo
echo "*************************************************************************"
echo " Committing release documentation"
echo "*************************************************************************"
echo
do_commit -m "Release documentation for $release_number"
check_status # there shouldn't be any uncommitted files!
echo
echo "*************************************************************************"
echo " Regenerating HTML documentation"
echo "*************************************************************************"
echo
cd doc/source
bin/gen_html
cd ../..
do_commit -Am "Regenerated HTML documentation for $release_number"
echo
echo "*************************************************************************"
echo " Fetching from sourceforge"
echo "*************************************************************************"
echo
cd "../$RELEASE_1"
hg pull
cd "$start_repo"
# Re-run the test suite only if the fetch actually merged something new.
if do_fetch ../"$RELEASE_1"
then
./testpyke
fi
echo
echo "*************************************************************************"
echo " Merging into $PYKE"
echo "*************************************************************************"
echo
# Merge via a throwaway clone so a failed merge never dirties $PYKE itself.
cd "../$PYKE"
hg pull
cd ..
rm -rf "$PYKE"_temp
hg clone -U "$PYKE" "$PYKE"_temp
cd "$PYKE"_temp
hg update
hg fetch "$start_repo"
#./testpyke # I don't want errors here stopping the release!
hg push # back to $PYKE
cd ..
rm -rf "$PYKE"_temp
echo
echo "*************************************************************************"
echo " Merging into $PRE_2TO3_R1"
echo "*************************************************************************"
echo
cd "$PRE_2TO3_R1"
hg pull
cd ..
rm -rf "$PRE_2TO3_R1"_temp
hg clone -U "$PRE_2TO3_R1" "$PRE_2TO3_R1"_temp
cd "$PRE_2TO3_R1"_temp
hg update
hg fetch "$start_repo"
cd doc/source
bin/gen_html
cd ../..
do_commit -Am "Regenerated 3.1 HTML documenation for $release_number"
./run_pre_test
hg push # back to $PRE_2TO3_R1
cd ..
rm -rf "$PRE_2TO3_R1"_temp
echo
echo "*************************************************************************"
echo " Merging into $PRE_2TO3"
echo "*************************************************************************"
echo
cd "$PRE_2TO3"
hg pull
cd ..
rm -rf "$PRE_2TO3"_temp
hg clone -U "$PRE_2TO3" "$PRE_2TO3"_temp
cd "$PRE_2TO3"_temp
hg update
# do this first to avoid dealing with the $PRE_2TO3_R1 merge conflicts twice!
hg fetch ../"$PRE_2TO3_R1"
hg fetch ../"$PYKE"
#./run_pre_test # I don't want errors here stopping the release!
hg push # back to $PRE_2TO3
cd ..
rm -rf "$PRE_2TO3"_temp
echo
echo "*************************************************************************"
echo " Building release files into dist directory"
echo "*************************************************************************"
echo
cd "$start_repo"
rm -rf build dist pyke.egg-info
python setup.py -q sdist --formats zip
# NOTE(review): nothing in this script creates ../"$PRE_2TO3_R1"_temp_test;
# presumably ./run_pre_test (run above) leaves it behind — confirm.
cp ../"$PRE_2TO3_R1"_temp_test/dist/pyke*.zip dist
rm -rf ../"$PRE_2TO3_R1"_temp_test
cp RELEASE_NOTES-1.txt dist
echo
echo "*********************************************************************"
echo " Tagging release $release_number"
echo "*********************************************************************"
echo
hg tag -f "$release_number"
hg push # back to $RELEASE_1
############################################################################
# #
# This is where the script starts pushing the release to sourceforge. #
# #
############################################################################
echo
echo "*************************************************************************"
echo " Pushing sources to sourceforge"
echo "*************************************************************************"
echo
cd ../"$RELEASE_1"
hg push
cd ../"$PYKE"
hg push
cd ../"$PRE_2TO3_R1"
hg push
cd ../"$PRE_2TO3"
hg push
echo
echo "*********************************************************************"
echo " Copying release files to sourceforge"
echo "*********************************************************************"
echo
cd "$start_repo"
rsync -avP --delete -e ssh dist/ mtnyogi,pyke@frs.sourceforge.net:/home/frs/project/p/py/pyke/pyke/"$release_number"
echo
echo "*************************************************************************"
echo " Copying HTML documentation to sourceforge"
echo "*************************************************************************"
echo
rsync -avP --delete -e ssh doc/html/ mtnyogi,pyke@web.sourceforge.net:htdocs/
echo Done!
| true
|
56dd5e740067d840cd267f433cd597cb340f8c09
|
Shell
|
dwheltzel/Shell-Scripts-for-Oracle-DBAs
|
/manage_tablespaces.sh
|
UTF-8
| 3,925
| 2.90625
| 3
|
[] |
no_license
|
# Manage tablespaces and partitions created by incremental partitioning
#
# Author: Dennis Heltzel
#
# Generates (but does not execute) two DDL scripts:
#   ManageTS<db>-<ts>.sql     - create this period's ETL_/APP_ bigfile
#                               tablespaces if missing and repoint the
#                               default / "store in" tablespaces of
#                               partitioned ETL/APP tables to them.
#   ManageTS<db>-<ts>-ind.sql - align partitioned indexes' default
#                               tablespace with their table's.
. /home/oracle/bin/ora_funcs.sh
ORACLE_BASE=/u01/app/oracle
ORACLE_HOME=${ORACLE_BASE}/product/12.1.0.2/DbHome_2
PATH=$PATH:$ORACLE_HOME/bin
CRED=${CRED:-/}  # sqlplus credentials; "/" means OS authentication
# NOTE(review): RUN_DDL is never read in this script — confirm a caller uses it.
RUN_DDL=Y
TS_SUFFIX=`date '+%Y%m'`  # tablespace suffix, e.g. 202401
usage() {
echo "Usage: $0 [-d database name] [-s suffix for tablespaces]"
echo " -d database name - defaults to $ORACLE_SID"
echo " -s suffix for tablespaces - defaults to $TS_SUFFIX"
exit 1
}
# Handle parameters
while getopts ":d:s:" opt; do
case $opt in
d)
DB_NAME=$OPTARG
;;
s)
TS_SUFFIX=$OPTARG
;;
\?)
echo "Invalid option: -$OPTARG" >&2
usage
;;
:)
echo "Option -$OPTARG requires an argument." >&2
usage
;;
esac
done
DB_NAME=${DB_NAME:-${ORACLE_SID}}
# oe: set the Oracle environment for this SID (from ora_funcs.sh).
oe ${ORACLE_SID}
#echo "DB_NAME=${DB_NAME}:ORACLE_SID=${ORACLE_SID}:"
# Output file names share a timestamped base so runs never collide.
BASE_NAME=ManageTS${DB_NAME}-`date "+%y%m%d%H%M"`
REPORT_NAME=${BASE_NAME}.lst
SQL_NAME=${BASE_NAME}.sql
CMD_OUTPUT_NAME=${BASE_NAME}.out
#echo "DB_NAME: ${DB_NAME}"
#echo "REPORT_NAME: ${REPORT_NAME}"
#echo "SQL_NAME: ${SQL_NAME}"
# First pass: generate tablespace-creation DDL (MINUS filters out tablespaces
# that already exist) and ALTER TABLE statements moving partitioned ETL/APP
# tables' default and "store in" tablespaces to the new period.
(sqlplus -s ${CRED} as sysdba <<!
SET SERVEROUT ON SIZE UNLIMITED
SET FEED OFF
SET HEAD OFF
SET PAGES 0
SET LINES 200
SELECT 'create bigfile tablespace ETL_${TS_SUFFIX};' FROM dual
UNION ALL
SELECT 'create bigfile tablespace APP_${TS_SUFFIX};' FROM dual
MINUS
SELECT 'create bigfile tablespace '||tablespace_name||';' FROM dba_tablespaces;
-- change default tablespaces for partitioned tables (reference only,over-ridden by "store in" clause)
(SELECT 'alter table '||owner||'.'||table_name||' MODIFY DEFAULT ATTRIBUTES TABLESPACE ETL_${TS_SUFFIX};' FROM dba_tables WHERE partitioned = 'YES' AND owner IN ('ETL')
MINUS
SELECT 'alter table '||owner||'.'||table_name||' MODIFY DEFAULT ATTRIBUTES TABLESPACE ETL_${TS_SUFFIX};' FROM dba_part_tables WHERE owner IN ('ETL') AND def_tablespace_name IN ('ETL_${TS_SUFFIX}','SHORT_TERM'))
union all
(SELECT 'alter table '||owner||'.'||table_name||' MODIFY DEFAULT ATTRIBUTES TABLESPACE APP_${TS_SUFFIX};'
FROM dba_tables WHERE partitioned = 'YES' AND owner IN ('APP')
MINUS
SELECT 'alter table '||owner||'.'||table_name||' MODIFY DEFAULT ATTRIBUTES TABLESPACE APP_${TS_SUFFIX};'
FROM dba_part_tables WHERE owner IN ('APP') AND def_tablespace_name IN ('APP_${TS_SUFFIX}','SHORT_TERM'));
-- change "real" default tablespaces for partitioned tables
(SELECT 'alter table '||owner||'.'||table_name||' set store in (ETL_${TS_SUFFIX});' FROM dba_tables WHERE partitioned = 'YES' AND owner IN ('ETL')
MINUS
SELECT 'alter table '||o.owner||'.'||o.object_name||' set store in (ETL_${TS_SUFFIX});' FROM sys.insert_tsn_list$ l JOIN sys.ts$ ts ON (ts.ts# = l.ts#) JOIN dba_objects o ON (o.object_id = l.bo#)
WHERE ts.name IN ('ETL_${TS_SUFFIX}','SHORT_TERM') AND o.owner IN ('ETL'))
union all
(SELECT 'alter table '||owner||'.'||table_name||' set store in (APP_${TS_SUFFIX});'
FROM dba_tables WHERE partitioned = 'YES' AND owner IN ('APP')
MINUS
SELECT 'alter table '||o.owner||'.'||o.object_name||' set store in (APP_${TS_SUFFIX});' FROM sys.insert_tsn_list$ l JOIN sys.ts$ ts ON (ts.ts# = l.ts#) JOIN dba_objects o ON (o.object_id = l.bo#)
WHERE ts.name IN ('APP_${TS_SUFFIX}','SHORT_TERM') AND o.owner IN ('APP'));
exit
!
) > ${SQL_NAME}
ls -l ${SQL_NAME}
# Second pass: generate index-alignment DDL into a separate -ind.sql file.
SQL2_NAME=${BASE_NAME}-ind.sql
(sqlplus -s ${CRED} as sysdba <<!
SET SERVEROUT ON SIZE UNLIMITED
SET FEED OFF
SET HEAD OFF
SET PAGES 0
SET LINES 200
-- change the indexes to the new partitions
SELECT 'alter index '||i.owner||'.'||i.index_name||' MODIFY DEFAULT ATTRIBUTES TABLESPACE '||t.def_tablespace_name||';'
FROM dba_part_indexes i JOIN dba_part_tables t ON (i.owner = t.owner AND i.table_name = t.table_name)
WHERE i.owner NOT LIKE 'SYS%' AND i.index_name NOT LIKE 'SYS%' AND i.def_tablespace_name <> t.def_tablespace_name ORDER BY 1;
exit
!
) > ${SQL2_NAME}
ls -l ${SQL2_NAME}
| true
|
4ec984b3637bacecd5b1941976e92300dce117af
|
Shell
|
kobrl/workstation
|
/bin/install_workstation
|
UTF-8
| 913
| 3.015625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Bootstrap a CompoZed workstation: run sprout-wrap (soloist), link the vim
# and tmux configs, link ~/bin, extend the shell profile, then reboot.
# Assumes ~/compozed-workstation has already been cloned.
set -e

cd ~/compozed-workstation/
git submodule update --init

# Sprout wrap installation
cd ~/compozed-workstation/sprout-wrap
if [ ! -x /usr/bin/gcc ]; then
  # No compiler yet: trigger the Xcode command-line-tools installer (macOS).
  xcode-select --install
fi
sudo gem install bundler
sudo bundle install
# caffeinate keeps the machine awake for the long soloist run.
caffeinate bundle exec soloist

# Vim configs installation
rm -rf ~/.vim
ln -s ~/compozed-workstation/vim-config/ ~/.vim
~/.vim/bin/install

# Copy bin folder
rm -rf ~/bin
ln -s ~/compozed-workstation/bin/ ~/bin

# Add bin to path
# BUGFIX: single quotes so $PATH is expanded when .bash_profile is *sourced*.
# The original double-quoted string baked this script's PATH value into the
# profile at install time.
echo 'export PATH=$PATH:~/bin' >> ~/.bash_profile
echo "git config --global hub.protocol https" >> ~/.bash_profile
echo 'git config --global credential.helper "cache --timeout=86400"' >> ~/.bash_profile

# Tmux configs
rm -rf ~/.tmux.conf
ln -s ~/compozed-workstation/.tmux.conf ~/.tmux.conf

echo 'FINISH WORKSTATION INSTALLATION SUCCESSFULLY'
read -p "Hit ENTER to reboot or ctrl+c to exit" ;
echo 'Rebooting ...'
sudo reboot
| true
|
a3bc6e836d3b50efeff66e5052e9b7b0bb48cdec
|
Shell
|
Slaid480/Android-Universal-Fly-On-Mod
|
/system/etc/init.d/IO_Tweaks
|
MacCentralEurope
| 1,411
| 2.8125
| 3
|
[] |
no_license
|
#!/system/bin/sh
# I/O Tweaks For Fly-On Mod by Slaid480!
#============ Copyright (C) 2015 Salah Abouabdallah(Slaid480)===========#
# see <http://www.gnu.org/licenses/>.
#=======================================================================#
# BUGFIX: the license line above was missing its leading '#', so the shell
# tried (and failed) to execute a command named `see` on every boot.

# Remount root and /system read-write so the tweaks below can be written.
mount -o remount,rw /
mount -o remount,rw rootfs
mount -o remount,rw /system
busybox mount -o remount,rw /
busybox mount -o remount,rw rootfs
busybox mount -o remount,rw /system

# Start a fresh log file for this run.
FLY=/data/Fly-On/25IO.log
busybox rm -f $FLY
busybox touch $FLY
echo "# Fly-On Mod LOGGING ENGINE" | tee -a $FLY
echo "" | tee -a $FLY
echo "$( date +"%m-%d-%Y %H:%M:%S" ) Enabling I/O Tweaks please wait..." | tee -a $FLY;

# Tune every block-device queue that exposes the relevant sysfs knobs:
# flag as non-rotational, disable iostats accounting, deepen the request
# queue to 512, keep request merging enabled.
path=`busybox ls -d /sys/block/* 2>/dev/null`;
for S in $path;
do
if [ -e $S/queue/rotational ]; then
busybox echo "0" > $S/queue/rotational
fi
if [ -e $S/queue/iostats ]; then
busybox echo "0" > $S/queue/iostats
fi
if [ -e $S/queue/nr_requests ]; then
busybox echo "512" > $S/queue/nr_requests
fi
if [ -e $S/queue/nomerges ]; then
busybox echo "0" > $S/queue/nomerges
fi
done

# Remount relatime filesystems with noatime, and ext4 filesystems with
# performance-oriented options (discard, no barrier, 60s commit interval).
for L in $(busybox mount | grep relatime | cut -d " " -f3);
do
busybox mount -o noatime,remount $L
done
for M in $(busybox mount | grep ext4 | cut -d " " -f3);
do
busybox mount -o noatime,remount,rw,discard,barrier=0,commit=60,noauto_da_alloc,delalloc $M
done

echo "" | tee -a $FLY
echo "$( date +"%m-%d-%Y %H:%M:%S" ) I/O Tweaks Enabled, enjoy !" | tee -a $FLY;
| true
|
824042559360a5d2bf1c42079a648aaf7fb72acf
|
Shell
|
aerogear-attic/aerogear-xamarin-sdk
|
/scripts/test/update_showcase.test.js
|
UTF-8
| 7,161
| 2.875
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
// NOTE(review): this is a Mocha/Node test file, not a shell script — the
// '#!/bin/bash' shebang above is bogus.  Node strips a leading shebang line
// so it is harmless at runtime, but it should be removed or corrected.
'use strict'
// Tests for the showcase-app updater: verifies that NuGet package
// references and project references can be added to / removed from the
// Xamarin .csproj fixtures via showcase_operations + project_operations.
const assert = require('assert');
const fs = require('fs-extra')
const promisify = require("util").promisify
const xmlhandling = require('../modules/xml_handling')
const showcaseop = require('../modules/showcase_operations')
const projop = require('../modules/project_operations')
const readFile = promisify(fs.readFile)
describe('Showcase App updater', () => {
describe('', () => {
// Fixture project files; each test works on a throwaway ".copy" so the
// checked-in fixtures are never mutated.
const csProjFileNuGetsOrig = `Showcase.Test.NuGets.csproj`
const csProjFileNuGets = `${csProjFileNuGetsOrig}.copy`
const csProjFileProjDepsOrig = `Showcase.Test.ProjDeps.csproj`
const csProjFileProjDeps = `${csProjFileProjDepsOrig}.copy`
beforeEach(() => {
fs.copySync(`${__dirname}/${csProjFileNuGetsOrig}`, `${__dirname}/${csProjFileNuGets}`)
fs.copySync(`${__dirname}/${csProjFileProjDepsOrig}`, `${__dirname}/${csProjFileProjDeps}`)
})
it('should remove NuGet dependencies', async () => {
const config = {
"AeroGear.Mobile.Core": "0.0.7",
"AeroGear.Mobile.Auth": "0.0.7",
"AeroGear.Mobile.Security": "0.0.8"
}
await projop.processProject(`scripts/test/${csProjFileNuGets}`, config, showcaseop.removeNuGets, false, true)
// After removal, no <PackageReference Include="..."> may name a
// package listed in config.
const doc = await xmlhandling.openXML(`${__dirname}/${csProjFileNuGets}`, '')
const packageRefs = doc.getElementsByTagName("PackageReference")
for (let i = 0; i < packageRefs.length; i++) {
const element = packageRefs.item(i)
if (element.hasAttribute('Include')) {
assert.equal(Object.keys(config).includes(element.getAttribute('Include')), false, "Check that all NuGet dependencies removed properly")
}
}
});
describe("adding project dependencies", () => {
const config = [
"..\\..\\Core\\Core\\Core.csproj",
"..\\..\\Auth\\Auth\\Auth.csproj",
"..\\..\\Security\\Security\\Security.csproj"
]
// Count how many <ProjectReference Include> entries match config.
function getCount(doc) {
const projectRefs = doc.getElementsByTagName("ProjectReference")
let count = 0
for (let i = 0; i < projectRefs.length; i++) {
const element = projectRefs.item(i)
if (element.hasAttribute('Include')) {
if (config.includes(element.getAttribute('Include'))) count++
}
}
return count;
}
it('should add project dependencies', async () => {
await projop.processProject(`scripts/test/${csProjFileNuGets}`, config, showcaseop.addProjDeps, false, true)
const doc = await xmlhandling.openXML(`${__dirname}/${csProjFileNuGets}`, '')
const count = getCount(doc)
assert.equal(count, config.length, "Check that all project dependencies were added")
})
it('shouldn\'t add project dependencies, if they are already there', async () => {
await projop.processProject(`scripts/test/${csProjFileNuGets}`, config, showcaseop.addProjDeps, false, true)
let doc = await xmlhandling.openXML(`${__dirname}/${csProjFileNuGets}`, '')
const count = getCount(doc)
// A second add may reject duplicates by throwing — that's acceptable;
// only the resulting reference count matters.
try {
await projop.processProject(`scripts/test/${csProjFileNuGets}`, config, showcaseop.addProjDeps, false, true)
} catch (e) { }
doc = await xmlhandling.openXML(`${__dirname}/${csProjFileNuGets}`, '')
const newCount = getCount(doc)
assert.equal(newCount, count, "Check that no more project dependencies were added")
})
})
it('should remove project dependencies', async () => {
const config = [
"..\\..\\Core\\Core.Platform.Android\\Core.Platform.Android.csproj",
"..\\..\\Core\\Core\\Core.csproj",
"..\\..\\Security\\Security\\Security.csproj",
"..\\..\\Auth\\Auth\\Auth.csproj"
]
await projop.processProject(`scripts/test/${csProjFileProjDeps}`, config, showcaseop.removeProjDeps, false, true)
const doc = await xmlhandling.openXML(`${__dirname}/${csProjFileProjDeps}`, '')
const projectRefs = doc.getElementsByTagName("ProjectReference")
for (let i = 0; i < projectRefs.length; i++) {
const element = projectRefs.item(i)
if (element.hasAttribute('Include')) {
assert.equal(config.includes(element.getAttribute('Include')), false, "Check that all project dependencies removed properly")
}
}
});
describe("adding NuGet dependencies", () => {
// Count <PackageReference> entries whose Include *and* Version both
// match the config map.
function getCount(doc) {
const packageRefs = doc.getElementsByTagName("PackageReference")
let count = 0
for (let i = 0; i < packageRefs.length; i++) {
const element = packageRefs.item(i)
if (element.hasAttribute('Include')) {
const val = element.getAttribute('Include')
if (Object.keys(config).includes(val) && element.getAttribute("Version") == config[val]) count++
}
}
return count
}
const config = {
"AeroGear.Mobile.Core": "0.0.7",
"AeroGear.Mobile.Auth": "0.0.7",
"AeroGear.Mobile.Security": "0.0.8"
}
it('should add NuGet dependencies', async () => {
await projop.processProject(`scripts/test/${csProjFileProjDeps}`, config, showcaseop.addNuGets, false, true)
const doc = await xmlhandling.openXML(`${__dirname}/${csProjFileProjDeps}`, '')
const count = getCount(doc)
assert.equal(count, Object.keys(config).length, "Check that all NuGet dependencies were added")
});
it('shouldn\'t add NuGet dependencies more than once', async () => {
await projop.processProject(`scripts/test/${csProjFileProjDeps}`, config, showcaseop.addNuGets, false, true)
let doc = await xmlhandling.openXML(`${__dirname}/${csProjFileProjDeps}`, '')
const count = getCount(doc)
// Duplicate add may throw; swallow it and compare counts instead.
try {
await projop.processProject(`scripts/test/${csProjFileProjDeps}`, config, showcaseop.addNuGets, false, true)
} catch (e) {
}
doc = await xmlhandling.openXML(`${__dirname}/${csProjFileProjDeps}`, '')
const newCount = getCount(doc)
assert.strictEqual(newCount, count, "Check that NuGet dependencies are added only once")
});
})
// Clean up the throwaway fixture copies after every test.
afterEach(() => {
fs.removeSync(`${__dirname}/${csProjFileNuGets}`)
fs.removeSync(`${__dirname}/${csProjFileProjDeps}`)
})
});
});
| true
|
85d9bac6d26c7af2cff2828d86abdda0dfb38d59
|
Shell
|
namgivu/suitecrm-start
|
/00.suitcrm-setup-commands.sh
|
UTF-8
| 3,251
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#ref. https://www.vultr.com/docs/how-to-install-suitecrm-on-ubuntu-16-04
#os: ubuntu-16-04
# Provision SuiteCRM 7.10.5 on a bare Ubuntu 16.04 box: locale, LAMP stack,
# MariaDB database/user, Apache vhost, and the scheduler cron job.

#set locale (droplet missed the below two fields)
# ~/.bashrc belongs to the invoking user, so no sudo is needed (and sudo
# would only apply to `echo`, never to the `>>` redirection anyway).
echo "
export LANGUAGE='en_US.UTF-8'
export LC_ALL='en_US.UTF-8'
" >> ~/.bashrc && source ~/.bashrc

#update os
sudo apt update -y && sudo apt install -y && sudo apt autoremove

#install LAMP
sudo apt install -y apache2 mariadb-server php7.0 php7.0-mysql php7.0-gd php7.0-curl php7.0-imap libapache2-mod-php7.0 php7.0-mcrypt php7.0-xml php7.0-json php7.0-zip

#tweak apache
# `| sudo tee` (not `sudo echo ... >>`): a shell redirection is performed by
# the *unprivileged* invoking shell, so only tee can write the root-owned file.
echo "
; BEGIN suitecrm setting
post_max_size = 64M
upload_max_filesize = 64M
max_input_time = 120
memory_limit = 256M
; END suitecrm setting
" | sudo tee --append /etc/php/7.0/apache2/php.ini

#enable the IMAP module with the following command
sudo phpenmod imap

#setup mysql
sudo mysql_secure_installation
#root/root as login user/pass
#yes for the rest

# BUGFIX: use a function, not an alias — aliases are not expanded in
# non-interactive shells, so `alias msql=...` would make the call below fail
# when this file is executed as a script.
msql() { mysql -u root -proot -e "$1"; }
s='suitecrm'; db_name=$s; db_user=$s; db_pass=$s
msql "
DROP DATABASE IF EXISTS $db_name;
CREATE DATABASE $db_name;
CREATE USER '$db_user'@'localhost' IDENTIFIED BY '$db_pass';
--DROP USER IF EXISTS '$db_user'@'localhost'; --TODO why this is not working?
GRANT ALL PRIVILEGES ON $db_name.* TO '$db_user'@'localhost';
FLUSH PRIVILEGES;
"

#download & extract suitecrm package
sudo apt install -y unzip
d='/tmp/suitecrm' ; mkdir -p $d && cd $d && wget -q https://suitecrm.com/files/160/SuiteCRM-7.10.5/284/SuiteCRM-7.10.5.zip
suitecrm='SuiteCRM-7.10.5'; cd $d && unzip "$suitecrm.zip" && sudo mv "$suitecrm" /var/www/html/suitecrm

#configure Apache for suitecrm
site_name='suitecrm'
host_ip='159.65.2.33'
app_home="/var/www/html/$site_name"
sudo chown -R www-data:www-data "$app_home"
sudo chmod -R 755 "$app_home"
# BUGFIX: the original `sudo echo "..." > /etc/apache2/...` fails with
# "Permission denied" because the redirection runs as the invoking user,
# not as root.  Pipe through `sudo tee` instead (stdout silenced to keep
# the original quiet behavior).
echo "
<VirtualHost *:80>
ServerAdmin nam.vu@unicorn.vn
DocumentRoot /var/www/html/suitecrm/
ServerName $host_ip
ServerAlias $host_ip
<Directory /var/www/html/suitecrm/>
Options FollowSymLinks
AllowOverride All
</Directory>
ErrorLog /var/log/apache2/suitecrm-error_log
CustomLog /var/log/apache2/suitecrm-access_log common
</VirtualHost>
" | sudo tee "/etc/apache2/sites-available/$site_name.conf" > /dev/null
sudo a2ensite "$site_name"
sudo systemctl restart apache2

#utils
echo "
alias reload_apache='sudo systemctl restart apache2'
" >> ~/.bashrc && source ~/.bashrc

#proceed web-install
note="
follow guide on <your-host>/install.php and fix all the error you may encounter
admin login to be admin/admin for user/pass
"

#setup crontab
note="
ref. guide from suitecrm's web-install@after-apache-deployed
In order to run SuiteCRM Schedulers, edit your web server user's crontab file with this command:
sudo crontab -e -u www-data
and add the following line to the crontab file:
* * * * * cd /var/www/html/suitecrm; php -f cron.php > /dev/null 2>&1
You should do this only AFTER the installation is concluded.
"
crontab -l > cron_file #write out current crontab to file
echo "* * * * * cd /var/www/html/suitecrm; php -f cron.php > /dev/null 2>&1" >> cron_file #echo new cron into cron file
crontab cron_file #install new cron file
rm cron_file #clean up
| true
|
5a512bf97f1baf1da1159430810d35dd8f466d6c
|
Shell
|
jmoggridge/sklearn_example_pipelines
|
/MoggridgeJ_A2_all_my_programs/scripts/gather_results.sh
|
UTF-8
| 1,244
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Gather all results .csv files and figures
#
# Copies per-fold SVN / logistic-regression result CSVs into ./results and
# builds one combined k-fold CSV per dataset, echoing each section to the
# console as it is collected.

datasets="abalone cancer diabetes"

mkdir -p results

# Copy per-fold results (fold 0..4, keeping the original per-fold order:
# <ds>_svn then <ds>_logreg for each dataset).
for x in 0 1 2 3 4
do
  for ds in $datasets
  do
    cp "${ds}-folded-0${x}/svn_performance.csv" "./results/${ds}_svn_${x}.csv"
    cp "${ds}-folded-0${x}/logreg_performance.csv" "./results/${ds}_logreg_${x}.csv"
  done
done

header="algorithm,precision,accuracy,recall,f1"

# Seed each combined CSV with the header row.
for ds in $datasets
do
  echo "$header" > "./results/kfold_${ds}.csv"
done

# Print one dataset's section to stdout and append its rows to the
# combined CSV.  BUGFIX: the original used `echo "\n\n"`, which prints the
# literal characters \n\n (bash echo does not interpret escapes without -e);
# printf emits the intended blank lines.
print_section() {
  local title=$1 ds=$2
  printf '\n\n'
  echo "$title"
  echo "--------------"
  echo "$header"
  for result in ./results/${ds}_*.csv
  do
    cat "$result"
    cat "$result" >> "./results/kfold_${ds}.csv"
  done
}

print_section "Abalone" abalone
print_section "Cancer" cancer
print_section "Diabetes" diabetes
| true
|
3acf109e60eee7f33b16d2750a65d06aa7f6d280
|
Shell
|
awsque/learn
|
/bash/rename.sh
|
UTF-8
| 252
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Rename every *.jpeg / *.png regular file in the current directory to
# image-<n>.<original extension>, numbering from 1.
#
# BUGFIX: the original `for img in $(find . -maxdepth 1 -name '*.jpeg' -o
# -name '*.png' -type f)` had two defects: (1) find's -o binds looser than
# the implicit -a, so -type f applied only to the *.png branch; (2) the
# unquoted $(find) word-splits on spaces in file names.  A glob loop with
# nullglob avoids both.
shopt -s nullglob
count=1
for img in ./*.jpeg ./*.png
do
  [ -f "$img" ] || continue   # regular files only, as -type f intended
  newimg="image-${count}.${img##*.}"
  mv -- "$img" "$newimg"
  echo 'rename success!!!!'
  count=$((count + 1))
done
| true
|
7dce6216917db4a1c2d72633f3a05e5c3d22f431
|
Shell
|
ngkim/vagrant
|
/kilo/include/openstack/02_endpoint.sh
|
UTF-8
| 2,490
| 2.859375
| 3
|
[] |
no_license
|
# Helpers that register OpenStack Kilo service-catalog entries and endpoints
# (keystone, glance, nova, neutron, heat, heat-cfn, ceilometer).  Each
# function shells out to the `openstack` CLI; the caller must set
# ${REGION_NAME} before invoking any *_endpoint function.

# Identity (keystone)
create_keystone_service() {
openstack service create \
--name keystone --description "OpenStack Identity" identity
}
create_keystone_endpoint() {
openstack endpoint create \
--publicurl http://controller:5000/v2.0 \
--internalurl http://controller:5000/v2.0 \
--adminurl http://controller:35357/v2.0 \
--region ${REGION_NAME} \
identity
}
# Image (glance)
create_glance_service() {
openstack service create \
--name glance --description "OpenStack Image service" image
}
create_glance_endpoint() {
openstack endpoint create \
--publicurl http://controller:9292 \
--internalurl http://controller:9292 \
--adminurl http://controller:9292 \
--region ${REGION_NAME} \
image
}
# Compute (nova); the escaped %\(tenant_id\)s keeps the literal
# %(tenant_id)s template in the URL.
create_nova_service() {
openstack service create --name nova \
--description "OpenStack Compute" compute
}
create_nova_endpoint() {
openstack endpoint create \
--publicurl http://controller:8774/v2/%\(tenant_id\)s \
--internalurl http://controller:8774/v2/%\(tenant_id\)s \
--adminurl http://controller:8774/v2/%\(tenant_id\)s \
--region ${REGION_NAME} \
compute
}
# Networking (neutron)
create_neutron_service() {
openstack service create --name neutron \
--description "OpenStack Networking" network
}
create_neutron_endpoint() {
openstack endpoint create \
--publicurl http://controller:9696 \
--adminurl http://controller:9696 \
--internalurl http://controller:9696 \
--region ${REGION_NAME} \
network
}
# Orchestration (heat / heat-cfn)
create_heat_service() {
openstack service create \
--name heat --description "Orchestration" orchestration
}
create_heat_cfn_service() {
openstack service create --name heat-cfn \
--description "Orchestration" cloudformation
}
create_heat_orchestration_endpoint() {
openstack endpoint create \
--publicurl http://controller:8004/v1/%\(tenant_id\)s \
--internalurl http://controller:8004/v1/%\(tenant_id\)s \
--adminurl http://controller:8004/v1/%\(tenant_id\)s \
--region ${REGION_NAME} \
orchestration
}
create_heat_cfn_endpoint() {
openstack endpoint create \
--publicurl http://controller:8000/v1 \
--internalurl http://controller:8000/v1 \
--adminurl http://controller:8000/v1 \
--region ${REGION_NAME} \
cloudformation
}
# Telemetry (ceilometer)
create_ceilometer_service() {
openstack service create --name ceilometer \
--description "Telemetry" metering
}
create_ceilometer_endpoint() {
openstack endpoint create \
--publicurl http://controller:8777 \
--internalurl http://controller:8777 \
--adminurl http://controller:8777 \
--region ${REGION_NAME} \
metering
}
# CONSISTENCY FIX: create_ceilometer_endpoint previously hard-coded
# `--region RegionOne`; every other endpoint here uses ${REGION_NAME}.
| true
|
763102ee5da854d384da961dad9ed9e1f5936e2f
|
Shell
|
frossi933/IAA
|
/TP2/ejb/getError.sh
|
UTF-8
| 386
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Extract the "Test discreto" (discrete test) error percentages from the
# algorithm's output files sal_<n> and append the per-n median to `tab`.
# (Original comments were Spanish: "Obtenemos el test discreto a partir de
# las salidas del algoritmo".)
# ./getError
for n in 2 5 10 20 40
do
# Percentages after "Test discreto:" for the 10 runs, '%' stripped.
ARR=(`grep "Test discreto:" sal_${n} | awk -F ':' '{print $2}' | sed s/'%'/' '/g`)
# NOTE(review): bash arrays are 0-based, so {1..10} skips ARR[0] (the first
# run) and echoes the unset ARR[10] as a blank line.  The blank line is then
# discarded again by the unquoted word-splitting in the ARRSORT assignment,
# leaving only 9 values, so ARRSORT[5]/[6] are not the median pair of all 10
# runs.  Suspected off-by-one ({0..9} with ([4]+[5])/2 would be the true
# median of 10) — confirm against the sal_<n> files before changing, since
# the published tab values depend on the current behavior.
for i in {1..10}
do
echo ${ARR[$i]} >> tmp
done
ARRSORT=( `sort -g tmp` )
rm tmp
# Median as the mean of the two middle sorted values, 2 decimal places.
MED=`echo "scale=2; ( ${ARRSORT[5]} + ${ARRSORT[6]} ) / 2.0" | bc`
echo $n $MED >> tab
done
| true
|
ee7d6bcca43d8a31a131568c9b4846d25ae61ae5
|
Shell
|
mradovic95/WebshopComposer
|
/autocomplete.sh
|
UTF-8
| 842
| 2.84375
| 3
|
[] |
no_license
|
#autocomplete
#http://fahdshariff.blogspot.com/2011/04/writing-your-own-bash-completion.html
#tr
#http://askubuntu.com/questions/164056/how-do-i-combine-all-lines-in-a-text-file-into-a-single-line
#cat myfile.txt | tr -d '\n' ' ' > oneline.txt
#cat myfile.txt | tr -d '\n' > oneline.txt
#sed
#http://www.cyberciti.biz/faq/unix-linux-replace-string-words-in-many-files/
# Bash completion for ./bin/initmodule: offer every module directory that
# contains an init.sh (path relative to ./modules/, up to 5 levels deep).
_initmodule() {
  local word=${COMP_WORDS[COMP_CWORD]}
  local candidates
  candidates=$(find . -maxdepth 5 -type f -name init.sh \
    | sed -e 's/.\/modules\///g' -e 's/\/init\.sh//g' \
    | tr '\n' ' ')
  COMPREPLY=( $(compgen -W "$candidates" -- $word) )
}
complete -F _initmodule ./bin/initmodule

# Bash completion for ./bin/initmanifest: offer every manifest name
# (files under manifest/ with the .sh extension stripped).
_initmanifest() {
  local word=${COMP_WORDS[COMP_CWORD]}
  local candidates
  candidates=$(find manifest/ -type f \
    | sed -e 's/manifest\///g' -e 's/\.sh//g' \
    | tr '\n' ' ')
  COMPREPLY=( $(compgen -W "$candidates" -- $word) )
}
complete -F _initmanifest ./bin/initmanifest
| true
|
3f9e975e4752396193e6a63815f8bf2280742c0b
|
Shell
|
feedsbrain/pssh-box-static
|
/src/build.sh
|
UTF-8
| 944
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Build a single-file pssh-box binary with PyInstaller for the current OS.
set +e

cd $(dirname $0)

VERSION=1.0.0
OUTPUT_PATH="unknown"

# TODO: Detect processor architecture
# For the moment we only support x64
echo ""
echo "Assigning build output for OS: $OSTYPE"
case "$OSTYPE" in
  linux-gnu)
    OUTPUT_PATH=../bin/linux/x64
    ;;
  darwin*)
    OUTPUT_PATH=../bin/darwin/x64
    ;;
  cygwin | msys | win32)
    OUTPUT_PATH=../bin/win32/x64
    ;;
esac

# Start compiling if OS supported
if [[ "$OUTPUT_PATH" == "unknown" ]]; then
  echo ""
  echo "Unsupported OS: $OSTYPE"
  echo ""
else
  echo "Compiling static binary for $OSTYPE ..."
  echo ""
  pyinstaller --clean --onefile --paths ./pyproto --paths ./pyproto/packager/media/base --hidden-import pkgutil --hidden-import six --distpath $OUTPUT_PATH pssh-box.py
fi
| true
|
2c29e99a161bd1799e092230c470d5d23863c0a8
|
Shell
|
webscale-networks/mod_pagespeed
|
/devel/lots_of_vhosts.sh
|
UTF-8
| 2,010
| 3.921875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: morlovich@google.com (Maksim Orlovich)
#
# Helpers for doing experiments with lots of vhosts.
#
# usage:
# scripts/lots_of_vhosts.sh --config | --traffic
#
# You can also set envvar NUM_VHOSTS to configure how many hosts to use.
set -e # exit script if any command returns an error
set -u # exit the script if any variable is uninitialized
NUM_VHOSTS=${NUM_VHOSTS:-10000}
function usage {
cat <<EOF >&2
Usage:
scripts/lots_of_vhosts.sh --config | --traffic
--config generates a suffix for pagespeed.conf
--traffic generates a list of URLs for trace_stress_test.sh
You can also set environment variable NUM_VHOSTS to control the number of
virtual hosts produced.
See also https://github.com/pagespeed/mod_pagespeed/wiki/Memory-Profiling
EOF
}
function config {
echo "NameVirtualHost *:8080"
for i in $(seq 0 $NUM_VHOSTS); do
echo "<VirtualHost *:8080>"
echo " DocumentRoot $HOME/apache2/htdocs/"
echo " ServerName vhost"$i".example.com"
echo " ModPagespeed on"
echo " ModPagespeedFileCachePath \"/tmp/vhost\""
echo " ModPagespeedBlockingRewriteKey \"foo"$i"\""
echo "</VirtualHost>"
done
}
# Emit one example-page URL per configured vhost (vhost0 ... vhostN).
traffic() {
  for ((i = 0; i <= NUM_VHOSTS; i++)); do
    printf 'http://vhost%d.example.com/mod_pagespeed_example/\n' "$i"
  done
}
# Exactly one mode flag is required.
if [ $# -ne 1 ]; then
usage
exit 1
fi
# Dispatch to the selected generator; anything unrecognized prints usage.
case $1 in
--config)
config;;
--traffic)
traffic;;
*)
usage
exit 1
;;
esac
| true
|
aa6fe2094b5112c76c5734e1dec719e7043a0c6f
|
Shell
|
davidbeermann/dotdotdot
|
/scripts/get_yarn.sh
|
UTF-8
| 560
| 3.25
| 3
|
[] |
no_license
|
# Install Yarn from the official Debian package repository.
echo "Install Yarn"
echo "More info: https://yarnpkg.com/"
echo "-------------------------------"

# Abort early when curl is unavailable (https://stackoverflow.com/a/26759734).
if ! command -v curl >/dev/null 2>&1; then
  echo 'Error: curl is not installed.' >&2
  exit 1
fi

echo "1. Adding Debian package repository"
yarn_repo="https://dl.yarnpkg.com/debian/"
curl -sS "${yarn_repo}pubkey.gpg" | sudo apt-key add -
echo "deb ${yarn_repo} stable main" | sudo tee /etc/apt/sources.list.d/yarn.list

echo "2. Installing Yarn"
sudo apt update --quiet && sudo apt install --no-install-recommends --yes yarn
exit 0
| true
|
e66e51247aa50932634fe5d95ab722bca1848b92
|
Shell
|
tordans/osmbc
|
/test.sh
|
UTF-8
| 699
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Start the test module using Istanbul for code coverage.
# On Travis ($TRAVIS set to "TRUE") the coverage report is uploaded to
# codecov.io; on any other machine an HTML report is generated locally
# and nothing is uploaded.
#
# It is started by npm test ("test": "NODE_ENV=test ./test$TRAVIS.sh"
# in package.json), with $TRAVIS empty on non-Travis machines.
#
if [ "$TRAVIS" = "TRUE" ]
then
  echo "Start Travis Test With Coverage Upload"
  istanbul cover ./node_modules/mocha/bin/_mocha --report lcovonly -- -R min && cat ./coverage/lcov.info | ./node_modules/codecov.io/bin/codecov.io.js && rm -rf ./coverage
else
  # Bug fix: this branch runs precisely when NOT on Travis, but the old
  # message claimed "Start Travis Test Without Coverage Upload".
  echo "Start Local Test Without Coverage Upload"
  istanbul cover ./node_modules/mocha/bin/_mocha --report html
fi
| true
|
bc0c8c7ae9340f12fcbae278e2697a1e2111cadd
|
Shell
|
arghyagod-coder/hydra
|
/linux_install.sh
|
UTF-8
| 495
| 3.140625
| 3
|
[
"MIT",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Download the latest hydra release binary and put it on the PATH for
# bash, fish and zsh.
echo "Downloading hydra..."
wget https://github.com/Shravan-1908/hydra/releases/latest/download/hydra-linux-amd64
echo "Adding hydra into PATH..."
mkdir -p ~/.hydra
mv ./hydra-linux-amd64 ~/.hydra/hydra
chmod +x ~/.hydra/hydra
# Bug fix: write the export literally (single quotes). The old version
# expanded $PATH at install time, freezing the installer's PATH into the
# rc files instead of extending the user's PATH at shell startup.
echo 'export PATH=$PATH:~/.hydra' >> ~/.bashrc
# Bug fix: single-quote the fish command so fish (not this bash script,
# where it is empty) expands $fish_user_paths.
fish -c 'set -U fish_user_paths ~/.hydra/ $fish_user_paths'
echo 'export PATH=$PATH:~/.hydra' >> ~/.zshrc
echo "hydra installation is completed!"
echo "You need to restart the shell to use hydra."
| true
|
09047fb3dd0775345351cd84a6e21daad087584c
|
Shell
|
StephanSchw/Sample-Code
|
/_PHP/ProcessControl/webscript.sh
|
UTF-8
| 1,415
| 4.25
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# sudo web script allowing user www-data to run commands with root privilegs
#
# visudo:
# www-data ALL=NOPASSWD:/var/sudowebscript.sh
#
ProgramBinPath=(
'apache2::/etc/init.d/apache2'
'oidentd::/etc/init.d/oidentd'
)
# Start the given program unless it already appears in the process table.
# $1 - program key from the ProgramBinPath map (e.g. "apache2").
# Status messages are printed in German ("ist bereits gestartet" =
# "is already started").
start_prog() {
prog=$1
# Grep the process table for $prog, excluding grep itself and this script.
# NOTE(review): unanchored substring match — any process whose command line
# merely contains $prog also counts; confirm this is acceptable.
PID="$(ps auxw | grep -v grep | grep -v $0 | grep $prog | awk {'print $2'})"
if [[ ! -z $PID ]] ; then
echo "$prog ist bereits gestartet"
else
# Resolve the init-script path from ProgramBinPath and launch it.
$(GetProgBinPath $prog) start &
sleep 1
# Re-check the process table to report success ("gestartet") or
# failure ("konnte nicht gestartet werden").
PID="$(ps auxw | grep -v grep | grep -v $0 | grep $prog | awk {'print $2'})"
if [[ -z $PID ]] ; then
echo "$prog konnte nicht gestartet werden!?"
else
echo "$prog gestartet"
fi
fi
}
# Stop the given program if it is running: SIGKILL by name, wait, and kill
# again if an exact-name match survives.
# $1 - program key / process name (e.g. "apache2").
# Messages are German ("beendet" = stopped, "gekillt" = killed).
stop_prog() {
prog=$1
# Same unanchored process-table probe as in start_prog.
PID="$(ps auxw | grep -v grep | grep -v $0 | grep $prog | awk {'print $2'})"
if [[ -z $PID ]] ; then
echo "$prog ist bereits beendet"
else
# First pass: SIGKILL every process matching the name.
pkill -9 $prog >/dev/null 2>&1
sleep 2
# Second check uses an exact name match (-x), unlike the first probe.
PID="$(pgrep -x $prog)"
if [[ ! -n $PID ]] ; then
echo "$prog beendet"
else
pkill -9 $prog
echo "$prog gekillt"
fi
fi
}
# Look up the init-script path for a program key in the ProgramBinPath
# array (entries have the form "key::path"); prints the path on a match.
GetProgBinPath() {
prog=$1
for index in "${ProgramBinPath[@]}" ; do
KEY="${index%%::*}"
VALUE="${index##*::}"
case "$KEY" in
"$prog")
echo "$VALUE"
break
;;
esac
done
}
case "$1" in
start)
start_prog "$2"
;;
stop)
stop_prog "$2"
;;
restart)
stop_prog "$2"
sleep 2
start_prog "$2"
;;
reb) /sbin/shutdown -r now ;;
*) echo "ERROR: invalid parameter: $1 (for $0)"; exit 1 ;;
esac
exit 0
| true
|
99afc601374fcfe47ab81170b0991a4ca4c190d8
|
Shell
|
cjlovering/Aminon
|
/test-scripts/scanner_query.sh
|
UTF-8
| 123
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Fetch the given address; when the request succeeds, record that the
# simulated scan attack worked.
#   $1 - IP address / URL to fetch
IP="$1"
# Bug fix: the original read "if[[ $? -eq 0 ]]" (no space after "if"),
# which is a bash syntax error; also $IP was unquoted. Test wget's exit
# status directly in the condition.
if wget "$IP" --no-dns-cache; then
  echo "SCAN ATTACK WORKED" >> scan_results.txt
fi
| true
|
4e6dcaf8f55dd29fafbf5326a1e4bc2b8111a37f
|
Shell
|
nunofsantos/LaundryAlarm
|
/setup-service.sh
|
UTF-8
| 434
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install and start the laundryalarm systemd unit, creating its log
# directory (owned by the "pi" user) first.
set -x
# Stop at the first failed step instead of blindly continuing.
set -e
mkdir -p /var/log/laundryalarm
touch /var/log/laundryalarm/laundryalarm.log
# "user:group" is the portable chown separator; the old "pi.pi" form is
# deprecated and rejected by newer coreutils. -R covers dir and log file.
chown -R pi:pi /var/log/laundryalarm
cp laundryalarm.service /lib/systemd/system/laundryalarm.service
chmod 644 /lib/systemd/system/laundryalarm.service
systemctl daemon-reload
systemctl enable laundryalarm.service
systemctl start laundryalarm.service
systemctl status laundryalarm.service
| true
|
e88807c379720ae363b6b5472e28e96fb3521745
|
Shell
|
ijayden-lung/hpc
|
/Jun_test_RNA_seq/tools/perl-5.30.2/Policy.sh.2020060112
|
UTF-8
| 6,214
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/sh
#
# This file was produced by running the Policy_sh.SH script, which
# gets its values from config.sh, which is generally produced by
# running Configure.
#
# The idea here is to distill in one place the common site-wide
# "policy" answers (such as installation directories) that are
# to be "sticky". If you keep the file Policy.sh around in
# the same directory as you are building Perl, then Configure will
# (by default) load up the Policy.sh file just before the
# platform-specific hints file and rewrite it at the end.
#
# The sequence of events is as follows:
# A: If you are NOT re-using an old config.sh:
# 1. At start-up, Configure loads up the defaults from the
# os-specific hints/osname_osvers.sh file and any previous
# Policy.sh file.
# 2. At the end, Configure runs Policy_sh.SH, which creates
# Policy.sh, overwriting a previous Policy.sh if necessary.
#
# B: If you are re-using an old config.sh:
# 1. At start-up, Configure loads up the defaults from config.sh,
# ignoring any previous Policy.sh file.
# 2. At the end, Configure runs Policy_sh.SH, which creates
# Policy.sh, overwriting a previous Policy.sh if necessary.
#
# Thus the Policy.sh file gets overwritten each time
# Configure is run. Any variables you add to Policy.sh will be lost
# unless you copy Policy.sh somewhere else before running Configure.
#
# Allow Configure command-line overrides; usually these won't be
# needed, but something like -Dprefix=/test/location can be quite
# useful for testing out new versions.
#Site-specific values:
case "$perladmin" in
'') perladmin='bio-longyk@ln02.ts10k' ;;
esac
# Installation prefixes. Allow a Configure -D override. You
# may wish to reinstall perl under a different prefix, perhaps
# in order to test a different configuration.
# For an explanation of the installation directories, see the
# INSTALL file section on "Installation Directories".
case "$prefix" in
'') prefix='/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2' ;;
esac
# By default, the next three are the same as $prefix.
# If the user changes $prefix, and previously $siteprefix was the
# same as $prefix, then change $siteprefix as well.
# Use similar logic for $vendorprefix and $installprefix.
case "$siteprefix" in
'') if test "/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2" = "/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2"; then
siteprefix="$prefix"
else
siteprefix='/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2'
fi
;;
esac
case "$vendorprefix" in
'') if test "" = "/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2"; then
vendorprefix="$prefix"
else
vendorprefix=''
fi
;;
esac
# Where installperl puts things.
case "$installprefix" in
'') if test "/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2" = "/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2"; then
installprefix="$prefix"
else
installprefix='/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2'
fi
;;
esac
# Installation directives. Note that each one comes in three flavors.
# For example, we have privlib, privlibexp, and installprivlib.
# privlib is for private (to perl) library files.
# privlibexp is the same, except any '~' the user gave to Configure
# is expanded to the user's home directory. This is figured
# out automatically by Configure, so you don't have to include it here.
# installprivlib is for systems (such as those running AFS) that
# need to distinguish between the place where things
# get installed and where they finally will reside. As of 5.005_6x,
# this too is handled automatically by Configure based on
# /home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2, so it isn't included here either.
#
# Note also that there are three broad hierarchies of installation
# directories, as discussed in the INSTALL file under
# "Installation Directories":
#
# =item Directories for the perl distribution
#
# =item Directories for site-specific add-on files
#
# =item Directories for vendor-supplied add-on files
#
# See Porting/Glossary for the definitions of these names, and see the
# INSTALL file for further explanation and some examples.
#
# In each case, if your previous value was the default, leave it commented
# out. That way, if you override prefix, all of these will be
# automatically adjusted.
#
# WARNING: Be especially careful about architecture-dependent and
# version-dependent names, particularly if you reuse this file for
# different versions of perl.
# bin='/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2/bin'
# scriptdir='/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2/bin'
# privlib='/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2/lib/5.30.2'
# archlib='/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2/lib/5.30.2/x86_64-linux-thread-multi'
# man1dir='/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2/man/man1'
# man3dir='/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2/man/man3'
# man1ext='1'
# man3ext='3'
# Preserving custom html1dir
html1dir=' '
# Preserving custom html3dir
html3dir=' '
# sitebin='/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2/bin'
# sitescript='/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2/bin'
# sitelib='/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2/lib/site_perl/5.30.2'
# sitearch='/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2/lib/site_perl/5.30.2/x86_64-linux-thread-multi'
# siteman1dir='/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2/man/man1'
# siteman3dir='/home/bio-longyk/Jun_test_RNA_seq/tools/perl-5.30.2/man/man3'
# sitehtml1dir=''
# sitehtml3dir=''
# vendorbin=''
# vendorscript=''
# vendorlib=''
# vendorarch=''
# Preserving custom vendorman1dir
vendorman1dir=' '
# Preserving custom vendorman3dir
vendorman3dir=' '
# Preserving custom vendorhtml1dir
vendorhtml1dir=' '
# Preserving custom vendorhtml3dir
vendorhtml3dir=' '
# Lastly, you may add additional items here. For example, to set the
# pager to your local favorite value, uncomment the following line in
# the original Policy_sh.SH file and re-run sh Policy_sh.SH.
#
# pager='/usr/bin/less -R'
#
# A full Glossary of all the config.sh variables is in the file
# Porting/Glossary.
| true
|
dfc3d033c0389fe03ca7c625d84477b714866d08
|
Shell
|
michaelchen1225/homeclass
|
/param.sh
|
UTF-8
| 207
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo of bash positional-parameter handling: $0..$2, the ${10} braces
# requirement, shift, $#, $*, and forwarding "$@" into a function.

# Print the script name and first two arguments as seen inside a function
# (a function receives its own positional parameters from the call site).
show_param() {
echo '$0: '$0
echo '$1: '$1
echo '$2: '$2
}
echo '$0: '$0
echo '$1: '$1
echo '$2: '$2
# $10 is parsed as ${1} followed by a literal "0" — this line shows the trap.
echo '$10: '$10
# ${10} (with braces) is the actual tenth argument.
echo '${10}: '${10}
# Drop $1; remaining arguments shift down by one position.
shift 1
echo '$#: '$#
echo '$*: '$*
show_param "$@"
| true
|
5601755de8adcfdaf6950d5a197d23a3731aab00
|
Shell
|
AndreiBorac/DeityGuard
|
/2-fingen/overlay/dg-any-base/tmp/dg/rc_local_inner.sh
|
UTF-8
| 332
| 2.890625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/false
# Inner rc.local dispatcher: sources /tmp/dg/conf, then any per-stage
# drop-in files under /tmp/dg/as_<stage>/, and finally runs f_<stage>.
# (#!/bin/false: the file is meant to be invoked by a wrapper with an
# explicit interpreter, not executed directly.)
set -o xtrace
set -o errexit
set -o nounset
set -o pipefail
shopt -s failglob
shopt -s nullglob
# Stage name, e.g. "boot"; defaults to empty so nounset does not trip.
ARG1="${1-}"
. /tmp/dg/conf
# f_defs is presumably defined by /tmp/dg/conf — TODO confirm.
f_defs
# Source every regular file in the stage's drop-in directory, if present
# (nullglob makes an empty directory a no-op).
if [ -d /tmp/dg/as_"$ARG1" ]
then
for i in /tmp/dg/as_"$ARG1"/*
do
if [ -f "$i" ]
then
. "$i"
fi
done
fi
# Run the stage entry point, e.g. f_boot.
f_"$ARG1"
echo '+OK (rc_local_inner.sh) ['"$ARG1"']'
| true
|
f753298f992924e50d07de27ae9c884cf141af8c
|
Shell
|
carsonyan25/ansible
|
/TD/scripts/tools/set_ip_and_hostname.sh
|
UTF-8
| 405
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Append a static IP to the eth0 ifcfg file, set the hostname, and reboot.
#   $1 - IP address to assign
#   $2 - hostname to set
IP=$1
NAME=$2

# Fail fast with a distinct exit code when either argument is missing.
verify_args() {
  if test -z "$IP" ; then
    echo "parameter 1 must be ip address"
    exit 1
  fi
  if test -z "$NAME" ; then
    echo "parameter 2 must be hostname"
    exit 2
  fi
}

# Persist the address in the RHEL-style interface config for eth0.
set_ip() {
  echo "IPADDR=${IP}" >> /etc/sysconfig/network-scripts/ifcfg-eth0
}

# Persist the hostname and apply it immediately.
set_hostname() {
  echo "${NAME}" > /etc/hostname
  # Bug fix: quote the expansion — an unquoted ${NAME} would be word-split.
  hostname -b "${NAME}"
}

verify_args
set_ip
set_hostname
reboot now
| true
|
3907ba95fc5979a09a1b8e1bc624ca04634285de
|
Shell
|
avallonking/ScienceScripts
|
/sci/sen.call.sh
|
UTF-8
| 1,564
| 2.78125
| 3
|
[] |
no_license
|
#call variants with sentieon
#!/bin/sh
#Constant and Enviroment
refseq="/data/home/lijiaj/reference/combined_ucsc-hg19_EBV.fa"
dbsnp="/data/home/lijiaj/reference/dbsnp_138.hg19.vcf"
regions_human="/data/home/lijiaj/reference/wholegenomeregions_human.bed"
regions_EBV="/data/home/lijiaj/reference/wholegenomeregions_EBV.bed"
sample_dir=/data/home/guoym/1-NPC_genomics_project/1-NPC_WGS_data/3-GATK
release_dir=/share/apps/sentieon/201603
work_dir=/data/home/lijiaj/data/guoymProject/analysis/truth_sentieon
gvcf_option="--emit_mode gvcf"
export SENTIEON_LICENSE=login01.hpc.sysu:8990
#Main
cd /data/home/lijiaj/data/guoymProject/sample/1X
for i in *
do
#cd $work_dir
#test -e $i.human.vcf && continue
bamfile=$i\_sorted_PCRremoved_realigned_recal.bam
#$release_dir/bin/sentieon driver -r $refseq --interval $regions_human -t 10 -i $sample_dir/$bamfile --algo Haplotyper -d $dbsnp $gvcf_option --emit_conf=30 --call_conf=30 $i.human.g.vcf
$release_dir/bin/sentieon driver -r $refseq --interval $regions_EBV -t 10 -i $sample_dir/$bamfile --algo Haplotyper -d $dbsnp $gvcf_option --emit_conf=30 --call_conf=30 $i.EBV.g.vcf
$release_dir/bin/sentieon driver -r $refseq --interval $regions_EBV -t 10 -i $sample_dir/$bamfile --algo Haplotyper -d $dbsnp --emit_conf=30 --call_conf=30 $i.EBV.vcf
#$release_dir/bin/sentieon driver -r $refseq -t 10 --algo GVCFtyper -v $i.human.g.vcf -d $dbsnp $i.human.vcf
$release_dir/bin/sentieon driver -r $refseq -t 10 --algo GVCFtyper -v $i.EBV.g.vcf -d $dbsnp $i.EBV.combine.vcf
#rm $i.human.g.vcf $i.EBV.g.vcf
done
| true
|
d4d1df5c186dfc3a50f9636767023252eb3edb29
|
Shell
|
tkekan/programming
|
/user_setup.sh
|
UTF-8
| 2,531
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
path="/usr/bin"
name=`whoami`
tenant=`whoami`
pass=Juniper1
tmpfile=`mktemp`
host=`hostname`
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=nova
export OS_AUTH_URL=http://$host:35357/v2.0
$path/keystone tenant-create --name $tenant --description "$tenant Tenant" 2>/dev/null
$path/keystone user-create --name $name --pass $pass --email $name@juniper.net 2>/dev/null
if [ $? != 0 ]
then
echo -e "\nCumulus user \"$name\" and tenant \"$tenant\" already present."
echo -e "\nHere is your Cumulus environment variables:\nexport OS_TENANT_NAME=$tenant\nexport OS_USERNAME=$name\nexport OS_PASSWORD=$pass\nexport OS_AUTH_URL=http://$host:5000/v2.0\n"
exit 1
fi
$path/keystone user-role-add --tenant $tenant --user $name --role _member_
# Create tenant network for internal IPs
export OS_TENANT_NAME=$name
export OS_USERNAME=$name
export OS_PASSWORD=$pass
export OS_AUTH_URL=http://$host:5000/v2.0
$path/neutron net-create $name-net
$path/neutron subnet-create $name-net --name $name-subnet --gateway 192.168.1.1 192.168.1.0/24
$path/neutron router-create $name-router
$path/neutron router-interface-add $name-router $name-subnet
$path/neutron router-gateway-set $name-router ext-net
# Create security group rules to allow TCP, ICMP and UDP traffic to VMs
$path/nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0
$path/nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
$path/nova secgroup-add-rule default udp 1 65535 0.0.0.0/0
# Generate and send welcome email to user."
echo -e "\n===Welcome to cumulus pod($host)===\n" >> $tmpfile
echo -e "Here is your cumulus environment variables:" >> $tmpfile
echo -e "=================================================" >> $tmpfile
echo -e "export OS_TENANT_NAME=$name" >> $tmpfile
echo -e "export OS_USERNAME=$name" >> $tmpfile
echo -e "export OS_PASSWORD=$pass" >> $tmpfile
echo -e "export OS_AUTH_URL=http://$host:5000/v2.0" >> $tmpfile
echo -e "=================================================\n" >> $tmpfile
echo -e "Here is your horizon dashboard access info for this cumulus pod:\n" >> $tmpfile
echo -e "=================================================" >> $tmpfile
echo -e "Web URL: http://$host/horizon" >> $tmpfile
echo -e "Username: $name" >> $tmpfile
echo -e "Password: $pass" >> $tmpfile
echo -e "=================================================\n" >> $tmpfile
echo -e "Thanks!" >> $tmpfile
echo -e "cumulus-dev" >> $tmpfile
cat $tmpfile | mailx -s "Welcome $name!!!" $name@juniper.net 2>/dev/null;
rm -f $tmpfile
| true
|
8b42971f500f2947cabb38c7be9afece0281ad24
|
Shell
|
lzhlee/px3se-linux
|
/px3se_linux/device/rockchip/px3-se/S90_vehicle_exit
|
UTF-8
| 399
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/sh
# Init script: on "start", signal the vehicle driver by writing 88 to
# /dev/vehicle and report OK/FAIL from the write's exit status.
if [ "$1" = "start" ]; then
	if [ -e /dev/vehicle ] ; then
		if echo 88 > /dev/vehicle; then
			echo "OK"
		else
			echo "FAIL"
		fi
	else
		echo "/dev/vehicle do not exist"
	fi
else
	echo "Usage: $0 {start}"
	exit 1
fi
exit 0
| true
|
e868dda10e3c414f995d7e5629f00501d704b58b
|
Shell
|
notlixiang/robotics_setup
|
/qdirstat.sh
|
UTF-8
| 571
| 3.265625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Install QDirStat (https://github.com/shundhammer/qdirstat), a disk
# utilization visualizer, via the package manager for the detected OS.
echo "####################"
echo "# Qdirstat disk utilization visualization"
echo "####################"
echo ""
echo "# https://github.com/shundhammer/qdirstat"
echo ""

OS=$(uname)
case "$OS" in
  'Linux')
    OS='Linux'
    # Packaged through Nathan Rennie-Waldock's PPA.
    sudo add-apt-repository ppa:nathan-renniewaldock/qdirstat
    sudo apt-get update
    sudo apt-get install -y qdirstat
    ;;
  'FreeBSD')
    OS='FreeBSD'
    alias ls='ls -G'
    ;;
  'WindowsNT')
    OS='Windows'
    ;;
  'Darwin')
    OS='Mac'
    brew install qdirstat
    ;;
  'SunOS')
    OS='Solaris'
    ;;
  'AIX') ;;
  *) ;;
esac
| true
|
2eaaabacb1efe34bd8493efc6d8377c53cd2975d
|
Shell
|
mechmind/dotfiles
|
/.bin/respawn-scratchpad
|
UTF-8
| 899
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Show the urxvt scratchpad terminal in i3, first spawning it if no window
# with classname "urxvt-scratchpad" exists yet.
if ! xdotool search --classname urxvt-scratchpad ; then
# Launch the terminal (running ensure-tmux) and poll until its X window
# actually appears.
urxvt -name urxvt-scratchpad -e ensure-tmux $DISPLAY &>/dev/null &
while ! xdotool search --classname urxvt-scratchpad ; do
sleep 0.05
done
# hack for preserving scratchpad geometry.
# currently geometry partially lost when window is moved into scratchpad
i3-msg '[instance="urxvt-scratchpad"] move to scratchpad'
i3-msg '[instance="urxvt-scratchpad"] scratchpad show'
sleep 0.1
# Multihead detection: with more than one connected xrandr output, use
# only half the screen width.
width_percent=100
if [ "`xrandr | awk '$2 == "connected"' | wc -l`" -gt 1 ] ; then
width_percent=50
fi
xdotool search --classname urxvt-scratchpad windowsize ${width_percent}% 74%
xdotool search --classname urxvt-scratchpad windowmove 0 0
i3-msg '[instance="urxvt-scratchpad"] scratchpad show'
fi
# Toggle the (now existing) scratchpad into view.
i3-msg '[instance="urxvt-scratchpad"] scratchpad show'
| true
|
095f600c647b79024fb01685f5ff65c0932c75d8
|
Shell
|
intel-analytics/BigDL
|
/python/nano/test/run-nano-pytorch-tests-ipex.sh
|
UTF-8
| 535
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run the IPEX-related bigdl-nano pytorch tests and report wall-clock time.
# Expects ANALYTICS_ZOO_ROOT and FTP_URI in the environment.
export ANALYTICS_ZOO_ROOT=${ANALYTICS_ZOO_ROOT}
export NANO_HOME=${ANALYTICS_ZOO_ROOT}/python/nano/src
export PYTORCH_NANO_TEST_DIR=${ANALYTICS_ZOO_ROOT}/python/nano/test/pytorch
# Fetch the CIFAR-10 fixture used by the tests. Runs before `set -e`, so a
# failed download does not abort the script.
wget -nv ${FTP_URI}/analytics-zoo-data/cifar-10-python.tar.gz -P /tmp/data
set -e
# Select only the IPEX-related tests via -k 'ipex'; assumes ipex is
# installed in the environment.
echo "# Start testing IPEX"
start=$(date "+%s")
python -m pytest -s ${PYTORCH_NANO_TEST_DIR}/tests/ -k 'ipex'
now=$(date "+%s")
time=$((now-start))
echo "Bigdl-nano tests finished"
echo "Time used:$time seconds"
| true
|
7f3243029e614217329be8ad9c83e7dc44de574d
|
Shell
|
manjdk/Carbon-Based-Life-Forms
|
/scripts/create-table.sh
|
UTF-8
| 960
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
# Create the "Mineral" table (with a clientId global secondary index) in a
# local DynamoDB instance, unless it already exists.
ENDPOINT_URL="http://localhost:8000"
MINERAL_TABLE="Mineral"

# Bug fix: the original describe-table command ended with a stray trailing
# backslash, which joined the following "if" line onto the aws command and
# left a bare "then" — a shell syntax error. Probe for the table directly
# in the condition instead, silencing the probe's output.
if ! aws dynamodb describe-table \
  --region eu-west-1 \
  --endpoint-url ${ENDPOINT_URL} \
  --table-name ${MINERAL_TABLE} >/dev/null 2>&1
then
  aws dynamodb create-table \
  --region eu-west-1 \
  --endpoint-url ${ENDPOINT_URL} \
  --table-name ${MINERAL_TABLE} \
  --attribute-definitions \
  AttributeName=id,AttributeType=S \
  AttributeName=clientId,AttributeType=S \
  --key-schema \
  AttributeName=id,KeyType=HASH \
  --provisioned-throughput ReadCapacityUnits=10,WriteCapacityUnits=10 \
  --global-secondary-indexes '[
  {
  "IndexName": "clientId-index",
  "KeySchema": [
  {"AttributeName": "clientId", "KeyType": "HASH"}
  ],
  "Projection": {
  "ProjectionType": "ALL"
  },
  "ProvisionedThroughput": {
  "ReadCapacityUnits": 10,"WriteCapacityUnits": 10
  }
  }
  ]'
fi
| true
|
b87f8665d1b37d20705bdfde0a3e909e3522a692
|
Shell
|
fultonj/edge-standalone
|
/standalone-central.sh
|
UTF-8
| 2,442
| 2.984375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
export ZONE=central
export INTERFACE=eth0
export IP=$(ip a s $INTERFACE | grep 192 | awk {'print $2'} | sed s/\\/24//g)
export NETMASK=24
export DNS_SERVERS=192.168.122.1
export NTP_SERVERS=pool.ntp.org
sudo sh -c "echo standalone-${ZONE} > /etc/hostname ; hostname -F /etc/hostname"
cat <<EOF > $HOME/standalone_parameters.yaml
parameter_defaults:
CertmongerCA: local
CloudName: $IP
ControlPlaneStaticRoutes: []
Debug: true
DeploymentUser: $USER
DnsServers: $DNS_SERVERS
NtpServer: $NTP_SERVERS
# needed for vip & pacemaker
KernelIpNonLocalBind: 1
DockerInsecureRegistryAddress:
- $IP:8787
NeutronPublicInterface: $INTERFACE
# domain name used by the host
NeutronDnsDomain: localdomain
# re-use ctlplane bridge for public net
NeutronBridgeMappings: datacentre:br-ctlplane
NeutronPhysicalBridge: br-ctlplane
# enable to force metadata for public net
#NeutronEnableForceMetadata: true
StandaloneEnableRoutedNetworks: false
StandaloneHomeDir: $HOME
StandaloneLocalMtu: 1400
# Needed if running in a VM
StandaloneExtraConfig:
oslo_messaging_notify_use_ssl: false
oslo_messaging_rpc_use_ssl: false
EOF
if [ ! -f $HOME/ceph_parameters.yaml ]; then
cat <<EOF > $HOME/ceph_parameters.yaml
parameter_defaults:
CephAnsibleDisksConfig:
devices:
- /dev/loop3
journal_size: 1024
CephAnsibleExtraConfig:
osd_scenario: collocated
osd_objectstore: filestore
cluster_network: 192.168.24.0/24
public_network: 192.168.24.0/24
CephPoolDefaultPgNum: 32
CephPoolDefaultSize: 1
CephAnsiblePlaybookVerbosity: 3
LocalCephAnsibleFetchDirectoryBackup: /root/ceph_ansible_fetch
EOF
fi
cat <<EOF > $HOME/central_parameters.yaml
parameter_defaults:
GlanceBackend: swift
StandaloneExtraConfig:
cinder::backend_host: ''
EOF
if [[ ! -d ~/templates ]]; then
ln -s /usr/share/openstack-tripleo-heat-templates ~/templates
fi
sudo pkill -9 heat-all # Remove any previously running heat processes
sudo openstack tripleo deploy \
--templates ~/templates \
--local-ip=$IP/$NETMASK \
-e ~/templates/environments/standalone.yaml \
-e ~/templates/environments/ceph-ansible/ceph-ansible.yaml \
-r ~/templates/roles/Standalone.yaml \
-e ~/containers-prepare-parameters.yaml \
-e ~/standalone_parameters.yaml \
-e ~/ceph_parameters.yaml \
-e ~/central_parameters.yaml \
--output-dir $HOME \
--keep-running \
--standalone $@
| true
|
8a694899dfa1c02ad466975171d2b2cc8303b156
|
Shell
|
chuckis/n-nine-hundreed
|
/free_rootfs.sh
|
UTF-8
| 1,298
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh
# Nokia N900 Maemo 5 Script to free up space on rootfs
# ignoring errors when creating dirs that may already exist
# Moving ?? icons to /home/opt
mkdir -p /home/opt/usr/share/icons 2> /dev/null
cp -r /usr/share/icons/* /home/opt/usr/share/icons
rm -r /usr/share/icons
ln -s /home/opt/usr/share/icons /usr/share/icons
# Moving video on start-up to /home/opt
mkdir -p /home/opt/usr/share/hildon-welcome 2> /dev/null
cp -r /usr/share/hildon-welcome/* /home/opt/usr/share/hildon-welcome
rm -r /usr/share/hildon-welcome
ln -s /home/opt/usr/share/hildon-welcome /usr/share/hildon-welcome
# ??
mkdir -p /home/opt/usr/share/pixmaps 2> /dev/null
cp -r /usr/share/pixmaps/* /home/opt/usr/share/pixmaps
rm -r /usr/share/pixmaps
ln -s /home/opt/usr/share/pixmaps /usr/share/pixmaps
# Moving 'apt cache' to /home/opt - Valid until Bug 5746 is fixed.
mkdir -p /home/opt/var/cache/apt 2> /dev/null
cp -r /var/cache/apt/* /home/opt/var/cache/apt
rm -r /var/cache/apt
ln -s /home/opt/var/cache/apt /var/cache/apt
# Moving locales Source
mv /usr/share/locale /opt
ln -s /opt/locale /usr/share/locale
#sudo gainroot
mv /usr/share/nokia-maps /home/opt/
ln -s /home/opt/nokia-maps /usr/share/nokia-maps
mv /usr/share/tutorial-applet /home/opt
ln -s /home/opt/tutorial-applet /usr/share/tutorial-applet
| true
|
9dc65e7b3d252c3a53c1a9e9f5fb762f12a44618
|
Shell
|
glynnbird/celsius
|
/data/populate.sh
|
UTF-8
| 456
| 3.203125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Create the "logger" CouchDB database and load its design documents.
# Requires $COUCH_URL in the environment and the ccurl CLI on PATH.

# Ensure the CouchDB endpoint is configured.
if [ -z "$COUCH_URL" ]; then
  echo "Need to set COUCH_URL in environment"
  echo " e.g. export COUCH_URL=http://127.0.0.1:5984"
  exit 1
fi

# Ensure the ccurl client is available.
if ! hash ccurl 2>/dev/null; then
  echo >&2 "Need 'ccurl' installed. Try 'sudo npm install -g ccurl'"
  exit 1
fi

echo "Creating the database"
ccurl -X PUT /logger
echo "Adding design docs"
ccurl -X POST -d @design.json /logger/_bulk_docs
| true
|
16c108e872175c9ba266944885ee58ac0c95ceb0
|
Shell
|
TimWolla/dockerdns-knot
|
/docker-entrypoint.sh
|
UTF-8
| 688
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
# Entrypoint for the Knot DNS container: on first run (no confdb yet),
# bootstrap the configuration database and create the zone named by
# $KNOT_ZONE, then hand over to knotd in the foreground.
mkdir /run/knot
chown knot:knot /run/knot
chown knot:knot /var/lib/knot/
if [ ! -d "/var/lib/knot/confdb" ]; then
# First start: a zone name is mandatory for bootstrapping.
if [ -z "$KNOT_ZONE" ]; then
echo >&2 'You need to specify a KNOT_ZONE'
exit 1
fi
# Start a temporary daemon and poll until its control socket answers.
knotd -d
until knotc status
do
echo "Waiting"
done
# Import the static config into the confdb, then restart the daemon
# against that confdb.
knotc conf-import /etc/knot/knot.conf
knotc stop
knotd -d -C /var/lib/knot/confdb/
until knotc status
do
echo "Waiting"
done
# Declare the zone inside a configuration transaction.
knotc conf-begin
knotc conf-set "zone[$KNOT_ZONE]"
knotc conf-diff
knotc conf-commit
# Seed the zone with a minimal SOA record in a zone transaction.
knotc zone-begin $KNOT_ZONE
knotc zone-set $KNOT_ZONE @ 3600 SOA ns hostmaster 1 86400 900 691200 3600
knotc zone-commit $KNOT_ZONE
knotc stop
fi
# Normal path: run knotd as PID 1 (exec) against the confdb.
exec knotd -C /var/lib/knot/confdb/
| true
|
99c58bdb34888bf3291f58a9c8adb46f9c7e801b
|
Shell
|
YasuhiroOsajima/coder-automate
|
/5_setup_drawio.sh
|
UTF-8
| 464
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
# https://hub.docker.com/r/fjudith/draw.io
# 8080 is http
# 8443 is https
# If using LETS_ENCRYPT_ENABLED=true, port 80 is cert-bot
DRAW_IO_VERSION=13.1.1
firewall-cmd --zone=public --add-service=http
firewall-cmd --zone=public --add-service=https --permanent
docker run -t -d \
-e LETS_ENCRYPT_ENABLED=true \
-e PUBLIC_DNS=${SERVER_FQDN} \
--name="drawio" \
-p 80:80 \
-p 443:8443 fjudith/draw.io:${DRAW_IO_VERSION}
firewall-cmd --reload
| true
|
8201416cc940401ef9c17dc3972309d4f68d9b24
|
Shell
|
fariquelme/g14_dgpu_acpi
|
/nv-gpu-manager.sh
|
UTF-8
| 1,707
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
if [ "$#" -eq "0" ]
then
echo -e "ERROR: No arguments supplied, please provide a valid argument:"
echo -e "> -k,--kill\n\tCuts the gpu power off"
echo -e "> -s,--start\n\tTurns the gpu power on"
else
case "$1" in
-k|--kill )
# Choose integrated gpu for graphics
sudo prime-select intel
# Unload Nvidia Modules
sudo modprobe -r nvidia
sudo modprobe -r nvidia_modeset
# Check if nvidia GPU is being used
if xrandr --listproviders | grep NVIDIA; then
echo "GPU is in use for PRIME, keeping on"
else
# Load acpi-call-dkms kenrel module
sudo modprobe acpi_call
# Turn GPU off with acpi_call
sudo sh -c 'echo "\\_SB.PCI0.GPP0.PG00._OFF" > /proc/acpi/call'
echo "GPU powered off"
fi
;;
-s|--start )
# Choose nvidia gpu for graphics
sudo prime-select on-demand
# Load Nvidia Modules
sudo modprobe nvidia
sudo modprobe nvidia_modeset
# Load acpi-call-dkms kenrel module
sudo modprobe acpi_call
# Turn GPU on with acpi_call
sudo sh -c 'echo "\\_SB.PCI0.GPP0.PG00._ON" > /proc/acpi/call'
echo "GPU powered on"
;;
*)
echo "Please provide a valid argument:"
echo -e "> -k,--kill\n\tCuts the gpu power off"
echo -e "> -s,--start\n\tTurns the gpu power on"
;;
esac
fi
| true
|
97ab1d4da4f3c3f4d7703ae9270bb6212b01de54
|
Shell
|
asaaki/luft
|
/cross/container-setup.sh
|
UTF-8
| 898
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
set -xeuo pipefail
# find other supported archs here: https://github.com/rust-lang/rustup#other-installation-methods
# RUSTUP_INIT_URL=https://static.rust-lang.org/rustup/dist/aarch64-unknown-linux-gnueabihf/rustup-init
RUSTUP_INIT_URL=https://static.rust-lang.org/rustup/dist/armv7-unknown-linux-gnueabihf/rustup-init
apt-get update
apt-get install -y --no-install-recommends \
build-essential autoconf automake file gcc g++ make m4 pkg-config \
ca-certificates curl wget git gnupg2 \
libc6-dev libusb-1.0-0-dev libssl-dev zlib1g-dev
curl --proto '=https' --tlsv1.2 -sSf -o /tmp/rustup-init $RUSTUP_INIT_URL
chmod +x /tmp/rustup-init
/tmp/rustup-init -y --no-modify-path
cat > ~/.bashrc <<-EOF
source $HOME/.cargo/env
EOF
cat > ~/.cargo/config <<-EOF
# defaults should be fine
EOF
source $HOME/.cargo/env
rustup toolchain list
rustup target list --installed
cargo version
| true
|
6b210076c288b4a8613b5518bfc231405f59423a
|
Shell
|
zzz3639/EpiBLAST_data
|
/experimentsettings/sampleselect.sh
|
UTF-8
| 556
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Select samples to be analysed by EpiBLAST.
# Usage: ./run.sh roadmapsamples core18idx
#   $1 - roadmap sample table (first column: sample id)
#   $2 - index listing the samples that have the 18-state core annotation
samples=$1
core18idx=$2

# Split the full table by class (lines containing the word SD / C).
grep -E "\<SD\>" "${samples}" > "${samples}SD"
grep -E "\<C\>" "${samples}" > "${samples}C"

# Bug fix: -f so a first run (no leftover output file) does not error out.
rm -f "${samples}_core18"

# Keep only the rows whose sample id appears in the core18 index.
while read -r line ; do
  read -r -a strline <<< "$line"
  # grep -c replaces the old `grep | wc -l` pipeline; expansions quoted.
  existsample=$(grep -c "${strline[0]}" "${core18idx}")
  if [ "${existsample}" != 0 ]
  then
    echo "$line" >> "${samples}_core18"
  fi
done < "${samples}"

# Split the filtered table by class as well.
grep -E "\<SD\>" "${samples}_core18" > "${samples}_core18SD"
grep -E "\<C\>" "${samples}_core18" > "${samples}_core18C"
| true
|
36e6d62d60126d5bc885c29ecd994a0ce3a6c35a
|
Shell
|
qian-zhu/script
|
/mktable.sh
|
UTF-8
| 591
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Print an HTML table skeleton to the temporary file /tmp/mktable and copy
# it to the clipboard; usable from vim via "!mktable 3 4".
#
# $1 : row number of table
# $2 : column number of table
if [ -z "$1" ] || [ -z "$2" ]; then
	echo "Please input row and column"
	exit -1;
fi

{
	printf '<table>\n'
	for ((row = 0; row < $1; row++)); do
		printf '\t<tr>\n'
		for ((col = 0; col < $2; col++)); do
			printf '\t\t<td> </td>'
		done
		printf '\n\t</tr>\n'
	done
	printf '</table>\n'
} > /tmp/mktable
cat /tmp/mktable | xsel -b
| true
|
eb43329b048894c880b7a7829e651c522381e3a4
|
Shell
|
indrajithbandara/ghci
|
/gce/utils/commands.sh
|
UTF-8
| 2,362
| 4.15625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Some common methods to talk to GCE
set -eu

# Print a log message prefixed with "[script:calling-function:line]".
function _log() {
  echo "[$(basename "$0"):${FUNCNAME[2]}:${BASH_LINENO[1]}]" "$@"
}

# Print an INFO log message, but only when VERBOSE is set to "yes".
function log() {
  if [ "${VERBOSE-}" = yes ]; then
    echo -n "INFO: "
    _log "$@"
  fi
}

# Test whether $1 is the name of an existing instance on GCE
# (the list output has a header line, hence "> 1").
function test_vm() {
  (( $(gcloud compute instances list --filter="name=$1" | wc -l) > 1 ))
}

# Wait for a VM $1 in zone $2 to be up and running using ssh.
# This function will wait for at most $3 seconds (default 60),
# probing with command $4 (default /bin/true).
function wait_vm() {
  local vm="$1"
  local zone="$2"
  local timeout="${3-60}"  # Wait for 1 minute maximum by default
  local cmd="${4-/bin/true}"
  local starttime="$(date +%s)"
  while (( "$(date +%s)" - "$starttime" < "$timeout" )); do
    # gcloud compute ssh forwards the return code of the executed command.
    if gcloud compute ssh --zone="$zone" --command "${cmd}" "$vm" &>/dev/null
    then
      return 0
    fi
  done
  return 1
}

# SSH to a VM $1 on zone $2 and execute the command given by the rest of
# the arguments.  Arguments naming existing files have their contents
# inlined; other arguments are taken as literal command lines.
function ssh_command() {
  local TAG="$1"
  local LOCATION="$2"
  local tmpdir="${TMPDIR:-/tmp}"
  # ${tmp} points to a file containing the list of commands to execute.
  # Declare and assign separately so a mktemp failure is not masked by
  # the exit status of `local` (matters under `set -e`).
  local tmp
  tmp="$(mktemp "${tmpdir%%/}/vm-ssh.XXXXXXXX")"
  trap "rm -f '${tmp}'" EXIT
  shift 2
  # Truncate the list of commands
  echo -n >"${tmp}"
  # And then add the commands provided as argument.
  for i in "$@"; do
    if [ -f "$i" ]; then
      cat "$i" >>"${tmp}"
    else
      echo "$i" >>"${tmp}"
    fi
  done
  # Feed the script over stdin (was a useless `cat | gcloud` pipe).
  gcloud compute ssh --zone="${LOCATION}" \
      --command "cat >/tmp/s.sh; sudo bash /tmp/s.sh; rm /tmp/s.sh" \
      "${TAG}" <"${tmp}"
  rm -f "${tmp}"
  trap - EXIT
}
| true
|
fbca2172a10e55f1fec91191c5944f987da8e55d
|
Shell
|
bnamestka/screamingdata
|
/cmd/sd
|
UTF-8
| 822
| 3.921875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Prepare a freshly flashed Raspberry Pi SD card for headless use:
# enable SSH and write a wpa_supplicant config for the given wifi network.
set -e

# Mount point of the SD card's boot partition (macOS-style /Volumes path).
PI_SD=/Volumes/boot
# Directory containing this script, resolved even when invoked via a relative path.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"

# source utils (provides exit_with_error, used below)
source $DIR/../lib/utils.sh

# parse options — presumably sets SSID_NAME / SSID_PASSWORD; verify in lib/parse-options.sh
source $DIR/../lib/parse-options.sh

# ensure sd card is mounted
if [ ! -d $PI_SD ]; then
  exit_with_error "Could not find the SD card. Is it mounted?"
fi

# ensure required options are supplied
if [ -z "$SSID_NAME" ] || [ -z "$SSID_PASSWORD" ]; then
  exit_with_error "You must specify --ssid-name, and --ssid-password to set up the SD card!"
fi

cd $PI_SD

# enable SSH access (Raspbian enables sshd when a file named "ssh" exists on boot)
touch ssh

# enable headless wifi
cat > wpa_supplicant.conf <<EOF
country=US
ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
update_config=1

network={
    ssid="${SSID_NAME}"
    psk="${SSID_PASSWORD}"
}
EOF

echo "It is now safe to eject the SD card!"
| true
|
8fc29f8893e730d544408663f569432e45ce2004
|
Shell
|
lrascao/simple_web_server
|
/scripts/setup
|
UTF-8
| 1,115
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh

# Print a message on stderr and abort.
die() { echo "$@" 1>&2 ; exit 1; }

usage()
{
    echo "setup"
    echo "  options:"
    echo "    [--endpoint <http://localhost:8585>]"
    echo
    echo "Examples:"
    echo "    setup"
}

# Parse arguments (quote "$@" so arguments with spaces survive getopt).
TEMP=$(getopt -n version --options e:,h:: --longoptions endpoint:,help:: -- "$@")

# Die if they fat finger arguments, this program will be run as root
[ $? = 0 ] || die "Error parsing arguments. version --help"

# Default endpoint; honors $SERVICE_ENDPOINT when set. Expanded here with
# double quotes — the old single-quoted value deferred expansion to an
# injection-prone `eval` at the curl call below.
ENDPOINT="http://${SERVICE_ENDPOINT:-localhost:8585}"

eval set -- "$TEMP"
while true; do
    case $1 in
        -h|--help)
            usage
            exit 0
        ;;
        -e|--endpoint)
            ENDPOINT=$2; shift; shift; continue
        ;;
        --)
            # no more arguments to parse
            break
        ;;
        *)
            # (the getopts-style ":" arm was dead code in a plain `case` and was removed)
            printf "Unknown option %s\n" "$1"
            exit 1
        ;;
    esac
done

# Trigger table creation on the service (no eval: ENDPOINT is fully expanded).
curl --silent -X POST "$ENDPOINT/v1/setup/create_tables"
| true
|
f5a6db5ed1e175aea8ffc62dc55ce188c6ce0df0
|
Shell
|
soondokwon/ebs-radio
|
/tomp3.sh
|
UTF-8
| 158
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/sh
# Convert an audio/video file to mp3, preserving the source audio bitrate.
# Usage: tomp3.sh input [output.mp3]   (output defaults to input with .mp3 extension)

# Probe the source stream's bitrate (e.g. "192k") from avconv's stderr.
bitrate=`avconv -i "$1" 2>&1|sed -nr '/Audio\:/{s,^.* ([0-9]+) (.)b/s.*$,\1\2,g;p}'`

# Re-encode at the same bitrate; "$1" and "$bitrate" are quoted so
# filenames with spaces work (the probe line above already quoted "$1").
avconv -i "$1" -b:a "$bitrate" "${2:-${1%.*}.mp3}"
| true
|
0666c3a1bdb0dab93dd269e884991fb023468c27
|
Shell
|
occlum/occlum
|
/demos/benchmarks/iperf3/iperf3.sh
|
UTF-8
| 1,139
| 3.5625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#! /bin/bash
# Run an iperf3 throughput benchmark between two Occlum enclaves on localhost
# and fold the sender/receiver results into result.json.
# Args: $1 test time in seconds (60), $2 buffer length (128K), $3 parallel streams (8).

set -e

CUR_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
TEST_TIME=${1:-60}
BUF_LEN=${2:-128K}
STREMS=${3:-8}   # (sic) number of parallel client streams

# Build the occlum_server / occlum_client instances used below.
function iperf3_prepare()
{
    ./build.sh
}

# Start the server enclave in the background (-1: exit after one session),
# then run the client and tee its report to output.txt.
function iperf3_run()
{
    echo ""
    echo "*** Doing iperf3 with ${STREMS} client streams in parallel ***"
    echo "*** with read/write buffer length ${BUF_LEN} for ${TEST_TIME} seconds. ***"
    pushd occlum_server
    occlum run /bin/iperf3 -s -p 6777 -1 1>/dev/null &
    popd
    # Give the server enclave time to come up before connecting.
    sleep 3
    pushd occlum_client
    occlum run /bin/iperf3 -c 127.0.0.1 -p 6777 -f Mbits \
        -P ${STREMS} -t ${TEST_TIME} -l ${BUF_LEN} | tee output.txt
    popd
}

# Extract the SUM sender/receiver Mbit/s figures from the client report and
# substitute them into result_template.json via jq.
function iperf3_result()
{
    output="occlum_client/output.txt"
    SENDER_RES=$(grep "SUM" ${output} | grep "sender" | awk '{print $6}')
    RECV_RES=$(grep "SUM" ${output} | grep "receiver" | awk '{print $6}')
    jq --argjson sender $SENDER_RES --argjson recv $RECV_RES \
        '(.[] | select(.extra == "sender") | .value) |= $sender |
        (.[] | select(.extra == "receiver") | .value) |= $recv' \
        result_template.json > result.json
}

iperf3_prepare
iperf3_run
iperf3_result
| true
|
949c9fe2687143fe356404cb44ad52ef3563af26
|
Shell
|
janmojzis/curvevpn
|
/debian/service/curvecp/run
|
UTF-8
| 1,724
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/sh -e
# daemontools-style run script for the CurveVPN server.
# Loads settings from /etc/default/curvevpn-server-run, creates the server
# key (waiting for entropy), updates the nonce counter, rebuilds data.cdb,
# and execs curvevpn-server under the configured user.
exec 2>&1

DEFAULTFILE="/etc/default/curvevpn-server-run"
# NOTE(review): KEYDIR and EXTENSION are expected to come from this file
# as well — they are used below but never validated here; confirm.
[ -f "${DEFAULTFILE}" ] && . "${DEFAULTFILE}"

if [ x"${ROOT}" = x ]; then echo "\$ROOT not set, please edit ${DEFAULTFILE}"; exit 111; fi
if [ x"${IP}" = x ]; then echo "\$IP not set, please edit ${DEFAULTFILE}"; exit 111; fi
if [ x"${PORT}" = x ]; then echo "\$PORT not set, please edit ${DEFAULTFILE}"; exit 111; fi
if [ x"${NAME}" = x ]; then echo "\$NAME not set, please edit ${DEFAULTFILE}"; exit 111; fi
if [ x"${USER}" = x ]; then echo "\$USER not set, please edit ${DEFAULTFILE}"; exit 111; fi
if [ x"${NONCESTART}" = x ]; then echo "\$NONCESTART not set, please edit ${DEFAULTFILE}"; exit 111; fi

export ROOT

#CurveVPN key — generate it once, but only when the kernel entropy pool
# is healthy; otherwise retry every 10 seconds.
while true; do
  [ -d "${KEYDIR}" ] && break
  entropy="`cat /proc/sys/kernel/random/entropy_avail`"
  if [ "${entropy}" -ge 256 ]; then
    curvevpn-makekey "${KEYDIR}"
  else
    echo "entropy too low (${entropy} < 256) - please wait" >&2
    echo "or run it manualy (at your own risk)" >&2
    sleep 10
  fi
done

#update nonce counter
curvevpn-updatenoncecounter -s "${NONCESTART}" "${KEYDIR}"

#change directory
cd "${ROOT}" || exit 111

#make data.cdb file
if [ -f data ]; then
  make
else
  echo "data file not exist, please create ${ROOT}/data using example /usr/share/curvevpn-server/data.example"
  exit 111
fi

#remove lock directory (stale 64-character lock names from a previous run)
rm -f lock/????????????????????????????????????????????????????????????????

echo "Starting CurveVPN server: ${NAME} -> uz7`curvevpn-printkey32 ${KEYDIR}`.${EXTENSION}.{domain}" >&2
exec envuidgid "${USER}" curvevpn-server "${NAME}" "${KEYDIR}" "${IP}" "${PORT}" "${EXTENSION}" curvevpn-message curvevpn -s ................................................................
| true
|
c56d10286d1af4a953e016efb2d605f30cb1ff38
|
Shell
|
FJchen/ts150
|
/violate/1_create_table/hive_insert/INSERT_EXT_T0042_TBPC1510_H.sh
|
UTF-8
| 675
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/sh
######################################################
# Customer contact location info: load the temporary table
# EXT_T0042_TBPC1510_H into the source-aligned (staging) table.
# wuzhaohui@tienon.com
######################################################

# Source the base shell function library (provides hadoop_login, logdate_arg).
source /home/ap/dip_ts150/ts150_script/base.sh

# Log in to Hadoop.
hadoop_login

# Parse command-line arguments (sets ${log_date} used below).
logdate_arg $*

# Dependent data source -- current day's data.
IN_CUR_HIVE=
IN_CUR_HDFS=/bigdata/input/TS150/case_trace/T0042_TBPC1510_H/

# Hive output table, used to check whether the script already ran successfully.
OUT_CUR_HIVE=INN_T0042_TBPC1510_H

# Execute the insert SQL via beeline.
# NOTE(review): run() is only defined here — presumably invoked by the
# framework in base.sh; confirm.
run()
{
	beeline -f ./hive_insert/INSERT_EXT_T0042_TBPC1510_H.sql --hivevar log_date=${log_date}
}
| true
|
da73ce3cc76335150064e4c8c2f095350a63d7e4
|
Shell
|
fnord0/blackarch
|
/packages/jnetmap/PKGBUILD
|
UTF-8
| 882
| 2.546875
| 3
|
[] |
no_license
|
# PKGBUILD for jNetMap (BlackArch packaging of the Java network monitor).
pkgname=jnetmap
pkgver=0.5.3
pkgrel=1
epoch=100
groups=('blackarch' 'blackarch-networking')
pkgdesc="A network monitor of sorts"
arch=('i686' 'x86_64' 'armv6h' 'armv7h')
url='http://www.rakudave.ch/jnetmap/?file=introduction'
license=('GPL3')
depends=('java-runtime')
source=("http://downloads.sourceforge.net/project/jnetmap/jNetMap%200.5.3/jNetMap-$pkgver.tar.gz")
md5sums=('a3512136fccd8b43f6a3ed8a5b9a326a')

package() {
  # NOTE: the tarball extracts to a "-591"-suffixed directory.
  cd "$srcdir/jNetMap-$pkgver-591"

  # Base directories.
  install -dm755 "$pkgdir/usr/bin"
  install -dm755 "$pkgdir/usr/share/jnetmap"
  install -dm755 "$pkgdir/usr/share/applications"

  # Icons and desktop files.
  install -m644 jnetmap.png "$pkgdir/usr/share/jnetmap"
  install -m644 jnetmap.desktop "$pkgdir/usr/share/applications"

  # Bin.
  install -m755 jNetMap.jar "$pkgdir/usr/share/jnetmap"
  install -m755 jnetmap "$pkgdir/usr/bin/jnetmap"
}
| true
|
4e04e709ed66ef39dbced2852be4c71090bc7af7
|
Shell
|
Liangtaiwan/dotfiles
|
/zprofile
|
UTF-8
| 116
| 2.765625
| 3
|
[] |
no_license
|
#! /bin/zsh
# Source ~/.zshenv when it exists and ~/.local/bin is not already in $path.
# NOTE: ${path[(r)...]} is a zsh reverse-subscript array lookup — zsh-only syntax.
if [[ -f "${HOME}/.zshenv" && -z "${path[(r)${HOME}/.local/bin]}" ]]; then
  source ${HOME}/.zshenv
fi
| true
|
d24f4a3cfa98d4fadf12f0d4e0714a3011288afd
|
Shell
|
juliantellez/drone-ci-pipeline
|
/scripts/github-access.sh
|
UTF-8
| 683
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# CI helper: install the deploy key from $ID_RSA and configure git so that
# https://github.com/ URLs are rewritten to SSH for the bot identity.

if [ -z "${ID_RSA}" ]; then
    echo "ERROR: No ID_RSA file detected."
    exit 1
fi

# GitHub access — write the private key with owner-only permissions.
mkdir -p ~/.ssh/
echo "$ID_RSA" > ~/.ssh/id_rsa
chmod 0600 ~/.ssh/id_rsa

# Accept github domain (pre-seed known_hosts to avoid interactive prompt).
touch ~/.ssh/known_hosts
ssh-keyscan github.com >> ~/.ssh/known_hosts

# Global configuration
git config --global url.ssh://git@github.com/.insteadOf https://github.com/
git config --global push.default simple
git config --global user.email "bot@juliantellez.com"
git config --global user.name "[ BOT ] Julian Tellez"

# DEBUG
# ssh-keygen -y -e -f ~/.ssh/id_rsa
# echo "\n\n Verify server: \n\n"
# ssh -vT git@github.com
# echo "\n\n Verify user: \n\n"
# ssh -T git@github.com
| true
|
8ac77a5c79794b1f5608bfacfd67b0e4d90e02b0
|
Shell
|
yoooov/glftpd-installer
|
/packages/scripts/extra/TVMaze_nuke.sh
|
UTF-8
| 7,280
| 3.671875
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
VER=1.0
#--[ Info ]-----------------------------------------------------#
#
# This script comes without any warranty, use it at your own risk.
#
# Changelog
# 20XX-00-00 v.1x Orginale creator Sokar aka PicdleRick
# 2020-04-18 v.2x Code modifications and improvements Teqno/TeRRaNoVA
#
# Installation: copy TVMaze_nuke.sh to glftpd/bin and chmod it
# 755. Copy the modificated TVMaze.tcl into your eggdrop pzs-ng
# plugins dir.
#
# Modify GLROOT into /glftpd or /jail/glftpd.
#
# To ensure log file exist, run: "./TVMaze_nuke.sh sanity" from
# shell, this will create the log file and set the correct
# permissions.
#
#--[ Settings ]-------------------------------------------------#

GLROOT=/glftpd
GLCONF=$GLROOT/etc/glftpd.conf
# 1 = verbose logging to $LOG_FILE
DEBUG=0
LOG_FILE=$GLROOT/ftp-data/logs/tvmaze_nuke.log

# Username of person to nuke with. This user must be a glftpd user account.
NUKE_USER=glftpd

# Multiplier to use when nuking a release
NUKE_MULTIPLER=5

# Show Types: Animation Award_Show Documentary Game_Show News Panel_Show Reality Scripted Sports Talk_Show Variety
# Space delimited list of show types to nuke.
NUKE_SHOW_TYPES=""

# Per-section deny lists, one "section:(Type1|Type2)" entry per line.
# Show Types: Animation Award_Show Documentary Game_Show News Panel_Show Reality Scripted Sports Talk_Show Variety
NUKE_SECTION_TYPES="
/site/TV-HD:(Sports|Game_Show)
/site/TV-NL:(Sports|Game_Show)
/site/TV-SD:(Sports|Game_Show)
"

# Configured like NUKE_SECTION_TYPES
# Genres: Action Adult Adventure Anime Children Comedy Crime DIY Drama Espionage Family Fantasy Food History Horror Legal Medical Music Mystery Nature Romance Science-Fiction
# Sports Supernatural Thriller Travel War Western
NUKE_SECTION_GENRES="
/site/TV-HD:(Sports|Game_Show)
/site/TV-NL:(Sports|Game_Show)
/site/TV-SD:(Sports|Game_Show)
"

# Space delimited list of episodes with an air date before this year will be nuked
NUKE_EPS_BEFORE_YEAR="2018"

# Space delimited list of TV Shows produced in these countries will be nuked
NUKE_ORIGIN_COUNTRIES="DE"

# Feature toggles for the checks below.
# NOTE(review): despite the comment, the main flow treats 1 as "enabled"
# ( [ "$NUKE_..." -eq 1 ] runs the check ) — so 0 here disables nuking.
# 0 = Nuke / 1 = Do not nuke
NUKE_EP_BEFORE_YEAR=0
NUKE_SHOW_TYPE=0
NUKE_SECTION_TYPE=0
NUKE_SECTION_GENRE=0
NUKE_ORIGIN_COUNTRY=0

# Space delimited list of TV shows to never nuke, use releasename and not show name ie use The.Flash and NOT The Flash
ALLOWED_SHOWS=""

# Space delimited list of sections to never nuke
EXCLUDED_SECTIONS="ARCHIVE REQUEST"

# Space delimited list of groups to never nuke ie affils
EXCLUDED_GROUPS=""

#--[ Script Start ]---------------------------------------------#
# Append a timestamped message to $LOG_FILE.
# "$*" joins all arguments into one word (the old "$timestamp $@" expanded
# to multiple words); "$LOG_FILE" is quoted against word splitting.
function LogMsg()
{
	timestamp=`date --rfc-3339=seconds`
	echo "$timestamp $*" >> "$LOG_FILE"
}
# "sanity" mode: create the log file with world-writable permissions and exit.
if [ "$1" = "sanity" ]
then
	echo
	echo "Creating log file $LOG_FILE and setting permission 666"
	touch $LOG_FILE ; chmod 666 $LOG_FILE
	exit 0
fi

if [ ! -f $LOG_FILE ]
then
	echo
	echo "Log file $LOG_FILE do not exist, create it by running ./TVMaze_nuke.sh sanity"
	echo
	exit 1
fi

# Exactly 7 metadata arguments are required (release, genres, country,
# network, status, type, air date) — presumably passed by TVMaze.tcl.
if [ $# -ne 7 ]
then
	echo
	echo "ERROR! Missing arguments."
	echo
	LogMsg "ERROR! Not enough arguments passed in."
	exit 1
fi

# Process args and remove encapsulating double quotes.
RLS_NAME=`sed -e 's/^"//' -e 's/"$//' <<<"$1"`
SHOW_GENRES=`sed -e 's/^"//' -e 's/"$//' <<<"$2"`
SHOW_COUNTRY=`sed -e 's/^"//' -e 's/"$//' <<<"$3"`
SHOW_NETWORK=`sed -e 's/^"//' -e 's/"$//' <<<"$4"`
SHOW_STATUS=`sed -e 's/^"//' -e 's/"$//' <<<"$5"`
SHOW_TYPE=`sed -e 's/^"//' -e 's/"$//' <<<"$6"`
EP_AIR_DATE=`sed -e 's/^"//' -e 's/"$//' <<<"$7"`

if [ "$DEBUG" -eq 1 ]
then
	LogMsg "Release: $RLS_NAME Genres: $SHOW_GENRES Country: $SHOW_COUNTRY Network: $SHOW_NETWORK Status: $SHOW_STATUS Type: $SHOW_TYPE Air date: $EP_AIR_DATE"
fi

# Whitelist: never nuke releases matching ALLOWED_SHOWS.
for show in $ALLOWED_SHOWS
do
	result=`echo "$RLS_NAME" | grep -i "$show"`
	if [ -n "$result" ]
	then
		if [ "$DEBUG" -eq 1 ]
		then
			LogMsg "Skipping allowed show: $RLS_NAME"
		fi
		echo "Skipping allowed show: $RLS_NAME"
		exit 0
	fi
done

# Whitelist: never nuke releases from EXCLUDED_GROUPS ("-GROUP" suffix match).
for group in $EXCLUDED_GROUPS
do
	result=`echo "$RLS_NAME" | grep -i "\-$group"`
	if [ -n "$result" ]
	then
		if [ "$DEBUG" -eq 1 ]
		then
			LogMsg "Skipping group: $RLS_NAME"
		fi
		echo "Skipping group: $RLS_NAME"
		exit 0
	fi
done

# Per-section show-type deny list.
if [ "$NUKE_SECTION_TYPE" -eq 1 ]
then
	for rawdata in $NUKE_SECTION_TYPES
	do
		section="`echo "$rawdata" | cut -d ':' -f1`"
		denied="`echo "$rawdata" | cut -d ':' -f2`"
		if [ "`echo "$RLS_NAME" | egrep -i "$section/"`" ]
		then
			if [ "`echo $SHOW_TYPE | egrep -i $denied`" ]
			then
				type="`echo $SHOW_TYPE | egrep -oi $denied`"
				$GLROOT/bin/nuker -r $GLCONF -N $NUKE_USER -n {$RLS_NAME} $NUKE_MULTIPLER "$type type of TV show is not allowed"
				LogMsg "Nuked release: {$RLS_NAME} because its show type is $type which is not allowed in section $section."
				exit 0
			fi
		fi
	done
fi

# Per-section genre deny list.
if [ "$NUKE_SECTION_GENRE" -eq 1 ]
then
	for rawdata in $NUKE_SECTION_GENRES
	do
		section="`echo "$rawdata" | cut -d ':' -f1`"
		denied="`echo "$rawdata" | cut -d ':' -f2`"
		if [ "`echo "$RLS_NAME" | egrep -i "$section/"`" ]
		then
			if [ "`echo $SHOW_GENRES | egrep -i $denied`" ]
			then
				genre="`echo $SHOW_GENRES | egrep -oi $denied`"
				$GLROOT/bin/nuker -r $GLCONF -N $NUKE_USER -n {$RLS_NAME} $NUKE_MULTIPLER "$genre genre is not allowed"
				LogMsg "Nuked release: {$RLS_NAME} because its genre is $genre which is not allowed in section $section."
				exit 0
			fi
		fi
	done
fi

# Nuke episodes that aired before NUKE_EPS_BEFORE_YEAR.
if [ "$NUKE_EP_BEFORE_YEAR" -eq 1 ]
then
	if [ -n "$EP_AIR_DATE" ]
	then
		if [ "$EP_AIR_DATE" != "N/A" ]
		then
			ep_air_year=`date +"%Y" -d "$EP_AIR_DATE"`
			if [ -n "$ep_air_year" ]
			then
				if [ "$ep_air_year" -lt $NUKE_EPS_BEFORE_YEAR ]
				then
					$GLROOT/bin/nuker -r $GLCONF -N $NUKE_USER -n {$RLS_NAME} $NUKE_MULTIPLER "Episode air date must be $NUKE_EPS_BEFORE_YEAR or newer"
					LogMsg "Nuked release: {$RLS_NAME} because its year of release of $ep_air_year is before $NUKE_EPS_BEFORE_YEAR"
					exit 0
				fi
			fi
		fi
	fi
fi

# Global show-type deny list.
if [ "$NUKE_SHOW_TYPE" -eq 1 ]
then
	if [ -n "$NUKE_SHOW_TYPES" ]
	then
		for type in $NUKE_SHOW_TYPES
		do
			if [ "$SHOW_TYPE" == "$type" ]
			then
				$GLROOT/bin/nuker -r $GLCONF -N $NUKE_USER -n {$RLS_NAME} $NUKE_MULTIPLER "$type TV shows are not allowed"
				LogMsg "Nuked release: {$RLS_NAME} because its show type is $SHOW_TYPE which is not allowed."
				exit 0
			fi
		done
	fi
fi

# Country-of-origin deny list.
if [ "$NUKE_ORIGIN_COUNTRY" -eq 1 ]
then
	if [ -n "$NUKE_ORIGIN_COUNTRIES" ]
	then
		for country in $NUKE_ORIGIN_COUNTRIES
		do
			if [ "$SHOW_COUNTRY" == "$country" ]
			then
				$GLROOT/bin/nuker -r $GLCONF -N $NUKE_USER -n {$RLS_NAME} $NUKE_MULTIPLER "TV shows from $country are not allowed"
				LogMsg "Nuked release: {$RLS_NAME} because its country of origin is $SHOW_COUNTRY which is not allowed."
				exit 0
			fi
		done
	fi
fi

exit 0
| true
|
08901579d238f61d7f8c8e6426b92e9046ee3b6f
|
Shell
|
feilen/oculus-wine-wrapper
|
/oculus_wine_wrapper.sh
|
UTF-8
| 2,998
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# Oculus Wine Wrapper
#
# (C) 2014 Jared Stafford (jspenguin@jspenguin.org)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

IFS=

[ -z "$WINE" ] && WINE=wine

# Parse wrapper options (everything before the game executable).
while [[ ${1:0:1} = "-" ]]; do
    case "$1" in
        -o|--ovrd) OVRD="$2"; shift;;
        -r|--norestart) NORESTART=1;;
        -k|--nokill) NOKILL=1;;
        -u|--utilsdir) UTILSDIR="$2"; shift;;
    esac
    shift
done

if [ -z "$OVRD" ]; then
    OVRD=/usr/bin/ovrd
fi

if [ -z "$UTILSDIR" ]; then
    UTILSDIR=/usr/share/oculus-wine-wrapper
fi

if [ ! -x "$OVRD" ]; then
    echo "Cannot run $OVRD"
    exit 1
fi

if [ ! -d "$UTILSDIR" ]; then
    echo "Cannot find utilities"
    exit 1
fi

if [ $# -lt 1 ]; then
    echo "Usage: $0 [options] /path/to/game.exe [arguments]"
    echo "$0 options:"
    echo "   -o, --ovrd       specify location of ovrd (default /usr/bin/ovrd)"
    echo "   -u, --utilsdir   specify location of wrapper utilities (default /usr/share/oculus-wine-wrapper)"
    echo "   -r, --norestart  don't re-execute ovrd after game exits"
    echo "   -k, --nokill     don't kill running ovrd service"
    exit 1
fi

# Stop any already-running ovrd so we can start our own instrumented copy.
if [ -z "$NOKILL" ]; then
    old_oculus_pid=$(pidof ovrd)
    if [ -n "$old_oculus_pid" ]; then
        kill -TERM $old_oculus_pid
    fi
    # wait up to 3 seconds (15 * 0.2s) for it to quit.
    # Fix: the old test used `-o` (OR), so the loop kept spinning the full
    # 3 seconds even after ovrd had exited; we want "still alive AND time left".
    i=15
    while [ -n "$(pidof ovrd)" ] && [ $i -gt 0 ]; do
        sleep 0.2
        i=$(($i - 1))
    done
    if [ -n "$(pidof ovrd)" ]; then
        echo "Unable to kill running $OVRD process"
        exit 1
    fi
fi

LD_PRELOAD=$UTILSDIR/no_xselectinput.so $OVRD & oculus_pid=$!

sleep .5
if ! kill -0 "$oculus_pid" 2>/dev/null; then
    echo "oculus service exited prematurely: is another instance already running?"
    exit 1
fi

# True once ovrd's shared-memory segment appears in /dev/shm.
# (The old `[ ! -e /dev/shm/OVR* ]` broke when the glob matched more than
# one file: `test` got extra operands and the loop never terminated.)
shm_ready() {
    local f
    for f in /dev/shm/OVR*; do
        [ -e "$f" ] && return 0
    done
    return 1
}

while ! shm_ready; do
    # Fix: the original had a stray `$` operand in this kill command
    # (`kill -0 $oculus_pid $`), which made kill always fail and the
    # wrapper always bail out with "ovrd exited without creating SHM".
    if ! kill -0 "$oculus_pid" 2>/dev/null; then
        wait
        echo "ovrd exited without creating SHM"
        exit 1
    fi
    sleep .1
done

$WINE $UTILSDIR/oculus_shm_adapter.exe & wine_pid=$!

sleep .1

$WINE "$@"

echo
echo "Game exited, stopping service..."
echo

kill "$wine_pid"
kill "$oculus_pid"

wait

if [ -z "$NORESTART" ]; then
    echo "Killing and re-forking $OVRD"
    nohup $OVRD > /dev/null &
fi
| true
|
fb13f0e4d33d134c6f6ac65d02e6ded09e909f5c
|
Shell
|
ubaceslab/swenv
|
/build_scripts/build_gmp.sh
|
UTF-8
| 1,041
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# Download, build, test and install GMP $1 under $UBCESLAB_SWENV_PREFIX.

set -e # Fail on first error

GMP_VERSION=$1

# ${VAR:?undefined} aborts with an error if the variable is unset.
echo "Building GMP for ${UBCESLAB_SYSTEMTYPE:?undefined}"

# Fetch the tarball once into the shared sources cache.
mkdir -p ${UBCESLAB_SWENV_PREFIX:?undefined}/sourcesdir/gmp
(cd $UBCESLAB_SWENV_PREFIX/sourcesdir/gmp
 if [ ! -f gmp-$GMP_VERSION.tar.bz2 ]; then
     wget https://ftp.gnu.org/gnu/gmp/gmp-$GMP_VERSION.tar.bz2
 fi
)

TOPDIR=${UBCESLAB_SWENV_PREFIX:?undefined}/libs/gmp
export GMP_DIR=$TOPDIR/$GMP_VERSION

# Build in a throwaway directory; NPROC controls make parallelism.
mkdir -p $UBCESLAB_SWENV_PREFIX/builddir
BUILDDIR=`mktemp -d $UBCESLAB_SWENV_PREFIX/builddir/gmp-XXXXXX`
mkdir -p $BUILDDIR
cd $BUILDDIR

tar xjf $UBCESLAB_SWENV_PREFIX/sourcesdir/gmp/gmp-$GMP_VERSION.tar.bz2
cd gmp-$GMP_VERSION || exit 1

./configure --prefix=$GMP_DIR

make -j ${NPROC:-1}
make -j ${NPROC:-1} check

# Wipe any previous install of this version before installing fresh.
rm -rf $GMP_DIR
make install

cd $UBCESLAB_SWENV_PREFIX
rm -rf $BUILDDIR || true
| true
|
038267e2cf7b66bc51ef83ce699cad5b0dbe7318
|
Shell
|
lxgolovin/mint-quick-install
|
/more_software/install-teamviewer-v1.sh
|
UTF-8
| 601
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Downloading and installing latest teamviewer
echo "################################################################"
echo "##############       teamviewer installation       #################"
# -f: do not fail/complain when no stale download is present (first run)
rm -f /tmp/teamviewer_i386.deb
wget https://download.teamviewer.com/download/teamviewer_i386.deb -O /tmp/teamviewer_i386.deb
sudo apt install -y libjpeg62:i386
sudo dpkg -i /tmp/teamviewer_i386.deb
rm -f /tmp/teamviewer_i386.deb
# resolve any dependencies dpkg could not satisfy; -y keeps the run unattended
sudo apt-get -f install -y
echo "##############       teamviewer installed       #################"
echo "################################################################"
| true
|
cb32071562a11ca964defc076af73ef1345e67d6
|
Shell
|
hupi-analytics/formation
|
/bureau_etudes_insa/2021/exemple_creation_Docker/start.sh
|
UTF-8
| 225
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Docker entrypoint: activate the conda environment named on the first line
# of /tmp/environment.yml ("name: <env>"), put its bin dir on PATH, then run
# the command given as "$1 $2" and propagate its exit status.
# (sed strips any CR/LF left over from a Windows-edited yml.)
source activate $(head -1 /tmp/environment.yml | cut -d ' ' -f2 | sed -e 's/[\r\n]*//g')
export PATH=/opt/conda/envs/$(head -1 /tmp/environment.yml | cut -d' ' -f2 | sed -e 's/[\r\n]*//g')/bin:$PATH
$1 $2
exit $?
| true
|
d2f75d817c710dba2d3f4288a99bf5b7807cf83f
|
Shell
|
pythseq/lncRNA-screen
|
/code/group_mean.bash
|
UTF-8
| 2,154
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# For each sample group in a grouping column, compute per-gene FPKM summary
# tables: percentage of samples above the cutoff, group mean, and mean over
# only the samples above the cutoff.
# Relies on helpers from code/custom-bashrc (skipn, replace_with_tab, vectors).
source code/custom-bashrc

if [ "$#" -ne 4 ]; then
	printf "\n\n###### Usage\n\n"
	printf "$0 <FPKM CUTOFF> <PATH to group_info.txt file> <GROUP_BY> <FPKM MATRIX FILE>\n\n"
	exit
fi

cutoff=$1
GROUP_FILE=$2
GROUP_BY=$3
MATRIX=$4

# 1-based column index of the GROUP_BY column in the group file header.
# NOTE(review): inside double quotes "\$" is a literal '$', so grep -P sees
# the text '${GROUP_BY}' rather than the expanded variable — confirm this is
# the intended header format (same pattern is used for ${group} below).
GROUP_INDEX=`head -1 $GROUP_FILE | tr '\t' '\n' | cat -n| grep -P "\t\${GROUP_BY}$" | cut -c-7 | sed -E 's/\s+//g'`

myfile=`basename $MATRIX`
CATEGORY=${myfile%.*}

if [ ! -d $GROUP_BY ]
then
	mkdir $GROUP_BY
fi

echo
echo
echo "##############################################"
echo "** Calculating FPKM Cutoff for $CATEGORY grouping by $GROUP_BY"
printf "\t"
# Iterate over the distinct group labels in the GROUP_BY column.
for group in `cat ${GROUP_FILE} | cut -f${GROUP_INDEX} | skipn 1 | sort | uniq`
do
	printf $group" "
	# Comma-separated matrix column indices belonging to this group, and
	# the number of samples in the group.
	column=`echo $(cut -f1,$GROUP_INDEX $GROUP_FILE | cat -n| grep -P "\t\${group}$" | cut -c-7 | sed -E 's/\s+//g') | tr ' ' ','`
	total=`cut -f1,$GROUP_INDEX $GROUP_FILE | grep -P "\t\${group}$" | wc -l`
	# % of the group's samples with FPKM above the cutoff, per gene (only rows > 0 kept).
	echo -e ".ID_\t${group}_PCTGofSample_above"$cutoff"" > $GROUP_BY/${group}_${CATEGORY}_PCTGofSample_above"$cutoff".txt
	cat $MATRIX | cut -f1,$column | skipn 1 | sed 's/\t/ /g' | replace_with_tab ' ' | vectors test -g -e -c $cutoff | vectors sum -n 0 | awk '{print $1"\t"$2/'$total'*100}' | awk '$2>0' >> $GROUP_BY/${group}_${CATEGORY}_PCTGofSample_above"$cutoff".txt
	# Plain group mean per gene.
	echo -e ".ID_\t${group}_Mean" > $GROUP_BY/${group}_${CATEGORY}_Mean.txt
	cat $MATRIX | cut -f1,$column | skipn 1 | sed 's/\t/ /g' | replace_with_tab ' ' | vectors m -n 4 | awk '$2>0' >> $GROUP_BY/${group}_${CATEGORY}_Mean.txt
	# computing the mean fpkm only on samples that pass the cutoff (divide by the number of patient pass the cutoff)
	echo -e ".ID_\t${group}_Mean_above"$cutoff"" > $GROUP_BY/${group}_${CATEGORY}_Mean_above"$cutoff".txt
	join -1 1 -2 1 <(cat $MATRIX | cut -f1,$column | skipn 1 | sed 's/\t/ /g' | replace_with_tab ' ' | vectors cutoff -c $cutoff | vectors sum | sort -k1,1) <(cat $GROUP_BY/${group}_${CATEGORY}_PCTGofSample_above"$cutoff".txt | awk '$2>0{print $1"\t"'$total'*$2/100}' | sort -k1,1) | awk '{print $1"\t"$2/$3}' >> $GROUP_BY/${group}_${CATEGORY}_Mean_above"$cutoff".txt
	rm -rf $GROUP_BY/*{Mean.txt}
done
echo
echo
| true
|
5d9fd595fb20c6b0335d0035ab223210156ff6dc
|
Shell
|
lewis-jg-lan/my_tools
|
/Desktop/Script/shell/NYC_Check_.sh
|
UTF-8
| 518
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/sh
# v1.0 — verify the scanned part number (WIP) belongs to an NYC model.
# The first "WIP scanned=..." entry is pulled from the process log; the part
# number sits between the '+' separator and the closing double quote.
NYCPN=`grep -m 1 -o 'WIP scanned=".*"' /Phoenix/Logs/processlog.plog | cut -f2 -d"+" | cut -f1 -d '"'`

# Accept exactly the three known NYC part numbers; anything else fails.
case "${NYCPN}" in
	"K0QM1LL/A"|"K0QN0LL/A"|"K0QN2LL/A")
		echo "$NYCPN is NYC model,PASS"
		exit 0
		;;
	*)
		echo "$NYCPN is Not NYC model,pls re-download with normal bundle"
		exit 1
		;;
esac
| true
|
3ca08464085b4c3439adb976294ecfce24e7d94a
|
Shell
|
bjx2130/kafka-cluster
|
/kafka_2.12-1.0.0-cluster/kafka-cluster-server.sh
|
UTF-8
| 1,121
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash

# Root directory of the kafka cluster installation.
KAFKA_CLUSTER__HOME=./

# Configuration files for the 3 cluster nodes.
conf0=${KAFKA_CLUSTER__HOME}/config/server-0.properties
conf1=${KAFKA_CLUSTER__HOME}/config/server-1.properties
conf2=${KAFKA_CLUSTER__HOME}/config/server-2.properties

# Default $1 to empty and quote it: running the script with no argument now
# falls through to the usage message instead of crashing with a
# "[: =: unary operator expected" error from the bare $1 expansion.
action="${1:-}"

if [ "$action" = start ]; then
	echo "启动集群 kafka-cluster ------------ "
	nohup ${KAFKA_CLUSTER__HOME}/kafka_2.12-1.0.0/bin/kafka-server-start.sh  -daemon   ${conf0}
	nohup ${KAFKA_CLUSTER__HOME}/kafka_2.12-1.0.0/bin/kafka-server-start.sh  -daemon   ${conf1}
	nohup ${KAFKA_CLUSTER__HOME}/kafka_2.12-1.0.0/bin/kafka-server-start.sh  -daemon   ${conf2}
elif [ "$action" = stop ]; then
	echo "关闭集群 kafka-cluster ------------ "
	nohup ${KAFKA_CLUSTER__HOME}/kafka_2.12-1.0.0/bin/kafka-server-stop.sh  -daemon   ${conf0}
	nohup ${KAFKA_CLUSTER__HOME}/kafka_2.12-1.0.0/bin/kafka-server-stop.sh  -daemon   ${conf1}
	nohup ${KAFKA_CLUSTER__HOME}/kafka_2.12-1.0.0/bin/kafka-server-stop.sh  -daemon   ${conf2}
else
	echo ">>>>>>>>>> command is not [start|stop|status] <<<<<<<<<<< "
fi
echo ">>>>>>>>>> [start|stop|status] 日志在当前目录 nohup.out 文件 "
| true
|
fd4b570852976be16d1a9e74e1b123ad52551dde
|
Shell
|
thomasgranbohm/magisk-custom-display-settings
|
/build.sh
|
UTF-8
| 496
| 2.625
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
# Build the CustomDisplaySettings overlay APK with aapt, stage it into the
# Magisk module tree, normalize timestamps/permissions, and zip the module.

pushd app
aapt package -M AndroidManifest.xml -S res/ \
	-I "$HOME/android-9/android.jar" \
	-F CustomDisplaySettings.apk || exit 1
mv CustomDisplaySettings.apk ..
popd

pushd magisk
mv ../CustomDisplaySettings.apk system/vendor/overlay/CustomDisplaySettings/
# Zero all mtimes (-h: don't follow symlinks) for a reproducible zip.
find -exec touch -d @0 -h {} +
find -type d -exec chmod 0755 {} +
find -type f -exec chmod 0644 {} +
# Module version from module.prop ("version=..." line).
version=$(grep -Po "version=\K.*" module.prop)
zip -r -y -9 ../custom-display-settings-$version.zip . || exit 1
popd
| true
|
0a90cd66ed7940a8f5e196b52e720522e5b0618b
|
Shell
|
wangqifeng/9nfl
|
/example/data_join/dist_data_join_follower.sh
|
UTF-8
| 934
| 3.078125
| 3
|
[
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-ssleay-windows",
"Zlib",
"LicenseRef-scancode-pcre",
"LicenseRef-scancode-ssleay",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
# Launch a k8s pod running a data-join follower worker for one partition.
WORK_DIR=$(cd `dirname $0`;pwd)
BASE_DIR=`readlink -f "${WORK_DIR}/../.."`

# Render deploy/data_join/k8s/follower.yaml with the per-partition values
# below and create the pod via kubectl.
create_pod(){
  set -x
  partition_id=$1
  data_source_name="jdfl-opensource-data-join-v1"
  task_name="${data_source_name}-follower-worker-${partition_id}"
  uuid="jdfl_DataJoinWorkerService_${partition_id}"
  image_path=""  # data join follower image path
  raw_data_dir=""  # hdfs data dir for the input of data join
  PORT0=8001  # data join port
  data_block_dir=""  # hdfs data dir for the output of data join
  proxy=""  # follower proxy host and port
  # Substitute the template placeholders (TASK_NAME, IMAGE_PATH, PID, DSN,
  # RDD, DATA_JOIN_PORT, DBD, RI) and replace "internal" with the proxy
  # address throughout the manifest.
  sed \
      "s/TASK_NAME/${task_name}/;s!IMAGE_PATH!${image_path}!;s/PID/${partition_id}/;s/DSN/${data_source_name}/;s!RDD!${raw_data_dir}!;s/DATA_JOIN_PORT/${PORT0}/;s!DBD!${data_block_dir}!;s/RI/${uuid}/;s/internal/${proxy}/g" \
      ${BASE_DIR}/deploy/data_join/k8s/follower.yaml > follower.yaml
  namespace="fl-follower"
  cat follower.yaml |kubectl --namespace=${namespace} create -f -
}

# data join for partition 0
create_pod 0
| true
|
dc87a229724f4eea516c1c49977cb662847a9fe0
|
Shell
|
QServ/QServ
|
/src/test/discovery/runTests.sh
|
UTF-8
| 615
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Integration test for the q discovery service: start discovery and a mult
# test server in the background, run the test client, then kill both.

TEST_PATH="$( cd "$(dirname "$0")" ; pwd -P )"

export KDB_SVC_CONFIG_PATH="${QSERV_HOME}/config/svc"
export KDB_COMMON_CONFIG_PATH="${QSERV_HOME}/config/common"

### Start the discovery service
pushd ${QSERV_HOME}/src/q/discovery
$QHOME/l32/q discovery.q &
DISCOVERY_PID=$!
popd

pushd ${TEST_PATH}

## Start the mult test server
$QHOME/l32/q multService.q &
MULT_SVC_PID=$!

# Wait for the mult server to start and register its functions in discovery
sleep 2 ## TODO: change this to check the discovery service instead.

$QHOME/l32/q testDiscovery.q

# Tear down both background q processes.
kill -9 $DISCOVERY_PID
kill -9 $MULT_SVC_PID
popd
| true
|
f92e9ee32ec9368565c7d5de1b734f056ec68f4e
|
Shell
|
mjerger/ALT
|
/Software/tools/capture_canon_tethered.sh
|
UTF-8
| 728
| 2.5625
| 3
|
[] |
no_license
|
# Tethered capture helper for a Canon EOS 5D Mark II via gphoto2:
# applies fixed capture settings, grabs a RAW frame, converts it with dcraw
# and opens the result in pfsv.
# Args: $1 shutter speed (default 1/200), $2 aperture (default 2.8).

# get available camera
#     gphoto2 --auto-detect
#then use model string to identify cam

# get abilities
#     gphoto2 --camera "Canon EOS 5D Mark II" --list-config
#

ss=1/200; if [ -n "$1" ] ; then ss=$1; fi
ap=2.8;   if [ -n "$2" ] ; then ap=$2; fi

# set default config
gphoto2 --camera "Canon EOS 5D Mark II" \
        --set-config autopoweroff=0 \
        --set-config iso=100 \
        --set-config whitebalance=1 \
        --set-config aperture=$ap \
        --set-config shutterspeed=$ss

# download image like this:
gphoto2 --camera "Canon EOS 5D Mark II" \
        --capture-tethered --force-overwrite --filename preview.cr2
        #--capture-image-and-download --force-overwrite --filename preview.cr2

# Convert RAW to 16-bit PPM, then view it.
dcraw -4 preview.cr2
rm preview.cr2
pfsv preview.ppm
| true
|
17554e8bb5095daacdd65c047dfb8f085c9eeb3d
|
Shell
|
sureshrmdec/notes-2
|
/devOps/docker/dataV.sh
|
UTF-8
| 802
| 2.796875
| 3
|
[] |
no_license
|
# Docker data-volume cheat sheet.

#mount a data volume with -v, it will be created upon start, and will persist
docker create --name postgres-data -v /var/lib/postgresql/data postgres

#mount a host directory can be useful for testing: mount source code inside container and o

#use DVC to share data between containers, and/or use it from non-persistent containers
docker create -v /dbdata --name dbstore training/postgres /bin/true
docker run -d --volumes-from dbstore --name db1 training/postgres

#find dangling volumes
docker volume ls -f dangling=true
# fix: the subcommand is "volume" (was misspelled "volumn"); quote the name
docker volume rm "$VOL_NAME"

# back up a shared volume into a tarball on the host
docker run --rm --volumes-from dbstore -v $(pwd):/backup ubuntu tar cvf /backup/backup.tar /dbdata

#1. multiple containers writing to a single shared DV can cause data corruption
#2. access them from dockerhost is can cause data corruption as well
| true
|
d2008dc533fd040242225ee0ce225e46e53ba6d6
|
Shell
|
DiploDatos/AprendizajeProfundo
|
/run.sh
|
UTF-8
| 1,201
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Download the MeLi Challenge 2019 dataset and the SBWCE embeddings (if
# missing), then train the MLP experiment on the Spanish split.

set -ex

if [ ! -d "./data/meli-challenge-2019/" ]
then
    mkdir -p ./data
    echo >&2 "Downloading Meli Challenge Dataset"
    curl -L https://cs.famaf.unc.edu.ar/\~ccardellino/resources/diplodatos/meli-challenge-2019.tar.bz2 -o ./data/meli-challenge-2019.tar.bz2
    tar jxvf ./data/meli-challenge-2019.tar.bz2 -C ./data/
fi

if [ ! -f "./data/SBW-vectors-300-min5.txt.gz" ]
then
    mkdir -p ./data
    echo >&2 "Downloading SBWCE"
    curl -L https://cs.famaf.unc.edu.ar/\~ccardellino/resources/diplodatos/SBW-vectors-300-min5.txt.gz -o ./data/SBW-vectors-300-min5.txt.gz
fi

# Be sure the correct nvcc is in the path with the correct pytorch installation
export CUDA_HOME=/opt/cuda/10.1
export PATH=$CUDA_HOME/bin:$PATH
export CUDA_VISIBLE_DEVICES=0

python -m experiment.mlp \
    --train-data ./data/meli-challenge-2019/spanish.train.jsonl.gz \
    --token-to-index ./data/meli-challenge-2019/spanish_token_to_index.json.gz \
    --pretrained-embeddings ./data/SBW-vectors-300-min5.txt.gz \
    --language spanish \
    --validation-data ./data/meli-challenge-2019/spanish.validation.jsonl.gz \
    --embeddings-size 300 \
    --hidden-layers 256 128 \
    --dropout 0.3
| true
|
2580261f80794efb6fff33f9b888271d80506887
|
Shell
|
slinkydeveloper/knative-kafka-scaling-demo
|
/producer/produce.sh
|
UTF-8
| 1,439
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Continuously produce JSON messages to a Kafka topic through kafkacat,
# ramping up throughput over time (the target rate grows with elapsed
# seconds until it hits "max", i.e. no sleep between messages).
# Usage: produce.sh <kafka_url> <topic>

# Abort with a hint if a required executable is missing.
check_command_exists() {
  local CMD_NAME=$1

  command -v "$CMD_NAME" > /dev/null || {
    echo "Command $CMD_NAME not exists, please install it"
    exit 1
  }
}

# Start a background kafkacat producer reading from the named pipe.
start_producer()
{
  local pipe_name=$1
  local bootstrap_url=$2
  local topic=$3
  kafkacat -b "$bootstrap_url" -t "$topic" -P < "$pipe_name" &
}

# Write one JSON message into the pipe (consumed by kafkacat above).
produce()
{
  local pipe_name=$1
  local message_id=$2
  local load_gen_id=$3
  echo "{\"id\": \"${message_id}\", \"hostname\": \"$load_gen_id\", \"payload\": \"Hello!\"}" >> "$pipe_name"
}

# Evaluate a fixed-point expression with bc, printed to 4 decimals.
do_math()
{
  local math_op=$1
  echo "scale=4; $math_op" | bc -l | awk '{printf "%.4f", $0}'
}

# Map elapsed seconds to an inter-message sleep: target throughput is
# 9*t+10 msg/s, capped ("max" → sleep 0) after 1000 seconds.
calculate_sleep_time()
{
  local exec_time=$1
  if [[ $exec_time -gt "1000" ]]; then
    thpt="max"
  else
    thpt=$(do_math "(9*${exec_time})+10")
  fi

  if [[ $thpt == "max" ]]; then
    echo "0"
  else
    do_math "1/${thpt}"
  fi
}

if [[ $# -le 2 ]]
then
  echo "Usage: $0 <kafka_url> <topic>"
  exit 1
fi

check_command_exists "kafkacat"

kafka_url=$1
topic=$2
load_gen_id=$([ -z "$HOSTNAME" ] && echo "someone" || echo $HOSTNAME)
id=1
pipe_name=$(mktemp -u)
mkfifo "$pipe_name"

# Kill bg process at the end
trap 'kill $(jobs -p)' EXIT
# Keep pipe open (prevents kafkacat from seeing EOF between writes)
sleep infinity > "$pipe_name" &
start_producer "$pipe_name" "$kafka_url" "$topic"

# Produce forever, shrinking the sleep as $SECONDS (script uptime) grows.
while :
do
  wait_time=$(calculate_sleep_time $SECONDS)
  #echo "Sleeping for ${wait_time}"
  sleep "${wait_time}s"
  produce "$pipe_name" "$id" "$load_gen_id"
  ((id++))
done
| true
|
e3b4fae749e7255243fa9e3dc8a3f3a7975e43c0
|
Shell
|
bazelbuild/bazel
|
/src/test/shell/integration/jvm_flags_escaping_test.sh
|
UTF-8
| 9,674
| 3.5625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --- begin runfiles.bash initialization ---
# Copy-pasted from Bazel's Bash runfiles library (tools/bash/runfiles/runfiles.bash).
set -euo pipefail
if [[ ! -d "${RUNFILES_DIR:-/dev/null}" && ! -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then
if [[ -f "$0.runfiles_manifest" ]]; then
export RUNFILES_MANIFEST_FILE="$0.runfiles_manifest"
elif [[ -f "$0.runfiles/MANIFEST" ]]; then
export RUNFILES_MANIFEST_FILE="$0.runfiles/MANIFEST"
elif [[ -f "$0.runfiles/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then
export RUNFILES_DIR="$0.runfiles"
fi
fi
if [[ -f "${RUNFILES_DIR:-/dev/null}/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then
source "${RUNFILES_DIR}/bazel_tools/tools/bash/runfiles/runfiles.bash"
elif [[ -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then
source "$(grep -m1 "^bazel_tools/tools/bash/runfiles/runfiles.bash " \
"$RUNFILES_MANIFEST_FILE" | cut -d ' ' -f 2-)"
else
echo >&2 "ERROR: cannot find @bazel_tools//tools/bash/runfiles:runfiles.bash"
exit 1
fi
# --- end runfiles.bash initialization ---
source "$(rlocation "io_bazel/src/test/shell/integration_test_setup.sh")" \
|| { echo "integration_test_setup.sh not found!" >&2; exit 1; }
# `uname` returns the current platform, e.g "MSYS_NT-10.0" or "Linux".
# `tr` converts all upper case letters to lower case.
# `case` matches the result if the `uname | tr` expression to string prefixes
# that use the same wildcards as names do in Bash, i.e. "msys*" matches strings
# starting with "msys", and "*" matches everything (it's the default case).
case "$(uname -s | tr [:upper:] [:lower:])" in
msys*)
# As of 2019-02-20, Bazel on Windows only supports MSYS Bash.
declare -r is_windows=true
;;
*)
declare -r is_windows=false
;;
esac
# EXE_EXT is appended to built binary paths below (bazel-bin/.../x${EXE_EXT}).
if "$is_windows"; then
  # Disable MSYS path conversion that converts path-looking command arguments to
  # Windows paths (even if they arguments are not in fact paths).
  export MSYS_NO_PATHCONV=1
  export MSYS2_ARG_CONV_EXCL="*"
  declare -r EXE_EXT=".exe"
else
  declare -r EXE_EXT=""
fi
# ----------------------------------------------------------------------
# HELPER FUNCTIONS
# ----------------------------------------------------------------------
# Writes a java file that prints System.getProperty(argN).
#
# The program prints every JVM definition of the form argN, where N >= 0, until
# the first N is found for which argN is empty.
#
# Args:
#   $1: directory (package path) where the file will be written
function create_java_file_that_prints_jvm_args() {
  local -r pkg="$1"; shift
  mkdir -p "$pkg" || fail "mkdir -p $pkg"
  # Quoted 'eof' delimiter: the Java source is written verbatim, with no
  # shell expansion inside the here-doc.
  cat >"$pkg/A.java" <<'eof'
package test;
public class A {
  public static void main(String[] args) {
    for (int i = 0; ; ++i) {
      String value = System.getProperty("arg" + i);
      if (value == null) {
        break;
      } else {
        System.out.printf("arg%d=(%s)%n", i, value);
      }
    }
  }
}
eof
}
# Writes a BUILD file for a java_binary with an untokenizable jvm_flags entry.
#
# The flag contains an unbalanced single quote, so Bash-style tokenization of
# the flag value must fail.
#
# Args:
#   $1: directory (package path) where the file will be written
function create_build_file_for_untokenizable_flag() {
  local -r pkg="$1"; shift
  mkdir -p "$pkg" || fail "mkdir -p $pkg"
  cat >"$pkg/BUILD" <<'eof'
java_binary(
    name = "cannot_tokenize",
    srcs = ["A.java"],
    main_class = "A",
    jvm_flags = ["-Darg0='abc"],
)
eof
}
# Writes a BUILD file for a java_binary with many different jvm_flags entries.
#
# Each entry is single-quoted at the Bash level; the inner values exercise
# spaces, carets, double quotes and backslash runs that are tricky for the
# launcher to re-escape (especially under Windows command-line quoting rules).
#
# Use this together with assert_output_of_the_program_with_many_jvm_flags().
#
# Args:
#   $1: directory (package path) where the file will be written
function create_build_file_with_many_jvm_flags() {
  local -r pkg="$1"; shift
  mkdir -p "$pkg" || fail "mkdir -p $pkg"
  cat >"$pkg/BUILD" <<'eof'
java_binary(
    name = "x",
    srcs = ["A.java"],
    main_class = "test.A",
    jvm_flags = [
        "-Darg0=''",
        "-Darg1=' '",
        "-Darg2='\"'",
        "-Darg3='\"\\'",
        "-Darg4='\\'",
        "-Darg5='\\\"'",
        "-Darg6='with space'",
        "-Darg7='with^caret'",
        "-Darg8='space ^caret'",
        "-Darg9='caret^ space'",
        "-Darg10='with\"quote'",
        "-Darg11='with\\backslash'",
        "-Darg12='one\\ backslash and \\space'",
        "-Darg13='two\\\\backslashes'",
        "-Darg14='two\\\\ backslashes \\\\and space'",
        "-Darg15='one\\\"x'",
        "-Darg16='two\\\\\"x'",
        "-Darg17='a \\ b'",
        "-Darg18='a \\\" b'",
        "-Darg19='A'",
        "-Darg20='\"a\"'",
        "-Darg21='B C'",
        "-Darg22='\"b c\"'",
        "-Darg23='D\"E'",
        "-Darg24='\"d\"e\"'",
        "-Darg25='C:\\F G'",
        "-Darg26='\"C:\\f g\"'",
        "-Darg27='C:\\H\"I'",
        "-Darg28='\"C:\\h\"i\"'",
        "-Darg29='C:\\J\\\"K'",
        "-Darg30='\"C:\\j\\\"k\"'",
        "-Darg31='C:\\L M '",
        "-Darg32='\"C:\\l m \"'",
        "-Darg33='C:\\N O\\'",
        "-Darg34='\"C:\\n o\\\"'",
        "-Darg35='C:\\P Q\\ '",
        "-Darg36='\"C:\\p q\\ \"'",
        "-Darg37='C:\\R\\S\\'",
        "-Darg38='C:\\R x\\S\\'",
        "-Darg39='\"C:\\r\\s\\\"'",
        "-Darg40='\"C:\\r x\\s\\\"'",
        "-Darg41='C:\\T U\\W\\'",
        "-Darg42='\"C:\\t u\\w\\\"'",
    ],
)
eof
}
# Asserts that the $TEST_log contains all JVM definitions of the form argN.
#
# The expected values correspond 1:1 to the jvm_flags entries written by
# create_build_file_with_many_jvm_flags(), after shell unquoting; the patterns
# below are regexes, hence the doubled backslashes.
#
# See create_build_file_with_many_jvm_flags() and
# create_java_file_that_prints_jvm_args().
function assert_output_of_the_program_with_many_jvm_flags() {
  expect_log 'arg0=()'
  expect_log 'arg1=( )'
  expect_log 'arg2=(")'
  expect_log 'arg3=("\\)'
  expect_log 'arg4=(\\)'
  expect_log 'arg5=(\\")'
  expect_log 'arg6=(with space)'
  expect_log 'arg7=(with^caret)'
  expect_log 'arg8=(space ^caret)'
  expect_log 'arg9=(caret^ space)'
  expect_log 'arg10=(with"quote)'
  expect_log 'arg11=(with\\backslash)'
  expect_log 'arg12=(one\\ backslash and \\space)'
  expect_log 'arg13=(two\\\\backslashes)'
  expect_log 'arg14=(two\\\\ backslashes \\\\and space)'
  expect_log 'arg15=(one\\"x)'
  expect_log 'arg16=(two\\\\"x)'
  expect_log 'arg17=(a \\ b)'
  expect_log 'arg18=(a \\" b)'
  expect_log 'arg19=(A)'
  expect_log 'arg20=("a")'
  expect_log 'arg21=(B C)'
  expect_log 'arg22=("b c")'
  expect_log 'arg23=(D"E)'
  expect_log 'arg24=("d"e")'
  expect_log 'arg25=(C:\\F G)'
  expect_log 'arg26=("C:\\f g")'
  expect_log 'arg27=(C:\\H"I)'
  expect_log 'arg28=("C:\\h"i")'
  expect_log 'arg29=(C:\\J\\"K)'
  expect_log 'arg30=("C:\\j\\"k")'
  expect_log 'arg31=(C:\\L M )'
  expect_log 'arg32=("C:\\l m ")'
  expect_log 'arg33=(C:\\N O\\)'
  expect_log 'arg34=("C:\\n o\\")'
  expect_log 'arg35=(C:\\P Q\\ )'
  expect_log 'arg36=("C:\\p q\\ ")'
  expect_log 'arg37=(C:\\R\\S\\)'
  expect_log 'arg38=(C:\\R x\\S\\)'
  expect_log 'arg39=("C:\\r\\s\\")'
  expect_log 'arg40=("C:\\r x\\s\\")'
  expect_log 'arg41=(C:\\T U\\W\\)'
  expect_log 'arg42=("C:\\t u\\w\\")'
}
# Runs a program, expecting it to succeed. Redirects all output to $TEST_log.
#
# The RUNFILES_* variables are cleared in the subshell so the launched binary
# resolves its own runfiles rather than inheriting this test's.
#
# Args:
#   $1: path of the program
function expect_program_runs() {
  local -r path="$1"; shift
  (RUNFILES_DIR= \
   RUNFILES_MANIFEST_FILE= \
   RUNFILES_MANIFEST_ONLY= \
   "$path" >&"$TEST_log" ; ) \
  || fail "Expected running '$path' succeed but failed with exit code $?"
}
# Runs a program, expecting it to fail. Redirects all output to $TEST_log.
#
# Mirror of expect_program_runs(); the trailing '|| true' keeps the expected
# non-zero status from tripping 'set -e'.
#
# Args:
#   $1: path of the program
function expect_program_cannot_run() {
  local -r path="$1"; shift
  (RUNFILES_DIR= \
   RUNFILES_MANIFEST_FILE= \
   RUNFILES_MANIFEST_ONLY= \
   "$path" >&"$TEST_log" ; ) \
  && fail "Expected running '$path' to fail but succeeded" || true
}
# ----------------------------------------------------------------------
# TESTS
# ----------------------------------------------------------------------
# Builds and runs a java_binary with many tricky jvm_flags values and checks
# each one reaches the JVM intact.
function test_jvm_flags_escaping() {
  local -r pkg="${FUNCNAME[0]}"  # unique package name for this test
  create_java_file_that_prints_jvm_args "$pkg"
  create_build_file_with_many_jvm_flags "$pkg"

  # On all platforms, Bazel can build and run the target.
  bazel build --verbose_failures \
    "${pkg}:x" &>"$TEST_log" || fail "expected success"
  expect_program_runs "bazel-bin/$pkg/x${EXE_EXT}"
  assert_output_of_the_program_with_many_jvm_flags
}
# Checks that an untokenizable jvm_flags entry is rejected: at build time on
# Windows (where the launcher tokenizes flags), at run time elsewhere (where
# the generated stub script hits a shell syntax error).
function test_untokenizable_jvm_flag_when_escaping_is_enabled() {
  local -r pkg="${FUNCNAME[0]}"  # unique package name for this test
  create_java_file_that_prints_jvm_args "$pkg"
  create_build_file_for_untokenizable_flag "$pkg"

  if "$is_windows"; then
    # On Windows, Bazel will check the flag.
    bazel build --verbose_failures "${pkg}:cannot_tokenize" \
      2>"$TEST_log" && fail "expected failure" || true
    expect_log "Error in tokenize: unterminated quotation"
  else
    # On other platforms, Bazel will build the target but it fails to run.
    bazel build --verbose_failures "${pkg}:cannot_tokenize" \
      2>"$TEST_log" || fail "expected success"
    expect_program_cannot_run "bazel-bin/$pkg/cannot_tokenize${EXE_EXT}"
    expect_log "syntax error"
  fi
}
run_suite "Tests about how Bazel passes java_binary.jvm_flags to the binary"
| true
|
038b52dcbb14caf4c65e6696b683cbe4d0ea481e
|
Shell
|
xukai286/farm
|
/numa/iperf-tcp/parse.sh
|
UTF-8
| 379
| 2.6875
| 3
|
[] |
no_license
|
#! /bin/sh
# Aggregates iperf TCP bandwidth numbers from per-run log files into one
# result file per (buffer size, packet size) combination.
# $pss, $bss, $cpunodes, $memnodes, $Logdir and $Parsedir are all defined in
# the sourced file below — TODO confirm their shapes against ./defination.
source ./defination
test -e $Parsedir || mkdir -p $Parsedir
# bandwidth
for ps in $pss
do
  for bs in $bss
  do
    for cpunode in $cpunodes
    do
      for memnode in $memnodes
      do
        # example output
        # [ 4]  0.0-60.0 sec  63.3 GBytes  9.06 Gbits/sec
        # All (cpunode, memnode) pairs are appended to the same per-(bs, ps)
        # file, one bandwidth value per line.
        # NOTE(review): cut -c 35-39 extracts the bandwidth by fixed byte
        # offsets — fragile if iperf's column layout ever changes.
        tail -1 $Logdir/iperf-c$cpunode-m$memnode-$bs-$ps.log | cut -c 35-39 >> $Parsedir/iperf-$bs-$ps.log
      done
    done
  done
done
| true
|
eded9e14af6c6791f45100d329f8e3c36c5c353a
|
Shell
|
hongzhouye/mpe-mol
|
/script/get_qchem_grid.sh
|
UTF-8
| 381
| 3.15625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Runs a Q-Chem grid calculation for a molecule and post-processes the output.
#
# Usage: get_qchem_grid.sh <path> <prefix>
#   path   - directory containing <prefix>_grid.in
#   prefix - molecule name; input/output files are <prefix>_grid.{in,out}
if [ $# != 2 ]
then
    echo "Usage: path prefix"
    # Fix: a bare 'exit' here returned 0 (the status of the echo above),
    # so callers could not detect the usage error.
    exit 1
fi
run_path=$PWD
path=$1
name=$2
# Scratch location used by Q-Chem (site-specific).
# NOTE(review): scratchdir is assigned but never referenced or exported
# below — confirm whether Q-Chem picks it up some other way.
scratchdir=/scratch/hzye2011/$name
# Abort if the target directory does not exist instead of running Q-Chem
# in the wrong place.
cd "$path" || exit 1
# obtain grid
/home/hzye2011/local/bin/shutils/qchem.20180508 ${name}_grid.in ${name}_grid.out
python ${run_path}/writegrid.py $name
| true
|
e1dd3ee110374070e863a21c97a3e5d705a15881
|
Shell
|
jnonino/environment-setup
|
/linux/maven.sh
|
UTF-8
| 486
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Installs Apache Maven system-wide under /opt/maven and wires MAVEN_HOME and
# PATH into /etc/profile.d/maven.sh. Must run as root (writes to /opt and
# /etc/profile.d).
MAVEN_VERSION=3.6.1
echo "Install Maven"
# NOTE(review): plain-http mirror download with no checksum verification —
# consider an https archive URL and a SHA-512 check.
wget http://apache.dattatec.com/maven/maven-3/$MAVEN_VERSION/binaries/apache-maven-$MAVEN_VERSION-bin.tar.gz
tar -zxvf apache-maven-$MAVEN_VERSION-bin.tar.gz
rm -rf apache-maven-$MAVEN_VERSION-bin.tar.gz
mv apache-maven-$MAVEN_VERSION /opt/maven
# Backslash-escaped $PATH/$MAVEN_HOME so expansion happens at login time,
# not now.
echo "export MAVEN_HOME=/opt/maven" >> /etc/profile.d/maven.sh
echo "export PATH=\$PATH:\$MAVEN_HOME/bin:\$MAVEN_HOME/lib" >> /etc/profile.d/maven.sh
chmod +x /etc/profile.d/maven.sh
| true
|
98007c459e08cfae2e2ad19bf7845ea3ac0b35f7
|
Shell
|
gilbutITbook/006718
|
/chapter03/sysloger
|
UTF-8
| 164
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Saves the last N lines of /var/log/syslog (N = $1, default 10, max 30)
# to ./syslogresultYYMMDD. Exits 1 with "too big" when N exceeds 30.
result=syslogresult$(date +%y%m%d)
# Apply the default before the range check so an omitted argument works
# (the original tested an unset $1, which made '[' error out).
line=${1:-10}
if [ "$line" -gt 30 ]
then
  echo "too big"
  exit 1
fi
# Fix: the original wrote -n$1ine, which expanded $1 followed by the literal
# text "ine" — the computed $line (and its default) was never used.
tail /var/log/syslog -n"$line" > "$result"
exit 0
| true
|
7e384b9baa430b1343416a53cfc4677c64e46828
|
Shell
|
B-Rich/community-mirror
|
/mandvd/repos/community-i686/PKGBUILD
|
UTF-8
| 1,209
| 2.578125
| 3
|
[] |
no_license
|
# $Id$
# Maintainer: Jaroslav Lichtblau <dragonlord@aur.archlinux.org>
# Contributor: JJDaNiMoTh <jjdanimoth.aur@gmail.com>
# Contributor: Jesus Lazaro Plaza <jesuslazaro84@gmail.com>
pkgname=mandvd
pkgver=2.6
pkgrel=2
pkgdesc="A program to simply create DVD Video"
arch=('i686' 'x86_64')
url="http://www.kde-apps.org/content/show.php/ManDVD?content=83906"
license=('GPL')
depends=('dvd+rw-tools>=5.21.4' 'dvd-slideshow>=0.7.5' 'mjpegtools>=1.8.0' 'mplayer' 'qt3' 'xine-ui>=0.99.4')
changelog=$pkgname.changelog
source=(http://vectorlinux.osuosl.org/veclinux-7.0/source/abs/xap/$pkgname/$pkgname-$pkgver-1.fc12.tar.gz)
sha256sums=('967fd66da31f0619001b3aaea494ab41579ab7ffdc6a1b0a51a4a4add012eb9c')
build() {
  cd ${srcdir}/$pkgname-$pkgver
  qmake-qt3
  # Lower optimization from -O2 to -O1 — presumably works around a
  # miscompilation with this old Qt3 codebase; TODO confirm.
  sed -i "s|O2|O1|" ./Makefile
  # Point the include path at the system qt3 headers instead of $QTDIR.
  sed -i 's|-I$(QTDIR)/include|-I/usr/include/qt3|' Makefile
  # Add missing <unistd.h> includes required by modern glibc.
  sed -i '55 i\#include <unistd.h>' main.cpp
  sed -i '28 i\#include <unistd.h>' mandvd.cpp
  make QTDIR=/usr MOC=/usr/bin/moc-qt3 UIC=/usr/bin/uic-qt3
}
package() {
  cd ${srcdir}/$pkgname-$pkgver
  # Install the binary, the icon and the .desktop launcher into $pkgdir.
  install -D -m755 $pkgname ${pkgdir}/usr/bin/$pkgname

# Creating menu item
  install -D -m644 mandvdico.png ${pkgdir}/usr/share/pixmaps/$pkgname.png
  install -D -m644 $pkgname.desktop ${pkgdir}/usr/share/applications/$pkgname.desktop
}
| true
|
42a072b01a5be754937085b89d35aac7eba560f6
|
Shell
|
abenjak/filter_phylogic_clusters
|
/cluster.sh
|
UTF-8
| 2,520
| 2.890625
| 3
|
[] |
no_license
|
# reproduce the clustering in in_absolute_total_sequenza by using a bash loop
# clustering round1 already done
#
# Iterative PhylogicNDT clustering: each round re-clusters only the samples
# whose previous round blacklisted new mutations, then re-filters clusters;
# the loop stops when a round blacklists nothing.
r=1
# for fname in Cluster.round${r}.blacklist/*log; do tail -n2 $fname | head -1; done | sort -k2,2nr | grep -v -P "\t0$" > Cluster.round${r}.blacklisted_samples.txt
if [ $(wc -l < Cluster.round${r}.blacklisted_samples.txt) -eq 0 ]; then
echo clustering done
else
while :
do
r=$(( $r + 1 ))
# Copy everything from the previous clustering and just do the needed samples
cp -r Cluster.round$(( $r - 1 )) Cluster.round${r} &&cd Cluster.round${r}
# cut -f1 ../Cluster.round$(( $r - 1 )).blacklisted_samples.txt | while read fname; do
# One Slurm job per blacklisted sample, run inside a Singularity image;
# the trailing '&' fans the jobs out and 'wait' below is the barrier.
for fname in $(cut -f1 ../Cluster.round$(( $r - 1 )).blacklisted_samples.txt); do
srun -J $fname --mem 5000 -t 1000 -o $fname.log -e $fname.log singularity exec -B /storage/ibu_wes_pipe/postprocessing/phylogicNDT/PhylogicNDT/ -B /storage/ibu-projects/p375_HNSCC_ZimmerAebersold /storage/ibu_wes_pipe/postprocessing/phylogicNDT/images/phylogicNDT.v1.img /storage/ibu_wes_pipe/postprocessing/phylogicNDT/PhylogicNDT/PhylogicNDT.py Cluster -i $fname -sif ../sif/$fname.sif --order_by_timepoint -ni 1000 --driver_genes_file /storage/ibu-projects/p375_HNSCC_ZimmerAebersold/PhylogicNDT/IntOGen-DriverGenes_HNSC.list --seed 123 -bl ../Cluster.round$(( $r - 1 )).blacklist/$fname.blacklist.txt &
done &&wait &&cd ..
# filter clusters (use --old_blacklist)
# Cluster filtering carries forward the previous round's blacklist so entries
# accumulate across rounds.
cp -r Cluster.round$(( $r - 1 )).blacklist Cluster.round${r}.blacklist &&cd Cluster.round${r}.blacklist
for fname in ../Cluster.round${r}/*.mut_ccfs.txt; do
srun -o $(basename $fname .mut_ccfs.txt).blacklist.log -e $(basename $fname .mut_ccfs.txt).blacklist.log singularity exec -B /storage/ibu-projects/p375_HNSCC_ZimmerAebersold/PhylogicNDT /storage/ibu_wes_pipe/postprocessing/phylogicNDT/images/phylogicNDT.v1.img Rscript ../filter_clusters.R --old_blacklist ../Cluster.round$(( $r - 1 )).blacklist/$(basename $fname .mut_ccfs.txt).blacklist.txt --cancer_genes ../../IntOGen-DriverGenes_HNSC.list $fname &
done &&wait &&cd ..
#Which samples got something blacklisted:
# Second-to-last log line holds "<sample>\t<count>"; keep only non-zero counts,
# sorted by count descending — TODO confirm against filter_clusters.R output.
for fname in Cluster.round${r}.blacklist/*log; do tail -n2 $fname | head -1; done | sort -k2,2nr | grep -v -P "\t0$" > Cluster.round${r}.blacklisted_samples.txt
if [ $(wc -l < Cluster.round${r}.blacklisted_samples.txt) -eq 0 ]
then
echo clustering done
break
fi
done
fi
| true
|
218fd48a9897cdf87ec142d0299e1d2c0cb5f560
|
Shell
|
inissa/system-management
|
/buildscripts/buildlibidn
|
UTF-8
| 457
| 3.125
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash -e
# Package build script for libidn; the sourced master.sh at the bottom drives
# download/configure/make using the PKG* variables and hooks defined here.
export PKGNAME=libidn
export PKGVER=1.33
export PKGTAR=${PKGNAME}-${PKGVER}.tar.gz
export PKGURL="https://ftp.gnu.org/gnu/libidn/${PKGTAR}"
export MAKE_JOBS_FLAGS="-j4"

# Post-install hook invoked by master.sh after 'make install' into $DEST.
make_install_post() {
    # Drop emacs support files and image files from the info docs.
    rm -rf ${DEST}/usr/share/emacs ${DEST}/usr/share/info/*.png
    # Move the runtime shared library to /lib and re-point the /usr/lib
    # dev symlink at it via a relative path.
    install -dm755 ${DEST}/lib
    mv ${DEST}/usr/lib/libidn.so.* ${DEST}/lib
    ln -sf ../../lib/$(readlink ${DEST}/usr/lib/libidn.so) ${DEST}/usr/lib/libidn.so
}

. $(dirname $0)/master.sh
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.